drivers/net/fealnx.c (Myson MTD-8xx 100/10M Ethernet PCI adapter driver), from the Linux kernel mirror at git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, tag v2.6.33-rc6.

/*
	Written 1998-2000 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support information and updates available at
	http://www.scyld.com/network/pci-skeleton.html

	Linux kernel updates:

	Version 2.51, Nov 17, 2001 (jgarzik):
	- Add ethtool support
	- Replace some MII-related magic numbers with constants
*/

#define DRV_NAME	"fealnx"
#define DRV_VERSION	"2.52"
#define DRV_RELDATE	"Sep-11-2006"

static int debug;		/* 1-> print debug message */
static int max_interrupt_work = 20;

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
static int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme. */
/* Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;

/* Used to pass the media type, etc. */
/* Both 'options[]' and 'full_duplex[]' should exist for driver */
/* interoperability. */
/* The media type is usually passed in 'options[]'. */
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };

/* Operational parameters that are set at compile time. */
/* Keep the ring sizes a power of two for compile efficiency. */
/* The compiler will convert <unsigned>'%'<2^N> into a bit mask. */
/* Making the Tx ring too large decreases the effectiveness of channel */
/* bonding and packet priority. */
/* There are no ill effects from too-large receive rings. */
// 88-12-9 modify,
// #define TX_RING_SIZE 16
// #define RX_RING_SIZE 32
#define TX_RING_SIZE 6
#define RX_RING_SIZE 12
#define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct fealnx_desc)
#define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct fealnx_desc)

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer. */


/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/bitops.h>

#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>

/* These identify the driver base version and may not be removed. */
static const char version[] __devinitconst =
	KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n";


/* This driver was written to use PCI memory space, however some x86 systems
   work only with I/O space accesses. */
#ifndef __alpha__
#define USE_IO_OPS
#endif

/* Kernel compatibility defines, some common to David Hinds' PCMCIA package. */
/* This is only in the support-all-kernels source code. */

#define RUN_AT(x) (jiffies + (x))

MODULE_AUTHOR("Myson or whoever");
MODULE_DESCRIPTION("Myson MTD-8xx 100/10M Ethernet PCI Adapter Driver");
MODULE_LICENSE("GPL");
module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(multicast_filter_limit, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(max_interrupt_work, "fealnx maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "fealnx enable debugging (0-1)");
MODULE_PARM_DESC(rx_copybreak, "fealnx copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(multicast_filter_limit, "fealnx maximum number of filtered multicast addresses");
MODULE_PARM_DESC(options, "fealnx: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "fealnx full duplex setting(s) (1)");

enum {
	MIN_REGION_SIZE = 136,
};

/* A chip capabilities table, matching the entries in pci_tbl[] above. */
enum chip_capability_flags {
	HAS_MII_XCVR,
	HAS_CHIP_XCVR,
};

/* 89/6/13 add, */
/* for different PHY */
enum phy_type_flags {
	MysonPHY = 1,
	AhdocPHY = 2,
	SeeqPHY = 3,
	MarvellPHY = 4,
	Myson981 = 5,
	LevelOnePHY = 6,
	OtherPHY = 10,
};

struct chip_info {
	char *chip_name;
	int flags;
};

static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
	{ "100/10M Ethernet PCI Adapter",	HAS_MII_XCVR },
	{ "100/10M Ethernet PCI Adapter",	HAS_CHIP_XCVR },
	{ "1000/100/10M Ethernet PCI Adapter",	HAS_MII_XCVR },
};

/* Offsets to the Command and Status Registers. */
enum fealnx_offsets {
	PAR0 = 0x0,		/* physical address 0-3 */
	PAR1 = 0x04,		/* physical address 4-5 */
	MAR0 = 0x08,		/* multicast address 0-3 */
	MAR1 = 0x0C,		/* multicast address 4-7 */
	FAR0 = 0x10,		/* flow-control address 0-3 */
	FAR1 = 0x14,		/* flow-control address 4-5 */
	TCRRCR = 0x18,		/* receive & transmit configuration */
	BCR = 0x1C,		/* bus command */
	TXPDR = 0x20,		/* transmit polling demand */
	RXPDR = 0x24,		/* receive polling demand */
	RXCWP = 0x28,		/* receive current word pointer */
	TXLBA = 0x2C,		/* transmit list base address */
	RXLBA = 0x30,		/* receive list base address */
	ISR = 0x34,		/* interrupt status */
	IMR = 0x38,		/* interrupt mask */
	FTH = 0x3C,		/* flow control high/low threshold */
	MANAGEMENT = 0x40,	/* bootrom/eeprom and mii management */
	TALLY = 0x44,		/* tally counters for crc and mpa */
	TSR = 0x48,		/* tally counter for transmit status */
	BMCRSR = 0x4c,		/* basic mode control and status */
	PHYIDENTIFIER = 0x50,	/* phy identifier */
	ANARANLPAR = 0x54,	/* auto-negotiation advertisement and link
				   partner ability */
	ANEROCR = 0x58,		/* auto-negotiation expansion and pci conf. */
	BPREMRPSR = 0x5c,	/* bypass & receive error mask and phy status */
};

/* Bits in the interrupt status/enable registers. */
/* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
enum intr_status_bits {
	RFCON = 0x00020000,	/* receive flow control xon packet */
	RFCOFF = 0x00010000,	/* receive flow control xoff packet */
	LSCStatus = 0x00008000,	/* link status change */
	ANCStatus = 0x00004000,	/* autonegotiation completed */
	FBE = 0x00002000,	/* fatal bus error */
	FBEMask = 0x00001800,	/* mask bit12-11 */
	ParityErr = 0x00000000,	/* parity error */
	TargetErr = 0x00001000,	/* target abort */
	MasterErr = 0x00000800,	/* master error */
	TUNF = 0x00000400,	/* transmit underflow */
	ROVF = 0x00000200,	/* receive overflow */
	ETI = 0x00000100,	/* transmit early int */
	ERI = 0x00000080,	/* receive early int */
	CNTOVF = 0x00000040,	/* counter overflow */
	RBU = 0x00000020,	/* receive buffer unavailable */
	TBU = 0x00000010,	/* transmit buffer unavailable */
	TI = 0x00000008,	/* transmit interrupt */
	RI = 0x00000004,	/* receive interrupt */
	RxErr = 0x00000002,	/* receive error */
};

/* Bits in the NetworkConfig register, W for writing, R for reading */
/* FIXME: some names are invented by me. Marked with (name?) */
/* If you have docs and know bit names, please fix 'em */
enum rx_mode_bits {
	CR_W_ENH = 0x02000000,		/* enhanced mode (name?) */
	CR_W_FD = 0x00100000,		/* full duplex */
	CR_W_PS10 = 0x00080000,		/* 10 mbit */
	CR_W_TXEN = 0x00040000,		/* tx enable (name?) */
	CR_W_PS1000 = 0x00010000,	/* 1000 mbit */
	/* CR_W_RXBURSTMASK = 0x00000e00, I'm unsure about this */
	CR_W_RXMODEMASK = 0x000000e0,
	CR_W_PROM = 0x00000080,		/* promiscuous mode */
	CR_W_AB = 0x00000040,		/* accept broadcast */
	CR_W_AM = 0x00000020,		/* accept multicast */
	CR_W_ARP = 0x00000008,		/* receive runt pkt */
	CR_W_ALP = 0x00000004,		/* receive long pkt */
	CR_W_SEP = 0x00000002,		/* receive error pkt */
	CR_W_RXEN = 0x00000001,		/* rx enable (unicast?) (name?) */

	CR_R_TXSTOP = 0x04000000,	/* tx stopped (name?) */
	CR_R_FD = 0x00100000,		/* full duplex detected */
	CR_R_PS10 = 0x00080000,		/* 10 mbit detected */
	CR_R_RXSTOP = 0x00008000,	/* rx stopped (name?) */
};

/* The Tulip Rx and Tx buffer descriptors. */
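/* Layout note (editorial assumption, not from the Myson datasheet): the chip
   is given the DMA address of the ring and walks status/control/buffer/
   next_desc; next_desc_logical and skbuff appear to be driver-side
   bookkeeping that the hardware never interprets. */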
struct fealnx_desc {
	s32 status;
	s32 control;
	u32 buffer;
	u32 next_desc;
	struct fealnx_desc *next_desc_logical;
	struct sk_buff *skbuff;
	u32 reserved1;
	u32 reserved2;
};

/* Bits in network_desc.status */
enum rx_desc_status_bits {
	RXOWN = 0x80000000,	/* own bit */
	FLNGMASK = 0x0fff0000,	/* frame length */
	FLNGShift = 16,
	MARSTATUS = 0x00004000,	/* multicast address received */
	BARSTATUS = 0x00002000,	/* broadcast address received */
	PHYSTATUS = 0x00001000,	/* physical address received */
	RXFSD = 0x00000800,	/* first descriptor */
	RXLSD = 0x00000400,	/* last descriptor */
	ErrorSummary = 0x80,	/* error summary */
	RUNT = 0x40,		/* runt packet received */
	LONG = 0x20,		/* long packet received */
	FAE = 0x10,		/* frame align error */
	CRC = 0x08,		/* crc error */
	RXER = 0x04,		/* receive error */
};

enum rx_desc_control_bits {
	RXIC = 0x00800000,	/* interrupt control */
	RBSShift = 0,
};

enum tx_desc_status_bits {
	TXOWN = 0x80000000,	/* own bit */
	JABTO = 0x00004000,	/* jabber timeout */
	CSL = 0x00002000,	/* carrier sense lost */
	LC = 0x00001000,	/* late collision */
	EC = 0x00000800,	/* excessive collision */
	UDF = 0x00000400,	/* fifo underflow */
	DFR = 0x00000200,	/* deferred */
	HF = 0x00000100,	/* heartbeat fail */
	NCRMask = 0x000000ff,	/* collision retry count */
	NCRShift = 0,
};

enum tx_desc_control_bits {
	TXIC = 0x80000000,	/* interrupt control */
	ETIControl = 0x40000000,	/* early transmit interrupt */
	TXLD = 0x20000000,	/* last descriptor */
	TXFD = 0x10000000,	/* first descriptor */
	CRCEnable = 0x08000000,	/* crc control */
	PADEnable = 0x04000000,	/* padding control */
	RetryTxLC = 0x02000000,	/* retry late collision */
	PKTSMask = 0x3ff800,	/* packet size bit21-11 */
	PKTSShift = 11,
	TBSMask = 0x000007ff,	/* transmit buffer bit 10-0 */
	TBSShift = 0,
};

/* BootROM/EEPROM/MII Management Register */
#define MASK_MIIR_MII_READ	0x00000000
#define MASK_MIIR_MII_WRITE	0x00000008
#define MASK_MIIR_MII_MDO	0x00000004
#define MASK_MIIR_MII_MDI	0x00000002
#define MASK_MIIR_MII_MDC	0x00000001

/* ST+OP+PHYAD+REGAD+TA */
#define OP_READ		0x6000	/* ST:01+OP:10+PHYAD+REGAD+TA:Z0 */
#define OP_WRITE	0x5002	/* ST:01+OP:01+PHYAD+REGAD+TA:10 */

/* ------------------------------------------------------------------------- */
/* Constants for Myson PHY */
/* ------------------------------------------------------------------------- */
#define MysonPHYID	0xd0000302
/* 89-7-27 add, (begin) */
#define MysonPHYID0	0x0302
#define StatusRegister	18
#define SPEED100	0x0400	// bit10
#define FULLMODE	0x0800	// bit11
/* 89-7-27 add, (end) */

/* ------------------------------------------------------------------------- */
/* Constants for Seeq 80225 PHY */
/* ------------------------------------------------------------------------- */
#define SeeqPHYID0	0x0016

#define MIIRegister18	18
#define SPD_DET_100	0x80
#define DPLX_DET_FULL	0x40

/* ------------------------------------------------------------------------- */
/* Constants for Ahdoc 101 PHY */
/* ------------------------------------------------------------------------- */
#define AhdocPHYID0	0x0022

#define DiagnosticReg	18
#define DPLX_FULL	0x0800
#define Speed_100	0x0400

/* 89/6/13 add, */
/* -------------------------------------------------------------------------- */
/* Constants */
/* -------------------------------------------------------------------------- */
#define MarvellPHYID0		0x0141
#define LevelOnePHYID0		0x0013

#define MII1000BaseTControlReg	9
#define MII1000BaseTStatusReg	10
#define SpecificReg		17

/* for 1000BaseT Control Register */
#define PHYAbletoPerform1000FullDuplex	0x0200
#define PHYAbletoPerform1000HalfDuplex	0x0100
#define PHY1000AbilityMask		0x300

// for phy specific status register, marvell phy.
#define SpeedMask	0x0c000
#define Speed_1000M	0x08000
#define Speed_100M	0x4000
#define Speed_10M	0
#define Full_Duplex	0x2000

// 89/12/29 add, for phy specific status register, levelone phy, (begin)
#define LXT1000_100M	0x08000
#define LXT1000_1000M	0x0c000
#define LXT1000_Full	0x200
// 89/12/29 add, for phy specific status register, levelone phy, (end)

/* for 3-in-1 case, BMCRSR register */
#define LinkIsUp2	0x00040000

/* for PHY */
#define LinkIsUp	0x0004


struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct fealnx_desc *rx_ring;
	struct fealnx_desc *tx_ring;

	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	spinlock_t lock;

	struct net_device_stats stats;

	/* Media monitoring timer. */
	struct timer_list timer;

	/* Reset timer */
	struct timer_list reset_timer;
	int reset_timer_armed;
	unsigned long crvalue_sv;
	unsigned long imrvalue_sv;

	/* Frequently used values: keep some adjacent for cache effect. */
	int flags;
	struct pci_dev *pci_dev;
	unsigned long crvalue;
	unsigned long bcrvalue;
	unsigned long imrvalue;
	struct fealnx_desc *cur_rx;
	struct fealnx_desc *lack_rxbuf;
	int really_rx_count;
	struct fealnx_desc *cur_tx;
	struct fealnx_desc *cur_tx_copy;
	int really_tx_count;
	int free_tx_count;
	unsigned int rx_buf_sz;	/* Based on MTU+slack. */

	/* These values keep track of the transceiver/media in use. */
	unsigned int linkok;
	unsigned int line_speed;
	unsigned int duplexmode;
	unsigned int default_port:4;	/* Last dev->if_port value. */
	unsigned int PHYType;

	/* MII transceiver section. */
	int mii_cnt;		/* Number of MII transceivers found. */
	unsigned char phys[2];	/* MII device addresses. */
	struct mii_if_info mii;
	void __iomem *mem;
};


static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int netdev_open(struct net_device *dev);
static void getlinktype(struct net_device *dev);
static void getlinkstatus(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void reset_timer(unsigned long data);
static void fealnx_tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static int netdev_rx(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static void __set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int netdev_close(struct net_device *dev);
static void reset_rx_descriptors(struct net_device *dev);
static void reset_tx_descriptors(struct net_device *dev);

static void stop_nic_rx(void __iomem *ioaddr, long crvalue)
{
	int delay = 0x1000;
	iowrite32(crvalue & ~(CR_W_RXEN), ioaddr + TCRRCR);
	while (--delay) {
		if ((ioread32(ioaddr + TCRRCR) & CR_R_RXSTOP) == CR_R_RXSTOP)
			break;
	}
}


static void stop_nic_rxtx(void __iomem *ioaddr, long crvalue)
{
	int delay = 0x1000;
	iowrite32(crvalue & ~(CR_W_RXEN+CR_W_TXEN), ioaddr + TCRRCR);
	while (--delay) {
		if ((ioread32(ioaddr + TCRRCR) & (CR_R_RXSTOP+CR_R_TXSTOP))
			== (CR_R_RXSTOP+CR_R_TXSTOP))
			break;
	}
}

static const struct net_device_ops netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_get_stats		= get_stats,
	.ndo_set_multicast_list	= set_rx_mode,
	.ndo_do_ioctl		= mii_ioctl,
	.ndo_tx_timeout		= fealnx_tx_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int __devinit fealnx_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	struct netdev_private *np;
	int i, option, err, irq;
	static int card_idx = -1;
	char boardname[12];
	void __iomem *ioaddr;
	unsigned long len;
	unsigned int chip_id = ent->driver_data;
	struct net_device *dev;
	void *ring_space;
	dma_addr_t ring_dma;
#ifdef USE_IO_OPS
	int bar = 0;
#else
	int bar = 1;
#endif

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	card_idx++;
	sprintf(boardname, "fealnx%d", card_idx);

	option = card_idx < MAX_UNITS ? options[card_idx] : 0;

	i = pci_enable_device(pdev);
	if (i) return i;
	pci_set_master(pdev);

	len = pci_resource_len(pdev, bar);
	if (len < MIN_REGION_SIZE) {
		dev_err(&pdev->dev,
			"region size %ld too small, aborting\n", len);
		return -ENODEV;
	}

	i = pci_request_regions(pdev, boardname);
	if (i)
		return i;

	irq = pdev->irq;

	ioaddr = pci_iomap(pdev, bar, len);
	if (!ioaddr) {
		err = -ENOMEM;
		goto err_out_res;
	}

	dev = alloc_etherdev(sizeof(struct netdev_private));
	if (!dev) {
		err = -ENOMEM;
		goto err_out_unmap;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* read ethernet id */
	for (i = 0; i < 6; ++i)
		dev->dev_addr[i] = ioread8(ioaddr + PAR0 + i);

	/* Reset the chip to erase previous misconfiguration. */
	iowrite32(0x00000001, ioaddr + BCR);

	dev->base_addr = (unsigned long)ioaddr;
	dev->irq = irq;

	/* Make certain the descriptor lists are aligned. */
	np = netdev_priv(dev);
	np->mem = ioaddr;
	spin_lock_init(&np->lock);
	np->pci_dev = pdev;
	np->flags = skel_netdrv_tbl[chip_id].flags;
	pci_set_drvdata(pdev, dev);
	np->mii.dev = dev;
	np->mii.mdio_read = mdio_read;
	np->mii.mdio_write = mdio_write;
	np->mii.phy_id_mask = 0x1f;
	np->mii.reg_num_mask = 0x1f;

	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space) {
		err = -ENOMEM;
		goto err_out_free_dev;
	}
	np->rx_ring = (struct fealnx_desc *)ring_space;
	np->rx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space) {
		err = -ENOMEM;
		goto err_out_free_rx;
	}
	np->tx_ring = (struct fealnx_desc *)ring_space;
	np->tx_ring_dma = ring_dma;

	/* find the connected MII xcvrs */
	if (np->flags == HAS_MII_XCVR) {
		int phy, phy_idx = 0;

		for (phy = 1; phy < 32 && phy_idx < ARRAY_SIZE(np->phys);
		     phy++) {
			int mii_status = mdio_read(dev, phy, 1);

			if (mii_status != 0xffff && mii_status != 0x0000) {
				np->phys[phy_idx++] = phy;
				dev_info(&pdev->dev,
					"MII PHY found at address %d, status "
					"0x%4.4x.\n", phy, mii_status);
				/* get phy type */
				{
					unsigned int data;

					data = mdio_read(dev, np->phys[0], 2);
					if (data == SeeqPHYID0)
						np->PHYType = SeeqPHY;
					else if (data == AhdocPHYID0)
						np->PHYType = AhdocPHY;
					else if (data == MarvellPHYID0)
						np->PHYType = MarvellPHY;
					else if (data == MysonPHYID0)
						np->PHYType = Myson981;
					else if (data == LevelOnePHYID0)
						np->PHYType = LevelOnePHY;
					else
						np->PHYType = OtherPHY;
				}
			}
		}

		np->mii_cnt = phy_idx;
		if (phy_idx == 0)
			dev_warn(&pdev->dev,
				"MII PHY not found -- this device may "
				"not operate correctly.\n");
	} else {
		np->phys[0] = 32;
		/* 89/6/23 add, (begin) */
		/* get phy type */
		if (ioread32(ioaddr + PHYIDENTIFIER) == MysonPHYID)
			np->PHYType = MysonPHY;
		else
			np->PHYType = OtherPHY;
	}
	np->mii.phy_id = np->phys[0];

	if (dev->mem_start)
		option = dev->mem_start;

	/* The lower four bits are the media type. */
	if (option > 0) {
		if (option & 0x200)
			np->mii.full_duplex = 1;
		np->default_port = option & 15;
	}

	if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
		np->mii.full_duplex = full_duplex[card_idx];

	if (np->mii.full_duplex) {
		dev_info(&pdev->dev, "Media type forced to Full Duplex.\n");
		/* 89/6/13 add, (begin) */
		// if (np->PHYType==MarvellPHY)
		if ((np->PHYType == MarvellPHY) || (np->PHYType == LevelOnePHY)) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], 9);
			data = (data & 0xfcff) | 0x0200;
			mdio_write(dev, np->phys[0], 9, data);
		}
		/* 89/6/13 add, (end) */
		if (np->flags == HAS_MII_XCVR)
			mdio_write(dev, np->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
		else
			iowrite32(ADVERTISE_FULL, ioaddr + ANARANLPAR);
		np->mii.force_media = 1;
	}

	dev->netdev_ops = &netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	err = register_netdev(dev);
	if (err)
		goto err_out_free_tx;

	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
	       dev->name, skel_netdrv_tbl[chip_id].chip_name, ioaddr,
	       dev->dev_addr, irq);

	return 0;

err_out_free_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_free_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_free_dev:
	free_netdev(dev);
err_out_unmap:
	pci_iounmap(pdev, ioaddr);
err_out_res:
	pci_release_regions(pdev);
	return err;
}


static void __devexit fealnx_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);

		pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
			np->tx_ring_dma);
		pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
			np->rx_ring_dma);
		unregister_netdev(dev);
		pci_iounmap(pdev, np->mem);
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_set_drvdata(pdev, NULL);
	} else
		printk(KERN_ERR "fealnx: remove for unknown device\n");
}


static ulong m80x_send_cmd_to_phy(void __iomem *miiport, int opcode, int phyad, int regad)
{
	ulong miir;
	int i;
	unsigned int mask, data;

	/* enable MII output */
	miir = (ulong) ioread32(miiport);
	miir &= 0xfffffff0;

	miir |= MASK_MIIR_MII_WRITE + MASK_MIIR_MII_MDO;

	/* send 32 1's preamble */
	for (i = 0; i < 32; i++) {
		/* low MDC; MDO is already high (miir) */
		miir &= ~MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);

		/* high MDC */
		miir |= MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);
	}

	/* calculate ST+OP+PHYAD+REGAD+TA */
	data = opcode | (phyad << 7) | (regad << 2);

	/* send it out */
	mask = 0x8000;
	while (mask) {
		/* low MDC, prepare MDO */
		miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
		if (mask & data)
			miir |= MASK_MIIR_MII_MDO;

		iowrite32(miir, miiport);
		/* high MDC */
		miir |= MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);
		udelay(30);

		/* next */
		mask >>= 1;
		if (mask == 0x2 && opcode == OP_READ)
			miir &= ~MASK_MIIR_MII_WRITE;
	}
	return miir;
}


static int mdio_read(struct net_device *dev, int phyad, int regad)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *miiport = np->mem + MANAGEMENT;
	ulong miir;
	unsigned int mask, data;

	miir = m80x_send_cmd_to_phy(miiport, OP_READ, phyad, regad);
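
	/* The command/address frame (ST+OP+PHYAD+REGAD) has been bit-banged
	   out above; a read now turns the bus around and clocks in 16 data
	   bits, MSB first: drive MDC low, sample MDI, raise MDC, once per
	   bit of the mask below. */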

	/* read data */
	mask = 0x8000;
	data = 0;
	while (mask) {
		/* low MDC */
		miir &= ~MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);

		/* read MDI */
		miir = ioread32(miiport);
		if (miir & MASK_MIIR_MII_MDI)
			data |= mask;

		/* high MDC, and wait */
		miir |= MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);
		udelay(30);

		/* next */
		mask >>= 1;
	}

	/* low MDC */
	miir &= ~MASK_MIIR_MII_MDC;
	iowrite32(miir, miiport);

	return data & 0xffff;
}


static void mdio_write(struct net_device *dev, int phyad, int regad, int data)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *miiport = np->mem + MANAGEMENT;
	ulong miir;
	unsigned int mask;

	miir = m80x_send_cmd_to_phy(miiport, OP_WRITE, phyad, regad);

	/* write data */
	mask = 0x8000;
	while (mask) {
		/* low MDC, prepare MDO */
		miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
		if (mask & data)
			miir |= MASK_MIIR_MII_MDO;
		iowrite32(miir, miiport);

		/* high MDC */
		miir |= MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);

		/* next */
		mask >>= 1;
	}

	/* low MDC */
	miir &= ~MASK_MIIR_MII_MDC;
	iowrite32(miir, miiport);
}


static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	int i;

	iowrite32(0x00000001, ioaddr + BCR);	/* Reset */

	if (request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev))
		return -EAGAIN;

	for (i = 0; i < 3; i++)
		iowrite16(((unsigned short*)dev->dev_addr)[i],
				ioaddr + PAR0 + i*2);

	init_ring(dev);

	iowrite32(np->rx_ring_dma, ioaddr + RXLBA);
	iowrite32(np->tx_ring_dma, ioaddr + TXLBA);

	/* Initialize other registers. */
	/* Configure the PCI bus bursts and FIFO thresholds.
	   486: Set 8 longword burst.
	   586: no burst limit.
	   Burst length	5:3
	   0 0 0   1
	   0 0 1   4
	   0 1 0   8
	   0 1 1   16
	   1 0 0   32
	   1 0 1   64
	   1 1 0   128
	   1 1 1   256
	   Wait the specified 50 PCI cycles after a reset by initializing
	   Tx and Rx queues and the address filter list.
	   FIXME (Ueimor): optimistic for alpha + posted writes ? */

	np->bcrvalue = 0x10;	/* little-endian, 8 burst length */
#ifdef __BIG_ENDIAN
	np->bcrvalue |= 0x04;	/* big-endian */
#endif

#if defined(__i386__) && !defined(MODULE)
	if (boot_cpu_data.x86 <= 4)
		np->crvalue = 0xa00;
	else
#endif
		np->crvalue = 0xe00;	/* rx 128 burst length */


	// 89/12/29 add,
	// 90/1/16 modify,
	// np->imrvalue=FBE|TUNF|CNTOVF|RBU|TI|RI;
	np->imrvalue = TUNF | CNTOVF | RBU | TI | RI;
	if (np->pci_dev->device == 0x891) {
		np->bcrvalue |= 0x200;	/* set PROG bit */
		np->crvalue |= CR_W_ENH;	/* set enhanced bit */
		np->imrvalue |= ETI;
	}
	iowrite32(np->bcrvalue, ioaddr + BCR);

	if (dev->if_port == 0)
		dev->if_port = np->default_port;

	iowrite32(0, ioaddr + RXPDR);
	// 89/9/1 modify,
	// np->crvalue = 0x00e40001;	/* tx store and forward, tx/rx enable */
	np->crvalue |= 0x00e40001;	/* tx store and forward, tx/rx enable */
	np->mii.full_duplex = np->mii.force_media;
	getlinkstatus(dev);
	if (np->linkok)
		getlinktype(dev);
	__set_rx_mode(dev);

	netif_start_queue(dev);

	/* Clear and Enable interrupts by setting the interrupt mask. */
	iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
	iowrite32(np->imrvalue, ioaddr + IMR);

	if (debug)
		printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = RUN_AT(3 * HZ);
	np->timer.data = (unsigned long) dev;
	np->timer.function = &netdev_timer;

	/* timer handler */
	add_timer(&np->timer);

	init_timer(&np->reset_timer);
	np->reset_timer.data = (unsigned long) dev;
	np->reset_timer.function = &reset_timer;
	np->reset_timer_armed = 0;

	return 0;
}


static void getlinkstatus(struct net_device *dev)
/* function: Routine will read MII Status Register to get link status. */
/* input   : dev... pointer to the adapter block. */
/* output  : none. */
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned int i, DelayTime = 0x1000;

	np->linkok = 0;

	if (np->PHYType == MysonPHY) {
		for (i = 0; i < DelayTime; ++i) {
			if (ioread32(np->mem + BMCRSR) & LinkIsUp2) {
				np->linkok = 1;
				return;
			}
			udelay(100);
		}
	} else {
		for (i = 0; i < DelayTime; ++i) {
			if (mdio_read(dev, np->phys[0], MII_BMSR) & BMSR_LSTATUS) {
				np->linkok = 1;
				return;
			}
			udelay(100);
		}
	}
}


static void getlinktype(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	if (np->PHYType == MysonPHY) {	/* 3-in-1 case */
		if (ioread32(np->mem + TCRRCR) & CR_R_FD)
			np->duplexmode = 2;	/* full duplex */
		else
			np->duplexmode = 1;	/* half duplex */
		if (ioread32(np->mem + TCRRCR) & CR_R_PS10)
			np->line_speed = 1;	/* 10M */
		else
			np->line_speed = 2;	/* 100M */
	} else {
		if (np->PHYType == SeeqPHY) {	/* this PHY is SEEQ 80225 */
			unsigned int data;

			data = mdio_read(dev, np->phys[0], MIIRegister18);
			if (data & SPD_DET_100)
				np->line_speed = 2;	/* 100M */
			else
				np->line_speed = 1;	/* 10M */
			if (data & DPLX_DET_FULL)
				np->duplexmode = 2;	/* full duplex mode */
			else
				np->duplexmode = 1;	/* half duplex mode */
		} else if (np->PHYType == AhdocPHY) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], DiagnosticReg);
			if (data & Speed_100)
				np->line_speed = 2;	/* 100M */
			else
				np->line_speed = 1;	/* 10M */
			if (data & DPLX_FULL)
				np->duplexmode = 2;	/* full duplex mode */
			else
				np->duplexmode = 1;	/* half duplex mode */
		}
		/* 89/6/13 add, (begin) */
		else if (np->PHYType == MarvellPHY) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], SpecificReg);
			if (data & Full_Duplex)
				np->duplexmode = 2;	/* full duplex mode */
			else
				np->duplexmode = 1;	/* half duplex mode */
			data &= SpeedMask;
			if (data == Speed_1000M)
				np->line_speed = 3;	/* 1000M */
			else if (data == Speed_100M)
				np->line_speed = 2;	/* 100M */
			else
				np->line_speed = 1;	/* 10M */
		}
		/* 89/6/13 add, (end) */
		/* 89/7/27 add, (begin) */
		else if (np->PHYType == Myson981) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], StatusRegister);

			if (data & SPEED100)
				np->line_speed = 2;
			else
				np->line_speed = 1;

			if (data & FULLMODE)
				np->duplexmode = 2;
			else
				np->duplexmode = 1;
		}
		/* 89/7/27 add, (end) */
		/* 89/12/29 add */
		else if (np->PHYType == LevelOnePHY) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], SpecificReg);
			if (data & LXT1000_Full)
				np->duplexmode = 2;	/* full duplex mode */
			else
				np->duplexmode = 1;	/* half duplex mode */
			data &= SpeedMask;
			if (data == LXT1000_1000M)
				np->line_speed = 3;	/* 1000M */
			else if (data == LXT1000_100M)
				np->line_speed = 2;	/* 100M */
			else
				np->line_speed = 1;	/* 10M */
		}
		np->crvalue &= (~CR_W_PS10) & (~CR_W_FD) & (~CR_W_PS1000);
		if (np->line_speed == 1)
			np->crvalue |= CR_W_PS10;
		else if (np->line_speed == 3)
			np->crvalue |= CR_W_PS1000;
		if (np->duplexmode == 2)
			np->crvalue |= CR_W_FD;
	}
}


/* Take lock before calling this */
static void allocate_rx_buffers(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	/* allocate skb for rx buffers */
	while (np->really_rx_count != RX_RING_SIZE) {
		struct sk_buff *skb;

		skb = dev_alloc_skb(np->rx_buf_sz);
		if (skb == NULL)
			break;	/* Better luck next round. */

		while (np->lack_rxbuf->skbuff)
			np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;

		skb->dev = dev;	/* Mark as being used by this device. */
		np->lack_rxbuf->skbuff = skb;
		np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->data,
			np->rx_buf_sz, PCI_DMA_FROMDEVICE);
		np->lack_rxbuf->status = RXOWN;
		++np->really_rx_count;
	}
}


static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	int old_crvalue = np->crvalue;
	unsigned int old_linkok = np->linkok;
	unsigned long flags;

	if (debug)
		printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
			"config %8.8x.\n", dev->name, ioread32(ioaddr + ISR),
			ioread32(ioaddr + TCRRCR));

	spin_lock_irqsave(&np->lock, flags);

	if (np->flags == HAS_MII_XCVR) {
		getlinkstatus(dev);
		if ((old_linkok == 0) && (np->linkok == 1)) {	/* we need to detect the media type again */
			getlinktype(dev);
			if (np->crvalue != old_crvalue) {
				stop_nic_rxtx(ioaddr, np->crvalue);
				iowrite32(np->crvalue, ioaddr + TCRRCR);
			}
		}
	}

	allocate_rx_buffers(dev);

	spin_unlock_irqrestore(&np->lock, flags);

	np->timer.expires = RUN_AT(10 * HZ);
	add_timer(&np->timer);
}


/* Take lock before calling */
/* Reset chip and disable rx, tx and interrupts */
static void reset_and_disable_rxtx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	int delay = 51;

	/* Reset the chip's Tx and Rx processes. */
	stop_nic_rxtx(ioaddr, 0);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite32(0, ioaddr + IMR);

	/* Reset the chip to erase previous misconfiguration. */
	iowrite32(0x00000001, ioaddr + BCR);

	/* Ueimor: wait for 50 PCI cycles (and flush posted writes btw).
	   We surely wait too long (address+data phase). Who cares? */
	while (--delay) {
		ioread32(ioaddr + BCR);
		rmb();
	}
}


/* Take lock before calling */
/* Restore chip after reset */
static void enable_rxtx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;

	reset_rx_descriptors(dev);

	iowrite32(np->tx_ring_dma + ((char*)np->cur_tx - (char*)np->tx_ring),
		ioaddr + TXLBA);
	iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
		ioaddr + RXLBA);

	iowrite32(np->bcrvalue, ioaddr + BCR);

	iowrite32(0, ioaddr + RXPDR);
	__set_rx_mode(dev);	/* changes np->crvalue, writes it into TCRRCR */

	/* Clear and Enable interrupts by setting the interrupt mask. */
	iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
	iowrite32(np->imrvalue, ioaddr + IMR);

	iowrite32(0, ioaddr + TXPDR);
}


static void reset_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct netdev_private *np = netdev_priv(dev);
	unsigned long flags;

	printk(KERN_WARNING "%s: resetting tx and rx machinery\n", dev->name);

	spin_lock_irqsave(&np->lock, flags);
	np->crvalue = np->crvalue_sv;
	np->imrvalue = np->imrvalue_sv;

	reset_and_disable_rxtx(dev);
	/* works for me without this:
	   reset_tx_descriptors(dev); */
	enable_rxtx(dev);
	netif_start_queue(dev);	/* FIXME: or netif_wake_queue(dev); ? */

	np->reset_timer_armed = 0;

	spin_unlock_irqrestore(&np->lock, flags);
}


static void fealnx_tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	unsigned long flags;
	int i;

	printk(KERN_WARNING
		"%s: Transmit timed out, status %8.8x, resetting...\n",
		dev->name, ioread32(ioaddr + ISR));

	{
		printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(KERN_CONT " %8.8x",
				(unsigned int) np->rx_ring[i].status);
		printk(KERN_CONT "\n");
		printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_CONT " %4.4x", np->tx_ring[i].status);
		printk(KERN_CONT "\n");
	}

	spin_lock_irqsave(&np->lock, flags);

	reset_and_disable_rxtx(dev);
	reset_tx_descriptors(dev);
	enable_rxtx(dev);

	spin_unlock_irqrestore(&np->lock, flags);

	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	netif_wake_queue(dev);	/* or .._start_.. ?? */
}


/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	/* initialize rx variables */
	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	np->cur_rx = &np->rx_ring[0];
	np->lack_rxbuf = np->rx_ring;
	np->really_rx_count = 0;

	/* initialize rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		np->rx_ring[i].control = np->rx_buf_sz << RBSShift;
		np->rx_ring[i].next_desc = np->rx_ring_dma +
			(i + 1)*sizeof(struct fealnx_desc);
		np->rx_ring[i].next_desc_logical = &np->rx_ring[i + 1];
		np->rx_ring[i].skbuff = NULL;
	}

	/* for the last rx descriptor */
	np->rx_ring[i - 1].next_desc = np->rx_ring_dma;
	np->rx_ring[i - 1].next_desc_logical = np->rx_ring;

	/* allocate skb for rx buffers */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);

		if (skb == NULL) {
			np->lack_rxbuf = &np->rx_ring[i];
			break;
		}

		++np->really_rx_count;
		np->rx_ring[i].skbuff = skb;
		skb->dev = dev;	/* Mark as being used by this device. */
		np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->data,
			np->rx_buf_sz, PCI_DMA_FROMDEVICE);
		np->rx_ring[i].status = RXOWN;
		np->rx_ring[i].control |= RXIC;
	}

	/* initialize tx variables */
	np->cur_tx = &np->tx_ring[0];
	np->cur_tx_copy = &np->tx_ring[0];
	np->really_tx_count = 0;
	np->free_tx_count = TX_RING_SIZE;

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].status = 0;
		/* do we need np->tx_ring[i].control = XXX; ?? */
		np->tx_ring[i].next_desc = np->tx_ring_dma +
			(i + 1)*sizeof(struct fealnx_desc);
		np->tx_ring[i].next_desc_logical = &np->tx_ring[i + 1];
		np->tx_ring[i].skbuff = NULL;
	}

	/* for the last tx descriptor */
	np->tx_ring[i - 1].next_desc = np->tx_ring_dma;
	np->tx_ring[i - 1].next_desc_logical = &np->tx_ring[0];
}


static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&np->lock, flags);

	np->cur_tx_copy->skbuff = skb;

#define one_buffer
#define BPT 1022
#if defined(one_buffer)
	np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
		skb->len, PCI_DMA_TODEVICE);
	np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
	np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
	np->cur_tx_copy->control |= (skb->len << TBSShift);	/* buffer size */
	// 89/12/29 add,
	if (np->pci_dev->device == 0x891)
		np->cur_tx_copy->control |= ETIControl | RetryTxLC;
	np->cur_tx_copy->status = TXOWN;
	np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
	--np->free_tx_count;
#elif defined(two_buffer)
	if (skb->len > BPT) {
		struct fealnx_desc *next;

		/* for the first descriptor */
		np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
			BPT, PCI_DMA_TODEVICE);
		np->cur_tx_copy->control = TXIC | TXFD | CRCEnable | PADEnable;
		np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
		np->cur_tx_copy->control |= (BPT << TBSShift);	/* buffer size */

		/* for the last descriptor */
		next = np->cur_tx_copy->next_desc_logical;
		next->skbuff = skb;
		next->control = TXIC | TXLD | CRCEnable | PADEnable;
		next->control |= (skb->len << PKTSShift);	/* pkt size */
		next->control |= ((skb->len - BPT) << TBSShift);	/* buf size */
		// 89/12/29 add,
		if (np->pci_dev->device == 0x891)
			np->cur_tx_copy->control |= ETIControl | RetryTxLC;
		next->buffer = pci_map_single(np->pci_dev, skb->data + BPT,
			skb->len - BPT, PCI_DMA_TODEVICE);

		next->status = TXOWN;
		np->cur_tx_copy->status = TXOWN;

		np->cur_tx_copy = next->next_desc_logical;
		np->free_tx_count -= 2;
	} else {
		np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
			skb->len, PCI_DMA_TODEVICE);
		np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
		np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
		np->cur_tx_copy->control |= (skb->len << TBSShift);	/* buffer size */
		// 89/12/29 add,
		if (np->pci_dev->device == 0x891)
			np->cur_tx_copy->control |= ETIControl | RetryTxLC;
		np->cur_tx_copy->status = TXOWN;
		np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
		--np->free_tx_count;
	}
#endif

	if (np->free_tx_count < 2)
		netif_stop_queue(dev);
	++np->really_tx_count;
	iowrite32(0, np->mem + TXPDR);
	dev->trans_start = jiffies;

	spin_unlock_irqrestore(&np->lock, flags);
	return NETDEV_TX_OK;
}


/* Take lock before calling */
/* Chip probably hosed tx ring. Clean up. */
static void reset_tx_descriptors(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct fealnx_desc *cur;
	int i;

	/* initialize tx variables */
	np->cur_tx = &np->tx_ring[0];
	np->cur_tx_copy = &np->tx_ring[0];
	np->really_tx_count = 0;
	np->free_tx_count = TX_RING_SIZE;

	for (i = 0; i < TX_RING_SIZE; i++) {
		cur = &np->tx_ring[i];
		if (cur->skbuff) {
			pci_unmap_single(np->pci_dev, cur->buffer,
				cur->skbuff->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_any(cur->skbuff);
			cur->skbuff = NULL;
		}
		cur->status = 0;
		cur->control = 0;	/* needed? */
		/* probably not needed. We do it for purely paranoid reasons */
		cur->next_desc = np->tx_ring_dma +
			(i + 1)*sizeof(struct fealnx_desc);
		cur->next_desc_logical = &np->tx_ring[i + 1];
	}
	/* for the last tx descriptor */
	np->tx_ring[TX_RING_SIZE - 1].next_desc = np->tx_ring_dma;
	np->tx_ring[TX_RING_SIZE - 1].next_desc_logical = &np->tx_ring[0];
}


/* Take lock and stop rx before calling this */
static void reset_rx_descriptors(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct fealnx_desc *cur = np->cur_rx;
	int i;

	allocate_rx_buffers(dev);

	for (i = 0; i < RX_RING_SIZE; i++) {
		if (cur->skbuff)
			cur->status = RXOWN;
		cur = cur->next_desc_logical;
	}

	iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
		np->mem + RXLBA);
}


/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *) dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	long boguscnt = max_interrupt_work;
	unsigned int num_tx = 0;
	int handled = 0;

	spin_lock(&np->lock);

	iowrite32(0, ioaddr + IMR);

	do {
		u32 intr_status = ioread32(ioaddr + ISR);

		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(intr_status, ioaddr + ISR);

		if (debug)
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", dev->name,
				intr_status);

		if (!(intr_status & np->imrvalue))
			break;

		handled = 1;

		// 90/1/16 delete,
		//
		// if (intr_status & FBE)
		// {	/* fatal error */
		//	stop_nic_tx(ioaddr, 0);
		//	stop_nic_rx(ioaddr, 0);
		//	break;
		// };

		if (intr_status & TUNF)
			iowrite32(0, ioaddr + TXPDR);

		if (intr_status & CNTOVF) {
			/* missed pkts */
			np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;

			/* crc error */
			np->stats.rx_crc_errors +=
				(ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
		}

		if (intr_status & (RI | RBU)) {
			if (intr_status & RI)
				netdev_rx(dev);
			else {
				stop_nic_rx(ioaddr, np->crvalue);
				reset_rx_descriptors(dev);
				iowrite32(np->crvalue, ioaddr + TCRRCR);
			}
		}

		while (np->really_tx_count) {
			long tx_status = np->cur_tx->status;
			long tx_control = np->cur_tx->control;

			if (!(tx_control & TXLD)) {	/* this pkt is split across two tx descriptors */
				struct fealnx_desc *next;

				next = np->cur_tx->next_desc_logical;
				tx_status = next->status;
				tx_control = next->control;
			}

			if (tx_status & TXOWN)
				break;

			if (!(np->crvalue & CR_W_ENH)) {
				if (tx_status & (CSL | LC | EC | UDF | HF)) {
					np->stats.tx_errors++;
					if (tx_status & EC)
						np->stats.tx_aborted_errors++;
					if (tx_status & CSL)
						np->stats.tx_carrier_errors++;
					if (tx_status & LC)
						np->stats.tx_window_errors++;
					if (tx_status & UDF)
						np->stats.tx_fifo_errors++;
					if ((tx_status & HF) && np->mii.full_duplex == 0)
						np->stats.tx_heartbeat_errors++;

				} else {
					np->stats.tx_bytes +=
						((tx_control & PKTSMask) >> PKTSShift);

					np->stats.collisions +=
						((tx_status & NCRMask) >> NCRShift);
					np->stats.tx_packets++;
				}
			} else {
				np->stats.tx_bytes +=
					((tx_control & PKTSMask) >> PKTSShift);
				np->stats.tx_packets++;
			}

			/* Free the original skb. */
			pci_unmap_single(np->pci_dev, np->cur_tx->buffer,
				np->cur_tx->skbuff->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(np->cur_tx->skbuff);
			np->cur_tx->skbuff = NULL;
			--np->really_tx_count;
			if (np->cur_tx->control & TXLD) {
				np->cur_tx = np->cur_tx->next_desc_logical;
				++np->free_tx_count;
			} else {
				np->cur_tx = np->cur_tx->next_desc_logical;
				np->cur_tx = np->cur_tx->next_desc_logical;
				np->free_tx_count += 2;
			}
			num_tx++;
		}		/* end of while loop */

		if (num_tx && np->free_tx_count >= 2)
			netif_wake_queue(dev);

		/* read transmit status for enhanced mode only */
		if (np->crvalue & CR_W_ENH) {
			long data;

			data = ioread32(ioaddr + TSR);
			np->stats.tx_errors += (data & 0xff000000) >> 24;
			np->stats.tx_aborted_errors += (data & 0xff000000) >> 24;
			np->stats.tx_window_errors += (data & 0x00ff0000) >> 16;
			np->stats.collisions += (data & 0x0000ffff);
		}

		if (--boguscnt < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
				"status=0x%4.4x.\n", dev->name, intr_status);
			if (!np->reset_timer_armed) {
				np->reset_timer_armed = 1;
				np->reset_timer.expires = RUN_AT(HZ/2);
				add_timer(&np->reset_timer);
				stop_nic_rxtx(ioaddr, 0);
				netif_stop_queue(dev);
				/* or netif_tx_disable(dev); ?? */
				/* Prevent other paths from enabling tx,rx,intrs */
				np->crvalue_sv = np->crvalue;
				np->imrvalue_sv = np->imrvalue;
				np->crvalue &= ~(CR_W_TXEN | CR_W_RXEN);	/* or simply = 0? */
				np->imrvalue = 0;
			}

			break;
		}
	} while (1);

	/* read the tally counters */
	/* missed pkts */
	np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;

	/* crc error */
	np->stats.rx_crc_errors += (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;

	if (debug)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			dev->name, ioread32(ioaddr + ISR));

	iowrite32(np->imrvalue, ioaddr + IMR);

	spin_unlock(&np->lock);

	return IRQ_RETVAL(handled);
}


/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (!(np->cur_rx->status & RXOWN) && np->cur_rx->skbuff) {
		s32 rx_status = np->cur_rx->status;

		if (np->really_rx_count == 0)
			break;

		if (debug)
			printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n", rx_status);

		if ((!((rx_status & RXFSD) && (rx_status & RXLSD))) ||
		    (rx_status & ErrorSummary)) {
			if (rx_status & ErrorSummary) {	/* there was a fatal error */
				if (debug)
					printk(KERN_DEBUG
						"%s: Receive error, Rx status %8.8x.\n",
						dev->name, rx_status);

				np->stats.rx_errors++;	/* end of a packet. */
				if (rx_status & (LONG | RUNT))
					np->stats.rx_length_errors++;
				if (rx_status & RXER)
					np->stats.rx_frame_errors++;
				if (rx_status & CRC)
					np->stats.rx_crc_errors++;
			} else {
				int need_to_reset = 0;
				int desno = 0;

				if (rx_status & RXFSD) {	/* this pkt is too long, over one rx buffer */
					struct fealnx_desc *cur;

					/* check whether this packet was received completely */
					cur = np->cur_rx;
					while (desno <= np->really_rx_count) {
						++desno;
						if ((!(cur->status & RXOWN)) &&
						    (cur->status & RXLSD))
							break;
						/* goto next rx descriptor */
						cur = cur->next_desc_logical;
					}
					if (desno > np->really_rx_count)
						need_to_reset = 1;
				} else	/* RXLSD not found, something is wrong */
					need_to_reset = 1;

				if (need_to_reset == 0) {
					int i;

					np->stats.rx_length_errors++;

					/* free all rx descriptors related to this long pkt */
					for (i = 0; i < desno; ++i) {
						if (!np->cur_rx->skbuff) {
							printk(KERN_DEBUG
								"%s: I'm scared\n", dev->name);
							break;
						}
						np->cur_rx->status = RXOWN;
						np->cur_rx = np->cur_rx->next_desc_logical;
					}
					continue;
				} else {	/* rx error, need to reset this chip */
					stop_nic_rx(ioaddr, np->crvalue);
					reset_rx_descriptors(dev);
					iowrite32(np->crvalue, ioaddr + TCRRCR);
				}
				break;	/* exit the while loop */
			}
		} else {	/* this received pkt is ok */

			struct sk_buff *skb;
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((rx_status & FLNGMASK) >> FLNGShift) - 4;

#ifndef final_version
			if (debug)
				printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
					" status %x.\n", pkt_len, rx_status);
#endif

			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(np->pci_dev,
						np->cur_rx->buffer,
						np->rx_buf_sz,
						PCI_DMA_FROMDEVICE);
				/* Call copy + cksum if available. */

#if ! defined(__alpha__)
				skb_copy_to_linear_data(skb,
					np->cur_rx->skbuff->data, pkt_len);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
					np->cur_rx->skbuff->data, pkt_len);
#endif
				pci_dma_sync_single_for_device(np->pci_dev,
						np->cur_rx->buffer,
						np->rx_buf_sz,
						PCI_DMA_FROMDEVICE);
			} else {
				pci_unmap_single(np->pci_dev,
					np->cur_rx->buffer,
					np->rx_buf_sz,
					PCI_DMA_FROMDEVICE);
				skb_put(skb = np->cur_rx->skbuff, pkt_len);
				np->cur_rx->skbuff = NULL;
				--np->really_rx_count;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			np->stats.rx_packets++;
			np->stats.rx_bytes += pkt_len;
		}

		np->cur_rx = np->cur_rx->next_desc_logical;
	}			/* end of while loop */

	/* allocate skb for rx buffers */
	allocate_rx_buffers(dev);

	return 0;
}


static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;

	/* The chip only needs to report frames silently dropped. */
	if (netif_running(dev)) {
		np->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;
		np->stats.rx_crc_errors += (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
	}

	return &np->stats;
}


/* for dev->set_multicast_list */
static void set_rx_mode(struct net_device *dev)
{
	spinlock_t *lp = &((struct netdev_private *)netdev_priv(dev))->lock;
	unsigned long flags;
	spin_lock_irqsave(lp, flags);
	__set_rx_mode(dev);
	spin_unlock_irqrestore(lp, flags);
}


/* Take lock before calling */
static void __set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	u32 mc_filter[2];	/* Multicast hash filter */
	u32 rx_mode;

	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM;
	} else if ((dev->mc_count > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = CR_W_AB | CR_W_AM;
	} else {
		struct dev_mc_list *mclist;
		int i;

		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			unsigned int bit;
			bit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
			mc_filter[bit >> 5] |= (1 << bit);
		}
		rx_mode = CR_W_AB | CR_W_AM;
	}

	stop_nic_rxtx(ioaddr, np->crvalue);

	iowrite32(mc_filter[0], ioaddr + MAR0);
	iowrite32(mc_filter[1], ioaddr + MAR1);
	np->crvalue &= ~CR_W_RXMODEMASK;
	np->crvalue |= rx_mode;
	iowrite32(np->crvalue, ioaddr + TCRRCR);
}

static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_gset(&np->mii, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_sset(&np->mii, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
};

static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&np->lock);

	return rc;
}


static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	int i;

	netif_stop_queue(dev);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite32(0x0000, ioaddr + IMR);

	/* Stop the chip's Tx and Rx processes. */
	stop_nic_rxtx(ioaddr, 0);

	del_timer_sync(&np->timer);
	del_timer_sync(&np->reset_timer);

	free_irq(dev->irq, dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = np->rx_ring[i].skbuff;

		np->rx_ring[i].status = 0;
		if (skb) {
			pci_unmap_single(np->pci_dev, np->rx_ring[i].buffer,
				np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
			np->rx_ring[i].skbuff = NULL;
		}
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = np->tx_ring[i].skbuff;

		if (skb) {
			pci_unmap_single(np->pci_dev, np->tx_ring[i].buffer,
				skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
			np->tx_ring[i].skbuff = NULL;
		}
	}

	return 0;
}

static struct pci_device_id fealnx_pci_tbl[] = {
	{0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
	{0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
	{} /* terminate list */
};
MODULE_DEVICE_TABLE(pci, fealnx_pci_tbl);


static struct pci_driver fealnx_driver = {
	.name		= "fealnx",
	.id_table	= fealnx_pci_tbl,
	.probe		= fealnx_init_one,
	.remove		= __devexit_p(fealnx_remove_one),
};

static int __init fealnx_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif

	return pci_register_driver(&fealnx_driver);
}

static void __exit fealnx_exit(void)
{
	pci_unregister_driver(&fealnx_driver);
}

module_init(fealnx_init);
module_exit(fealnx_exit);