Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
at v2.6.14 2493 lines 70 kB view raw
1/******************************************************************************* 2 3 4 Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms of the GNU General Public License as published by the Free 8 Software Foundation; either version 2 of the License, or (at your option) 9 any later version. 10 11 This program is distributed in the hope that it will be useful, but WITHOUT 12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 more details. 15 16 You should have received a copy of the GNU General Public License along with 17 this program; if not, write to the Free Software Foundation, Inc., 59 18 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 19 20 The full GNU General Public License is included in this distribution in the 21 file called LICENSE. 22 23 Contact Information: 24 Linux NICS <linux.nics@intel.com> 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 27*******************************************************************************/ 28 29/* 30 * e100.c: Intel(R) PRO/100 ethernet driver 31 * 32 * (Re)written 2003 by scott.feldman@intel.com. Based loosely on 33 * original e100 driver, but better described as a munging of 34 * e100, e1000, eepro100, tg3, 8139cp, and other drivers. 35 * 36 * References: 37 * Intel 8255x 10/100 Mbps Ethernet Controller Family, 38 * Open Source Software Developers Manual, 39 * http://sourceforge.net/projects/e1000 40 * 41 * 42 * Theory of Operation 43 * 44 * I. General 45 * 46 * The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet 47 * controller family, which includes the 82557, 82558, 82559, 82550, 48 * 82551, and 82562 devices. 82558 and greater controllers 49 * integrate the Intel 82555 PHY. The controllers are used in 50 * server and client network interface cards, as well as in 51 * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx 52 * configurations. 8255x supports a 32-bit linear addressing 53 * mode and operates at 33Mhz PCI clock rate. 54 * 55 * II. Driver Operation 56 * 57 * Memory-mapped mode is used exclusively to access the device's 58 * shared-memory structure, the Control/Status Registers (CSR). All 59 * setup, configuration, and control of the device, including queuing 60 * of Tx, Rx, and configuration commands is through the CSR. 61 * cmd_lock serializes accesses to the CSR command register. cb_lock 62 * protects the shared Command Block List (CBL). 63 * 64 * 8255x is highly MII-compliant and all access to the PHY go 65 * through the Management Data Interface (MDI). Consequently, the 66 * driver leverages the mii.c library shared with other MII-compliant 67 * devices. 68 * 69 * Big- and Little-Endian byte order as well as 32- and 64-bit 70 * archs are supported. Weak-ordered memory and non-cache-coherent 71 * archs are supported. 72 * 73 * III. Transmit 74 * 75 * A Tx skb is mapped and hangs off of a TCB. TCBs are linked 76 * together in a fixed-size ring (CBL) thus forming the flexible mode 77 * memory structure. A TCB marked with the suspend-bit indicates 78 * the end of the ring. The last TCB processed suspends the 79 * controller, and the controller can be restarted by issue a CU 80 * resume command to continue from the suspend point, or a CU start 81 * command to start at a given position in the ring. 82 * 83 * Non-Tx commands (config, multicast setup, etc) are linked 84 * into the CBL ring along with Tx commands. The common structure 85 * used for both Tx and non-Tx commands is the Command Block (CB). 86 * 87 * cb_to_use is the next CB to use for queuing a command; cb_to_clean 88 * is the next CB to check for completion; cb_to_send is the first 89 * CB to start on in case of a previous failure to resume. CB clean 90 * up happens in interrupt context in response to a CU interrupt. 91 * cbs_avail keeps track of number of free CB resources available. 92 * 93 * Hardware padding of short packets to minimum packet size is 94 * enabled. 82557 pads with 7Eh, while the later controllers pad 95 * with 00h. 96 * 97 * IV. Recieve 98 * 99 * The Receive Frame Area (RFA) comprises a ring of Receive Frame 100 * Descriptors (RFD) + data buffer, thus forming the simplified mode 101 * memory structure. Rx skbs are allocated to contain both the RFD 102 * and the data buffer, but the RFD is pulled off before the skb is 103 * indicated. The data buffer is aligned such that encapsulated 104 * protocol headers are u32-aligned. Since the RFD is part of the 105 * mapped shared memory, and completion status is contained within 106 * the RFD, the RFD must be dma_sync'ed to maintain a consistent 107 * view from software and hardware. 108 * 109 * Under typical operation, the receive unit (RU) is start once, 110 * and the controller happily fills RFDs as frames arrive. If 111 * replacement RFDs cannot be allocated, or the RU goes non-active, 112 * the RU must be restarted. Frame arrival generates an interrupt, 113 * and Rx indication and re-allocation happen in the same context, 114 * therefore no locking is required. A software-generated interrupt 115 * is generated from the watchdog to recover from a failed allocation 116 * senario where all Rx resources have been indicated and none re- 117 * placed. 118 * 119 * V. Miscellaneous 120 * 121 * VLAN offloading of tagging, stripping and filtering is not 122 * supported, but driver will accommodate the extra 4-byte VLAN tag 123 * for processing by upper layers. Tx/Rx Checksum offloading is not 124 * supported. Tx Scatter/Gather is not supported. Jumbo Frames is 125 * not supported (hardware limitation). 126 * 127 * MagicPacket(tm) WoL support is enabled/disabled via ethtool. 128 * 129 * Thanks to JC (jchapman@katalix.com) for helping with 130 * testing/troubleshooting the development driver. 131 * 132 * TODO: 133 * o several entry points race with dev->close 134 * o check for tx-no-resources/stop Q races with tx clean/wake Q 135 */ 136 137#include <linux/config.h> 138#include <linux/module.h> 139#include <linux/moduleparam.h> 140#include <linux/kernel.h> 141#include <linux/types.h> 142#include <linux/slab.h> 143#include <linux/delay.h> 144#include <linux/init.h> 145#include <linux/pci.h> 146#include <linux/dma-mapping.h> 147#include <linux/netdevice.h> 148#include <linux/etherdevice.h> 149#include <linux/mii.h> 150#include <linux/if_vlan.h> 151#include <linux/skbuff.h> 152#include <linux/ethtool.h> 153#include <linux/string.h> 154#include <asm/unaligned.h> 155 156 157#define DRV_NAME "e100" 158#define DRV_EXT "-NAPI" 159#define DRV_VERSION "3.4.14-k2"DRV_EXT 160#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver" 161#define DRV_COPYRIGHT "Copyright(c) 1999-2005 Intel Corporation" 162#define PFX DRV_NAME ": " 163 164#define E100_WATCHDOG_PERIOD (2 * HZ) 165#define E100_NAPI_WEIGHT 16 166 167MODULE_DESCRIPTION(DRV_DESCRIPTION); 168MODULE_AUTHOR(DRV_COPYRIGHT); 169MODULE_LICENSE("GPL"); 170MODULE_VERSION(DRV_VERSION); 171 172static int debug = 3; 173module_param(debug, int, 0); 174MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 175#define DPRINTK(nlevel, klevel, fmt, args...) \ 176 (void)((NETIF_MSG_##nlevel & nic->msg_enable) && \ 177 printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \ 178 __FUNCTION__ , ## args)) 179 180#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\ 181 PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \ 182 PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich } 183static struct pci_device_id e100_id_table[] = { 184 INTEL_8255X_ETHERNET_DEVICE(0x1029, 0), 185 INTEL_8255X_ETHERNET_DEVICE(0x1030, 0), 186 INTEL_8255X_ETHERNET_DEVICE(0x1031, 3), 187 INTEL_8255X_ETHERNET_DEVICE(0x1032, 3), 188 INTEL_8255X_ETHERNET_DEVICE(0x1033, 3), 189 INTEL_8255X_ETHERNET_DEVICE(0x1034, 3), 190 INTEL_8255X_ETHERNET_DEVICE(0x1038, 3), 191 INTEL_8255X_ETHERNET_DEVICE(0x1039, 4), 192 INTEL_8255X_ETHERNET_DEVICE(0x103A, 4), 193 INTEL_8255X_ETHERNET_DEVICE(0x103B, 4), 194 INTEL_8255X_ETHERNET_DEVICE(0x103C, 4), 195 INTEL_8255X_ETHERNET_DEVICE(0x103D, 4), 196 INTEL_8255X_ETHERNET_DEVICE(0x103E, 4), 197 INTEL_8255X_ETHERNET_DEVICE(0x1050, 5), 198 INTEL_8255X_ETHERNET_DEVICE(0x1051, 5), 199 INTEL_8255X_ETHERNET_DEVICE(0x1052, 5), 200 INTEL_8255X_ETHERNET_DEVICE(0x1053, 5), 201 INTEL_8255X_ETHERNET_DEVICE(0x1054, 5), 202 INTEL_8255X_ETHERNET_DEVICE(0x1055, 5), 203 INTEL_8255X_ETHERNET_DEVICE(0x1056, 5), 204 INTEL_8255X_ETHERNET_DEVICE(0x1057, 5), 205 INTEL_8255X_ETHERNET_DEVICE(0x1059, 0), 206 INTEL_8255X_ETHERNET_DEVICE(0x1064, 6), 207 INTEL_8255X_ETHERNET_DEVICE(0x1065, 6), 208 INTEL_8255X_ETHERNET_DEVICE(0x1066, 6), 209 INTEL_8255X_ETHERNET_DEVICE(0x1067, 6), 210 INTEL_8255X_ETHERNET_DEVICE(0x1068, 6), 211 INTEL_8255X_ETHERNET_DEVICE(0x1069, 6), 212 INTEL_8255X_ETHERNET_DEVICE(0x106A, 6), 213 INTEL_8255X_ETHERNET_DEVICE(0x106B, 6), 214 INTEL_8255X_ETHERNET_DEVICE(0x1091, 7), 215 INTEL_8255X_ETHERNET_DEVICE(0x1092, 7), 216 INTEL_8255X_ETHERNET_DEVICE(0x1093, 7), 217 INTEL_8255X_ETHERNET_DEVICE(0x1094, 7), 218 INTEL_8255X_ETHERNET_DEVICE(0x1095, 7), 219 INTEL_8255X_ETHERNET_DEVICE(0x1209, 0), 220 INTEL_8255X_ETHERNET_DEVICE(0x1229, 0), 221 INTEL_8255X_ETHERNET_DEVICE(0x2449, 2), 222 INTEL_8255X_ETHERNET_DEVICE(0x2459, 2), 223 INTEL_8255X_ETHERNET_DEVICE(0x245D, 2), 224 INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7), 225 { 0, } 226}; 227MODULE_DEVICE_TABLE(pci, e100_id_table); 228 229enum mac { 230 mac_82557_D100_A = 0, 231 mac_82557_D100_B = 1, 232 mac_82557_D100_C = 2, 233 mac_82558_D101_A4 = 4, 234 mac_82558_D101_B0 = 5, 235 mac_82559_D101M = 8, 236 mac_82559_D101S = 9, 237 mac_82550_D102 = 12, 238 mac_82550_D102_C = 13, 239 mac_82551_E = 14, 240 mac_82551_F = 15, 241 mac_82551_10 = 16, 242 mac_unknown = 0xFF, 243}; 244 245enum phy { 246 phy_100a = 0x000003E0, 247 phy_100c = 0x035002A8, 248 phy_82555_tx = 0x015002A8, 249 phy_nsc_tx = 0x5C002000, 250 phy_82562_et = 0x033002A8, 251 phy_82562_em = 0x032002A8, 252 phy_82562_ek = 0x031002A8, 253 phy_82562_eh = 0x017002A8, 254 phy_unknown = 0xFFFFFFFF, 255}; 256 257/* CSR (Control/Status Registers) */ 258struct csr { 259 struct { 260 u8 status; 261 u8 stat_ack; 262 u8 cmd_lo; 263 u8 cmd_hi; 264 u32 gen_ptr; 265 } scb; 266 u32 port; 267 u16 flash_ctrl; 268 u8 eeprom_ctrl_lo; 269 u8 eeprom_ctrl_hi; 270 u32 mdi_ctrl; 271 u32 rx_dma_count; 272}; 273 274enum scb_status { 275 rus_ready = 0x10, 276 rus_mask = 0x3C, 277}; 278 279enum ru_state { 280 RU_SUSPENDED = 0, 281 RU_RUNNING = 1, 282 RU_UNINITIALIZED = -1, 283}; 284 285enum scb_stat_ack { 286 stat_ack_not_ours = 0x00, 287 stat_ack_sw_gen = 0x04, 288 stat_ack_rnr = 0x10, 289 stat_ack_cu_idle = 0x20, 290 stat_ack_frame_rx = 0x40, 291 stat_ack_cu_cmd_done = 0x80, 292 stat_ack_not_present = 0xFF, 293 stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx), 294 stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done), 295}; 296 297enum scb_cmd_hi { 298 irq_mask_none = 0x00, 299 irq_mask_all = 0x01, 300 irq_sw_gen = 0x02, 301}; 302 303enum scb_cmd_lo { 304 cuc_nop = 0x00, 305 ruc_start = 0x01, 306 ruc_load_base = 0x06, 307 cuc_start = 0x10, 308 cuc_resume = 0x20, 309 cuc_dump_addr = 0x40, 310 cuc_dump_stats = 0x50, 311 cuc_load_base = 0x60, 312 cuc_dump_reset = 0x70, 313}; 314 315enum cuc_dump { 316 cuc_dump_complete = 0x0000A005, 317 cuc_dump_reset_complete = 0x0000A007, 318}; 319 320enum port { 321 software_reset = 0x0000, 322 selftest = 0x0001, 323 selective_reset = 0x0002, 324}; 325 326enum eeprom_ctrl_lo { 327 eesk = 0x01, 328 eecs = 0x02, 329 eedi = 0x04, 330 eedo = 0x08, 331}; 332 333enum mdi_ctrl { 334 mdi_write = 0x04000000, 335 mdi_read = 0x08000000, 336 mdi_ready = 0x10000000, 337}; 338 339enum eeprom_op { 340 op_write = 0x05, 341 op_read = 0x06, 342 op_ewds = 0x10, 343 op_ewen = 0x13, 344}; 345 346enum eeprom_offsets { 347 eeprom_cnfg_mdix = 0x03, 348 eeprom_id = 0x0A, 349 eeprom_config_asf = 0x0D, 350 eeprom_smbus_addr = 0x90, 351}; 352 353enum eeprom_cnfg_mdix { 354 eeprom_mdix_enabled = 0x0080, 355}; 356 357enum eeprom_id { 358 eeprom_id_wol = 0x0020, 359}; 360 361enum eeprom_config_asf { 362 eeprom_asf = 0x8000, 363 eeprom_gcl = 0x4000, 364}; 365 366enum cb_status { 367 cb_complete = 0x8000, 368 cb_ok = 0x2000, 369}; 370 371enum cb_command { 372 cb_nop = 0x0000, 373 cb_iaaddr = 0x0001, 374 cb_config = 0x0002, 375 cb_multi = 0x0003, 376 cb_tx = 0x0004, 377 cb_ucode = 0x0005, 378 cb_dump = 0x0006, 379 cb_tx_sf = 0x0008, 380 cb_cid = 0x1f00, 381 cb_i = 0x2000, 382 cb_s = 0x4000, 383 cb_el = 0x8000, 384}; 385 386struct rfd { 387 u16 status; 388 u16 command; 389 u32 link; 390 u32 rbd; 391 u16 actual_size; 392 u16 size; 393}; 394 395struct rx { 396 struct rx *next, *prev; 397 struct sk_buff *skb; 398 dma_addr_t dma_addr; 399}; 400 401#if defined(__BIG_ENDIAN_BITFIELD) 402#define X(a,b) b,a 403#else 404#define X(a,b) a,b 405#endif 406struct config { 407/*0*/ u8 X(byte_count:6, pad0:2); 408/*1*/ u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1); 409/*2*/ u8 adaptive_ifs; 410/*3*/ u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1), 411 term_write_cache_line:1), pad3:4); 412/*4*/ u8 X(rx_dma_max_count:7, pad4:1); 413/*5*/ u8 X(tx_dma_max_count:7, dma_max_count_enable:1); 414/*6*/ u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1), 415 tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1), 416 rx_discard_overruns:1), rx_save_bad_frames:1); 417/*7*/ u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2), 418 pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1), 419 tx_dynamic_tbd:1); 420/*8*/ u8 X(X(mii_mode:1, pad8:6), csma_disabled:1); 421/*9*/ u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1), 422 link_status_wake:1), arp_wake:1), mcmatch_wake:1); 423/*10*/ u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2), 424 loopback:2); 425/*11*/ u8 X(linear_priority:3, pad11:5); 426/*12*/ u8 X(X(linear_priority_mode:1, pad12:3), ifs:4); 427/*13*/ u8 ip_addr_lo; 428/*14*/ u8 ip_addr_hi; 429/*15*/ u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1), 430 wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1), 431 pad15_2:1), crs_or_cdt:1); 432/*16*/ u8 fc_delay_lo; 433/*17*/ u8 fc_delay_hi; 434/*18*/ u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1), 435 rx_long_ok:1), fc_priority_threshold:3), pad18:1); 436/*19*/ u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1), 437 fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1), 438 full_duplex_force:1), full_duplex_pin:1); 439/*20*/ u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1); 440/*21*/ u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4); 441/*22*/ u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6); 442 u8 pad_d102[9]; 443}; 444 445#define E100_MAX_MULTICAST_ADDRS 64 446struct multi { 447 u16 count; 448 u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/]; 449}; 450 451/* Important: keep total struct u32-aligned */ 452#define UCODE_SIZE 134 453struct cb { 454 u16 status; 455 u16 command; 456 u32 link; 457 union { 458 u8 iaaddr[ETH_ALEN]; 459 u32 ucode[UCODE_SIZE]; 460 struct config config; 461 struct multi multi; 462 struct { 463 u32 tbd_array; 464 u16 tcb_byte_count; 465 u8 threshold; 466 u8 tbd_count; 467 struct { 468 u32 buf_addr; 469 u16 size; 470 u16 eol; 471 } tbd; 472 } tcb; 473 u32 dump_buffer_addr; 474 } u; 475 struct cb *next, *prev; 476 dma_addr_t dma_addr; 477 struct sk_buff *skb; 478}; 479 480enum loopback { 481 lb_none = 0, lb_mac = 1, lb_phy = 3, 482}; 483 484struct stats { 485 u32 tx_good_frames, tx_max_collisions, tx_late_collisions, 486 tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions, 487 tx_multiple_collisions, tx_total_collisions; 488 u32 rx_good_frames, rx_crc_errors, rx_alignment_errors, 489 rx_resource_errors, rx_overrun_errors, rx_cdt_errors, 490 rx_short_frame_errors; 491 u32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported; 492 u16 xmt_tco_frames, rcv_tco_frames; 493 u32 complete; 494}; 495 496struct mem { 497 struct { 498 u32 signature; 499 u32 result; 500 } selftest; 501 struct stats stats; 502 u8 dump_buf[596]; 503}; 504 505struct param_range { 506 u32 min; 507 u32 max; 508 u32 count; 509}; 510 511struct params { 512 struct param_range rfds; 513 struct param_range cbs; 514}; 515 516struct nic { 517 /* Begin: frequently used values: keep adjacent for cache effect */ 518 u32 msg_enable ____cacheline_aligned; 519 struct net_device *netdev; 520 struct pci_dev *pdev; 521 522 struct rx *rxs ____cacheline_aligned; 523 struct rx *rx_to_use; 524 struct rx *rx_to_clean; 525 struct rfd blank_rfd; 526 enum ru_state ru_running; 527 528 spinlock_t cb_lock ____cacheline_aligned; 529 spinlock_t cmd_lock; 530 struct csr __iomem *csr; 531 enum scb_cmd_lo cuc_cmd; 532 unsigned int cbs_avail; 533 struct cb *cbs; 534 struct cb *cb_to_use; 535 struct cb *cb_to_send; 536 struct cb *cb_to_clean; 537 u16 tx_command; 538 /* End: frequently used values: keep adjacent for cache effect */ 539 540 enum { 541 ich = (1 << 0), 542 promiscuous = (1 << 1), 543 multicast_all = (1 << 2), 544 wol_magic = (1 << 3), 545 ich_10h_workaround = (1 << 4), 546 } flags ____cacheline_aligned; 547 548 enum mac mac; 549 enum phy phy; 550 struct params params; 551 struct net_device_stats net_stats; 552 struct timer_list watchdog; 553 struct timer_list blink_timer; 554 struct mii_if_info mii; 555 struct work_struct tx_timeout_task; 556 enum loopback loopback; 557 558 struct mem *mem; 559 dma_addr_t dma_addr; 560 561 dma_addr_t cbs_dma_addr; 562 u8 adaptive_ifs; 563 u8 tx_threshold; 564 u32 tx_frames; 565 u32 tx_collisions; 566 u32 tx_deferred; 567 u32 tx_single_collisions; 568 u32 tx_multiple_collisions; 569 u32 tx_fc_pause; 570 u32 tx_tco_frames; 571 572 u32 rx_fc_pause; 573 u32 rx_fc_unsupported; 574 u32 rx_tco_frames; 575 u32 rx_over_length_errors; 576 577 u8 rev_id; 578 u16 leds; 579 u16 eeprom_wc; 580 u16 eeprom[256]; 581}; 582 583static inline void e100_write_flush(struct nic *nic) 584{ 585 /* Flush previous PCI writes through intermediate bridges 586 * by doing a benign read */ 587 (void)readb(&nic->csr->scb.status); 588} 589 590static inline void e100_enable_irq(struct nic *nic) 591{ 592 unsigned long flags; 593 594 spin_lock_irqsave(&nic->cmd_lock, flags); 595 writeb(irq_mask_none, &nic->csr->scb.cmd_hi); 596 spin_unlock_irqrestore(&nic->cmd_lock, flags); 597 e100_write_flush(nic); 598} 599 600static inline void e100_disable_irq(struct nic *nic) 601{ 602 unsigned long flags; 603 604 spin_lock_irqsave(&nic->cmd_lock, flags); 605 writeb(irq_mask_all, &nic->csr->scb.cmd_hi); 606 spin_unlock_irqrestore(&nic->cmd_lock, flags); 607 e100_write_flush(nic); 608} 609 610static void e100_hw_reset(struct nic *nic) 611{ 612 /* Put CU and RU into idle with a selective reset to get 613 * device off of PCI bus */ 614 writel(selective_reset, &nic->csr->port); 615 e100_write_flush(nic); udelay(20); 616 617 /* Now fully reset device */ 618 writel(software_reset, &nic->csr->port); 619 e100_write_flush(nic); udelay(20); 620 621 /* Mask off our interrupt line - it's unmasked after reset */ 622 e100_disable_irq(nic); 623} 624 625static int e100_self_test(struct nic *nic) 626{ 627 u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest); 628 629 /* Passing the self-test is a pretty good indication 630 * that the device can DMA to/from host memory */ 631 632 nic->mem->selftest.signature = 0; 633 nic->mem->selftest.result = 0xFFFFFFFF; 634 635 writel(selftest | dma_addr, &nic->csr->port); 636 e100_write_flush(nic); 637 /* Wait 10 msec for self-test to complete */ 638 msleep(10); 639 640 /* Interrupts are enabled after self-test */ 641 e100_disable_irq(nic); 642 643 /* Check results of self-test */ 644 if(nic->mem->selftest.result != 0) { 645 DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n", 646 nic->mem->selftest.result); 647 return -ETIMEDOUT; 648 } 649 if(nic->mem->selftest.signature == 0) { 650 DPRINTK(HW, ERR, "Self-test failed: timed out\n"); 651 return -ETIMEDOUT; 652 } 653 654 return 0; 655} 656 657static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, u16 data) 658{ 659 u32 cmd_addr_data[3]; 660 u8 ctrl; 661 int i, j; 662 663 /* Three cmds: write/erase enable, write data, write/erase disable */ 664 cmd_addr_data[0] = op_ewen << (addr_len - 2); 665 cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) | 666 cpu_to_le16(data); 667 cmd_addr_data[2] = op_ewds << (addr_len - 2); 668 669 /* Bit-bang cmds to write word to eeprom */ 670 for(j = 0; j < 3; j++) { 671 672 /* Chip select */ 673 writeb(eecs | eesk, &nic->csr->eeprom_ctrl_lo); 674 e100_write_flush(nic); udelay(4); 675 676 for(i = 31; i >= 0; i--) { 677 ctrl = (cmd_addr_data[j] & (1 << i)) ? 678 eecs | eedi : eecs; 679 writeb(ctrl, &nic->csr->eeprom_ctrl_lo); 680 e100_write_flush(nic); udelay(4); 681 682 writeb(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); 683 e100_write_flush(nic); udelay(4); 684 } 685 /* Wait 10 msec for cmd to complete */ 686 msleep(10); 687 688 /* Chip deselect */ 689 writeb(0, &nic->csr->eeprom_ctrl_lo); 690 e100_write_flush(nic); udelay(4); 691 } 692}; 693 694/* General technique stolen from the eepro100 driver - very clever */ 695static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr) 696{ 697 u32 cmd_addr_data; 698 u16 data = 0; 699 u8 ctrl; 700 int i; 701 702 cmd_addr_data = ((op_read << *addr_len) | addr) << 16; 703 704 /* Chip select */ 705 writeb(eecs | eesk, &nic->csr->eeprom_ctrl_lo); 706 e100_write_flush(nic); udelay(4); 707 708 /* Bit-bang to read word from eeprom */ 709 for(i = 31; i >= 0; i--) { 710 ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs; 711 writeb(ctrl, &nic->csr->eeprom_ctrl_lo); 712 e100_write_flush(nic); udelay(4); 713 714 writeb(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); 715 e100_write_flush(nic); udelay(4); 716 717 /* Eeprom drives a dummy zero to EEDO after receiving 718 * complete address. Use this to adjust addr_len. */ 719 ctrl = readb(&nic->csr->eeprom_ctrl_lo); 720 if(!(ctrl & eedo) && i > 16) { 721 *addr_len -= (i - 16); 722 i = 17; 723 } 724 725 data = (data << 1) | (ctrl & eedo ? 1 : 0); 726 } 727 728 /* Chip deselect */ 729 writeb(0, &nic->csr->eeprom_ctrl_lo); 730 e100_write_flush(nic); udelay(4); 731 732 return le16_to_cpu(data); 733}; 734 735/* Load entire EEPROM image into driver cache and validate checksum */ 736static int e100_eeprom_load(struct nic *nic) 737{ 738 u16 addr, addr_len = 8, checksum = 0; 739 740 /* Try reading with an 8-bit addr len to discover actual addr len */ 741 e100_eeprom_read(nic, &addr_len, 0); 742 nic->eeprom_wc = 1 << addr_len; 743 744 for(addr = 0; addr < nic->eeprom_wc; addr++) { 745 nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr); 746 if(addr < nic->eeprom_wc - 1) 747 checksum += cpu_to_le16(nic->eeprom[addr]); 748 } 749 750 /* The checksum, stored in the last word, is calculated such that 751 * the sum of words should be 0xBABA */ 752 checksum = le16_to_cpu(0xBABA - checksum); 753 if(checksum != nic->eeprom[nic->eeprom_wc - 1]) { 754 DPRINTK(PROBE, ERR, "EEPROM corrupted\n"); 755 return -EAGAIN; 756 } 757 758 return 0; 759} 760 761/* Save (portion of) driver EEPROM cache to device and update checksum */ 762static int e100_eeprom_save(struct nic *nic, u16 start, u16 count) 763{ 764 u16 addr, addr_len = 8, checksum = 0; 765 766 /* Try reading with an 8-bit addr len to discover actual addr len */ 767 e100_eeprom_read(nic, &addr_len, 0); 768 nic->eeprom_wc = 1 << addr_len; 769 770 if(start + count >= nic->eeprom_wc) 771 return -EINVAL; 772 773 for(addr = start; addr < start + count; addr++) 774 e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]); 775 776 /* The checksum, stored in the last word, is calculated such that 777 * the sum of words should be 0xBABA */ 778 for(addr = 0; addr < nic->eeprom_wc - 1; addr++) 779 checksum += cpu_to_le16(nic->eeprom[addr]); 780 nic->eeprom[nic->eeprom_wc - 1] = le16_to_cpu(0xBABA - checksum); 781 e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1, 782 nic->eeprom[nic->eeprom_wc - 1]); 783 784 return 0; 785} 786 787#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */ 788#define E100_WAIT_SCB_FAST 20 /* delay like the old code */ 789static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr) 790{ 791 unsigned long flags; 792 unsigned int i; 793 int err = 0; 794 795 spin_lock_irqsave(&nic->cmd_lock, flags); 796 797 /* Previous command is accepted when SCB clears */ 798 for(i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) { 799 if(likely(!readb(&nic->csr->scb.cmd_lo))) 800 break; 801 cpu_relax(); 802 if(unlikely(i > E100_WAIT_SCB_FAST)) 803 udelay(5); 804 } 805 if(unlikely(i == E100_WAIT_SCB_TIMEOUT)) { 806 err = -EAGAIN; 807 goto err_unlock; 808 } 809 810 if(unlikely(cmd != cuc_resume)) 811 writel(dma_addr, &nic->csr->scb.gen_ptr); 812 writeb(cmd, &nic->csr->scb.cmd_lo); 813 814err_unlock: 815 spin_unlock_irqrestore(&nic->cmd_lock, flags); 816 817 return err; 818} 819 820static inline int e100_exec_cb(struct nic *nic, struct sk_buff *skb, 821 void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *)) 822{ 823 struct cb *cb; 824 unsigned long flags; 825 int err = 0; 826 827 spin_lock_irqsave(&nic->cb_lock, flags); 828 829 if(unlikely(!nic->cbs_avail)) { 830 err = -ENOMEM; 831 goto err_unlock; 832 } 833 834 cb = nic->cb_to_use; 835 nic->cb_to_use = cb->next; 836 nic->cbs_avail--; 837 cb->skb = skb; 838 839 if(unlikely(!nic->cbs_avail)) 840 err = -ENOSPC; 841 842 cb_prepare(nic, cb, skb); 843 844 /* Order is important otherwise we'll be in a race with h/w: 845 * set S-bit in current first, then clear S-bit in previous. */ 846 cb->command |= cpu_to_le16(cb_s); 847 wmb(); 848 cb->prev->command &= cpu_to_le16(~cb_s); 849 850 while(nic->cb_to_send != nic->cb_to_use) { 851 if(unlikely(e100_exec_cmd(nic, nic->cuc_cmd, 852 nic->cb_to_send->dma_addr))) { 853 /* Ok, here's where things get sticky. It's 854 * possible that we can't schedule the command 855 * because the controller is too busy, so 856 * let's just queue the command and try again 857 * when another command is scheduled. */ 858 if(err == -ENOSPC) { 859 //request a reset 860 schedule_work(&nic->tx_timeout_task); 861 } 862 break; 863 } else { 864 nic->cuc_cmd = cuc_resume; 865 nic->cb_to_send = nic->cb_to_send->next; 866 } 867 } 868 869err_unlock: 870 spin_unlock_irqrestore(&nic->cb_lock, flags); 871 872 return err; 873} 874 875static u16 mdio_ctrl(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data) 876{ 877 u32 data_out = 0; 878 unsigned int i; 879 880 writel((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl); 881 882 for(i = 0; i < 100; i++) { 883 udelay(20); 884 if((data_out = readl(&nic->csr->mdi_ctrl)) & mdi_ready) 885 break; 886 } 887 888 DPRINTK(HW, DEBUG, 889 "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n", 890 dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out); 891 return (u16)data_out; 892} 893 894static int mdio_read(struct net_device *netdev, int addr, int reg) 895{ 896 return mdio_ctrl(netdev_priv(netdev), addr, mdi_read, reg, 0); 897} 898 899static void mdio_write(struct net_device *netdev, int addr, int reg, int data) 900{ 901 mdio_ctrl(netdev_priv(netdev), addr, mdi_write, reg, data); 902} 903 904static void e100_get_defaults(struct nic *nic) 905{ 906 struct param_range rfds = { .min = 16, .max = 256, .count = 64 }; 907 struct param_range cbs = { .min = 64, .max = 256, .count = 64 }; 908 909 pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id); 910 /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */ 911 nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->rev_id; 912 if(nic->mac == mac_unknown) 913 nic->mac = mac_82557_D100_A; 914 915 nic->params.rfds = rfds; 916 nic->params.cbs = cbs; 917 918 /* Quadwords to DMA into FIFO before starting frame transmit */ 919 nic->tx_threshold = 0xE0; 920 921 /* no interrupt for every tx completion, delay = 256us if not 557*/ 922 nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf | 923 ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i)); 924 925 /* Template for a freshly allocated RFD */ 926 nic->blank_rfd.command = cpu_to_le16(cb_el); 927 nic->blank_rfd.rbd = 0xFFFFFFFF; 928 nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN); 929 930 /* MII setup */ 931 nic->mii.phy_id_mask = 0x1F; 932 nic->mii.reg_num_mask = 0x1F; 933 nic->mii.dev = nic->netdev; 934 nic->mii.mdio_read = mdio_read; 935 nic->mii.mdio_write = mdio_write; 936} 937 938static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) 939{ 940 struct config *config = &cb->u.config; 941 u8 *c = (u8 *)config; 942 943 cb->command = cpu_to_le16(cb_config); 944 945 memset(config, 0, sizeof(struct config)); 946 947 config->byte_count = 0x16; /* bytes in this struct */ 948 config->rx_fifo_limit = 0x8; /* bytes in FIFO before DMA */ 949 config->direct_rx_dma = 0x1; /* reserved */ 950 config->standard_tcb = 0x1; /* 1=standard, 0=extended */ 951 config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */ 952 config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */ 953 config->tx_underrun_retry = 0x3; /* # of underrun retries */ 954 config->mii_mode = 0x1; /* 1=MII mode, 0=503 mode */ 955 config->pad10 = 0x6; 956 config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */ 957 config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */ 958 config->ifs = 0x6; /* x16 = inter frame spacing */ 959 config->ip_addr_hi = 0xF2; /* ARP IP filter - not used */ 960 config->pad15_1 = 0x1; 961 config->pad15_2 = 0x1; 962 config->crs_or_cdt = 0x0; /* 0=CRS only, 1=CRS or CDT */ 963 config->fc_delay_hi = 0x40; /* time delay for fc frame */ 964 config->tx_padding = 0x1; /* 1=pad short frames */ 965 config->fc_priority_threshold = 0x7; /* 7=priority fc disabled */ 966 config->pad18 = 0x1; 967 config->full_duplex_pin = 0x1; /* 1=examine FDX# pin */ 968 config->pad20_1 = 0x1F; 969 config->fc_priority_location = 0x1; /* 1=byte#31, 0=byte#19 */ 970 config->pad21_1 = 0x5; 971 972 config->adaptive_ifs = nic->adaptive_ifs; 973 config->loopback = nic->loopback; 974 975 if(nic->mii.force_media && nic->mii.full_duplex) 976 config->full_duplex_force = 0x1; /* 1=force, 0=auto */ 977 978 if(nic->flags & promiscuous || nic->loopback) { 979 config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ 980 config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ 981 config->promiscuous_mode = 0x1; /* 1=on, 0=off */ 982 } 983 984 if(nic->flags & multicast_all) 985 config->multicast_all = 0x1; /* 1=accept, 0=no */ 986 987 /* disable WoL when up */ 988 if(netif_running(nic->netdev) || !(nic->flags & wol_magic)) 989 config->magic_packet_disable = 0x1; /* 1=off, 0=on */ 990 991 if(nic->mac >= mac_82558_D101_A4) { 992 config->fc_disable = 0x1; /* 1=Tx fc off, 0=Tx fc on */ 993 config->mwi_enable = 0x1; /* 1=enable, 0=disable */ 994 config->standard_tcb = 0x0; /* 1=standard, 0=extended */ 995 config->rx_long_ok = 0x1; /* 1=VLANs ok, 0=standard */ 996 if(nic->mac >= mac_82559_D101M) 997 config->tno_intr = 0x1; /* TCO stats enable */ 998 else 999 config->standard_stat_counter = 0x0; 1000 } 1001 1002 DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", 1003 c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]); 1004 DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", 1005 c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]); 1006 DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", 1007 c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]); 1008} 1009 1010static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb) 1011{ 1012 int i; 1013 static const u32 ucode[UCODE_SIZE] = { 1014 /* NFS packets are misinterpreted as TCO packets and 1015 * incorrectly routed to the BMC over SMBus. This 1016 * microcode patch checks the fragmented IP bit in the 1017 * NFS/UDP header to distinguish between NFS and TCO. */ 1018 0x0EF70E36, 0x1FFF1FFF, 0x1FFF1FFF, 0x1FFF1FFF, 0x1FFF1FFF, 1019 0x1FFF1FFF, 0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000, 1020 0x00906EFD, 0x00900EFD, 0x00E00EF8, 1021 }; 1022 1023 if(nic->mac == mac_82551_F || nic->mac == mac_82551_10) { 1024 for(i = 0; i < UCODE_SIZE; i++) 1025 cb->u.ucode[i] = cpu_to_le32(ucode[i]); 1026 cb->command = cpu_to_le16(cb_ucode); 1027 } else 1028 cb->command = cpu_to_le16(cb_nop); 1029} 1030 1031static void e100_setup_iaaddr(struct nic *nic, struct cb *cb, 1032 struct sk_buff *skb) 1033{ 1034 cb->command = cpu_to_le16(cb_iaaddr); 1035 memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN); 1036} 1037 1038static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb) 1039{ 1040 cb->command = cpu_to_le16(cb_dump); 1041 cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr + 1042 offsetof(struct mem, dump_buf)); 1043} 1044 1045#define NCONFIG_AUTO_SWITCH 0x0080 1046#define MII_NSC_CONG MII_RESV1 1047#define NSC_CONG_ENABLE 0x0100 1048#define NSC_CONG_TXREADY 0x0400 1049#define ADVERTISE_FC_SUPPORTED 0x0400 1050static int e100_phy_init(struct nic *nic) 1051{ 1052 struct net_device *netdev = nic->netdev; 1053 u32 addr; 1054 u16 bmcr, stat, id_lo, id_hi, cong; 1055 1056 /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */ 1057 for(addr = 0; addr < 32; addr++) { 1058 nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr; 1059 bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); 1060 stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); 1061 stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); 1062 if(!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0)))) 1063 break; 1064 } 1065 DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id); 1066 if(addr == 32) 1067 return -EAGAIN; 1068 1069 /* Selected the phy and isolate the rest */ 1070 for(addr = 0; addr < 32; addr++) { 1071 if(addr != nic->mii.phy_id) { 1072 mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE); 1073 } else { 1074 bmcr = mdio_read(netdev, addr, MII_BMCR); 1075 mdio_write(netdev, addr, MII_BMCR, 1076 bmcr & ~BMCR_ISOLATE); 1077 } 1078 } 1079 1080 /* Get phy ID */ 1081 id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1); 1082 id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2); 1083 nic->phy = (u32)id_hi << 16 | (u32)id_lo; 1084 DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy); 1085 1086 /* Handle National tx phys */ 1087#define NCS_PHY_MODEL_MASK 0xFFF0FFFF 1088 if((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) { 1089 /* Disable congestion control */ 1090 cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG); 1091 cong |= NSC_CONG_TXREADY; 1092 cong &= ~NSC_CONG_ENABLE; 1093 mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong); 1094 } 1095 1096 if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && 1097 (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000))) { 1098 /* enable/disable MDI/MDI-X auto-switching. 1099 MDI/MDI-X auto-switching is disabled for 82551ER/QM chips */ 1100 if((nic->mac == mac_82551_E) || (nic->mac == mac_82551_F) || 1101 (nic->mac == mac_82551_10) || (nic->mii.force_media) || 1102 !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled)) 1103 mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, 0); 1104 else 1105 mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, NCONFIG_AUTO_SWITCH); 1106 } 1107 1108 return 0; 1109} 1110 1111static int e100_hw_init(struct nic *nic) 1112{ 1113 int err; 1114 1115 e100_hw_reset(nic); 1116 1117 DPRINTK(HW, ERR, "e100_hw_init\n"); 1118 if(!in_interrupt() && (err = e100_self_test(nic))) 1119 return err; 1120 1121 if((err = e100_phy_init(nic))) 1122 return err; 1123 if((err = e100_exec_cmd(nic, cuc_load_base, 0))) 1124 return err; 1125 if((err = e100_exec_cmd(nic, ruc_load_base, 0))) 1126 return err; 1127 if((err = e100_exec_cb(nic, NULL, e100_load_ucode))) 1128 return err; 1129 if((err = e100_exec_cb(nic, NULL, e100_configure))) 1130 return err; 1131 if((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr))) 1132 return err; 1133 if((err = e100_exec_cmd(nic, cuc_dump_addr, 1134 nic->dma_addr + offsetof(struct mem, stats)))) 1135 return err; 1136 if((err = e100_exec_cmd(nic, cuc_dump_reset, 0))) 1137 return err; 1138 1139 e100_disable_irq(nic); 1140 1141 return 0; 1142} 1143 1144static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) 1145{ 1146 struct net_device *netdev = nic->netdev; 1147 struct dev_mc_list *list = netdev->mc_list; 1148 u16 i, count = min(netdev->mc_count, E100_MAX_MULTICAST_ADDRS); 1149 1150 cb->command = cpu_to_le16(cb_multi); 1151 cb->u.multi.count = cpu_to_le16(count * ETH_ALEN); 1152 for(i = 0; list && i < count; i++, list = list->next) 1153 memcpy(&cb->u.multi.addr[i*ETH_ALEN], &list->dmi_addr, 1154 ETH_ALEN); 1155} 1156 1157static void e100_set_multicast_list(struct net_device *netdev) 1158{ 1159 struct nic *nic = netdev_priv(netdev); 1160 1161 DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n", 1162 netdev->mc_count, netdev->flags); 1163 1164 if(netdev->flags & IFF_PROMISC) 1165 nic->flags |= promiscuous; 1166 else 1167 nic->flags &= ~promiscuous; 1168 1169 if(netdev->flags & IFF_ALLMULTI || 1170 netdev->mc_count > E100_MAX_MULTICAST_ADDRS) 1171 nic->flags |= multicast_all; 1172 else 1173 nic->flags &= ~multicast_all; 1174 1175 e100_exec_cb(nic, NULL, e100_configure); 1176 e100_exec_cb(nic, NULL, e100_multi); 1177} 1178 1179static void e100_update_stats(struct nic *nic) 1180{ 1181 struct net_device_stats *ns = &nic->net_stats; 1182 struct stats *s = &nic->mem->stats; 1183 u32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause : 1184 (nic->mac < mac_82559_D101M) ? (u32 *)&s->xmt_tco_frames : 1185 &s->complete; 1186 1187 /* Device's stats reporting may take several microseconds to 1188 * complete, so where always waiting for results of the 1189 * previous command. */ 1190 1191 if(*complete == le32_to_cpu(cuc_dump_reset_complete)) { 1192 *complete = 0; 1193 nic->tx_frames = le32_to_cpu(s->tx_good_frames); 1194 nic->tx_collisions = le32_to_cpu(s->tx_total_collisions); 1195 ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions); 1196 ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions); 1197 ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs); 1198 ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns); 1199 ns->collisions += nic->tx_collisions; 1200 ns->tx_errors += le32_to_cpu(s->tx_max_collisions) + 1201 le32_to_cpu(s->tx_lost_crs); 1202 ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) + 1203 nic->rx_over_length_errors; 1204 ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors); 1205 ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors); 1206 ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors); 1207 ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors); 1208 ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors); 1209 ns->rx_errors += le32_to_cpu(s->rx_crc_errors) + 1210 le32_to_cpu(s->rx_alignment_errors) + 1211 le32_to_cpu(s->rx_short_frame_errors) + 1212 le32_to_cpu(s->rx_cdt_errors); 1213 nic->tx_deferred += le32_to_cpu(s->tx_deferred); 1214 nic->tx_single_collisions += 1215 le32_to_cpu(s->tx_single_collisions); 1216 nic->tx_multiple_collisions += 1217 le32_to_cpu(s->tx_multiple_collisions); 1218 if(nic->mac >= mac_82558_D101_A4) { 1219 nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause); 1220 nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause); 1221 nic->rx_fc_unsupported += 1222 le32_to_cpu(s->fc_rcv_unsupported); 1223 if(nic->mac >= mac_82559_D101M) { 1224 nic->tx_tco_frames += 1225 le16_to_cpu(s->xmt_tco_frames); 1226 nic->rx_tco_frames += 1227 le16_to_cpu(s->rcv_tco_frames); 1228 } 1229 } 1230 } 1231 1232 1233 if(e100_exec_cmd(nic, cuc_dump_reset, 0)) 1234 DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n"); 1235} 1236 1237static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex) 1238{ 1239 /* Adjust inter-frame-spacing (IFS) between two transmits if 1240 * we're getting collisions on a half-duplex connection. */ 1241 1242 if(duplex == DUPLEX_HALF) { 1243 u32 prev = nic->adaptive_ifs; 1244 u32 min_frames = (speed == SPEED_100) ? 1000 : 100; 1245 1246 if((nic->tx_frames / 32 < nic->tx_collisions) && 1247 (nic->tx_frames > min_frames)) { 1248 if(nic->adaptive_ifs < 60) 1249 nic->adaptive_ifs += 5; 1250 } else if (nic->tx_frames < min_frames) { 1251 if(nic->adaptive_ifs >= 5) 1252 nic->adaptive_ifs -= 5; 1253 } 1254 if(nic->adaptive_ifs != prev) 1255 e100_exec_cb(nic, NULL, e100_configure); 1256 } 1257} 1258 1259static void e100_watchdog(unsigned long data) 1260{ 1261 struct nic *nic = (struct nic *)data; 1262 struct ethtool_cmd cmd; 1263 1264 DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies); 1265 1266 /* mii library handles link maintenance tasks */ 1267 1268 mii_ethtool_gset(&nic->mii, &cmd); 1269 1270 if(mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) { 1271 DPRINTK(LINK, INFO, "link up, %sMbps, %s-duplex\n", 1272 cmd.speed == SPEED_100 ? "100" : "10", 1273 cmd.duplex == DUPLEX_FULL ? "full" : "half"); 1274 } else if(!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) { 1275 DPRINTK(LINK, INFO, "link down\n"); 1276 } 1277 1278 mii_check_link(&nic->mii); 1279 1280 /* Software generated interrupt to recover from (rare) Rx 1281 * allocation failure. 1282 * Unfortunately have to use a spinlock to not re-enable interrupts 1283 * accidentally, due to hardware that shares a register between the 1284 * interrupt mask bit and the SW Interrupt generation bit */ 1285 spin_lock_irq(&nic->cmd_lock); 1286 writeb(readb(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi); 1287 spin_unlock_irq(&nic->cmd_lock); 1288 e100_write_flush(nic); 1289 1290 e100_update_stats(nic); 1291 e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex); 1292 1293 if(nic->mac <= mac_82557_D100_C) 1294 /* Issue a multicast command to workaround a 557 lock up */ 1295 e100_set_multicast_list(nic->netdev); 1296 1297 if(nic->flags & ich && cmd.speed==SPEED_10 && cmd.duplex==DUPLEX_HALF) 1298 /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */ 1299 nic->flags |= ich_10h_workaround; 1300 else 1301 nic->flags &= ~ich_10h_workaround; 1302 1303 mod_timer(&nic->watchdog, jiffies + E100_WATCHDOG_PERIOD); 1304} 1305 1306static inline void e100_xmit_prepare(struct nic *nic, struct cb *cb, 1307 struct sk_buff *skb) 1308{ 1309 cb->command = nic->tx_command; 1310 /* interrupt every 16 packets regardless of delay */ 1311 if((nic->cbs_avail & ~15) == nic->cbs_avail) 1312 cb->command |= cpu_to_le16(cb_i); 1313 cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd); 1314 cb->u.tcb.tcb_byte_count = 0; 1315 cb->u.tcb.threshold = nic->tx_threshold; 1316 cb->u.tcb.tbd_count = 1; 1317 cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev, 1318 skb->data, skb->len, PCI_DMA_TODEVICE)); 1319 /* check for mapping failure? */ 1320 cb->u.tcb.tbd.size = cpu_to_le16(skb->len); 1321} 1322 1323static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 1324{ 1325 struct nic *nic = netdev_priv(netdev); 1326 int err; 1327 1328 if(nic->flags & ich_10h_workaround) { 1329 /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang. 1330 Issue a NOP command followed by a 1us delay before 1331 issuing the Tx command. */ 1332 if(e100_exec_cmd(nic, cuc_nop, 0)) 1333 DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n"); 1334 udelay(1); 1335 } 1336 1337 err = e100_exec_cb(nic, skb, e100_xmit_prepare); 1338 1339 switch(err) { 1340 case -ENOSPC: 1341 /* We queued the skb, but now we're out of space. */ 1342 DPRINTK(TX_ERR, DEBUG, "No space for CB\n"); 1343 netif_stop_queue(netdev); 1344 break; 1345 case -ENOMEM: 1346 /* This is a hard error - log it. */ 1347 DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n"); 1348 netif_stop_queue(netdev); 1349 return 1; 1350 } 1351 1352 netdev->trans_start = jiffies; 1353 return 0; 1354} 1355 1356static inline int e100_tx_clean(struct nic *nic) 1357{ 1358 struct cb *cb; 1359 int tx_cleaned = 0; 1360 1361 spin_lock(&nic->cb_lock); 1362 1363 DPRINTK(TX_DONE, DEBUG, "cb->status = 0x%04X\n", 1364 nic->cb_to_clean->status); 1365 1366 /* Clean CBs marked complete */ 1367 for(cb = nic->cb_to_clean; 1368 cb->status & cpu_to_le16(cb_complete); 1369 cb = nic->cb_to_clean = cb->next) { 1370 if(likely(cb->skb != NULL)) { 1371 nic->net_stats.tx_packets++; 1372 nic->net_stats.tx_bytes += cb->skb->len; 1373 1374 pci_unmap_single(nic->pdev, 1375 le32_to_cpu(cb->u.tcb.tbd.buf_addr), 1376 le16_to_cpu(cb->u.tcb.tbd.size), 1377 PCI_DMA_TODEVICE); 1378 dev_kfree_skb_any(cb->skb); 1379 cb->skb = NULL; 1380 tx_cleaned = 1; 1381 } 1382 cb->status = 0; 1383 nic->cbs_avail++; 1384 } 1385 1386 spin_unlock(&nic->cb_lock); 1387 1388 /* Recover from running out of Tx resources in xmit_frame */ 1389 if(unlikely(tx_cleaned && netif_queue_stopped(nic->netdev))) 1390 netif_wake_queue(nic->netdev); 1391 1392 return tx_cleaned; 1393} 1394 1395static void e100_clean_cbs(struct nic *nic) 1396{ 1397 if(nic->cbs) { 1398 while(nic->cbs_avail != nic->params.cbs.count) { 1399 struct cb *cb = nic->cb_to_clean; 1400 if(cb->skb) { 1401 pci_unmap_single(nic->pdev, 1402 le32_to_cpu(cb->u.tcb.tbd.buf_addr), 1403 le16_to_cpu(cb->u.tcb.tbd.size), 1404 PCI_DMA_TODEVICE); 1405 dev_kfree_skb(cb->skb); 1406 } 1407 nic->cb_to_clean = nic->cb_to_clean->next; 1408 nic->cbs_avail++; 1409 } 1410 pci_free_consistent(nic->pdev, 1411 sizeof(struct cb) * nic->params.cbs.count, 1412 nic->cbs, nic->cbs_dma_addr); 1413 nic->cbs = NULL; 1414 nic->cbs_avail = 0; 1415 } 1416 nic->cuc_cmd = cuc_start; 1417 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = 1418 nic->cbs; 1419} 1420 1421static int e100_alloc_cbs(struct nic *nic) 1422{ 1423 struct cb *cb; 1424 unsigned int i, count = nic->params.cbs.count; 1425 1426 nic->cuc_cmd = cuc_start; 1427 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL; 1428 nic->cbs_avail = 0; 1429 1430 nic->cbs = pci_alloc_consistent(nic->pdev, 1431 sizeof(struct cb) * count, &nic->cbs_dma_addr); 1432 if(!nic->cbs) 1433 return -ENOMEM; 1434 1435 for(cb = nic->cbs, i = 0; i < count; cb++, i++) { 1436 cb->next = (i + 1 < count) ? cb + 1 : nic->cbs; 1437 cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1; 1438 1439 cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb); 1440 cb->link = cpu_to_le32(nic->cbs_dma_addr + 1441 ((i+1) % count) * sizeof(struct cb)); 1442 cb->skb = NULL; 1443 } 1444 1445 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs; 1446 nic->cbs_avail = count; 1447 1448 return 0; 1449} 1450 1451static inline void e100_start_receiver(struct nic *nic, struct rx *rx) 1452{ 1453 if(!nic->rxs) return; 1454 if(RU_SUSPENDED != nic->ru_running) return; 1455 1456 /* handle init time starts */ 1457 if(!rx) rx = nic->rxs; 1458 1459 /* (Re)start RU if suspended or idle and RFA is non-NULL */ 1460 if(rx->skb) { 1461 e100_exec_cmd(nic, ruc_start, rx->dma_addr); 1462 nic->ru_running = RU_RUNNING; 1463 } 1464} 1465 1466#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN) 1467static inline int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) 1468{ 1469 if(!(rx->skb = dev_alloc_skb(RFD_BUF_LEN + NET_IP_ALIGN))) 1470 return -ENOMEM; 1471 1472 /* Align, init, and map the RFD. */ 1473 rx->skb->dev = nic->netdev; 1474 skb_reserve(rx->skb, NET_IP_ALIGN); 1475 memcpy(rx->skb->data, &nic->blank_rfd, sizeof(struct rfd)); 1476 rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data, 1477 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); 1478 1479 if(pci_dma_mapping_error(rx->dma_addr)) { 1480 dev_kfree_skb_any(rx->skb); 1481 rx->skb = 0; 1482 rx->dma_addr = 0; 1483 return -ENOMEM; 1484 } 1485 1486 /* Link the RFD to end of RFA by linking previous RFD to 1487 * this one, and clearing EL bit of previous. */ 1488 if(rx->prev->skb) { 1489 struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data; 1490 put_unaligned(cpu_to_le32(rx->dma_addr), 1491 (u32 *)&prev_rfd->link); 1492 wmb(); 1493 prev_rfd->command &= ~cpu_to_le16(cb_el); 1494 pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr, 1495 sizeof(struct rfd), PCI_DMA_TODEVICE); 1496 } 1497 1498 return 0; 1499} 1500 1501static inline int e100_rx_indicate(struct nic *nic, struct rx *rx, 1502 unsigned int *work_done, unsigned int work_to_do) 1503{ 1504 struct sk_buff *skb = rx->skb; 1505 struct rfd *rfd = (struct rfd *)skb->data; 1506 u16 rfd_status, actual_size; 1507 1508 if(unlikely(work_done && *work_done >= work_to_do)) 1509 return -EAGAIN; 1510 1511 /* Need to sync before taking a peek at cb_complete bit */ 1512 pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr, 1513 sizeof(struct rfd), PCI_DMA_FROMDEVICE); 1514 rfd_status = le16_to_cpu(rfd->status); 1515 1516 DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status); 1517 1518 /* If data isn't ready, nothing to indicate */ 1519 if(unlikely(!(rfd_status & cb_complete))) 1520 return -ENODATA; 1521 1522 /* Get actual data size */ 1523 actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF; 1524 if(unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd))) 1525 actual_size = RFD_BUF_LEN - sizeof(struct rfd); 1526 1527 /* Get data */ 1528 pci_unmap_single(nic->pdev, rx->dma_addr, 1529 RFD_BUF_LEN, PCI_DMA_FROMDEVICE); 1530 1531 /* this allows for a fast restart without re-enabling interrupts */ 1532 if(le16_to_cpu(rfd->command) & cb_el) 1533 nic->ru_running = RU_SUSPENDED; 1534 1535 /* Pull off the RFD and put the actual data (minus eth hdr) */ 1536 skb_reserve(skb, sizeof(struct rfd)); 1537 skb_put(skb, actual_size); 1538 skb->protocol = eth_type_trans(skb, nic->netdev); 1539 1540 if(unlikely(!(rfd_status & cb_ok))) { 1541 /* Don't indicate if hardware indicates errors */ 1542 dev_kfree_skb_any(skb); 1543 } else if(actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) { 1544 /* Don't indicate oversized frames */ 1545 nic->rx_over_length_errors++; 1546 dev_kfree_skb_any(skb); 1547 } else { 1548 nic->net_stats.rx_packets++; 1549 nic->net_stats.rx_bytes += actual_size; 1550 nic->netdev->last_rx = jiffies; 1551 netif_receive_skb(skb); 1552 if(work_done) 1553 (*work_done)++; 1554 } 1555 1556 rx->skb = NULL; 1557 1558 return 0; 1559} 1560 1561static inline void e100_rx_clean(struct nic *nic, unsigned int *work_done, 1562 unsigned int work_to_do) 1563{ 1564 struct rx *rx; 1565 int restart_required = 0; 1566 struct rx *rx_to_start = NULL; 1567 1568 /* are we already rnr? then pay attention!!! this ensures that 1569 * the state machine progression never allows a start with a 1570 * partially cleaned list, avoiding a race between hardware 1571 * and rx_to_clean when in NAPI mode */ 1572 if(RU_SUSPENDED == nic->ru_running) 1573 restart_required = 1; 1574 1575 /* Indicate newly arrived packets */ 1576 for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) { 1577 int err = e100_rx_indicate(nic, rx, work_done, work_to_do); 1578 if(-EAGAIN == err) { 1579 /* hit quota so have more work to do, restart once 1580 * cleanup is complete */ 1581 restart_required = 0; 1582 break; 1583 } else if(-ENODATA == err) 1584 break; /* No more to clean */ 1585 } 1586 1587 /* save our starting point as the place we'll restart the receiver */ 1588 if(restart_required) 1589 rx_to_start = nic->rx_to_clean; 1590 1591 /* Alloc new skbs to refill list */ 1592 for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) { 1593 if(unlikely(e100_rx_alloc_skb(nic, rx))) 1594 break; /* Better luck next time (see watchdog) */ 1595 } 1596 1597 if(restart_required) { 1598 // ack the rnr? 1599 writeb(stat_ack_rnr, &nic->csr->scb.stat_ack); 1600 e100_start_receiver(nic, rx_to_start); 1601 if(work_done) 1602 (*work_done)++; 1603 } 1604} 1605 1606static void e100_rx_clean_list(struct nic *nic) 1607{ 1608 struct rx *rx; 1609 unsigned int i, count = nic->params.rfds.count; 1610 1611 nic->ru_running = RU_UNINITIALIZED; 1612 1613 if(nic->rxs) { 1614 for(rx = nic->rxs, i = 0; i < count; rx++, i++) { 1615 if(rx->skb) { 1616 pci_unmap_single(nic->pdev, rx->dma_addr, 1617 RFD_BUF_LEN, PCI_DMA_FROMDEVICE); 1618 dev_kfree_skb(rx->skb); 1619 } 1620 } 1621 kfree(nic->rxs); 1622 nic->rxs = NULL; 1623 } 1624 1625 nic->rx_to_use = nic->rx_to_clean = NULL; 1626} 1627 1628static int e100_rx_alloc_list(struct nic *nic) 1629{ 1630 struct rx *rx; 1631 unsigned int i, count = nic->params.rfds.count; 1632 1633 nic->rx_to_use = nic->rx_to_clean = NULL; 1634 nic->ru_running = RU_UNINITIALIZED; 1635 1636 if(!(nic->rxs = kmalloc(sizeof(struct rx) * count, GFP_ATOMIC))) 1637 return -ENOMEM; 1638 memset(nic->rxs, 0, sizeof(struct rx) * count); 1639 1640 for(rx = nic->rxs, i = 0; i < count; rx++, i++) { 1641 rx->next = (i + 1 < count) ? rx + 1 : nic->rxs; 1642 rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1; 1643 if(e100_rx_alloc_skb(nic, rx)) { 1644 e100_rx_clean_list(nic); 1645 return -ENOMEM; 1646 } 1647 } 1648 1649 nic->rx_to_use = nic->rx_to_clean = nic->rxs; 1650 nic->ru_running = RU_SUSPENDED; 1651 1652 return 0; 1653} 1654 1655static irqreturn_t e100_intr(int irq, void *dev_id, struct pt_regs *regs) 1656{ 1657 struct net_device *netdev = dev_id; 1658 struct nic *nic = netdev_priv(netdev); 1659 u8 stat_ack = readb(&nic->csr->scb.stat_ack); 1660 1661 DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack); 1662 1663 if(stat_ack == stat_ack_not_ours || /* Not our interrupt */ 1664 stat_ack == stat_ack_not_present) /* Hardware is ejected */ 1665 return IRQ_NONE; 1666 1667 /* Ack interrupt(s) */ 1668 writeb(stat_ack, &nic->csr->scb.stat_ack); 1669 1670 /* We hit Receive No Resource (RNR); restart RU after cleaning */ 1671 if(stat_ack & stat_ack_rnr) 1672 nic->ru_running = RU_SUSPENDED; 1673 1674 if(likely(netif_rx_schedule_prep(netdev))) { 1675 e100_disable_irq(nic); 1676 __netif_rx_schedule(netdev); 1677 } 1678 1679 return IRQ_HANDLED; 1680} 1681 1682static int e100_poll(struct net_device *netdev, int *budget) 1683{ 1684 struct nic *nic = netdev_priv(netdev); 1685 unsigned int work_to_do = min(netdev->quota, *budget); 1686 unsigned int work_done = 0; 1687 int tx_cleaned; 1688 1689 e100_rx_clean(nic, &work_done, work_to_do); 1690 tx_cleaned = e100_tx_clean(nic); 1691 1692 /* If no Rx and Tx cleanup work was done, exit polling mode. */ 1693 if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) { 1694 netif_rx_complete(netdev); 1695 e100_enable_irq(nic); 1696 return 0; 1697 } 1698 1699 *budget -= work_done; 1700 netdev->quota -= work_done; 1701 1702 return 1; 1703} 1704 1705#ifdef CONFIG_NET_POLL_CONTROLLER 1706static void e100_netpoll(struct net_device *netdev) 1707{ 1708 struct nic *nic = netdev_priv(netdev); 1709 1710 e100_disable_irq(nic); 1711 e100_intr(nic->pdev->irq, netdev, NULL); 1712 e100_tx_clean(nic); 1713 e100_enable_irq(nic); 1714} 1715#endif 1716 1717static struct net_device_stats *e100_get_stats(struct net_device *netdev) 1718{ 1719 struct nic *nic = netdev_priv(netdev); 1720 return &nic->net_stats; 1721} 1722 1723static int e100_set_mac_address(struct net_device *netdev, void *p) 1724{ 1725 struct nic *nic = netdev_priv(netdev); 1726 struct sockaddr *addr = p; 1727 1728 if (!is_valid_ether_addr(addr->sa_data)) 1729 return -EADDRNOTAVAIL; 1730 1731 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 1732 e100_exec_cb(nic, NULL, e100_setup_iaaddr); 1733 1734 return 0; 1735} 1736 1737static int e100_change_mtu(struct net_device *netdev, int new_mtu) 1738{ 1739 if(new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN) 1740 return -EINVAL; 1741 netdev->mtu = new_mtu; 1742 return 0; 1743} 1744 1745#ifdef CONFIG_PM 1746static int e100_asf(struct nic *nic) 1747{ 1748 /* ASF can be enabled from eeprom */ 1749 return((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) && 1750 (nic->eeprom[eeprom_config_asf] & eeprom_asf) && 1751 !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) && 1752 ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE)); 1753} 1754#endif 1755 1756static int e100_up(struct nic *nic) 1757{ 1758 int err; 1759 1760 if((err = e100_rx_alloc_list(nic))) 1761 return err; 1762 if((err = e100_alloc_cbs(nic))) 1763 goto err_rx_clean_list; 1764 if((err = e100_hw_init(nic))) 1765 goto err_clean_cbs; 1766 e100_set_multicast_list(nic->netdev); 1767 e100_start_receiver(nic, 0); 1768 mod_timer(&nic->watchdog, jiffies); 1769 if((err = request_irq(nic->pdev->irq, e100_intr, SA_SHIRQ, 1770 nic->netdev->name, nic->netdev))) 1771 goto err_no_irq; 1772 netif_wake_queue(nic->netdev); 1773 netif_poll_enable(nic->netdev); 1774 /* enable ints _after_ enabling poll, preventing a race between 1775 * disable ints+schedule */ 1776 e100_enable_irq(nic); 1777 return 0; 1778 1779err_no_irq: 1780 del_timer_sync(&nic->watchdog); 1781err_clean_cbs: 1782 e100_clean_cbs(nic); 1783err_rx_clean_list: 1784 e100_rx_clean_list(nic); 1785 return err; 1786} 1787 1788static void e100_down(struct nic *nic) 1789{ 1790 /* wait here for poll to complete */ 1791 netif_poll_disable(nic->netdev); 1792 netif_stop_queue(nic->netdev); 1793 e100_hw_reset(nic); 1794 free_irq(nic->pdev->irq, nic->netdev); 1795 del_timer_sync(&nic->watchdog); 1796 netif_carrier_off(nic->netdev); 1797 e100_clean_cbs(nic); 1798 e100_rx_clean_list(nic); 1799} 1800 1801static void e100_tx_timeout(struct net_device *netdev) 1802{ 1803 struct nic *nic = netdev_priv(netdev); 1804 1805 /* Reset outside of interrupt context, to avoid request_irq 1806 * in interrupt context */ 1807 schedule_work(&nic->tx_timeout_task); 1808} 1809 1810static void e100_tx_timeout_task(struct net_device *netdev) 1811{ 1812 struct nic *nic = netdev_priv(netdev); 1813 1814 DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n", 1815 readb(&nic->csr->scb.status)); 1816 e100_down(netdev_priv(netdev)); 1817 e100_up(netdev_priv(netdev)); 1818} 1819 1820static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode) 1821{ 1822 int err; 1823 struct sk_buff *skb; 1824 1825 /* Use driver resources to perform internal MAC or PHY 1826 * loopback test. A single packet is prepared and transmitted 1827 * in loopback mode, and the test passes if the received 1828 * packet compares byte-for-byte to the transmitted packet. */ 1829 1830 if((err = e100_rx_alloc_list(nic))) 1831 return err; 1832 if((err = e100_alloc_cbs(nic))) 1833 goto err_clean_rx; 1834 1835 /* ICH PHY loopback is broken so do MAC loopback instead */ 1836 if(nic->flags & ich && loopback_mode == lb_phy) 1837 loopback_mode = lb_mac; 1838 1839 nic->loopback = loopback_mode; 1840 if((err = e100_hw_init(nic))) 1841 goto err_loopback_none; 1842 1843 if(loopback_mode == lb_phy) 1844 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 1845 BMCR_LOOPBACK); 1846 1847 e100_start_receiver(nic, 0); 1848 1849 if(!(skb = dev_alloc_skb(ETH_DATA_LEN))) { 1850 err = -ENOMEM; 1851 goto err_loopback_none; 1852 } 1853 skb_put(skb, ETH_DATA_LEN); 1854 memset(skb->data, 0xFF, ETH_DATA_LEN); 1855 e100_xmit_frame(skb, nic->netdev); 1856 1857 msleep(10); 1858 1859 if(memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd), 1860 skb->data, ETH_DATA_LEN)) 1861 err = -EAGAIN; 1862 1863err_loopback_none: 1864 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0); 1865 nic->loopback = lb_none; 1866 e100_hw_init(nic); 1867 e100_clean_cbs(nic); 1868err_clean_rx: 1869 e100_rx_clean_list(nic); 1870 return err; 1871} 1872 1873#define MII_LED_CONTROL 0x1B 1874static void e100_blink_led(unsigned long data) 1875{ 1876 struct nic *nic = (struct nic *)data; 1877 enum led_state { 1878 led_on = 0x01, 1879 led_off = 0x04, 1880 led_on_559 = 0x05, 1881 led_on_557 = 0x07, 1882 }; 1883 1884 nic->leds = (nic->leds & led_on) ? led_off : 1885 (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559; 1886 mdio_write(nic->netdev, nic->mii.phy_id, MII_LED_CONTROL, nic->leds); 1887 mod_timer(&nic->blink_timer, jiffies + HZ / 4); 1888} 1889 1890static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) 1891{ 1892 struct nic *nic = netdev_priv(netdev); 1893 return mii_ethtool_gset(&nic->mii, cmd); 1894} 1895 1896static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd) 1897{ 1898 struct nic *nic = netdev_priv(netdev); 1899 int err; 1900 1901 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET); 1902 err = mii_ethtool_sset(&nic->mii, cmd); 1903 e100_exec_cb(nic, NULL, e100_configure); 1904 1905 return err; 1906} 1907 1908static void e100_get_drvinfo(struct net_device *netdev, 1909 struct ethtool_drvinfo *info) 1910{ 1911 struct nic *nic = netdev_priv(netdev); 1912 strcpy(info->driver, DRV_NAME); 1913 strcpy(info->version, DRV_VERSION); 1914 strcpy(info->fw_version, "N/A"); 1915 strcpy(info->bus_info, pci_name(nic->pdev)); 1916} 1917 1918static int e100_get_regs_len(struct net_device *netdev) 1919{ 1920 struct nic *nic = netdev_priv(netdev); 1921#define E100_PHY_REGS 0x1C 1922#define E100_REGS_LEN 1 + E100_PHY_REGS + \ 1923 sizeof(nic->mem->dump_buf) / sizeof(u32) 1924 return E100_REGS_LEN * sizeof(u32); 1925} 1926 1927static void e100_get_regs(struct net_device *netdev, 1928 struct ethtool_regs *regs, void *p) 1929{ 1930 struct nic *nic = netdev_priv(netdev); 1931 u32 *buff = p; 1932 int i; 1933 1934 regs->version = (1 << 24) | nic->rev_id; 1935 buff[0] = readb(&nic->csr->scb.cmd_hi) << 24 | 1936 readb(&nic->csr->scb.cmd_lo) << 16 | 1937 readw(&nic->csr->scb.status); 1938 for(i = E100_PHY_REGS; i >= 0; i--) 1939 buff[1 + E100_PHY_REGS - i] = 1940 mdio_read(netdev, nic->mii.phy_id, i); 1941 memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf)); 1942 e100_exec_cb(nic, NULL, e100_dump); 1943 msleep(10); 1944 memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf, 1945 sizeof(nic->mem->dump_buf)); 1946} 1947 1948static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 1949{ 1950 struct nic *nic = netdev_priv(netdev); 1951 wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0; 1952 wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0; 1953} 1954 1955static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 1956{ 1957 struct nic *nic = netdev_priv(netdev); 1958 1959 if(wol->wolopts != WAKE_MAGIC && wol->wolopts != 0) 1960 return -EOPNOTSUPP; 1961 1962 if(wol->wolopts) 1963 nic->flags |= wol_magic; 1964 else 1965 nic->flags &= ~wol_magic; 1966 1967 e100_exec_cb(nic, NULL, e100_configure); 1968 1969 return 0; 1970} 1971 1972static u32 e100_get_msglevel(struct net_device *netdev) 1973{ 1974 struct nic *nic = netdev_priv(netdev); 1975 return nic->msg_enable; 1976} 1977 1978static void e100_set_msglevel(struct net_device *netdev, u32 value) 1979{ 1980 struct nic *nic = netdev_priv(netdev); 1981 nic->msg_enable = value; 1982} 1983 1984static int e100_nway_reset(struct net_device *netdev) 1985{ 1986 struct nic *nic = netdev_priv(netdev); 1987 return mii_nway_restart(&nic->mii); 1988} 1989 1990static u32 e100_get_link(struct net_device *netdev) 1991{ 1992 struct nic *nic = netdev_priv(netdev); 1993 return mii_link_ok(&nic->mii); 1994} 1995 1996static int e100_get_eeprom_len(struct net_device *netdev) 1997{ 1998 struct nic *nic = netdev_priv(netdev); 1999 return nic->eeprom_wc << 1; 2000} 2001 2002#define E100_EEPROM_MAGIC 0x1234 2003static int e100_get_eeprom(struct net_device *netdev, 2004 struct ethtool_eeprom *eeprom, u8 *bytes) 2005{ 2006 struct nic *nic = netdev_priv(netdev); 2007 2008 eeprom->magic = E100_EEPROM_MAGIC; 2009 memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len); 2010 2011 return 0; 2012} 2013 2014static int e100_set_eeprom(struct net_device *netdev, 2015 struct ethtool_eeprom *eeprom, u8 *bytes) 2016{ 2017 struct nic *nic = netdev_priv(netdev); 2018 2019 if(eeprom->magic != E100_EEPROM_MAGIC) 2020 return -EINVAL; 2021 2022 memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len); 2023 2024 return e100_eeprom_save(nic, eeprom->offset >> 1, 2025 (eeprom->len >> 1) + 1); 2026} 2027 2028static void e100_get_ringparam(struct net_device *netdev, 2029 struct ethtool_ringparam *ring) 2030{ 2031 struct nic *nic = netdev_priv(netdev); 2032 struct param_range *rfds = &nic->params.rfds; 2033 struct param_range *cbs = &nic->params.cbs; 2034 2035 ring->rx_max_pending = rfds->max; 2036 ring->tx_max_pending = cbs->max; 2037 ring->rx_mini_max_pending = 0; 2038 ring->rx_jumbo_max_pending = 0; 2039 ring->rx_pending = rfds->count; 2040 ring->tx_pending = cbs->count; 2041 ring->rx_mini_pending = 0; 2042 ring->rx_jumbo_pending = 0; 2043} 2044 2045static int e100_set_ringparam(struct net_device *netdev, 2046 struct ethtool_ringparam *ring) 2047{ 2048 struct nic *nic = netdev_priv(netdev); 2049 struct param_range *rfds = &nic->params.rfds; 2050 struct param_range *cbs = &nic->params.cbs; 2051 2052 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 2053 return -EINVAL; 2054 2055 if(netif_running(netdev)) 2056 e100_down(nic); 2057 rfds->count = max(ring->rx_pending, rfds->min); 2058 rfds->count = min(rfds->count, rfds->max); 2059 cbs->count = max(ring->tx_pending, cbs->min); 2060 cbs->count = min(cbs->count, cbs->max); 2061 DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n", 2062 rfds->count, cbs->count); 2063 if(netif_running(netdev)) 2064 e100_up(nic); 2065 2066 return 0; 2067} 2068 2069static const char e100_gstrings_test[][ETH_GSTRING_LEN] = { 2070 "Link test (on/offline)", 2071 "Eeprom test (on/offline)", 2072 "Self test (offline)", 2073 "Mac loopback (offline)", 2074 "Phy loopback (offline)", 2075}; 2076#define E100_TEST_LEN sizeof(e100_gstrings_test) / ETH_GSTRING_LEN 2077 2078static int e100_diag_test_count(struct net_device *netdev) 2079{ 2080 return E100_TEST_LEN; 2081} 2082 2083static void e100_diag_test(struct net_device *netdev, 2084 struct ethtool_test *test, u64 *data) 2085{ 2086 struct ethtool_cmd cmd; 2087 struct nic *nic = netdev_priv(netdev); 2088 int i, err; 2089 2090 memset(data, 0, E100_TEST_LEN * sizeof(u64)); 2091 data[0] = !mii_link_ok(&nic->mii); 2092 data[1] = e100_eeprom_load(nic); 2093 if(test->flags & ETH_TEST_FL_OFFLINE) { 2094 2095 /* save speed, duplex & autoneg settings */ 2096 err = mii_ethtool_gset(&nic->mii, &cmd); 2097 2098 if(netif_running(netdev)) 2099 e100_down(nic); 2100 data[2] = e100_self_test(nic); 2101 data[3] = e100_loopback_test(nic, lb_mac); 2102 data[4] = e100_loopback_test(nic, lb_phy); 2103 2104 /* restore speed, duplex & autoneg settings */ 2105 err = mii_ethtool_sset(&nic->mii, &cmd); 2106 2107 if(netif_running(netdev)) 2108 e100_up(nic); 2109 } 2110 for(i = 0; i < E100_TEST_LEN; i++) 2111 test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0; 2112 2113 msleep_interruptible(4 * 1000); 2114} 2115 2116static int e100_phys_id(struct net_device *netdev, u32 data) 2117{ 2118 struct nic *nic = netdev_priv(netdev); 2119 2120 if(!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ)) 2121 data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ); 2122 mod_timer(&nic->blink_timer, jiffies); 2123 msleep_interruptible(data * 1000); 2124 del_timer_sync(&nic->blink_timer); 2125 mdio_write(netdev, nic->mii.phy_id, MII_LED_CONTROL, 0); 2126 2127 return 0; 2128} 2129 2130static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = { 2131 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", 2132 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", 2133 "rx_length_errors", "rx_over_errors", "rx_crc_errors", 2134 "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors", 2135 "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", 2136 "tx_heartbeat_errors", "tx_window_errors", 2137 /* device-specific stats */ 2138 "tx_deferred", "tx_single_collisions", "tx_multi_collisions", 2139 "tx_flow_control_pause", "rx_flow_control_pause", 2140 "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets", 2141}; 2142#define E100_NET_STATS_LEN 21 2143#define E100_STATS_LEN sizeof(e100_gstrings_stats) / ETH_GSTRING_LEN 2144 2145static int e100_get_stats_count(struct net_device *netdev) 2146{ 2147 return E100_STATS_LEN; 2148} 2149 2150static void e100_get_ethtool_stats(struct net_device *netdev, 2151 struct ethtool_stats *stats, u64 *data) 2152{ 2153 struct nic *nic = netdev_priv(netdev); 2154 int i; 2155 2156 for(i = 0; i < E100_NET_STATS_LEN; i++) 2157 data[i] = ((unsigned long *)&nic->net_stats)[i]; 2158 2159 data[i++] = nic->tx_deferred; 2160 data[i++] = nic->tx_single_collisions; 2161 data[i++] = nic->tx_multiple_collisions; 2162 data[i++] = nic->tx_fc_pause; 2163 data[i++] = nic->rx_fc_pause; 2164 data[i++] = nic->rx_fc_unsupported; 2165 data[i++] = nic->tx_tco_frames; 2166 data[i++] = nic->rx_tco_frames; 2167} 2168 2169static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data) 2170{ 2171 switch(stringset) { 2172 case ETH_SS_TEST: 2173 memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test)); 2174 break; 2175 case ETH_SS_STATS: 2176 memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats)); 2177 break; 2178 } 2179} 2180 2181static struct ethtool_ops e100_ethtool_ops = { 2182 .get_settings = e100_get_settings, 2183 .set_settings = e100_set_settings, 2184 .get_drvinfo = e100_get_drvinfo, 2185 .get_regs_len = e100_get_regs_len, 2186 .get_regs = e100_get_regs, 2187 .get_wol = e100_get_wol, 2188 .set_wol = e100_set_wol, 2189 .get_msglevel = e100_get_msglevel, 2190 .set_msglevel = e100_set_msglevel, 2191 .nway_reset = e100_nway_reset, 2192 .get_link = e100_get_link, 2193 .get_eeprom_len = e100_get_eeprom_len, 2194 .get_eeprom = e100_get_eeprom, 2195 .set_eeprom = e100_set_eeprom, 2196 .get_ringparam = e100_get_ringparam, 2197 .set_ringparam = e100_set_ringparam, 2198 .self_test_count = e100_diag_test_count, 2199 .self_test = e100_diag_test, 2200 .get_strings = e100_get_strings, 2201 .phys_id = e100_phys_id, 2202 .get_stats_count = e100_get_stats_count, 2203 .get_ethtool_stats = e100_get_ethtool_stats, 2204}; 2205 2206static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 2207{ 2208 struct nic *nic = netdev_priv(netdev); 2209 2210 return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL); 2211} 2212 2213static int e100_alloc(struct nic *nic) 2214{ 2215 nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem), 2216 &nic->dma_addr); 2217 return nic->mem ? 0 : -ENOMEM; 2218} 2219 2220static void e100_free(struct nic *nic) 2221{ 2222 if(nic->mem) { 2223 pci_free_consistent(nic->pdev, sizeof(struct mem), 2224 nic->mem, nic->dma_addr); 2225 nic->mem = NULL; 2226 } 2227} 2228 2229static int e100_open(struct net_device *netdev) 2230{ 2231 struct nic *nic = netdev_priv(netdev); 2232 int err = 0; 2233 2234 netif_carrier_off(netdev); 2235 if((err = e100_up(nic))) 2236 DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n"); 2237 return err; 2238} 2239 2240static int e100_close(struct net_device *netdev) 2241{ 2242 e100_down(netdev_priv(netdev)); 2243 return 0; 2244} 2245 2246static int __devinit e100_probe(struct pci_dev *pdev, 2247 const struct pci_device_id *ent) 2248{ 2249 struct net_device *netdev; 2250 struct nic *nic; 2251 int err; 2252 2253 if(!(netdev = alloc_etherdev(sizeof(struct nic)))) { 2254 if(((1 << debug) - 1) & NETIF_MSG_PROBE) 2255 printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n"); 2256 return -ENOMEM; 2257 } 2258 2259 netdev->open = e100_open; 2260 netdev->stop = e100_close; 2261 netdev->hard_start_xmit = e100_xmit_frame; 2262 netdev->get_stats = e100_get_stats; 2263 netdev->set_multicast_list = e100_set_multicast_list; 2264 netdev->set_mac_address = e100_set_mac_address; 2265 netdev->change_mtu = e100_change_mtu; 2266 netdev->do_ioctl = e100_do_ioctl; 2267 SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops); 2268 netdev->tx_timeout = e100_tx_timeout; 2269 netdev->watchdog_timeo = E100_WATCHDOG_PERIOD; 2270 netdev->poll = e100_poll; 2271 netdev->weight = E100_NAPI_WEIGHT; 2272#ifdef CONFIG_NET_POLL_CONTROLLER 2273 netdev->poll_controller = e100_netpoll; 2274#endif 2275 strcpy(netdev->name, pci_name(pdev)); 2276 2277 nic = netdev_priv(netdev); 2278 nic->netdev = netdev; 2279 nic->pdev = pdev; 2280 nic->msg_enable = (1 << debug) - 1; 2281 pci_set_drvdata(pdev, netdev); 2282 2283 if((err = pci_enable_device(pdev))) { 2284 DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n"); 2285 goto err_out_free_dev; 2286 } 2287 2288 if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 2289 DPRINTK(PROBE, ERR, "Cannot find proper PCI device " 2290 "base address, aborting.\n"); 2291 err = -ENODEV; 2292 goto err_out_disable_pdev; 2293 } 2294 2295 if((err = pci_request_regions(pdev, DRV_NAME))) { 2296 DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n"); 2297 goto err_out_disable_pdev; 2298 } 2299 2300 if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) { 2301 DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n"); 2302 goto err_out_free_res; 2303 } 2304 2305 SET_MODULE_OWNER(netdev); 2306 SET_NETDEV_DEV(netdev, &pdev->dev); 2307 2308 nic->csr = ioremap(pci_resource_start(pdev, 0), sizeof(struct csr)); 2309 if(!nic->csr) { 2310 DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n"); 2311 err = -ENOMEM; 2312 goto err_out_free_res; 2313 } 2314 2315 if(ent->driver_data) 2316 nic->flags |= ich; 2317 else 2318 nic->flags &= ~ich; 2319 2320 e100_get_defaults(nic); 2321 2322 /* locks must be initialized before calling hw_reset */ 2323 spin_lock_init(&nic->cb_lock); 2324 spin_lock_init(&nic->cmd_lock); 2325 2326 /* Reset the device before pci_set_master() in case device is in some 2327 * funky state and has an interrupt pending - hint: we don't have the 2328 * interrupt handler registered yet. */ 2329 e100_hw_reset(nic); 2330 2331 pci_set_master(pdev); 2332 2333 init_timer(&nic->watchdog); 2334 nic->watchdog.function = e100_watchdog; 2335 nic->watchdog.data = (unsigned long)nic; 2336 init_timer(&nic->blink_timer); 2337 nic->blink_timer.function = e100_blink_led; 2338 nic->blink_timer.data = (unsigned long)nic; 2339 2340 INIT_WORK(&nic->tx_timeout_task, 2341 (void (*)(void *))e100_tx_timeout_task, netdev); 2342 2343 if((err = e100_alloc(nic))) { 2344 DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n"); 2345 goto err_out_iounmap; 2346 } 2347 2348 if((err = e100_eeprom_load(nic))) 2349 goto err_out_free; 2350 2351 e100_phy_init(nic); 2352 2353 memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN); 2354 if(!is_valid_ether_addr(netdev->dev_addr)) { 2355 DPRINTK(PROBE, ERR, "Invalid MAC address from " 2356 "EEPROM, aborting.\n"); 2357 err = -EAGAIN; 2358 goto err_out_free; 2359 } 2360 2361 /* Wol magic packet can be enabled from eeprom */ 2362 if((nic->mac >= mac_82558_D101_A4) && 2363 (nic->eeprom[eeprom_id] & eeprom_id_wol)) 2364 nic->flags |= wol_magic; 2365 2366 /* ack any pending wake events, disable PME */ 2367 pci_enable_wake(pdev, 0, 0); 2368 2369 strcpy(netdev->name, "eth%d"); 2370 if((err = register_netdev(netdev))) { 2371 DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n"); 2372 goto err_out_free; 2373 } 2374 2375 DPRINTK(PROBE, INFO, "addr 0x%lx, irq %d, " 2376 "MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n", 2377 pci_resource_start(pdev, 0), pdev->irq, 2378 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], 2379 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]); 2380 2381 return 0; 2382 2383err_out_free: 2384 e100_free(nic); 2385err_out_iounmap: 2386 iounmap(nic->csr); 2387err_out_free_res: 2388 pci_release_regions(pdev); 2389err_out_disable_pdev: 2390 pci_disable_device(pdev); 2391err_out_free_dev: 2392 pci_set_drvdata(pdev, NULL); 2393 free_netdev(netdev); 2394 return err; 2395} 2396 2397static void __devexit e100_remove(struct pci_dev *pdev) 2398{ 2399 struct net_device *netdev = pci_get_drvdata(pdev); 2400 2401 if(netdev) { 2402 struct nic *nic = netdev_priv(netdev); 2403 unregister_netdev(netdev); 2404 e100_free(nic); 2405 iounmap(nic->csr); 2406 free_netdev(netdev); 2407 pci_release_regions(pdev); 2408 pci_disable_device(pdev); 2409 pci_set_drvdata(pdev, NULL); 2410 } 2411} 2412 2413#ifdef CONFIG_PM 2414static int e100_suspend(struct pci_dev *pdev, pm_message_t state) 2415{ 2416 struct net_device *netdev = pci_get_drvdata(pdev); 2417 struct nic *nic = netdev_priv(netdev); 2418 2419 if(netif_running(netdev)) 2420 e100_down(nic); 2421 e100_hw_reset(nic); 2422 netif_device_detach(netdev); 2423 2424 pci_save_state(pdev); 2425 pci_enable_wake(pdev, pci_choose_state(pdev, state), nic->flags & (wol_magic | e100_asf(nic))); 2426 pci_disable_device(pdev); 2427 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 2428 2429 return 0; 2430} 2431 2432static int e100_resume(struct pci_dev *pdev) 2433{ 2434 struct net_device *netdev = pci_get_drvdata(pdev); 2435 struct nic *nic = netdev_priv(netdev); 2436 2437 pci_set_power_state(pdev, PCI_D0); 2438 pci_restore_state(pdev); 2439 /* ack any pending wake events, disable PME */ 2440 pci_enable_wake(pdev, 0, 0); 2441 if(e100_hw_init(nic)) 2442 DPRINTK(HW, ERR, "e100_hw_init failed\n"); 2443 2444 netif_device_attach(netdev); 2445 if(netif_running(netdev)) 2446 e100_up(nic); 2447 2448 return 0; 2449} 2450#endif 2451 2452 2453static void e100_shutdown(struct pci_dev *pdev) 2454{ 2455 struct net_device *netdev = pci_get_drvdata(pdev); 2456 struct nic *nic = netdev_priv(netdev); 2457 2458#ifdef CONFIG_PM 2459 pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic))); 2460#else 2461 pci_enable_wake(pdev, 0, nic->flags & (wol_magic)); 2462#endif 2463} 2464 2465 2466static struct pci_driver e100_driver = { 2467 .name = DRV_NAME, 2468 .id_table = e100_id_table, 2469 .probe = e100_probe, 2470 .remove = __devexit_p(e100_remove), 2471#ifdef CONFIG_PM 2472 .suspend = e100_suspend, 2473 .resume = e100_resume, 2474#endif 2475 .shutdown = e100_shutdown, 2476}; 2477 2478static int __init e100_init_module(void) 2479{ 2480 if(((1 << debug) - 1) & NETIF_MSG_DRV) { 2481 printk(KERN_INFO PFX "%s, %s\n", DRV_DESCRIPTION, DRV_VERSION); 2482 printk(KERN_INFO PFX "%s\n", DRV_COPYRIGHT); 2483 } 2484 return pci_module_init(&e100_driver); 2485} 2486 2487static void __exit e100_cleanup_module(void) 2488{ 2489 pci_unregister_driver(&e100_driver); 2490} 2491 2492module_init(e100_init_module); 2493module_exit(e100_cleanup_module);