Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

at v3.14-rc7, 4751 lines, 142 kB
/* Agere Systems Inc.
 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *   http://www.agere.com
 *
 * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com>
 *
 *------------------------------------------------------------------------------
 *
 * SOFTWARE LICENSE
 *
 * This software is provided subject to the following terms and conditions,
 * which you should read carefully before using the software. Using this
 * software indicates your acceptance of these terms and conditions. If you do
 * not agree with these terms and conditions, do not use the software.
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
 * modifications, are permitted provided that the following conditions are met:
 *
 * . Redistributions of source code must retain the above copyright notice, this
 *   list of conditions and the following Disclaimer as comments in the code as
 *   well as in the documentation and/or other materials provided with the
 *   distribution.
 *
 * . Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following Disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 *
 * . Neither the name of Agere Systems Inc. nor the names of the contributors
 *   may be used to endorse or promote products derived from this software
 *   without specific prior written permission.
 *
 * Disclaimer
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/io.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/phy.h>

#include "et131x.h"

MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>");
MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver for the ET1310 by Agere Systems");

/* EEPROM defines */
#define MAX_NUM_REGISTER_POLLS	1000
#define MAX_NUM_WRITE_RETRIES	2

/* MAC defines */
#define COUNTER_WRAP_16_BIT	0x10000
#define COUNTER_WRAP_12_BIT	0x1000

/* PCI defines */
#define INTERNAL_MEM_SIZE	0x400	/* 1024 of internal memory */
#define INTERNAL_MEM_RX_OFFSET	0x1FF	/* 50% Tx, 50% Rx */

/* ISR defines */
/* For interrupts, normal running is:
 *	rxdma_xfr_done, phy_interrupt, mac_stat_interrupt,
 *	watchdog_interrupt & txdma_xfer_done
 *
 * In both cases, when flow control is enabled for either Tx or bi-direction,
 * we additionally enable rx_fbr0_low and rx_fbr1_low, so we know when the
 * buffer rings are running low.
 */
#define INT_MASK_DISABLE	0xffffffff

/* NOTE: Masking out MAC_STAT Interrupt for now...
 * #define INT_MASK_ENABLE		0xfff6bf17
 * #define INT_MASK_ENABLE_NO_FLOW	0xfff6bfd7
 */
#define INT_MASK_ENABLE		0xfffebf17
#define INT_MASK_ENABLE_NO_FLOW	0xfffebfd7

/* General defines */
/* Packet and header sizes */
#define NIC_MIN_PACKET_SIZE	60

/* Multicast list size */
#define NIC_MAX_MCAST_LIST	128

/* Supported Filters */
#define ET131X_PACKET_TYPE_DIRECTED		0x0001
#define ET131X_PACKET_TYPE_MULTICAST		0x0002
#define ET131X_PACKET_TYPE_BROADCAST		0x0004
#define ET131X_PACKET_TYPE_PROMISCUOUS		0x0008
#define ET131X_PACKET_TYPE_ALL_MULTICAST	0x0010

/* Tx Timeout */
#define ET131X_TX_TIMEOUT	(1 * HZ)
#define NIC_SEND_HANG_THRESHOLD	0

/* MP_TCB flags */
#define FMP_DEST_MULTI		0x00000001
#define FMP_DEST_BROAD		0x00000002

/* MP_ADAPTER flags */
#define FMP_ADAPTER_INTERRUPT_IN_USE	0x00000008

/* MP_SHARED flags */
#define FMP_ADAPTER_LOWER_POWER		0x00200000

#define FMP_ADAPTER_NON_RECOVER_ERROR	0x00800000
#define FMP_ADAPTER_HARDWARE_ERROR	0x04000000

#define FMP_ADAPTER_FAIL_SEND_MASK	0x3ff00000

/* Some offsets in PCI config space that are actually used. */
#define ET1310_PCI_MAC_ADDRESS		0xA4
#define ET1310_PCI_EEPROM_STATUS	0xB2
#define ET1310_PCI_ACK_NACK		0xC0
#define ET1310_PCI_REPLAY		0xC2
#define ET1310_PCI_L0L1LATENCY		0xCF

/* PCI Product IDs */
#define ET131X_PCI_DEVICE_ID_GIG	0xED00	/* ET1310 1000 Base-T 8 */
#define ET131X_PCI_DEVICE_ID_FAST	0xED01	/* ET1310 100 Base-T */

/* Define order of magnitude converter */
#define NANO_IN_A_MICRO	1000

#define PARM_RX_NUM_BUFS_DEF	4
#define PARM_RX_TIME_INT_DEF	10
#define PARM_RX_MEM_END_DEF	0x2bc
#define PARM_TX_TIME_INT_DEF	40
#define PARM_TX_NUM_BUFS_DEF	4
#define PARM_DMA_CACHE_DEF	0

/* RX defines */
#define FBR_CHUNKS		32
#define MAX_DESC_PER_RING_RX	1024

/* number of RFDs - default and min */
#define RFD_LOW_WATER_MARK	40
#define NIC_DEFAULT_NUM_RFD	1024
#define NUM_FBRS		2

#define NUM_PACKETS_HANDLED	256

#define ALCATEL_MULTICAST_PKT	0x01000000
#define ALCATEL_BROADCAST_PKT	0x02000000

/* typedefs for Free Buffer Descriptors */
struct fbr_desc {
	u32 addr_lo;
	u32 addr_hi;
	u32 word2;		/* Bits 10-31 reserved, 0-9 descriptor */
};

/* Packet Status Ring Descriptors
 *
 * Word 0:
 *
 * top 16 bits are from the Alcatel Status Word as enumerated in
 * PE-MCXMAC Data Sheet IPD DS54 0210-1 (also IPD-DS80 0205-2)
 *
 * 0: hp			hash pass
 * 1: ipa			IP checksum assist
 * 2: ipp			IP checksum pass
 * 3: tcpa			TCP checksum assist
 * 4: tcpp			TCP checksum pass
 * 5: wol			WOL Event
 * 6: rxmac_error		RXMAC Error Indicator
 * 7: drop			Drop packet
 * 8: ft			Frame Truncated
 * 9: jp			Jumbo Packet
 * 10: vp			VLAN Packet
 * 11-15: unused
 * 16: asw_prev_pkt_dropped	e.g. IFG too small on previous
 * 17: asw_RX_DV_event		short receive event detected
 * 18: asw_false_carrier_event	bad carrier since last good packet
 * 19: asw_code_err		one or more nibbles signalled as errors
 * 20: asw_CRC_err		CRC error
 * 21: asw_len_chk_err		frame length field incorrect
 * 22: asw_too_long		frame length > 1518 bytes
 * 23: asw_OK			valid CRC + no code error
 * 24: asw_multicast		has a multicast address
 * 25: asw_broadcast		has a broadcast address
 * 26: asw_dribble_nibble	spurious bits after EOP
 * 27: asw_control_frame	is a control frame
 * 28: asw_pause_frame		is a pause frame
 * 29: asw_unsupported_op	unsupported OP code
 * 30: asw_VLAN_tag		VLAN tag detected
 * 31: asw_long_evt		Rx long event
 *
 * Word 1:
 * 0-15: length			length in bytes
 * 16-25: bi			Buffer Index
 * 26-27: ri			Ring Index
 * 28-31: reserved
 */

struct pkt_stat_desc {
	u32 word0;
	u32 word1;
};

/* Typedefs for the RX DMA status word */

/* rx status word 0 holds part of the status bits of the Rx DMA engine
 * that get copied out to memory by the ET-1310. Word 0 is a 32 bit word
 * which contains the Free Buffer ring 0 and 1 available offset.
 *
 * bit 0-9 FBR1 offset
 * bit 10 Wrap flag for FBR1
 * bit 16-25 FBR0 offset
 * bit 26 Wrap flag for FBR0
 */

/* RXSTAT_WORD1_t structure holds part of the status bits of the Rx DMA engine
 * that get copied out to memory by the ET-1310. Word 3 is a 32 bit word
 * which contains the Packet Status Ring available offset.
 *
 * bit 0-15 reserved
 * bit 16-27 PSRoffset
 * bit 28 PSRwrap
 * bit 29-31 unused
 */

/* struct rx_status_block is a structure representing the status of the Rx
 * DMA engine it sits in free memory, and is pointed to by 0x101c / 0x1020
 */
struct rx_status_block {
	u32 word0;
	u32 word1;
};

/* Structure for look-up table holding free buffer ring pointers, addresses
 * and state.
 */
struct fbr_lookup {
	void *virt[MAX_DESC_PER_RING_RX];
	u32 bus_high[MAX_DESC_PER_RING_RX];
	u32 bus_low[MAX_DESC_PER_RING_RX];
	void *ring_virtaddr;
	dma_addr_t ring_physaddr;
	void *mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
	dma_addr_t mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
	u32 local_full;
	u32 num_entries;
	dma_addr_t buffsize;
};

/* struct rx_ring is the structure representing the adaptor's local
 * reference(s) to the rings
 */
struct rx_ring {
	struct fbr_lookup *fbr[NUM_FBRS];
	void *ps_ring_virtaddr;
	dma_addr_t ps_ring_physaddr;
	u32 local_psr_full;
	u32 psr_num_entries;

	struct rx_status_block *rx_status_block;
	dma_addr_t rx_status_bus;

	/* RECV */
	struct list_head recv_list;
	u32 num_ready_recv;

	u32 num_rfd;

	bool unfinished_receives;
};

/* TX defines */
/* word 2 of the control bits in the Tx Descriptor ring for the ET-1310
 *
 * 0-15: length of packet
 * 16-27: VLAN tag
 * 28: VLAN CFI
 * 29-31: VLAN priority
 *
 * word 3 of the control bits in the Tx Descriptor ring for the ET-1310
 *
 * 0: last packet in the sequence
 * 1: first packet in the sequence
 * 2: interrupt the processor when this pkt sent
 * 3: Control word - no packet data
 * 4: Issue half-duplex backpressure : XON/XOFF
 * 5: send pause frame
 * 6: Tx frame has error
 * 7: append CRC
 * 8: MAC override
 * 9: pad packet
 * 10: Packet is a Huge packet
 * 11: append VLAN tag
 * 12: IP checksum assist
 * 13: TCP checksum assist
 * 14: UDP checksum assist
 */

#define TXDESC_FLAG_LASTPKT	0x0001
#define TXDESC_FLAG_FIRSTPKT	0x0002
#define TXDESC_FLAG_INTPROC	0x0004

/* struct tx_desc represents each descriptor on the ring */
struct tx_desc {
	u32 addr_hi;
	u32 addr_lo;
	u32 len_vlan;	/* control words how to xmit the */
	u32 flags;	/* data (detailed above) */
};

/* The status of the Tx DMA engine it sits in free memory, and is pointed to
 * by 0x101c / 0x1020. This is a DMA10 type
 */

/* TCB (Transmit Control Block: Host Side) */
struct tcb {
	struct tcb *next;	/* Next entry in ring */
	u32 flags;		/* Our flags for the packet */
	u32 count;		/* Used to spot stuck/lost packets */
	u32 stale;		/* Used to spot stuck/lost packets */
	struct sk_buff *skb;	/* Network skb we are tied to */
	u32 index;		/* Ring indexes */
	u32 index_start;
};

/* Structure representing our local reference(s) to the ring */
struct tx_ring {
	/* TCB (Transmit Control Block) memory and lists */
	struct tcb *tcb_ring;

	/* List of TCBs that are ready to be used */
	struct tcb *tcb_qhead;
	struct tcb *tcb_qtail;

	/* list of TCBs that are currently being sent. NOTE that access to all
	 * three of these (including used) are controlled via the
	 * TCBSendQLock.
This lock should be secured prior to incementing / 376 * decrementing used, or any queue manipulation on send_head / 377 * tail 378 */ 379 struct tcb *send_head; 380 struct tcb *send_tail; 381 int used; 382 383 /* The actual descriptor ring */ 384 struct tx_desc *tx_desc_ring; 385 dma_addr_t tx_desc_ring_pa; 386 387 /* send_idx indicates where we last wrote to in the descriptor ring. */ 388 u32 send_idx; 389 390 /* The location of the write-back status block */ 391 u32 *tx_status; 392 dma_addr_t tx_status_pa; 393 394 /* Packets since the last IRQ: used for interrupt coalescing */ 395 int since_irq; 396}; 397 398/* Do not change these values: if changed, then change also in respective 399 * TXdma and Rxdma engines 400 */ 401#define NUM_DESC_PER_RING_TX 512 /* TX Do not change these values */ 402#define NUM_TCB 64 403 404/* These values are all superseded by registry entries to facilitate tuning. 405 * Once the desired performance has been achieved, the optimal registry values 406 * should be re-populated to these #defines: 407 */ 408#define TX_ERROR_PERIOD 1000 409 410#define LO_MARK_PERCENT_FOR_PSR 15 411#define LO_MARK_PERCENT_FOR_RX 15 412 413/* RFD (Receive Frame Descriptor) */ 414struct rfd { 415 struct list_head list_node; 416 struct sk_buff *skb; 417 u32 len; /* total size of receive frame */ 418 u16 bufferindex; 419 u8 ringindex; 420}; 421 422/* Flow Control */ 423#define FLOW_BOTH 0 424#define FLOW_TXONLY 1 425#define FLOW_RXONLY 2 426#define FLOW_NONE 3 427 428/* Struct to define some device statistics */ 429struct ce_stats { 430 /* MIB II variables 431 * 432 * NOTE: atomic_t types are only guaranteed to store 24-bits; if we 433 * MUST have 32, then we'll need another way to perform atomic 434 * operations 435 */ 436 u32 unicast_pkts_rcvd; 437 atomic_t unicast_pkts_xmtd; 438 u32 multicast_pkts_rcvd; 439 atomic_t multicast_pkts_xmtd; 440 u32 broadcast_pkts_rcvd; 441 atomic_t broadcast_pkts_xmtd; 442 u32 rcvd_pkts_dropped; 443 444 /* Tx Statistics. */ 445 u32 tx_underflows; 446 447 u32 tx_collisions; 448 u32 tx_excessive_collisions; 449 u32 tx_first_collisions; 450 u32 tx_late_collisions; 451 u32 tx_max_pkt_errs; 452 u32 tx_deferred; 453 454 /* Rx Statistics. 
*/ 455 u32 rx_overflows; 456 457 u32 rx_length_errs; 458 u32 rx_align_errs; 459 u32 rx_crc_errs; 460 u32 rx_code_violations; 461 u32 rx_other_errs; 462 463 u32 synchronous_iterations; 464 u32 interrupt_status; 465}; 466 467/* The private adapter structure */ 468struct et131x_adapter { 469 struct net_device *netdev; 470 struct pci_dev *pdev; 471 struct mii_bus *mii_bus; 472 struct phy_device *phydev; 473 struct work_struct task; 474 475 /* Flags that indicate current state of the adapter */ 476 u32 flags; 477 478 /* local link state, to determine if a state change has occurred */ 479 int link; 480 481 /* Configuration */ 482 u8 rom_addr[ETH_ALEN]; 483 u8 addr[ETH_ALEN]; 484 bool has_eeprom; 485 u8 eeprom_data[2]; 486 487 /* Spinlocks */ 488 spinlock_t lock; 489 490 spinlock_t tcb_send_qlock; 491 spinlock_t tcb_ready_qlock; 492 spinlock_t send_hw_lock; 493 494 spinlock_t rcv_lock; 495 spinlock_t fbr_lock; 496 497 /* Packet Filter and look ahead size */ 498 u32 packet_filter; 499 500 /* multicast list */ 501 u32 multicast_addr_count; 502 u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN]; 503 504 /* Pointer to the device's PCI register space */ 505 struct address_map __iomem *regs; 506 507 /* Registry parameters */ 508 u8 wanted_flow; /* Flow we want for 802.3x flow control */ 509 u32 registry_jumbo_packet; /* Max supported ethernet packet size */ 510 511 /* Derived from the registry: */ 512 u8 flowcontrol; /* flow control validated by the far-end */ 513 514 /* Minimize init-time */ 515 struct timer_list error_timer; 516 517 /* variable putting the phy into coma mode when boot up with no cable 518 * plugged in after 5 seconds 519 */ 520 u8 boot_coma; 521 522 /* Next two used to save power information at power down. This 523 * information will be used during power up to set up parts of Power 524 * Management in JAGCore 525 */ 526 u16 pdown_speed; 527 u8 pdown_duplex; 528 529 /* Tx Memory Variables */ 530 struct tx_ring tx_ring; 531 532 /* Rx Memory Variables */ 533 struct rx_ring rx_ring; 534 535 /* Stats */ 536 struct ce_stats stats; 537 538 struct net_device_stats net_stats; 539}; 540 541static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status) 542{ 543 u32 reg; 544 int i; 545 546 /* 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and 547 * bits 7,1:0 both equal to 1, at least once after reset. 548 * Subsequent operations need only to check that bits 1:0 are equal 549 * to 1 prior to starting a single byte read/write 550 */ 551 552 for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) { 553 /* Read registers grouped in DWORD1 */ 554 if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg)) 555 return -EIO; 556 557 /* I2C idle and Phy Queue Avail both true */ 558 if ((reg & 0x3000) == 0x3000) { 559 if (status) 560 *status = reg; 561 return reg & 0xFF; 562 } 563 } 564 return -ETIMEDOUT; 565} 566 567/* eeprom_write - Write a byte to the ET1310's EEPROM 568 * @adapter: pointer to our private adapter structure 569 * @addr: the address to write 570 * @data: the value to write 571 * 572 * Returns 1 for a successful write. 573 */ 574static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data) 575{ 576 struct pci_dev *pdev = adapter->pdev; 577 int index = 0; 578 int retries; 579 int err = 0; 580 int i2c_wack = 0; 581 int writeok = 0; 582 u32 status; 583 u32 val = 0; 584 585 /* For an EEPROM, an I2C single byte write is defined as a START 586 * condition followed by the device address, EEPROM address, one byte 587 * of data and a STOP condition. 
The STOP condition will trigger the 588 * EEPROM's internally timed write cycle to the nonvolatile memory. 589 * All inputs are disabled during this write cycle and the EEPROM will 590 * not respond to any access until the internal write is complete. 591 */ 592 593 err = eeprom_wait_ready(pdev, NULL); 594 if (err < 0) 595 return err; 596 597 /* 2. Write to the LBCIF Control Register: bit 7=1, bit 6=1, bit 3=0, 598 * and bits 1:0 both =0. Bit 5 should be set according to the 599 * type of EEPROM being accessed (1=two byte addressing, 0=one 600 * byte addressing). 601 */ 602 if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER, 603 LBCIF_CONTROL_LBCIF_ENABLE | LBCIF_CONTROL_I2C_WRITE)) 604 return -EIO; 605 606 i2c_wack = 1; 607 608 /* Prepare EEPROM address for Step 3 */ 609 610 for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) { 611 /* Write the address to the LBCIF Address Register */ 612 if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr)) 613 break; 614 /* Write the data to the LBCIF Data Register (the I2C write 615 * will begin). 616 */ 617 if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data)) 618 break; 619 /* Monitor bit 1:0 of the LBCIF Status Register. When bits 620 * 1:0 are both equal to 1, the I2C write has completed and the 621 * internal write cycle of the EEPROM is about to start. 622 * (bits 1:0 = 01 is a legal state while waiting from both 623 * equal to 1, but bits 1:0 = 10 is invalid and implies that 624 * something is broken). 625 */ 626 err = eeprom_wait_ready(pdev, &status); 627 if (err < 0) 628 return 0; 629 630 /* Check bit 3 of the LBCIF Status Register. If equal to 1, 631 * an error has occurred.Don't break here if we are revision 632 * 1, this is so we do a blind write for load bug. 633 */ 634 if ((status & LBCIF_STATUS_GENERAL_ERROR) 635 && adapter->pdev->revision == 0) 636 break; 637 638 /* Check bit 2 of the LBCIF Status Register. If equal to 1 an 639 * ACK error has occurred on the address phase of the write. 640 * This could be due to an actual hardware failure or the 641 * EEPROM may still be in its internal write cycle from a 642 * previous write. This write operation was ignored and must be 643 *repeated later. 644 */ 645 if (status & LBCIF_STATUS_ACK_ERROR) { 646 /* This could be due to an actual hardware failure 647 * or the EEPROM may still be in its internal write 648 * cycle from a previous write. This write operation 649 * was ignored and must be repeated later. 650 */ 651 udelay(10); 652 continue; 653 } 654 655 writeok = 1; 656 break; 657 } 658 659 /* Set bit 6 of the LBCIF Control Register = 0. 660 */ 661 udelay(10); 662 663 while (i2c_wack) { 664 if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER, 665 LBCIF_CONTROL_LBCIF_ENABLE)) 666 writeok = 0; 667 668 /* Do read until internal ACK_ERROR goes away meaning write 669 * completed 670 */ 671 do { 672 pci_write_config_dword(pdev, 673 LBCIF_ADDRESS_REGISTER, 674 addr); 675 do { 676 pci_read_config_dword(pdev, 677 LBCIF_DATA_REGISTER, &val); 678 } while ((val & 0x00010000) == 0); 679 } while (val & 0x00040000); 680 681 if ((val & 0xFF00) != 0xC000 || index == 10000) 682 break; 683 index++; 684 } 685 return writeok ? 
0 : -EIO; 686} 687 688/* eeprom_read - Read a byte from the ET1310's EEPROM 689 * @adapter: pointer to our private adapter structure 690 * @addr: the address from which to read 691 * @pdata: a pointer to a byte in which to store the value of the read 692 * @eeprom_id: the ID of the EEPROM 693 * @addrmode: how the EEPROM is to be accessed 694 * 695 * Returns 1 for a successful read 696 */ 697static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata) 698{ 699 struct pci_dev *pdev = adapter->pdev; 700 int err; 701 u32 status; 702 703 /* A single byte read is similar to the single byte write, with the 704 * exception of the data flow: 705 */ 706 707 err = eeprom_wait_ready(pdev, NULL); 708 if (err < 0) 709 return err; 710 /* Write to the LBCIF Control Register: bit 7=1, bit 6=0, bit 3=0, 711 * and bits 1:0 both =0. Bit 5 should be set according to the type 712 * of EEPROM being accessed (1=two byte addressing, 0=one byte 713 * addressing). 714 */ 715 if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER, 716 LBCIF_CONTROL_LBCIF_ENABLE)) 717 return -EIO; 718 /* Write the address to the LBCIF Address Register (I2C read will 719 * begin). 720 */ 721 if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr)) 722 return -EIO; 723 /* Monitor bit 0 of the LBCIF Status Register. When = 1, I2C read 724 * is complete. (if bit 1 =1 and bit 0 stays = 0, a hardware failure 725 * has occurred). 726 */ 727 err = eeprom_wait_ready(pdev, &status); 728 if (err < 0) 729 return err; 730 /* Regardless of error status, read data byte from LBCIF Data 731 * Register. 732 */ 733 *pdata = err; 734 /* Check bit 2 of the LBCIF Status Register. If = 1, 735 * then an error has occurred. 736 */ 737 return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0; 738} 739 740static int et131x_init_eeprom(struct et131x_adapter *adapter) 741{ 742 struct pci_dev *pdev = adapter->pdev; 743 u8 eestatus; 744 745 /* We first need to check the EEPROM Status code located at offset 746 * 0xB2 of config space 747 */ 748 pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus); 749 750 /* THIS IS A WORKAROUND: 751 * I need to call this function twice to get my card in a 752 * LG M1 Express Dual running. I tried also a msleep before this 753 * function, because I thought there could be some time conditions 754 * but it didn't work. Call the whole function twice also work. 755 */ 756 if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) { 757 dev_err(&pdev->dev, 758 "Could not read PCI config space for EEPROM Status\n"); 759 return -EIO; 760 } 761 762 /* Determine if the error(s) we care about are present. If they are 763 * present we need to fail. 764 */ 765 if (eestatus & 0x4C) { 766 int write_failed = 0; 767 if (pdev->revision == 0x01) { 768 int i; 769 static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF }; 770 771 /* Re-write the first 4 bytes if we have an eeprom 772 * present and the revision id is 1, this fixes the 773 * corruption seen with 1310 B Silicon 774 */ 775 for (i = 0; i < 3; i++) 776 if (eeprom_write(adapter, i, eedata[i]) < 0) 777 write_failed = 1; 778 } 779 if (pdev->revision != 0x01 || write_failed) { 780 dev_err(&pdev->dev, 781 "Fatal EEPROM Status Error - 0x%04x\n", eestatus); 782 783 /* This error could mean that there was an error 784 * reading the eeprom or that the eeprom doesn't exist. 
785 * We will treat each case the same and not try to 786 * gather additional information that normally would 787 * come from the eeprom, like MAC Address 788 */ 789 adapter->has_eeprom = 0; 790 return -EIO; 791 } 792 } 793 adapter->has_eeprom = 1; 794 795 /* Read the EEPROM for information regarding LED behavior. Refer to 796 * ET1310_phy.c, et131x_xcvr_init(), for its use. 797 */ 798 eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]); 799 eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]); 800 801 if (adapter->eeprom_data[0] != 0xcd) 802 /* Disable all optional features */ 803 adapter->eeprom_data[1] = 0x00; 804 805 return 0; 806} 807 808/* et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310. 809 * @adapter: pointer to our adapter structure 810 */ 811static void et131x_rx_dma_enable(struct et131x_adapter *adapter) 812{ 813 /* Setup the receive dma configuration register for normal operation */ 814 u32 csr = ET_RXDMA_CSR_FBR1_ENABLE; 815 struct rx_ring *rx_ring = &adapter->rx_ring; 816 817 if (rx_ring->fbr[1]->buffsize == 4096) 818 csr |= ET_RXDMA_CSR_FBR1_SIZE_LO; 819 else if (rx_ring->fbr[1]->buffsize == 8192) 820 csr |= ET_RXDMA_CSR_FBR1_SIZE_HI; 821 else if (rx_ring->fbr[1]->buffsize == 16384) 822 csr |= ET_RXDMA_CSR_FBR1_SIZE_LO | ET_RXDMA_CSR_FBR1_SIZE_HI; 823 824 csr |= ET_RXDMA_CSR_FBR0_ENABLE; 825 if (rx_ring->fbr[0]->buffsize == 256) 826 csr |= ET_RXDMA_CSR_FBR0_SIZE_LO; 827 else if (rx_ring->fbr[0]->buffsize == 512) 828 csr |= ET_RXDMA_CSR_FBR0_SIZE_HI; 829 else if (rx_ring->fbr[0]->buffsize == 1024) 830 csr |= ET_RXDMA_CSR_FBR0_SIZE_LO | ET_RXDMA_CSR_FBR0_SIZE_HI; 831 writel(csr, &adapter->regs->rxdma.csr); 832 833 csr = readl(&adapter->regs->rxdma.csr); 834 if (csr & ET_RXDMA_CSR_HALT_STATUS) { 835 udelay(5); 836 csr = readl(&adapter->regs->rxdma.csr); 837 if (csr & ET_RXDMA_CSR_HALT_STATUS) { 838 dev_err(&adapter->pdev->dev, 839 "RX Dma failed to exit halt state. CSR 0x%08x\n", 840 csr); 841 } 842 } 843} 844 845/* et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310 846 * @adapter: pointer to our adapter structure 847 */ 848static void et131x_rx_dma_disable(struct et131x_adapter *adapter) 849{ 850 u32 csr; 851 /* Setup the receive dma configuration register */ 852 writel(ET_RXDMA_CSR_HALT | ET_RXDMA_CSR_FBR1_ENABLE, 853 &adapter->regs->rxdma.csr); 854 csr = readl(&adapter->regs->rxdma.csr); 855 if (!(csr & ET_RXDMA_CSR_HALT_STATUS)) { 856 udelay(5); 857 csr = readl(&adapter->regs->rxdma.csr); 858 if (!(csr & ET_RXDMA_CSR_HALT_STATUS)) 859 dev_err(&adapter->pdev->dev, 860 "RX Dma failed to enter halt state. CSR 0x%08x\n", 861 csr); 862 } 863} 864 865/* et131x_tx_dma_enable - re-start of Tx_DMA on the ET1310. 866 * @adapter: pointer to our adapter structure 867 * 868 * Mainly used after a return to the D0 (full-power) state from a lower state. 
869 */ 870static void et131x_tx_dma_enable(struct et131x_adapter *adapter) 871{ 872 /* Setup the transmit dma configuration register for normal 873 * operation 874 */ 875 writel(ET_TXDMA_SNGL_EPKT|(PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT), 876 &adapter->regs->txdma.csr); 877} 878 879static inline void add_10bit(u32 *v, int n) 880{ 881 *v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP); 882} 883 884static inline void add_12bit(u32 *v, int n) 885{ 886 *v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP); 887} 888 889/* et1310_config_mac_regs1 - Initialize the first part of MAC regs 890 * @adapter: pointer to our adapter structure 891 */ 892static void et1310_config_mac_regs1(struct et131x_adapter *adapter) 893{ 894 struct mac_regs __iomem *macregs = &adapter->regs->mac; 895 u32 station1; 896 u32 station2; 897 u32 ipg; 898 899 /* First we need to reset everything. Write to MAC configuration 900 * register 1 to perform reset. 901 */ 902 writel(ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET | 903 ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC | 904 ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC, 905 &macregs->cfg1); 906 907 /* Next lets configure the MAC Inter-packet gap register */ 908 ipg = 0x38005860; /* IPG1 0x38 IPG2 0x58 B2B 0x60 */ 909 ipg |= 0x50 << 8; /* ifg enforce 0x50 */ 910 writel(ipg, &macregs->ipg); 911 912 /* Next lets configure the MAC Half Duplex register */ 913 /* BEB trunc 0xA, Ex Defer, Rexmit 0xF Coll 0x37 */ 914 writel(0x00A1F037, &macregs->hfdp); 915 916 /* Next lets configure the MAC Interface Control register */ 917 writel(0, &macregs->if_ctrl); 918 919 /* Let's move on to setting up the mii management configuration */ 920 writel(ET_MAC_MIIMGMT_CLK_RST, &macregs->mii_mgmt_cfg); 921 922 /* Next lets configure the MAC Station Address register. These 923 * values are read from the EEPROM during initialization and stored 924 * in the adapter structure. We write what is stored in the adapter 925 * structure to the MAC Station Address registers high and low. This 926 * station address is used for generating and checking pause control 927 * packets. 928 */ 929 station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) | 930 (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT); 931 station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) | 932 (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) | 933 (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) | 934 adapter->addr[2]; 935 writel(station1, &macregs->station_addr_1); 936 writel(station2, &macregs->station_addr_2); 937 938 /* Max ethernet packet in bytes that will be passed by the mac without 939 * being truncated. Allow the MAC to pass 4 more than our max packet 940 * size. This is 4 for the Ethernet CRC. 941 * 942 * Packets larger than (registry_jumbo_packet) that do not contain a 943 * VLAN ID will be dropped by the Rx function. 
944 */ 945 writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len); 946 947 /* clear out MAC config reset */ 948 writel(0, &macregs->cfg1); 949} 950 951/* et1310_config_mac_regs2 - Initialize the second part of MAC regs 952 * @adapter: pointer to our adapter structure 953 */ 954static void et1310_config_mac_regs2(struct et131x_adapter *adapter) 955{ 956 int32_t delay = 0; 957 struct mac_regs __iomem *mac = &adapter->regs->mac; 958 struct phy_device *phydev = adapter->phydev; 959 u32 cfg1; 960 u32 cfg2; 961 u32 ifctrl; 962 u32 ctl; 963 964 ctl = readl(&adapter->regs->txmac.ctl); 965 cfg1 = readl(&mac->cfg1); 966 cfg2 = readl(&mac->cfg2); 967 ifctrl = readl(&mac->if_ctrl); 968 969 /* Set up the if mode bits */ 970 cfg2 &= ~ET_MAC_CFG2_IFMODE_MASK; 971 if (phydev->speed == SPEED_1000) { 972 cfg2 |= ET_MAC_CFG2_IFMODE_1000; 973 /* Phy mode bit */ 974 ifctrl &= ~ET_MAC_IFCTRL_PHYMODE; 975 } else { 976 cfg2 |= ET_MAC_CFG2_IFMODE_100; 977 ifctrl |= ET_MAC_IFCTRL_PHYMODE; 978 } 979 980 /* We need to enable Rx/Tx */ 981 cfg1 |= ET_MAC_CFG1_RX_ENABLE | ET_MAC_CFG1_TX_ENABLE | 982 ET_MAC_CFG1_TX_FLOW; 983 /* Initialize loop back to off */ 984 cfg1 &= ~(ET_MAC_CFG1_LOOPBACK | ET_MAC_CFG1_RX_FLOW); 985 if (adapter->flowcontrol == FLOW_RXONLY || 986 adapter->flowcontrol == FLOW_BOTH) 987 cfg1 |= ET_MAC_CFG1_RX_FLOW; 988 writel(cfg1, &mac->cfg1); 989 990 /* Now we need to initialize the MAC Configuration 2 register */ 991 /* preamble 7, check length, huge frame off, pad crc, crc enable 992 * full duplex off 993 */ 994 cfg2 |= 0x7 << ET_MAC_CFG2_PREAMBLE_SHIFT; 995 cfg2 |= ET_MAC_CFG2_IFMODE_LEN_CHECK; 996 cfg2 |= ET_MAC_CFG2_IFMODE_PAD_CRC; 997 cfg2 |= ET_MAC_CFG2_IFMODE_CRC_ENABLE; 998 cfg2 &= ~ET_MAC_CFG2_IFMODE_HUGE_FRAME; 999 cfg2 &= ~ET_MAC_CFG2_IFMODE_FULL_DPLX; 1000 1001 /* Turn on duplex if needed */ 1002 if (phydev->duplex == DUPLEX_FULL) 1003 cfg2 |= ET_MAC_CFG2_IFMODE_FULL_DPLX; 1004 1005 ifctrl &= ~ET_MAC_IFCTRL_GHDMODE; 1006 if (phydev->duplex == DUPLEX_HALF) 1007 ifctrl |= ET_MAC_IFCTRL_GHDMODE; 1008 1009 writel(ifctrl, &mac->if_ctrl); 1010 writel(cfg2, &mac->cfg2); 1011 1012 do { 1013 udelay(10); 1014 delay++; 1015 cfg1 = readl(&mac->cfg1); 1016 } while ((cfg1 & ET_MAC_CFG1_WAIT) != ET_MAC_CFG1_WAIT && delay < 100); 1017 1018 if (delay == 100) { 1019 dev_warn(&adapter->pdev->dev, 1020 "Syncd bits did not respond correctly cfg1 word 0x%08x\n", 1021 cfg1); 1022 } 1023 1024 /* Enable txmac */ 1025 ctl |= ET_TX_CTRL_TXMAC_ENABLE | ET_TX_CTRL_FC_DISABLE; 1026 writel(ctl, &adapter->regs->txmac.ctl); 1027 1028 /* Ready to start the RXDMA/TXDMA engine */ 1029 if (adapter->flags & FMP_ADAPTER_LOWER_POWER) { 1030 et131x_rx_dma_enable(adapter); 1031 et131x_tx_dma_enable(adapter); 1032 } 1033} 1034 1035/* et1310_in_phy_coma - check if the device is in phy coma 1036 * @adapter: pointer to our adapter structure 1037 * 1038 * Returns 0 if the device is not in phy coma, 1 if it is in phy coma 1039 */ 1040static int et1310_in_phy_coma(struct et131x_adapter *adapter) 1041{ 1042 u32 pmcsr = readl(&adapter->regs->global.pm_csr); 1043 1044 return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0; 1045} 1046 1047static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter) 1048{ 1049 struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac; 1050 u32 hash1 = 0; 1051 u32 hash2 = 0; 1052 u32 hash3 = 0; 1053 u32 hash4 = 0; 1054 u32 pm_csr; 1055 1056 /* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision 1057 * the multi-cast LIST. 
If it is NOT specified, (and "ALL" is not 1058 * specified) then we should pass NO multi-cast addresses to the 1059 * driver. 1060 */ 1061 if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) { 1062 int i; 1063 1064 /* Loop through our multicast array and set up the device */ 1065 for (i = 0; i < adapter->multicast_addr_count; i++) { 1066 u32 result; 1067 1068 result = ether_crc(6, adapter->multicast_list[i]); 1069 1070 result = (result & 0x3F800000) >> 23; 1071 1072 if (result < 32) { 1073 hash1 |= (1 << result); 1074 } else if ((31 < result) && (result < 64)) { 1075 result -= 32; 1076 hash2 |= (1 << result); 1077 } else if ((63 < result) && (result < 96)) { 1078 result -= 64; 1079 hash3 |= (1 << result); 1080 } else { 1081 result -= 96; 1082 hash4 |= (1 << result); 1083 } 1084 } 1085 } 1086 1087 /* Write out the new hash to the device */ 1088 pm_csr = readl(&adapter->regs->global.pm_csr); 1089 if (!et1310_in_phy_coma(adapter)) { 1090 writel(hash1, &rxmac->multi_hash1); 1091 writel(hash2, &rxmac->multi_hash2); 1092 writel(hash3, &rxmac->multi_hash3); 1093 writel(hash4, &rxmac->multi_hash4); 1094 } 1095} 1096 1097static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter) 1098{ 1099 struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac; 1100 u32 uni_pf1; 1101 u32 uni_pf2; 1102 u32 uni_pf3; 1103 u32 pm_csr; 1104 1105 /* Set up unicast packet filter reg 3 to be the first two octets of 1106 * the MAC address for both address 1107 * 1108 * Set up unicast packet filter reg 2 to be the octets 2 - 5 of the 1109 * MAC address for second address 1110 * 1111 * Set up unicast packet filter reg 3 to be the octets 2 - 5 of the 1112 * MAC address for first address 1113 */ 1114 uni_pf3 = (adapter->addr[0] << ET_RX_UNI_PF_ADDR2_1_SHIFT) | 1115 (adapter->addr[1] << ET_RX_UNI_PF_ADDR2_2_SHIFT) | 1116 (adapter->addr[0] << ET_RX_UNI_PF_ADDR1_1_SHIFT) | 1117 adapter->addr[1]; 1118 1119 uni_pf2 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR2_3_SHIFT) | 1120 (adapter->addr[3] << ET_RX_UNI_PF_ADDR2_4_SHIFT) | 1121 (adapter->addr[4] << ET_RX_UNI_PF_ADDR2_5_SHIFT) | 1122 adapter->addr[5]; 1123 1124 uni_pf1 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR1_3_SHIFT) | 1125 (adapter->addr[3] << ET_RX_UNI_PF_ADDR1_4_SHIFT) | 1126 (adapter->addr[4] << ET_RX_UNI_PF_ADDR1_5_SHIFT) | 1127 adapter->addr[5]; 1128 1129 pm_csr = readl(&adapter->regs->global.pm_csr); 1130 if (!et1310_in_phy_coma(adapter)) { 1131 writel(uni_pf1, &rxmac->uni_pf_addr1); 1132 writel(uni_pf2, &rxmac->uni_pf_addr2); 1133 writel(uni_pf3, &rxmac->uni_pf_addr3); 1134 } 1135} 1136 1137static void et1310_config_rxmac_regs(struct et131x_adapter *adapter) 1138{ 1139 struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac; 1140 struct phy_device *phydev = adapter->phydev; 1141 u32 sa_lo; 1142 u32 sa_hi = 0; 1143 u32 pf_ctrl = 0; 1144 1145 /* Disable the MAC while it is being configured (also disable WOL) */ 1146 writel(0x8, &rxmac->ctrl); 1147 1148 /* Initialize WOL to disabled. */ 1149 writel(0, &rxmac->crc0); 1150 writel(0, &rxmac->crc12); 1151 writel(0, &rxmac->crc34); 1152 1153 /* We need to set the WOL mask0 - mask4 next. We initialize it to 1154 * its default Values of 0x00000000 because there are not WOL masks 1155 * as of this time. 
1156 */ 1157 writel(0, &rxmac->mask0_word0); 1158 writel(0, &rxmac->mask0_word1); 1159 writel(0, &rxmac->mask0_word2); 1160 writel(0, &rxmac->mask0_word3); 1161 1162 writel(0, &rxmac->mask1_word0); 1163 writel(0, &rxmac->mask1_word1); 1164 writel(0, &rxmac->mask1_word2); 1165 writel(0, &rxmac->mask1_word3); 1166 1167 writel(0, &rxmac->mask2_word0); 1168 writel(0, &rxmac->mask2_word1); 1169 writel(0, &rxmac->mask2_word2); 1170 writel(0, &rxmac->mask2_word3); 1171 1172 writel(0, &rxmac->mask3_word0); 1173 writel(0, &rxmac->mask3_word1); 1174 writel(0, &rxmac->mask3_word2); 1175 writel(0, &rxmac->mask3_word3); 1176 1177 writel(0, &rxmac->mask4_word0); 1178 writel(0, &rxmac->mask4_word1); 1179 writel(0, &rxmac->mask4_word2); 1180 writel(0, &rxmac->mask4_word3); 1181 1182 /* Lets setup the WOL Source Address */ 1183 sa_lo = (adapter->addr[2] << ET_RX_WOL_LO_SA3_SHIFT) | 1184 (adapter->addr[3] << ET_RX_WOL_LO_SA4_SHIFT) | 1185 (adapter->addr[4] << ET_RX_WOL_LO_SA5_SHIFT) | 1186 adapter->addr[5]; 1187 writel(sa_lo, &rxmac->sa_lo); 1188 1189 sa_hi = (u32) (adapter->addr[0] << ET_RX_WOL_HI_SA1_SHIFT) | 1190 adapter->addr[1]; 1191 writel(sa_hi, &rxmac->sa_hi); 1192 1193 /* Disable all Packet Filtering */ 1194 writel(0, &rxmac->pf_ctrl); 1195 1196 /* Let's initialize the Unicast Packet filtering address */ 1197 if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) { 1198 et1310_setup_device_for_unicast(adapter); 1199 pf_ctrl |= ET_RX_PFCTRL_UNICST_FILTER_ENABLE; 1200 } else { 1201 writel(0, &rxmac->uni_pf_addr1); 1202 writel(0, &rxmac->uni_pf_addr2); 1203 writel(0, &rxmac->uni_pf_addr3); 1204 } 1205 1206 /* Let's initialize the Multicast hash */ 1207 if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) { 1208 pf_ctrl |= ET_RX_PFCTRL_MLTCST_FILTER_ENABLE; 1209 et1310_setup_device_for_multicast(adapter); 1210 } 1211 1212 /* Runt packet filtering. Didn't work in version A silicon. */ 1213 pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << ET_RX_PFCTRL_MIN_PKT_SZ_SHIFT; 1214 pf_ctrl |= ET_RX_PFCTRL_FRAG_FILTER_ENABLE; 1215 1216 if (adapter->registry_jumbo_packet > 8192) 1217 /* In order to transmit jumbo packets greater than 8k, the 1218 * FIFO between RxMAC and RxDMA needs to be reduced in size 1219 * to (16k - Jumbo packet size). In order to implement this, 1220 * we must use "cut through" mode in the RxMAC, which chops 1221 * packets down into segments which are (max_size * 16). In 1222 * this case we selected 256 bytes, since this is the size of 1223 * the PCI-Express TLP's that the 1310 uses. 1224 * 1225 * seg_en on, fc_en off, size 0x10 1226 */ 1227 writel(0x41, &rxmac->mcif_ctrl_max_seg); 1228 else 1229 writel(0, &rxmac->mcif_ctrl_max_seg); 1230 1231 /* Initialize the MCIF water marks */ 1232 writel(0, &rxmac->mcif_water_mark); 1233 1234 /* Initialize the MIF control */ 1235 writel(0, &rxmac->mif_ctrl); 1236 1237 /* Initialize the Space Available Register */ 1238 writel(0, &rxmac->space_avail); 1239 1240 /* Initialize the the mif_ctrl register 1241 * bit 3: Receive code error. One or more nibbles were signaled as 1242 * errors during the reception of the packet. Clear this 1243 * bit in Gigabit, set it in 100Mbit. This was derived 1244 * experimentally at UNH. 1245 * bit 4: Receive CRC error. The packet's CRC did not match the 1246 * internally generated CRC. 1247 * bit 5: Receive length check error. Indicates that frame length 1248 * field value in the packet does not match the actual data 1249 * byte length and is not a type field. 1250 * bit 16: Receive frame truncated. 
1251 * bit 17: Drop packet enable 1252 */ 1253 if (phydev && phydev->speed == SPEED_100) 1254 writel(0x30038, &rxmac->mif_ctrl); 1255 else 1256 writel(0x30030, &rxmac->mif_ctrl); 1257 1258 /* Finally we initialize RxMac to be enabled & WOL disabled. Packet 1259 * filter is always enabled since it is where the runt packets are 1260 * supposed to be dropped. For version A silicon, runt packet 1261 * dropping doesn't work, so it is disabled in the pf_ctrl register, 1262 * but we still leave the packet filter on. 1263 */ 1264 writel(pf_ctrl, &rxmac->pf_ctrl); 1265 writel(ET_RX_CTRL_RXMAC_ENABLE | ET_RX_CTRL_WOL_DISABLE, &rxmac->ctrl); 1266} 1267 1268static void et1310_config_txmac_regs(struct et131x_adapter *adapter) 1269{ 1270 struct txmac_regs __iomem *txmac = &adapter->regs->txmac; 1271 1272 /* We need to update the Control Frame Parameters 1273 * cfpt - control frame pause timer set to 64 (0x40) 1274 * cfep - control frame extended pause timer set to 0x0 1275 */ 1276 if (adapter->flowcontrol == FLOW_NONE) 1277 writel(0, &txmac->cf_param); 1278 else 1279 writel(0x40, &txmac->cf_param); 1280} 1281 1282static void et1310_config_macstat_regs(struct et131x_adapter *adapter) 1283{ 1284 struct macstat_regs __iomem *macstat = 1285 &adapter->regs->macstat; 1286 1287 /* Next we need to initialize all the macstat registers to zero on 1288 * the device. 1289 */ 1290 writel(0, &macstat->txrx_0_64_byte_frames); 1291 writel(0, &macstat->txrx_65_127_byte_frames); 1292 writel(0, &macstat->txrx_128_255_byte_frames); 1293 writel(0, &macstat->txrx_256_511_byte_frames); 1294 writel(0, &macstat->txrx_512_1023_byte_frames); 1295 writel(0, &macstat->txrx_1024_1518_byte_frames); 1296 writel(0, &macstat->txrx_1519_1522_gvln_frames); 1297 1298 writel(0, &macstat->rx_bytes); 1299 writel(0, &macstat->rx_packets); 1300 writel(0, &macstat->rx_fcs_errs); 1301 writel(0, &macstat->rx_multicast_packets); 1302 writel(0, &macstat->rx_broadcast_packets); 1303 writel(0, &macstat->rx_control_frames); 1304 writel(0, &macstat->rx_pause_frames); 1305 writel(0, &macstat->rx_unknown_opcodes); 1306 writel(0, &macstat->rx_align_errs); 1307 writel(0, &macstat->rx_frame_len_errs); 1308 writel(0, &macstat->rx_code_errs); 1309 writel(0, &macstat->rx_carrier_sense_errs); 1310 writel(0, &macstat->rx_undersize_packets); 1311 writel(0, &macstat->rx_oversize_packets); 1312 writel(0, &macstat->rx_fragment_packets); 1313 writel(0, &macstat->rx_jabbers); 1314 writel(0, &macstat->rx_drops); 1315 1316 writel(0, &macstat->tx_bytes); 1317 writel(0, &macstat->tx_packets); 1318 writel(0, &macstat->tx_multicast_packets); 1319 writel(0, &macstat->tx_broadcast_packets); 1320 writel(0, &macstat->tx_pause_frames); 1321 writel(0, &macstat->tx_deferred); 1322 writel(0, &macstat->tx_excessive_deferred); 1323 writel(0, &macstat->tx_single_collisions); 1324 writel(0, &macstat->tx_multiple_collisions); 1325 writel(0, &macstat->tx_late_collisions); 1326 writel(0, &macstat->tx_excessive_collisions); 1327 writel(0, &macstat->tx_total_collisions); 1328 writel(0, &macstat->tx_pause_honored_frames); 1329 writel(0, &macstat->tx_drops); 1330 writel(0, &macstat->tx_jabbers); 1331 writel(0, &macstat->tx_fcs_errs); 1332 writel(0, &macstat->tx_control_frames); 1333 writel(0, &macstat->tx_oversize_frames); 1334 writel(0, &macstat->tx_undersize_frames); 1335 writel(0, &macstat->tx_fragments); 1336 writel(0, &macstat->carry_reg1); 1337 writel(0, &macstat->carry_reg2); 1338 1339 /* Unmask any counters that we want to track the overflow of. 
1340 * Initially this will be all counters. It may become clear later 1341 * that we do not need to track all counters. 1342 */ 1343 writel(0xFFFFBE32, &macstat->carry_reg1_mask); 1344 writel(0xFFFE7E8B, &macstat->carry_reg2_mask); 1345} 1346 1347/* et131x_phy_mii_read - Read from the PHY through the MII Interface on the MAC 1348 * @adapter: pointer to our private adapter structure 1349 * @addr: the address of the transceiver 1350 * @reg: the register to read 1351 * @value: pointer to a 16-bit value in which the value will be stored 1352 */ 1353static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr, 1354 u8 reg, u16 *value) 1355{ 1356 struct mac_regs __iomem *mac = &adapter->regs->mac; 1357 int status = 0; 1358 u32 delay = 0; 1359 u32 mii_addr; 1360 u32 mii_cmd; 1361 u32 mii_indicator; 1362 1363 /* Save a local copy of the registers we are dealing with so we can 1364 * set them back 1365 */ 1366 mii_addr = readl(&mac->mii_mgmt_addr); 1367 mii_cmd = readl(&mac->mii_mgmt_cmd); 1368 1369 /* Stop the current operation */ 1370 writel(0, &mac->mii_mgmt_cmd); 1371 1372 /* Set up the register we need to read from on the correct PHY */ 1373 writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr); 1374 1375 writel(0x1, &mac->mii_mgmt_cmd); 1376 1377 do { 1378 udelay(50); 1379 delay++; 1380 mii_indicator = readl(&mac->mii_mgmt_indicator); 1381 } while ((mii_indicator & ET_MAC_MGMT_WAIT) && delay < 50); 1382 1383 /* If we hit the max delay, we could not read the register */ 1384 if (delay == 50) { 1385 dev_warn(&adapter->pdev->dev, 1386 "reg 0x%08x could not be read\n", reg); 1387 dev_warn(&adapter->pdev->dev, "status is 0x%08x\n", 1388 mii_indicator); 1389 1390 status = -EIO; 1391 } 1392 1393 /* If we hit here we were able to read the register and we need to 1394 * return the value to the caller 1395 */ 1396 *value = readl(&mac->mii_mgmt_stat) & ET_MAC_MIIMGMT_STAT_PHYCRTL_MASK; 1397 1398 /* Stop the read operation */ 1399 writel(0, &mac->mii_mgmt_cmd); 1400 1401 /* set the registers we touched back to the state at which we entered 1402 * this function 1403 */ 1404 writel(mii_addr, &mac->mii_mgmt_addr); 1405 writel(mii_cmd, &mac->mii_mgmt_cmd); 1406 1407 return status; 1408} 1409 1410static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value) 1411{ 1412 struct phy_device *phydev = adapter->phydev; 1413 1414 if (!phydev) 1415 return -EIO; 1416 1417 return et131x_phy_mii_read(adapter, phydev->addr, reg, value); 1418} 1419 1420/* et131x_mii_write - Write to a PHY reg through the MII interface of the MAC 1421 * @adapter: pointer to our private adapter structure 1422 * @reg: the register to read 1423 * @value: 16-bit value to write 1424 */ 1425static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value) 1426{ 1427 struct mac_regs __iomem *mac = &adapter->regs->mac; 1428 struct phy_device *phydev = adapter->phydev; 1429 int status = 0; 1430 u8 addr; 1431 u32 delay = 0; 1432 u32 mii_addr; 1433 u32 mii_cmd; 1434 u32 mii_indicator; 1435 1436 if (!phydev) 1437 return -EIO; 1438 1439 addr = phydev->addr; 1440 1441 /* Save a local copy of the registers we are dealing with so we can 1442 * set them back 1443 */ 1444 mii_addr = readl(&mac->mii_mgmt_addr); 1445 mii_cmd = readl(&mac->mii_mgmt_cmd); 1446 1447 /* Stop the current operation */ 1448 writel(0, &mac->mii_mgmt_cmd); 1449 1450 /* Set up the register we need to write to on the correct PHY */ 1451 writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr); 1452 1453 /* Add the value to write to the registers 
to the mac */ 1454 writel(value, &mac->mii_mgmt_ctrl); 1455 1456 do { 1457 udelay(50); 1458 delay++; 1459 mii_indicator = readl(&mac->mii_mgmt_indicator); 1460 } while ((mii_indicator & ET_MAC_MGMT_BUSY) && delay < 100); 1461 1462 /* If we hit the max delay, we could not write the register */ 1463 if (delay == 100) { 1464 u16 tmp; 1465 1466 dev_warn(&adapter->pdev->dev, 1467 "reg 0x%08x could not be written", reg); 1468 dev_warn(&adapter->pdev->dev, "status is 0x%08x\n", 1469 mii_indicator); 1470 dev_warn(&adapter->pdev->dev, "command is 0x%08x\n", 1471 readl(&mac->mii_mgmt_cmd)); 1472 1473 et131x_mii_read(adapter, reg, &tmp); 1474 1475 status = -EIO; 1476 } 1477 /* Stop the write operation */ 1478 writel(0, &mac->mii_mgmt_cmd); 1479 1480 /* set the registers we touched back to the state at which we entered 1481 * this function 1482 */ 1483 writel(mii_addr, &mac->mii_mgmt_addr); 1484 writel(mii_cmd, &mac->mii_mgmt_cmd); 1485 1486 return status; 1487} 1488 1489static void et1310_phy_read_mii_bit(struct et131x_adapter *adapter, 1490 u16 regnum, 1491 u16 bitnum, 1492 u8 *value) 1493{ 1494 u16 reg; 1495 u16 mask = 1 << bitnum; 1496 1497 /* Read the requested register */ 1498 et131x_mii_read(adapter, regnum, &reg); 1499 1500 *value = (reg & mask) >> bitnum; 1501} 1502 1503static void et1310_config_flow_control(struct et131x_adapter *adapter) 1504{ 1505 struct phy_device *phydev = adapter->phydev; 1506 1507 if (phydev->duplex == DUPLEX_HALF) { 1508 adapter->flowcontrol = FLOW_NONE; 1509 } else { 1510 char remote_pause, remote_async_pause; 1511 1512 et1310_phy_read_mii_bit(adapter, 5, 10, &remote_pause); 1513 et1310_phy_read_mii_bit(adapter, 5, 11, &remote_async_pause); 1514 1515 if (remote_pause && remote_async_pause) { 1516 adapter->flowcontrol = adapter->wanted_flow; 1517 } else if (remote_pause && !remote_async_pause) { 1518 if (adapter->wanted_flow == FLOW_BOTH) 1519 adapter->flowcontrol = FLOW_BOTH; 1520 else 1521 adapter->flowcontrol = FLOW_NONE; 1522 } else if (!remote_pause && !remote_async_pause) { 1523 adapter->flowcontrol = FLOW_NONE; 1524 } else { 1525 if (adapter->wanted_flow == FLOW_BOTH) 1526 adapter->flowcontrol = FLOW_RXONLY; 1527 else 1528 adapter->flowcontrol = FLOW_NONE; 1529 } 1530 } 1531} 1532 1533/* et1310_update_macstat_host_counters - Update local copy of the statistics */ 1534static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter) 1535{ 1536 struct ce_stats *stats = &adapter->stats; 1537 struct macstat_regs __iomem *macstat = 1538 &adapter->regs->macstat; 1539 1540 stats->tx_collisions += readl(&macstat->tx_total_collisions); 1541 stats->tx_first_collisions += readl(&macstat->tx_single_collisions); 1542 stats->tx_deferred += readl(&macstat->tx_deferred); 1543 stats->tx_excessive_collisions += 1544 readl(&macstat->tx_multiple_collisions); 1545 stats->tx_late_collisions += readl(&macstat->tx_late_collisions); 1546 stats->tx_underflows += readl(&macstat->tx_undersize_frames); 1547 stats->tx_max_pkt_errs += readl(&macstat->tx_oversize_frames); 1548 1549 stats->rx_align_errs += readl(&macstat->rx_align_errs); 1550 stats->rx_crc_errs += readl(&macstat->rx_code_errs); 1551 stats->rcvd_pkts_dropped += readl(&macstat->rx_drops); 1552 stats->rx_overflows += readl(&macstat->rx_oversize_packets); 1553 stats->rx_code_violations += readl(&macstat->rx_fcs_errs); 1554 stats->rx_length_errs += readl(&macstat->rx_frame_len_errs); 1555 stats->rx_other_errs += readl(&macstat->rx_fragment_packets); 1556} 1557 1558/* et1310_handle_macstat_interrupt 1559 * 1560 * One 
of the MACSTAT counters has wrapped. Update the local copy of 1561 * the statistics held in the adapter structure, checking the "wrap" 1562 * bit for each counter. 1563 */ 1564static void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter) 1565{ 1566 u32 carry_reg1; 1567 u32 carry_reg2; 1568 1569 /* Read the interrupt bits from the register(s). These are Clear On 1570 * Write. 1571 */ 1572 carry_reg1 = readl(&adapter->regs->macstat.carry_reg1); 1573 carry_reg2 = readl(&adapter->regs->macstat.carry_reg2); 1574 1575 writel(carry_reg1, &adapter->regs->macstat.carry_reg1); 1576 writel(carry_reg2, &adapter->regs->macstat.carry_reg2); 1577 1578 /* We need to do update the host copy of all the MAC_STAT counters. 1579 * For each counter, check it's overflow bit. If the overflow bit is 1580 * set, then increment the host version of the count by one complete 1581 * revolution of the counter. This routine is called when the counter 1582 * block indicates that one of the counters has wrapped. 1583 */ 1584 if (carry_reg1 & (1 << 14)) 1585 adapter->stats.rx_code_violations += COUNTER_WRAP_16_BIT; 1586 if (carry_reg1 & (1 << 8)) 1587 adapter->stats.rx_align_errs += COUNTER_WRAP_12_BIT; 1588 if (carry_reg1 & (1 << 7)) 1589 adapter->stats.rx_length_errs += COUNTER_WRAP_16_BIT; 1590 if (carry_reg1 & (1 << 2)) 1591 adapter->stats.rx_other_errs += COUNTER_WRAP_16_BIT; 1592 if (carry_reg1 & (1 << 6)) 1593 adapter->stats.rx_crc_errs += COUNTER_WRAP_16_BIT; 1594 if (carry_reg1 & (1 << 3)) 1595 adapter->stats.rx_overflows += COUNTER_WRAP_16_BIT; 1596 if (carry_reg1 & (1 << 0)) 1597 adapter->stats.rcvd_pkts_dropped += COUNTER_WRAP_16_BIT; 1598 if (carry_reg2 & (1 << 16)) 1599 adapter->stats.tx_max_pkt_errs += COUNTER_WRAP_12_BIT; 1600 if (carry_reg2 & (1 << 15)) 1601 adapter->stats.tx_underflows += COUNTER_WRAP_12_BIT; 1602 if (carry_reg2 & (1 << 6)) 1603 adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT; 1604 if (carry_reg2 & (1 << 8)) 1605 adapter->stats.tx_deferred += COUNTER_WRAP_12_BIT; 1606 if (carry_reg2 & (1 << 5)) 1607 adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT; 1608 if (carry_reg2 & (1 << 4)) 1609 adapter->stats.tx_late_collisions += COUNTER_WRAP_12_BIT; 1610 if (carry_reg2 & (1 << 2)) 1611 adapter->stats.tx_collisions += COUNTER_WRAP_12_BIT; 1612} 1613 1614static int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg) 1615{ 1616 struct net_device *netdev = bus->priv; 1617 struct et131x_adapter *adapter = netdev_priv(netdev); 1618 u16 value; 1619 int ret; 1620 1621 ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value); 1622 1623 if (ret < 0) 1624 return ret; 1625 else 1626 return value; 1627} 1628 1629static int et131x_mdio_write(struct mii_bus *bus, int phy_addr, 1630 int reg, u16 value) 1631{ 1632 struct net_device *netdev = bus->priv; 1633 struct et131x_adapter *adapter = netdev_priv(netdev); 1634 1635 return et131x_mii_write(adapter, reg, value); 1636} 1637 1638static int et131x_mdio_reset(struct mii_bus *bus) 1639{ 1640 struct net_device *netdev = bus->priv; 1641 struct et131x_adapter *adapter = netdev_priv(netdev); 1642 1643 et131x_mii_write(adapter, MII_BMCR, BMCR_RESET); 1644 1645 return 0; 1646} 1647 1648/* et1310_phy_power_switch - PHY power control 1649 * @adapter: device to control 1650 * @down: true for off/false for back on 1651 * 1652 * one hundred, ten, one thousand megs 1653 * How would you like to have your LAN accessed 1654 * Can't you see that this code processed 1655 * Phy power, phy power.. 
1656 */ 1657static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down) 1658{ 1659 u16 data; 1660 1661 et131x_mii_read(adapter, MII_BMCR, &data); 1662 data &= ~BMCR_PDOWN; 1663 if (down) 1664 data |= BMCR_PDOWN; 1665 et131x_mii_write(adapter, MII_BMCR, data); 1666} 1667 1668/* et131x_xcvr_init - Init the phy if we are setting it into force mode */ 1669static void et131x_xcvr_init(struct et131x_adapter *adapter) 1670{ 1671 u16 lcr2; 1672 1673 /* Set the LED behavior such that LED 1 indicates speed (off = 1674 * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates 1675 * link and activity (on for link, blink off for activity). 1676 * 1677 * NOTE: Some customizations have been added here for specific 1678 * vendors; The LED behavior is now determined by vendor data in the 1679 * EEPROM. However, the above description is the default. 1680 */ 1681 if ((adapter->eeprom_data[1] & 0x4) == 0) { 1682 et131x_mii_read(adapter, PHY_LED_2, &lcr2); 1683 1684 lcr2 &= (ET_LED2_LED_100TX | ET_LED2_LED_1000T); 1685 lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT); 1686 1687 if ((adapter->eeprom_data[1] & 0x8) == 0) 1688 lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT); 1689 else 1690 lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT); 1691 1692 et131x_mii_write(adapter, PHY_LED_2, lcr2); 1693 } 1694} 1695 1696/* et131x_configure_global_regs - configure JAGCore global regs 1697 * 1698 * Used to configure the global registers on the JAGCore 1699 */ 1700static void et131x_configure_global_regs(struct et131x_adapter *adapter) 1701{ 1702 struct global_regs __iomem *regs = &adapter->regs->global; 1703 1704 writel(0, &regs->rxq_start_addr); 1705 writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr); 1706 1707 if (adapter->registry_jumbo_packet < 2048) { 1708 /* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word 1709 * block of RAM that the driver can split between Tx 1710 * and Rx as it desires. Our default is to split it 1711 * 50/50: 1712 */ 1713 writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr); 1714 writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr); 1715 } else if (adapter->registry_jumbo_packet < 8192) { 1716 /* For jumbo packets > 2k but < 8k, split 50-50. */ 1717 writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr); 1718 writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr); 1719 } else { 1720 /* 9216 is the only packet size greater than 8k that 1721 * is available. The Tx buffer has to be big enough 1722 * for one whole packet on the Tx side. We'll make 1723 * the Tx 9408, and give the rest to Rx 1724 */ 1725 writel(0x01b3, &regs->rxq_end_addr); 1726 writel(0x01b4, &regs->txq_start_addr); 1727 } 1728 1729 /* Initialize the loopback register. Disable all loopbacks. */ 1730 writel(0, &regs->loopback); 1731 1732 /* MSI Register */ 1733 writel(0, &regs->msi_config); 1734 1735 /* By default, disable the watchdog timer. It will be enabled when 1736 * a packet is queued. 1737 */ 1738 writel(0, &regs->watchdog_timer); 1739} 1740 1741/* et131x_config_rx_dma_regs - Start of Rx_DMA init sequence */ 1742static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter) 1743{ 1744 struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma; 1745 struct rx_ring *rx_local = &adapter->rx_ring; 1746 struct fbr_desc *fbr_entry; 1747 u32 entry; 1748 u32 psr_num_des; 1749 unsigned long flags; 1750 u8 id; 1751 1752 /* Halt RXDMA to perform the reconfigure. 
*/ 1753 et131x_rx_dma_disable(adapter); 1754 1755 /* Load the completion writeback physical address */ 1756 writel(upper_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_hi); 1757 writel(lower_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_lo); 1758 1759 memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block)); 1760 1761 /* Set the address and parameters of the packet status ring into the 1762 * 1310's registers 1763 */ 1764 writel(upper_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_hi); 1765 writel(lower_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_lo); 1766 writel(rx_local->psr_num_entries - 1, &rx_dma->psr_num_des); 1767 writel(0, &rx_dma->psr_full_offset); 1768 1769 psr_num_des = readl(&rx_dma->psr_num_des) & ET_RXDMA_PSR_NUM_DES_MASK; 1770 writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100, 1771 &rx_dma->psr_min_des); 1772 1773 spin_lock_irqsave(&adapter->rcv_lock, flags); 1774 1775 /* These local variables track the PSR in the adapter structure */ 1776 rx_local->local_psr_full = 0; 1777 1778 for (id = 0; id < NUM_FBRS; id++) { 1779 u32 __iomem *num_des; 1780 u32 __iomem *full_offset; 1781 u32 __iomem *min_des; 1782 u32 __iomem *base_hi; 1783 u32 __iomem *base_lo; 1784 struct fbr_lookup *fbr = rx_local->fbr[id]; 1785 1786 if (id == 0) { 1787 num_des = &rx_dma->fbr0_num_des; 1788 full_offset = &rx_dma->fbr0_full_offset; 1789 min_des = &rx_dma->fbr0_min_des; 1790 base_hi = &rx_dma->fbr0_base_hi; 1791 base_lo = &rx_dma->fbr0_base_lo; 1792 } else { 1793 num_des = &rx_dma->fbr1_num_des; 1794 full_offset = &rx_dma->fbr1_full_offset; 1795 min_des = &rx_dma->fbr1_min_des; 1796 base_hi = &rx_dma->fbr1_base_hi; 1797 base_lo = &rx_dma->fbr1_base_lo; 1798 } 1799 1800 /* Now's the best time to initialize FBR contents */ 1801 fbr_entry = fbr->ring_virtaddr; 1802 for (entry = 0; entry < fbr->num_entries; entry++) { 1803 fbr_entry->addr_hi = fbr->bus_high[entry]; 1804 fbr_entry->addr_lo = fbr->bus_low[entry]; 1805 fbr_entry->word2 = entry; 1806 fbr_entry++; 1807 } 1808 1809 /* Set the address and parameters of Free buffer ring 1 and 0 1810 * into the 1310's registers 1811 */ 1812 writel(upper_32_bits(fbr->ring_physaddr), base_hi); 1813 writel(lower_32_bits(fbr->ring_physaddr), base_lo); 1814 writel(fbr->num_entries - 1, num_des); 1815 writel(ET_DMA10_WRAP, full_offset); 1816 1817 /* This variable tracks the free buffer ring 1 full position, 1818 * so it has to match the above. 1819 */ 1820 fbr->local_full = ET_DMA10_WRAP; 1821 writel(((fbr->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1, 1822 min_des); 1823 } 1824 1825 /* Program the number of packets we will receive before generating an 1826 * interrupt. 1827 * For version B silicon, this value gets updated once autoneg is 1828 *complete. 1829 */ 1830 writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done); 1831 1832 /* The "time_done" is not working correctly to coalesce interrupts 1833 * after a given time period, but rather is giving us an interrupt 1834 * regardless of whether we have received packets. 1835 * This value gets updated once autoneg is complete. 1836 */ 1837 writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time); 1838 1839 spin_unlock_irqrestore(&adapter->rcv_lock, flags); 1840} 1841 1842/* et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore. 1843 * 1844 * Configure the transmit engine with the ring buffers we have created 1845 * and prepare it for use. 
 */
static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
{
	struct txdma_regs __iomem *txdma = &adapter->regs->txdma;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* Load the hardware with the start of the transmit descriptor ring. */
	writel(upper_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_hi);
	writel(lower_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_lo);

	/* Initialise the transmit DMA engine */
	writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);

	/* Load the completion writeback physical address */
	writel(upper_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_hi);
	writel(lower_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_lo);

	*tx_ring->tx_status = 0;

	writel(0, &txdma->service_request);
	tx_ring->send_idx = 0;
}

/* et131x_adapter_setup - Set the adapter up as per cassini+ documentation */
static void et131x_adapter_setup(struct et131x_adapter *adapter)
{
	/* Configure the JAGCore */
	et131x_configure_global_regs(adapter);

	et1310_config_mac_regs1(adapter);

	/* Configure the MMC registers */
	/* All we need to do is initialize the Memory Control Register */
	writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl);

	et1310_config_rxmac_regs(adapter);
	et1310_config_txmac_regs(adapter);

	et131x_config_rx_dma_regs(adapter);
	et131x_config_tx_dma_regs(adapter);

	et1310_config_macstat_regs(adapter);

	et1310_phy_power_switch(adapter, 0);
	et131x_xcvr_init(adapter);
}

/* et131x_soft_reset - Issue soft reset to the hardware, complete for ET1310 */
static void et131x_soft_reset(struct et131x_adapter *adapter)
{
	u32 reg;

	/* Disable MAC Core */
	reg = ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET |
	      ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
	      ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC;
	writel(reg, &adapter->regs->mac.cfg1);

	reg = ET_RESET_ALL;
	writel(reg, &adapter->regs->global.sw_reset);

	reg = ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
	      ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC;
	writel(reg, &adapter->regs->mac.cfg1);
	writel(0, &adapter->regs->mac.cfg1);
}

/* et131x_enable_interrupts - enable interrupts
 *
 * Enable the appropriate interrupts on the ET131x according to our
 * configuration
 */
static void et131x_enable_interrupts(struct et131x_adapter *adapter)
{
	u32 mask;

	/* Enable all global interrupts */
	if (adapter->flowcontrol == FLOW_TXONLY ||
	    adapter->flowcontrol == FLOW_BOTH)
		mask = INT_MASK_ENABLE;
	else
		mask = INT_MASK_ENABLE_NO_FLOW;

	writel(mask, &adapter->regs->global.int_mask);
}

/* et131x_disable_interrupts - interrupt disable
 *
 * Block all interrupts from the et131x device at the device itself
 */
static void et131x_disable_interrupts(struct et131x_adapter *adapter)
{
	/* Disable all global interrupts */
	writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
}

/* et131x_tx_dma_disable - Stop of Tx_DMA on the ET1310 */
static void et131x_tx_dma_disable(struct et131x_adapter *adapter)
{
	/* Set up the transmit dma configuration register */
	writel(ET_TXDMA_CSR_HALT | ET_TXDMA_SNGL_EPKT,
	       &adapter->regs->txdma.csr);
}

/* et131x_enable_txrx - Enable tx/rx queues */
static void et131x_enable_txrx(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	/* Enable the Tx and Rx DMA engines (if not already enabled) */
	et131x_rx_dma_enable(adapter);
	et131x_tx_dma_enable(adapter);

	/* Enable device interrupts */
	if (adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE)
		et131x_enable_interrupts(adapter);

	/* We're ready to move some data, so start the queue */
	netif_start_queue(netdev);
}

/* et131x_disable_txrx - Disable tx/rx queues */
static void et131x_disable_txrx(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	/* First thing is to stop the queue */
	netif_stop_queue(netdev);

	/* Stop the Tx and Rx DMA engines */
	et131x_rx_dma_disable(adapter);
	et131x_tx_dma_disable(adapter);

	/* Disable device interrupts */
	et131x_disable_interrupts(adapter);
}

/* et131x_init_send - Initialize send data structures */
static void et131x_init_send(struct et131x_adapter *adapter)
{
	u32 ct;
	struct tx_ring *tx_ring = &adapter->tx_ring;
	struct tcb *tcb = tx_ring->tcb_ring;

	tx_ring->tcb_qhead = tcb;

	memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);

	/* Go through and set up each TCB */
	for (ct = 0; ct++ < NUM_TCB; tcb++)
		/* Set the link pointer in HW TCB to the next TCB in the
		 * chain
		 */
		tcb->next = tcb + 1;

	/* Set the tail pointer */
	tcb--;
	tx_ring->tcb_qtail = tcb;
	tcb->next = NULL;
	/* The current send queue should now be empty */
	tx_ring->send_head = NULL;
	tx_ring->send_tail = NULL;
}

/* et1310_enable_phy_coma - called when the network cable is unplugged
 *
 * The driver receives a phy status change interrupt while in D0 and checks
 * that phy_status shows the link is down.
 *
 * -- gate off JAGCore;
 * -- set gigE PHY in Coma mode
 * -- wake on phy_interrupt; perform a software reset of the JAGCore,
 *    then re-initialize the JAGCore and gigE PHY
 *
 * Add D0-ASPM-PhyLinkDown Support:
 * -- while in D0, when there is a phy_interrupt indicating phy link
 *    down status, call the MPSetPhyComa routine to enter this active
 *    state power saving mode
 * -- while in D0-ASPM-PhyLinkDown mode, when there is a phy_interrupt
 *    indicating linkup status, call the MPDisablePhyComa routine to
 *    restore JAGCore and gigE PHY
 */
static void et1310_enable_phy_coma(struct et131x_adapter *adapter)
{
	unsigned long flags;
	u32 pmcsr;

	pmcsr = readl(&adapter->regs->global.pm_csr);

	/* Save the GbE PHY speed and duplex modes. Need to restore this
	 * when cable is plugged back in
	 */

	/* Stop sending packets.
*/ 2040 spin_lock_irqsave(&adapter->send_hw_lock, flags); 2041 adapter->flags |= FMP_ADAPTER_LOWER_POWER; 2042 spin_unlock_irqrestore(&adapter->send_hw_lock, flags); 2043 2044 /* Wait for outstanding Receive packets */ 2045 2046 et131x_disable_txrx(adapter->netdev); 2047 2048 /* Gate off JAGCore 3 clock domains */ 2049 pmcsr &= ~ET_PMCSR_INIT; 2050 writel(pmcsr, &adapter->regs->global.pm_csr); 2051 2052 /* Program gigE PHY in to Coma mode */ 2053 pmcsr |= ET_PM_PHY_SW_COMA; 2054 writel(pmcsr, &adapter->regs->global.pm_csr); 2055} 2056 2057/* et1310_disable_phy_coma - Disable the Phy Coma Mode */ 2058static void et1310_disable_phy_coma(struct et131x_adapter *adapter) 2059{ 2060 u32 pmcsr; 2061 2062 pmcsr = readl(&adapter->regs->global.pm_csr); 2063 2064 /* Disable phy_sw_coma register and re-enable JAGCore clocks */ 2065 pmcsr |= ET_PMCSR_INIT; 2066 pmcsr &= ~ET_PM_PHY_SW_COMA; 2067 writel(pmcsr, &adapter->regs->global.pm_csr); 2068 2069 /* Restore the GbE PHY speed and duplex modes; 2070 * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY 2071 */ 2072 2073 /* Re-initialize the send structures */ 2074 et131x_init_send(adapter); 2075 2076 /* Bring the device back to the state it was during init prior to 2077 * autonegotiation being complete. This way, when we get the auto-neg 2078 * complete interrupt, we can complete init by calling ConfigMacREGS2. 2079 */ 2080 et131x_soft_reset(adapter); 2081 2082 /* setup et1310 as per the documentation ?? */ 2083 et131x_adapter_setup(adapter); 2084 2085 /* Allow Tx to restart */ 2086 adapter->flags &= ~FMP_ADAPTER_LOWER_POWER; 2087 2088 et131x_enable_txrx(adapter->netdev); 2089} 2090 2091static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit) 2092{ 2093 u32 tmp_free_buff_ring = *free_buff_ring; 2094 tmp_free_buff_ring++; 2095 /* This works for all cases where limit < 1024. The 1023 case 2096 * works because 1023++ is 1024 which means the if condition is not 2097 * taken but the carry of the bit into the wrap bit toggles the wrap 2098 * value correctly 2099 */ 2100 if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) { 2101 tmp_free_buff_ring &= ~ET_DMA10_MASK; 2102 tmp_free_buff_ring ^= ET_DMA10_WRAP; 2103 } 2104 /* For the 1023 case */ 2105 tmp_free_buff_ring &= (ET_DMA10_MASK | ET_DMA10_WRAP); 2106 *free_buff_ring = tmp_free_buff_ring; 2107 return tmp_free_buff_ring; 2108} 2109 2110/* et131x_rx_dma_memory_alloc 2111 * 2112 * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required, 2113 * and the Packet Status Ring. 2114 */ 2115static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter) 2116{ 2117 u8 id; 2118 u32 i, j; 2119 u32 bufsize; 2120 u32 pktstat_ringsize; 2121 u32 fbr_chunksize; 2122 struct rx_ring *rx_ring = &adapter->rx_ring; 2123 struct fbr_lookup *fbr; 2124 2125 /* Alloc memory for the lookup table */ 2126 rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL); 2127 rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL); 2128 2129 /* The first thing we will do is configure the sizes of the buffer 2130 * rings. These will change based on jumbo packet support. Larger 2131 * jumbo packets increases the size of each entry in FBR0, and the 2132 * number of entries in FBR0, while at the same time decreasing the 2133 * number of entries in FBR1. 2134 * 2135 * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1 2136 * entries are huge in order to accommodate a "jumbo" frame, then it 2137 * will have less entries. 
Conversely, FBR1 will now be relied upon 2138 * to carry more "normal" frames, thus it's entry size also increases 2139 * and the number of entries goes up too (since it now carries 2140 * "small" + "regular" packets. 2141 * 2142 * In this scheme, we try to maintain 512 entries between the two 2143 * rings. Also, FBR1 remains a constant size - when it's size doubles 2144 * the number of entries halves. FBR0 increases in size, however. 2145 */ 2146 2147 if (adapter->registry_jumbo_packet < 2048) { 2148 rx_ring->fbr[0]->buffsize = 256; 2149 rx_ring->fbr[0]->num_entries = 512; 2150 rx_ring->fbr[1]->buffsize = 2048; 2151 rx_ring->fbr[1]->num_entries = 512; 2152 } else if (adapter->registry_jumbo_packet < 4096) { 2153 rx_ring->fbr[0]->buffsize = 512; 2154 rx_ring->fbr[0]->num_entries = 1024; 2155 rx_ring->fbr[1]->buffsize = 4096; 2156 rx_ring->fbr[1]->num_entries = 512; 2157 } else { 2158 rx_ring->fbr[0]->buffsize = 1024; 2159 rx_ring->fbr[0]->num_entries = 768; 2160 rx_ring->fbr[1]->buffsize = 16384; 2161 rx_ring->fbr[1]->num_entries = 128; 2162 } 2163 2164 rx_ring->psr_num_entries = rx_ring->fbr[0]->num_entries + 2165 rx_ring->fbr[1]->num_entries; 2166 2167 for (id = 0; id < NUM_FBRS; id++) { 2168 fbr = rx_ring->fbr[id]; 2169 /* Allocate an area of memory for Free Buffer Ring */ 2170 bufsize = sizeof(struct fbr_desc) * fbr->num_entries; 2171 fbr->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev, 2172 bufsize, 2173 &fbr->ring_physaddr, 2174 GFP_KERNEL); 2175 if (!fbr->ring_virtaddr) { 2176 dev_err(&adapter->pdev->dev, 2177 "Cannot alloc memory for Free Buffer Ring %d\n", id); 2178 return -ENOMEM; 2179 } 2180 } 2181 2182 for (id = 0; id < NUM_FBRS; id++) { 2183 fbr = rx_ring->fbr[id]; 2184 fbr_chunksize = (FBR_CHUNKS * fbr->buffsize); 2185 2186 for (i = 0; i < fbr->num_entries / FBR_CHUNKS; i++) { 2187 dma_addr_t fbr_tmp_physaddr; 2188 2189 fbr->mem_virtaddrs[i] = dma_alloc_coherent( 2190 &adapter->pdev->dev, fbr_chunksize, 2191 &fbr->mem_physaddrs[i], 2192 GFP_KERNEL); 2193 2194 if (!fbr->mem_virtaddrs[i]) { 2195 dev_err(&adapter->pdev->dev, 2196 "Could not alloc memory\n"); 2197 return -ENOMEM; 2198 } 2199 2200 /* See NOTE in "Save Physical Address" comment above */ 2201 fbr_tmp_physaddr = fbr->mem_physaddrs[i]; 2202 2203 for (j = 0; j < FBR_CHUNKS; j++) { 2204 u32 index = (i * FBR_CHUNKS) + j; 2205 2206 /* Save the Virtual address of this index for 2207 * quick access later 2208 */ 2209 fbr->virt[index] = (u8 *)fbr->mem_virtaddrs[i] + 2210 (j * fbr->buffsize); 2211 2212 /* now store the physical address in the 2213 * descriptor so the device can access it 2214 */ 2215 fbr->bus_high[index] = 2216 upper_32_bits(fbr_tmp_physaddr); 2217 fbr->bus_low[index] = 2218 lower_32_bits(fbr_tmp_physaddr); 2219 2220 fbr_tmp_physaddr += fbr->buffsize; 2221 } 2222 } 2223 } 2224 2225 /* Allocate an area of memory for FIFO of Packet Status ring entries */ 2226 pktstat_ringsize = 2227 sizeof(struct pkt_stat_desc) * rx_ring->psr_num_entries; 2228 2229 rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev, 2230 pktstat_ringsize, 2231 &rx_ring->ps_ring_physaddr, 2232 GFP_KERNEL); 2233 2234 if (!rx_ring->ps_ring_virtaddr) { 2235 dev_err(&adapter->pdev->dev, 2236 "Cannot alloc memory for Packet Status Ring\n"); 2237 return -ENOMEM; 2238 } 2239 2240 /* NOTE : dma_alloc_coherent(), used above to alloc DMA regions, 2241 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses 2242 * are ever returned, make sure the high part is retrieved here before 2243 * storing the adjusted address. 
2244 */ 2245 2246 /* Allocate an area of memory for writeback of status information */ 2247 rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev, 2248 sizeof(struct rx_status_block), 2249 &rx_ring->rx_status_bus, 2250 GFP_KERNEL); 2251 if (!rx_ring->rx_status_block) { 2252 dev_err(&adapter->pdev->dev, 2253 "Cannot alloc memory for Status Block\n"); 2254 return -ENOMEM; 2255 } 2256 rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD; 2257 2258 /* The RFDs are going to be put on lists later on, so initialize the 2259 * lists now. 2260 */ 2261 INIT_LIST_HEAD(&rx_ring->recv_list); 2262 return 0; 2263} 2264 2265/* et131x_rx_dma_memory_free - Free all memory allocated within this module */ 2266static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter) 2267{ 2268 u8 id; 2269 u32 index; 2270 u32 bufsize; 2271 u32 pktstat_ringsize; 2272 struct rfd *rfd; 2273 struct rx_ring *rx_ring = &adapter->rx_ring; 2274 struct fbr_lookup *fbr; 2275 2276 /* Free RFDs and associated packet descriptors */ 2277 WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd); 2278 2279 while (!list_empty(&rx_ring->recv_list)) { 2280 rfd = list_entry(rx_ring->recv_list.next, 2281 struct rfd, list_node); 2282 2283 list_del(&rfd->list_node); 2284 rfd->skb = NULL; 2285 kfree(rfd); 2286 } 2287 2288 /* Free Free Buffer Rings */ 2289 for (id = 0; id < NUM_FBRS; id++) { 2290 fbr = rx_ring->fbr[id]; 2291 2292 if (!fbr->ring_virtaddr) 2293 continue; 2294 2295 /* First the packet memory */ 2296 for (index = 0; 2297 index < fbr->num_entries / FBR_CHUNKS; 2298 index++) { 2299 if (fbr->mem_virtaddrs[index]) { 2300 bufsize = fbr->buffsize * FBR_CHUNKS; 2301 2302 dma_free_coherent(&adapter->pdev->dev, 2303 bufsize, 2304 fbr->mem_virtaddrs[index], 2305 fbr->mem_physaddrs[index]); 2306 2307 fbr->mem_virtaddrs[index] = NULL; 2308 } 2309 } 2310 2311 bufsize = sizeof(struct fbr_desc) * fbr->num_entries; 2312 2313 dma_free_coherent(&adapter->pdev->dev, 2314 bufsize, 2315 fbr->ring_virtaddr, 2316 fbr->ring_physaddr); 2317 2318 fbr->ring_virtaddr = NULL; 2319 } 2320 2321 /* Free Packet Status Ring */ 2322 if (rx_ring->ps_ring_virtaddr) { 2323 pktstat_ringsize = sizeof(struct pkt_stat_desc) * 2324 rx_ring->psr_num_entries; 2325 2326 dma_free_coherent(&adapter->pdev->dev, pktstat_ringsize, 2327 rx_ring->ps_ring_virtaddr, 2328 rx_ring->ps_ring_physaddr); 2329 2330 rx_ring->ps_ring_virtaddr = NULL; 2331 } 2332 2333 /* Free area of memory for the writeback of status information */ 2334 if (rx_ring->rx_status_block) { 2335 dma_free_coherent(&adapter->pdev->dev, 2336 sizeof(struct rx_status_block), 2337 rx_ring->rx_status_block, rx_ring->rx_status_bus); 2338 rx_ring->rx_status_block = NULL; 2339 } 2340 2341 /* Free the FBR Lookup Table */ 2342 kfree(rx_ring->fbr[0]); 2343 kfree(rx_ring->fbr[1]); 2344 2345 /* Reset Counters */ 2346 rx_ring->num_ready_recv = 0; 2347} 2348 2349/* et131x_init_recv - Initialize receive data structures */ 2350static int et131x_init_recv(struct et131x_adapter *adapter) 2351{ 2352 struct rfd *rfd; 2353 u32 rfdct; 2354 struct rx_ring *rx_ring = &adapter->rx_ring; 2355 2356 /* Setup each RFD */ 2357 for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) { 2358 rfd = kzalloc(sizeof(struct rfd), GFP_ATOMIC | GFP_DMA); 2359 if (!rfd) 2360 return -ENOMEM; 2361 2362 rfd->skb = NULL; 2363 2364 /* Add this RFD to the recv_list */ 2365 list_add_tail(&rfd->list_node, &rx_ring->recv_list); 2366 2367 /* Increment the available RFD's */ 2368 rx_ring->num_ready_recv++; 2369 } 2370 2371 return 0; 2372} 2373 2374/* et131x_set_rx_dma_timer - 
Set the heartbeat timer according to line rate */ 2375static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter) 2376{ 2377 struct phy_device *phydev = adapter->phydev; 2378 2379 /* For version B silicon, we do not use the RxDMA timer for 10 and 100 2380 * Mbits/s line rates. We do not enable and RxDMA interrupt coalescing. 2381 */ 2382 if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) { 2383 writel(0, &adapter->regs->rxdma.max_pkt_time); 2384 writel(1, &adapter->regs->rxdma.num_pkt_done); 2385 } 2386} 2387 2388/* NICReturnRFD - Recycle a RFD and put it back onto the receive list 2389 * @adapter: pointer to our adapter 2390 * @rfd: pointer to the RFD 2391 */ 2392static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd) 2393{ 2394 struct rx_ring *rx_local = &adapter->rx_ring; 2395 struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma; 2396 u16 buff_index = rfd->bufferindex; 2397 u8 ring_index = rfd->ringindex; 2398 unsigned long flags; 2399 struct fbr_lookup *fbr = rx_local->fbr[ring_index]; 2400 2401 /* We don't use any of the OOB data besides status. Otherwise, we 2402 * need to clean up OOB data 2403 */ 2404 if (buff_index < fbr->num_entries) { 2405 u32 free_buff_ring; 2406 u32 __iomem *offset; 2407 struct fbr_desc *next; 2408 2409 spin_lock_irqsave(&adapter->fbr_lock, flags); 2410 2411 if (ring_index == 0) 2412 offset = &rx_dma->fbr0_full_offset; 2413 else 2414 offset = &rx_dma->fbr1_full_offset; 2415 2416 next = (struct fbr_desc *)(fbr->ring_virtaddr) + 2417 INDEX10(fbr->local_full); 2418 2419 /* Handle the Free Buffer Ring advancement here. Write 2420 * the PA / Buffer Index for the returned buffer into 2421 * the oldest (next to be freed)FBR entry 2422 */ 2423 next->addr_hi = fbr->bus_high[buff_index]; 2424 next->addr_lo = fbr->bus_low[buff_index]; 2425 next->word2 = buff_index; 2426 2427 free_buff_ring = bump_free_buff_ring(&fbr->local_full, 2428 fbr->num_entries - 1); 2429 writel(free_buff_ring, offset); 2430 2431 spin_unlock_irqrestore(&adapter->fbr_lock, flags); 2432 } else { 2433 dev_err(&adapter->pdev->dev, 2434 "%s illegal Buffer Index returned\n", __func__); 2435 } 2436 2437 /* The processing on this RFD is done, so put it back on the tail of 2438 * our list 2439 */ 2440 spin_lock_irqsave(&adapter->rcv_lock, flags); 2441 list_add_tail(&rfd->list_node, &rx_local->recv_list); 2442 rx_local->num_ready_recv++; 2443 spin_unlock_irqrestore(&adapter->rcv_lock, flags); 2444 2445 WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd); 2446} 2447 2448/* nic_rx_pkts - Checks the hardware for available packets 2449 * 2450 * Returns rfd, a pointer to our MPRFD. 2451 * 2452 * Checks the hardware for available packets, using completion ring 2453 * If packets are available, it gets an RFD from the recv_list, attaches 2454 * the packet to it, puts the RFD in the RecvPendList, and also returns 2455 * the pointer to the RFD. 2456 */ 2457static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter) 2458{ 2459 struct rx_ring *rx_local = &adapter->rx_ring; 2460 struct rx_status_block *status; 2461 struct pkt_stat_desc *psr; 2462 struct rfd *rfd; 2463 u32 i; 2464 u8 *buf; 2465 unsigned long flags; 2466 struct list_head *element; 2467 u8 ring_index; 2468 u16 buff_index; 2469 u32 len; 2470 u32 word0; 2471 u32 word1; 2472 struct sk_buff *skb; 2473 struct fbr_lookup *fbr; 2474 2475 /* RX Status block is written by the DMA engine prior to every 2476 * interrupt. 
It contains the next to be used entry in the Packet 2477 * Status Ring, and also the two Free Buffer rings. 2478 */ 2479 status = rx_local->rx_status_block; 2480 word1 = status->word1 >> 16; /* Get the useful bits */ 2481 2482 /* Check the PSR and wrap bits do not match */ 2483 if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF)) 2484 return NULL; /* Looks like this ring is not updated yet */ 2485 2486 /* The packet status ring indicates that data is available. */ 2487 psr = (struct pkt_stat_desc *) (rx_local->ps_ring_virtaddr) + 2488 (rx_local->local_psr_full & 0xFFF); 2489 2490 /* Grab any information that is required once the PSR is advanced, 2491 * since we can no longer rely on the memory being accurate 2492 */ 2493 len = psr->word1 & 0xFFFF; 2494 ring_index = (psr->word1 >> 26) & 0x03; 2495 fbr = rx_local->fbr[ring_index]; 2496 buff_index = (psr->word1 >> 16) & 0x3FF; 2497 word0 = psr->word0; 2498 2499 /* Indicate that we have used this PSR entry. */ 2500 /* FIXME wrap 12 */ 2501 add_12bit(&rx_local->local_psr_full, 1); 2502 if ( 2503 (rx_local->local_psr_full & 0xFFF) > rx_local->psr_num_entries - 1) { 2504 /* Clear psr full and toggle the wrap bit */ 2505 rx_local->local_psr_full &= ~0xFFF; 2506 rx_local->local_psr_full ^= 0x1000; 2507 } 2508 2509 writel(rx_local->local_psr_full, &adapter->regs->rxdma.psr_full_offset); 2510 2511 if (ring_index > 1 || buff_index > fbr->num_entries - 1) { 2512 /* Illegal buffer or ring index cannot be used by S/W*/ 2513 dev_err(&adapter->pdev->dev, 2514 "NICRxPkts PSR Entry %d indicates length of %d and/or bad bi(%d)\n", 2515 rx_local->local_psr_full & 0xFFF, len, buff_index); 2516 return NULL; 2517 } 2518 2519 /* Get and fill the RFD. */ 2520 spin_lock_irqsave(&adapter->rcv_lock, flags); 2521 2522 element = rx_local->recv_list.next; 2523 rfd = list_entry(element, struct rfd, list_node); 2524 2525 if (!rfd) { 2526 spin_unlock_irqrestore(&adapter->rcv_lock, flags); 2527 return NULL; 2528 } 2529 2530 list_del(&rfd->list_node); 2531 rx_local->num_ready_recv--; 2532 2533 spin_unlock_irqrestore(&adapter->rcv_lock, flags); 2534 2535 rfd->bufferindex = buff_index; 2536 rfd->ringindex = ring_index; 2537 2538 /* In V1 silicon, there is a bug which screws up filtering of runt 2539 * packets. Therefore runt packet filtering is disabled in the MAC and 2540 * the packets are dropped here. They are also counted here. 2541 */ 2542 if (len < (NIC_MIN_PACKET_SIZE + 4)) { 2543 adapter->stats.rx_other_errs++; 2544 len = 0; 2545 } 2546 2547 if (len == 0) { 2548 rfd->len = 0; 2549 goto out; 2550 } 2551 2552 /* Determine if this is a multicast packet coming in */ 2553 if ((word0 & ALCATEL_MULTICAST_PKT) && 2554 !(word0 & ALCATEL_BROADCAST_PKT)) { 2555 /* Promiscuous mode and Multicast mode are not mutually 2556 * exclusive as was first thought. I guess Promiscuous is just 2557 * considered a super-set of the other filters. Generally filter 2558 * is 0x2b when in promiscuous mode. 2559 */ 2560 if ((adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) 2561 && !(adapter->packet_filter & ET131X_PACKET_TYPE_PROMISCUOUS) 2562 && !(adapter->packet_filter & 2563 ET131X_PACKET_TYPE_ALL_MULTICAST)) { 2564 buf = fbr->virt[buff_index]; 2565 2566 /* Loop through our list to see if the destination 2567 * address of this packet matches one in our list. 
2568 */ 2569 for (i = 0; i < adapter->multicast_addr_count; i++) { 2570 if (buf[0] == adapter->multicast_list[i][0] 2571 && buf[1] == adapter->multicast_list[i][1] 2572 && buf[2] == adapter->multicast_list[i][2] 2573 && buf[3] == adapter->multicast_list[i][3] 2574 && buf[4] == adapter->multicast_list[i][4] 2575 && buf[5] == adapter->multicast_list[i][5]) { 2576 break; 2577 } 2578 } 2579 2580 /* If our index is equal to the number of Multicast 2581 * address we have, then this means we did not find this 2582 * packet's matching address in our list. Set the len to 2583 * zero, so we free our RFD when we return from this 2584 * function. 2585 */ 2586 if (i == adapter->multicast_addr_count) 2587 len = 0; 2588 } 2589 2590 if (len > 0) 2591 adapter->stats.multicast_pkts_rcvd++; 2592 } else if (word0 & ALCATEL_BROADCAST_PKT) { 2593 adapter->stats.broadcast_pkts_rcvd++; 2594 } else { 2595 /* Not sure what this counter measures in promiscuous mode. 2596 * Perhaps we should check the MAC address to see if it is 2597 * directed to us in promiscuous mode. 2598 */ 2599 adapter->stats.unicast_pkts_rcvd++; 2600 } 2601 2602 if (!len) { 2603 rfd->len = 0; 2604 goto out; 2605 } 2606 2607 rfd->len = len; 2608 2609 skb = dev_alloc_skb(rfd->len + 2); 2610 if (!skb) { 2611 dev_err(&adapter->pdev->dev, "Couldn't alloc an SKB for Rx\n"); 2612 return NULL; 2613 } 2614 2615 adapter->net_stats.rx_bytes += rfd->len; 2616 2617 memcpy(skb_put(skb, rfd->len), fbr->virt[buff_index], rfd->len); 2618 2619 skb->protocol = eth_type_trans(skb, adapter->netdev); 2620 skb->ip_summed = CHECKSUM_NONE; 2621 netif_rx_ni(skb); 2622 2623out: 2624 nic_return_rfd(adapter, rfd); 2625 return rfd; 2626} 2627 2628/* et131x_handle_recv_interrupt - Interrupt handler for receive processing 2629 * 2630 * Assumption, Rcv spinlock has been acquired. 2631 */ 2632static void et131x_handle_recv_interrupt(struct et131x_adapter *adapter) 2633{ 2634 struct rfd *rfd = NULL; 2635 u32 count = 0; 2636 bool done = true; 2637 struct rx_ring *rx_ring = &adapter->rx_ring; 2638 2639 /* Process up to available RFD's */ 2640 while (count < NUM_PACKETS_HANDLED) { 2641 if (list_empty(&rx_ring->recv_list)) { 2642 WARN_ON(rx_ring->num_ready_recv != 0); 2643 done = false; 2644 break; 2645 } 2646 2647 rfd = nic_rx_pkts(adapter); 2648 2649 if (rfd == NULL) 2650 break; 2651 2652 /* Do not receive any packets until a filter has been set. 2653 * Do not receive any packets until we have link. 2654 * If length is zero, return the RFD in order to advance the 2655 * Free buffer ring. 2656 */ 2657 if (!adapter->packet_filter || 2658 !netif_carrier_ok(adapter->netdev) || 2659 rfd->len == 0) 2660 continue; 2661 2662 /* Increment the number of packets we received */ 2663 adapter->net_stats.rx_packets++; 2664 2665 /* Set the status on the packet, either resources or success */ 2666 if (rx_ring->num_ready_recv < RFD_LOW_WATER_MARK) 2667 dev_warn(&adapter->pdev->dev, "RFD's are running out\n"); 2668 2669 count++; 2670 } 2671 2672 if (count == NUM_PACKETS_HANDLED || !done) { 2673 rx_ring->unfinished_receives = true; 2674 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO, 2675 &adapter->regs->global.watchdog_timer); 2676 } else 2677 /* Watchdog timer will disable itself if appropriate. */ 2678 rx_ring->unfinished_receives = false; 2679} 2680 2681/* et131x_tx_dma_memory_alloc 2682 * 2683 * Allocates memory that will be visible both to the device and to the CPU. 2684 * The OS will pass us packets, pointers to which we will insert in the Tx 2685 * Descriptor queue. 
The device will read this queue to find the packets in 2686 * memory. The device will update the "status" in memory each time it xmits a 2687 * packet. 2688 */ 2689static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter) 2690{ 2691 int desc_size = 0; 2692 struct tx_ring *tx_ring = &adapter->tx_ring; 2693 2694 /* Allocate memory for the TCB's (Transmit Control Block) */ 2695 tx_ring->tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb), 2696 GFP_ATOMIC | GFP_DMA); 2697 if (!tx_ring->tcb_ring) 2698 return -ENOMEM; 2699 2700 desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX); 2701 tx_ring->tx_desc_ring = dma_alloc_coherent(&adapter->pdev->dev, 2702 desc_size, 2703 &tx_ring->tx_desc_ring_pa, 2704 GFP_KERNEL); 2705 if (!tx_ring->tx_desc_ring) { 2706 dev_err(&adapter->pdev->dev, 2707 "Cannot alloc memory for Tx Ring\n"); 2708 return -ENOMEM; 2709 } 2710 2711 /* Save physical address 2712 * 2713 * NOTE: dma_alloc_coherent(), used above to alloc DMA regions, 2714 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses 2715 * are ever returned, make sure the high part is retrieved here before 2716 * storing the adjusted address. 2717 */ 2718 /* Allocate memory for the Tx status block */ 2719 tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev, 2720 sizeof(u32), 2721 &tx_ring->tx_status_pa, 2722 GFP_KERNEL); 2723 if (!tx_ring->tx_status_pa) { 2724 dev_err(&adapter->pdev->dev, 2725 "Cannot alloc memory for Tx status block\n"); 2726 return -ENOMEM; 2727 } 2728 return 0; 2729} 2730 2731/* et131x_tx_dma_memory_free - Free all memory allocated within this module */ 2732static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter) 2733{ 2734 int desc_size = 0; 2735 struct tx_ring *tx_ring = &adapter->tx_ring; 2736 2737 if (tx_ring->tx_desc_ring) { 2738 /* Free memory relating to Tx rings here */ 2739 desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX); 2740 dma_free_coherent(&adapter->pdev->dev, 2741 desc_size, 2742 tx_ring->tx_desc_ring, 2743 tx_ring->tx_desc_ring_pa); 2744 tx_ring->tx_desc_ring = NULL; 2745 } 2746 2747 /* Free memory for the Tx status block */ 2748 if (tx_ring->tx_status) { 2749 dma_free_coherent(&adapter->pdev->dev, 2750 sizeof(u32), 2751 tx_ring->tx_status, 2752 tx_ring->tx_status_pa); 2753 2754 tx_ring->tx_status = NULL; 2755 } 2756 /* Free the memory for the tcb structures */ 2757 kfree(tx_ring->tcb_ring); 2758} 2759 2760/* nic_send_packet - NIC specific send handler for version B silicon. 2761 * @adapter: pointer to our adapter 2762 * @tcb: pointer to struct tcb 2763 */ 2764static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb) 2765{ 2766 u32 i; 2767 struct tx_desc desc[24]; /* 24 x 16 byte */ 2768 u32 frag = 0; 2769 u32 thiscopy, remainder; 2770 struct sk_buff *skb = tcb->skb; 2771 u32 nr_frags = skb_shinfo(skb)->nr_frags + 1; 2772 struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0]; 2773 unsigned long flags; 2774 struct phy_device *phydev = adapter->phydev; 2775 dma_addr_t dma_addr; 2776 struct tx_ring *tx_ring = &adapter->tx_ring; 2777 2778 /* Part of the optimizations of this send routine restrict us to 2779 * sending 24 fragments at a pass. In practice we should never see 2780 * more than 5 fragments. 2781 * 2782 * NOTE: The older version of this function (below) can handle any 2783 * number of fragments. If needed, we can call this function, 2784 * although it is less efficient. 2785 */ 2786 2787 /* nr_frags should be no more than 18. 
*/ 2788 BUILD_BUG_ON(MAX_SKB_FRAGS + 1 > 23); 2789 2790 memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1)); 2791 2792 for (i = 0; i < nr_frags; i++) { 2793 /* If there is something in this element, lets get a 2794 * descriptor from the ring and get the necessary data 2795 */ 2796 if (i == 0) { 2797 /* If the fragments are smaller than a standard MTU, 2798 * then map them to a single descriptor in the Tx 2799 * Desc ring. However, if they're larger, as is 2800 * possible with support for jumbo packets, then 2801 * split them each across 2 descriptors. 2802 * 2803 * This will work until we determine why the hardware 2804 * doesn't seem to like large fragments. 2805 */ 2806 if (skb_headlen(skb) <= 1514) { 2807 /* Low 16bits are length, high is vlan and 2808 * unused currently so zero 2809 */ 2810 desc[frag].len_vlan = skb_headlen(skb); 2811 dma_addr = dma_map_single(&adapter->pdev->dev, 2812 skb->data, 2813 skb_headlen(skb), 2814 DMA_TO_DEVICE); 2815 desc[frag].addr_lo = lower_32_bits(dma_addr); 2816 desc[frag].addr_hi = upper_32_bits(dma_addr); 2817 frag++; 2818 } else { 2819 desc[frag].len_vlan = skb_headlen(skb) / 2; 2820 dma_addr = dma_map_single(&adapter->pdev->dev, 2821 skb->data, 2822 (skb_headlen(skb) / 2), 2823 DMA_TO_DEVICE); 2824 desc[frag].addr_lo = lower_32_bits(dma_addr); 2825 desc[frag].addr_hi = upper_32_bits(dma_addr); 2826 frag++; 2827 2828 desc[frag].len_vlan = skb_headlen(skb) / 2; 2829 dma_addr = dma_map_single(&adapter->pdev->dev, 2830 skb->data + 2831 (skb_headlen(skb) / 2), 2832 (skb_headlen(skb) / 2), 2833 DMA_TO_DEVICE); 2834 desc[frag].addr_lo = lower_32_bits(dma_addr); 2835 desc[frag].addr_hi = upper_32_bits(dma_addr); 2836 frag++; 2837 } 2838 } else { 2839 desc[frag].len_vlan = frags[i - 1].size; 2840 dma_addr = skb_frag_dma_map(&adapter->pdev->dev, 2841 &frags[i - 1], 2842 0, 2843 frags[i - 1].size, 2844 DMA_TO_DEVICE); 2845 desc[frag].addr_lo = lower_32_bits(dma_addr); 2846 desc[frag].addr_hi = upper_32_bits(dma_addr); 2847 frag++; 2848 } 2849 } 2850 2851 if (phydev && phydev->speed == SPEED_1000) { 2852 if (++tx_ring->since_irq == PARM_TX_NUM_BUFS_DEF) { 2853 /* Last element & Interrupt flag */ 2854 desc[frag - 1].flags = 2855 TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT; 2856 tx_ring->since_irq = 0; 2857 } else { /* Last element */ 2858 desc[frag - 1].flags = TXDESC_FLAG_LASTPKT; 2859 } 2860 } else 2861 desc[frag - 1].flags = 2862 TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT; 2863 2864 desc[0].flags |= TXDESC_FLAG_FIRSTPKT; 2865 2866 tcb->index_start = tx_ring->send_idx; 2867 tcb->stale = 0; 2868 2869 spin_lock_irqsave(&adapter->send_hw_lock, flags); 2870 2871 thiscopy = NUM_DESC_PER_RING_TX - INDEX10(tx_ring->send_idx); 2872 2873 if (thiscopy >= frag) { 2874 remainder = 0; 2875 thiscopy = frag; 2876 } else { 2877 remainder = frag - thiscopy; 2878 } 2879 2880 memcpy(tx_ring->tx_desc_ring + INDEX10(tx_ring->send_idx), 2881 desc, 2882 sizeof(struct tx_desc) * thiscopy); 2883 2884 add_10bit(&tx_ring->send_idx, thiscopy); 2885 2886 if (INDEX10(tx_ring->send_idx) == 0 || 2887 INDEX10(tx_ring->send_idx) == NUM_DESC_PER_RING_TX) { 2888 tx_ring->send_idx &= ~ET_DMA10_MASK; 2889 tx_ring->send_idx ^= ET_DMA10_WRAP; 2890 } 2891 2892 if (remainder) { 2893 memcpy(tx_ring->tx_desc_ring, 2894 desc + thiscopy, 2895 sizeof(struct tx_desc) * remainder); 2896 2897 add_10bit(&tx_ring->send_idx, remainder); 2898 } 2899 2900 if (INDEX10(tx_ring->send_idx) == 0) { 2901 if (tx_ring->send_idx) 2902 tcb->index = NUM_DESC_PER_RING_TX - 1; 2903 else 2904 tcb->index = 
ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1); 2905 } else 2906 tcb->index = tx_ring->send_idx - 1; 2907 2908 spin_lock(&adapter->tcb_send_qlock); 2909 2910 if (tx_ring->send_tail) 2911 tx_ring->send_tail->next = tcb; 2912 else 2913 tx_ring->send_head = tcb; 2914 2915 tx_ring->send_tail = tcb; 2916 2917 WARN_ON(tcb->next != NULL); 2918 2919 tx_ring->used++; 2920 2921 spin_unlock(&adapter->tcb_send_qlock); 2922 2923 /* Write the new write pointer back to the device. */ 2924 writel(tx_ring->send_idx, &adapter->regs->txdma.service_request); 2925 2926 /* For Gig only, we use Tx Interrupt coalescing. Enable the software 2927 * timer to wake us up if this packet isn't followed by N more. 2928 */ 2929 if (phydev && phydev->speed == SPEED_1000) { 2930 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO, 2931 &adapter->regs->global.watchdog_timer); 2932 } 2933 spin_unlock_irqrestore(&adapter->send_hw_lock, flags); 2934 2935 return 0; 2936} 2937 2938/* send_packet - Do the work to send a packet 2939 * 2940 * Assumption: Send spinlock has been acquired 2941 */ 2942static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter) 2943{ 2944 int status; 2945 struct tcb *tcb; 2946 u16 *shbufva; 2947 unsigned long flags; 2948 struct tx_ring *tx_ring = &adapter->tx_ring; 2949 2950 /* All packets must have at least a MAC address and a protocol type */ 2951 if (skb->len < ETH_HLEN) 2952 return -EIO; 2953 2954 /* Get a TCB for this packet */ 2955 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); 2956 2957 tcb = tx_ring->tcb_qhead; 2958 2959 if (tcb == NULL) { 2960 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); 2961 return -ENOMEM; 2962 } 2963 2964 tx_ring->tcb_qhead = tcb->next; 2965 2966 if (tx_ring->tcb_qhead == NULL) 2967 tx_ring->tcb_qtail = NULL; 2968 2969 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); 2970 2971 tcb->skb = skb; 2972 2973 if (skb->data != NULL && skb_headlen(skb) >= 6) { 2974 shbufva = (u16 *) skb->data; 2975 2976 if ((shbufva[0] == 0xffff) && 2977 (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) 2978 tcb->flags |= FMP_DEST_BROAD; 2979 else if ((shbufva[0] & 0x3) == 0x0001) 2980 tcb->flags |= FMP_DEST_MULTI; 2981 } 2982 2983 tcb->next = NULL; 2984 2985 /* Call the NIC specific send handler. */ 2986 status = nic_send_packet(adapter, tcb); 2987 2988 if (status != 0) { 2989 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); 2990 2991 if (tx_ring->tcb_qtail) 2992 tx_ring->tcb_qtail->next = tcb; 2993 else 2994 /* Apparently ready Q is empty. */ 2995 tx_ring->tcb_qhead = tcb; 2996 2997 tx_ring->tcb_qtail = tcb; 2998 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); 2999 return status; 3000 } 3001 WARN_ON(tx_ring->used > NUM_TCB); 3002 return 0; 3003} 3004 3005/* et131x_send_packets - This function is called by the OS to send packets */ 3006static int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev) 3007{ 3008 int status = 0; 3009 struct et131x_adapter *adapter = netdev_priv(netdev); 3010 struct tx_ring *tx_ring = &adapter->tx_ring; 3011 3012 /* Send these packets 3013 * 3014 * NOTE: The Linux Tx entry point is only given one packet at a time 3015 * to Tx, so the PacketCount and it's array used makes no sense here 3016 */ 3017 3018 /* TCB is not available */ 3019 if (tx_ring->used >= NUM_TCB) { 3020 /* NOTE: If there's an error on send, no need to queue the 3021 * packet under Linux; if we just send an error up to the 3022 * netif layer, it will resend the skb to us. 
3023 */ 3024 status = -ENOMEM; 3025 } else { 3026 /* We need to see if the link is up; if it's not, make the 3027 * netif layer think we're good and drop the packet 3028 */ 3029 if ((adapter->flags & FMP_ADAPTER_FAIL_SEND_MASK) || 3030 !netif_carrier_ok(netdev)) { 3031 dev_kfree_skb_any(skb); 3032 skb = NULL; 3033 3034 adapter->net_stats.tx_dropped++; 3035 } else { 3036 status = send_packet(skb, adapter); 3037 if (status != 0 && status != -ENOMEM) { 3038 /* On any other error, make netif think we're 3039 * OK and drop the packet 3040 */ 3041 dev_kfree_skb_any(skb); 3042 skb = NULL; 3043 adapter->net_stats.tx_dropped++; 3044 } 3045 } 3046 } 3047 return status; 3048} 3049 3050/* free_send_packet - Recycle a struct tcb 3051 * @adapter: pointer to our adapter 3052 * @tcb: pointer to struct tcb 3053 * 3054 * Complete the packet if necessary 3055 * Assumption - Send spinlock has been acquired 3056 */ 3057static inline void free_send_packet(struct et131x_adapter *adapter, 3058 struct tcb *tcb) 3059{ 3060 unsigned long flags; 3061 struct tx_desc *desc = NULL; 3062 struct net_device_stats *stats = &adapter->net_stats; 3063 struct tx_ring *tx_ring = &adapter->tx_ring; 3064 u64 dma_addr; 3065 3066 if (tcb->flags & FMP_DEST_BROAD) 3067 atomic_inc(&adapter->stats.broadcast_pkts_xmtd); 3068 else if (tcb->flags & FMP_DEST_MULTI) 3069 atomic_inc(&adapter->stats.multicast_pkts_xmtd); 3070 else 3071 atomic_inc(&adapter->stats.unicast_pkts_xmtd); 3072 3073 if (tcb->skb) { 3074 stats->tx_bytes += tcb->skb->len; 3075 3076 /* Iterate through the TX descriptors on the ring 3077 * corresponding to this packet and umap the fragments 3078 * they point to 3079 */ 3080 do { 3081 desc = tx_ring->tx_desc_ring + 3082 INDEX10(tcb->index_start); 3083 3084 dma_addr = desc->addr_lo; 3085 dma_addr |= (u64)desc->addr_hi << 32; 3086 3087 dma_unmap_single(&adapter->pdev->dev, 3088 dma_addr, 3089 desc->len_vlan, DMA_TO_DEVICE); 3090 3091 add_10bit(&tcb->index_start, 1); 3092 if (INDEX10(tcb->index_start) >= 3093 NUM_DESC_PER_RING_TX) { 3094 tcb->index_start &= ~ET_DMA10_MASK; 3095 tcb->index_start ^= ET_DMA10_WRAP; 3096 } 3097 } while (desc != tx_ring->tx_desc_ring + INDEX10(tcb->index)); 3098 3099 dev_kfree_skb_any(tcb->skb); 3100 } 3101 3102 memset(tcb, 0, sizeof(struct tcb)); 3103 3104 /* Add the TCB to the Ready Q */ 3105 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); 3106 3107 adapter->net_stats.tx_packets++; 3108 3109 if (tx_ring->tcb_qtail) 3110 tx_ring->tcb_qtail->next = tcb; 3111 else 3112 /* Apparently ready Q is empty. */ 3113 tx_ring->tcb_qhead = tcb; 3114 3115 tx_ring->tcb_qtail = tcb; 3116 3117 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); 3118 WARN_ON(tx_ring->used < 0); 3119} 3120 3121/* et131x_free_busy_send_packets - Free and complete the stopped active sends 3122 * 3123 * Assumption - Send spinlock has been acquired 3124 */ 3125static void et131x_free_busy_send_packets(struct et131x_adapter *adapter) 3126{ 3127 struct tcb *tcb; 3128 unsigned long flags; 3129 u32 freed = 0; 3130 struct tx_ring *tx_ring = &adapter->tx_ring; 3131 3132 /* Any packets being sent? 
Check the first TCB on the send list */ 3133 spin_lock_irqsave(&adapter->tcb_send_qlock, flags); 3134 3135 tcb = tx_ring->send_head; 3136 3137 while (tcb != NULL && freed < NUM_TCB) { 3138 struct tcb *next = tcb->next; 3139 3140 tx_ring->send_head = next; 3141 3142 if (next == NULL) 3143 tx_ring->send_tail = NULL; 3144 3145 tx_ring->used--; 3146 3147 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); 3148 3149 freed++; 3150 free_send_packet(adapter, tcb); 3151 3152 spin_lock_irqsave(&adapter->tcb_send_qlock, flags); 3153 3154 tcb = tx_ring->send_head; 3155 } 3156 3157 WARN_ON(freed == NUM_TCB); 3158 3159 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); 3160 3161 tx_ring->used = 0; 3162} 3163 3164/* et131x_handle_send_interrupt - Interrupt handler for sending processing 3165 * 3166 * Re-claim the send resources, complete sends and get more to send from 3167 * the send wait queue. 3168 * 3169 * Assumption - Send spinlock has been acquired 3170 */ 3171static void et131x_handle_send_interrupt(struct et131x_adapter *adapter) 3172{ 3173 unsigned long flags; 3174 u32 serviced; 3175 struct tcb *tcb; 3176 u32 index; 3177 struct tx_ring *tx_ring = &adapter->tx_ring; 3178 3179 serviced = readl(&adapter->regs->txdma.new_service_complete); 3180 index = INDEX10(serviced); 3181 3182 /* Has the ring wrapped? Process any descriptors that do not have 3183 * the same "wrap" indicator as the current completion indicator 3184 */ 3185 spin_lock_irqsave(&adapter->tcb_send_qlock, flags); 3186 3187 tcb = tx_ring->send_head; 3188 3189 while (tcb && 3190 ((serviced ^ tcb->index) & ET_DMA10_WRAP) && 3191 index < INDEX10(tcb->index)) { 3192 tx_ring->used--; 3193 tx_ring->send_head = tcb->next; 3194 if (tcb->next == NULL) 3195 tx_ring->send_tail = NULL; 3196 3197 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); 3198 free_send_packet(adapter, tcb); 3199 spin_lock_irqsave(&adapter->tcb_send_qlock, flags); 3200 3201 /* Goto the next packet */ 3202 tcb = tx_ring->send_head; 3203 } 3204 while (tcb && 3205 !((serviced ^ tcb->index) & ET_DMA10_WRAP) 3206 && index > (tcb->index & ET_DMA10_MASK)) { 3207 tx_ring->used--; 3208 tx_ring->send_head = tcb->next; 3209 if (tcb->next == NULL) 3210 tx_ring->send_tail = NULL; 3211 3212 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); 3213 free_send_packet(adapter, tcb); 3214 spin_lock_irqsave(&adapter->tcb_send_qlock, flags); 3215 3216 /* Goto the next packet */ 3217 tcb = tx_ring->send_head; 3218 } 3219 3220 /* Wake up the queue when we hit a low-water mark */ 3221 if (tx_ring->used <= NUM_TCB / 3) 3222 netif_wake_queue(adapter->netdev); 3223 3224 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); 3225} 3226 3227static int et131x_get_settings(struct net_device *netdev, 3228 struct ethtool_cmd *cmd) 3229{ 3230 struct et131x_adapter *adapter = netdev_priv(netdev); 3231 3232 return phy_ethtool_gset(adapter->phydev, cmd); 3233} 3234 3235static int et131x_set_settings(struct net_device *netdev, 3236 struct ethtool_cmd *cmd) 3237{ 3238 struct et131x_adapter *adapter = netdev_priv(netdev); 3239 3240 return phy_ethtool_sset(adapter->phydev, cmd); 3241} 3242 3243static int et131x_get_regs_len(struct net_device *netdev) 3244{ 3245#define ET131X_REGS_LEN 256 3246 return ET131X_REGS_LEN * sizeof(u32); 3247} 3248 3249static void et131x_get_regs(struct net_device *netdev, 3250 struct ethtool_regs *regs, void *regs_data) 3251{ 3252 struct et131x_adapter *adapter = netdev_priv(netdev); 3253 struct address_map __iomem *aregs = adapter->regs; 3254 u32 *regs_buff = 
regs_data; 3255 u32 num = 0; 3256 u16 tmp; 3257 3258 memset(regs_data, 0, et131x_get_regs_len(netdev)); 3259 3260 regs->version = (1 << 24) | (adapter->pdev->revision << 16) | 3261 adapter->pdev->device; 3262 3263 /* PHY regs */ 3264 et131x_mii_read(adapter, MII_BMCR, &tmp); 3265 regs_buff[num++] = tmp; 3266 et131x_mii_read(adapter, MII_BMSR, &tmp); 3267 regs_buff[num++] = tmp; 3268 et131x_mii_read(adapter, MII_PHYSID1, &tmp); 3269 regs_buff[num++] = tmp; 3270 et131x_mii_read(adapter, MII_PHYSID2, &tmp); 3271 regs_buff[num++] = tmp; 3272 et131x_mii_read(adapter, MII_ADVERTISE, &tmp); 3273 regs_buff[num++] = tmp; 3274 et131x_mii_read(adapter, MII_LPA, &tmp); 3275 regs_buff[num++] = tmp; 3276 et131x_mii_read(adapter, MII_EXPANSION, &tmp); 3277 regs_buff[num++] = tmp; 3278 /* Autoneg next page transmit reg */ 3279 et131x_mii_read(adapter, 0x07, &tmp); 3280 regs_buff[num++] = tmp; 3281 /* Link partner next page reg */ 3282 et131x_mii_read(adapter, 0x08, &tmp); 3283 regs_buff[num++] = tmp; 3284 et131x_mii_read(adapter, MII_CTRL1000, &tmp); 3285 regs_buff[num++] = tmp; 3286 et131x_mii_read(adapter, MII_STAT1000, &tmp); 3287 regs_buff[num++] = tmp; 3288 et131x_mii_read(adapter, 0x0b, &tmp); 3289 regs_buff[num++] = tmp; 3290 et131x_mii_read(adapter, 0x0c, &tmp); 3291 regs_buff[num++] = tmp; 3292 et131x_mii_read(adapter, MII_MMD_CTRL, &tmp); 3293 regs_buff[num++] = tmp; 3294 et131x_mii_read(adapter, MII_MMD_DATA, &tmp); 3295 regs_buff[num++] = tmp; 3296 et131x_mii_read(adapter, MII_ESTATUS, &tmp); 3297 regs_buff[num++] = tmp; 3298 3299 et131x_mii_read(adapter, PHY_INDEX_REG, &tmp); 3300 regs_buff[num++] = tmp; 3301 et131x_mii_read(adapter, PHY_DATA_REG, &tmp); 3302 regs_buff[num++] = tmp; 3303 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, &tmp); 3304 regs_buff[num++] = tmp; 3305 et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL, &tmp); 3306 regs_buff[num++] = tmp; 3307 et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL + 1, &tmp); 3308 regs_buff[num++] = tmp; 3309 3310 et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL, &tmp); 3311 regs_buff[num++] = tmp; 3312 et131x_mii_read(adapter, PHY_CONFIG, &tmp); 3313 regs_buff[num++] = tmp; 3314 et131x_mii_read(adapter, PHY_PHY_CONTROL, &tmp); 3315 regs_buff[num++] = tmp; 3316 et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &tmp); 3317 regs_buff[num++] = tmp; 3318 et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &tmp); 3319 regs_buff[num++] = tmp; 3320 et131x_mii_read(adapter, PHY_PHY_STATUS, &tmp); 3321 regs_buff[num++] = tmp; 3322 et131x_mii_read(adapter, PHY_LED_1, &tmp); 3323 regs_buff[num++] = tmp; 3324 et131x_mii_read(adapter, PHY_LED_2, &tmp); 3325 regs_buff[num++] = tmp; 3326 3327 /* Global regs */ 3328 regs_buff[num++] = readl(&aregs->global.txq_start_addr); 3329 regs_buff[num++] = readl(&aregs->global.txq_end_addr); 3330 regs_buff[num++] = readl(&aregs->global.rxq_start_addr); 3331 regs_buff[num++] = readl(&aregs->global.rxq_end_addr); 3332 regs_buff[num++] = readl(&aregs->global.pm_csr); 3333 regs_buff[num++] = adapter->stats.interrupt_status; 3334 regs_buff[num++] = readl(&aregs->global.int_mask); 3335 regs_buff[num++] = readl(&aregs->global.int_alias_clr_en); 3336 regs_buff[num++] = readl(&aregs->global.int_status_alias); 3337 regs_buff[num++] = readl(&aregs->global.sw_reset); 3338 regs_buff[num++] = readl(&aregs->global.slv_timer); 3339 regs_buff[num++] = readl(&aregs->global.msi_config); 3340 regs_buff[num++] = readl(&aregs->global.loopback); 3341 regs_buff[num++] = readl(&aregs->global.watchdog_timer); 3342 3343 /* TXDMA regs */ 3344 regs_buff[num++] = 
readl(&aregs->txdma.csr); 3345 regs_buff[num++] = readl(&aregs->txdma.pr_base_hi); 3346 regs_buff[num++] = readl(&aregs->txdma.pr_base_lo); 3347 regs_buff[num++] = readl(&aregs->txdma.pr_num_des); 3348 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr); 3349 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext); 3350 regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr); 3351 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi); 3352 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo); 3353 regs_buff[num++] = readl(&aregs->txdma.service_request); 3354 regs_buff[num++] = readl(&aregs->txdma.service_complete); 3355 regs_buff[num++] = readl(&aregs->txdma.cache_rd_index); 3356 regs_buff[num++] = readl(&aregs->txdma.cache_wr_index); 3357 regs_buff[num++] = readl(&aregs->txdma.tx_dma_error); 3358 regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt); 3359 regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt); 3360 regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt); 3361 regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt); 3362 regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt); 3363 regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt); 3364 regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt); 3365 regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt); 3366 regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt); 3367 regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt); 3368 regs_buff[num++] = readl(&aregs->txdma.new_service_complete); 3369 regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt); 3370 3371 /* RXDMA regs */ 3372 regs_buff[num++] = readl(&aregs->rxdma.csr); 3373 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi); 3374 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo); 3375 regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done); 3376 regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time); 3377 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr); 3378 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext); 3379 regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr); 3380 regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi); 3381 regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo); 3382 regs_buff[num++] = readl(&aregs->rxdma.psr_num_des); 3383 regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset); 3384 regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset); 3385 regs_buff[num++] = readl(&aregs->rxdma.psr_access_index); 3386 regs_buff[num++] = readl(&aregs->rxdma.psr_min_des); 3387 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo); 3388 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi); 3389 regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des); 3390 regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset); 3391 regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset); 3392 regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index); 3393 regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des); 3394 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo); 3395 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi); 3396 regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des); 3397 regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset); 3398 regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset); 3399 regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index); 3400 regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des); 3401} 3402 3403static void et131x_get_drvinfo(struct net_device *netdev, 3404 struct ethtool_drvinfo *info) 3405{ 3406 struct et131x_adapter *adapter = netdev_priv(netdev); 3407 3408 strlcpy(info->driver, 
DRIVER_NAME, sizeof(info->driver)); 3409 strlcpy(info->version, DRIVER_VERSION, sizeof(info->version)); 3410 strlcpy(info->bus_info, pci_name(adapter->pdev), 3411 sizeof(info->bus_info)); 3412} 3413 3414static struct ethtool_ops et131x_ethtool_ops = { 3415 .get_settings = et131x_get_settings, 3416 .set_settings = et131x_set_settings, 3417 .get_drvinfo = et131x_get_drvinfo, 3418 .get_regs_len = et131x_get_regs_len, 3419 .get_regs = et131x_get_regs, 3420 .get_link = ethtool_op_get_link, 3421}; 3422 3423/* et131x_hwaddr_init - set up the MAC Address on the ET1310 */ 3424static void et131x_hwaddr_init(struct et131x_adapter *adapter) 3425{ 3426 /* If have our default mac from init and no mac address from 3427 * EEPROM then we need to generate the last octet and set it on the 3428 * device 3429 */ 3430 if (is_zero_ether_addr(adapter->rom_addr)) { 3431 /* We need to randomly generate the last octet so we 3432 * decrease our chances of setting the mac address to 3433 * same as another one of our cards in the system 3434 */ 3435 get_random_bytes(&adapter->addr[5], 1); 3436 /* We have the default value in the register we are 3437 * working with so we need to copy the current 3438 * address into the permanent address 3439 */ 3440 memcpy(adapter->rom_addr, 3441 adapter->addr, ETH_ALEN); 3442 } else { 3443 /* We do not have an override address, so set the 3444 * current address to the permanent address and add 3445 * it to the device 3446 */ 3447 memcpy(adapter->addr, 3448 adapter->rom_addr, ETH_ALEN); 3449 } 3450} 3451 3452/* et131x_pci_init - initial PCI setup 3453 * 3454 * Perform the initial setup of PCI registers and if possible initialise 3455 * the MAC address. At this point the I/O registers have yet to be mapped 3456 */ 3457static int et131x_pci_init(struct et131x_adapter *adapter, 3458 struct pci_dev *pdev) 3459{ 3460 u16 max_payload; 3461 int i, rc; 3462 3463 rc = et131x_init_eeprom(adapter); 3464 if (rc < 0) 3465 goto out; 3466 3467 if (!pci_is_pcie(pdev)) { 3468 dev_err(&pdev->dev, "Missing PCIe capabilities\n"); 3469 goto err_out; 3470 } 3471 3472 /* Let's set up the PORT LOGIC Register. */ 3473 3474 /* Program the Ack/Nak latency and replay timers */ 3475 max_payload = pdev->pcie_mpss; 3476 3477 if (max_payload < 2) { 3478 static const u16 acknak[2] = { 0x76, 0xD0 }; 3479 static const u16 replay[2] = { 0x1E0, 0x2ED }; 3480 3481 if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK, 3482 acknak[max_payload])) { 3483 dev_err(&pdev->dev, 3484 "Could not write PCI config space for ACK/NAK\n"); 3485 goto err_out; 3486 } 3487 if (pci_write_config_word(pdev, ET1310_PCI_REPLAY, 3488 replay[max_payload])) { 3489 dev_err(&pdev->dev, 3490 "Could not write PCI config space for Replay Timer\n"); 3491 goto err_out; 3492 } 3493 } 3494 3495 /* l0s and l1 latency timers. We are using default values. 
3496 * Representing 001 for L0s and 010 for L1 3497 */ 3498 if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) { 3499 dev_err(&pdev->dev, 3500 "Could not write PCI config space for Latency Timers\n"); 3501 goto err_out; 3502 } 3503 3504 /* Change the max read size to 2k */ 3505 if (pcie_set_readrq(pdev, 2048)) { 3506 dev_err(&pdev->dev, 3507 "Couldn't change PCI config space for Max read size\n"); 3508 goto err_out; 3509 } 3510 3511 /* Get MAC address from config space if an eeprom exists, otherwise 3512 * the MAC address there will not be valid 3513 */ 3514 if (!adapter->has_eeprom) { 3515 et131x_hwaddr_init(adapter); 3516 return 0; 3517 } 3518 3519 for (i = 0; i < ETH_ALEN; i++) { 3520 if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i, 3521 adapter->rom_addr + i)) { 3522 dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n"); 3523 goto err_out; 3524 } 3525 } 3526 memcpy(adapter->addr, adapter->rom_addr, ETH_ALEN); 3527out: 3528 return rc; 3529err_out: 3530 rc = -EIO; 3531 goto out; 3532} 3533 3534/* et131x_error_timer_handler 3535 * @data: timer-specific variable; here a pointer to our adapter structure 3536 * 3537 * The routine called when the error timer expires, to track the number of 3538 * recurring errors. 3539 */ 3540static void et131x_error_timer_handler(unsigned long data) 3541{ 3542 struct et131x_adapter *adapter = (struct et131x_adapter *) data; 3543 struct phy_device *phydev = adapter->phydev; 3544 3545 if (et1310_in_phy_coma(adapter)) { 3546 /* Bring the device immediately out of coma, to 3547 * prevent it from sleeping indefinitely, this 3548 * mechanism could be improved! 3549 */ 3550 et1310_disable_phy_coma(adapter); 3551 adapter->boot_coma = 20; 3552 } else { 3553 et1310_update_macstat_host_counters(adapter); 3554 } 3555 3556 if (!phydev->link && adapter->boot_coma < 11) 3557 adapter->boot_coma++; 3558 3559 if (adapter->boot_coma == 10) { 3560 if (!phydev->link) { 3561 if (!et1310_in_phy_coma(adapter)) { 3562 /* NOTE - This was originally a 'sync with 3563 * interrupt'. How to do that under Linux? 3564 */ 3565 et131x_enable_interrupts(adapter); 3566 et1310_enable_phy_coma(adapter); 3567 } 3568 } 3569 } 3570 3571 /* This is a periodic timer, so reschedule */ 3572 mod_timer(&adapter->error_timer, jiffies + TX_ERROR_PERIOD * HZ / 1000); 3573} 3574 3575/* et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx */ 3576static void et131x_adapter_memory_free(struct et131x_adapter *adapter) 3577{ 3578 et131x_tx_dma_memory_free(adapter); 3579 et131x_rx_dma_memory_free(adapter); 3580} 3581 3582/* et131x_adapter_memory_alloc 3583 * Allocate all the memory blocks for send, receive and others. 
3584 */ 3585static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter) 3586{ 3587 int status; 3588 3589 /* Allocate memory for the Tx Ring */ 3590 status = et131x_tx_dma_memory_alloc(adapter); 3591 if (status) { 3592 dev_err(&adapter->pdev->dev, 3593 "et131x_tx_dma_memory_alloc FAILED\n"); 3594 return status; 3595 } 3596 /* Receive buffer memory allocation */ 3597 status = et131x_rx_dma_memory_alloc(adapter); 3598 if (status) { 3599 dev_err(&adapter->pdev->dev, 3600 "et131x_rx_dma_memory_alloc FAILED\n"); 3601 et131x_tx_dma_memory_free(adapter); 3602 return status; 3603 } 3604 3605 /* Init receive data structures */ 3606 status = et131x_init_recv(adapter); 3607 if (status) { 3608 dev_err(&adapter->pdev->dev, "et131x_init_recv FAILED\n"); 3609 et131x_adapter_memory_free(adapter); 3610 } 3611 return status; 3612} 3613 3614static void et131x_adjust_link(struct net_device *netdev) 3615{ 3616 struct et131x_adapter *adapter = netdev_priv(netdev); 3617 struct phy_device *phydev = adapter->phydev; 3618 3619 if (!phydev) 3620 return; 3621 if (phydev->link == adapter->link) 3622 return; 3623 3624 /* Check to see if we are in coma mode and if 3625 * so, disable it because we will not be able 3626 * to read PHY values until we are out. 3627 */ 3628 if (et1310_in_phy_coma(adapter)) 3629 et1310_disable_phy_coma(adapter); 3630 3631 adapter->link = phydev->link; 3632 phy_print_status(phydev); 3633 3634 if (phydev->link) { 3635 adapter->boot_coma = 20; 3636 if (phydev->speed == SPEED_10) { 3637 u16 register18; 3638 3639 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, 3640 &register18); 3641 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, 3642 register18 | 0x4); 3643 et131x_mii_write(adapter, PHY_INDEX_REG, 3644 register18 | 0x8402); 3645 et131x_mii_write(adapter, PHY_DATA_REG, 3646 register18 | 511); 3647 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, 3648 register18); 3649 } 3650 3651 et1310_config_flow_control(adapter); 3652 3653 if (phydev->speed == SPEED_1000 && 3654 adapter->registry_jumbo_packet > 2048) { 3655 u16 reg; 3656 3657 et131x_mii_read(adapter, PHY_CONFIG, &reg); 3658 reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH; 3659 reg |= ET_PHY_CONFIG_FIFO_DEPTH_32; 3660 et131x_mii_write(adapter, PHY_CONFIG, reg); 3661 } 3662 3663 et131x_set_rx_dma_timer(adapter); 3664 et1310_config_mac_regs2(adapter); 3665 } else { 3666 adapter->boot_coma = 0; 3667 3668 if (phydev->speed == SPEED_10) { 3669 u16 register18; 3670 3671 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, 3672 &register18); 3673 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, 3674 register18 | 0x4); 3675 et131x_mii_write(adapter, PHY_INDEX_REG, 3676 register18 | 0x8402); 3677 et131x_mii_write(adapter, PHY_DATA_REG, 3678 register18 | 511); 3679 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, 3680 register18); 3681 } 3682 3683 /* Free the packets being actively sent & stopped */ 3684 et131x_free_busy_send_packets(adapter); 3685 3686 /* Re-initialize the send structures */ 3687 et131x_init_send(adapter); 3688 3689 /* Bring the device back to the state it was during 3690 * init prior to autonegotiation being complete. This 3691 * way, when we get the auto-neg complete interrupt, 3692 * we can complete init by calling config_mac_regs2. 
3693 */ 3694 et131x_soft_reset(adapter); 3695 3696 /* Setup ET1310 as per the documentation */ 3697 et131x_adapter_setup(adapter); 3698 3699 /* perform reset of tx/rx */ 3700 et131x_disable_txrx(netdev); 3701 et131x_enable_txrx(netdev); 3702 } 3703} 3704 3705static int et131x_mii_probe(struct net_device *netdev) 3706{ 3707 struct et131x_adapter *adapter = netdev_priv(netdev); 3708 struct phy_device *phydev = NULL; 3709 3710 phydev = phy_find_first(adapter->mii_bus); 3711 if (!phydev) { 3712 dev_err(&adapter->pdev->dev, "no PHY found\n"); 3713 return -ENODEV; 3714 } 3715 3716 phydev = phy_connect(netdev, dev_name(&phydev->dev), 3717 &et131x_adjust_link, PHY_INTERFACE_MODE_MII); 3718 3719 if (IS_ERR(phydev)) { 3720 dev_err(&adapter->pdev->dev, "Could not attach to PHY\n"); 3721 return PTR_ERR(phydev); 3722 } 3723 3724 phydev->supported &= (SUPPORTED_10baseT_Half 3725 | SUPPORTED_10baseT_Full 3726 | SUPPORTED_100baseT_Half 3727 | SUPPORTED_100baseT_Full 3728 | SUPPORTED_Autoneg 3729 | SUPPORTED_MII 3730 | SUPPORTED_TP); 3731 3732 if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST) 3733 phydev->supported |= SUPPORTED_1000baseT_Full; 3734 3735 phydev->advertising = phydev->supported; 3736 adapter->phydev = phydev; 3737 3738 dev_info(&adapter->pdev->dev, 3739 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", 3740 phydev->drv->name, dev_name(&phydev->dev)); 3741 3742 return 0; 3743} 3744 3745/* et131x_adapter_init 3746 * 3747 * Initialize the data structures for the et131x_adapter object and link 3748 * them together with the platform provided device structures. 3749 */ 3750static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev, 3751 struct pci_dev *pdev) 3752{ 3753 static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 }; 3754 3755 struct et131x_adapter *adapter; 3756 3757 /* Allocate private adapter struct and copy in relevant information */ 3758 adapter = netdev_priv(netdev); 3759 adapter->pdev = pci_dev_get(pdev); 3760 adapter->netdev = netdev; 3761 3762 /* Initialize spinlocks here */ 3763 spin_lock_init(&adapter->lock); 3764 spin_lock_init(&adapter->tcb_send_qlock); 3765 spin_lock_init(&adapter->tcb_ready_qlock); 3766 spin_lock_init(&adapter->send_hw_lock); 3767 spin_lock_init(&adapter->rcv_lock); 3768 spin_lock_init(&adapter->fbr_lock); 3769 3770 adapter->registry_jumbo_packet = 1514; /* 1514-9216 */ 3771 3772 /* Set the MAC address to a default */ 3773 memcpy(adapter->addr, default_mac, ETH_ALEN); 3774 3775 return adapter; 3776} 3777 3778/* et131x_pci_remove 3779 * 3780 * Registered in the pci_driver structure, this function is called when the 3781 * PCI subsystem detects that a PCI device which matches the information 3782 * contained in the pci_device_id table has been removed. 3783 */ 3784static void et131x_pci_remove(struct pci_dev *pdev) 3785{ 3786 struct net_device *netdev = pci_get_drvdata(pdev); 3787 struct et131x_adapter *adapter = netdev_priv(netdev); 3788 3789 unregister_netdev(netdev); 3790 phy_disconnect(adapter->phydev); 3791 mdiobus_unregister(adapter->mii_bus); 3792 cancel_work_sync(&adapter->task); 3793 kfree(adapter->mii_bus->irq); 3794 mdiobus_free(adapter->mii_bus); 3795 3796 et131x_adapter_memory_free(adapter); 3797 iounmap(adapter->regs); 3798 pci_dev_put(pdev); 3799 3800 free_netdev(netdev); 3801 pci_release_regions(pdev); 3802 pci_disable_device(pdev); 3803} 3804 3805/* et131x_up - Bring up a device for use. 
*/ 3806static void et131x_up(struct net_device *netdev) 3807{ 3808 struct et131x_adapter *adapter = netdev_priv(netdev); 3809 3810 et131x_enable_txrx(netdev); 3811 phy_start(adapter->phydev); 3812} 3813 3814/* et131x_down - Bring down the device */ 3815static void et131x_down(struct net_device *netdev) 3816{ 3817 struct et131x_adapter *adapter = netdev_priv(netdev); 3818 3819 /* Save the timestamp for the TX watchdog, prevent a timeout */ 3820 netdev->trans_start = jiffies; 3821 3822 phy_stop(adapter->phydev); 3823 et131x_disable_txrx(netdev); 3824} 3825 3826#ifdef CONFIG_PM_SLEEP 3827static int et131x_suspend(struct device *dev) 3828{ 3829 struct pci_dev *pdev = to_pci_dev(dev); 3830 struct net_device *netdev = pci_get_drvdata(pdev); 3831 3832 if (netif_running(netdev)) { 3833 netif_device_detach(netdev); 3834 et131x_down(netdev); 3835 pci_save_state(pdev); 3836 } 3837 3838 return 0; 3839} 3840 3841static int et131x_resume(struct device *dev) 3842{ 3843 struct pci_dev *pdev = to_pci_dev(dev); 3844 struct net_device *netdev = pci_get_drvdata(pdev); 3845 3846 if (netif_running(netdev)) { 3847 pci_restore_state(pdev); 3848 et131x_up(netdev); 3849 netif_device_attach(netdev); 3850 } 3851 3852 return 0; 3853} 3854 3855static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume); 3856#define ET131X_PM_OPS (&et131x_pm_ops) 3857#else 3858#define ET131X_PM_OPS NULL 3859#endif 3860 3861/* et131x_isr - The Interrupt Service Routine for the driver. 3862 * @irq: the IRQ on which the interrupt was received. 3863 * @dev_id: device-specific info (here a pointer to a net_device struct) 3864 * 3865 * Returns a value indicating if the interrupt was handled. 3866 */ 3867static irqreturn_t et131x_isr(int irq, void *dev_id) 3868{ 3869 bool handled = true; 3870 struct net_device *netdev = (struct net_device *)dev_id; 3871 struct et131x_adapter *adapter = netdev_priv(netdev); 3872 struct rx_ring *rx_ring = &adapter->rx_ring; 3873 struct tx_ring *tx_ring = &adapter->tx_ring; 3874 u32 status; 3875 3876 if (!netif_device_present(netdev)) { 3877 handled = false; 3878 goto out; 3879 } 3880 3881 /* If the adapter is in low power state, then it should not 3882 * recognize any interrupt 3883 */ 3884 3885 /* Disable Device Interrupts */ 3886 et131x_disable_interrupts(adapter); 3887 3888 /* Get a copy of the value in the interrupt status register 3889 * so we can process the interrupting section 3890 */ 3891 status = readl(&adapter->regs->global.int_status); 3892 3893 if (adapter->flowcontrol == FLOW_TXONLY || 3894 adapter->flowcontrol == FLOW_BOTH) { 3895 status &= ~INT_MASK_ENABLE; 3896 } else { 3897 status &= ~INT_MASK_ENABLE_NO_FLOW; 3898 } 3899 3900 /* Make sure this is our interrupt */ 3901 if (!status) { 3902 handled = false; 3903 et131x_enable_interrupts(adapter); 3904 goto out; 3905 } 3906 3907 /* This is our interrupt, so process accordingly */ 3908 3909 if (status & ET_INTR_WATCHDOG) { 3910 struct tcb *tcb = tx_ring->send_head; 3911 3912 if (tcb) 3913 if (++tcb->stale > 1) 3914 status |= ET_INTR_TXDMA_ISR; 3915 3916 if (rx_ring->unfinished_receives) 3917 status |= ET_INTR_RXDMA_XFR_DONE; 3918 else if (tcb == NULL) 3919 writel(0, &adapter->regs->global.watchdog_timer); 3920 3921 status &= ~ET_INTR_WATCHDOG; 3922 } 3923 3924 if (!status) { 3925 /* This interrupt has in some way been "handled" by 3926 * the ISR. Either it was a spurious Rx interrupt, or 3927 * it was a Tx interrupt that has been filtered by 3928 * the ISR. 
3929 */ 3930 et131x_enable_interrupts(adapter); 3931 goto out; 3932 } 3933 3934 /* We need to save the interrupt status value for use in our 3935 * DPC. We will clear the software copy of that in that 3936 * routine. 3937 */ 3938 adapter->stats.interrupt_status = status; 3939 3940 /* Schedule the ISR handler as a bottom-half task on the 3941 * kernel's shared workqueue, where the real interrupt 3942 * handling work is done 3943 */ 3944 schedule_work(&adapter->task); 3945out: 3946 return IRQ_RETVAL(handled); 3947} 3948 3949/* et131x_isr_handler - The ISR handler 3950 * 3951 * scheduled to run in a deferred context by the ISR. This is where the ISR's 3952 * work actually gets done. 3953 */ 3954static void et131x_isr_handler(struct work_struct *work) 3955{ 3956 struct et131x_adapter *adapter = 3957 container_of(work, struct et131x_adapter, task); 3958 u32 status = adapter->stats.interrupt_status; 3959 struct address_map __iomem *iomem = adapter->regs; 3960 3961 /* These first two are by far the most common. Once handled, we clear 3962 * their two bits in the status word. If the word is now zero, we 3963 * exit. 3964 */ 3965 /* Handle all the completed Transmit interrupts */ 3966 if (status & ET_INTR_TXDMA_ISR) 3967 et131x_handle_send_interrupt(adapter); 3968 3969 /* Handle all the completed Receive interrupts */ 3970 if (status & ET_INTR_RXDMA_XFR_DONE) 3971 et131x_handle_recv_interrupt(adapter); 3972 3973 status &= ~(ET_INTR_TXDMA_ISR | ET_INTR_RXDMA_XFR_DONE); 3974 3975 if (!status) 3976 goto out; 3977 3978 /* Handle the TXDMA Error interrupt */ 3979 if (status & ET_INTR_TXDMA_ERR) { 3980 /* Following read also clears the register (COR) */ 3981 u32 txdma_err = readl(&iomem->txdma.tx_dma_error); 3982 3983 dev_warn(&adapter->pdev->dev, 3984 "TXDMA_ERR interrupt, error = %d\n", 3985 txdma_err); 3986 } 3987 3988 /* Handle Free Buffer Ring 0 and 1 Low interrupt */ 3989 if (status & (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) { 3990 /* This indicates the number of unused buffers in RXDMA free 3991 * buffer ring 0 is <= the limit you programmed. Free buffer 3992 * resources need to be returned. Free buffers are consumed as 3993 * packets are passed from the network to the host. The host 3994 * becomes aware of the packets from the contents of the packet 3995 * status ring. This ring is queried when the packet done 3996 * interrupt occurs. Packets are then passed to the OS. When 3997 * the OS is done with the packets the resources can be 3998 * returned to the ET1310 for re-use. This interrupt is one 3999 * method of returning resources. 4000 */ 4001 4002 /* If the user has flow control on, then we will 4003 * send a pause packet, otherwise just exit 4004 */ 4005 if (adapter->flowcontrol == FLOW_TXONLY || 4006 adapter->flowcontrol == FLOW_BOTH) { 4007 u32 pm_csr; 4008 4009 /* Tell the device to send a pause packet via the back 4010 * pressure register (bp req and bp xon/xoff) 4011 */ 4012 pm_csr = readl(&iomem->global.pm_csr); 4013 if (!et1310_in_phy_coma(adapter)) 4014 writel(3, &iomem->txmac.bp_ctrl); 4015 } 4016 } 4017 4018 /* Handle Packet Status Ring Low Interrupt */ 4019 if (status & ET_INTR_RXDMA_STAT_LOW) { 4020 /* Same idea as with the two Free Buffer Rings. Packets going 4021 * from the network to the host each consume a free buffer 4022 * resource and a packet status resource. These resources are 4023 * passed to the OS. When the OS is done with the resources, 4024 * they need to be returned to the ET1310. This is one method 4025 * of returning the resources.
4026 */ 4027 } 4028 4029 /* Handle RXDMA Error Interrupt */ 4030 if (status & ET_INTR_RXDMA_ERR) { 4031 /* The rxdma_error interrupt is sent when a time-out on a 4032 * request issued by the JAGCore has occurred or a completion is 4033 * returned with an un-successful status. In both cases the 4034 * request is considered complete. The JAGCore will 4035 * automatically re-try the request in question. Normally 4036 * information on events like these are sent to the host using 4037 * the "Advanced Error Reporting" capability. This interrupt is 4038 * another way of getting similar information. The only thing 4039 * required is to clear the interrupt by reading the ISR in the 4040 * global resources. The JAGCore will do a re-try on the 4041 * request. Normally you should never see this interrupt. If 4042 * you start to see this interrupt occurring frequently then 4043 * something bad has occurred. A reset might be the thing to do. 4044 */ 4045 /* TRAP();*/ 4046 4047 dev_warn(&adapter->pdev->dev, 4048 "RxDMA_ERR interrupt, error %x\n", 4049 readl(&iomem->txmac.tx_test)); 4050 } 4051 4052 /* Handle the Wake on LAN Event */ 4053 if (status & ET_INTR_WOL) { 4054 /* This is a secondary interrupt for wake on LAN. The driver 4055 * should never see this, if it does, something serious is 4056 * wrong. We will TRAP the message when we are in DBG mode, 4057 * otherwise we will ignore it. 4058 */ 4059 dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n"); 4060 } 4061 4062 /* Let's move on to the TxMac */ 4063 if (status & ET_INTR_TXMAC) { 4064 u32 err = readl(&iomem->txmac.err); 4065 4066 /* When any of the errors occur and TXMAC generates an 4067 * interrupt to report these errors, it usually means that 4068 * TXMAC has detected an error in the data stream retrieved 4069 * from the on-chip Tx Q. All of these errors are catastrophic 4070 * and TXMAC won't be able to recover data when these errors 4071 * occur. In a nutshell, the whole Tx path will have to be reset 4072 * and re-configured afterwards. 4073 */ 4074 dev_warn(&adapter->pdev->dev, 4075 "TXMAC interrupt, error 0x%08x\n", 4076 err); 4077 4078 /* If we are debugging, we want to see this error, otherwise we 4079 * just want the device to be reset and continue 4080 */ 4081 } 4082 4083 /* Handle RXMAC Interrupt */ 4084 if (status & ET_INTR_RXMAC) { 4085 /* These interrupts are catastrophic to the device, what we need 4086 * to do is disable the interrupts and set the flag to cause us 4087 * to reset so we can solve this issue. 4088 */ 4089 /* MP_SET_FLAG( adapter, FMP_ADAPTER_HARDWARE_ERROR); */ 4090 4091 dev_warn(&adapter->pdev->dev, 4092 "RXMAC interrupt, error 0x%08x. Requesting reset\n", 4093 readl(&iomem->rxmac.err_reg)); 4094 4095 dev_warn(&adapter->pdev->dev, 4096 "Enable 0x%08x, Diag 0x%08x\n", 4097 readl(&iomem->rxmac.ctrl), 4098 readl(&iomem->rxmac.rxq_diag)); 4099 4100 /* If we are debugging, we want to see this error, otherwise we 4101 * just want the device to be reset and continue 4102 */ 4103 } 4104 4105 /* Handle MAC_STAT Interrupt */ 4106 if (status & ET_INTR_MAC_STAT) { 4107 /* This means at least one of the un-masked counters in the 4108 * MAC_STAT block has rolled over. Use this to maintain the top, 4109 * software managed bits of the counter(s). 4110 */ 4111 et1310_handle_macstat_interrupt(adapter); 4112 } 4113 4114 /* Handle SLV Timeout Interrupt */ 4115 if (status & ET_INTR_SLV_TIMEOUT) { 4116 /* This means a timeout has occurred on a read or write request 4117 * to one of the JAGCore registers. 
The Global Resources block 4118 * has terminated the request and on a read request, returned a 4119 * "fake" value. The most likely reasons are: Bad Address or the 4120 * addressed module is in a power-down state and can't respond. 4121 */ 4122 } 4123out: 4124 et131x_enable_interrupts(adapter); 4125} 4126 4127/* et131x_stats - Return the current device statistics */ 4128static struct net_device_stats *et131x_stats(struct net_device *netdev) 4129{ 4130 struct et131x_adapter *adapter = netdev_priv(netdev); 4131 struct net_device_stats *stats = &adapter->net_stats; 4132 struct ce_stats *devstat = &adapter->stats; 4133 4134 stats->rx_errors = devstat->rx_length_errs + 4135 devstat->rx_align_errs + 4136 devstat->rx_crc_errs + 4137 devstat->rx_code_violations + 4138 devstat->rx_other_errs; 4139 stats->tx_errors = devstat->tx_max_pkt_errs; 4140 stats->multicast = devstat->multicast_pkts_rcvd; 4141 stats->collisions = devstat->tx_collisions; 4142 4143 stats->rx_length_errors = devstat->rx_length_errs; 4144 stats->rx_over_errors = devstat->rx_overflows; 4145 stats->rx_crc_errors = devstat->rx_crc_errs; 4146 4147 /* NOTE: These stats don't have corresponding values in CE_STATS, 4148 * so we're going to have to update these directly from within the 4149 * TX/RX code 4150 */ 4151 /* stats->rx_bytes = 20; devstat->; */ 4152 /* stats->tx_bytes = 20; devstat->; */ 4153 /* stats->rx_dropped = devstat->; */ 4154 /* stats->tx_dropped = devstat->; */ 4155 4156 /* NOTE: Not used, can't find analogous statistics */ 4157 /* stats->rx_frame_errors = devstat->; */ 4158 /* stats->rx_fifo_errors = devstat->; */ 4159 /* stats->rx_missed_errors = devstat->; */ 4160 4161 /* stats->tx_aborted_errors = devstat->; */ 4162 /* stats->tx_carrier_errors = devstat->; */ 4163 /* stats->tx_fifo_errors = devstat->; */ 4164 /* stats->tx_heartbeat_errors = devstat->; */ 4165 /* stats->tx_window_errors = devstat->; */ 4166 return stats; 4167} 4168 4169/* et131x_open - Open the device for use. 
*/ 4170static int et131x_open(struct net_device *netdev) 4171{ 4172 struct et131x_adapter *adapter = netdev_priv(netdev); 4173 struct pci_dev *pdev = adapter->pdev; 4174 unsigned int irq = pdev->irq; 4175 int result; 4176 4177 /* Start the timer to track NIC errors */ 4178 init_timer(&adapter->error_timer); 4179 adapter->error_timer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000; 4180 adapter->error_timer.function = et131x_error_timer_handler; 4181 adapter->error_timer.data = (unsigned long)adapter; 4182 add_timer(&adapter->error_timer); 4183 4184 result = request_irq(irq, et131x_isr, 4185 IRQF_SHARED, netdev->name, netdev); 4186 if (result) { 4187 dev_err(&pdev->dev, "could not register IRQ %d\n", irq); 4188 return result; 4189 } 4190 4191 adapter->flags |= FMP_ADAPTER_INTERRUPT_IN_USE; 4192 4193 et131x_up(netdev); 4194 4195 return result; 4196} 4197 4198/* et131x_close - Close the device */ 4199static int et131x_close(struct net_device *netdev) 4200{ 4201 struct et131x_adapter *adapter = netdev_priv(netdev); 4202 4203 et131x_down(netdev); 4204 4205 adapter->flags &= ~FMP_ADAPTER_INTERRUPT_IN_USE; 4206 free_irq(adapter->pdev->irq, netdev); 4207 4208 /* Stop the error timer */ 4209 return del_timer_sync(&adapter->error_timer); 4210} 4211 4212/* et131x_ioctl - The I/O Control handler for the driver 4213 * @netdev: device on which the control request is being made 4214 * @reqbuf: a pointer to the IOCTL request buffer 4215 * @cmd: the IOCTL command code 4216 */ 4217static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf, 4218 int cmd) 4219{ 4220 struct et131x_adapter *adapter = netdev_priv(netdev); 4221 4222 if (!adapter->phydev) 4223 return -EINVAL; 4224 4225 return phy_mii_ioctl(adapter->phydev, reqbuf, cmd); 4226} 4227 4228/* et131x_set_packet_filter - Configures the Rx Packet filtering on the device 4229 * @adapter: pointer to our private adapter structure 4230 * 4231 * FIXME: lot of dups with MAC code 4232 */ 4233static int et131x_set_packet_filter(struct et131x_adapter *adapter) 4234{ 4235 int filter = adapter->packet_filter; 4236 int status = 0; 4237 u32 ctrl; 4238 u32 pf_ctrl; 4239 4240 ctrl = readl(&adapter->regs->rxmac.ctrl); 4241 pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl); 4242 4243 /* Default to disabled packet filtering. Enable it in the individual 4244 * case statements that require the device to filter something 4245 */ 4246 ctrl |= 0x04; 4247 4248 /* Set us to be in promiscuous mode so we receive everything, this 4249 * is also true when we get a packet filter of 0 4250 */ 4251 if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0) 4252 pf_ctrl &= ~7; /* Clear filter bits */ 4253 else { 4254 /* Set us up with Multicast packet filtering. Three cases are 4255 * possible - (1) we have a multi-cast list, (2) we receive ALL 4256 * multicast entries or (3) we receive none. 
4257 */ 4258 if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST) 4259 pf_ctrl &= ~2; /* Multicast filter bit */ 4260 else { 4261 et1310_setup_device_for_multicast(adapter); 4262 pf_ctrl |= 2; 4263 ctrl &= ~0x04; 4264 } 4265 4266 /* Set us up with Unicast packet filtering */ 4267 if (filter & ET131X_PACKET_TYPE_DIRECTED) { 4268 et1310_setup_device_for_unicast(adapter); 4269 pf_ctrl |= 4; 4270 ctrl &= ~0x04; 4271 } 4272 4273 /* Set us up with Broadcast packet filtering */ 4274 if (filter & ET131X_PACKET_TYPE_BROADCAST) { 4275 pf_ctrl |= 1; /* Broadcast filter bit */ 4276 ctrl &= ~0x04; 4277 } else 4278 pf_ctrl &= ~1; 4279 4280 /* Setup the receive mac configuration registers - Packet 4281 * Filter control + the enable / disable for packet filter 4282 * in the control reg. 4283 */ 4284 writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl); 4285 writel(ctrl, &adapter->regs->rxmac.ctrl); 4286 } 4287 return status; 4288} 4289 4290/* et131x_multicast - The handler to configure multicasting on the interface */ 4291static void et131x_multicast(struct net_device *netdev) 4292{ 4293 struct et131x_adapter *adapter = netdev_priv(netdev); 4294 int packet_filter; 4295 unsigned long flags; 4296 struct netdev_hw_addr *ha; 4297 int i; 4298 4299 spin_lock_irqsave(&adapter->lock, flags); 4300 4301 /* Before we modify the platform-independent filter flags, store them 4302 * locally. This allows us to determine if anything's changed and if 4303 * we even need to bother the hardware 4304 */ 4305 packet_filter = adapter->packet_filter; 4306 4307 /* Clear the 'multicast' flag locally; because we only have a single 4308 * flag to check multicast, and multiple multicast addresses can be 4309 * set, this is the easiest way to determine if more than one 4310 * multicast address is being set. 4311 */ 4312 packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST; 4313 4314 /* Check the net_device flags and set the device independent flags 4315 * accordingly 4316 */ 4317 4318 if (netdev->flags & IFF_PROMISC) 4319 adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS; 4320 else 4321 adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS; 4322 4323 if (netdev->flags & IFF_ALLMULTI) 4324 adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST; 4325 4326 if (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST) 4327 adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST; 4328 4329 if (netdev_mc_count(netdev) < 1) { 4330 adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST; 4331 adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST; 4332 } else 4333 adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST; 4334 4335 /* Set values in the private adapter struct */ 4336 i = 0; 4337 netdev_for_each_mc_addr(ha, netdev) { 4338 if (i == NIC_MAX_MCAST_LIST) 4339 break; 4340 memcpy(adapter->multicast_list[i++], ha->addr, ETH_ALEN); 4341 } 4342 adapter->multicast_addr_count = i; 4343 4344 /* Are the new flags different from the previous ones? If not, then no 4345 * action is required 4346 * 4347 * NOTE - This block will always update the multicast_list with the 4348 * hardware, even if the addresses aren't the same. 
4349 */ 4350 if (packet_filter != adapter->packet_filter) 4351 et131x_set_packet_filter(adapter); 4352 4353 spin_unlock_irqrestore(&adapter->lock, flags); 4354} 4355 4356/* et131x_tx - The handler to tx a packet on the device */ 4357static int et131x_tx(struct sk_buff *skb, struct net_device *netdev) 4358{ 4359 int status = 0; 4360 struct et131x_adapter *adapter = netdev_priv(netdev); 4361 struct tx_ring *tx_ring = &adapter->tx_ring; 4362 4363 /* stop the queue if it's getting full */ 4364 if (tx_ring->used >= NUM_TCB - 1 && !netif_queue_stopped(netdev)) 4365 netif_stop_queue(netdev); 4366 4367 /* Save the timestamp for the TX timeout watchdog */ 4368 netdev->trans_start = jiffies; 4369 4370 /* Call the device-specific data Tx routine */ 4371 status = et131x_send_packets(skb, netdev); 4372 4373 /* Check status and manage the netif queue if necessary */ 4374 if (status != 0) { 4375 if (status == -ENOMEM) 4376 status = NETDEV_TX_BUSY; 4377 else 4378 status = NETDEV_TX_OK; 4379 } 4380 return status; 4381} 4382 4383/* et131x_tx_timeout - Timeout handler 4384 * 4385 * The handler called when a Tx request times out. The timeout period is 4386 * specified by the 'watchdog_timeo' element in the net_device structure (see 4387 * et131x_pci_setup() to see how this value is set). 4388 */ 4389static void et131x_tx_timeout(struct net_device *netdev) 4390{ 4391 struct et131x_adapter *adapter = netdev_priv(netdev); 4392 struct tx_ring *tx_ring = &adapter->tx_ring; 4393 struct tcb *tcb; 4394 unsigned long flags; 4395 4396 /* If the device is closed, ignore the timeout */ 4397 if (!(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE)) 4398 return; 4399 4400 /* Any nonrecoverable hardware error? 4401 * Checks adapter->flags for any failure in phy reading 4402 */ 4403 if (adapter->flags & FMP_ADAPTER_NON_RECOVER_ERROR) 4404 return; 4405 4406 /* Hardware failure? */ 4407 if (adapter->flags & FMP_ADAPTER_HARDWARE_ERROR) { 4408 dev_err(&adapter->pdev->dev, "hardware error - reset\n"); 4409 return; 4410 } 4411 4412 /* Is send stuck? */ 4413 spin_lock_irqsave(&adapter->tcb_send_qlock, flags); 4414 4415 tcb = tx_ring->send_head; 4416 4417 if (tcb != NULL) { 4418 tcb->count++; 4419 4420 if (tcb->count > NIC_SEND_HANG_THRESHOLD) { 4421 spin_unlock_irqrestore(&adapter->tcb_send_qlock, 4422 flags); 4423 4424 dev_warn(&adapter->pdev->dev, 4425 "Send stuck - reset. 
tcb->WrIndex %x, flags 0x%08x\n", 4426 tcb->index, 4427 tcb->flags); 4428 4429 adapter->net_stats.tx_errors++; 4430 4431 /* perform reset of tx/rx */ 4432 et131x_disable_txrx(netdev); 4433 et131x_enable_txrx(netdev); 4434 return; 4435 } 4436 } 4437 4438 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); 4439} 4440 4441/* et131x_change_mtu - The handler called to change the MTU for the device */ 4442static int et131x_change_mtu(struct net_device *netdev, int new_mtu) 4443{ 4444 int result = 0; 4445 struct et131x_adapter *adapter = netdev_priv(netdev); 4446 4447 /* Make sure the requested MTU is valid */ 4448 if (new_mtu < 64 || new_mtu > 9216) 4449 return -EINVAL; 4450 4451 et131x_disable_txrx(netdev); 4452 et131x_handle_send_interrupt(adapter); 4453 et131x_handle_recv_interrupt(adapter); 4454 4455 /* Set the new MTU */ 4456 netdev->mtu = new_mtu; 4457 4458 /* Free Rx DMA memory */ 4459 et131x_adapter_memory_free(adapter); 4460 4461 /* Set the config parameter for Jumbo Packet support */ 4462 adapter->registry_jumbo_packet = new_mtu + 14; 4463 et131x_soft_reset(adapter); 4464 4465 /* Alloc and init Rx DMA memory */ 4466 result = et131x_adapter_memory_alloc(adapter); 4467 if (result != 0) { 4468 dev_warn(&adapter->pdev->dev, 4469 "Change MTU failed; couldn't re-alloc DMA memory\n"); 4470 return result; 4471 } 4472 4473 et131x_init_send(adapter); 4474 4475 et131x_hwaddr_init(adapter); 4476 memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN); 4477 4478 /* Init the device with the new settings */ 4479 et131x_adapter_setup(adapter); 4480 4481 et131x_enable_txrx(netdev); 4482 4483 return result; 4484} 4485 4486/* et131x_set_mac_addr - handler to change the MAC address for the device */ 4487static int et131x_set_mac_addr(struct net_device *netdev, void *new_mac) 4488{ 4489 int result = 0; 4490 struct et131x_adapter *adapter = netdev_priv(netdev); 4491 struct sockaddr *address = new_mac; 4492 4493 if (adapter == NULL) 4494 return -ENODEV; 4495 4496 /* Make sure the requested MAC is valid */ 4497 if (!is_valid_ether_addr(address->sa_data)) 4498 return -EADDRNOTAVAIL; 4499 4500 et131x_disable_txrx(netdev); 4501 et131x_handle_send_interrupt(adapter); 4502 et131x_handle_recv_interrupt(adapter); 4503 4504 /* Set the new MAC */ 4505 /* netdev->set_mac_address = &new_mac; */ 4506 4507 memcpy(netdev->dev_addr, address->sa_data, netdev->addr_len); 4508 4509 netdev_info(netdev, "Setting MAC address to %pM\n", 4510 netdev->dev_addr); 4511 4512 /* Free Rx DMA memory */ 4513 et131x_adapter_memory_free(adapter); 4514 4515 et131x_soft_reset(adapter); 4516 4517 /* Alloc and init Rx DMA memory */ 4518 result = et131x_adapter_memory_alloc(adapter); 4519 if (result != 0) { 4520 dev_err(&adapter->pdev->dev, 4521 "Change MAC failed; couldn't re-alloc DMA memory\n"); 4522 return result; 4523 } 4524 4525 et131x_init_send(adapter); 4526 4527 et131x_hwaddr_init(adapter); 4528 4529 /* Init the device with the new settings */ 4530 et131x_adapter_setup(adapter); 4531 4532 et131x_enable_txrx(netdev); 4533 4534 return result; 4535} 4536 4537static const struct net_device_ops et131x_netdev_ops = { 4538 .ndo_open = et131x_open, 4539 .ndo_stop = et131x_close, 4540 .ndo_start_xmit = et131x_tx, 4541 .ndo_set_rx_mode = et131x_multicast, 4542 .ndo_tx_timeout = et131x_tx_timeout, 4543 .ndo_change_mtu = et131x_change_mtu, 4544 .ndo_set_mac_address = et131x_set_mac_addr, 4545 .ndo_validate_addr = eth_validate_addr, 4546 .ndo_get_stats = et131x_stats, 4547 .ndo_do_ioctl = et131x_ioctl, 4548}; 4549 4550/* et131x_pci_setup - Perform 
device initialization 4551 * @pdev: a pointer to the device's pci_dev structure 4552 * @ent: this device's entry in the pci_device_id table 4553 * 4554 * Registered in the pci_driver structure, this function is called when the 4555 * PCI subsystem finds a new PCI device which matches the information 4556 * contained in the pci_device_id table. This routine is the equivalent to 4557 * a device insertion routine. 4558 */ 4559static int et131x_pci_setup(struct pci_dev *pdev, 4560 const struct pci_device_id *ent) 4561{ 4562 struct net_device *netdev; 4563 struct et131x_adapter *adapter; 4564 int rc; 4565 int ii; 4566 4567 rc = pci_enable_device(pdev); 4568 if (rc < 0) { 4569 dev_err(&pdev->dev, "pci_enable_device() failed\n"); 4570 goto out; 4571 } 4572 4573 /* Perform some basic PCI checks */ 4574 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 4575 dev_err(&pdev->dev, "Can't find PCI device's base address\n"); 4576 rc = -ENODEV; 4577 goto err_disable; 4578 } 4579 4580 rc = pci_request_regions(pdev, DRIVER_NAME); 4581 if (rc < 0) { 4582 dev_err(&pdev->dev, "Can't get PCI resources\n"); 4583 goto err_disable; 4584 } 4585 4586 pci_set_master(pdev); 4587 4588 /* Check the DMA addressing support of this device */ 4589 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) && 4590 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { 4591 dev_err(&pdev->dev, "No usable DMA addressing method\n"); 4592 rc = -EIO; 4593 goto err_release_res; 4594 } 4595 4596 /* Allocate netdev and private adapter structs */ 4597 netdev = alloc_etherdev(sizeof(struct et131x_adapter)); 4598 if (!netdev) { 4599 dev_err(&pdev->dev, "Couldn't alloc netdev struct\n"); 4600 rc = -ENOMEM; 4601 goto err_release_res; 4602 } 4603 4604 netdev->watchdog_timeo = ET131X_TX_TIMEOUT; 4605 netdev->netdev_ops = &et131x_netdev_ops; 4606 4607 SET_NETDEV_DEV(netdev, &pdev->dev); 4608 SET_ETHTOOL_OPS(netdev, &et131x_ethtool_ops); 4609 4610 adapter = et131x_adapter_init(netdev, pdev); 4611 4612 rc = et131x_pci_init(adapter, pdev); 4613 if (rc < 0) 4614 goto err_free_dev; 4615 4616 /* Map the bus-relative registers to system virtual memory */ 4617 adapter->regs = pci_ioremap_bar(pdev, 0); 4618 if (!adapter->regs) { 4619 dev_err(&pdev->dev, "Cannot map device registers\n"); 4620 rc = -ENOMEM; 4621 goto err_free_dev; 4622 } 4623 4624 /* If Phy COMA mode was enabled when we went down, disable it here. 
*/ 4625 writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr); 4626 4627 /* Issue a global reset to the et1310 */ 4628 et131x_soft_reset(adapter); 4629 4630 /* Disable all interrupts (paranoid) */ 4631 et131x_disable_interrupts(adapter); 4632 4633 /* Allocate DMA memory */ 4634 rc = et131x_adapter_memory_alloc(adapter); 4635 if (rc < 0) { 4636 dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n"); 4637 goto err_iounmap; 4638 } 4639 4640 /* Init send data structures */ 4641 et131x_init_send(adapter); 4642 4643 /* Set up the task structure for the ISR's deferred handler */ 4644 INIT_WORK(&adapter->task, et131x_isr_handler); 4645 4646 /* Copy address into the net_device struct */ 4647 memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN); 4648 4649 /* Init variable for counting how long we do not have link status */ 4650 adapter->boot_coma = 0; 4651 et1310_disable_phy_coma(adapter); 4652 4653 rc = -ENOMEM; 4654 4655 /* Setup the mii_bus struct */ 4656 adapter->mii_bus = mdiobus_alloc(); 4657 if (!adapter->mii_bus) { 4658 dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n"); 4659 goto err_mem_free; 4660 } 4661 4662 adapter->mii_bus->name = "et131x_eth_mii"; 4663 snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x", 4664 (adapter->pdev->bus->number << 8) | adapter->pdev->devfn); 4665 adapter->mii_bus->priv = netdev; 4666 adapter->mii_bus->read = et131x_mdio_read; 4667 adapter->mii_bus->write = et131x_mdio_write; 4668 adapter->mii_bus->reset = et131x_mdio_reset; 4669 adapter->mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), 4670 GFP_KERNEL); 4671 if (!adapter->mii_bus->irq) 4672 goto err_mdio_free; 4673 4674 for (ii = 0; ii < PHY_MAX_ADDR; ii++) 4675 adapter->mii_bus->irq[ii] = PHY_POLL; 4676 4677 rc = mdiobus_register(adapter->mii_bus); 4678 if (rc < 0) { 4679 dev_err(&pdev->dev, "failed to register MII bus\n"); 4680 goto err_mdio_free_irq; 4681 } 4682 4683 rc = et131x_mii_probe(netdev); 4684 if (rc < 0) { 4685 dev_err(&pdev->dev, "failed to probe MII bus\n"); 4686 goto err_mdio_unregister; 4687 } 4688 4689 /* Setup et1310 as per the documentation */ 4690 et131x_adapter_setup(adapter); 4691 4692 /* We can enable interrupts now 4693 * 4694 * NOTE - Because registration of interrupt handler is done in the 4695 * device's open(), defer enabling device interrupts to that 4696 * point 4697 */ 4698 4699 /* Register the net_device struct with the Linux network layer */ 4700 rc = register_netdev(netdev); 4701 if (rc < 0) { 4702 dev_err(&pdev->dev, "register_netdev() failed\n"); 4703 goto err_phy_disconnect; 4704 } 4705 4706 /* Register the net_device struct with the PCI subsystem. Save a copy 4707 * of the PCI config space for this device now that the device has 4708 * been initialized, just in case it needs to be quickly restored. 
4709 */ 4710 pci_set_drvdata(pdev, netdev); 4711out: 4712 return rc; 4713 4714err_phy_disconnect: 4715 phy_disconnect(adapter->phydev); 4716err_mdio_unregister: 4717 mdiobus_unregister(adapter->mii_bus); 4718err_mdio_free_irq: 4719 kfree(adapter->mii_bus->irq); 4720err_mdio_free: 4721 mdiobus_free(adapter->mii_bus); 4722err_mem_free: 4723 et131x_adapter_memory_free(adapter); 4724err_iounmap: 4725 iounmap(adapter->regs); 4726err_free_dev: 4727 pci_dev_put(pdev); 4728 free_netdev(netdev); 4729err_release_res: 4730 pci_release_regions(pdev); 4731err_disable: 4732 pci_disable_device(pdev); 4733 goto out; 4734} 4735 4736static const struct pci_device_id et131x_pci_table[] = { 4737 { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL}, 4738 { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL}, 4739 {0,} 4740}; 4741MODULE_DEVICE_TABLE(pci, et131x_pci_table); 4742 4743static struct pci_driver et131x_driver = { 4744 .name = DRIVER_NAME, 4745 .id_table = et131x_pci_table, 4746 .probe = et131x_pci_setup, 4747 .remove = et131x_pci_remove, 4748 .driver.pm = ET131X_PM_OPS, 4749}; 4750 4751module_pci_driver(et131x_driver);