Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v3.2-rc3
/*
 * Agere Systems Inc.
 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *   http://www.agere.com
 *
 * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com>
 *
 *------------------------------------------------------------------------------
 *
 * SOFTWARE LICENSE
 *
 * This software is provided subject to the following terms and conditions,
 * which you should read carefully before using the software.  Using this
 * software indicates your acceptance of these terms and conditions.  If you do
 * not agree with these terms and conditions, do not use the software.
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
 * modifications, are permitted provided that the following conditions are met:
 *
 * . Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following Disclaimer as comments in the code as
 *    well as in the documentation and/or other materials provided with the
 *    distribution.
 *
 * . Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following Disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * . Neither the name of Agere Systems Inc. nor the names of the contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Disclaimer
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/phy.h>

#include "et131x.h"

MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>");
MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver "
		   "for the ET1310 by Agere Systems");

/* EEPROM defines */
#define MAX_NUM_REGISTER_POLLS	1000
#define MAX_NUM_WRITE_RETRIES	2

/* MAC defines */
#define COUNTER_WRAP_16_BIT	0x10000
#define COUNTER_WRAP_12_BIT	0x1000

/* PCI defines */
#define INTERNAL_MEM_SIZE	0x400	/* 1024 of internal memory */
#define INTERNAL_MEM_RX_OFFSET	0x1FF	/* 50% Tx, 50% Rx */

/* ISR defines */
/*
 * For interrupts, normal running is:
 *       rxdma_xfr_done, phy_interrupt, mac_stat_interrupt,
 *       watchdog_interrupt & txdma_xfer_done
 *
 * In both cases, when flow control is enabled for either Tx or bi-direction,
 * we additionally enable rx_fbr0_low and rx_fbr1_low, so we know when the
 * buffer rings are running low.
 */
#define INT_MASK_DISABLE	0xffffffff

/* NOTE: Masking out MAC_STAT Interrupt for now...
 * #define INT_MASK_ENABLE		0xfff6bf17
 * #define INT_MASK_ENABLE_NO_FLOW	0xfff6bfd7
 */
#define INT_MASK_ENABLE		0xfffebf17
#define INT_MASK_ENABLE_NO_FLOW	0xfffebfd7

/* General defines */
/* Packet and header sizes */
#define NIC_MIN_PACKET_SIZE	60

/* Multicast list size */
#define NIC_MAX_MCAST_LIST	128

/* Supported Filters */
#define ET131X_PACKET_TYPE_DIRECTED		0x0001
#define ET131X_PACKET_TYPE_MULTICAST		0x0002
#define ET131X_PACKET_TYPE_BROADCAST		0x0004
#define ET131X_PACKET_TYPE_PROMISCUOUS		0x0008
#define ET131X_PACKET_TYPE_ALL_MULTICAST	0x0010

/* Tx Timeout */
#define ET131X_TX_TIMEOUT	(1 * HZ)
#define NIC_SEND_HANG_THRESHOLD	0

/* MP_TCB flags */
#define fMP_DEST_MULTI			0x00000001
#define fMP_DEST_BROAD			0x00000002

/* MP_ADAPTER flags */
#define fMP_ADAPTER_RECV_LOOKASIDE	0x00000004
#define fMP_ADAPTER_INTERRUPT_IN_USE	0x00000008

/* MP_SHARED flags */
#define fMP_ADAPTER_LOWER_POWER		0x00200000

#define fMP_ADAPTER_NON_RECOVER_ERROR	0x00800000
#define fMP_ADAPTER_HARDWARE_ERROR	0x04000000

#define fMP_ADAPTER_FAIL_SEND_MASK	0x3ff00000
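/*
 * These fMP_* values are single-bit masks that live in the 'flags' word
 * of struct et131x_adapter (defined below) and are tested with bitwise
 * AND.  A minimal usage sketch, mirroring the test that appears in
 * et1310_config_mac_regs2() later in this file:
 *
 *	if (adapter->flags & fMP_ADAPTER_LOWER_POWER)
 *		et131x_rx_dma_enable(adapter);
 */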
/* Some offsets in PCI config space that are actually used. */
#define ET1310_PCI_MAX_PYLD		0x4C
#define ET1310_PCI_MAC_ADDRESS		0xA4
#define ET1310_PCI_EEPROM_STATUS	0xB2
#define ET1310_PCI_ACK_NACK		0xC0
#define ET1310_PCI_REPLAY		0xC2
#define ET1310_PCI_L0L1LATENCY		0xCF

/* PCI Product IDs */
#define ET131X_PCI_DEVICE_ID_GIG	0xED00	/* ET1310 1000 Base-T 8 */
#define ET131X_PCI_DEVICE_ID_FAST	0xED01	/* ET1310 100  Base-T */

/* Define order of magnitude converter */
#define NANO_IN_A_MICRO	1000

#define PARM_RX_NUM_BUFS_DEF	4
#define PARM_RX_TIME_INT_DEF	10
#define PARM_RX_MEM_END_DEF	0x2bc
#define PARM_TX_TIME_INT_DEF	40
#define PARM_TX_NUM_BUFS_DEF	4
#define PARM_DMA_CACHE_DEF	0

/* RX defines */
#define USE_FBR0 1

#define FBR_CHUNKS 32

#define MAX_DESC_PER_RING_RX	1024

/* number of RFDs - default and min */
#ifdef USE_FBR0
#define RFD_LOW_WATER_MARK	40
#define NIC_DEFAULT_NUM_RFD	1024
#define NUM_FBRS		2
#else
#define RFD_LOW_WATER_MARK	20
#define NIC_DEFAULT_NUM_RFD	256
#define NUM_FBRS		1
#endif

#define NIC_MIN_NUM_RFD		64

#define NUM_PACKETS_HANDLED	256

#define ALCATEL_MULTICAST_PKT	0x01000000
#define ALCATEL_BROADCAST_PKT	0x02000000

/* typedefs for Free Buffer Descriptors */
struct fbr_desc {
	u32 addr_lo;
	u32 addr_hi;
	u32 word2;	/* Bits 10-31 reserved, 0-9 descriptor */
};

/* Packet Status Ring Descriptors
 *
 * Word 0:
 *
 * top 16 bits are from the Alcatel Status Word as enumerated in
 * PE-MCXMAC Data Sheet IPD DS54 0210-1 (also IPD-DS80 0205-2)
 *
 * 0: hp			hash pass
 * 1: ipa			IP checksum assist
 * 2: ipp			IP checksum pass
 * 3: tcpa			TCP checksum assist
 * 4: tcpp			TCP checksum pass
 * 5: wol			WOL Event
 * 6: rxmac_error		RXMAC Error Indicator
 * 7: drop			Drop packet
 * 8: ft			Frame Truncated
 * 9: jp			Jumbo Packet
 * 10: vp			VLAN Packet
 * 11-15: unused
 * 16: asw_prev_pkt_dropped	e.g. IFG too small on previous
 * 17: asw_RX_DV_event		short receive event detected
 * 18: asw_false_carrier_event	bad carrier since last good packet
 * 19: asw_code_err		one or more nibbles signalled as errors
 * 20: asw_CRC_err		CRC error
 * 21: asw_len_chk_err		frame length field incorrect
 * 22: asw_too_long		frame length > 1518 bytes
 * 23: asw_OK			valid CRC + no code error
 * 24: asw_multicast		has a multicast address
 * 25: asw_broadcast		has a broadcast address
 * 26: asw_dribble_nibble	spurious bits after EOP
 * 27: asw_control_frame	is a control frame
 * 28: asw_pause_frame		is a pause frame
 * 29: asw_unsupported_op	unsupported OP code
 * 30: asw_VLAN_tag		VLAN tag detected
 * 31: asw_long_evt		Rx long event
 *
 * Word 1:
 * 0-15: length			length in bytes
 * 16-25: bi			Buffer Index
 * 26-27: ri			Ring Index
 * 28-31: reserved
 */

struct pkt_stat_desc {
	u32 word0;
	u32 word1;
};
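/*
 * A minimal sketch, not part of the original driver, of how the word1
 * layout documented above can be unpacked.  The bit positions come from
 * the comment block; the helper names are illustrative only.
 */
static inline u32 pkt_stat_desc_len(const struct pkt_stat_desc *desc)
{
	return desc->word1 & 0xFFFF;		/* bits 0-15: length */
}

static inline u32 pkt_stat_desc_bi(const struct pkt_stat_desc *desc)
{
	return (desc->word1 >> 16) & 0x3FF;	/* bits 16-25: buffer index */
}

static inline u32 pkt_stat_desc_ri(const struct pkt_stat_desc *desc)
{
	return (desc->word1 >> 26) & 0x3;	/* bits 26-27: ring index */
}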
/* Typedefs for the RX DMA status word */

/*
 * rx status word 0 holds part of the status bits of the Rx DMA engine
 * that get copied out to memory by the ET-1310.  Word 0 is a 32 bit word
 * which contains the Free Buffer ring 0 and 1 available offset.
 *
 * bit 0-9 FBR1 offset
 * bit 10 Wrap flag for FBR1
 * bit 16-25 FBR0 offset
 * bit 26 Wrap flag for FBR0
 */

/*
 * RXSTAT_WORD1_t structure holds part of the status bits of the Rx DMA engine
 * that get copied out to memory by the ET-1310.  Word 3 is a 32 bit word
 * which contains the Packet Status Ring available offset.
 *
 * bit 0-15 reserved
 * bit 16-27 PSRoffset
 * bit 28 PSRwrap
 * bit 29-31 unused
 */

/*
 * struct rx_status_block is a structure representing the status of the Rx
 * DMA engine.  It sits in free memory, and is pointed to by 0x101c / 0x1020.
 */
struct rx_status_block {
	u32 word0;
	u32 word1;
};

/*
 * Structure for look-up table holding free buffer ring pointers, addresses
 * and state.
 */
struct fbr_lookup {
	void		*virt[MAX_DESC_PER_RING_RX];
	void		*buffer1[MAX_DESC_PER_RING_RX];
	void		*buffer2[MAX_DESC_PER_RING_RX];
	u32		 bus_high[MAX_DESC_PER_RING_RX];
	u32		 bus_low[MAX_DESC_PER_RING_RX];
	void		*ring_virtaddr;
	dma_addr_t	 ring_physaddr;
	void		*mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
	dma_addr_t	 mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
	uint64_t	 real_physaddr;
	uint64_t	 offset;
	u32		 local_full;
	u32		 num_entries;
	u32		 buffsize;
};

/*
 * struct rx_ring is the structure representing the adaptor's local
 * reference(s) to the rings
 *
 ******************************************************************************
 * IMPORTANT NOTE :- fbr_lookup *fbr[NUM_FBRS] uses index 0 to refer to FBR1
 *			and index 1 to refer to FBR0
 ******************************************************************************
 */
struct rx_ring {
	struct fbr_lookup *fbr[NUM_FBRS];
	void *ps_ring_virtaddr;
	dma_addr_t ps_ring_physaddr;
	u32 local_psr_full;
	u32 psr_num_entries;

	struct rx_status_block *rx_status_block;
	dma_addr_t rx_status_bus;

	/* RECV */
	struct list_head recv_list;
	u32 num_ready_recv;

	u32 num_rfd;

	bool unfinished_receives;

	/* lookaside lists */
	struct kmem_cache *recv_lookaside;
};

/* TX defines */
/*
 * word 2 of the control bits in the Tx Descriptor ring for the ET-1310
 *
 * 0-15: length of packet
 * 16-27: VLAN tag
 * 28: VLAN CFI
 * 29-31: VLAN priority
 *
 * word 3 of the control bits in the Tx Descriptor ring for the ET-1310
 *
 * 0: last packet in the sequence
 * 1: first packet in the sequence
 * 2: interrupt the processor when this pkt is sent
 * 3: Control word - no packet data
 * 4: Issue half-duplex backpressure : XON/XOFF
 * 5: send pause frame
 * 6: Tx frame has error
 * 7: append CRC
 * 8: MAC override
 * 9: pad packet
 * 10: Packet is a Huge packet
 * 11: append VLAN tag
 * 12: IP checksum assist
 * 13: TCP checksum assist
 * 14: UDP checksum assist
 */

/* struct tx_desc represents each descriptor on the ring */
struct tx_desc {
	u32 addr_hi;
	u32 addr_lo;
	u32 len_vlan;	/* control words how to xmit the */
	u32 flags;	/* data (detailed above) */
};

/*
 * The status of the Tx DMA engine.  It sits in free memory, and is pointed
 * to by 0x101c / 0x1020.
 * This is a DMA10 type.
 */

/* TCB (Transmit Control Block: Host Side) */
struct tcb {
	struct tcb *next;	/* Next entry in ring */
	u32 flags;		/* Our flags for the packet */
	u32 count;		/* Used to spot stuck/lost packets */
	u32 stale;		/* Used to spot stuck/lost packets */
	struct sk_buff *skb;	/* Network skb we are tied to */
	u32 index;		/* Ring indexes */
	u32 index_start;
};

/* Structure representing our local reference(s) to the ring */
struct tx_ring {
	/* TCB (Transmit Control Block) memory and lists */
	struct tcb *tcb_ring;

	/* List of TCBs that are ready to be used */
	struct tcb *tcb_qhead;
	struct tcb *tcb_qtail;

	/* list of TCBs that are currently being sent.  NOTE that access to
	 * all three of these (including used) are controlled via the
	 * TCBSendQLock.  This lock should be secured prior to incrementing /
	 * decrementing used, or any queue manipulation on send_head /
	 * tail
	 */
	struct tcb *send_head;
	struct tcb *send_tail;
	int used;

	/* The actual descriptor ring */
	struct tx_desc *tx_desc_ring;
	dma_addr_t tx_desc_ring_pa;

	/* send_idx indicates where we last wrote to in the descriptor ring. */
	u32 send_idx;

	/* The location of the write-back status block */
	u32 *tx_status;
	dma_addr_t tx_status_pa;

	/* Packets since the last IRQ: used for interrupt coalescing */
	int since_irq;
};

/* ADAPTER defines */
/*
 * Do not change these values: if changed, then change also in respective
 * TXdma and Rxdma engines
 */
#define NUM_DESC_PER_RING_TX	512	/* TX Do not change these values */
#define NUM_TCB			64

/*
 * These values are all superseded by registry entries to facilitate tuning.
 * Once the desired performance has been achieved, the optimal registry values
 * should be re-populated to these #defines:
 */
#define TX_ERROR_PERIOD		1000

#define LO_MARK_PERCENT_FOR_PSR	15
#define LO_MARK_PERCENT_FOR_RX	15

/* RFD (Receive Frame Descriptor) */
struct rfd {
	struct list_head list_node;
	struct sk_buff *skb;
	u32 len;	/* total size of receive frame */
	u16 bufferindex;
	u8 ringindex;
};

/* Flow Control */
#define FLOW_BOTH	0
#define FLOW_TXONLY	1
#define FLOW_RXONLY	2
#define FLOW_NONE	3

/* Struct to define some device statistics */
struct ce_stats {
	/* MIB II variables
	 *
	 * NOTE: atomic_t types are only guaranteed to store 24-bits; if we
	 * MUST have 32, then we'll need another way to perform atomic
	 * operations
	 */
	u32		unicast_pkts_rcvd;
	atomic_t	unicast_pkts_xmtd;
	u32		multicast_pkts_rcvd;
	atomic_t	multicast_pkts_xmtd;
	u32		broadcast_pkts_rcvd;
	atomic_t	broadcast_pkts_xmtd;
	u32		rcvd_pkts_dropped;

	/* Tx Statistics. */
	u32		tx_underflows;

	u32		tx_collisions;
	u32		tx_excessive_collisions;
	u32		tx_first_collisions;
	u32		tx_late_collisions;
	u32		tx_max_pkt_errs;
	u32		tx_deferred;

	/* Rx Statistics. */
	u32		rx_overflows;

	u32		rx_length_errs;
	u32		rx_align_errs;
	u32		rx_crc_errs;
	u32		rx_code_violations;
	u32		rx_other_errs;

	u32		synchronous_iterations;
	u32		interrupt_status;
};

/* The private adapter structure */
struct et131x_adapter {
	struct net_device *netdev;
	struct pci_dev *pdev;
	struct mii_bus *mii_bus;
	struct phy_device *phydev;
	struct work_struct task;

	/* Flags that indicate current state of the adapter */
	u32 flags;

	/* local link state, to determine if a state change has occurred */
	int link;

	/* Configuration */
	u8 rom_addr[ETH_ALEN];
	u8 addr[ETH_ALEN];
	bool has_eeprom;
	u8 eeprom_data[2];

	/* Spinlocks */
	spinlock_t lock;

	spinlock_t tcb_send_qlock;
	spinlock_t tcb_ready_qlock;
	spinlock_t send_hw_lock;

	spinlock_t rcv_lock;
	spinlock_t rcv_pend_lock;
	spinlock_t fbr_lock;

	spinlock_t phy_lock;

	/* Packet Filter and look ahead size */
	u32 packet_filter;

	/* multicast list */
	u32 multicast_addr_count;
	u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN];

	/* Pointer to the device's PCI register space */
	struct address_map __iomem *regs;

	/* Registry parameters */
	u8 wanted_flow;		/* Flow we want for 802.3x flow control */
	u32 registry_jumbo_packet;	/* Max supported ethernet packet size */

	/* Derived from the registry: */
	u8 flowcontrol;		/* flow control validated by the far-end */

	/* Minimize init-time */
	struct timer_list error_timer;

	/* variable putting the phy into coma mode when boot up with no cable
	 * plugged in after 5 seconds
	 */
	u8 boot_coma;

	/* Next two used to save power information at power down. This
	 * information will be used during power up to set up parts of Power
	 * Management in JAGCore
	 */
	u16 pdown_speed;
	u8 pdown_duplex;

	/* Tx Memory Variables */
	struct tx_ring tx_ring;

	/* Rx Memory Variables */
	struct rx_ring rx_ring;

	/* Stats */
	struct ce_stats stats;

	struct net_device_stats net_stats;
};

/* EEPROM functions */

static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
{
	u32 reg;
	int i;

	/*
	 * 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and
	 *    bits 7,1:0 both equal to 1, at least once after reset.
	 *    Subsequent operations need only to check that bits 1:0 are equal
	 *    to 1 prior to starting a single byte read/write
	 */

	for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) {
		/* Read registers grouped in DWORD1 */
		if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg))
			return -EIO;

		/* I2C idle and Phy Queue Avail both true */
		if ((reg & 0x3000) == 0x3000) {
			if (status)
				*status = reg;
			return reg & 0xFF;
		}
	}
	return -ETIMEDOUT;
}
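/*
 * Usage sketch (illustrative): eeprom_wait_ready() returns the low
 * status byte (>= 0) once LBCIF status bits 1:0 read back as 11b, or a
 * negative errno (-EIO / -ETIMEDOUT), so the callers below treat it as
 * a kernel-style status return:
 *
 *	err = eeprom_wait_ready(pdev, NULL);
 *	if (err < 0)
 *		return err;
 */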
/**
 * eeprom_write - Write a byte to the ET1310's EEPROM
 * @adapter: pointer to our private adapter structure
 * @addr: the address to write
 * @data: the value to write
 *
 * Returns 0 for a successful write, a negative errno otherwise.
 */
static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
{
	struct pci_dev *pdev = adapter->pdev;
	int index = 0;
	int retries;
	int err = 0;
	int i2c_wack = 0;
	int writeok = 0;
	u32 status;
	u32 val = 0;

	/*
	 * For an EEPROM, an I2C single byte write is defined as a START
	 * condition followed by the device address, EEPROM address, one byte
	 * of data and a STOP condition.  The STOP condition will trigger the
	 * EEPROM's internally timed write cycle to the nonvolatile memory.
	 * All inputs are disabled during this write cycle and the EEPROM will
	 * not respond to any access until the internal write is complete.
	 */

	err = eeprom_wait_ready(pdev, NULL);
	if (err)
		return err;

	/*
	 * 2. Write to the LBCIF Control Register:  bit 7=1, bit 6=1, bit 3=0,
	 *    and bits 1:0 both =0.  Bit 5 should be set according to the
	 *    type of EEPROM being accessed (1=two byte addressing, 0=one
	 *    byte addressing).
	 */
	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
			LBCIF_CONTROL_LBCIF_ENABLE | LBCIF_CONTROL_I2C_WRITE))
		return -EIO;

	i2c_wack = 1;

	/* Prepare EEPROM address for Step 3 */

	for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
		/* Write the address to the LBCIF Address Register */
		if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
			break;
		/*
		 * Write the data to the LBCIF Data Register (the I2C write
		 * will begin).
		 */
		if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
			break;
		/*
		 * Monitor bits 1:0 of the LBCIF Status Register.  When bits
		 * 1:0 are both equal to 1, the I2C write has completed and the
		 * internal write cycle of the EEPROM is about to start.
		 * (bits 1:0 = 01 is a legal state while waiting for both to
		 * equal 1, but bits 1:0 = 10 is invalid and implies that
		 * something is broken).
		 */
		err = eeprom_wait_ready(pdev, &status);
		if (err < 0)
			return 0;

		/*
		 * Check bit 3 of the LBCIF Status Register.  If equal to 1,
		 * an error has occurred.  Don't break here if we are revision
		 * 1, this is so we do a blind write for load bug.
		 */
		if ((status & LBCIF_STATUS_GENERAL_ERROR)
			&& adapter->pdev->revision == 0)
			break;

		/*
		 * Check bit 2 of the LBCIF Status Register.  If equal to 1 an
		 * ACK error has occurred on the address phase of the write.
		 * This could be due to an actual hardware failure or the
		 * EEPROM may still be in its internal write cycle from a
		 * previous write.  This write operation was ignored and must
		 * be repeated later.
		 */
		if (status & LBCIF_STATUS_ACK_ERROR) {
			/*
			 * This could be due to an actual hardware failure
			 * or the EEPROM may still be in its internal write
			 * cycle from a previous write.  This write operation
			 * was ignored and must be repeated later.
			 */
			udelay(10);
			continue;
		}

		writeok = 1;
		break;
	}

	/*
	 * Set bit 6 of the LBCIF Control Register = 0.
	 */
	udelay(10);

	while (i2c_wack) {
		if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
					  LBCIF_CONTROL_LBCIF_ENABLE))
			writeok = 0;

		/* Do read until internal ACK_ERROR goes away meaning write
		 * completed
		 */
		do {
			pci_write_config_dword(pdev,
					       LBCIF_ADDRESS_REGISTER,
					       addr);
			do {
				pci_read_config_dword(pdev,
					LBCIF_DATA_REGISTER, &val);
			} while ((val & 0x00010000) == 0);
		} while (val & 0x00040000);

		if ((val & 0xFF00) != 0xC000 || index == 10000)
			break;
		index++;
	}
	return writeok ? 0 : -EIO;
}

/**
 * eeprom_read - Read a byte from the ET1310's EEPROM
 * @adapter: pointer to our private adapter structure
 * @addr: the address from which to read
 * @pdata: a pointer to a byte in which to store the value of the read
 *
 * Returns 0 for a successful read, a negative errno otherwise.
 */
static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;
	u32 status;

	/*
	 * A single byte read is similar to the single byte write, with the
	 * exception of the data flow:
	 */

	err = eeprom_wait_ready(pdev, NULL);
	if (err)
		return err;
	/*
	 * Write to the LBCIF Control Register:  bit 7=1, bit 6=0, bit 3=0,
	 * and bits 1:0 both =0.  Bit 5 should be set according to the type
	 * of EEPROM being accessed (1=two byte addressing, 0=one byte
	 * addressing).
	 */
	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
				  LBCIF_CONTROL_LBCIF_ENABLE))
		return -EIO;
	/*
	 * Write the address to the LBCIF Address Register (I2C read will
	 * begin).
	 */
	if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
		return -EIO;
	/*
	 * Monitor bit 0 of the LBCIF Status Register.  When = 1, I2C read
	 * is complete. (if bit 1 = 1 and bit 0 stays = 0, a hardware failure
	 * has occurred).
	 */
	err = eeprom_wait_ready(pdev, &status);
	if (err < 0)
		return err;
	/*
	 * Regardless of error status, read data byte from LBCIF Data
	 * Register.
	 */
	*pdata = err;
	/*
	 * Check bit 2 of the LBCIF Status Register.  If = 1,
	 * then an error has occurred.
	 */
	return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
}

int et131x_init_eeprom(struct et131x_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 eestatus;

	/* We first need to check the EEPROM Status code located at offset
	 * 0xB2 of config space
	 */
	pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS,
			     &eestatus);

	/* THIS IS A WORKAROUND:
	 * I need to call this function twice to get my card in a
	 * LG M1 Express Dual running. I tried also a msleep before this
	 * function, because I thought there could be some timing conditions
	 * but it didn't work. Calling the whole function twice also works.
	 */
	if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
		dev_err(&pdev->dev,
			"Could not read PCI config space for EEPROM Status\n");
		return -EIO;
	}

	/* Determine if the error(s) we care about are present. If they are
	 * present we need to fail.
	 */
	if (eestatus & 0x4C) {
		int write_failed = 0;
		if (pdev->revision == 0x01) {
			int i;
			static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF };

			/* Re-write the first 4 bytes if we have an eeprom
			 * present and the revision id is 1, this fixes the
			 * corruption seen with 1310 B Silicon
			 */
			for (i = 0; i < 3; i++)
				if (eeprom_write(adapter, i, eedata[i]) < 0)
					write_failed = 1;
		}
		if (pdev->revision != 0x01 || write_failed) {
			dev_err(&pdev->dev,
			    "Fatal EEPROM Status Error - 0x%04x\n", eestatus);

			/* This error could mean that there was an error
			 * reading the eeprom or that the eeprom doesn't exist.
			 * We will treat each case the same and not try to
			 * gather additional information that normally would
			 * come from the eeprom, like MAC Address
			 */
			adapter->has_eeprom = 0;
			return -EIO;
		}
	}
	adapter->has_eeprom = 1;

	/* Read the EEPROM for information regarding LED behavior. Refer to
	 * ET1310_phy.c, et131x_xcvr_init(), for its use.
	 */
	eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]);
	eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]);

	if (adapter->eeprom_data[0] != 0xcd)
		/* Disable all optional features */
		adapter->eeprom_data[1] = 0x00;

	return 0;
}

/**
 * et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310.
 * @adapter: pointer to our adapter structure
 */
void et131x_rx_dma_enable(struct et131x_adapter *adapter)
{
	/* Setup the receive dma configuration register for normal operation */
	u32 csr = 0x2000;	/* FBR1 enable */

	if (adapter->rx_ring.fbr[0]->buffsize == 4096)
		csr |= 0x0800;
	else if (adapter->rx_ring.fbr[0]->buffsize == 8192)
		csr |= 0x1000;
	else if (adapter->rx_ring.fbr[0]->buffsize == 16384)
		csr |= 0x1800;
#ifdef USE_FBR0
	csr |= 0x0400;		/* FBR0 enable */
	if (adapter->rx_ring.fbr[1]->buffsize == 256)
		csr |= 0x0100;
	else if (adapter->rx_ring.fbr[1]->buffsize == 512)
		csr |= 0x0200;
	else if (adapter->rx_ring.fbr[1]->buffsize == 1024)
		csr |= 0x0300;
#endif
	writel(csr, &adapter->regs->rxdma.csr);

	csr = readl(&adapter->regs->rxdma.csr);
	if ((csr & 0x00020000) != 0) {
		udelay(5);
		csr = readl(&adapter->regs->rxdma.csr);
		if ((csr & 0x00020000) != 0) {
			dev_err(&adapter->pdev->dev,
			    "RX Dma failed to exit halt state. CSR 0x%08x\n",
			    csr);
		}
	}
}

/**
 * et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310
 * @adapter: pointer to our adapter structure
 */
void et131x_rx_dma_disable(struct et131x_adapter *adapter)
{
	u32 csr;
	/* Setup the receive dma configuration register */
	writel(0x00002001, &adapter->regs->rxdma.csr);
	csr = readl(&adapter->regs->rxdma.csr);
	if ((csr & 0x00020000) == 0) {	/* Check halt status (bit 17) */
		udelay(5);
		csr = readl(&adapter->regs->rxdma.csr);
		if ((csr & 0x00020000) == 0)
			dev_err(&adapter->pdev->dev,
			    "RX Dma failed to enter halt state. CSR 0x%08x\n",
			    csr);
	}
}
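/*
 * Note the symmetry of the two helpers above: bit 17 of rxdma.csr
 * reports the halt state.  et131x_rx_dma_disable() writes 0x00002001
 * (halt request) and expects bit 17 to go high, while
 * et131x_rx_dma_enable() rewrites the ring-size bits with halt clear
 * and expects bit 17 to drop.  Both re-poll once after a 5 us delay
 * before reporting an error.
 */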
/**
 * et131x_tx_dma_enable - re-start of Tx_DMA on the ET1310.
 * @adapter: pointer to our adapter structure
 *
 * Mainly used after a return to the D0 (full-power) state from a lower state.
 */
void et131x_tx_dma_enable(struct et131x_adapter *adapter)
{
	/* Setup the transmit dma configuration register for normal
	 * operation
	 */
	writel(ET_TXDMA_SNGL_EPKT|(PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
	       &adapter->regs->txdma.csr);
}

static inline void add_10bit(u32 *v, int n)
{
	*v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
}

static inline void add_12bit(u32 *v, int n)
{
	*v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
}
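/*
 * Worked example (illustrative) of the helpers above: the hardware ring
 * offsets are 10- or 12-bit indices with a wrap flag kept just above
 * the index bits, so advancing an index must preserve the wrap flag
 * while the low bits roll over.  Assuming INDEX10() masks bits 0-9 and
 * ET_DMA10_WRAP is bit 10:
 *
 *	u32 v = ET_DMA10_WRAP | 1022;	// wrapped once, at entry 1022
 *	add_10bit(&v, 3);		// low bits roll 1022 -> 1
 *	// v is now ET_DMA10_WRAP | 1; the wrap flag was untouched
 */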
/**
 * nic_rx_pkts - Checks the hardware for available packets
 * @adapter: pointer to our adapter
 *
 * Returns rfd, a pointer to our MPRFD.
 *
 * Checks the hardware for available packets, using completion ring
 * If packets are available, it gets an RFD from the recv_list, attaches
 * the packet to it, puts the RFD in the RecvPendList, and also returns
 * the pointer to the RFD.
 */
/* MAC functions */

/**
 * et1310_config_mac_regs1 - Initialize the first part of MAC regs
 * @adapter: pointer to our adapter structure
 */
void et1310_config_mac_regs1(struct et131x_adapter *adapter)
{
	struct mac_regs __iomem *macregs = &adapter->regs->mac;
	u32 station1;
	u32 station2;
	u32 ipg;

	/* First we need to reset everything.  Write to MAC configuration
	 * register 1 to perform reset.
	 */
	writel(0xC00F0000, &macregs->cfg1);

	/* Next lets configure the MAC Inter-packet gap register */
	ipg = 0x38005860;	/* IPG1 0x38 IPG2 0x58 B2B 0x60 */
	ipg |= 0x50 << 8;	/* ifg enforce 0x50 */
	writel(ipg, &macregs->ipg);

	/* Next lets configure the MAC Half Duplex register */
	/* BEB trunc 0xA, Ex Defer, Rexmit 0xF Coll 0x37 */
	writel(0x00A1F037, &macregs->hfdp);

	/* Next lets configure the MAC Interface Control register */
	writel(0, &macregs->if_ctrl);

	/* Let's move on to setting up the mii management configuration */
	writel(0x07, &macregs->mii_mgmt_cfg);	/* Clock reset 0x7 */

	/* Next lets configure the MAC Station Address register.  These
	 * values are read from the EEPROM during initialization and stored
	 * in the adapter structure.  We write what is stored in the adapter
	 * structure to the MAC Station Address registers high and low.  This
	 * station address is used for generating and checking pause control
	 * packets.
	 */
	station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
		   (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
	station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
		   (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
		   (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
		    adapter->addr[2];
	writel(station1, &macregs->station_addr_1);
	writel(station2, &macregs->station_addr_2);

	/* Max ethernet packet in bytes that will be passed by the mac without
	 * being truncated.  Allow the MAC to pass 4 more than our max packet
	 * size.  This is 4 for the Ethernet CRC.
	 *
	 * Packets larger than (registry_jumbo_packet) that do not contain a
	 * VLAN ID will be dropped by the Rx function.
	 */
	writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len);

	/* clear out MAC config reset */
	writel(0, &macregs->cfg1);
}

/**
 * et1310_config_mac_regs2 - Initialize the second part of MAC regs
 * @adapter: pointer to our adapter structure
 */
void et1310_config_mac_regs2(struct et131x_adapter *adapter)
{
	int32_t delay = 0;
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	struct phy_device *phydev = adapter->phydev;
	u32 cfg1;
	u32 cfg2;
	u32 ifctrl;
	u32 ctl;

	ctl = readl(&adapter->regs->txmac.ctl);
	cfg1 = readl(&mac->cfg1);
	cfg2 = readl(&mac->cfg2);
	ifctrl = readl(&mac->if_ctrl);

	/* Set up the if mode bits */
	cfg2 &= ~0x300;
	if (phydev && phydev->speed == SPEED_1000) {
		cfg2 |= 0x200;
		/* Phy mode bit */
		ifctrl &= ~(1 << 24);
	} else {
		cfg2 |= 0x100;
		ifctrl |= (1 << 24);
	}

	/* We need to enable Rx/Tx */
	cfg1 |= CFG1_RX_ENABLE | CFG1_TX_ENABLE | CFG1_TX_FLOW;
	/* Initialize loop back to off */
	cfg1 &= ~(CFG1_LOOPBACK | CFG1_RX_FLOW);
	if (adapter->flowcontrol == FLOW_RXONLY ||
	    adapter->flowcontrol == FLOW_BOTH)
		cfg1 |= CFG1_RX_FLOW;
	writel(cfg1, &mac->cfg1);

	/* Now we need to initialize the MAC Configuration 2 register */
	/* preamble 7, check length, huge frame off, pad crc, crc enable,
	   full duplex off */
	cfg2 |= 0x7016;
	cfg2 &= ~0x0021;

	/* Turn on duplex if needed */
	if (phydev && phydev->duplex == DUPLEX_FULL)
		cfg2 |= 0x01;

	ifctrl &= ~(1 << 26);
	if (phydev && phydev->duplex == DUPLEX_HALF)
		ifctrl |= (1<<26);	/* Enable ghd */

	writel(ifctrl, &mac->if_ctrl);
	writel(cfg2, &mac->cfg2);

	do {
		udelay(10);
		delay++;
		cfg1 = readl(&mac->cfg1);
	} while ((cfg1 & CFG1_WAIT) != CFG1_WAIT && delay < 100);

	if (delay == 100) {
		dev_warn(&adapter->pdev->dev,
		    "Syncd bits did not respond correctly cfg1 word 0x%08x\n",
		    cfg1);
	}

	/* Enable txmac */
	ctl |= 0x09;	/* TX mac enable, FC disable */
	writel(ctl, &adapter->regs->txmac.ctl);

	/* Ready to start the RXDMA/TXDMA engine */
	if (adapter->flags & fMP_ADAPTER_LOWER_POWER) {
		et131x_rx_dma_enable(adapter);
		et131x_tx_dma_enable(adapter);
	}
}

/**
 * et1310_in_phy_coma - check if the device is in phy coma
 * @adapter: pointer to our adapter structure
 *
 * Returns 0 if the device is not in phy coma, 1 if it is in phy coma
 */
int et1310_in_phy_coma(struct et131x_adapter *adapter)
{
	u32 pmcsr;

	pmcsr = readl(&adapter->regs->global.pm_csr);

	return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
}

void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	uint32_t nIndex;
	uint32_t result;
	uint32_t hash1 = 0;
	uint32_t hash2 = 0;
	uint32_t hash3 = 0;
	uint32_t hash4 = 0;
	u32 pm_csr;

	/* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision
	 * the multi-cast LIST.  If it is NOT specified, (and "ALL" is not
	 * specified) then we should pass NO multi-cast addresses to the
	 * driver.
	 */
	if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) {
		/* Loop through our multicast array and set up the device */
		for (nIndex = 0; nIndex < adapter->multicast_addr_count;
		     nIndex++) {
			result = ether_crc(6, adapter->multicast_list[nIndex]);

			result = (result & 0x3F800000) >> 23;

			if (result < 32) {
				hash1 |= (1 << result);
			} else if ((31 < result) && (result < 64)) {
				result -= 32;
				hash2 |= (1 << result);
			} else if ((63 < result) && (result < 96)) {
				result -= 64;
				hash3 |= (1 << result);
			} else {
				result -= 96;
				hash4 |= (1 << result);
			}
		}
	}

	/* Write out the new hash to the device */
	pm_csr = readl(&adapter->regs->global.pm_csr);
	if (!et1310_in_phy_coma(adapter)) {
		writel(hash1, &rxmac->multi_hash1);
		writel(hash2, &rxmac->multi_hash2);
		writel(hash3, &rxmac->multi_hash3);
		writel(hash4, &rxmac->multi_hash4);
	}
}
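/*
 * Worked example of the bucket math above (illustrative): ether_crc()
 * yields a 32-bit CRC, and bits 29:23 (result & 0x3F800000, shifted
 * right by 23) form a 7-bit bucket number, 0-127.  Buckets 0-31 set a
 * bit in hash1, 32-63 in hash2, 64-95 in hash3 and 96-127 in hash4; a
 * CRC whose bits 29:23 equal 70, for instance, sets bit (70 - 64) = 6
 * of hash3.
 */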
1225 */ 1226 writel(0, &rxmac->mask0_word0); 1227 writel(0, &rxmac->mask0_word1); 1228 writel(0, &rxmac->mask0_word2); 1229 writel(0, &rxmac->mask0_word3); 1230 1231 writel(0, &rxmac->mask1_word0); 1232 writel(0, &rxmac->mask1_word1); 1233 writel(0, &rxmac->mask1_word2); 1234 writel(0, &rxmac->mask1_word3); 1235 1236 writel(0, &rxmac->mask2_word0); 1237 writel(0, &rxmac->mask2_word1); 1238 writel(0, &rxmac->mask2_word2); 1239 writel(0, &rxmac->mask2_word3); 1240 1241 writel(0, &rxmac->mask3_word0); 1242 writel(0, &rxmac->mask3_word1); 1243 writel(0, &rxmac->mask3_word2); 1244 writel(0, &rxmac->mask3_word3); 1245 1246 writel(0, &rxmac->mask4_word0); 1247 writel(0, &rxmac->mask4_word1); 1248 writel(0, &rxmac->mask4_word2); 1249 writel(0, &rxmac->mask4_word3); 1250 1251 /* Lets setup the WOL Source Address */ 1252 sa_lo = (adapter->addr[2] << ET_WOL_LO_SA3_SHIFT) | 1253 (adapter->addr[3] << ET_WOL_LO_SA4_SHIFT) | 1254 (adapter->addr[4] << ET_WOL_LO_SA5_SHIFT) | 1255 adapter->addr[5]; 1256 writel(sa_lo, &rxmac->sa_lo); 1257 1258 sa_hi = (u32) (adapter->addr[0] << ET_WOL_HI_SA1_SHIFT) | 1259 adapter->addr[1]; 1260 writel(sa_hi, &rxmac->sa_hi); 1261 1262 /* Disable all Packet Filtering */ 1263 writel(0, &rxmac->pf_ctrl); 1264 1265 /* Let's initialize the Unicast Packet filtering address */ 1266 if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) { 1267 et1310_setup_device_for_unicast(adapter); 1268 pf_ctrl |= 4; /* Unicast filter */ 1269 } else { 1270 writel(0, &rxmac->uni_pf_addr1); 1271 writel(0, &rxmac->uni_pf_addr2); 1272 writel(0, &rxmac->uni_pf_addr3); 1273 } 1274 1275 /* Let's initialize the Multicast hash */ 1276 if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) { 1277 pf_ctrl |= 2; /* Multicast filter */ 1278 et1310_setup_device_for_multicast(adapter); 1279 } 1280 1281 /* Runt packet filtering. Didn't work in version A silicon. */ 1282 pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << 16; 1283 pf_ctrl |= 8; /* Fragment filter */ 1284 1285 if (adapter->registry_jumbo_packet > 8192) 1286 /* In order to transmit jumbo packets greater than 8k, the 1287 * FIFO between RxMAC and RxDMA needs to be reduced in size 1288 * to (16k - Jumbo packet size). In order to implement this, 1289 * we must use "cut through" mode in the RxMAC, which chops 1290 * packets down into segments which are (max_size * 16). In 1291 * this case we selected 256 bytes, since this is the size of 1292 * the PCI-Express TLP's that the 1310 uses. 1293 * 1294 * seg_en on, fc_en off, size 0x10 1295 */ 1296 writel(0x41, &rxmac->mcif_ctrl_max_seg); 1297 else 1298 writel(0, &rxmac->mcif_ctrl_max_seg); 1299 1300 /* Initialize the MCIF water marks */ 1301 writel(0, &rxmac->mcif_water_mark); 1302 1303 /* Initialize the MIF control */ 1304 writel(0, &rxmac->mif_ctrl); 1305 1306 /* Initialize the Space Available Register */ 1307 writel(0, &rxmac->space_avail); 1308 1309 /* Initialize the the mif_ctrl register 1310 * bit 3: Receive code error. One or more nibbles were signaled as 1311 * errors during the reception of the packet. Clear this 1312 * bit in Gigabit, set it in 100Mbit. This was derived 1313 * experimentally at UNH. 1314 * bit 4: Receive CRC error. The packet's CRC did not match the 1315 * internally generated CRC. 1316 * bit 5: Receive length check error. Indicates that frame length 1317 * field value in the packet does not match the actual data 1318 * byte length and is not a type field. 1319 * bit 16: Receive frame truncated. 
	 * bit 17: Drop packet enable
	 */
	if (phydev && phydev->speed == SPEED_100)
		writel(0x30038, &rxmac->mif_ctrl);
	else
		writel(0x30030, &rxmac->mif_ctrl);

	/* Finally we initialize RxMac to be enabled & WOL disabled.  Packet
	 * filter is always enabled since it is where the runt packets are
	 * supposed to be dropped.  For version A silicon, runt packet
	 * dropping doesn't work, so it is disabled in the pf_ctrl register,
	 * but we still leave the packet filter on.
	 */
	writel(pf_ctrl, &rxmac->pf_ctrl);
	writel(0x9, &rxmac->ctrl);
}

void et1310_config_txmac_regs(struct et131x_adapter *adapter)
{
	struct txmac_regs __iomem *txmac = &adapter->regs->txmac;

	/* We need to update the Control Frame Parameters
	 * cfpt - control frame pause timer set to 64 (0x40)
	 * cfep - control frame extended pause timer set to 0x0
	 */
	if (adapter->flowcontrol == FLOW_NONE)
		writel(0, &txmac->cf_param);
	else
		writel(0x40, &txmac->cf_param);
}

void et1310_config_macstat_regs(struct et131x_adapter *adapter)
{
	struct macstat_regs __iomem *macstat =
		&adapter->regs->macstat;

	/* Next we need to initialize all the macstat registers to zero on
	 * the device.
	 */
	writel(0, &macstat->txrx_0_64_byte_frames);
	writel(0, &macstat->txrx_65_127_byte_frames);
	writel(0, &macstat->txrx_128_255_byte_frames);
	writel(0, &macstat->txrx_256_511_byte_frames);
	writel(0, &macstat->txrx_512_1023_byte_frames);
	writel(0, &macstat->txrx_1024_1518_byte_frames);
	writel(0, &macstat->txrx_1519_1522_gvln_frames);

	writel(0, &macstat->rx_bytes);
	writel(0, &macstat->rx_packets);
	writel(0, &macstat->rx_fcs_errs);
	writel(0, &macstat->rx_multicast_packets);
	writel(0, &macstat->rx_broadcast_packets);
	writel(0, &macstat->rx_control_frames);
	writel(0, &macstat->rx_pause_frames);
	writel(0, &macstat->rx_unknown_opcodes);
	writel(0, &macstat->rx_align_errs);
	writel(0, &macstat->rx_frame_len_errs);
	writel(0, &macstat->rx_code_errs);
	writel(0, &macstat->rx_carrier_sense_errs);
	writel(0, &macstat->rx_undersize_packets);
	writel(0, &macstat->rx_oversize_packets);
	writel(0, &macstat->rx_fragment_packets);
	writel(0, &macstat->rx_jabbers);
	writel(0, &macstat->rx_drops);

	writel(0, &macstat->tx_bytes);
	writel(0, &macstat->tx_packets);
	writel(0, &macstat->tx_multicast_packets);
	writel(0, &macstat->tx_broadcast_packets);
	writel(0, &macstat->tx_pause_frames);
	writel(0, &macstat->tx_deferred);
	writel(0, &macstat->tx_excessive_deferred);
	writel(0, &macstat->tx_single_collisions);
	writel(0, &macstat->tx_multiple_collisions);
	writel(0, &macstat->tx_late_collisions);
	writel(0, &macstat->tx_excessive_collisions);
	writel(0, &macstat->tx_total_collisions);
	writel(0, &macstat->tx_pause_honored_frames);
	writel(0, &macstat->tx_drops);
	writel(0, &macstat->tx_jabbers);
	writel(0, &macstat->tx_fcs_errs);
	writel(0, &macstat->tx_control_frames);
	writel(0, &macstat->tx_oversize_frames);
	writel(0, &macstat->tx_undersize_frames);
	writel(0, &macstat->tx_fragments);
	writel(0, &macstat->carry_reg1);
	writel(0, &macstat->carry_reg2);

	/* Unmask any counters that we want to track the overflow of.
	 * Initially this will be all counters.
	 * It may become clear later that we do not need to track all
	 * counters.
	 */
	writel(0xFFFFBE32, &macstat->carry_reg1_mask);
	writel(0xFFFE7E8B, &macstat->carry_reg2_mask);
}
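/*
 * How the carry masks above are consumed (summary): when an unmasked
 * counter wraps, its bit is set in carry_reg1/carry_reg2 and the
 * MAC_STAT interrupt fires; et1310_handle_macstat_interrupt() further
 * down then adds one full counter revolution (COUNTER_WRAP_16_BIT or
 * COUNTER_WRAP_12_BIT) to the matching host-side counter.
 */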
/**
 * et131x_phy_mii_read - Read from the PHY through the MII Interface on the MAC
 * @adapter: pointer to our private adapter structure
 * @addr: the address of the transceiver
 * @reg: the register to read
 * @value: pointer to a 16-bit value in which the value will be stored
 *
 * Returns 0 on success, errno on failure (as defined in errno.h)
 */
int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
			u8 reg, u16 *value)
{
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	int status = 0;
	u32 delay = 0;
	u32 mii_addr;
	u32 mii_cmd;
	u32 mii_indicator;

	/* Save a local copy of the registers we are dealing with so we can
	 * set them back
	 */
	mii_addr = readl(&mac->mii_mgmt_addr);
	mii_cmd = readl(&mac->mii_mgmt_cmd);

	/* Stop the current operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* Set up the register we need to read from on the correct PHY */
	writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

	writel(0x1, &mac->mii_mgmt_cmd);

	do {
		udelay(50);
		delay++;
		mii_indicator = readl(&mac->mii_mgmt_indicator);
	} while ((mii_indicator & MGMT_WAIT) && delay < 50);

	/* If we hit the max delay, we could not read the register */
	if (delay == 50) {
		dev_warn(&adapter->pdev->dev,
			 "reg 0x%08x could not be read\n", reg);
		dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
			 mii_indicator);

		status = -EIO;
	}

	/* If we hit here we were able to read the register and we need to
	 * return the value to the caller */
	*value = readl(&mac->mii_mgmt_stat) & 0xFFFF;

	/* Stop the read operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* set the registers we touched back to the state at which we entered
	 * this function
	 */
	writel(mii_addr, &mac->mii_mgmt_addr);
	writel(mii_cmd, &mac->mii_mgmt_cmd);

	return status;
}

int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
{
	struct phy_device *phydev = adapter->phydev;

	if (!phydev)
		return -EIO;

	return et131x_phy_mii_read(adapter, phydev->addr, reg, value);
}

/**
 * et131x_mii_write - Write to a PHY register through the MII interface of the MAC
 * @adapter: pointer to our private adapter structure
 * @reg: the register to write
 * @value: 16-bit value to write
 *
 * FIXME: one caller in netdev still
 *
 * Return 0 on success, errno on failure (as defined in errno.h)
 */
int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
{
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	struct phy_device *phydev = adapter->phydev;
	int status = 0;
	u8 addr;
	u32 delay = 0;
	u32 mii_addr;
	u32 mii_cmd;
	u32 mii_indicator;

	if (!phydev)
		return -EIO;

	addr = phydev->addr;

	/* Save a local copy of the registers we are dealing with so we can
	 * set them back
	 */
	mii_addr = readl(&mac->mii_mgmt_addr);
	mii_cmd = readl(&mac->mii_mgmt_cmd);

	/* Stop the current operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* Set up the register we need to write to on the correct PHY */
	writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

	/* Add the value to write to the registers to the mac */
	writel(value, &mac->mii_mgmt_ctrl);

	do {
		udelay(50);
		delay++;
		mii_indicator = readl(&mac->mii_mgmt_indicator);
	} while ((mii_indicator & MGMT_BUSY) && delay < 100);

	/* If we hit the max delay, we could not write the register */
	if (delay == 100) {
		u16 tmp;

		dev_warn(&adapter->pdev->dev,
			 "reg 0x%08x could not be written", reg);
		dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
			 mii_indicator);
		dev_warn(&adapter->pdev->dev, "command is 0x%08x\n",
			 readl(&mac->mii_mgmt_cmd));

		et131x_mii_read(adapter, reg, &tmp);

		status = -EIO;
	}
	/* Stop the write operation */
	writel(0, &mac->mii_mgmt_cmd);

	/*
	 * set the registers we touched back to the state at which we entered
	 * this function
	 */
	writel(mii_addr, &mac->mii_mgmt_addr);
	writel(mii_cmd, &mac->mii_mgmt_cmd);

	return status;
}

/* Still used from _mac for BIT_READ */
void et1310_phy_access_mii_bit(struct et131x_adapter *adapter, u16 action,
			       u16 regnum, u16 bitnum, u8 *value)
{
	u16 reg;
	u16 mask = 0x0001 << bitnum;

	/* Read the requested register */
	et131x_mii_read(adapter, regnum, &reg);

	switch (action) {
	case TRUEPHY_BIT_READ:
		*value = (reg & mask) >> bitnum;
		break;

	case TRUEPHY_BIT_SET:
		et131x_mii_write(adapter, regnum, reg | mask);
		break;

	case TRUEPHY_BIT_CLEAR:
		et131x_mii_write(adapter, regnum, reg & ~mask);
		break;

	default:
		break;
	}
}

void et1310_config_flow_control(struct et131x_adapter *adapter)
{
	struct phy_device *phydev = adapter->phydev;

	if (phydev->duplex == DUPLEX_HALF) {
		adapter->flowcontrol = FLOW_NONE;
	} else {
		u8 remote_pause, remote_async_pause;

		et1310_phy_access_mii_bit(adapter,
				TRUEPHY_BIT_READ, 5, 10, &remote_pause);
		et1310_phy_access_mii_bit(adapter,
				TRUEPHY_BIT_READ, 5, 11,
				&remote_async_pause);

		if ((remote_pause == TRUEPHY_BIT_SET) &&
		    (remote_async_pause == TRUEPHY_BIT_SET)) {
			adapter->flowcontrol = adapter->wanted_flow;
		} else if ((remote_pause == TRUEPHY_BIT_SET) &&
			   (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
			if (adapter->wanted_flow == FLOW_BOTH)
				adapter->flowcontrol = FLOW_BOTH;
			else
				adapter->flowcontrol = FLOW_NONE;
		} else if ((remote_pause == TRUEPHY_BIT_CLEAR) &&
			   (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
			adapter->flowcontrol = FLOW_NONE;
		} else {/* if (remote_pause == TRUEPHY_CLEAR_BIT &&
			       remote_async_pause == TRUEPHY_SET_BIT) */
			if (adapter->wanted_flow == FLOW_BOTH)
				adapter->flowcontrol = FLOW_RXONLY;
			else
				adapter->flowcontrol = FLOW_NONE;
		}
	}
}

/**
 * et1310_update_macstat_host_counters - Update the local copy of the statistics
 * @adapter: pointer to the adapter structure
 */
void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
{
	struct ce_stats *stats = &adapter->stats;
	struct macstat_regs __iomem *macstat =
		&adapter->regs->macstat;

	stats->tx_collisions	       += readl(&macstat->tx_total_collisions);
	stats->tx_first_collisions     += readl(&macstat->tx_single_collisions);
	stats->tx_deferred	       += readl(&macstat->tx_deferred);
	stats->tx_excessive_collisions +=
				readl(&macstat->tx_multiple_collisions);
	stats->tx_late_collisions      += readl(&macstat->tx_late_collisions);
	stats->tx_underflows	       += readl(&macstat->tx_undersize_frames);
	stats->tx_max_pkt_errs	       += readl(&macstat->tx_oversize_frames);

	stats->rx_align_errs	       += readl(&macstat->rx_align_errs);
	stats->rx_crc_errs	       += readl(&macstat->rx_code_errs);
	stats->rcvd_pkts_dropped       += readl(&macstat->rx_drops);
	stats->rx_overflows	       += readl(&macstat->rx_oversize_packets);
	stats->rx_code_violations      += readl(&macstat->rx_fcs_errs);
	stats->rx_length_errs	       += readl(&macstat->rx_frame_len_errs);
	stats->rx_other_errs	       += readl(&macstat->rx_fragment_packets);
}

/**
 * et1310_handle_macstat_interrupt
 * @adapter: pointer to the adapter structure
 *
 * One of the MACSTAT counters has wrapped.  Update the local copy of
 * the statistics held in the adapter structure, checking the "wrap"
 * bit for each counter.
 */
void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
{
	u32 carry_reg1;
	u32 carry_reg2;

	/* Read the interrupt bits from the register(s).  These are Clear On
	 * Write.
	 */
	carry_reg1 = readl(&adapter->regs->macstat.carry_reg1);
	carry_reg2 = readl(&adapter->regs->macstat.carry_reg2);

	writel(carry_reg1, &adapter->regs->macstat.carry_reg1);
	writel(carry_reg2, &adapter->regs->macstat.carry_reg2);

	/* We need to update the host copy of all the MAC_STAT counters.
	 * For each counter, check its overflow bit.  If the overflow bit is
	 * set, then increment the host version of the count by one complete
	 * revolution of the counter.  This routine is called when the counter
	 * block indicates that one of the counters has wrapped.
	 */
	if (carry_reg1 & (1 << 14))
		adapter->stats.rx_code_violations	+= COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 8))
		adapter->stats.rx_align_errs		+= COUNTER_WRAP_12_BIT;
	if (carry_reg1 & (1 << 7))
		adapter->stats.rx_length_errs		+= COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 2))
		adapter->stats.rx_other_errs		+= COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 6))
		adapter->stats.rx_crc_errs		+= COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 3))
		adapter->stats.rx_overflows		+= COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 0))
		adapter->stats.rcvd_pkts_dropped	+= COUNTER_WRAP_16_BIT;
	if (carry_reg2 & (1 << 16))
		adapter->stats.tx_max_pkt_errs		+= COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 15))
		adapter->stats.tx_underflows		+= COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 6))
		adapter->stats.tx_first_collisions	+= COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 8))
		adapter->stats.tx_deferred		+= COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 5))
		adapter->stats.tx_excessive_collisions	+= COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 4))
		adapter->stats.tx_late_collisions	+= COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 2))
		adapter->stats.tx_collisions		+= COUNTER_WRAP_12_BIT;
}

/* PHY functions */

int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);
	u16 value;
	int ret;

	ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value);

	if (ret < 0)
		return ret;
	else
		return value;
}

int et131x_mdio_write(struct mii_bus *bus, int phy_addr, int reg, u16 value)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	return et131x_mii_write(adapter, reg, value);
}

int et131x_mdio_reset(struct mii_bus *bus)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	et131x_mii_write(adapter, MII_BMCR, BMCR_RESET);

	return 0;
}

/**
 * et1310_phy_power_down - PHY power control
 * @adapter: device to control
 * @down: true for off/false for back on
 *
 * one hundred, ten, one thousand megs
 * How would you like to have your LAN accessed
 * Can't you see that this code processed
 * Phy power, phy power..
 */
void et1310_phy_power_down(struct et131x_adapter *adapter, bool down)
{
	u16 data;

	et131x_mii_read(adapter, MII_BMCR, &data);
	data &= ~BMCR_PDOWN;
	if (down)
		data |= BMCR_PDOWN;
	et131x_mii_write(adapter, MII_BMCR, data);
}
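/*
 * Usage sketch (illustrative): BMCR_PDOWN is the standard MII power-
 * down control bit, so cycling the PHY off and back on is simply:
 *
 *	et1310_phy_power_down(adapter, true);	// set BMCR_PDOWN
 *	et1310_phy_power_down(adapter, false);	// clear it again
 */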
Bad behavior when link status
1788 * and auto neg are both set: we run into a nested interrupt problem
1789 */
1790 imr |= (ET_PHY_INT_MASK_AUTONEGSTAT |
1791 ET_PHY_INT_MASK_LINKSTAT |
1792 ET_PHY_INT_MASK_ENABLE);
1793
1794 et131x_mii_write(adapter, PHY_INTERRUPT_MASK, imr);
1795
1796 /* Set the LED behavior such that LED 1 indicates speed (off =
1797 * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates
1798 * link and activity (on for link, blink off for activity).
1799 *
1800 * NOTE: Some customizations have been added here for specific
1801 * vendors; The LED behavior is now determined by vendor data in the
1802 * EEPROM. However, the above description is the default.
1803 */
1804 if ((adapter->eeprom_data[1] & 0x4) == 0) {
1805 et131x_mii_read(adapter, PHY_LED_2, &lcr2);
1806
1807 lcr2 &= (ET_LED2_LED_100TX | ET_LED2_LED_1000T);
1808 lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT);
1809
1810 if ((adapter->eeprom_data[1] & 0x8) == 0)
1811 lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT);
1812 else
1813 lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT);
1814
1815 et131x_mii_write(adapter, PHY_LED_2, lcr2);
1816 }
1817 }
1818
1819 /**
1820 * et131x_configure_global_regs - configure JAGCore global regs
1821 * @adapter: pointer to our adapter structure
1822 *
1823 * Used to configure the global registers on the JAGCore
1824 */
1825 void et131x_configure_global_regs(struct et131x_adapter *adapter)
1826 {
1827 struct global_regs __iomem *regs = &adapter->regs->global;
1828
1829 writel(0, &regs->rxq_start_addr);
1830 writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr);
1831
1832 if (adapter->registry_jumbo_packet < 2048) {
1833 /* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word
1834 * block of RAM that the driver can split between Tx
1835 * and Rx as it desires. Our default is to split it
1836 * 50/50:
1837 */
1838 writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr);
1839 writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr);
1840 } else if (adapter->registry_jumbo_packet < 8192) {
1841 /* For jumbo packets > 2k but < 8k, split 50-50. */
1842 writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr);
1843 writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr);
1844 } else {
1845 /* 9216 is the only packet size greater than 8k that
1846 * is available. The Tx buffer has to be big enough
1847 * for one whole packet on the Tx side. We'll make
1848 * the Tx 9408, and give the rest to Rx
1849 */
1850 writel(0x01b3, &regs->rxq_end_addr);
1851 writel(0x01b4, &regs->txq_start_addr);
1852 }
1853
1854 /* Initialize the loopback register. Disable all loopbacks. */
1855 writel(0, &regs->loopback);
1856
1857 /* MSI Register */
1858 writel(0, &regs->msi_config);
1859
1860 /* By default, disable the watchdog timer. It will be enabled when
1861 * a packet is queued.
1862 */
1863 writel(0, &regs->watchdog_timer);
1864 }
1865
1866 /* PM functions */
1867
1868 /**
1869 * et131x_config_rx_dma_regs - Start of Rx_DMA init sequence
1870 * @adapter: pointer to our adapter structure
1871 */
1872 void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
1873 {
1874 struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
1875 struct rx_ring *rx_local = &adapter->rx_ring;
1876 struct fbr_desc *fbr_entry;
1877 u32 entry;
1878 u32 psr_num_des;
1879 unsigned long flags;
1880
1881 /* Halt RXDMA to perform the reconfigure.
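*
* Editorial aside: et131x_rx_dma_disable() is defined elsewhere in
* this file. Conceptually it sets the halt bit in rxdma.csr and then
* waits for the engine to acknowledge, along the lines of this
* hypothetical sketch (the bit names are invented for illustration;
* they are not the driver's real defines):
*
*	u32 csr = readl(&adapter->regs->rxdma.csr);
*	writel(csr | RXDMA_CSR_HALT, &adapter->regs->rxdma.csr);
*	while (!(readl(&adapter->regs->rxdma.csr) & RXDMA_CSR_HALTED))
*		udelay(10);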
*/ 1882 et131x_rx_dma_disable(adapter); 1883 1884 /* Load the completion writeback physical address 1885 * 1886 * NOTE : dma_alloc_coherent(), used above to alloc DMA regions, 1887 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses 1888 * are ever returned, make sure the high part is retrieved here 1889 * before storing the adjusted address. 1890 */ 1891 writel((u32) ((u64)rx_local->rx_status_bus >> 32), 1892 &rx_dma->dma_wb_base_hi); 1893 writel((u32) rx_local->rx_status_bus, &rx_dma->dma_wb_base_lo); 1894 1895 memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block)); 1896 1897 /* Set the address and parameters of the packet status ring into the 1898 * 1310's registers 1899 */ 1900 writel((u32) ((u64)rx_local->ps_ring_physaddr >> 32), 1901 &rx_dma->psr_base_hi); 1902 writel((u32) rx_local->ps_ring_physaddr, &rx_dma->psr_base_lo); 1903 writel(rx_local->psr_num_entries - 1, &rx_dma->psr_num_des); 1904 writel(0, &rx_dma->psr_full_offset); 1905 1906 psr_num_des = readl(&rx_dma->psr_num_des) & 0xFFF; 1907 writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100, 1908 &rx_dma->psr_min_des); 1909 1910 spin_lock_irqsave(&adapter->rcv_lock, flags); 1911 1912 /* These local variables track the PSR in the adapter structure */ 1913 rx_local->local_psr_full = 0; 1914 1915 /* Now's the best time to initialize FBR1 contents */ 1916 fbr_entry = (struct fbr_desc *) rx_local->fbr[0]->ring_virtaddr; 1917 for (entry = 0; entry < rx_local->fbr[0]->num_entries; entry++) { 1918 fbr_entry->addr_hi = rx_local->fbr[0]->bus_high[entry]; 1919 fbr_entry->addr_lo = rx_local->fbr[0]->bus_low[entry]; 1920 fbr_entry->word2 = entry; 1921 fbr_entry++; 1922 } 1923 1924 /* Set the address and parameters of Free buffer ring 1 (and 0 if 1925 * required) into the 1310's registers 1926 */ 1927 writel((u32) (rx_local->fbr[0]->real_physaddr >> 32), 1928 &rx_dma->fbr1_base_hi); 1929 writel((u32) rx_local->fbr[0]->real_physaddr, &rx_dma->fbr1_base_lo); 1930 writel(rx_local->fbr[0]->num_entries - 1, &rx_dma->fbr1_num_des); 1931 writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset); 1932 1933 /* This variable tracks the free buffer ring 1 full position, so it 1934 * has to match the above. 1935 */ 1936 rx_local->fbr[0]->local_full = ET_DMA10_WRAP; 1937 writel( 1938 ((rx_local->fbr[0]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1, 1939 &rx_dma->fbr1_min_des); 1940 1941#ifdef USE_FBR0 1942 /* Now's the best time to initialize FBR0 contents */ 1943 fbr_entry = (struct fbr_desc *) rx_local->fbr[1]->ring_virtaddr; 1944 for (entry = 0; entry < rx_local->fbr[1]->num_entries; entry++) { 1945 fbr_entry->addr_hi = rx_local->fbr[1]->bus_high[entry]; 1946 fbr_entry->addr_lo = rx_local->fbr[1]->bus_low[entry]; 1947 fbr_entry->word2 = entry; 1948 fbr_entry++; 1949 } 1950 1951 writel((u32) (rx_local->fbr[1]->real_physaddr >> 32), 1952 &rx_dma->fbr0_base_hi); 1953 writel((u32) rx_local->fbr[1]->real_physaddr, &rx_dma->fbr0_base_lo); 1954 writel(rx_local->fbr[1]->num_entries - 1, &rx_dma->fbr0_num_des); 1955 writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset); 1956 1957 /* This variable tracks the free buffer ring 0 full position, so it 1958 * has to match the above. 1959 */ 1960 rx_local->fbr[1]->local_full = ET_DMA10_WRAP; 1961 writel( 1962 ((rx_local->fbr[1]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1, 1963 &rx_dma->fbr0_min_des); 1964#endif 1965 1966 /* Program the number of packets we will receive before generating an 1967 * interrupt. 1968 * For version B silicon, this value gets updated once autoneg is 1969 *complete. 
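*
* Editorial aside: num_pkt_done and max_pkt_time (written just below)
* are the usual pair of interrupt-coalescing knobs: an Rx interrupt is
* raised either after num_pkt_done packets have completed or, failing
* that, when max_pkt_time expires. As a worked example with a
* hypothetical threshold of 16 packets, a burst of 40 frames would
* raise two count-triggered interrupts and leave the trailing eight
* frames to be signalled by the timer - though, as noted below, the
* time-based side misbehaves on this hardware.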
1970 */
1971 writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);
1972
1973 /* The "time_done" is not working correctly to coalesce interrupts
1974 * after a given time period, but rather is giving us an interrupt
1975 * regardless of whether we have received packets.
1976 * This value gets updated once autoneg is complete.
1977 */
1978 writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);
1979
1980 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
1981 }
1982
1983 /**
1984 * et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore.
1985 * @adapter: pointer to our private adapter structure
1986 *
1987 * Configure the transmit engine with the ring buffers we have created
1988 * and prepare it for use.
1989 */
1990 void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
1991 {
1992 struct txdma_regs __iomem *txdma = &adapter->regs->txdma;
1993
1994 /* Load the hardware with the start of the transmit descriptor ring. */
1995 writel((u32) ((u64)adapter->tx_ring.tx_desc_ring_pa >> 32),
1996 &txdma->pr_base_hi);
1997 writel((u32) adapter->tx_ring.tx_desc_ring_pa,
1998 &txdma->pr_base_lo);
1999
2000 /* Initialise the transmit DMA engine */
2001 writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);
2002
2003 /* Load the completion writeback physical address */
2004 writel((u32)((u64)adapter->tx_ring.tx_status_pa >> 32),
2005 &txdma->dma_wb_base_hi);
2006 writel((u32)adapter->tx_ring.tx_status_pa, &txdma->dma_wb_base_lo);
2007
2008 *adapter->tx_ring.tx_status = 0;
2009
2010 writel(0, &txdma->service_request);
2011 adapter->tx_ring.send_idx = 0;
2012 }
2013
2014 /**
2015 * et131x_adapter_setup - Set the adapter up as per cassini+ documentation
2016 * @adapter: pointer to our private adapter structure
2017 *
2018 * Brings the device to a fully operational state; nothing is returned.
2019 */
2020 void et131x_adapter_setup(struct et131x_adapter *adapter)
2021 {
2022 /* Configure the JAGCore */
2023 et131x_configure_global_regs(adapter);
2024
2025 et1310_config_mac_regs1(adapter);
2026
2027 /* Configure the MMC registers */
2028 /* All we need to do is initialize the Memory Control Register */
2029 writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl);
2030
2031 et1310_config_rxmac_regs(adapter);
2032 et1310_config_txmac_regs(adapter);
2033
2034 et131x_config_rx_dma_regs(adapter);
2035 et131x_config_tx_dma_regs(adapter);
2036
2037 et1310_config_macstat_regs(adapter);
2038
2039 et1310_phy_power_down(adapter, 0);
2040 et131x_xcvr_init(adapter);
2041 }
2042
2043 /**
2044 * et131x_soft_reset - Issue a soft reset to the hardware, complete for ET1310
2045 * @adapter: pointer to our private adapter structure
2046 */
2047 void et131x_soft_reset(struct et131x_adapter *adapter)
2048 {
2049 /* Disable MAC Core */
2050 writel(0xc00f0000, &adapter->regs->mac.cfg1);
2051
2052 /* Set everything to a reset value */
2053 writel(0x7F, &adapter->regs->global.sw_reset);
2054 writel(0x000f0000, &adapter->regs->mac.cfg1);
2055 writel(0x00000000, &adapter->regs->mac.cfg1);
2056 }
2057
2058 /**
2059 * et131x_enable_interrupts - enable interrupts
2060 * @adapter: et131x device
2061 *
2062 * Enable the appropriate interrupts on the ET131x according to our
2063 * configuration
2064 */
2065 void et131x_enable_interrupts(struct et131x_adapter *adapter)
2066 {
2067 u32 mask;
2068
2069 /* Enable all global interrupts */
2070 if (adapter->flowcontrol == FLOW_TXONLY ||
2071 adapter->flowcontrol == FLOW_BOTH)
2072 mask = INT_MASK_ENABLE;
2073 else
2074 mask = INT_MASK_ENABLE_NO_FLOW;
2075
2076 writel(mask, &adapter->regs->global.int_mask);
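/* Editorial aside (illustrative only): callers typically bracket a
* reconfiguration of the device with the two interrupt helpers:
*
*	et131x_disable_interrupts(adapter);
*	...reprogram JAGCore registers...
*	et131x_enable_interrupts(adapter);
*
* et131x_disable_txrx()/et131x_enable_txrx() below apply the same
* bracketing idea to the whole datapath.
*/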
2077 }
2078
2079 /**
2080 * et131x_disable_interrupts - interrupt disable
2081 * @adapter: et131x device
2082 *
2083 * Block all interrupts from the et131x device at the device itself
2084 */
2085 void et131x_disable_interrupts(struct et131x_adapter *adapter)
2086 {
2087 /* Disable all global interrupts */
2088 writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
2089 }
2090
2091 /**
2092 * et131x_tx_dma_disable - Stop Tx_DMA on the ET1310
2093 * @adapter: pointer to our adapter structure
2094 */
2095 void et131x_tx_dma_disable(struct et131x_adapter *adapter)
2096 {
2097 /* Set up the transmit DMA configuration register */
2098 writel(ET_TXDMA_CSR_HALT|ET_TXDMA_SNGL_EPKT,
2099 &adapter->regs->txdma.csr);
2100 }
2101
2102 /**
2103 * et131x_enable_txrx - Enable tx/rx queues
2104 * @netdev: device to be enabled
2105 */
2106 void et131x_enable_txrx(struct net_device *netdev)
2107 {
2108 struct et131x_adapter *adapter = netdev_priv(netdev);
2109
2110 /* Enable the Tx and Rx DMA engines (if not already enabled) */
2111 et131x_rx_dma_enable(adapter);
2112 et131x_tx_dma_enable(adapter);
2113
2114 /* Enable device interrupts */
2115 if (adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE)
2116 et131x_enable_interrupts(adapter);
2117
2118 /* We're ready to move some data, so start the queue */
2119 netif_start_queue(netdev);
2120 }
2121
2122 /**
2123 * et131x_disable_txrx - Disable tx/rx queues
2124 * @netdev: device to be disabled
2125 */
2126 void et131x_disable_txrx(struct net_device *netdev)
2127 {
2128 struct et131x_adapter *adapter = netdev_priv(netdev);
2129
2130 /* First thing is to stop the queue */
2131 netif_stop_queue(netdev);
2132
2133 /* Stop the Tx and Rx DMA engines */
2134 et131x_rx_dma_disable(adapter);
2135 et131x_tx_dma_disable(adapter);
2136
2137 /* Disable device interrupts */
2138 et131x_disable_interrupts(adapter);
2139 }
2140
2141 /**
2142 * et131x_init_send - Initialize send data structures
2143 * @adapter: pointer to our private adapter structure
2144 */
2145 void et131x_init_send(struct et131x_adapter *adapter)
2146 {
2147 struct tcb *tcb;
2148 u32 ct;
2149 struct tx_ring *tx_ring;
2150
2151 /* Setup some convenience pointers */
2152 tx_ring = &adapter->tx_ring;
2153 tcb = adapter->tx_ring.tcb_ring;
2154
2155 tx_ring->tcb_qhead = tcb;
2156
2157 memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);
2158
2159 /* Go through and set up each TCB */
2160 for (ct = 0; ct++ < NUM_TCB; tcb++)
2161 /* Set the link pointer in HW TCB to the next TCB in the
2162 * chain
2163 */
2164 tcb->next = tcb + 1;
2165
2166 /* Set the tail pointer */
2167 tcb--;
2168 tx_ring->tcb_qtail = tcb;
2169 tcb->next = NULL;
2170 /* Curr send queue should now be empty */
2171 tx_ring->send_head = NULL;
2172 tx_ring->send_tail = NULL;
2173 }
2174
2175 /**
2176 * et1310_enable_phy_coma - called when network cable is unplugged
2177 * @adapter: pointer to our adapter structure
2178 *
2179 * The driver receives a PHY status change interrupt while in D0 and checks
2180 * that phy_status is down.
2181 * 2182 * -- gate off JAGCore; 2183 * -- set gigE PHY in Coma mode 2184 * -- wake on phy_interrupt; Perform software reset JAGCore, 2185 * re-initialize jagcore and gigE PHY 2186 * 2187 * Add D0-ASPM-PhyLinkDown Support: 2188 * -- while in D0, when there is a phy_interrupt indicating phy link 2189 * down status, call the MPSetPhyComa routine to enter this active 2190 * state power saving mode 2191 * -- while in D0-ASPM-PhyLinkDown mode, when there is a phy_interrupt 2192 * indicating linkup status, call the MPDisablePhyComa routine to 2193 * restore JAGCore and gigE PHY 2194 */ 2195void et1310_enable_phy_coma(struct et131x_adapter *adapter) 2196{ 2197 unsigned long flags; 2198 u32 pmcsr; 2199 2200 pmcsr = readl(&adapter->regs->global.pm_csr); 2201 2202 /* Save the GbE PHY speed and duplex modes. Need to restore this 2203 * when cable is plugged back in 2204 */ 2205 /* 2206 * TODO - when PM is re-enabled, check if we need to 2207 * perform a similar task as this - 2208 * adapter->pdown_speed = adapter->ai_force_speed; 2209 * adapter->pdown_duplex = adapter->ai_force_duplex; 2210 */ 2211 2212 /* Stop sending packets. */ 2213 spin_lock_irqsave(&adapter->send_hw_lock, flags); 2214 adapter->flags |= fMP_ADAPTER_LOWER_POWER; 2215 spin_unlock_irqrestore(&adapter->send_hw_lock, flags); 2216 2217 /* Wait for outstanding Receive packets */ 2218 2219 et131x_disable_txrx(adapter->netdev); 2220 2221 /* Gate off JAGCore 3 clock domains */ 2222 pmcsr &= ~ET_PMCSR_INIT; 2223 writel(pmcsr, &adapter->regs->global.pm_csr); 2224 2225 /* Program gigE PHY in to Coma mode */ 2226 pmcsr |= ET_PM_PHY_SW_COMA; 2227 writel(pmcsr, &adapter->regs->global.pm_csr); 2228} 2229 2230/** 2231 * et1310_disable_phy_coma - Disable the Phy Coma Mode 2232 * @adapter: pointer to our adapter structure 2233 */ 2234void et1310_disable_phy_coma(struct et131x_adapter *adapter) 2235{ 2236 u32 pmcsr; 2237 2238 pmcsr = readl(&adapter->regs->global.pm_csr); 2239 2240 /* Disable phy_sw_coma register and re-enable JAGCore clocks */ 2241 pmcsr |= ET_PMCSR_INIT; 2242 pmcsr &= ~ET_PM_PHY_SW_COMA; 2243 writel(pmcsr, &adapter->regs->global.pm_csr); 2244 2245 /* Restore the GbE PHY speed and duplex modes; 2246 * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY 2247 */ 2248 /* TODO - when PM is re-enabled, check if we need to 2249 * perform a similar task as this - 2250 * adapter->ai_force_speed = adapter->pdown_speed; 2251 * adapter->ai_force_duplex = adapter->pdown_duplex; 2252 */ 2253 2254 /* Re-initialize the send structures */ 2255 et131x_init_send(adapter); 2256 2257 /* Bring the device back to the state it was during init prior to 2258 * autonegotiation being complete. This way, when we get the auto-neg 2259 * complete interrupt, we can complete init by calling ConfigMacREGS2. 2260 */ 2261 et131x_soft_reset(adapter); 2262 2263 /* setup et1310 as per the documentation ?? */ 2264 et131x_adapter_setup(adapter); 2265 2266 /* Allow Tx to restart */ 2267 adapter->flags &= ~fMP_ADAPTER_LOWER_POWER; 2268 2269 et131x_enable_txrx(adapter->netdev); 2270} 2271 2272/* RX functions */ 2273 2274static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit) 2275{ 2276 u32 tmp_free_buff_ring = *free_buff_ring; 2277 tmp_free_buff_ring++; 2278 /* This works for all cases where limit < 1024. 
The 1023 case
2279 works because 1023++ is 1024 which means the if condition is not
2280 taken but the carry of the bit into the wrap bit toggles the wrap
2281 value correctly */
2282 if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) {
2283 tmp_free_buff_ring &= ~ET_DMA10_MASK;
2284 tmp_free_buff_ring ^= ET_DMA10_WRAP;
2285 }
2286 /* For the 1023 case */
2287 tmp_free_buff_ring &= (ET_DMA10_MASK|ET_DMA10_WRAP);
2288 *free_buff_ring = tmp_free_buff_ring;
2289 return tmp_free_buff_ring;
2290 }
2291
2292 /**
2293 * et131x_align_allocated_memory - Align allocated memory on a given boundary
2294 * @adapter: pointer to our adapter structure
2295 * @phys_addr: pointer to Physical address
2296 * @offset: pointer to the offset variable
2297 * @mask: alignment mask, e.g. 0x0FFF for a 4K boundary
2298 */
2299 void et131x_align_allocated_memory(struct et131x_adapter *adapter,
2300 uint64_t *phys_addr,
2301 uint64_t *offset, uint64_t mask)
2302 {
2303 uint64_t new_addr;
2304
2305 *offset = 0;
2306
2307 new_addr = *phys_addr & ~mask;
2308
2309 if (new_addr != *phys_addr) {
2310 /* Move to next aligned block */
2311 new_addr += mask + 1;
2312 /* Return offset for adjusting virt addr */
2313 *offset = new_addr - *phys_addr;
2314 /* Return new physical address */
2315 *phys_addr = new_addr;
2316 }
2317 }
2318
2319 /**
2320 * et131x_rx_dma_memory_alloc
2321 * @adapter: pointer to our private adapter structure
2322 *
2323 * Returns 0 on success and errno on failure (as defined in errno.h)
2324 *
2325 * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required,
2326 * and the Packet Status Ring.
2327 */
2328 int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
2329 {
2330 u32 i, j;
2331 u32 bufsize;
2332 u32 pktstat_ringsize, fbr_chunksize;
2333 struct rx_ring *rx_ring;
2334
2335 /* Setup some convenience pointers */
2336 rx_ring = &adapter->rx_ring;
2337
2338 /* Alloc memory for the lookup table */
2339 #ifdef USE_FBR0
2340 rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
2341 #endif
2342 rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
2343
2344 /* The first thing we will do is configure the sizes of the buffer
2345 * rings. These will change based on jumbo packet support. Larger
2346 * jumbo packets increase the size of each entry in FBR0, and the
2347 * number of entries in FBR0, while at the same time decreasing the
2348 * number of entries in FBR1.
2349 *
2350 * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1
2351 * entries are huge in order to accommodate a "jumbo" frame, then it
2352 * will have fewer entries. Conversely, FBR0 will now be relied upon
2353 * to carry more "normal" frames, thus its entry size also increases
2354 * and the number of entries goes up too (since it now carries
2355 * "small" + "regular" packets).
2356 *
2357 * In this scheme, we try to maintain 512 entries between the two
2358 * rings. Also, FBR1 remains a constant size - when its size doubles
2359 * the number of entries halves. FBR0 increases in size, however.
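*
* Editorial summary of the tiers configured just below (fbr[1] is
* FBR0 and only exists when USE_FBR0 is defined; fbr[0] is FBR1):
*
*	jumbo < 2048:	FBR0  256 B x  512	FBR1  2048 B x 512
*	jumbo < 4096:	FBR0  512 B x 1024	FBR1  4096 B x 512
*	otherwise:	FBR0 1024 B x  768	FBR1 16384 B x 128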
2360 */ 2361 2362 if (adapter->registry_jumbo_packet < 2048) { 2363#ifdef USE_FBR0 2364 rx_ring->fbr[1]->buffsize = 256; 2365 rx_ring->fbr[1]->num_entries = 512; 2366#endif 2367 rx_ring->fbr[0]->buffsize = 2048; 2368 rx_ring->fbr[0]->num_entries = 512; 2369 } else if (adapter->registry_jumbo_packet < 4096) { 2370#ifdef USE_FBR0 2371 rx_ring->fbr[1]->buffsize = 512; 2372 rx_ring->fbr[1]->num_entries = 1024; 2373#endif 2374 rx_ring->fbr[0]->buffsize = 4096; 2375 rx_ring->fbr[0]->num_entries = 512; 2376 } else { 2377#ifdef USE_FBR0 2378 rx_ring->fbr[1]->buffsize = 1024; 2379 rx_ring->fbr[1]->num_entries = 768; 2380#endif 2381 rx_ring->fbr[0]->buffsize = 16384; 2382 rx_ring->fbr[0]->num_entries = 128; 2383 } 2384 2385#ifdef USE_FBR0 2386 adapter->rx_ring.psr_num_entries = 2387 adapter->rx_ring.fbr[1]->num_entries + 2388 adapter->rx_ring.fbr[0]->num_entries; 2389#else 2390 adapter->rx_ring.psr_num_entries = adapter->rx_ring.fbr[0]->num_entries; 2391#endif 2392 2393 /* Allocate an area of memory for Free Buffer Ring 1 */ 2394 bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) + 2395 0xfff; 2396 rx_ring->fbr[0]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev, 2397 bufsize, 2398 &rx_ring->fbr[0]->ring_physaddr, 2399 GFP_KERNEL); 2400 if (!rx_ring->fbr[0]->ring_virtaddr) { 2401 dev_err(&adapter->pdev->dev, 2402 "Cannot alloc memory for Free Buffer Ring 1\n"); 2403 return -ENOMEM; 2404 } 2405 2406 /* Save physical address 2407 * 2408 * NOTE: dma_alloc_coherent(), used above to alloc DMA regions, 2409 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses 2410 * are ever returned, make sure the high part is retrieved here 2411 * before storing the adjusted address. 2412 */ 2413 rx_ring->fbr[0]->real_physaddr = rx_ring->fbr[0]->ring_physaddr; 2414 2415 /* Align Free Buffer Ring 1 on a 4K boundary */ 2416 et131x_align_allocated_memory(adapter, 2417 &rx_ring->fbr[0]->real_physaddr, 2418 &rx_ring->fbr[0]->offset, 0x0FFF); 2419 2420 rx_ring->fbr[0]->ring_virtaddr = 2421 (void *)((u8 *) rx_ring->fbr[0]->ring_virtaddr + 2422 rx_ring->fbr[0]->offset); 2423 2424#ifdef USE_FBR0 2425 /* Allocate an area of memory for Free Buffer Ring 0 */ 2426 bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) + 2427 0xfff; 2428 rx_ring->fbr[1]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev, 2429 bufsize, 2430 &rx_ring->fbr[1]->ring_physaddr, 2431 GFP_KERNEL); 2432 if (!rx_ring->fbr[1]->ring_virtaddr) { 2433 dev_err(&adapter->pdev->dev, 2434 "Cannot alloc memory for Free Buffer Ring 0\n"); 2435 return -ENOMEM; 2436 } 2437 2438 /* Save physical address 2439 * 2440 * NOTE: dma_alloc_coherent(), used above to alloc DMA regions, 2441 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses 2442 * are ever returned, make sure the high part is retrieved here before 2443 * storing the adjusted address. 
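*
* Editorial worked example of the 4K alignment performed below (mask
* 0x0FFF): had dma_alloc_coherent() returned physical address
* 0x12345abc, et131x_align_allocated_memory() would round it up to
* 0x12346000 and report offset = 0x544; the same offset is then added
* to the virtual address so the CPU and device views stay in step:
*
*	et131x_align_allocated_memory(adapter, &phys, &off, 0x0FFF);
*	virt = (u8 *)virt + off;	(hypothetical local variables)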
2444 */ 2445 rx_ring->fbr[1]->real_physaddr = rx_ring->fbr[1]->ring_physaddr; 2446 2447 /* Align Free Buffer Ring 0 on a 4K boundary */ 2448 et131x_align_allocated_memory(adapter, 2449 &rx_ring->fbr[1]->real_physaddr, 2450 &rx_ring->fbr[1]->offset, 0x0FFF); 2451 2452 rx_ring->fbr[1]->ring_virtaddr = 2453 (void *)((u8 *) rx_ring->fbr[1]->ring_virtaddr + 2454 rx_ring->fbr[1]->offset); 2455#endif 2456 for (i = 0; i < (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); i++) { 2457 u64 fbr1_offset; 2458 u64 fbr1_tmp_physaddr; 2459 u32 fbr1_align; 2460 2461 /* This code allocates an area of memory big enough for N 2462 * free buffers + (buffer_size - 1) so that the buffers can 2463 * be aligned on 4k boundaries. If each buffer were aligned 2464 * to a buffer_size boundary, the effect would be to double 2465 * the size of FBR0. By allocating N buffers at once, we 2466 * reduce this overhead. 2467 */ 2468 if (rx_ring->fbr[0]->buffsize > 4096) 2469 fbr1_align = 4096; 2470 else 2471 fbr1_align = rx_ring->fbr[0]->buffsize; 2472 2473 fbr_chunksize = 2474 (FBR_CHUNKS * rx_ring->fbr[0]->buffsize) + fbr1_align - 1; 2475 rx_ring->fbr[0]->mem_virtaddrs[i] = 2476 dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize, 2477 &rx_ring->fbr[0]->mem_physaddrs[i], 2478 GFP_KERNEL); 2479 2480 if (!rx_ring->fbr[0]->mem_virtaddrs[i]) { 2481 dev_err(&adapter->pdev->dev, 2482 "Could not alloc memory\n"); 2483 return -ENOMEM; 2484 } 2485 2486 /* See NOTE in "Save Physical Address" comment above */ 2487 fbr1_tmp_physaddr = rx_ring->fbr[0]->mem_physaddrs[i]; 2488 2489 et131x_align_allocated_memory(adapter, 2490 &fbr1_tmp_physaddr, 2491 &fbr1_offset, (fbr1_align - 1)); 2492 2493 for (j = 0; j < FBR_CHUNKS; j++) { 2494 u32 index = (i * FBR_CHUNKS) + j; 2495 2496 /* Save the Virtual address of this index for quick 2497 * access later 2498 */ 2499 rx_ring->fbr[0]->virt[index] = 2500 (u8 *) rx_ring->fbr[0]->mem_virtaddrs[i] + 2501 (j * rx_ring->fbr[0]->buffsize) + fbr1_offset; 2502 2503 /* now store the physical address in the descriptor 2504 * so the device can access it 2505 */ 2506 rx_ring->fbr[0]->bus_high[index] = 2507 (u32) (fbr1_tmp_physaddr >> 32); 2508 rx_ring->fbr[0]->bus_low[index] = 2509 (u32) fbr1_tmp_physaddr; 2510 2511 fbr1_tmp_physaddr += rx_ring->fbr[0]->buffsize; 2512 2513 rx_ring->fbr[0]->buffer1[index] = 2514 rx_ring->fbr[0]->virt[index]; 2515 rx_ring->fbr[0]->buffer2[index] = 2516 rx_ring->fbr[0]->virt[index] - 4; 2517 } 2518 } 2519 2520#ifdef USE_FBR0 2521 /* Same for FBR0 (if in use) */ 2522 for (i = 0; i < (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); i++) { 2523 u64 fbr0_offset; 2524 u64 fbr0_tmp_physaddr; 2525 2526 fbr_chunksize = 2527 ((FBR_CHUNKS + 1) * rx_ring->fbr[1]->buffsize) - 1; 2528 rx_ring->fbr[1]->mem_virtaddrs[i] = 2529 dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize, 2530 &rx_ring->fbr[1]->mem_physaddrs[i], 2531 GFP_KERNEL); 2532 2533 if (!rx_ring->fbr[1]->mem_virtaddrs[i]) { 2534 dev_err(&adapter->pdev->dev, 2535 "Could not alloc memory\n"); 2536 return -ENOMEM; 2537 } 2538 2539 /* See NOTE in "Save Physical Address" comment above */ 2540 fbr0_tmp_physaddr = rx_ring->fbr[1]->mem_physaddrs[i]; 2541 2542 et131x_align_allocated_memory(adapter, 2543 &fbr0_tmp_physaddr, 2544 &fbr0_offset, 2545 rx_ring->fbr[1]->buffsize - 1); 2546 2547 for (j = 0; j < FBR_CHUNKS; j++) { 2548 u32 index = (i * FBR_CHUNKS) + j; 2549 2550 rx_ring->fbr[1]->virt[index] = 2551 (u8 *) rx_ring->fbr[1]->mem_virtaddrs[i] + 2552 (j * rx_ring->fbr[1]->buffsize) + fbr0_offset; 2553 2554 rx_ring->fbr[1]->bus_high[index] = 2555 
(u32) (fbr0_tmp_physaddr >> 32); 2556 rx_ring->fbr[1]->bus_low[index] = 2557 (u32) fbr0_tmp_physaddr; 2558 2559 fbr0_tmp_physaddr += rx_ring->fbr[1]->buffsize; 2560 2561 rx_ring->fbr[1]->buffer1[index] = 2562 rx_ring->fbr[1]->virt[index]; 2563 rx_ring->fbr[1]->buffer2[index] = 2564 rx_ring->fbr[1]->virt[index] - 4; 2565 } 2566 } 2567#endif 2568 2569 /* Allocate an area of memory for FIFO of Packet Status ring entries */ 2570 pktstat_ringsize = 2571 sizeof(struct pkt_stat_desc) * adapter->rx_ring.psr_num_entries; 2572 2573 rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev, 2574 pktstat_ringsize, 2575 &rx_ring->ps_ring_physaddr, 2576 GFP_KERNEL); 2577 2578 if (!rx_ring->ps_ring_virtaddr) { 2579 dev_err(&adapter->pdev->dev, 2580 "Cannot alloc memory for Packet Status Ring\n"); 2581 return -ENOMEM; 2582 } 2583 printk(KERN_INFO "Packet Status Ring %lx\n", 2584 (unsigned long) rx_ring->ps_ring_physaddr); 2585 2586 /* 2587 * NOTE : dma_alloc_coherent(), used above to alloc DMA regions, 2588 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses 2589 * are ever returned, make sure the high part is retrieved here before 2590 * storing the adjusted address. 2591 */ 2592 2593 /* Allocate an area of memory for writeback of status information */ 2594 rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev, 2595 sizeof(struct rx_status_block), 2596 &rx_ring->rx_status_bus, 2597 GFP_KERNEL); 2598 if (!rx_ring->rx_status_block) { 2599 dev_err(&adapter->pdev->dev, 2600 "Cannot alloc memory for Status Block\n"); 2601 return -ENOMEM; 2602 } 2603 rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD; 2604 printk(KERN_INFO "PRS %lx\n", (unsigned long)rx_ring->rx_status_bus); 2605 2606 /* Recv 2607 * kmem_cache_create initializes a lookaside list. After successful 2608 * creation, nonpaged fixed-size blocks can be allocated from and 2609 * freed to the lookaside list. 2610 * RFDs will be allocated from this pool. 2611 */ 2612 rx_ring->recv_lookaside = kmem_cache_create(adapter->netdev->name, 2613 sizeof(struct rfd), 2614 0, 2615 SLAB_CACHE_DMA | 2616 SLAB_HWCACHE_ALIGN, 2617 NULL); 2618 2619 adapter->flags |= fMP_ADAPTER_RECV_LOOKASIDE; 2620 2621 /* The RFDs are going to be put on lists later on, so initialize the 2622 * lists now. 2623 */ 2624 INIT_LIST_HEAD(&rx_ring->recv_list); 2625 return 0; 2626} 2627 2628/** 2629 * et131x_rx_dma_memory_free - Free all memory allocated within this module. 
2630 * @adapter: pointer to our private adapter structure 2631 */ 2632void et131x_rx_dma_memory_free(struct et131x_adapter *adapter) 2633{ 2634 u32 index; 2635 u32 bufsize; 2636 u32 pktstat_ringsize; 2637 struct rfd *rfd; 2638 struct rx_ring *rx_ring; 2639 2640 /* Setup some convenience pointers */ 2641 rx_ring = &adapter->rx_ring; 2642 2643 /* Free RFDs and associated packet descriptors */ 2644 WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd); 2645 2646 while (!list_empty(&rx_ring->recv_list)) { 2647 rfd = (struct rfd *) list_entry(rx_ring->recv_list.next, 2648 struct rfd, list_node); 2649 2650 list_del(&rfd->list_node); 2651 rfd->skb = NULL; 2652 kmem_cache_free(adapter->rx_ring.recv_lookaside, rfd); 2653 } 2654 2655 /* Free Free Buffer Ring 1 */ 2656 if (rx_ring->fbr[0]->ring_virtaddr) { 2657 /* First the packet memory */ 2658 for (index = 0; index < 2659 (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); index++) { 2660 if (rx_ring->fbr[0]->mem_virtaddrs[index]) { 2661 u32 fbr1_align; 2662 2663 if (rx_ring->fbr[0]->buffsize > 4096) 2664 fbr1_align = 4096; 2665 else 2666 fbr1_align = rx_ring->fbr[0]->buffsize; 2667 2668 bufsize = 2669 (rx_ring->fbr[0]->buffsize * FBR_CHUNKS) + 2670 fbr1_align - 1; 2671 2672 dma_free_coherent(&adapter->pdev->dev, 2673 bufsize, 2674 rx_ring->fbr[0]->mem_virtaddrs[index], 2675 rx_ring->fbr[0]->mem_physaddrs[index]); 2676 2677 rx_ring->fbr[0]->mem_virtaddrs[index] = NULL; 2678 } 2679 } 2680 2681 /* Now the FIFO itself */ 2682 rx_ring->fbr[0]->ring_virtaddr = (void *)((u8 *) 2683 rx_ring->fbr[0]->ring_virtaddr - rx_ring->fbr[0]->offset); 2684 2685 bufsize = 2686 (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) + 2687 0xfff; 2688 2689 dma_free_coherent(&adapter->pdev->dev, bufsize, 2690 rx_ring->fbr[0]->ring_virtaddr, 2691 rx_ring->fbr[0]->ring_physaddr); 2692 2693 rx_ring->fbr[0]->ring_virtaddr = NULL; 2694 } 2695 2696#ifdef USE_FBR0 2697 /* Now the same for Free Buffer Ring 0 */ 2698 if (rx_ring->fbr[1]->ring_virtaddr) { 2699 /* First the packet memory */ 2700 for (index = 0; index < 2701 (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); index++) { 2702 if (rx_ring->fbr[1]->mem_virtaddrs[index]) { 2703 bufsize = 2704 (rx_ring->fbr[1]->buffsize * 2705 (FBR_CHUNKS + 1)) - 1; 2706 2707 dma_free_coherent(&adapter->pdev->dev, 2708 bufsize, 2709 rx_ring->fbr[1]->mem_virtaddrs[index], 2710 rx_ring->fbr[1]->mem_physaddrs[index]); 2711 2712 rx_ring->fbr[1]->mem_virtaddrs[index] = NULL; 2713 } 2714 } 2715 2716 /* Now the FIFO itself */ 2717 rx_ring->fbr[1]->ring_virtaddr = (void *)((u8 *) 2718 rx_ring->fbr[1]->ring_virtaddr - rx_ring->fbr[1]->offset); 2719 2720 bufsize = 2721 (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) + 2722 0xfff; 2723 2724 dma_free_coherent(&adapter->pdev->dev, 2725 bufsize, 2726 rx_ring->fbr[1]->ring_virtaddr, 2727 rx_ring->fbr[1]->ring_physaddr); 2728 2729 rx_ring->fbr[1]->ring_virtaddr = NULL; 2730 } 2731#endif 2732 2733 /* Free Packet Status Ring */ 2734 if (rx_ring->ps_ring_virtaddr) { 2735 pktstat_ringsize = 2736 sizeof(struct pkt_stat_desc) * 2737 adapter->rx_ring.psr_num_entries; 2738 2739 dma_free_coherent(&adapter->pdev->dev, pktstat_ringsize, 2740 rx_ring->ps_ring_virtaddr, 2741 rx_ring->ps_ring_physaddr); 2742 2743 rx_ring->ps_ring_virtaddr = NULL; 2744 } 2745 2746 /* Free area of memory for the writeback of status information */ 2747 if (rx_ring->rx_status_block) { 2748 dma_free_coherent(&adapter->pdev->dev, 2749 sizeof(struct rx_status_block), 2750 rx_ring->rx_status_block, rx_ring->rx_status_bus); 2751 
rx_ring->rx_status_block = NULL;
2752 }
2753
2754 /* Destroy the lookaside (RFD) pool */
2755 if (adapter->flags & fMP_ADAPTER_RECV_LOOKASIDE) {
2756 kmem_cache_destroy(rx_ring->recv_lookaside);
2757 adapter->flags &= ~fMP_ADAPTER_RECV_LOOKASIDE;
2758 }
2759
2760 /* Free the FBR Lookup Table */
2761 #ifdef USE_FBR0
2762 kfree(rx_ring->fbr[1]);
2763 #endif
2764
2765 kfree(rx_ring->fbr[0]);
2766
2767 /* Reset Counters */
2768 rx_ring->num_ready_recv = 0;
2769 }
2770
2771 /**
2772 * et131x_init_recv - Initialize receive data structures.
2773 * @adapter: pointer to our private adapter structure
2774 *
2775 * Returns 0 on success and errno on failure (as defined in errno.h)
2776 */
2777 int et131x_init_recv(struct et131x_adapter *adapter)
2778 {
2779 int status = -ENOMEM;
2780 struct rfd *rfd = NULL;
2781 u32 rfdct;
2782 u32 numrfd = 0;
2783 struct rx_ring *rx_ring;
2784
2785 /* Setup some convenience pointers */
2786 rx_ring = &adapter->rx_ring;
2787
2788 /* Setup each RFD */
2789 for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
2790 rfd = kmem_cache_alloc(rx_ring->recv_lookaside,
2791 GFP_ATOMIC | GFP_DMA);
2792
2793 if (!rfd) {
2794 dev_err(&adapter->pdev->dev,
2795 "Couldn't alloc RFD out of kmem_cache\n");
2796 status = -ENOMEM;
2797 continue;
2798 }
2799
2800 rfd->skb = NULL;
2801
2802 /* Add this RFD to the recv_list */
2803 list_add_tail(&rfd->list_node, &rx_ring->recv_list);
2804
2805 /* Increment both the available RFDs, and the total RFDs. */
2806 rx_ring->num_ready_recv++;
2807 numrfd++;
2808 }
2809
2810 if (numrfd > NIC_MIN_NUM_RFD)
2811 status = 0;
2812
2813 rx_ring->num_rfd = numrfd;
2814
2815 if (status != 0) {
2816 kmem_cache_free(rx_ring->recv_lookaside, rfd);
2817 dev_err(&adapter->pdev->dev,
2818 "Allocation problems in et131x_init_recv\n");
2819 }
2820 return status;
2821 }
2822
2823 /**
2824 * et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate.
2825 * @adapter: pointer to our adapter structure
2826 */
2827 void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
2828 {
2829 struct phy_device *phydev = adapter->phydev;
2830
2831 if (!phydev)
2832 return;
2833
2834 /* For version B silicon, we do not use the RxDMA timer for 10 and 100
2835 * Mbits/s line rates, nor do we enable RxDMA interrupt coalescing.
2836 */
2837 if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) {
2838 writel(0, &adapter->regs->rxdma.max_pkt_time);
2839 writel(1, &adapter->regs->rxdma.num_pkt_done);
2840 }
2841 }
2842
2843 /**
2844 * nic_return_rfd - Recycle a RFD and put it back onto the receive list
2845 * @adapter: pointer to our adapter
2846 * @rfd: pointer to the RFD
2847 */
2848 static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
2849 {
2850 struct rx_ring *rx_local = &adapter->rx_ring;
2851 struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
2852 u16 buff_index = rfd->bufferindex;
2853 u8 ring_index = rfd->ringindex;
2854 unsigned long flags;
2855
2856 /* We don't use any of the OOB data besides status. Otherwise, we
2857 * need to clean up OOB data
2858 */
2859 if (
2860 #ifdef USE_FBR0
2861 (ring_index == 0 && buff_index < rx_local->fbr[1]->num_entries) ||
2862 #endif
2863 (ring_index == 1 && buff_index < rx_local->fbr[0]->num_entries)) {
2864 spin_lock_irqsave(&adapter->fbr_lock, flags);
2865
2866 if (ring_index == 1) {
2867 struct fbr_desc *next = (struct fbr_desc *)
2868 (rx_local->fbr[0]->ring_virtaddr) +
2869 INDEX10(rx_local->fbr[0]->local_full);
2870
2871 /* Handle the Free Buffer Ring advancement here.
Write 2872 * the PA / Buffer Index for the returned buffer into 2873 * the oldest (next to be freed)FBR entry 2874 */ 2875 next->addr_hi = rx_local->fbr[0]->bus_high[buff_index]; 2876 next->addr_lo = rx_local->fbr[0]->bus_low[buff_index]; 2877 next->word2 = buff_index; 2878 2879 writel(bump_free_buff_ring( 2880 &rx_local->fbr[0]->local_full, 2881 rx_local->fbr[0]->num_entries - 1), 2882 &rx_dma->fbr1_full_offset); 2883 } 2884#ifdef USE_FBR0 2885 else { 2886 struct fbr_desc *next = (struct fbr_desc *) 2887 rx_local->fbr[1]->ring_virtaddr + 2888 INDEX10(rx_local->fbr[1]->local_full); 2889 2890 /* Handle the Free Buffer Ring advancement here. Write 2891 * the PA / Buffer Index for the returned buffer into 2892 * the oldest (next to be freed) FBR entry 2893 */ 2894 next->addr_hi = rx_local->fbr[1]->bus_high[buff_index]; 2895 next->addr_lo = rx_local->fbr[1]->bus_low[buff_index]; 2896 next->word2 = buff_index; 2897 2898 writel(bump_free_buff_ring( 2899 &rx_local->fbr[1]->local_full, 2900 rx_local->fbr[1]->num_entries - 1), 2901 &rx_dma->fbr0_full_offset); 2902 } 2903#endif 2904 spin_unlock_irqrestore(&adapter->fbr_lock, flags); 2905 } else { 2906 dev_err(&adapter->pdev->dev, 2907 "%s illegal Buffer Index returned\n", __func__); 2908 } 2909 2910 /* The processing on this RFD is done, so put it back on the tail of 2911 * our list 2912 */ 2913 spin_lock_irqsave(&adapter->rcv_lock, flags); 2914 list_add_tail(&rfd->list_node, &rx_local->recv_list); 2915 rx_local->num_ready_recv++; 2916 spin_unlock_irqrestore(&adapter->rcv_lock, flags); 2917 2918 WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd); 2919} 2920 2921static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter) 2922{ 2923 struct rx_ring *rx_local = &adapter->rx_ring; 2924 struct rx_status_block *status; 2925 struct pkt_stat_desc *psr; 2926 struct rfd *rfd; 2927 u32 i; 2928 u8 *buf; 2929 unsigned long flags; 2930 struct list_head *element; 2931 u8 ring_index; 2932 u16 buff_index; 2933 u32 len; 2934 u32 word0; 2935 u32 word1; 2936 2937 /* RX Status block is written by the DMA engine prior to every 2938 * interrupt. It contains the next to be used entry in the Packet 2939 * Status Ring, and also the two Free Buffer rings. 2940 */ 2941 status = rx_local->rx_status_block; 2942 word1 = status->word1 >> 16; /* Get the useful bits */ 2943 2944 /* Check the PSR and wrap bits do not match */ 2945 if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF)) 2946 /* Looks like this ring is not updated yet */ 2947 return NULL; 2948 2949 /* The packet status ring indicates that data is available. */ 2950 psr = (struct pkt_stat_desc *) (rx_local->ps_ring_virtaddr) + 2951 (rx_local->local_psr_full & 0xFFF); 2952 2953 /* Grab any information that is required once the PSR is 2954 * advanced, since we can no longer rely on the memory being 2955 * accurate 2956 */ 2957 len = psr->word1 & 0xFFFF; 2958 ring_index = (psr->word1 >> 26) & 0x03; 2959 buff_index = (psr->word1 >> 16) & 0x3FF; 2960 word0 = psr->word0; 2961 2962 /* Indicate that we have used this PSR entry. 
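*
* Editorial aside: local_psr_full keeps a 12-bit entry index in bits
* 0-11 and a wrap flag in bit 12, mirroring the 10-bit scheme of
* bump_free_buff_ring() above. Worked example: with psr_num_entries =
* 1024, advancing from index 1023 gives 1024 > 1023, so the index
* field is cleared and the wrap bit toggles, i.e. 0x03ff -> 0x1000.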
*/ 2963 /* FIXME wrap 12 */ 2964 add_12bit(&rx_local->local_psr_full, 1); 2965 if ( 2966 (rx_local->local_psr_full & 0xFFF) > rx_local->psr_num_entries - 1) { 2967 /* Clear psr full and toggle the wrap bit */ 2968 rx_local->local_psr_full &= ~0xFFF; 2969 rx_local->local_psr_full ^= 0x1000; 2970 } 2971 2972 writel(rx_local->local_psr_full, 2973 &adapter->regs->rxdma.psr_full_offset); 2974 2975#ifndef USE_FBR0 2976 if (ring_index != 1) 2977 return NULL; 2978#endif 2979 2980#ifdef USE_FBR0 2981 if (ring_index > 1 || 2982 (ring_index == 0 && 2983 buff_index > rx_local->fbr[1]->num_entries - 1) || 2984 (ring_index == 1 && 2985 buff_index > rx_local->fbr[0]->num_entries - 1)) 2986#else 2987 if (ring_index != 1 || buff_index > rx_local->fbr[0]->num_entries - 1) 2988#endif 2989 { 2990 /* Illegal buffer or ring index cannot be used by S/W*/ 2991 dev_err(&adapter->pdev->dev, 2992 "NICRxPkts PSR Entry %d indicates " 2993 "length of %d and/or bad bi(%d)\n", 2994 rx_local->local_psr_full & 0xFFF, 2995 len, buff_index); 2996 return NULL; 2997 } 2998 2999 /* Get and fill the RFD. */ 3000 spin_lock_irqsave(&adapter->rcv_lock, flags); 3001 3002 rfd = NULL; 3003 element = rx_local->recv_list.next; 3004 rfd = (struct rfd *) list_entry(element, struct rfd, list_node); 3005 3006 if (rfd == NULL) { 3007 spin_unlock_irqrestore(&adapter->rcv_lock, flags); 3008 return NULL; 3009 } 3010 3011 list_del(&rfd->list_node); 3012 rx_local->num_ready_recv--; 3013 3014 spin_unlock_irqrestore(&adapter->rcv_lock, flags); 3015 3016 rfd->bufferindex = buff_index; 3017 rfd->ringindex = ring_index; 3018 3019 /* In V1 silicon, there is a bug which screws up filtering of 3020 * runt packets. Therefore runt packet filtering is disabled 3021 * in the MAC and the packets are dropped here. They are 3022 * also counted here. 3023 */ 3024 if (len < (NIC_MIN_PACKET_SIZE + 4)) { 3025 adapter->stats.rx_other_errs++; 3026 len = 0; 3027 } 3028 3029 if (len) { 3030 /* Determine if this is a multicast packet coming in */ 3031 if ((word0 & ALCATEL_MULTICAST_PKT) && 3032 !(word0 & ALCATEL_BROADCAST_PKT)) { 3033 /* Promiscuous mode and Multicast mode are 3034 * not mutually exclusive as was first 3035 * thought. I guess Promiscuous is just 3036 * considered a super-set of the other 3037 * filters. Generally filter is 0x2b when in 3038 * promiscuous mode. 3039 */ 3040 if ((adapter->packet_filter & 3041 ET131X_PACKET_TYPE_MULTICAST) 3042 && !(adapter->packet_filter & 3043 ET131X_PACKET_TYPE_PROMISCUOUS) 3044 && !(adapter->packet_filter & 3045 ET131X_PACKET_TYPE_ALL_MULTICAST)) { 3046 /* 3047 * Note - ring_index for fbr[] array is reversed 3048 * 1 for FBR0 etc 3049 */ 3050 buf = rx_local->fbr[(ring_index == 0 ? 1 : 0)]-> 3051 virt[buff_index]; 3052 3053 /* Loop through our list to see if the 3054 * destination address of this packet 3055 * matches one in our list. 3056 */ 3057 for (i = 0; i < adapter->multicast_addr_count; 3058 i++) { 3059 if (buf[0] == 3060 adapter->multicast_list[i][0] 3061 && buf[1] == 3062 adapter->multicast_list[i][1] 3063 && buf[2] == 3064 adapter->multicast_list[i][2] 3065 && buf[3] == 3066 adapter->multicast_list[i][3] 3067 && buf[4] == 3068 adapter->multicast_list[i][4] 3069 && buf[5] == 3070 adapter->multicast_list[i][5]) { 3071 break; 3072 } 3073 } 3074 3075 /* If our index is equal to the number 3076 * of Multicast address we have, then 3077 * this means we did not find this 3078 * packet's matching address in our 3079 * list. Set the len to zero, 3080 * so we free our RFD when we return 3081 * from this function. 
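*
* (Editorial note: the open-coded six-byte compare above is what
* etherdevice.h's compare_ether_addr() helper already provides, so
* the loop body could equivalently read
*
*	if (!compare_ether_addr(buf, adapter->multicast_list[i]))
*		break;
*
* assuming the u8[6] address layout used here.)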
3082 */ 3083 if (i == adapter->multicast_addr_count) 3084 len = 0; 3085 } 3086 3087 if (len > 0) 3088 adapter->stats.multicast_pkts_rcvd++; 3089 } else if (word0 & ALCATEL_BROADCAST_PKT) 3090 adapter->stats.broadcast_pkts_rcvd++; 3091 else 3092 /* Not sure what this counter measures in 3093 * promiscuous mode. Perhaps we should check 3094 * the MAC address to see if it is directed 3095 * to us in promiscuous mode. 3096 */ 3097 adapter->stats.unicast_pkts_rcvd++; 3098 } 3099 3100 if (len > 0) { 3101 struct sk_buff *skb = NULL; 3102 3103 /*rfd->len = len - 4; */ 3104 rfd->len = len; 3105 3106 skb = dev_alloc_skb(rfd->len + 2); 3107 if (!skb) { 3108 dev_err(&adapter->pdev->dev, 3109 "Couldn't alloc an SKB for Rx\n"); 3110 return NULL; 3111 } 3112 3113 adapter->net_stats.rx_bytes += rfd->len; 3114 3115 /* 3116 * Note - ring_index for fbr[] array is reversed, 3117 * 1 for FBR0 etc 3118 */ 3119 memcpy(skb_put(skb, rfd->len), 3120 rx_local->fbr[(ring_index == 0 ? 1 : 0)]->virt[buff_index], 3121 rfd->len); 3122 3123 skb->dev = adapter->netdev; 3124 skb->protocol = eth_type_trans(skb, adapter->netdev); 3125 skb->ip_summed = CHECKSUM_NONE; 3126 3127 netif_rx(skb); 3128 } else { 3129 rfd->len = 0; 3130 } 3131 3132 nic_return_rfd(adapter, rfd); 3133 return rfd; 3134} 3135 3136/** 3137 * et131x_handle_recv_interrupt - Interrupt handler for receive processing 3138 * @adapter: pointer to our adapter 3139 * 3140 * Assumption, Rcv spinlock has been acquired. 3141 */ 3142void et131x_handle_recv_interrupt(struct et131x_adapter *adapter) 3143{ 3144 struct rfd *rfd = NULL; 3145 u32 count = 0; 3146 bool done = true; 3147 3148 /* Process up to available RFD's */ 3149 while (count < NUM_PACKETS_HANDLED) { 3150 if (list_empty(&adapter->rx_ring.recv_list)) { 3151 WARN_ON(adapter->rx_ring.num_ready_recv != 0); 3152 done = false; 3153 break; 3154 } 3155 3156 rfd = nic_rx_pkts(adapter); 3157 3158 if (rfd == NULL) 3159 break; 3160 3161 /* Do not receive any packets until a filter has been set. 3162 * Do not receive any packets until we have link. 3163 * If length is zero, return the RFD in order to advance the 3164 * Free buffer ring. 3165 */ 3166 if (!adapter->packet_filter || 3167 !netif_carrier_ok(adapter->netdev) || 3168 rfd->len == 0) 3169 continue; 3170 3171 /* Increment the number of packets we received */ 3172 adapter->net_stats.rx_packets++; 3173 3174 /* Set the status on the packet, either resources or success */ 3175 if (adapter->rx_ring.num_ready_recv < RFD_LOW_WATER_MARK) { 3176 dev_warn(&adapter->pdev->dev, 3177 "RFD's are running out\n"); 3178 } 3179 count++; 3180 } 3181 3182 if (count == NUM_PACKETS_HANDLED || !done) { 3183 adapter->rx_ring.unfinished_receives = true; 3184 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO, 3185 &adapter->regs->global.watchdog_timer); 3186 } else 3187 /* Watchdog timer will disable itself if appropriate. */ 3188 adapter->rx_ring.unfinished_receives = false; 3189} 3190 3191/* TX functions */ 3192 3193/** 3194 * et131x_tx_dma_memory_alloc 3195 * @adapter: pointer to our private adapter structure 3196 * 3197 * Returns 0 on success and errno on failure (as defined in errno.h). 3198 * 3199 * Allocates memory that will be visible both to the device and to the CPU. 3200 * The OS will pass us packets, pointers to which we will insert in the Tx 3201 * Descriptor queue. The device will read this queue to find the packets in 3202 * memory. The device will update the "status" in memory each time it xmits a 3203 * packet. 
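*
* Editorial aside on the allocation trick used below: the descriptor
* ring is over-allocated by 4096 - 1 bytes so that a 4 KB aligned
* window is guaranteed to exist somewhere inside the buffer no matter
* what address dma_alloc_coherent() hands back:
*
*	desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX)
*		    + 4096 - 1;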
3204 */
3205 int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
3206 {
3207 int desc_size = 0;
3208 struct tx_ring *tx_ring = &adapter->tx_ring;
3209
3210 /* Allocate memory for the TCB's (Transmit Control Block) */
3211 adapter->tx_ring.tcb_ring =
3212 kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA);
3213 if (!adapter->tx_ring.tcb_ring) {
3214 dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
3215 return -ENOMEM;
3216 }
3217
3218 /* Allocate enough memory for the Tx descriptor ring, and allocate
3219 * some extra so that the ring can be aligned on a 4k boundary.
3220 */
3221 desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1;
3222 tx_ring->tx_desc_ring =
3223 (struct tx_desc *) dma_alloc_coherent(&adapter->pdev->dev,
3224 desc_size,
3225 &tx_ring->tx_desc_ring_pa,
3226 GFP_KERNEL);
3227 if (!adapter->tx_ring.tx_desc_ring) {
3228 dev_err(&adapter->pdev->dev,
3229 "Cannot alloc memory for Tx Ring\n");
3230 return -ENOMEM;
3231 }
3232
3233 /* Save physical address
3234 *
3235 * NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
3236 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
3237 * are ever returned, make sure the high part is retrieved here before
3238 * storing the adjusted address.
3239 */
3240 /* Allocate memory for the Tx status block */
3241 tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev,
3242 sizeof(u32),
3243 &tx_ring->tx_status_pa,
3244 GFP_KERNEL);
3245 if (!adapter->tx_ring.tx_status) {
3246 dev_err(&adapter->pdev->dev,
3247 "Cannot alloc memory for Tx status block\n");
3248 return -ENOMEM;
3249 }
3250 return 0;
3251 }
3252
3253 /**
3254 * et131x_tx_dma_memory_free - Free all memory allocated within this module
3255 * @adapter: pointer to our private adapter structure
3256 *
3257 * Frees the Tx descriptor ring, the Tx status block and the TCB array.
3258 */
3259 void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
3260 {
3261 int desc_size = 0;
3262
3263 if (adapter->tx_ring.tx_desc_ring) {
3264 /* Free memory relating to Tx rings here */
3265 desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX)
3266 + 4096 - 1;
3267 dma_free_coherent(&adapter->pdev->dev,
3268 desc_size,
3269 adapter->tx_ring.tx_desc_ring,
3270 adapter->tx_ring.tx_desc_ring_pa);
3271 adapter->tx_ring.tx_desc_ring = NULL;
3272 }
3273
3274 /* Free memory for the Tx status block */
3275 if (adapter->tx_ring.tx_status) {
3276 dma_free_coherent(&adapter->pdev->dev,
3277 sizeof(u32),
3278 adapter->tx_ring.tx_status,
3279 adapter->tx_ring.tx_status_pa);
3280
3281 adapter->tx_ring.tx_status = NULL;
3282 }
3283 /* Free the memory for the tcb structures */
3284 kfree(adapter->tx_ring.tcb_ring);
3285 }
3286
3287 /**
3288 * nic_send_packet - NIC specific send handler for version B silicon.
3289 * @adapter: pointer to our adapter
3290 * @tcb: pointer to struct tcb
3291 *
3292 * Returns 0 or errno.
3293 */
3294 static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
3295 {
3296 u32 i;
3297 struct tx_desc desc[24]; /* 24 x 16 byte */
3298 u32 frag = 0;
3299 u32 thiscopy, remainder;
3300 struct sk_buff *skb = tcb->skb;
3301 u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
3302 struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
3303 unsigned long flags;
3304 struct phy_device *phydev = adapter->phydev;
3305
3306 /* Part of the optimizations of this send routine restrict us to
3307 * sending 24 fragments at a pass. In practice we should never see
3308 * more than 5 fragments.
3309 * 3310 * NOTE: The older version of this function (below) can handle any 3311 * number of fragments. If needed, we can call this function, 3312 * although it is less efficient. 3313 */ 3314 if (nr_frags > 23) 3315 return -EIO; 3316 3317 memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1)); 3318 3319 for (i = 0; i < nr_frags; i++) { 3320 /* If there is something in this element, lets get a 3321 * descriptor from the ring and get the necessary data 3322 */ 3323 if (i == 0) { 3324 /* If the fragments are smaller than a standard MTU, 3325 * then map them to a single descriptor in the Tx 3326 * Desc ring. However, if they're larger, as is 3327 * possible with support for jumbo packets, then 3328 * split them each across 2 descriptors. 3329 * 3330 * This will work until we determine why the hardware 3331 * doesn't seem to like large fragments. 3332 */ 3333 if ((skb->len - skb->data_len) <= 1514) { 3334 desc[frag].addr_hi = 0; 3335 /* Low 16bits are length, high is vlan and 3336 unused currently so zero */ 3337 desc[frag].len_vlan = 3338 skb->len - skb->data_len; 3339 3340 /* NOTE: Here, the dma_addr_t returned from 3341 * dma_map_single() is implicitly cast as a 3342 * u32. Although dma_addr_t can be 3343 * 64-bit, the address returned by 3344 * dma_map_single() is always 32-bit 3345 * addressable (as defined by the pci/dma 3346 * subsystem) 3347 */ 3348 desc[frag++].addr_lo = 3349 dma_map_single(&adapter->pdev->dev, 3350 skb->data, 3351 skb->len - 3352 skb->data_len, 3353 DMA_TO_DEVICE); 3354 } else { 3355 desc[frag].addr_hi = 0; 3356 desc[frag].len_vlan = 3357 (skb->len - skb->data_len) / 2; 3358 3359 /* NOTE: Here, the dma_addr_t returned from 3360 * dma_map_single() is implicitly cast as a 3361 * u32. Although dma_addr_t can be 3362 * 64-bit, the address returned by 3363 * dma_map_single() is always 32-bit 3364 * addressable (as defined by the pci/dma 3365 * subsystem) 3366 */ 3367 desc[frag++].addr_lo = 3368 dma_map_single(&adapter->pdev->dev, 3369 skb->data, 3370 ((skb->len - 3371 skb->data_len) / 2), 3372 DMA_TO_DEVICE); 3373 desc[frag].addr_hi = 0; 3374 3375 desc[frag].len_vlan = 3376 (skb->len - skb->data_len) / 2; 3377 3378 /* NOTE: Here, the dma_addr_t returned from 3379 * dma_map_single() is implicitly cast as a 3380 * u32. Although dma_addr_t can be 3381 * 64-bit, the address returned by 3382 * dma_map_single() is always 32-bit 3383 * addressable (as defined by the pci/dma 3384 * subsystem) 3385 */ 3386 desc[frag++].addr_lo = 3387 dma_map_single(&adapter->pdev->dev, 3388 skb->data + 3389 ((skb->len - 3390 skb->data_len) / 2), 3391 ((skb->len - 3392 skb->data_len) / 2), 3393 DMA_TO_DEVICE); 3394 } 3395 } else { 3396 desc[frag].addr_hi = 0; 3397 desc[frag].len_vlan = 3398 frags[i - 1].size; 3399 3400 /* NOTE: Here, the dma_addr_t returned from 3401 * dma_map_page() is implicitly cast as a u32. 
3402 * Although dma_addr_t can be 64-bit, the address 3403 * returned by dma_map_page() is always 32-bit 3404 * addressable (as defined by the pci/dma subsystem) 3405 */ 3406 desc[frag++].addr_lo = skb_frag_dma_map( 3407 &adapter->pdev->dev, 3408 &frags[i - 1], 3409 0, 3410 frags[i - 1].size, 3411 DMA_TO_DEVICE); 3412 } 3413 } 3414 3415 if (phydev && phydev->speed == SPEED_1000) { 3416 if (++adapter->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) { 3417 /* Last element & Interrupt flag */ 3418 desc[frag - 1].flags = 0x5; 3419 adapter->tx_ring.since_irq = 0; 3420 } else { /* Last element */ 3421 desc[frag - 1].flags = 0x1; 3422 } 3423 } else 3424 desc[frag - 1].flags = 0x5; 3425 3426 desc[0].flags |= 2; /* First element flag */ 3427 3428 tcb->index_start = adapter->tx_ring.send_idx; 3429 tcb->stale = 0; 3430 3431 spin_lock_irqsave(&adapter->send_hw_lock, flags); 3432 3433 thiscopy = NUM_DESC_PER_RING_TX - 3434 INDEX10(adapter->tx_ring.send_idx); 3435 3436 if (thiscopy >= frag) { 3437 remainder = 0; 3438 thiscopy = frag; 3439 } else { 3440 remainder = frag - thiscopy; 3441 } 3442 3443 memcpy(adapter->tx_ring.tx_desc_ring + 3444 INDEX10(adapter->tx_ring.send_idx), desc, 3445 sizeof(struct tx_desc) * thiscopy); 3446 3447 add_10bit(&adapter->tx_ring.send_idx, thiscopy); 3448 3449 if (INDEX10(adapter->tx_ring.send_idx) == 0 || 3450 INDEX10(adapter->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) { 3451 adapter->tx_ring.send_idx &= ~ET_DMA10_MASK; 3452 adapter->tx_ring.send_idx ^= ET_DMA10_WRAP; 3453 } 3454 3455 if (remainder) { 3456 memcpy(adapter->tx_ring.tx_desc_ring, 3457 desc + thiscopy, 3458 sizeof(struct tx_desc) * remainder); 3459 3460 add_10bit(&adapter->tx_ring.send_idx, remainder); 3461 } 3462 3463 if (INDEX10(adapter->tx_ring.send_idx) == 0) { 3464 if (adapter->tx_ring.send_idx) 3465 tcb->index = NUM_DESC_PER_RING_TX - 1; 3466 else 3467 tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1); 3468 } else 3469 tcb->index = adapter->tx_ring.send_idx - 1; 3470 3471 spin_lock(&adapter->tcb_send_qlock); 3472 3473 if (adapter->tx_ring.send_tail) 3474 adapter->tx_ring.send_tail->next = tcb; 3475 else 3476 adapter->tx_ring.send_head = tcb; 3477 3478 adapter->tx_ring.send_tail = tcb; 3479 3480 WARN_ON(tcb->next != NULL); 3481 3482 adapter->tx_ring.used++; 3483 3484 spin_unlock(&adapter->tcb_send_qlock); 3485 3486 /* Write the new write pointer back to the device. */ 3487 writel(adapter->tx_ring.send_idx, 3488 &adapter->regs->txdma.service_request); 3489 3490 /* For Gig only, we use Tx Interrupt coalescing. Enable the software 3491 * timer to wake us up if this packet isn't followed by N more. 3492 */ 3493 if (phydev && phydev->speed == SPEED_1000) { 3494 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO, 3495 &adapter->regs->global.watchdog_timer); 3496 } 3497 spin_unlock_irqrestore(&adapter->send_hw_lock, flags); 3498 3499 return 0; 3500} 3501 3502/** 3503 * send_packet - Do the work to send a packet 3504 * @skb: the packet(s) to send 3505 * @adapter: a pointer to the device's private adapter structure 3506 * 3507 * Return 0 in almost all cases; non-zero value in extreme hard failure only. 
3508 * 3509 * Assumption: Send spinlock has been acquired 3510 */ 3511static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter) 3512{ 3513 int status; 3514 struct tcb *tcb = NULL; 3515 u16 *shbufva; 3516 unsigned long flags; 3517 3518 /* All packets must have at least a MAC address and a protocol type */ 3519 if (skb->len < ETH_HLEN) 3520 return -EIO; 3521 3522 /* Get a TCB for this packet */ 3523 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); 3524 3525 tcb = adapter->tx_ring.tcb_qhead; 3526 3527 if (tcb == NULL) { 3528 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); 3529 return -ENOMEM; 3530 } 3531 3532 adapter->tx_ring.tcb_qhead = tcb->next; 3533 3534 if (adapter->tx_ring.tcb_qhead == NULL) 3535 adapter->tx_ring.tcb_qtail = NULL; 3536 3537 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); 3538 3539 tcb->skb = skb; 3540 3541 if (skb->data != NULL && skb->len - skb->data_len >= 6) { 3542 shbufva = (u16 *) skb->data; 3543 3544 if ((shbufva[0] == 0xffff) && 3545 (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) { 3546 tcb->flags |= fMP_DEST_BROAD; 3547 } else if ((shbufva[0] & 0x3) == 0x0001) { 3548 tcb->flags |= fMP_DEST_MULTI; 3549 } 3550 } 3551 3552 tcb->next = NULL; 3553 3554 /* Call the NIC specific send handler. */ 3555 status = nic_send_packet(adapter, tcb); 3556 3557 if (status != 0) { 3558 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); 3559 3560 if (adapter->tx_ring.tcb_qtail) 3561 adapter->tx_ring.tcb_qtail->next = tcb; 3562 else 3563 /* Apparently ready Q is empty. */ 3564 adapter->tx_ring.tcb_qhead = tcb; 3565 3566 adapter->tx_ring.tcb_qtail = tcb; 3567 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); 3568 return status; 3569 } 3570 WARN_ON(adapter->tx_ring.used > NUM_TCB); 3571 return 0; 3572} 3573 3574/** 3575 * et131x_send_packets - This function is called by the OS to send packets 3576 * @skb: the packet(s) to send 3577 * @netdev:device on which to TX the above packet(s) 3578 * 3579 * Return 0 in almost all cases; non-zero value in extreme hard failure only 3580 */ 3581int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev) 3582{ 3583 int status = 0; 3584 struct et131x_adapter *adapter = netdev_priv(netdev); 3585 3586 /* Send these packets 3587 * 3588 * NOTE: The Linux Tx entry point is only given one packet at a time 3589 * to Tx, so the PacketCount and it's array used makes no sense here 3590 */ 3591 3592 /* TCB is not available */ 3593 if (adapter->tx_ring.used >= NUM_TCB) { 3594 /* NOTE: If there's an error on send, no need to queue the 3595 * packet under Linux; if we just send an error up to the 3596 * netif layer, it will resend the skb to us. 
3597 */ 3598 status = -ENOMEM; 3599 } else { 3600 /* We need to see if the link is up; if it's not, make the 3601 * netif layer think we're good and drop the packet 3602 */ 3603 if ((adapter->flags & fMP_ADAPTER_FAIL_SEND_MASK) || 3604 !netif_carrier_ok(netdev)) { 3605 dev_kfree_skb_any(skb); 3606 skb = NULL; 3607 3608 adapter->net_stats.tx_dropped++; 3609 } else { 3610 status = send_packet(skb, adapter); 3611 if (status != 0 && status != -ENOMEM) { 3612 /* On any other error, make netif think we're 3613 * OK and drop the packet 3614 */ 3615 dev_kfree_skb_any(skb); 3616 skb = NULL; 3617 adapter->net_stats.tx_dropped++; 3618 } 3619 } 3620 } 3621 return status; 3622} 3623 3624/** 3625 * free_send_packet - Recycle a struct tcb 3626 * @adapter: pointer to our adapter 3627 * @tcb: pointer to struct tcb 3628 * 3629 * Complete the packet if necessary 3630 * Locking: called without the send-queue lock held; the TCB ready-queue lock is taken internally 3631 */ 3632static inline void free_send_packet(struct et131x_adapter *adapter, 3633 struct tcb *tcb) 3634{ 3635 unsigned long flags; 3636 struct tx_desc *desc = NULL; 3637 struct net_device_stats *stats = &adapter->net_stats; 3638 3639 if (tcb->flags & fMP_DEST_BROAD) 3640 atomic_inc(&adapter->stats.broadcast_pkts_xmtd); 3641 else if (tcb->flags & fMP_DEST_MULTI) 3642 atomic_inc(&adapter->stats.multicast_pkts_xmtd); 3643 else 3644 atomic_inc(&adapter->stats.unicast_pkts_xmtd); 3645 3646 if (tcb->skb) { 3647 stats->tx_bytes += tcb->skb->len; 3648 3649 /* Iterate through the TX descriptors on the ring 3650 * corresponding to this packet and unmap the fragments 3651 * they point to 3652 */ 3653 do { 3654 desc = (struct tx_desc *) 3655 (adapter->tx_ring.tx_desc_ring + 3656 INDEX10(tcb->index_start)); 3657 3658 dma_unmap_single(&adapter->pdev->dev, 3659 desc->addr_lo, 3660 desc->len_vlan, DMA_TO_DEVICE); 3661 3662 add_10bit(&tcb->index_start, 1); 3663 if (INDEX10(tcb->index_start) >= 3664 NUM_DESC_PER_RING_TX) { 3665 tcb->index_start &= ~ET_DMA10_MASK; 3666 tcb->index_start ^= ET_DMA10_WRAP; 3667 } 3668 } while (desc != (adapter->tx_ring.tx_desc_ring + 3669 INDEX10(tcb->index))); 3670 3671 dev_kfree_skb_any(tcb->skb); 3672 } 3673 3674 memset(tcb, 0, sizeof(struct tcb)); 3675 3676 /* Add the TCB to the Ready Q */ 3677 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); 3678 3679 adapter->net_stats.tx_packets++; 3680 3681 if (adapter->tx_ring.tcb_qtail) 3682 adapter->tx_ring.tcb_qtail->next = tcb; 3683 else 3684 /* Apparently ready Q is empty. */ 3685 adapter->tx_ring.tcb_qhead = tcb; 3686 3687 adapter->tx_ring.tcb_qtail = tcb; 3688 3689 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); 3690 WARN_ON(adapter->tx_ring.used < 0); 3691} 3692 3693/** 3694 * et131x_free_busy_send_packets - Free and complete the stopped active sends 3695 * @adapter: pointer to our adapter 3696 * 3697 * Locking: the TCB send-queue lock is acquired and released internally 3698 */ 3699void et131x_free_busy_send_packets(struct et131x_adapter *adapter) 3700{ 3701 struct tcb *tcb; 3702 unsigned long flags; 3703 u32 freed = 0; 3704 3705 /* Any packets being sent?
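 * The send list is walked under tcb_send_qlock, but the lock is dropped
 * around each free_send_packet() call, since that routine takes the
 * ready-queue lock itself.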
Check the first TCB on the send list */ 3706 spin_lock_irqsave(&adapter->tcb_send_qlock, flags); 3707 3708 tcb = adapter->tx_ring.send_head; 3709 3710 while (tcb != NULL && freed < NUM_TCB) { 3711 struct tcb *next = tcb->next; 3712 3713 adapter->tx_ring.send_head = next; 3714 3715 if (next == NULL) 3716 adapter->tx_ring.send_tail = NULL; 3717 3718 adapter->tx_ring.used--; 3719 3720 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); 3721 3722 freed++; 3723 free_send_packet(adapter, tcb); 3724 3725 spin_lock_irqsave(&adapter->tcb_send_qlock, flags); 3726 3727 tcb = adapter->tx_ring.send_head; 3728 } 3729 3730 WARN_ON(freed == NUM_TCB); 3731 3732 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); 3733 3734 adapter->tx_ring.used = 0; 3735} 3736 3737/** 3738 * et131x_handle_send_interrupt - Interrupt handler for sending processing 3739 * @adapter: pointer to our adapter 3740 * 3741 * Reclaim the send resources, complete sends and get more to send from 3742 * the send wait queue. 3743 * 3744 * Locking: the TCB send-queue lock is acquired and released internally 3745 */ 3746void et131x_handle_send_interrupt(struct et131x_adapter *adapter) 3747{ 3748 unsigned long flags; 3749 u32 serviced; 3750 struct tcb *tcb; 3751 u32 index; 3752 3753 serviced = readl(&adapter->regs->txdma.new_service_complete); 3754 index = INDEX10(serviced); 3755 3756 /* Has the ring wrapped? Process any descriptors that do not have 3757 * the same "wrap" indicator as the current completion indicator (an index here is 10 position bits plus the ET_DMA10_WRAP bit, which the driver toggles each time the position wraps past the end of the ring, so equal wrap bits mean both indices are on the same pass) 3758 */ 3759 spin_lock_irqsave(&adapter->tcb_send_qlock, flags); 3760 3761 tcb = adapter->tx_ring.send_head; 3762 3763 while (tcb && 3764 ((serviced ^ tcb->index) & ET_DMA10_WRAP) && 3765 index < INDEX10(tcb->index)) { 3766 adapter->tx_ring.used--; 3767 adapter->tx_ring.send_head = tcb->next; 3768 if (tcb->next == NULL) 3769 adapter->tx_ring.send_tail = NULL; 3770 3771 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); 3772 free_send_packet(adapter, tcb); 3773 spin_lock_irqsave(&adapter->tcb_send_qlock, flags); 3774 3775 /* Go to the next packet */ 3776 tcb = adapter->tx_ring.send_head; 3777 } 3778 while (tcb && 3779 !((serviced ^ tcb->index) & ET_DMA10_WRAP) 3780 && index > (tcb->index & ET_DMA10_MASK)) { 3781 adapter->tx_ring.used--; 3782 adapter->tx_ring.send_head = tcb->next; 3783 if (tcb->next == NULL) 3784 adapter->tx_ring.send_tail = NULL; 3785 3786 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); 3787 free_send_packet(adapter, tcb); 3788 spin_lock_irqsave(&adapter->tcb_send_qlock, flags); 3789 3790 /* Go to the next packet */ 3791 tcb = adapter->tx_ring.send_head; 3792 } 3793 3794 /* Wake up the queue when we hit a low-water mark */ 3795 if (adapter->tx_ring.used <= NUM_TCB / 3) 3796 netif_wake_queue(adapter->netdev); 3797 3798 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); 3799} 3800 3801/* ETHTOOL functions */ 3802 3803static int et131x_get_settings(struct net_device *netdev, 3804 struct ethtool_cmd *cmd) 3805{ 3806 struct et131x_adapter *adapter = netdev_priv(netdev); 3807 3808 return phy_ethtool_gset(adapter->phydev, cmd); 3809} 3810 3811static int et131x_set_settings(struct net_device *netdev, 3812 struct ethtool_cmd *cmd) 3813{ 3814 struct et131x_adapter *adapter = netdev_priv(netdev); 3815 3816 return phy_ethtool_sset(adapter->phydev, cmd); 3817} 3818 3819static int et131x_get_regs_len(struct net_device *netdev) 3820{ 3821#define ET131X_REGS_LEN 256 3822 return ET131X_REGS_LEN * sizeof(u32); 3823} 3824 3825static void et131x_get_regs(struct net_device *netdev, 3826 struct ethtool_regs *regs, void
*regs_data) 3827{ 3828 struct et131x_adapter *adapter = netdev_priv(netdev); 3829 struct address_map __iomem *aregs = adapter->regs; 3830 u32 *regs_buff = regs_data; 3831 u32 num = 0; 3832 3833 memset(regs_data, 0, et131x_get_regs_len(netdev)); 3834 3835 regs->version = (1 << 24) | (adapter->pdev->revision << 16) | 3836 adapter->pdev->device; 3837 3838 /* PHY regs */ 3839 et131x_mii_read(adapter, MII_BMCR, (u16 *)&regs_buff[num++]); 3840 et131x_mii_read(adapter, MII_BMSR, (u16 *)&regs_buff[num++]); 3841 et131x_mii_read(adapter, MII_PHYSID1, (u16 *)&regs_buff[num++]); 3842 et131x_mii_read(adapter, MII_PHYSID2, (u16 *)&regs_buff[num++]); 3843 et131x_mii_read(adapter, MII_ADVERTISE, (u16 *)&regs_buff[num++]); 3844 et131x_mii_read(adapter, MII_LPA, (u16 *)&regs_buff[num++]); 3845 et131x_mii_read(adapter, MII_EXPANSION, (u16 *)&regs_buff[num++]); 3846 /* Autoneg next page transmit reg */ 3847 et131x_mii_read(adapter, 0x07, (u16 *)&regs_buff[num++]); 3848 /* Link partner next page reg */ 3849 et131x_mii_read(adapter, 0x08, (u16 *)&regs_buff[num++]); 3850 et131x_mii_read(adapter, MII_CTRL1000, (u16 *)&regs_buff[num++]); 3851 et131x_mii_read(adapter, MII_STAT1000, (u16 *)&regs_buff[num++]); 3852 et131x_mii_read(adapter, MII_ESTATUS, (u16 *)&regs_buff[num++]); 3853 et131x_mii_read(adapter, PHY_INDEX_REG, (u16 *)&regs_buff[num++]); 3854 et131x_mii_read(adapter, PHY_DATA_REG, (u16 *)&regs_buff[num++]); 3855 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, 3856 (u16 *)&regs_buff[num++]); 3857 et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL, 3858 (u16 *)&regs_buff[num++]); 3859 et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL+1, 3860 (u16 *)&regs_buff[num++]); 3861 et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL, 3862 (u16 *)&regs_buff[num++]); 3863 et131x_mii_read(adapter, PHY_CONFIG, (u16 *)&regs_buff[num++]); 3864 et131x_mii_read(adapter, PHY_PHY_CONTROL, (u16 *)&regs_buff[num++]); 3865 et131x_mii_read(adapter, PHY_INTERRUPT_MASK, (u16 *)&regs_buff[num++]); 3866 et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, 3867 (u16 *)&regs_buff[num++]); 3868 et131x_mii_read(adapter, PHY_PHY_STATUS, (u16 *)&regs_buff[num++]); 3869 et131x_mii_read(adapter, PHY_LED_1, (u16 *)&regs_buff[num++]); 3870 et131x_mii_read(adapter, PHY_LED_2, (u16 *)&regs_buff[num++]); 3871 3872 /* Global regs */ 3873 regs_buff[num++] = readl(&aregs->global.txq_start_addr); 3874 regs_buff[num++] = readl(&aregs->global.txq_end_addr); 3875 regs_buff[num++] = readl(&aregs->global.rxq_start_addr); 3876 regs_buff[num++] = readl(&aregs->global.rxq_end_addr); 3877 regs_buff[num++] = readl(&aregs->global.pm_csr); 3878 regs_buff[num++] = adapter->stats.interrupt_status; 3879 regs_buff[num++] = readl(&aregs->global.int_mask); 3880 regs_buff[num++] = readl(&aregs->global.int_alias_clr_en); 3881 regs_buff[num++] = readl(&aregs->global.int_status_alias); 3882 regs_buff[num++] = readl(&aregs->global.sw_reset); 3883 regs_buff[num++] = readl(&aregs->global.slv_timer); 3884 regs_buff[num++] = readl(&aregs->global.msi_config); 3885 regs_buff[num++] = readl(&aregs->global.loopback); 3886 regs_buff[num++] = readl(&aregs->global.watchdog_timer); 3887 3888 /* TXDMA regs */ 3889 regs_buff[num++] = readl(&aregs->txdma.csr); 3890 regs_buff[num++] = readl(&aregs->txdma.pr_base_hi); 3891 regs_buff[num++] = readl(&aregs->txdma.pr_base_lo); 3892 regs_buff[num++] = readl(&aregs->txdma.pr_num_des); 3893 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr); 3894 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext); 3895 regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr); 
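 /* Note: some of the registers dumped below are clear-on-read;
 * tx_dma_error, for one, is cleared by the read (see the COR note in
 * et131x_isr_handler()), so dumping regs can consume pending error state.
 */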
3896 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi); 3897 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo); 3898 regs_buff[num++] = readl(&aregs->txdma.service_request); 3899 regs_buff[num++] = readl(&aregs->txdma.service_complete); 3900 regs_buff[num++] = readl(&aregs->txdma.cache_rd_index); 3901 regs_buff[num++] = readl(&aregs->txdma.cache_wr_index); 3902 regs_buff[num++] = readl(&aregs->txdma.tx_dma_error); 3903 regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt); 3904 regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt); 3905 regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt); 3906 regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt); 3907 regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt); 3908 regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt); 3909 regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt); 3910 regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt); 3911 regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt); 3912 regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt); 3913 regs_buff[num++] = readl(&aregs->txdma.new_service_complete); 3914 regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt); 3915 3916 /* RXDMA regs */ 3917 regs_buff[num++] = readl(&aregs->rxdma.csr); 3918 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi); 3919 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo); 3920 regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done); 3921 regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time); 3922 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr); 3923 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext); 3924 regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr); 3925 regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi); 3926 regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo); 3927 regs_buff[num++] = readl(&aregs->rxdma.psr_num_des); 3928 regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset); 3929 regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset); 3930 regs_buff[num++] = readl(&aregs->rxdma.psr_access_index); 3931 regs_buff[num++] = readl(&aregs->rxdma.psr_min_des); 3932 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo); 3933 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi); 3934 regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des); 3935 regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset); 3936 regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset); 3937 regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index); 3938 regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des); 3939 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo); 3940 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi); 3941 regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des); 3942 regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset); 3943 regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset); 3944 regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index); 3945 regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des); 3946} 3947 3948#define ET131X_DRVINFO_LEN 32 /* value from ethtool.h */ 3949static void et131x_get_drvinfo(struct net_device *netdev, 3950 struct ethtool_drvinfo *info) 3951{ 3952 struct et131x_adapter *adapter = netdev_priv(netdev); 3953 3954 strncpy(info->driver, DRIVER_NAME, ET131X_DRVINFO_LEN); 3955 strncpy(info->version, DRIVER_VERSION, ET131X_DRVINFO_LEN); 3956 strncpy(info->bus_info, pci_name(adapter->pdev), ET131X_DRVINFO_LEN); 3957} 3958 3959static struct ethtool_ops et131x_ethtool_ops = { 3960 .get_settings = et131x_get_settings, 3961 .set_settings = et131x_set_settings, 
3962 .get_drvinfo = et131x_get_drvinfo, 3963 .get_regs_len = et131x_get_regs_len, 3964 .get_regs = et131x_get_regs, 3965 .get_link = ethtool_op_get_link, 3966}; 3967 3968void et131x_set_ethtool_ops(struct net_device *netdev) 3969{ 3970 SET_ETHTOOL_OPS(netdev, &et131x_ethtool_ops); 3971} 3972 3973/* PCI functions */ 3974 3975/** 3976 * et131x_hwaddr_init - set up the MAC Address on the ET1310 3977 * @adapter: pointer to our private adapter structure 3978 */ 3979void et131x_hwaddr_init(struct et131x_adapter *adapter) 3980{ 3981 /* If we have our default MAC from init and no MAC address from 3982 * EEPROM then we need to generate the last octet and set it on the 3983 * device 3984 */ 3985 if (adapter->rom_addr[0] == 0x00 && 3986 adapter->rom_addr[1] == 0x00 && 3987 adapter->rom_addr[2] == 0x00 && 3988 adapter->rom_addr[3] == 0x00 && 3989 adapter->rom_addr[4] == 0x00 && 3990 adapter->rom_addr[5] == 0x00) { 3991 /* 3992 * We need to randomly generate the last octet so we 3993 * decrease our chances of setting the MAC address to the 3994 * same as another one of our cards in the system 3995 */ 3996 get_random_bytes(&adapter->addr[5], 1); 3997 /* 3998 * We have the default value in the register we are 3999 * working with so we need to copy the current 4000 * address into the permanent address 4001 */ 4002 memcpy(adapter->rom_addr, 4003 adapter->addr, ETH_ALEN); 4004 } else { 4005 /* We do not have an override address, so set the 4006 * current address to the permanent address and add 4007 * it to the device 4008 */ 4009 memcpy(adapter->addr, 4010 adapter->rom_addr, ETH_ALEN); 4011 } 4012} 4013 4014/** 4015 * et131x_pci_init - initial PCI setup 4016 * @adapter: pointer to our private adapter structure 4017 * @pdev: our PCI device 4018 * 4019 * Perform the initial setup of PCI registers and if possible initialise 4020 * the MAC address. At this point the I/O registers have yet to be mapped 4021 */ 4022static int et131x_pci_init(struct et131x_adapter *adapter, 4023 struct pci_dev *pdev) 4024{ 4025 int i; 4026 u8 max_payload; 4027 u8 read_size_reg; 4028 4029 if (et131x_init_eeprom(adapter) < 0) 4030 return -EIO; 4031 4032 /* Let's set up the PORT LOGIC Register. First we need to know what 4033 * the max_payload_size is 4034 */ 4035 if (pci_read_config_byte(pdev, ET1310_PCI_MAX_PYLD, &max_payload)) { 4036 dev_err(&pdev->dev, 4037 "Could not read PCI config space for Max Payload Size\n"); 4038 return -EIO; 4039 } 4040 4041 /* Program the Ack/Nak latency and replay timers */ 4042 max_payload &= 0x07; /* Only the lower 3 bits are valid */ 4043 4044 if (max_payload < 2) { 4045 static const u16 acknak[2] = { 0x76, 0xD0 }; 4046 static const u16 replay[2] = { 0x1E0, 0x2ED }; 4047 4048 if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK, 4049 acknak[max_payload])) { 4050 dev_err(&pdev->dev, 4051 "Could not write PCI config space for ACK/NAK\n"); 4052 return -EIO; 4053 } 4054 if (pci_write_config_word(pdev, ET1310_PCI_REPLAY, 4055 replay[max_payload])) { 4056 dev_err(&pdev->dev, 4057 "Could not write PCI config space for Replay Timer\n"); 4058 return -EIO; 4059 } 4060 } 4061 4062 /* l0s and l1 latency timers. We are using default values.
4063 * Representing 001 for L0s and 010 for L1 (packed into bits [2:0] and [5:3] respectively, so the byte written below is 0x11, i.e. 0b010001) 4064 */ 4065 if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) { 4066 dev_err(&pdev->dev, 4067 "Could not write PCI config space for Latency Timers\n"); 4068 return -EIO; 4069 } 4070 4071 /* Change the max read size to 2k */ 4072 if (pci_read_config_byte(pdev, 0x51, &read_size_reg)) { 4073 dev_err(&pdev->dev, 4074 "Could not read PCI config space for Max read size\n"); 4075 return -EIO; 4076 } 4077 4078 read_size_reg &= 0x8f; 4079 read_size_reg |= 0x40; 4080 4081 if (pci_write_config_byte(pdev, 0x51, read_size_reg)) { 4082 dev_err(&pdev->dev, 4083 "Could not write PCI config space for Max read size\n"); 4084 return -EIO; 4085 } 4086 4087 /* Get MAC address from config space if an eeprom exists, otherwise 4088 * the MAC address there will not be valid 4089 */ 4090 if (!adapter->has_eeprom) { 4091 et131x_hwaddr_init(adapter); 4092 return 0; 4093 } 4094 4095 for (i = 0; i < ETH_ALEN; i++) { 4096 if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i, 4097 adapter->rom_addr + i)) { 4098 dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n"); 4099 return -EIO; 4100 } 4101 } 4102 memcpy(adapter->addr, adapter->rom_addr, ETH_ALEN); 4103 return 0; 4104} 4105 4106/** 4107 * et131x_error_timer_handler 4108 * @data: timer-specific variable; here a pointer to our adapter structure 4109 * 4110 * The routine called when the error timer expires, to track the number of 4111 * recurring errors. 4112 */ 4113void et131x_error_timer_handler(unsigned long data) 4114{ 4115 struct et131x_adapter *adapter = (struct et131x_adapter *) data; 4116 struct phy_device *phydev = adapter->phydev; 4117 4118 if (et1310_in_phy_coma(adapter)) { 4119 /* Bring the device immediately out of coma, to 4120 * prevent it from sleeping indefinitely; this 4121 * mechanism could be improved! */ 4122 et1310_disable_phy_coma(adapter); 4123 adapter->boot_coma = 20; 4124 } else { 4125 et1310_update_macstat_host_counters(adapter); 4126 } 4127 4128 if (!phydev->link && adapter->boot_coma < 11) 4129 adapter->boot_coma++; 4130 4131 if (adapter->boot_coma == 10) { 4132 if (!phydev->link) { 4133 if (!et1310_in_phy_coma(adapter)) { 4134 /* NOTE - This was originally a 'sync with 4135 * interrupt'. How to do that under Linux? 4136 */ 4137 et131x_enable_interrupts(adapter); 4138 et1310_enable_phy_coma(adapter); 4139 } 4140 } 4141 } 4142 4143 /* This is a periodic timer, so reschedule */ 4144 mod_timer(&adapter->error_timer, jiffies + 4145 TX_ERROR_PERIOD * HZ / 1000); 4146} 4147 4148/** 4149 * et131x_adapter_memory_alloc 4150 * @adapter: pointer to our private adapter structure 4151 * 4152 * Returns 0 on success, errno on failure (as defined in errno.h). 4153 * 4154 * Allocate all the memory blocks for send, receive and others.
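 * If a later step fails, everything allocated by the earlier steps is
 * freed again before returning (an Rx allocation failure frees the Tx
 * ring; a receive-init failure frees both DMA areas), so the caller
 * never sees a half-initialised state.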
4155 */ 4156int et131x_adapter_memory_alloc(struct et131x_adapter *adapter) 4157{ 4158 int status; 4159 4160 /* Allocate memory for the Tx Ring */ 4161 status = et131x_tx_dma_memory_alloc(adapter); 4162 if (status != 0) { 4163 dev_err(&adapter->pdev->dev, 4164 "et131x_tx_dma_memory_alloc FAILED\n"); 4165 return status; 4166 } 4167 /* Receive buffer memory allocation */ 4168 status = et131x_rx_dma_memory_alloc(adapter); 4169 if (status != 0) { 4170 dev_err(&adapter->pdev->dev, 4171 "et131x_rx_dma_memory_alloc FAILED\n"); 4172 et131x_tx_dma_memory_free(adapter); 4173 return status; 4174 } 4175 4176 /* Init receive data structures */ 4177 status = et131x_init_recv(adapter); 4178 if (status != 0) { 4179 dev_err(&adapter->pdev->dev, 4180 "et131x_init_recv FAILED\n"); 4181 et131x_tx_dma_memory_free(adapter); 4182 et131x_rx_dma_memory_free(adapter); 4183 } 4184 return status; 4185} 4186 4187/** 4188 * et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx 4189 * @adapter: pointer to our private adapter structure 4190 */ 4191void et131x_adapter_memory_free(struct et131x_adapter *adapter) 4192{ 4193 /* Free DMA memory */ 4194 et131x_tx_dma_memory_free(adapter); 4195 et131x_rx_dma_memory_free(adapter); 4196} 4197 4198static void et131x_adjust_link(struct net_device *netdev) 4199{ 4200 struct et131x_adapter *adapter = netdev_priv(netdev); 4201 struct phy_device *phydev = adapter->phydev; 4202 4203 if (netif_carrier_ok(netdev)) { 4204 adapter->boot_coma = 20; 4205 4206 if (phydev && phydev->speed == SPEED_10) { 4207 /* 4208 * NOTE - Is there a way to query this without 4209 * TruePHY? 4210 * && TRU_QueryCoreType(adapter->hTruePhy, 0)== 4211 * EMI_TRUEPHY_A13O) { 4212 */ 4213 u16 register18; 4214 4215 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, 4216 &register18); 4217 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, 4218 register18 | 0x4); 4219 et131x_mii_write(adapter, PHY_INDEX_REG, 4220 register18 | 0x8402); 4221 et131x_mii_write(adapter, PHY_DATA_REG, 4222 register18 | 511); 4223 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, 4224 register18); 4225 } 4226 4227 et1310_config_flow_control(adapter); 4228 4229 if (phydev && phydev->speed == SPEED_1000 && 4230 adapter->registry_jumbo_packet > 2048) { 4231 u16 reg; 4232 4233 et131x_mii_read(adapter, PHY_CONFIG, &reg); 4234 reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH; 4235 reg |= ET_PHY_CONFIG_FIFO_DEPTH_32; 4236 et131x_mii_write(adapter, PHY_CONFIG, reg); 4237 } 4238 4239 et131x_set_rx_dma_timer(adapter); 4240 et1310_config_mac_regs2(adapter); 4241 } 4242 4243 if (phydev && phydev->link != adapter->link) { 4244 /* 4245 * Check to see if we are in coma mode and if 4246 * so, disable it because we will not be able 4247 * to read PHY values until we are out. 4248 */ 4249 if (et1310_in_phy_coma(adapter)) 4250 et1310_disable_phy_coma(adapter); 4251 4252 if (phydev->link) { 4253 adapter->boot_coma = 20; 4254 } else { 4255 dev_warn(&adapter->pdev->dev, 4256 "Link down - cable problem ?\n"); 4257 adapter->boot_coma = 0; 4258 4259 if (phydev->speed == SPEED_10) { 4260 /* NOTE - Is there a way to query this without 4261 * TruePHY? 
4262 * && TRU_QueryCoreType(adapter->hTruePhy, 0) == 4263 * EMI_TRUEPHY_A13O) 4264 */ 4265 u16 register18; 4266 4267 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, 4268 &register18); 4269 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, 4270 register18 | 0x4); 4271 et131x_mii_write(adapter, PHY_INDEX_REG, 4272 register18 | 0x8402); 4273 et131x_mii_write(adapter, PHY_DATA_REG, 4274 register18 | 511); 4275 et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, 4276 register18); 4277 } 4278 4279 /* Free the packets being actively sent & stopped */ 4280 et131x_free_busy_send_packets(adapter); 4281 4282 /* Re-initialize the send structures */ 4283 et131x_init_send(adapter); 4284 4285 /* 4286 * Bring the device back to the state it was during 4287 * init prior to autonegotiation being complete. This 4288 * way, when we get the auto-neg complete interrupt, 4289 * we can complete init by calling config_mac_regs2. 4290 */ 4291 et131x_soft_reset(adapter); 4292 4293 /* Setup ET1310 as per the documentation */ 4294 et131x_adapter_setup(adapter); 4295 4296 /* perform reset of tx/rx */ 4297 et131x_disable_txrx(netdev); 4298 et131x_enable_txrx(netdev); 4299 } 4300 4301 adapter->link = phydev->link; 4302 4303 phy_print_status(phydev); 4304 } 4305} 4306 4307static int et131x_mii_probe(struct net_device *netdev) 4308{ 4309 struct et131x_adapter *adapter = netdev_priv(netdev); 4310 struct phy_device *phydev = NULL; 4311 4312 phydev = phy_find_first(adapter->mii_bus); 4313 if (!phydev) { 4314 dev_err(&adapter->pdev->dev, "no PHY found\n"); 4315 return -ENODEV; 4316 } 4317 4318 phydev = phy_connect(netdev, dev_name(&phydev->dev), 4319 &et131x_adjust_link, 0, PHY_INTERFACE_MODE_MII); 4320 4321 if (IS_ERR(phydev)) { 4322 dev_err(&adapter->pdev->dev, "Could not attach to PHY\n"); 4323 return PTR_ERR(phydev); 4324 } 4325 4326 phydev->supported &= (SUPPORTED_10baseT_Half 4327 | SUPPORTED_10baseT_Full 4328 | SUPPORTED_100baseT_Half 4329 | SUPPORTED_100baseT_Full 4330 | SUPPORTED_Autoneg 4331 | SUPPORTED_MII 4332 | SUPPORTED_TP); 4333 4334 if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST) 4335 phydev->supported |= SUPPORTED_1000baseT_Full; 4336 4337 phydev->advertising = phydev->supported; 4338 adapter->phydev = phydev; 4339 4340 dev_info(&adapter->pdev->dev, "attached PHY driver [%s] " 4341 "(mii_bus:phy_addr=%s)\n", 4342 phydev->drv->name, dev_name(&phydev->dev)); 4343 4344 return 0; 4345} 4346 4347/** 4348 * et131x_adapter_init 4349 * @netdev: pointer to the net_device whose private area holds our adapter 4350 * @pdev: pointer to the PCI device 4351 * 4352 * Initialize the data structures for the et131x_adapter object and link 4353 * them together with the platform provided device structures.
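 * Note that the adapter structure itself is not allocated here: it lives
 * in the net_device's private area, sized for struct et131x_adapter by
 * alloc_etherdev() in et131x_device_alloc() and reached via netdev_priv().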
4354 */ 4355static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev, 4356 struct pci_dev *pdev) 4357{ 4358 static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 }; 4359 4360 struct et131x_adapter *adapter; 4361 4362 /* Allocate private adapter struct and copy in relevant information */ 4363 adapter = netdev_priv(netdev); 4364 adapter->pdev = pci_dev_get(pdev); 4365 adapter->netdev = netdev; 4366 4367 /* Do the same for the netdev struct */ 4368 netdev->irq = pdev->irq; 4369 netdev->base_addr = pci_resource_start(pdev, 0); 4370 4371 /* Initialize spinlocks here */ 4372 spin_lock_init(&adapter->lock); 4373 spin_lock_init(&adapter->tcb_send_qlock); 4374 spin_lock_init(&adapter->tcb_ready_qlock); 4375 spin_lock_init(&adapter->send_hw_lock); 4376 spin_lock_init(&adapter->rcv_lock); 4377 spin_lock_init(&adapter->rcv_pend_lock); 4378 spin_lock_init(&adapter->fbr_lock); 4379 spin_lock_init(&adapter->phy_lock); 4380 4381 adapter->registry_jumbo_packet = 1514; /* 1514-9216 */ 4382 4383 /* Set the MAC address to a default */ 4384 memcpy(adapter->addr, default_mac, ETH_ALEN); 4385 4386 return adapter; 4387} 4388 4389/** 4390 * et131x_pci_remove 4391 * @pdev: a pointer to the device's pci_dev structure 4392 * 4393 * Registered in the pci_driver structure, this function is called when the 4394 * PCI subsystem detects that a PCI device which matches the information 4395 * contained in the pci_device_id table has been removed. 4396 */ 4397static void __devexit et131x_pci_remove(struct pci_dev *pdev) 4398{ 4399 struct net_device *netdev = pci_get_drvdata(pdev); 4400 struct et131x_adapter *adapter = netdev_priv(netdev); 4401 4402 unregister_netdev(netdev); 4403 mdiobus_unregister(adapter->mii_bus); 4404 kfree(adapter->mii_bus->irq); 4405 mdiobus_free(adapter->mii_bus); 4406 4407 et131x_adapter_memory_free(adapter); 4408 iounmap(adapter->regs); 4409 pci_dev_put(pdev); 4410 4411 free_netdev(netdev); 4412 pci_release_regions(pdev); 4413 pci_disable_device(pdev); 4414} 4415 4416/** 4417 * et131x_up - Bring up a device for use. 
4418 * @netdev: device to be opened 4419 */ 4420void et131x_up(struct net_device *netdev) 4421{ 4422 struct et131x_adapter *adapter = netdev_priv(netdev); 4423 4424 et131x_enable_txrx(netdev); 4425 phy_start(adapter->phydev); 4426} 4427 4428/** 4429 * et131x_down - Bring down the device 4430 * @netdev: device to be brought down 4431 */ 4432void et131x_down(struct net_device *netdev) 4433{ 4434 struct et131x_adapter *adapter = netdev_priv(netdev); 4435 4436 /* Save the timestamp for the TX watchdog, prevent a timeout */ 4437 netdev->trans_start = jiffies; 4438 4439 phy_stop(adapter->phydev); 4440 et131x_disable_txrx(netdev); 4441} 4442 4443#ifdef CONFIG_PM_SLEEP 4444static int et131x_suspend(struct device *dev) 4445{ 4446 struct pci_dev *pdev = to_pci_dev(dev); 4447 struct net_device *netdev = pci_get_drvdata(pdev); 4448 4449 if (netif_running(netdev)) { 4450 netif_device_detach(netdev); 4451 et131x_down(netdev); 4452 pci_save_state(pdev); 4453 } 4454 4455 return 0; 4456} 4457 4458static int et131x_resume(struct device *dev) 4459{ 4460 struct pci_dev *pdev = to_pci_dev(dev); 4461 struct net_device *netdev = pci_get_drvdata(pdev); 4462 4463 if (netif_running(netdev)) { 4464 pci_restore_state(pdev); 4465 et131x_up(netdev); 4466 netif_device_attach(netdev); 4467 } 4468 4469 return 0; 4470} 4471 4472static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume); 4473#define ET131X_PM_OPS (&et131x_pm_ops) 4474#else 4475#define ET131X_PM_OPS NULL 4476#endif 4477 4478/* ISR functions */ 4479 4480/** 4481 * et131x_isr - The Interrupt Service Routine for the driver. 4482 * @irq: the IRQ on which the interrupt was received. 4483 * @dev_id: device-specific info (here a pointer to a net_device struct) 4484 * 4485 * Returns a value indicating if the interrupt was handled. 4486 */ 4487irqreturn_t et131x_isr(int irq, void *dev_id) 4488{ 4489 bool handled = true; 4490 struct net_device *netdev = (struct net_device *)dev_id; 4491 struct et131x_adapter *adapter = NULL; 4492 u32 status; 4493 4494 if (!netif_device_present(netdev)) { 4495 handled = false; 4496 goto out; 4497 } 4498 4499 adapter = netdev_priv(netdev); 4500 4501 /* If the adapter is in low power state, then it should not 4502 * recognize any interrupt 4503 */ 4504 4505 /* Disable Device Interrupts */ 4506 et131x_disable_interrupts(adapter); 4507 4508 /* Get a copy of the value in the interrupt status register 4509 * so we can process the interrupting section 4510 */ 4511 status = readl(&adapter->regs->global.int_status); 4512 4513 if (adapter->flowcontrol == FLOW_TXONLY || 4514 adapter->flowcontrol == FLOW_BOTH) { 4515 status &= ~INT_MASK_ENABLE; 4516 } else { 4517 status &= ~INT_MASK_ENABLE_NO_FLOW; 4518 } 4519 4520 /* Make sure this is our interrupt */ 4521 if (!status) { 4522 handled = false; 4523 et131x_enable_interrupts(adapter); 4524 goto out; 4525 } 4526 4527 /* This is our interrupt, so process accordingly */ 4528 4529 if (status & ET_INTR_WATCHDOG) { 4530 struct tcb *tcb = adapter->tx_ring.send_head; 4531 4532 if (tcb) 4533 if (++tcb->stale > 1) 4534 status |= ET_INTR_TXDMA_ISR; 4535 4536 if (adapter->rx_ring.unfinished_receives) 4537 status |= ET_INTR_RXDMA_XFR_DONE; 4538 else if (tcb == NULL) 4539 writel(0, &adapter->regs->global.watchdog_timer); 4540 4541 status &= ~ET_INTR_WATCHDOG; 4542 } 4543 4544 if (status == 0) { 4545 /* This interrupt has in some way been "handled" by 4546 * the ISR. Either it was a spurious Rx interrupt, or 4547 * it was a Tx interrupt that has been filtered by 4548 * the ISR.
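 * (The watchdog case above is the flip side of the Tx coalescing set up
 * in nic_send_packet(): when too few packets follow a send, the watchdog
 * expires, the head TCB goes stale and TXDMA servicing is forced.)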
4549 */ 4550 et131x_enable_interrupts(adapter); 4551 goto out; 4552 } 4553 4554 /* We need to save the interrupt status value for use in our 4555 * DPC. We will clear the software copy of that in that 4556 * routine. 4557 */ 4558 adapter->stats.interrupt_status = status; 4559 4560 /* Defer the rest of the handling to et131x_isr_handler(), which 4561 * runs as a work item on the kernel's shared workqueue 4562 */ 4563 4564 schedule_work(&adapter->task); 4565out: 4566 return IRQ_RETVAL(handled); 4567} 4568 4569/** 4570 * et131x_isr_handler - The ISR handler 4571 * @work: pointer to the work_struct embedded in our adapter structure 4572 * 4573 * Scheduled to run in a deferred context by the ISR. This is where the ISR's 4574 * work actually gets done. 4575 */ 4576void et131x_isr_handler(struct work_struct *work) 4577{ 4578 struct et131x_adapter *adapter = 4579 container_of(work, struct et131x_adapter, task); 4580 u32 status = adapter->stats.interrupt_status; 4581 struct address_map __iomem *iomem = adapter->regs; 4582 4583 /* 4584 * These first two are by far the most common. Once handled, we clear 4585 * their two bits in the status word. If the word is now zero, we 4586 * exit. 4587 */ 4588 /* Handle all the completed Transmit interrupts */ 4589 if (status & ET_INTR_TXDMA_ISR) 4590 et131x_handle_send_interrupt(adapter); 4591 4592 /* Handle all the completed Receives interrupts */ 4593 if (status & ET_INTR_RXDMA_XFR_DONE) 4594 et131x_handle_recv_interrupt(adapter); 4595 4596 status &= 0xffffffd7; 4597 4598 if (status) { 4599 /* Handle the TXDMA Error interrupt */ 4600 if (status & ET_INTR_TXDMA_ERR) { 4601 u32 txdma_err; 4602 4603 /* Following read also clears the register (COR) */ 4604 txdma_err = readl(&iomem->txdma.tx_dma_error); 4605 4606 dev_warn(&adapter->pdev->dev, 4607 "TXDMA_ERR interrupt, error = %d\n", 4608 txdma_err); 4609 } 4610 4611 /* Handle Free Buffer Ring 0 and 1 Low interrupt */ 4612 if (status & 4613 (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) { 4614 /* 4615 * This indicates the number of unused buffers in 4616 * RXDMA free buffer ring 0 is <= the limit you 4617 * programmed. Free buffer resources need to be 4618 * returned. Free buffers are consumed as packets 4619 * are passed from the network to the host. The host 4620 * becomes aware of the packets from the contents of 4621 * the packet status ring. This ring is queried when 4622 * the packet done interrupt occurs. Packets are then 4623 * passed to the OS. When the OS is done with the 4624 * packets the resources can be returned to the 4625 * ET1310 for re-use. This interrupt is one method of 4626 * returning resources. 4627 */ 4628 4629 /* If the user has flow control on, then we will 4630 * send a pause packet, otherwise just exit 4631 */ 4632 if (adapter->flowcontrol == FLOW_TXONLY || 4633 adapter->flowcontrol == FLOW_BOTH) { 4634 u32 pm_csr; 4635 4636 /* Tell the device to send a pause packet via 4637 * the back pressure register (bp req and 4638 * bp xon/xoff) 4639 */ 4640 pm_csr = readl(&iomem->global.pm_csr); 4641 if (!et1310_in_phy_coma(adapter)) 4642 writel(3, &iomem->txmac.bp_ctrl); 4643 } 4644 } 4645 4646 /* Handle Packet Status Ring Low Interrupt */ 4647 if (status & ET_INTR_RXDMA_STAT_LOW) { 4648 4649 /* 4650 * Same idea as with the two Free Buffer Rings. 4651 * Packets going from the network to the host each 4652 * consume a free buffer resource and a packet status 4653 * resource. These resources are passed to the OS.
4654 * When the OS is done with the resources, they need 4655 * to be returned to the ET1310. This is one method 4656 * of returning the resources. 4657 */ 4658 } 4659 4660 /* Handle RXDMA Error Interrupt */ 4661 if (status & ET_INTR_RXDMA_ERR) { 4662 /* 4663 * The rxdma_error interrupt is sent when a time-out 4664 * on a request issued by the JAGCore has occurred or 4665 * a completion is returned with an unsuccessful 4666 * status. In both cases the request is considered 4667 * complete. The JAGCore will automatically re-try the 4668 * request in question. Normally information on events 4669 * like these are sent to the host using the "Advanced 4670 * Error Reporting" capability. This interrupt is 4671 * another way of getting similar information. The 4672 * only thing required is to clear the interrupt by 4673 * reading the ISR in the global resources. The 4674 * JAGCore will do a re-try on the request. Normally 4675 * you should never see this interrupt. If you start 4676 * to see this interrupt occurring frequently then 4677 * something bad has occurred. A reset might be the 4678 * thing to do. 4679 */ 4680 /* TRAP();*/ 4681 4682 dev_warn(&adapter->pdev->dev, 4683 "RxDMA_ERR interrupt, error %x\n", 4684 readl(&iomem->txmac.tx_test)); 4685 } 4686 4687 /* Handle the Wake on LAN Event */ 4688 if (status & ET_INTR_WOL) { 4689 /* 4690 * This is a secondary interrupt for wake on LAN. 4691 * The driver should never see this; if it does, 4692 * something serious is wrong. We will TRAP the 4693 * message when we are in DBG mode, otherwise we 4694 * will ignore it. 4695 */ 4696 dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n"); 4697 } 4698 4699 /* Let's move on to the TxMac */ 4700 if (status & ET_INTR_TXMAC) { 4701 u32 err = readl(&iomem->txmac.err); 4702 4703 /* 4704 * When any of the errors occur and TXMAC generates 4705 * an interrupt to report these errors, it usually 4706 * means that TXMAC has detected an error in the data 4707 * stream retrieved from the on-chip Tx Q. All of 4708 * these errors are catastrophic and TXMAC won't be 4709 * able to recover data when these errors occur. In 4710 * a nutshell, the whole Tx path will have to be reset 4711 * and re-configured afterwards. 4712 */ 4713 dev_warn(&adapter->pdev->dev, 4714 "TXMAC interrupt, error 0x%08x\n", 4715 err); 4716 4717 /* If we are debugging, we want to see this error, 4718 * otherwise we just want the device to be reset and 4719 * continue 4720 */ 4721 } 4722 4723 /* Handle RXMAC Interrupt */ 4724 if (status & ET_INTR_RXMAC) { 4725 /* 4726 * These interrupts are catastrophic to the device; 4727 * we need to disable the interrupts and set the 4728 * flag to cause us to reset so we can solve 4729 * this issue. 4730 */ 4731 /* MP_SET_FLAG( adapter, 4732 fMP_ADAPTER_HARDWARE_ERROR); */ 4733 4734 dev_warn(&adapter->pdev->dev, 4735 "RXMAC interrupt, error 0x%08x. Requesting reset\n", 4736 readl(&iomem->rxmac.err_reg)); 4737 4738 dev_warn(&adapter->pdev->dev, 4739 "Enable 0x%08x, Diag 0x%08x\n", 4740 readl(&iomem->rxmac.ctrl), 4741 readl(&iomem->rxmac.rxq_diag)); 4742 4743 /* 4744 * If we are debugging, we want to see this error, 4745 * otherwise we just want the device to be reset and 4746 * continue 4747 */ 4748 } 4749 4750 /* Handle MAC_STAT Interrupt */ 4751 if (status & ET_INTR_MAC_STAT) { 4752 /* 4753 * This means at least one of the un-masked counters 4754 * in the MAC_STAT block has rolled over. Use this 4755 * to maintain the top, software managed bits of the 4756 * counter(s).
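 * (The hardware counters are only 12 or 16 bits wide - see
 * COUNTER_WRAP_12_BIT / COUNTER_WRAP_16_BIT at the top of this file -
 * so each rollover presumably has to be folded into the driver's
 * wider software counters here.)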
4757 */ 4758 et1310_handle_macstat_interrupt(adapter); 4759 } 4760 4761 /* Handle SLV Timeout Interrupt */ 4762 if (status & ET_INTR_SLV_TIMEOUT) { 4763 /* 4764 * This means a timeout has occurred on a read or 4765 * write request to one of the JAGCore registers. The 4766 * Global Resources block has terminated the request 4767 * and on a read request, returned a "fake" value. 4768 * The most likely reasons are: Bad Address or the 4769 * addressed module is in a power-down state and 4770 * can't respond. 4771 */ 4772 } 4773 } 4774 et131x_enable_interrupts(adapter); 4775} 4776 4777/* NETDEV functions */ 4778 4779/** 4780 * et131x_stats - Return the current device statistics. 4781 * @netdev: device whose stats are being queried 4782 * 4783 * Returns a pointer to the device's net_device_stats structure 4784 */ 4785static struct net_device_stats *et131x_stats(struct net_device *netdev) 4786{ 4787 struct et131x_adapter *adapter = netdev_priv(netdev); 4788 struct net_device_stats *stats = &adapter->net_stats; 4789 struct ce_stats *devstat = &adapter->stats; 4790 4791 stats->rx_errors = devstat->rx_length_errs + 4792 devstat->rx_align_errs + 4793 devstat->rx_crc_errs + 4794 devstat->rx_code_violations + 4795 devstat->rx_other_errs; 4796 stats->tx_errors = devstat->tx_max_pkt_errs; 4797 stats->multicast = devstat->multicast_pkts_rcvd; 4798 stats->collisions = devstat->tx_collisions; 4799 4800 stats->rx_length_errors = devstat->rx_length_errs; 4801 stats->rx_over_errors = devstat->rx_overflows; 4802 stats->rx_crc_errors = devstat->rx_crc_errs; 4803 4804 /* NOTE: These stats don't have corresponding values in CE_STATS, 4805 * so we're going to have to update these directly from within the 4806 * TX/RX code 4807 */ 4808 /* stats->rx_bytes = 20; devstat->; */ 4809 /* stats->tx_bytes = 20; devstat->; */ 4810 /* stats->rx_dropped = devstat->; */ 4811 /* stats->tx_dropped = devstat->; */ 4812 4813 /* NOTE: Not used, can't find analogous statistics */ 4814 /* stats->rx_frame_errors = devstat->; */ 4815 /* stats->rx_fifo_errors = devstat->; */ 4816 /* stats->rx_missed_errors = devstat->; */ 4817 4818 /* stats->tx_aborted_errors = devstat->; */ 4819 /* stats->tx_carrier_errors = devstat->; */ 4820 /* stats->tx_fifo_errors = devstat->; */ 4821 /* stats->tx_heartbeat_errors = devstat->; */ 4822 /* stats->tx_window_errors = devstat->; */ 4823 return stats; 4824} 4825 4826/** 4827 * et131x_open - Open the device for use.
4828 * @netdev: device to be opened 4829 * 4830 * Returns 0 on success, errno on failure (as defined in errno.h) 4831 */ 4832int et131x_open(struct net_device *netdev) 4833{ 4834 int result = 0; 4835 struct et131x_adapter *adapter = netdev_priv(netdev); 4836 4837 /* Start the timer to track NIC errors */ 4838 init_timer(&adapter->error_timer); 4839 adapter->error_timer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000; 4840 adapter->error_timer.function = et131x_error_timer_handler; 4841 adapter->error_timer.data = (unsigned long)adapter; 4842 add_timer(&adapter->error_timer); 4843 4844 /* Register our IRQ */ 4845 result = request_irq(netdev->irq, et131x_isr, IRQF_SHARED, 4846 netdev->name, netdev); 4847 if (result) { 4848 dev_err(&adapter->pdev->dev, "could not register IRQ %d\n", 4849 netdev->irq); 4850 return result; 4851 } 4852 4853 adapter->flags |= fMP_ADAPTER_INTERRUPT_IN_USE; 4854 4855 et131x_up(netdev); 4856 4857 return result; 4858} 4859 4860/** 4861 * et131x_close - Close the device 4862 * @netdev: device to be closed 4863 * 4864 * Returns 0 on success, errno on failure (as defined in errno.h) 4865 */ 4866int et131x_close(struct net_device *netdev) 4867{ 4868 struct et131x_adapter *adapter = netdev_priv(netdev); 4869 4870 et131x_down(netdev); 4871 4872 adapter->flags &= ~fMP_ADAPTER_INTERRUPT_IN_USE; 4873 free_irq(netdev->irq, netdev); 4874 4875 /* Stop the error timer */ 4876 return del_timer_sync(&adapter->error_timer); 4877} 4878 4879/** 4880 * et131x_ioctl - The I/O Control handler for the driver 4881 * @netdev: device on which the control request is being made 4882 * @reqbuf: a pointer to the IOCTL request buffer 4883 * @cmd: the IOCTL command code 4884 * 4885 * Returns 0 on success, errno on failure (as defined in errno.h) 4886 */ 4887static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf, 4888 int cmd) 4889{ 4890 struct et131x_adapter *adapter = netdev_priv(netdev); 4891 4892 if (!adapter->phydev) 4893 return -EINVAL; 4894 4895 return phy_mii_ioctl(adapter->phydev, reqbuf, cmd); 4896} 4897 4898/** 4899 * et131x_set_packet_filter - Configures the Rx Packet filtering on the device 4900 * @adapter: pointer to our private adapter structure 4901 * 4902 * FIXME: lot of dups with MAC code 4903 * 4904 * Returns 0 on success, errno on failure 4905 */ 4906static int et131x_set_packet_filter(struct et131x_adapter *adapter) 4907{ 4908 int status = 0; 4909 uint32_t filter = adapter->packet_filter; 4910 u32 ctrl; 4911 u32 pf_ctrl; 4912 4913 ctrl = readl(&adapter->regs->rxmac.ctrl); 4914 pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl); 4915 4916 /* Default to disabled packet filtering. Enable it in the individual 4917 * case statements that require the device to filter something 4918 */ 4919 ctrl |= 0x04; 4920 4921 /* Set us to be in promiscuous mode so we receive everything; this 4922 * is also true when we get a packet filter of 0 4923 */ 4924 if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0) 4925 pf_ctrl &= ~7; /* Clear filter bits */ 4926 else { 4927 /* 4928 * Set us up with Multicast packet filtering. Three cases are 4929 * possible - (1) we have a multi-cast list, (2) we receive ALL 4930 * multicast entries or (3) we receive none.
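 * In the register bits used below, pf_ctrl bit 0 enables the broadcast
 * filter, bit 1 the multicast filter and bit 2 the unicast filter, while
 * ctrl bit 2 (0x04) disables the packet filter block as a whole.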
4931 */ 4932 if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST) 4933 pf_ctrl &= ~2; /* Multicast filter bit */ 4934 else { 4935 et1310_setup_device_for_multicast(adapter); 4936 pf_ctrl |= 2; 4937 ctrl &= ~0x04; 4938 } 4939 4940 /* Set us up with Unicast packet filtering */ 4941 if (filter & ET131X_PACKET_TYPE_DIRECTED) { 4942 et1310_setup_device_for_unicast(adapter); 4943 pf_ctrl |= 4; 4944 ctrl &= ~0x04; 4945 } 4946 4947 /* Set us up with Broadcast packet filtering */ 4948 if (filter & ET131X_PACKET_TYPE_BROADCAST) { 4949 pf_ctrl |= 1; /* Broadcast filter bit */ 4950 ctrl &= ~0x04; 4951 } else 4952 pf_ctrl &= ~1; 4953 } 4954 4955 /* Setup the receive mac configuration registers - Packet 4956 * Filter control + the enable / disable for packet filter 4957 * in the control reg. Written unconditionally so that the promiscuous setting reaches the hardware too. 4958 */ 4959 writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl); 4960 writel(ctrl, &adapter->regs->rxmac.ctrl); 4961 return status; 4962} 4963 4964/** 4965 * et131x_multicast - The handler to configure multicasting on the interface 4966 * @netdev: a pointer to a net_device struct representing the device 4967 */ 4968static void et131x_multicast(struct net_device *netdev) 4969{ 4970 struct et131x_adapter *adapter = netdev_priv(netdev); 4971 uint32_t packet_filter = 0; 4972 unsigned long flags; 4973 struct netdev_hw_addr *ha; 4974 int i; 4975 4976 spin_lock_irqsave(&adapter->lock, flags); 4977 4978 /* Before we modify the platform-independent filter flags, store them 4979 * locally. This allows us to determine if anything's changed and if 4980 * we even need to bother the hardware 4981 */ 4982 packet_filter = adapter->packet_filter; 4983 4984 /* Clear the 'multicast' flag locally; because we only have a single 4985 * flag to check multicast, and multiple multicast addresses can be 4986 * set, this is the easiest way to determine if more than one 4987 * multicast address is being set. 4988 */ 4989 packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST; 4990 4991 /* Check the net_device flags and set the device independent flags 4992 * accordingly 4993 */ 4994 4995 if (netdev->flags & IFF_PROMISC) 4996 adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS; 4997 else 4998 adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS; 4999 5000 if (netdev->flags & IFF_ALLMULTI) 5001 adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST; 5002 5003 if (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST) 5004 adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST; 5005 5006 if (netdev_mc_count(netdev) < 1) { 5007 adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST; 5008 adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST; 5009 } else 5010 adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST; 5011 5012 /* Set values in the private adapter struct */ 5013 i = 0; 5014 netdev_for_each_mc_addr(ha, netdev) { 5015 if (i == NIC_MAX_MCAST_LIST) 5016 break; 5017 memcpy(adapter->multicast_list[i++], ha->addr, ETH_ALEN); 5018 } 5019 adapter->multicast_addr_count = i; 5020 5021 /* Are the new flags different from the previous ones? If not, then no 5022 * action is required 5023 * 5024 * NOTE - This block will always update the multicast_list with the 5025 * hardware, even if the addresses aren't the same.
5026 */ 5027 if (packet_filter != adapter->packet_filter) { 5028 /* Call the device's filter function */ 5029 et131x_set_packet_filter(adapter); 5030 } 5031 spin_unlock_irqrestore(&adapter->lock, flags); 5032} 5033 5034/** 5035 * et131x_tx - The handler to tx a packet on the device 5036 * @skb: data to be Tx'd 5037 * @netdev: device on which data is to be Tx'd 5038 * 5039 * Returns 0 on success, errno on failure (as defined in errno.h) 5040 */ 5041static int et131x_tx(struct sk_buff *skb, struct net_device *netdev) 5042{ 5043 int status = 0; 5044 struct et131x_adapter *adapter = netdev_priv(netdev); 5045 5046 /* stop the queue if it's getting full */ 5047 if (adapter->tx_ring.used >= NUM_TCB - 1 && 5048 !netif_queue_stopped(netdev)) 5049 netif_stop_queue(netdev); 5050 5051 /* Save the timestamp for the TX timeout watchdog */ 5052 netdev->trans_start = jiffies; 5053 5054 /* Call the device-specific data Tx routine */ 5055 status = et131x_send_packets(skb, netdev); 5056 5057 /* Check status and manage the netif queue if necessary */ 5058 if (status != 0) { 5059 if (status == -ENOMEM) 5060 status = NETDEV_TX_BUSY; 5061 else 5062 status = NETDEV_TX_OK; 5063 } 5064 return status; 5065} 5066 5067/** 5068 * et131x_tx_timeout - Timeout handler 5069 * @netdev: a pointer to a net_device struct representing the device 5070 * 5071 * The handler called when a Tx request times out. The timeout period is 5072 * specified by the 'watchdog_timeo' element in the net_device structure (see 5073 * et131x_device_alloc() to see how this value is set). 5074 */ 5075static void et131x_tx_timeout(struct net_device *netdev) 5076{ 5077 struct et131x_adapter *adapter = netdev_priv(netdev); 5078 struct tcb *tcb; 5079 unsigned long flags; 5080 5081 /* If the device is closed, ignore the timeout */ 5082 if (!(adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE)) 5083 return; 5084 5085 /* Any nonrecoverable hardware error? 5086 * Checks adapter->flags for any failure in phy reading 5087 */ 5088 if (adapter->flags & fMP_ADAPTER_NON_RECOVER_ERROR) 5089 return; 5090 5091 /* Hardware failure? */ 5092 if (adapter->flags & fMP_ADAPTER_HARDWARE_ERROR) { 5093 dev_err(&adapter->pdev->dev, "hardware error - reset\n"); 5094 return; 5095 } 5096 5097 /* Is send stuck? */ 5098 spin_lock_irqsave(&adapter->tcb_send_qlock, flags); 5099 5100 tcb = adapter->tx_ring.send_head; 5101 5102 if (tcb != NULL) { 5103 tcb->count++; 5104 5105 if (tcb->count > NIC_SEND_HANG_THRESHOLD) { 5106 spin_unlock_irqrestore(&adapter->tcb_send_qlock, 5107 flags); 5108 5109 dev_warn(&adapter->pdev->dev, 5110 "Send stuck - reset.
tcb->WrIndex %x, flags 0x%08x\n", 5111 tcb->index, 5112 tcb->flags); 5113 5114 adapter->net_stats.tx_errors++; 5115 5116 /* perform reset of tx/rx */ 5117 et131x_disable_txrx(netdev); 5118 et131x_enable_txrx(netdev); 5119 return; 5120 } 5121 } 5122 5123 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); 5124} 5125 5126/** 5127 * et131x_change_mtu - The handler called to change the MTU for the device 5128 * @netdev: device whose MTU is to be changed 5129 * @new_mtu: the desired MTU 5130 * 5131 * Returns 0 on success, errno on failure (as defined in errno.h) 5132 */ 5133static int et131x_change_mtu(struct net_device *netdev, int new_mtu) 5134{ 5135 int result = 0; 5136 struct et131x_adapter *adapter = netdev_priv(netdev); 5137 5138 /* Make sure the requested MTU is valid */ 5139 if (new_mtu < 64 || new_mtu > 9216) 5140 return -EINVAL; 5141 5142 et131x_disable_txrx(netdev); 5143 et131x_handle_send_interrupt(adapter); 5144 et131x_handle_recv_interrupt(adapter); 5145 5146 /* Set the new MTU */ 5147 netdev->mtu = new_mtu; 5148 5149 /* Free Rx DMA memory */ 5150 et131x_adapter_memory_free(adapter); 5151 5152 /* Set the config parameter for Jumbo Packet support */ 5153 adapter->registry_jumbo_packet = new_mtu + 14; 5154 et131x_soft_reset(adapter); 5155 5156 /* Alloc and init Rx DMA memory */ 5157 result = et131x_adapter_memory_alloc(adapter); 5158 if (result != 0) { 5159 dev_warn(&adapter->pdev->dev, 5160 "Change MTU failed; couldn't re-alloc DMA memory\n"); 5161 return result; 5162 } 5163 5164 et131x_init_send(adapter); 5165 5166 et131x_hwaddr_init(adapter); 5167 memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN); 5168 5169 /* Init the device with the new settings */ 5170 et131x_adapter_setup(adapter); 5171 5172 et131x_enable_txrx(netdev); 5173 5174 return result; 5175} 5176 5177/** 5178 * et131x_set_mac_addr - handler to change the MAC address for the device 5179 * @netdev: device whose MAC is to be changed 5180 * @new_mac: the desired MAC address 5181 * 5182 * Returns 0 on success, errno on failure (as defined in errno.h) 5183 * 5184 * IMPLEMENTED BY : blux http://berndlux.de 22.01.2007 21:14 5185 */ 5186static int et131x_set_mac_addr(struct net_device *netdev, void *new_mac) 5187{ 5188 int result = 0; 5189 struct et131x_adapter *adapter = netdev_priv(netdev); 5190 struct sockaddr *address = new_mac; 5191 5192 /* begin blux */ 5193 5194 if (adapter == NULL) 5195 return -ENODEV; 5196 5197 /* Make sure the requested MAC is valid */ 5198 if (!is_valid_ether_addr(address->sa_data)) 5199 return -EINVAL; 5200 5201 et131x_disable_txrx(netdev); 5202 et131x_handle_send_interrupt(adapter); 5203 et131x_handle_recv_interrupt(adapter); 5204 5205 /* Set the new MAC */ 5206 /* netdev->set_mac_address = &new_mac; */ 5207 5208 memcpy(netdev->dev_addr, address->sa_data, netdev->addr_len); 5209 5210 printk(KERN_INFO "%s: Setting MAC address to %pM\n", 5211 netdev->name, netdev->dev_addr); 5212 5213 /* Free Rx DMA memory */ 5214 et131x_adapter_memory_free(adapter); 5215 5216 et131x_soft_reset(adapter); 5217 5218 /* Alloc and init Rx DMA memory */ 5219 result = et131x_adapter_memory_alloc(adapter); 5220 if (result != 0) { 5221 dev_err(&adapter->pdev->dev, 5222 "Change MAC failed; couldn't re-alloc DMA memory\n"); 5223 return result; 5224 } 5225 5226 et131x_init_send(adapter); 5227 5228 et131x_hwaddr_init(adapter); 5229 5230 /* Init the device with the new settings */ 5231 et131x_adapter_setup(adapter); 5232 5233 et131x_enable_txrx(netdev); 5234 5235 return result; 5236} 5237 5238static const struct 
net_device_ops et131x_netdev_ops = { 5239 .ndo_open = et131x_open, 5240 .ndo_stop = et131x_close, 5241 .ndo_start_xmit = et131x_tx, 5242 .ndo_set_rx_mode = et131x_multicast, 5243 .ndo_tx_timeout = et131x_tx_timeout, 5244 .ndo_change_mtu = et131x_change_mtu, 5245 .ndo_set_mac_address = et131x_set_mac_addr, 5246 .ndo_validate_addr = eth_validate_addr, 5247 .ndo_get_stats = et131x_stats, 5248 .ndo_do_ioctl = et131x_ioctl, 5249}; 5250 5251/** 5252 * et131x_device_alloc 5253 * 5254 * Returns pointer to the allocated and initialized net_device struct for 5255 * this device. 5256 * 5257 * Create instances of net_device and et131x_adapter for the new adapter and 5258 * register the device's entry points in the net_device structure. 5259 */ 5260struct net_device *et131x_device_alloc(void) 5261{ 5262 struct net_device *netdev; 5263 5264 /* Alloc net_device and adapter structs */ 5265 netdev = alloc_etherdev(sizeof(struct et131x_adapter)); 5266 5267 if (!netdev) { 5268 printk(KERN_ERR "et131x: Alloc of net_device struct failed\n"); 5269 return NULL; 5270 } 5271 5272 /* 5273 * Setup the function registration table (and other data) for a 5274 * net_device 5275 */ 5276 netdev->watchdog_timeo = ET131X_TX_TIMEOUT; 5277 netdev->netdev_ops = &et131x_netdev_ops; 5278 5279 /* Poll? */ 5280 /* netdev->poll = &et131x_poll; */ 5281 /* netdev->poll_controller = &et131x_poll_controller; */ 5282 return netdev; 5283} 5284 5285/** 5286 * et131x_pci_setup - Perform device initialization 5287 * @pdev: a pointer to the device's pci_dev structure 5288 * @ent: this device's entry in the pci_device_id table 5289 * 5290 * Returns 0 on success, errno on failure (as defined in errno.h) 5291 * 5292 * Registered in the pci_driver structure, this function is called when the 5293 * PCI subsystem finds a new PCI device which matches the information 5294 * contained in the pci_device_id table. This routine is the equivalent of 5295 * a device insertion routine.
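 * On failure, the err_* ladder at the bottom of the function unwinds
 * exactly the setup steps that had already completed, in reverse order.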
5296 */ 5297static int __devinit et131x_pci_setup(struct pci_dev *pdev, 5298 const struct pci_device_id *ent) 5299{ 5300 int result; 5301 struct net_device *netdev; 5302 struct et131x_adapter *adapter; 5303 int ii; 5304 5305 result = pci_enable_device(pdev); 5306 if (result) { 5307 dev_err(&pdev->dev, "pci_enable_device() failed\n"); 5308 goto err_out; 5309 } 5310 5311 /* Perform some basic PCI checks */ 5312 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 5313 dev_err(&pdev->dev, "Can't find PCI device's base address\n"); 5314 goto err_disable; 5315 } 5316 5317 if (pci_request_regions(pdev, DRIVER_NAME)) { 5318 dev_err(&pdev->dev, "Can't get PCI resources\n"); 5319 goto err_disable; 5320 } 5321 5322 pci_set_master(pdev); 5323 5324 /* Check the DMA addressing support of this device */ 5325 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { 5326 result = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 5327 if (result) { 5328 dev_err(&pdev->dev, 5329 "Unable to obtain 64 bit DMA for consistent allocations\n"); 5330 goto err_release_res; 5331 } 5332 } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { 5333 result = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 5334 if (result) { 5335 dev_err(&pdev->dev, 5336 "Unable to obtain 32 bit DMA for consistent allocations\n"); 5337 goto err_release_res; 5338 } 5339 } else { 5340 dev_err(&pdev->dev, "No usable DMA addressing method\n"); 5341 result = -EIO; 5342 goto err_release_res; 5343 } 5344 5345 /* Allocate netdev and private adapter structs */ 5346 netdev = et131x_device_alloc(); 5347 if (!netdev) { 5348 dev_err(&pdev->dev, "Couldn't alloc netdev struct\n"); 5349 result = -ENOMEM; 5350 goto err_release_res; 5351 } 5352 5353 SET_NETDEV_DEV(netdev, &pdev->dev); 5354 et131x_set_ethtool_ops(netdev); 5355 5356 adapter = et131x_adapter_init(netdev, pdev); 5357 5358 /* Initialise the PCI setup for the device */ 5359 et131x_pci_init(adapter, pdev); 5360 5361 /* Map the bus-relative registers to system virtual memory */ 5362 adapter->regs = pci_ioremap_bar(pdev, 0); 5363 if (!adapter->regs) { 5364 dev_err(&pdev->dev, "Cannot map device registers\n"); 5365 result = -ENOMEM; 5366 goto err_free_dev; 5367 } 5368 5369 /* If Phy COMA mode was enabled when we went down, disable it here. 
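 * Register access is unreliable while the PHY is in coma (see the note in
 * et131x_adjust_link()), so this is done before anything below touches the
 * PHY.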
	/* Allocate netdev and private adapter structs */
	netdev = et131x_device_alloc();
	if (!netdev) {
		dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
		result = -ENOMEM;
		goto err_release_res;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);
	et131x_set_ethtool_ops(netdev);

	adapter = et131x_adapter_init(netdev, pdev);

	/* Initialise the PCI setup for the device */
	et131x_pci_init(adapter, pdev);

	/* Map the bus-relative registers to system virtual memory */
	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "Cannot map device registers\n");
		result = -ENOMEM;
		goto err_free_dev;
	}

	/* If Phy COMA mode was enabled when we went down, disable it here. */
	writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr);

	/* Issue a global reset to the et1310 */
	et131x_soft_reset(adapter);

	/* Disable all interrupts (paranoid) */
	et131x_disable_interrupts(adapter);

	/* Allocate DMA memory */
	result = et131x_adapter_memory_alloc(adapter);
	if (result) {
		dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n");
		goto err_iounmap;
	}

	/* Init send data structures */
	et131x_init_send(adapter);

	/* Set up the task structure for the ISR's deferred handler */
	INIT_WORK(&adapter->task, et131x_isr_handler);

	/* Copy address into the net_device struct */
	memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);

	/* Init variable for counting how long we do not have link status */
	adapter->boot_coma = 0;
	et1310_disable_phy_coma(adapter);

	/* Setup the mii_bus struct */
	adapter->mii_bus = mdiobus_alloc();
	if (!adapter->mii_bus) {
		dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n");
		result = -ENOMEM;
		goto err_mem_free;
	}

	adapter->mii_bus->name = "et131x_eth_mii";
	snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x",
		(adapter->pdev->bus->number << 8) | adapter->pdev->devfn);
	adapter->mii_bus->priv = netdev;
	adapter->mii_bus->read = et131x_mdio_read;
	adapter->mii_bus->write = et131x_mdio_write;
	adapter->mii_bus->reset = et131x_mdio_reset;
	adapter->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
	if (!adapter->mii_bus->irq) {
		dev_err(&pdev->dev, "mii_bus irq allocation failed\n");
		result = -ENOMEM;
		goto err_mdio_free;
	}

	for (ii = 0; ii < PHY_MAX_ADDR; ii++)
		adapter->mii_bus->irq[ii] = PHY_POLL;

	result = mdiobus_register(adapter->mii_bus);
	if (result) {
		dev_err(&pdev->dev, "failed to register MII bus\n");
		goto err_mdio_free_irq;
	}

	result = et131x_mii_probe(netdev);
	if (result) {
		dev_err(&pdev->dev, "failed to probe MII bus\n");
		goto err_mdio_unregister;
	}
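	/* From here on the PHY is managed by phylib: the et131x_mdio_read/
	 * et131x_mdio_write callbacks registered above are how the PHY core
	 * accesses PHY registers, and setting every irq[] slot to PHY_POLL
	 * tells phylib there is no PHY interrupt line, so link-state changes
	 * are picked up by polling.
	 */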
	/* Setup et1310 as per the documentation */
	et131x_adapter_setup(adapter);

	/* We can enable interrupts now
	 *
	 *  NOTE - Because registration of interrupt handler is done in the
	 *         device's open(), defer enabling device interrupts to that
	 *         point
	 */

	/* Register the net_device struct with the Linux network layer */
	result = register_netdev(netdev);
	if (result != 0) {
		dev_err(&pdev->dev, "register_netdev() failed\n");
		goto err_mdio_unregister;
	}

	/* Register the net_device struct with the PCI subsystem. Save a copy
	 * of the PCI config space for this device now that the device has
	 * been initialized, just in case it needs to be quickly restored.
	 */
	pci_set_drvdata(pdev, netdev);
	pci_save_state(adapter->pdev);

	return result;

err_mdio_unregister:
	mdiobus_unregister(adapter->mii_bus);
err_mdio_free_irq:
	kfree(adapter->mii_bus->irq);
err_mdio_free:
	mdiobus_free(adapter->mii_bus);
err_mem_free:
	et131x_adapter_memory_free(adapter);
err_iounmap:
	iounmap(adapter->regs);
err_free_dev:
	free_netdev(netdev);
err_release_res:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
err_out:
	return result;
}

static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = {
	{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
	{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
	{0,}
};
MODULE_DEVICE_TABLE(pci, et131x_pci_table);

static struct pci_driver et131x_driver = {
	.name		= DRIVER_NAME,
	.id_table	= et131x_pci_table,
	.probe		= et131x_pci_setup,
	.remove		= __devexit_p(et131x_pci_remove),
	.driver.pm	= ET131X_PM_OPS,
};

/**
 * et131x_init_module - The "main" entry point called on driver initialization
 *
 * Returns 0 on success, errno on failure (as defined in errno.h)
 */
static int __init et131x_init_module(void)
{
	return pci_register_driver(&et131x_driver);
}

/**
 * et131x_cleanup_module - The entry point called on driver cleanup
 */
static void __exit et131x_cleanup_module(void)
{
	pci_unregister_driver(&et131x_driver);
}

module_init(et131x_init_module);
module_exit(et131x_cleanup_module);
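/* Once built as et131x.ko, no explicit device enumeration is needed:
 * "modprobe et131x" registers et131x_driver with the PCI core, which then
 * calls et131x_pci_setup() for every device matching et131x_pci_table and
 * et131x_pci_remove() when a device goes away or the module is unloaded.
 */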