Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v3.13-rc5 4948 lines 148 kB view raw
1/* Agere Systems Inc. 2 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs 3 * 4 * Copyright © 2005 Agere Systems Inc. 5 * All rights reserved. 6 * http://www.agere.com 7 * 8 * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com> 9 * 10 *------------------------------------------------------------------------------ 11 * 12 * SOFTWARE LICENSE 13 * 14 * This software is provided subject to the following terms and conditions, 15 * which you should read carefully before using the software. Using this 16 * software indicates your acceptance of these terms and conditions. If you do 17 * not agree with these terms and conditions, do not use the software. 18 * 19 * Copyright © 2005 Agere Systems Inc. 20 * All rights reserved. 21 * 22 * Redistribution and use in source or binary forms, with or without 23 * modifications, are permitted provided that the following conditions are met: 24 * 25 * . Redistributions of source code must retain the above copyright notice, this 26 * list of conditions and the following Disclaimer as comments in the code as 27 * well as in the documentation and/or other materials provided with the 28 * distribution. 29 * 30 * . Redistributions in binary form must reproduce the above copyright notice, 31 * this list of conditions and the following Disclaimer in the documentation 32 * and/or other materials provided with the distribution. 33 * 34 * . Neither the name of Agere Systems Inc. nor the names of the contributors 35 * may be used to endorse or promote products derived from this software 36 * without specific prior written permission. 37 * 38 * Disclaimer 39 * 40 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, 41 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF 42 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY 43 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN 44 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. 
OR CONTRIBUTORS BE LIABLE FOR ANY 45 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 46 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 47 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 48 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT 49 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 50 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 51 * DAMAGE. 52 */ 53 54#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 55 56#include <linux/pci.h> 57#include <linux/init.h> 58#include <linux/module.h> 59#include <linux/types.h> 60#include <linux/kernel.h> 61 62#include <linux/sched.h> 63#include <linux/ptrace.h> 64#include <linux/slab.h> 65#include <linux/ctype.h> 66#include <linux/string.h> 67#include <linux/timer.h> 68#include <linux/interrupt.h> 69#include <linux/in.h> 70#include <linux/delay.h> 71#include <linux/bitops.h> 72#include <linux/io.h> 73 74#include <linux/netdevice.h> 75#include <linux/etherdevice.h> 76#include <linux/skbuff.h> 77#include <linux/if_arp.h> 78#include <linux/ioport.h> 79#include <linux/crc32.h> 80#include <linux/random.h> 81#include <linux/phy.h> 82 83#include "et131x.h" 84 85MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>"); 86MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>"); 87MODULE_LICENSE("Dual BSD/GPL"); 88MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver for the ET1310 by Agere Systems"); 89 90/* EEPROM defines */ 91#define MAX_NUM_REGISTER_POLLS 1000 92#define MAX_NUM_WRITE_RETRIES 2 93 94/* MAC defines */ 95#define COUNTER_WRAP_16_BIT 0x10000 96#define COUNTER_WRAP_12_BIT 0x1000 97 98/* PCI defines */ 99#define INTERNAL_MEM_SIZE 0x400 /* 1024 of internal memory */ 100#define INTERNAL_MEM_RX_OFFSET 0x1FF /* 50% Tx, 50% Rx */ 101 102/* ISR defines */ 103/* For interrupts, normal running is: 104 * rxdma_xfr_done, phy_interrupt, mac_stat_interrupt, 105 * 
watchdog_interrupt & txdma_xfer_done 106 * 107 * In both cases, when flow control is enabled for either Tx or bi-direction, 108 * we additional enable rx_fbr0_low and rx_fbr1_low, so we know when the 109 * buffer rings are running low. 110 */ 111#define INT_MASK_DISABLE 0xffffffff 112 113/* NOTE: Masking out MAC_STAT Interrupt for now... 114 * #define INT_MASK_ENABLE 0xfff6bf17 115 * #define INT_MASK_ENABLE_NO_FLOW 0xfff6bfd7 116 */ 117#define INT_MASK_ENABLE 0xfffebf17 118#define INT_MASK_ENABLE_NO_FLOW 0xfffebfd7 119 120/* General defines */ 121/* Packet and header sizes */ 122#define NIC_MIN_PACKET_SIZE 60 123 124/* Multicast list size */ 125#define NIC_MAX_MCAST_LIST 128 126 127/* Supported Filters */ 128#define ET131X_PACKET_TYPE_DIRECTED 0x0001 129#define ET131X_PACKET_TYPE_MULTICAST 0x0002 130#define ET131X_PACKET_TYPE_BROADCAST 0x0004 131#define ET131X_PACKET_TYPE_PROMISCUOUS 0x0008 132#define ET131X_PACKET_TYPE_ALL_MULTICAST 0x0010 133 134/* Tx Timeout */ 135#define ET131X_TX_TIMEOUT (1 * HZ) 136#define NIC_SEND_HANG_THRESHOLD 0 137 138/* MP_TCB flags */ 139#define FMP_DEST_MULTI 0x00000001 140#define FMP_DEST_BROAD 0x00000002 141 142/* MP_ADAPTER flags */ 143#define FMP_ADAPTER_INTERRUPT_IN_USE 0x00000008 144 145/* MP_SHARED flags */ 146#define FMP_ADAPTER_LOWER_POWER 0x00200000 147 148#define FMP_ADAPTER_NON_RECOVER_ERROR 0x00800000 149#define FMP_ADAPTER_HARDWARE_ERROR 0x04000000 150 151#define FMP_ADAPTER_FAIL_SEND_MASK 0x3ff00000 152 153/* Some offsets in PCI config space that are actually used. 
*/ 154#define ET1310_PCI_MAC_ADDRESS 0xA4 155#define ET1310_PCI_EEPROM_STATUS 0xB2 156#define ET1310_PCI_ACK_NACK 0xC0 157#define ET1310_PCI_REPLAY 0xC2 158#define ET1310_PCI_L0L1LATENCY 0xCF 159 160/* PCI Product IDs */ 161#define ET131X_PCI_DEVICE_ID_GIG 0xED00 /* ET1310 1000 Base-T 8 */ 162#define ET131X_PCI_DEVICE_ID_FAST 0xED01 /* ET1310 100 Base-T */ 163 164/* Define order of magnitude converter */ 165#define NANO_IN_A_MICRO 1000 166 167#define PARM_RX_NUM_BUFS_DEF 4 168#define PARM_RX_TIME_INT_DEF 10 169#define PARM_RX_MEM_END_DEF 0x2bc 170#define PARM_TX_TIME_INT_DEF 40 171#define PARM_TX_NUM_BUFS_DEF 4 172#define PARM_DMA_CACHE_DEF 0 173 174/* RX defines */ 175#define FBR_CHUNKS 32 176#define MAX_DESC_PER_RING_RX 1024 177 178/* number of RFDs - default and min */ 179#define RFD_LOW_WATER_MARK 40 180#define NIC_DEFAULT_NUM_RFD 1024 181#define NUM_FBRS 2 182 183#define NUM_PACKETS_HANDLED 256 184 185#define ALCATEL_MULTICAST_PKT 0x01000000 186#define ALCATEL_BROADCAST_PKT 0x02000000 187 188/* Free Buffer Descriptors */ 189struct fbr_desc { 190 u32 addr_lo; 191 u32 addr_hi; 192 u32 word2; /* Bits 10-31 reserved, 0-9 descriptor */ 193}; 194 195/* Packet Status Ring Descriptors 196 * 197 * Word 0: 198 * 199 * top 16 bits are from the Alcatel Status Word as enumerated in 200 * PE-MCXMAC Data Sheet IPD DS54 0210-1 (also IPD-DS80 0205-2) 201 * 202 * 0: hp hash pass 203 * 1: ipa IP checksum assist 204 * 2: ipp IP checksum pass 205 * 3: tcpa TCP checksum assist 206 * 4: tcpp TCP checksum pass 207 * 5: wol WOL Event 208 * 6: rxmac_error RXMAC Error Indicator 209 * 7: drop Drop packet 210 * 8: ft Frame Truncated 211 * 9: jp Jumbo Packet 212 * 10: vp VLAN Packet 213 * 11-15: unused 214 * 16: asw_prev_pkt_dropped e.g.
IFG too small on previous 215 * 17: asw_RX_DV_event short receive event detected 216 * 18: asw_false_carrier_event bad carrier since last good packet 217 * 19: asw_code_err one or more nibbles signalled as errors 218 * 20: asw_CRC_err CRC error 219 * 21: asw_len_chk_err frame length field incorrect 220 * 22: asw_too_long frame length > 1518 bytes 221 * 23: asw_OK valid CRC + no code error 222 * 24: asw_multicast has a multicast address 223 * 25: asw_broadcast has a broadcast address 224 * 26: asw_dribble_nibble spurious bits after EOP 225 * 27: asw_control_frame is a control frame 226 * 28: asw_pause_frame is a pause frame 227 * 29: asw_unsupported_op unsupported OP code 228 * 30: asw_VLAN_tag VLAN tag detected 229 * 31: asw_long_evt Rx long event 230 * 231 * Word 1: 232 * 0-15: length length in bytes 233 * 16-25: bi Buffer Index 234 * 26-27: ri Ring Index 235 * 28-31: reserved 236 */ 237 238struct pkt_stat_desc { 239 u32 word0; 240 u32 word1; 241}; 242 243/* Layout of the RX DMA status words */ 244 245/* rx status word 0 holds part of the status bits of the Rx DMA engine 246 * that get copied out to memory by the ET-1310. Word 0 is a 32 bit word 247 * which contains the Free Buffer ring 0 and 1 available offset. 248 * 249 * bit 0-9 FBR1 offset 250 * bit 10 Wrap flag for FBR1 251 * bit 16-25 FBR0 offset 252 * bit 26 Wrap flag for FBR0 253 */ 254 255/* RXSTAT_WORD1_t structure holds part of the status bits of the Rx DMA engine 256 * that get copied out to memory by the ET-1310. Word 3 is a 32 bit word 257 * which contains the Packet Status Ring available offset.
258 * 259 * bit 0-15 reserved 260 * bit 16-27 PSRoffset 261 * bit 28 PSRwrap 262 * bit 29-31 unused 263 */ 264 265/* struct rx_status_block is a structure representing the status of the Rx 266 * DMA engine it sits in free memory, and is pointed to by 0x101c / 0x1020 267 */ 268struct rx_status_block { 269 u32 word0; 270 u32 word1; 271}; 272 273/* Structure for look-up table holding free buffer ring pointers, addresses 274 * and state. 275 */ 276struct fbr_lookup { 277 void *virt[MAX_DESC_PER_RING_RX]; 278 u32 bus_high[MAX_DESC_PER_RING_RX]; 279 u32 bus_low[MAX_DESC_PER_RING_RX]; 280 void *ring_virtaddr; 281 dma_addr_t ring_physaddr; 282 void *mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS]; 283 dma_addr_t mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS]; 284 u32 local_full; 285 u32 num_entries; 286 dma_addr_t buffsize; 287}; 288 289/* struct rx_ring is the structure representing the adaptor's local 290 * reference(s) to the rings 291 */ 292struct rx_ring { 293 struct fbr_lookup *fbr[NUM_FBRS]; 294 void *ps_ring_virtaddr; 295 dma_addr_t ps_ring_physaddr; 296 u32 local_psr_full; 297 u32 psr_num_entries; 298 299 struct rx_status_block *rx_status_block; 300 dma_addr_t rx_status_bus; 301 302 /* RECV */ 303 struct list_head recv_list; 304 u32 num_ready_recv; 305 306 u32 num_rfd; 307 308 bool unfinished_receives; 309}; 310 311/* TX defines */ 312/* word 2 of the control bits in the Tx Descriptor ring for the ET-1310 313 * 314 * 0-15: length of packet 315 * 16-27: VLAN tag 316 * 28: VLAN CFI 317 * 29-31: VLAN priority 318 * 319 * word 3 of the control bits in the Tx Descriptor ring for the ET-1310 320 * 321 * 0: last packet in the sequence 322 * 1: first packet in the sequence 323 * 2: interrupt the processor when this pkt sent 324 * 3: Control word - no packet data 325 * 4: Issue half-duplex backpressure : XON/XOFF 326 * 5: send pause frame 327 * 6: Tx frame has error 328 * 7: append CRC 329 * 8: MAC override 330 * 9: pad packet 331 * 10: Packet is a Huge packet 332 * 11:
append VLAN tag 333 * 12: IP checksum assist 334 * 13: TCP checksum assist 335 * 14: UDP checksum assist 336 */ 337 338#define TXDESC_FLAG_LASTPKT 0x0001 339#define TXDESC_FLAG_FIRSTPKT 0x0002 340#define TXDESC_FLAG_INTPROC 0x0004 341 342/* struct tx_desc represents each descriptor on the ring */ 343struct tx_desc { 344 u32 addr_hi; 345 u32 addr_lo; 346 u32 len_vlan; /* control words how to xmit the */ 347 u32 flags; /* data (detailed above) */ 348}; 349 350/* The status of the Tx DMA engine it sits in free memory, and is pointed to 351 * by 0x101c / 0x1020. This is a DMA10 type 352 */ 353 354/* TCB (Transmit Control Block: Host Side) */ 355struct tcb { 356 struct tcb *next; /* Next entry in ring */ 357 u32 flags; /* Our flags for the packet */ 358 u32 count; /* Used to spot stuck/lost packets */ 359 u32 stale; /* Used to spot stuck/lost packets */ 360 struct sk_buff *skb; /* Network skb we are tied to */ 361 u32 index; /* Ring indexes */ 362 u32 index_start; 363}; 364 365/* Structure representing our local reference(s) to the ring */ 366struct tx_ring { 367 /* TCB (Transmit Control Block) memory and lists */ 368 struct tcb *tcb_ring; 369 370 /* List of TCBs that are ready to be used */ 371 struct tcb *tcb_qhead; 372 struct tcb *tcb_qtail; 373 374 /* list of TCBs that are currently being sent. NOTE that access to all 375 * three of these (including used) are controlled via the 376 * TCBSendQLock. This lock should be secured prior to incrementing / 377 * decrementing used, or any queue manipulation on send_head / 378 * tail 379 */ 380 struct tcb *send_head; 381 struct tcb *send_tail; 382 int used; 383 384 /* The actual descriptor ring */ 385 struct tx_desc *tx_desc_ring; 386 dma_addr_t tx_desc_ring_pa; 387 388 /* send_idx indicates where we last wrote to in the descriptor ring.
*/ 389 u32 send_idx; 390 391 /* The location of the write-back status block */ 392 u32 *tx_status; 393 dma_addr_t tx_status_pa; 394 395 /* Packets since the last IRQ: used for interrupt coalescing */ 396 int since_irq; 397}; 398 399/* Do not change these values: if changed, then change also in respective 400 * TXdma and Rxdma engines 401 */ 402#define NUM_DESC_PER_RING_TX 512 /* TX Do not change these values */ 403#define NUM_TCB 64 404 405/* These values are all superseded by registry entries to facilitate tuning. 406 * Once the desired performance has been achieved, the optimal registry values 407 * should be re-populated to these #defines: 408 */ 409#define TX_ERROR_PERIOD 1000 410 411#define LO_MARK_PERCENT_FOR_PSR 15 412#define LO_MARK_PERCENT_FOR_RX 15 413 414/* RFD (Receive Frame Descriptor) */ 415struct rfd { 416 struct list_head list_node; 417 struct sk_buff *skb; 418 u32 len; /* total size of receive frame */ 419 u16 bufferindex; 420 u8 ringindex; 421}; 422 423/* Flow Control */ 424#define FLOW_BOTH 0 425#define FLOW_TXONLY 1 426#define FLOW_RXONLY 2 427#define FLOW_NONE 3 428 429/* Struct to define some device statistics */ 430struct ce_stats { 431 /* MIB II variables 432 * 433 * NOTE: atomic_t types are only guaranteed to store 24-bits; if we 434 * MUST have 32, then we'll need another way to perform atomic 435 * operations 436 */ 437 u32 unicast_pkts_rcvd; 438 atomic_t unicast_pkts_xmtd; 439 u32 multicast_pkts_rcvd; 440 atomic_t multicast_pkts_xmtd; 441 u32 broadcast_pkts_rcvd; 442 atomic_t broadcast_pkts_xmtd; 443 u32 rcvd_pkts_dropped; 444 445 /* Tx Statistics. */ 446 u32 tx_underflows; 447 448 u32 tx_collisions; 449 u32 tx_excessive_collisions; 450 u32 tx_first_collisions; 451 u32 tx_late_collisions; 452 u32 tx_max_pkt_errs; 453 u32 tx_deferred; 454 455 /* Rx Statistics. 
*/ 456 u32 rx_overflows; 457 458 u32 rx_length_errs; 459 u32 rx_align_errs; 460 u32 rx_crc_errs; 461 u32 rx_code_violations; 462 u32 rx_other_errs; 463 464 u32 synchronous_iterations; 465 u32 interrupt_status; 466}; 467 468/* The private adapter structure */ 469struct et131x_adapter { 470 struct net_device *netdev; 471 struct pci_dev *pdev; 472 struct mii_bus *mii_bus; 473 struct phy_device *phydev; 474 struct work_struct task; 475 476 /* Flags that indicate current state of the adapter */ 477 u32 flags; 478 479 /* local link state, to determine if a state change has occurred */ 480 int link; 481 482 /* Configuration */ 483 u8 rom_addr[ETH_ALEN]; 484 u8 addr[ETH_ALEN]; 485 bool has_eeprom; 486 u8 eeprom_data[2]; 487 488 /* Spinlocks */ 489 spinlock_t lock; 490 491 spinlock_t tcb_send_qlock; 492 spinlock_t tcb_ready_qlock; 493 spinlock_t send_hw_lock; 494 495 spinlock_t rcv_lock; 496 spinlock_t fbr_lock; 497 498 /* Packet Filter and look ahead size */ 499 u32 packet_filter; 500 501 /* multicast list */ 502 u32 multicast_addr_count; 503 u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN]; 504 505 /* Pointer to the device's PCI register space */ 506 struct address_map __iomem *regs; 507 508 /* Registry parameters */ 509 u8 wanted_flow; /* Flow we want for 802.3x flow control */ 510 u32 registry_jumbo_packet; /* Max supported ethernet packet size */ 511 512 /* Derived from the registry: */ 513 u8 flowcontrol; /* flow control validated by the far-end */ 514 515 /* Minimize init-time */ 516 struct timer_list error_timer; 517 518 /* variable putting the phy into coma mode when boot up with no cable 519 * plugged in after 5 seconds 520 */ 521 u8 boot_coma; 522 523 /* Next two used to save power information at power down. 
This 524 * information will be used during power up to set up parts of Power 525 * Management in JAGCore 526 */ 527 u16 pdown_speed; 528 u8 pdown_duplex; 529 530 /* Tx Memory Variables */ 531 struct tx_ring tx_ring; 532 533 /* Rx Memory Variables */ 534 struct rx_ring rx_ring; 535 536 /* Stats */ 537 struct ce_stats stats; 538 539 struct net_device_stats net_stats; 540}; 541 542static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status) 543{ 544 u32 reg; 545 int i; 546 547 /* 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and 548 * bits 7,1:0 both equal to 1, at least once after reset. 549 * Subsequent operations need only to check that bits 1:0 are equal 550 * to 1 prior to starting a single byte read/write 551 */ 552 553 for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) { 554 /* Read registers grouped in DWORD1 */ 555 if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg)) 556 return -EIO; 557 558 /* I2C idle and Phy Queue Avail both true */ 559 if ((reg & 0x3000) == 0x3000) { 560 if (status) 561 *status = reg; 562 return reg & 0xFF; 563 } 564 } 565 return -ETIMEDOUT; 566} 567 568/* eeprom_write - Write a byte to the ET1310's EEPROM 569 * @adapter: pointer to our private adapter structure 570 * @addr: the address to write 571 * @data: the value to write 572 * 573 * Returns 1 for a successful write. 574 */ 575static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data) 576{ 577 struct pci_dev *pdev = adapter->pdev; 578 int index = 0; 579 int retries; 580 int err = 0; 581 int i2c_wack = 0; 582 int writeok = 0; 583 u32 status; 584 u32 val = 0; 585 586 /* For an EEPROM, an I2C single byte write is defined as a START 587 * condition followed by the device address, EEPROM address, one byte 588 * of data and a STOP condition. The STOP condition will trigger the 589 * EEPROM's internally timed write cycle to the nonvolatile memory. 
590 * All inputs are disabled during this write cycle and the EEPROM will 591 * not respond to any access until the internal write is complete. 592 */ 593 594 err = eeprom_wait_ready(pdev, NULL); 595 if (err < 0) 596 return err; 597 598 /* 2. Write to the LBCIF Control Register: bit 7=1, bit 6=1, bit 3=0, 599 * and bits 1:0 both =0. Bit 5 should be set according to the 600 * type of EEPROM being accessed (1=two byte addressing, 0=one 601 * byte addressing). 602 */ 603 if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER, 604 LBCIF_CONTROL_LBCIF_ENABLE | LBCIF_CONTROL_I2C_WRITE)) 605 return -EIO; 606 607 i2c_wack = 1; 608 609 /* Prepare EEPROM address for Step 3 */ 610 611 for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) { 612 /* Write the address to the LBCIF Address Register */ 613 if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr)) 614 break; 615 /* Write the data to the LBCIF Data Register (the I2C write 616 * will begin). 617 */ 618 if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data)) 619 break; 620 /* Monitor bit 1:0 of the LBCIF Status Register. When bits 621 * 1:0 are both equal to 1, the I2C write has completed and the 622 * internal write cycle of the EEPROM is about to start. 623 * (bits 1:0 = 01 is a legal state while waiting from both 624 * equal to 1, but bits 1:0 = 10 is invalid and implies that 625 * something is broken). 626 */ 627 err = eeprom_wait_ready(pdev, &status); 628 if (err < 0) 629 return 0; 630 631 /* Check bit 3 of the LBCIF Status Register. If equal to 1, 632 * an error has occurred. Don't break here if we are revision 633 * 1, this is so we do a blind write for load bug. 634 */ 635 if ((status & LBCIF_STATUS_GENERAL_ERROR) 636 && adapter->pdev->revision == 0) 637 break; 638 639 /* Check bit 2 of the LBCIF Status Register. If equal to 1 an 640 * ACK error has occurred on the address phase of the write.
641 * This could be due to an actual hardware failure or the 642 * EEPROM may still be in its internal write cycle from a 643 * previous write. This write operation was ignored and must be 644 * repeated later. 645 */ 646 if (status & LBCIF_STATUS_ACK_ERROR) { 647 /* This could be due to an actual hardware failure 648 * or the EEPROM may still be in its internal write 649 * cycle from a previous write. This write operation 650 * was ignored and must be repeated later. 651 */ 652 udelay(10); 653 continue; 654 } 655 656 writeok = 1; 657 break; 658 } 659 660 /* Set bit 6 of the LBCIF Control Register = 0. 661 */ 662 udelay(10); 663 664 while (i2c_wack) { 665 if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER, 666 LBCIF_CONTROL_LBCIF_ENABLE)) 667 writeok = 0; 668 669 /* Do read until internal ACK_ERROR goes away meaning write 670 * completed 671 */ 672 do { 673 pci_write_config_dword(pdev, 674 LBCIF_ADDRESS_REGISTER, 675 addr); 676 do { 677 pci_read_config_dword(pdev, 678 LBCIF_DATA_REGISTER, &val); 679 } while ((val & 0x00010000) == 0); 680 } while (val & 0x00040000); 681 682 if ((val & 0xFF00) != 0xC000 || index == 10000) 683 break; 684 index++; 685 } 686 return writeok ?
0 : -EIO; 687} 688 689/* eeprom_read - Read a byte from the ET1310's EEPROM 690 * @adapter: pointer to our private adapter structure 691 * @addr: the address from which to read 692 * @pdata: a pointer to a byte in which to store the value of the read 693 * @eeprom_id: the ID of the EEPROM 694 * @addrmode: how the EEPROM is to be accessed 695 * 696 * Returns 1 for a successful read 697 */ 698static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata) 699{ 700 struct pci_dev *pdev = adapter->pdev; 701 int err; 702 u32 status; 703 704 /* A single byte read is similar to the single byte write, with the 705 * exception of the data flow: 706 */ 707 708 err = eeprom_wait_ready(pdev, NULL); 709 if (err < 0) 710 return err; 711 /* Write to the LBCIF Control Register: bit 7=1, bit 6=0, bit 3=0, 712 * and bits 1:0 both =0. Bit 5 should be set according to the type 713 * of EEPROM being accessed (1=two byte addressing, 0=one byte 714 * addressing). 715 */ 716 if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER, 717 LBCIF_CONTROL_LBCIF_ENABLE)) 718 return -EIO; 719 /* Write the address to the LBCIF Address Register (I2C read will 720 * begin). 721 */ 722 if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr)) 723 return -EIO; 724 /* Monitor bit 0 of the LBCIF Status Register. When = 1, I2C read 725 * is complete. (if bit 1 =1 and bit 0 stays = 0, a hardware failure 726 * has occurred). 727 */ 728 err = eeprom_wait_ready(pdev, &status); 729 if (err < 0) 730 return err; 731 /* Regardless of error status, read data byte from LBCIF Data 732 * Register. 733 */ 734 *pdata = err; 735 /* Check bit 2 of the LBCIF Status Register. If = 1, 736 * then an error has occurred. 737 */ 738 return (status & LBCIF_STATUS_ACK_ERROR) ? 
-EIO : 0; 739} 740 741static int et131x_init_eeprom(struct et131x_adapter *adapter) 742{ 743 struct pci_dev *pdev = adapter->pdev; 744 u8 eestatus; 745 746 /* We first need to check the EEPROM Status code located at offset 747 * 0xB2 of config space 748 */ 749 pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus); 750 751 /* THIS IS A WORKAROUND: 752 * I need to call this function twice to get my card in a 753 * LG M1 Express Dual running. I tried also a msleep before this 754 * function, because I thought there could be some time conditions 755 * but it didn't work. Call the whole function twice also work. 756 */ 757 if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) { 758 dev_err(&pdev->dev, 759 "Could not read PCI config space for EEPROM Status\n"); 760 return -EIO; 761 } 762 763 /* Determine if the error(s) we care about are present. If they are 764 * present we need to fail. 765 */ 766 if (eestatus & 0x4C) { 767 int write_failed = 0; 768 if (pdev->revision == 0x01) { 769 int i; 770 static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF }; 771 772 /* Re-write the first 4 bytes if we have an eeprom 773 * present and the revision id is 1, this fixes the 774 * corruption seen with 1310 B Silicon 775 */ 776 for (i = 0; i < 3; i++) 777 if (eeprom_write(adapter, i, eedata[i]) < 0) 778 write_failed = 1; 779 } 780 if (pdev->revision != 0x01 || write_failed) { 781 dev_err(&pdev->dev, 782 "Fatal EEPROM Status Error - 0x%04x\n", eestatus); 783 784 /* This error could mean that there was an error 785 * reading the eeprom or that the eeprom doesn't exist. 786 * We will treat each case the same and not try to 787 * gather additional information that normally would 788 * come from the eeprom, like MAC Address 789 */ 790 adapter->has_eeprom = 0; 791 return -EIO; 792 } 793 } 794 adapter->has_eeprom = 1; 795 796 /* Read the EEPROM for information regarding LED behavior. Refer to 797 * ET1310_phy.c, et131x_xcvr_init(), for its use. 
798 */ 799 eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]); 800 eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]); 801 802 if (adapter->eeprom_data[0] != 0xcd) 803 /* Disable all optional features */ 804 adapter->eeprom_data[1] = 0x00; 805 806 return 0; 807} 808 809/* et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310. 810 * @adapter: pointer to our adapter structure 811 */ 812static void et131x_rx_dma_enable(struct et131x_adapter *adapter) 813{ 814 /* Setup the receive dma configuration register for normal operation */ 815 u32 csr = ET_RXDMA_CSR_FBR1_ENABLE; 816 817 if (adapter->rx_ring.fbr[1]->buffsize == 4096) 818 csr |= ET_RXDMA_CSR_FBR1_SIZE_LO; 819 else if (adapter->rx_ring.fbr[1]->buffsize == 8192) 820 csr |= ET_RXDMA_CSR_FBR1_SIZE_HI; 821 else if (adapter->rx_ring.fbr[1]->buffsize == 16384) 822 csr |= ET_RXDMA_CSR_FBR1_SIZE_LO | ET_RXDMA_CSR_FBR1_SIZE_HI; 823 824 csr |= ET_RXDMA_CSR_FBR0_ENABLE; 825 if (adapter->rx_ring.fbr[0]->buffsize == 256) 826 csr |= ET_RXDMA_CSR_FBR0_SIZE_LO; 827 else if (adapter->rx_ring.fbr[0]->buffsize == 512) 828 csr |= ET_RXDMA_CSR_FBR0_SIZE_HI; 829 else if (adapter->rx_ring.fbr[0]->buffsize == 1024) 830 csr |= ET_RXDMA_CSR_FBR0_SIZE_LO | ET_RXDMA_CSR_FBR0_SIZE_HI; 831 writel(csr, &adapter->regs->rxdma.csr); 832 833 csr = readl(&adapter->regs->rxdma.csr); 834 if (csr & ET_RXDMA_CSR_HALT_STATUS) { 835 udelay(5); 836 csr = readl(&adapter->regs->rxdma.csr); 837 if (csr & ET_RXDMA_CSR_HALT_STATUS) { 838 dev_err(&adapter->pdev->dev, 839 "RX Dma failed to exit halt state. 
CSR 0x%08x\n", 840 csr); 841 } 842 } 843} 844 845/* et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310 846 * @adapter: pointer to our adapter structure 847 */ 848static void et131x_rx_dma_disable(struct et131x_adapter *adapter) 849{ 850 u32 csr; 851 /* Setup the receive dma configuration register */ 852 writel(ET_RXDMA_CSR_HALT | ET_RXDMA_CSR_FBR1_ENABLE, 853 &adapter->regs->rxdma.csr); 854 csr = readl(&adapter->regs->rxdma.csr); 855 if (!(csr & ET_RXDMA_CSR_HALT_STATUS)) { 856 udelay(5); 857 csr = readl(&adapter->regs->rxdma.csr); 858 if (!(csr & ET_RXDMA_CSR_HALT_STATUS)) 859 dev_err(&adapter->pdev->dev, 860 "RX Dma failed to enter halt state. CSR 0x%08x\n", 861 csr); 862 } 863} 864 865/* et131x_tx_dma_enable - re-start of Tx_DMA on the ET1310. 866 * @adapter: pointer to our adapter structure 867 * 868 * Mainly used after a return to the D0 (full-power) state from a lower state. 869 */ 870static void et131x_tx_dma_enable(struct et131x_adapter *adapter) 871{ 872 /* Setup the transmit dma configuration register for normal 873 * operation 874 */ 875 writel(ET_TXDMA_SNGL_EPKT|(PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT), 876 &adapter->regs->txdma.csr); 877} 878 879static inline void add_10bit(u32 *v, int n) 880{ 881 *v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP); 882} 883 884static inline void add_12bit(u32 *v, int n) 885{ 886 *v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP); 887} 888 889/* et1310_config_mac_regs1 - Initialize the first part of MAC regs 890 * @adapter: pointer to our adapter structure 891 */ 892static void et1310_config_mac_regs1(struct et131x_adapter *adapter) 893{ 894 struct mac_regs __iomem *macregs = &adapter->regs->mac; 895 u32 station1; 896 u32 station2; 897 u32 ipg; 898 899 /* First we need to reset everything. Write to MAC configuration 900 * register 1 to perform reset. 
901 */ 902 writel(ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET | 903 ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC | 904 ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC, 905 &macregs->cfg1); 906 907 /* Next lets configure the MAC Inter-packet gap register */ 908 ipg = 0x38005860; /* IPG1 0x38 IPG2 0x58 B2B 0x60 */ 909 ipg |= 0x50 << 8; /* ifg enforce 0x50 */ 910 writel(ipg, &macregs->ipg); 911 912 /* Next lets configure the MAC Half Duplex register */ 913 /* BEB trunc 0xA, Ex Defer, Rexmit 0xF Coll 0x37 */ 914 writel(0x00A1F037, &macregs->hfdp); 915 916 /* Next lets configure the MAC Interface Control register */ 917 writel(0, &macregs->if_ctrl); 918 919 /* Let's move on to setting up the mii management configuration */ 920 writel(ET_MAC_MIIMGMT_CLK_RST, &macregs->mii_mgmt_cfg); 921 922 /* Next lets configure the MAC Station Address register. These 923 * values are read from the EEPROM during initialization and stored 924 * in the adapter structure. We write what is stored in the adapter 925 * structure to the MAC Station Address registers high and low. This 926 * station address is used for generating and checking pause control 927 * packets. 928 */ 929 station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) | 930 (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT); 931 station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) | 932 (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) | 933 (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) | 934 adapter->addr[2]; 935 writel(station1, &macregs->station_addr_1); 936 writel(station2, &macregs->station_addr_2); 937 938 /* Max ethernet packet in bytes that will be passed by the mac without 939 * being truncated. Allow the MAC to pass 4 more than our max packet 940 * size. This is 4 for the Ethernet CRC. 941 * 942 * Packets larger than (registry_jumbo_packet) that do not contain a 943 * VLAN ID will be dropped by the Rx function. 
 */
	writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len);

	/* clear out MAC config reset */
	writel(0, &macregs->cfg1);
}

/* et1310_config_mac_regs2 - Initialize the second part of MAC regs
 * @adapter: pointer to our adapter structure
 *
 * Programs interface mode, flow control and duplex into MAC cfg1/cfg2 and
 * if_ctrl, then polls for the MAC's sync'd indicator bits before enabling
 * the TxMAC.
 */
static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
{
	/* NOTE(review): kernel style would normally use a plain 'int' here
	 * rather than int32_t for a loop counter.
	 */
	int32_t delay = 0;
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	struct phy_device *phydev = adapter->phydev;
	u32 cfg1;
	u32 cfg2;
	u32 ifctrl;
	u32 ctl;

	ctl = readl(&adapter->regs->txmac.ctl);
	cfg1 = readl(&mac->cfg1);
	cfg2 = readl(&mac->cfg2);
	ifctrl = readl(&mac->if_ctrl);

	/* Set up the if mode bits */
	cfg2 &= ~ET_MAC_CFG2_IFMODE_MASK;
	if (phydev && phydev->speed == SPEED_1000) {
		cfg2 |= ET_MAC_CFG2_IFMODE_1000;
		/* Phy mode bit */
		ifctrl &= ~ET_MAC_IFCTRL_PHYMODE;
	} else {
		/* 10 and 100 Mbit share the same interface mode setting */
		cfg2 |= ET_MAC_CFG2_IFMODE_100;
		ifctrl |= ET_MAC_IFCTRL_PHYMODE;
	}

	/* We need to enable Rx/Tx */
	cfg1 |= ET_MAC_CFG1_RX_ENABLE | ET_MAC_CFG1_TX_ENABLE |
							ET_MAC_CFG1_TX_FLOW;
	/* Initialize loop back to off */
	cfg1 &= ~(ET_MAC_CFG1_LOOPBACK | ET_MAC_CFG1_RX_FLOW);
	if (adapter->flowcontrol == FLOW_RXONLY ||
				adapter->flowcontrol == FLOW_BOTH)
		cfg1 |= ET_MAC_CFG1_RX_FLOW;
	writel(cfg1, &mac->cfg1);

	/* Now we need to initialize the MAC Configuration 2 register */
	/* preamble 7, check length, huge frame off, pad crc, crc enable
	 * full duplex off
	 */
	cfg2 |= 0x7 << ET_MAC_CFG2_PREAMBLE_SHIFT;
	cfg2 |= ET_MAC_CFG2_IFMODE_LEN_CHECK;
	cfg2 |= ET_MAC_CFG2_IFMODE_PAD_CRC;
	cfg2 |= ET_MAC_CFG2_IFMODE_CRC_ENABLE;
	cfg2 &= ~ET_MAC_CFG2_IFMODE_HUGE_FRAME;
	cfg2 &= ~ET_MAC_CFG2_IFMODE_FULL_DPLX;

	/* Turn on duplex if needed */
	if (phydev && phydev->duplex == DUPLEX_FULL)
		cfg2 |= ET_MAC_CFG2_IFMODE_FULL_DPLX;

	ifctrl &= ~ET_MAC_IFCTRL_GHDMODE;
	if (phydev && phydev->duplex == DUPLEX_HALF)
		ifctrl |= ET_MAC_IFCTRL_GHDMODE;

	writel(ifctrl, &mac->if_ctrl);
	writel(cfg2, &mac->cfg2);

	/* Poll for the MAC sync'd bits to come up (up to 100 * 10us) */
	do {
		udelay(10);
		delay++;
		cfg1 = readl(&mac->cfg1);
	} while ((cfg1 & ET_MAC_CFG1_WAIT) != ET_MAC_CFG1_WAIT && delay < 100);

	if (delay == 100) {
		dev_warn(&adapter->pdev->dev,
		    "Syncd bits did not respond correctly cfg1 word 0x%08x\n",
			cfg1);
	}

	/* Enable txmac */
	ctl |= ET_TX_CTRL_TXMAC_ENABLE | ET_TX_CTRL_FC_DISABLE;
	writel(ctl, &adapter->regs->txmac.ctl);

	/* Ready to start the RXDMA/TXDMA engine */
	if (adapter->flags & FMP_ADAPTER_LOWER_POWER) {
		et131x_rx_dma_enable(adapter);
		et131x_tx_dma_enable(adapter);
	}
}

/* et1310_in_phy_coma - check if the device is in phy coma
 * @adapter: pointer to our adapter structure
 *
 * Returns 0 if the device is not in phy coma, 1 if it is in phy coma
 */
static int et1310_in_phy_coma(struct et131x_adapter *adapter)
{
	u32 pmcsr;

	pmcsr = readl(&adapter->regs->global.pm_csr);

	return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
}

/* et1310_setup_device_for_multicast - program the multicast hash registers
 * @adapter: pointer to our adapter structure
 */
static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	u32 hash1 = 0;
	u32 hash2 = 0;
	u32 hash3 = 0;
	u32 hash4 = 0;
	u32 pm_csr;

	/* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision
	 * the multi-cast LIST. If it is NOT specified, (and "ALL" is not
	 * specified) then we should pass NO multi-cast addresses to the
	 * driver.
	 */
	if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) {
		int i;

		/* Loop through our multicast array and set up the device */
		for (i = 0; i < adapter->multicast_addr_count; i++) {
			u32 result;

			result = ether_crc(6, adapter->multicast_list[i]);

			/* Use CRC bits 23..29 as a 7-bit (0..127) hash index */
			result = (result & 0x3F800000) >> 23;

			if (result < 32) {
				hash1 |= (1 << result);
			} else if ((31 < result) && (result < 64)) {
				result -= 32;
				hash2 |= (1 << result);
			} else if ((63 < result) && (result < 96)) {
				result -= 64;
				hash3 |= (1 << result);
			} else {
				result -= 96;
				hash4 |= (1 << result);
			}
		}
	}

	/* Write out the new hash to the device */
	/* NOTE(review): pm_csr is read but never used; et1310_in_phy_coma()
	 * re-reads the register itself - confirm this read is not needed as
	 * a flush before removing it.
	 */
	pm_csr = readl(&adapter->regs->global.pm_csr);
	if (!et1310_in_phy_coma(adapter)) {
		writel(hash1, &rxmac->multi_hash1);
		writel(hash2, &rxmac->multi_hash2);
		writel(hash3, &rxmac->multi_hash3);
		writel(hash4, &rxmac->multi_hash4);
	}
}

/* et1310_setup_device_for_unicast - program the unicast packet filter regs
 * @adapter: pointer to our adapter structure
 */
static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	u32 uni_pf1;
	u32 uni_pf2;
	u32 uni_pf3;
	u32 pm_csr;

	/* Set up unicast packet filter reg 3 to be the first two octets of
	 * the MAC address for both address
	 *
	 * Set up unicast packet filter reg 2 to be the octets 2 - 5 of the
	 * MAC address for second address
	 *
	 * Set up unicast packet filter reg 1 to be the octets 2 - 5 of the
	 * MAC address for first address
	 */
	uni_pf3 = (adapter->addr[0] << ET_RX_UNI_PF_ADDR2_1_SHIFT) |
		  (adapter->addr[1] << ET_RX_UNI_PF_ADDR2_2_SHIFT) |
		  (adapter->addr[0] << ET_RX_UNI_PF_ADDR1_1_SHIFT) |
		   adapter->addr[1];

	uni_pf2 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR2_3_SHIFT) |
		  (adapter->addr[3] << ET_RX_UNI_PF_ADDR2_4_SHIFT) |
		  (adapter->addr[4] << ET_RX_UNI_PF_ADDR2_5_SHIFT) |
		   adapter->addr[5];

	uni_pf1 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR1_3_SHIFT) |
		  (adapter->addr[3] << ET_RX_UNI_PF_ADDR1_4_SHIFT) |
		  (adapter->addr[4] << ET_RX_UNI_PF_ADDR1_5_SHIFT) |
		   adapter->addr[5];

	/* NOTE(review): pm_csr is read but never used; see the same pattern
	 * in the multicast setup above - confirm before removing.
	 */
	pm_csr = readl(&adapter->regs->global.pm_csr);
	if (!et1310_in_phy_coma(adapter)) {
		writel(uni_pf1, &rxmac->uni_pf_addr1);
		writel(uni_pf2, &rxmac->uni_pf_addr2);
		writel(uni_pf3, &rxmac->uni_pf_addr3);
	}
}

/* et1310_config_rxmac_regs - configure the RxMAC (WOL, filtering, enables)
 * @adapter: pointer to our adapter structure
 */
static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	struct phy_device *phydev = adapter->phydev;
	u32 sa_lo;
	u32 sa_hi = 0;
	u32 pf_ctrl = 0;

	/* Disable the MAC while it is being configured (also disable WOL) */
	writel(0x8, &rxmac->ctrl);

	/* Initialize WOL to disabled. */
	writel(0, &rxmac->crc0);
	writel(0, &rxmac->crc12);
	writel(0, &rxmac->crc34);

	/* We need to set the WOL mask0 - mask4 next. We initialize it to
	 * its default Values of 0x00000000 because there are not WOL masks
	 * as of this time.
	 */
	writel(0, &rxmac->mask0_word0);
	writel(0, &rxmac->mask0_word1);
	writel(0, &rxmac->mask0_word2);
	writel(0, &rxmac->mask0_word3);

	writel(0, &rxmac->mask1_word0);
	writel(0, &rxmac->mask1_word1);
	writel(0, &rxmac->mask1_word2);
	writel(0, &rxmac->mask1_word3);

	writel(0, &rxmac->mask2_word0);
	writel(0, &rxmac->mask2_word1);
	writel(0, &rxmac->mask2_word2);
	writel(0, &rxmac->mask2_word3);

	writel(0, &rxmac->mask3_word0);
	writel(0, &rxmac->mask3_word1);
	writel(0, &rxmac->mask3_word2);
	writel(0, &rxmac->mask3_word3);

	writel(0, &rxmac->mask4_word0);
	writel(0, &rxmac->mask4_word1);
	writel(0, &rxmac->mask4_word2);
	writel(0, &rxmac->mask4_word3);

	/* Lets setup the WOL Source Address */
	sa_lo = (adapter->addr[2] << ET_RX_WOL_LO_SA3_SHIFT) |
		(adapter->addr[3] << ET_RX_WOL_LO_SA4_SHIFT) |
		(adapter->addr[4] << ET_RX_WOL_LO_SA5_SHIFT) |
		adapter->addr[5];
	writel(sa_lo, &rxmac->sa_lo);

	sa_hi = (u32) (adapter->addr[0] << ET_RX_WOL_HI_SA1_SHIFT) |
		adapter->addr[1];
	writel(sa_hi, &rxmac->sa_hi);

	/* Disable all Packet Filtering */
	writel(0, &rxmac->pf_ctrl);

	/* Let's initialize the Unicast Packet filtering address */
	if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) {
		et1310_setup_device_for_unicast(adapter);
		pf_ctrl |= ET_RX_PFCTRL_UNICST_FILTER_ENABLE;
	} else {
		writel(0, &rxmac->uni_pf_addr1);
		writel(0, &rxmac->uni_pf_addr2);
		writel(0, &rxmac->uni_pf_addr3);
	}

	/* Let's initialize the Multicast hash */
	if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
		pf_ctrl |= ET_RX_PFCTRL_MLTCST_FILTER_ENABLE;
		et1310_setup_device_for_multicast(adapter);
	}

	/* Runt packet filtering. Didn't work in version A silicon. */
	pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << ET_RX_PFCTRL_MIN_PKT_SZ_SHIFT;
	pf_ctrl |= ET_RX_PFCTRL_FRAG_FILTER_ENABLE;

	if (adapter->registry_jumbo_packet > 8192)
		/* In order to transmit jumbo packets greater than 8k, the
		 * FIFO between RxMAC and RxDMA needs to be reduced in size
		 * to (16k - Jumbo packet size). In order to implement this,
		 * we must use "cut through" mode in the RxMAC, which chops
		 * packets down into segments which are (max_size * 16). In
		 * this case we selected 256 bytes, since this is the size of
		 * the PCI-Express TLP's that the 1310 uses.
		 *
		 * seg_en on, fc_en off, size 0x10
		 */
		writel(0x41, &rxmac->mcif_ctrl_max_seg);
	else
		writel(0, &rxmac->mcif_ctrl_max_seg);

	/* Initialize the MCIF water marks */
	writel(0, &rxmac->mcif_water_mark);

	/* Initialize the MIF control */
	writel(0, &rxmac->mif_ctrl);

	/* Initialize the Space Available Register */
	writel(0, &rxmac->space_avail);

	/* Initialize the mif_ctrl register
	 * bit 3:  Receive code error. One or more nibbles were signaled as
	 *	   errors during the reception of the packet. Clear this
	 *	   bit in Gigabit, set it in 100Mbit. This was derived
	 *	   experimentally at UNH.
	 * bit 4:  Receive CRC error. The packet's CRC did not match the
	 *	   internally generated CRC.
	 * bit 5:  Receive length check error. Indicates that frame length
	 *	   field value in the packet does not match the actual data
	 *	   byte length and is not a type field.
	 * bit 16: Receive frame truncated.
	 * bit 17: Drop packet enable
	 */
	if (phydev && phydev->speed == SPEED_100)
		writel(0x30038, &rxmac->mif_ctrl);
	else
		writel(0x30030, &rxmac->mif_ctrl);

	/* Finally we initialize RxMac to be enabled & WOL disabled. Packet
	 * filter is always enabled since it is where the runt packets are
	 * supposed to be dropped. For version A silicon, runt packet
	 * dropping doesn't work, so it is disabled in the pf_ctrl register,
	 * but we still leave the packet filter on.
	 */
	writel(pf_ctrl, &rxmac->pf_ctrl);
	writel(ET_RX_CTRL_RXMAC_ENABLE | ET_RX_CTRL_WOL_DISABLE, &rxmac->ctrl);
}

/* et1310_config_txmac_regs - configure the TxMAC control-frame parameters
 * @adapter: pointer to our adapter structure
 */
static void et1310_config_txmac_regs(struct et131x_adapter *adapter)
{
	struct txmac_regs __iomem *txmac = &adapter->regs->txmac;

	/* We need to update the Control Frame Parameters
	 * cfpt - control frame pause timer set to 64 (0x40)
	 * cfep - control frame extended pause timer set to 0x0
	 */
	if (adapter->flowcontrol == FLOW_NONE)
		writel(0, &txmac->cf_param);
	else
		writel(0x40, &txmac->cf_param);
}

/* et1310_config_macstat_regs - zero the MACSTAT counters and set carry masks
 * @adapter: pointer to our adapter structure
 */
static void et1310_config_macstat_regs(struct et131x_adapter *adapter)
{
	struct macstat_regs __iomem *macstat =
		&adapter->regs->macstat;

	/* Next we need to initialize all the macstat registers to zero on
	 * the device.
	 */
	writel(0, &macstat->txrx_0_64_byte_frames);
	writel(0, &macstat->txrx_65_127_byte_frames);
	writel(0, &macstat->txrx_128_255_byte_frames);
	writel(0, &macstat->txrx_256_511_byte_frames);
	writel(0, &macstat->txrx_512_1023_byte_frames);
	writel(0, &macstat->txrx_1024_1518_byte_frames);
	writel(0, &macstat->txrx_1519_1522_gvln_frames);

	writel(0, &macstat->rx_bytes);
	writel(0, &macstat->rx_packets);
	writel(0, &macstat->rx_fcs_errs);
	writel(0, &macstat->rx_multicast_packets);
	writel(0, &macstat->rx_broadcast_packets);
	writel(0, &macstat->rx_control_frames);
	writel(0, &macstat->rx_pause_frames);
	writel(0, &macstat->rx_unknown_opcodes);
	writel(0, &macstat->rx_align_errs);
	writel(0, &macstat->rx_frame_len_errs);
	writel(0, &macstat->rx_code_errs);
	writel(0, &macstat->rx_carrier_sense_errs);
	writel(0, &macstat->rx_undersize_packets);
	writel(0, &macstat->rx_oversize_packets);
	writel(0, &macstat->rx_fragment_packets);
	writel(0, &macstat->rx_jabbers);
	writel(0, &macstat->rx_drops);

	writel(0, &macstat->tx_bytes);
	writel(0, &macstat->tx_packets);
	writel(0, &macstat->tx_multicast_packets);
	writel(0, &macstat->tx_broadcast_packets);
	writel(0, &macstat->tx_pause_frames);
	writel(0, &macstat->tx_deferred);
	writel(0, &macstat->tx_excessive_deferred);
	writel(0, &macstat->tx_single_collisions);
	writel(0, &macstat->tx_multiple_collisions);
	writel(0, &macstat->tx_late_collisions);
	writel(0, &macstat->tx_excessive_collisions);
	writel(0, &macstat->tx_total_collisions);
	writel(0, &macstat->tx_pause_honored_frames);
	writel(0, &macstat->tx_drops);
	writel(0, &macstat->tx_jabbers);
	writel(0, &macstat->tx_fcs_errs);
	writel(0, &macstat->tx_control_frames);
	writel(0, &macstat->tx_oversize_frames);
	writel(0, &macstat->tx_undersize_frames);
	writel(0, &macstat->tx_fragments);
	writel(0, &macstat->carry_reg1);
	writel(0, &macstat->carry_reg2);

	/* Unmask any counters that we want to track the overflow of.
	 * Initially this will be all counters. It may become clear later
	 * that we do not need to track all counters.
	 */
	writel(0xFFFFBE32, &macstat->carry_reg1_mask);
	writel(0xFFFE7E8B, &macstat->carry_reg2_mask);
}

/* et131x_phy_mii_read - Read from the PHY through the MII Interface on the MAC
 * @adapter: pointer to our private adapter structure
 * @addr: the address of the transceiver
 * @reg: the register to read
 * @value: pointer to a 16-bit value in which the value will be stored
 *
 * Returns 0 on success, errno on failure (as defined in errno.h)
 */
static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
	      u8 reg, u16 *value)
{
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	int status = 0;
	u32 delay = 0;
	u32 mii_addr;
	u32 mii_cmd;
	u32 mii_indicator;

	/* Save a local copy of the registers we are dealing with so we can
	 * set them back
	 */
	mii_addr = readl(&mac->mii_mgmt_addr);
	mii_cmd = readl(&mac->mii_mgmt_cmd);

	/* Stop the current operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* Set up the register we need to read from on the correct PHY */
	writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

	/* Kick off the read cycle */
	writel(0x1, &mac->mii_mgmt_cmd);

	/* Poll for completion (up to 50 * 50us) */
	do {
		udelay(50);
		delay++;
		mii_indicator = readl(&mac->mii_mgmt_indicator);
	} while ((mii_indicator & ET_MAC_MGMT_WAIT) && delay < 50);

	/* If we hit the max delay, we could not read the register */
	if (delay == 50) {
		dev_warn(&adapter->pdev->dev,
			    "reg 0x%08x could not be read\n", reg);
		dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
			    mii_indicator);

		status = -EIO;
	}

	/* If we hit here we were able to read the register and we need to
	 * return the value to the caller
	 */
	/* NOTE(review): on timeout the status register is still read and a
	 * (possibly stale) value is stored through *value alongside -EIO.
	 */
	*value = readl(&mac->mii_mgmt_stat) & ET_MAC_MIIMGMT_STAT_PHYCRTL_MASK;

	/* Stop the read operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* set the registers we touched back to the state at which we entered
	 * this function
	 */
	writel(mii_addr, &mac->mii_mgmt_addr);
	writel(mii_cmd, &mac->mii_mgmt_cmd);

	return status;
}

/* et131x_mii_read - read a PHY register on the attached phydev
 * @adapter: pointer to our private adapter structure
 * @reg: the register to read
 * @value: where to store the 16-bit result
 *
 * Returns 0 on success, -EIO if no PHY is attached or the read fails.
 */
static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
{
	struct phy_device *phydev = adapter->phydev;

	if (!phydev)
		return -EIO;

	return et131x_phy_mii_read(adapter, phydev->addr, reg, value);
}

/* et131x_mii_write - Write to a PHY reg through the MII interface of the MAC
 * @adapter: pointer to our private adapter structure
 * @reg: the register to read
 * @value: 16-bit value to write
 *
 * FIXME: one caller in netdev still
 *
 * Return 0 on success, errno on failure (as defined in errno.h)
 */
static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
{
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	struct phy_device *phydev = adapter->phydev;
	int status = 0;
	u8 addr;
	u32 delay = 0;
	u32 mii_addr;
	u32 mii_cmd;
	u32 mii_indicator;

	if (!phydev)
		return -EIO;

	addr = phydev->addr;

	/* Save a local copy of the registers we are dealing with so we can
	 * set them back
	 */
	mii_addr = readl(&mac->mii_mgmt_addr);
	mii_cmd = readl(&mac->mii_mgmt_cmd);

	/* Stop the current operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* Set up the register we need to write to on the correct PHY */
	writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

	/* Add the value to write to the registers to the mac */
	writel(value, &mac->mii_mgmt_ctrl);

	/* Poll for completion (up to 100 * 50us) */
	do {
1465 udelay(50); 1466 delay++; 1467 mii_indicator = readl(&mac->mii_mgmt_indicator); 1468 } while ((mii_indicator & ET_MAC_MGMT_BUSY) && delay < 100); 1469 1470 /* If we hit the max delay, we could not write the register */ 1471 if (delay == 100) { 1472 u16 tmp; 1473 1474 dev_warn(&adapter->pdev->dev, 1475 "reg 0x%08x could not be written", reg); 1476 dev_warn(&adapter->pdev->dev, "status is 0x%08x\n", 1477 mii_indicator); 1478 dev_warn(&adapter->pdev->dev, "command is 0x%08x\n", 1479 readl(&mac->mii_mgmt_cmd)); 1480 1481 et131x_mii_read(adapter, reg, &tmp); 1482 1483 status = -EIO; 1484 } 1485 /* Stop the write operation */ 1486 writel(0, &mac->mii_mgmt_cmd); 1487 1488 /* set the registers we touched back to the state at which we entered 1489 * this function 1490 */ 1491 writel(mii_addr, &mac->mii_mgmt_addr); 1492 writel(mii_cmd, &mac->mii_mgmt_cmd); 1493 1494 return status; 1495} 1496 1497/* Still used from _mac for BIT_READ */ 1498static void et1310_phy_access_mii_bit(struct et131x_adapter *adapter, 1499 u16 action, u16 regnum, u16 bitnum, 1500 u8 *value) 1501{ 1502 u16 reg; 1503 u16 mask = 1 << bitnum; 1504 1505 /* Read the requested register */ 1506 et131x_mii_read(adapter, regnum, &reg); 1507 1508 switch (action) { 1509 case TRUEPHY_BIT_READ: 1510 *value = (reg & mask) >> bitnum; 1511 break; 1512 1513 case TRUEPHY_BIT_SET: 1514 et131x_mii_write(adapter, regnum, reg | mask); 1515 break; 1516 1517 case TRUEPHY_BIT_CLEAR: 1518 et131x_mii_write(adapter, regnum, reg & ~mask); 1519 break; 1520 1521 default: 1522 break; 1523 } 1524} 1525 1526static void et1310_config_flow_control(struct et131x_adapter *adapter) 1527{ 1528 struct phy_device *phydev = adapter->phydev; 1529 1530 if (phydev->duplex == DUPLEX_HALF) { 1531 adapter->flowcontrol = FLOW_NONE; 1532 } else { 1533 char remote_pause, remote_async_pause; 1534 1535 et1310_phy_access_mii_bit(adapter, 1536 TRUEPHY_BIT_READ, 5, 10, &remote_pause); 1537 et1310_phy_access_mii_bit(adapter, 1538 TRUEPHY_BIT_READ, 5, 11, 
1539 &remote_async_pause); 1540 1541 if ((remote_pause == TRUEPHY_BIT_SET) && 1542 (remote_async_pause == TRUEPHY_BIT_SET)) { 1543 adapter->flowcontrol = adapter->wanted_flow; 1544 } else if ((remote_pause == TRUEPHY_BIT_SET) && 1545 (remote_async_pause == TRUEPHY_BIT_CLEAR)) { 1546 if (adapter->wanted_flow == FLOW_BOTH) 1547 adapter->flowcontrol = FLOW_BOTH; 1548 else 1549 adapter->flowcontrol = FLOW_NONE; 1550 } else if ((remote_pause == TRUEPHY_BIT_CLEAR) && 1551 (remote_async_pause == TRUEPHY_BIT_CLEAR)) { 1552 adapter->flowcontrol = FLOW_NONE; 1553 } else {/* if (remote_pause == TRUEPHY_CLEAR_BIT && 1554 * remote_async_pause == TRUEPHY_SET_BIT) 1555 */ 1556 if (adapter->wanted_flow == FLOW_BOTH) 1557 adapter->flowcontrol = FLOW_RXONLY; 1558 else 1559 adapter->flowcontrol = FLOW_NONE; 1560 } 1561 } 1562} 1563 1564/* et1310_update_macstat_host_counters - Update the local copy of the statistics 1565 * @adapter: pointer to the adapter structure 1566 */ 1567static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter) 1568{ 1569 struct ce_stats *stats = &adapter->stats; 1570 struct macstat_regs __iomem *macstat = 1571 &adapter->regs->macstat; 1572 1573 stats->tx_collisions += readl(&macstat->tx_total_collisions); 1574 stats->tx_first_collisions += readl(&macstat->tx_single_collisions); 1575 stats->tx_deferred += readl(&macstat->tx_deferred); 1576 stats->tx_excessive_collisions += 1577 readl(&macstat->tx_multiple_collisions); 1578 stats->tx_late_collisions += readl(&macstat->tx_late_collisions); 1579 stats->tx_underflows += readl(&macstat->tx_undersize_frames); 1580 stats->tx_max_pkt_errs += readl(&macstat->tx_oversize_frames); 1581 1582 stats->rx_align_errs += readl(&macstat->rx_align_errs); 1583 stats->rx_crc_errs += readl(&macstat->rx_code_errs); 1584 stats->rcvd_pkts_dropped += readl(&macstat->rx_drops); 1585 stats->rx_overflows += readl(&macstat->rx_oversize_packets); 1586 stats->rx_code_violations += readl(&macstat->rx_fcs_errs); 1587 
	stats->rx_length_errs += readl(&macstat->rx_frame_len_errs);
	stats->rx_other_errs += readl(&macstat->rx_fragment_packets);
}

/* et1310_handle_macstat_interrupt
 * @adapter: pointer to the adapter structure
 *
 * One of the MACSTAT counters has wrapped. Update the local copy of
 * the statistics held in the adapter structure, checking the "wrap"
 * bit for each counter.
 */
static void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
{
	u32 carry_reg1;
	u32 carry_reg2;

	/* Read the interrupt bits from the register(s). These are Clear On
	 * Write.
	 */
	carry_reg1 = readl(&adapter->regs->macstat.carry_reg1);
	carry_reg2 = readl(&adapter->regs->macstat.carry_reg2);

	writel(carry_reg1, &adapter->regs->macstat.carry_reg1);
	writel(carry_reg2, &adapter->regs->macstat.carry_reg2);

	/* We need to update the host copy of all the MAC_STAT counters.
	 * For each counter, check its overflow bit. If the overflow bit is
	 * set, then increment the host version of the count by one complete
	 * revolution of the counter. This routine is called when the counter
	 * block indicates that one of the counters has wrapped.
	 */
	/* NOTE(review): the carry-bit-to-counter mapping below is assumed
	 * to match the hardware carry register layout - not verifiable from
	 * this file alone.
	 */
	if (carry_reg1 & (1 << 14))
		adapter->stats.rx_code_violations += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 8))
		adapter->stats.rx_align_errs += COUNTER_WRAP_12_BIT;
	if (carry_reg1 & (1 << 7))
		adapter->stats.rx_length_errs += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 2))
		adapter->stats.rx_other_errs += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 6))
		adapter->stats.rx_crc_errs += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 3))
		adapter->stats.rx_overflows += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 0))
		adapter->stats.rcvd_pkts_dropped += COUNTER_WRAP_16_BIT;
	if (carry_reg2 & (1 << 16))
		adapter->stats.tx_max_pkt_errs += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 15))
		adapter->stats.tx_underflows += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 6))
		adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 8))
		adapter->stats.tx_deferred += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 5))
		adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 4))
		adapter->stats.tx_late_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 2))
		adapter->stats.tx_collisions += COUNTER_WRAP_12_BIT;
}

/* et131x_mdio_read - mii_bus read callback
 * @bus: the MDIO bus (priv is our net_device)
 * @phy_addr: PHY address to read from
 * @reg: register number to read
 *
 * Returns the 16-bit register value, or a negative errno on failure.
 */
static int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);
	u16 value;
	int ret;

	ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value);

	if (ret < 0)
		return ret;
	else
		return value;
}

/* et131x_mdio_write - mii_bus write callback
 * @bus: the MDIO bus (priv is our net_device)
 * @phy_addr: PHY address (NOTE(review): ignored - et131x_mii_write always
 *	      uses adapter->phydev->addr; confirm this is intentional)
 * @reg: register number to write
 * @value: 16-bit value to write
 */
static int et131x_mdio_write(struct mii_bus *bus, int phy_addr,
			     int reg, u16 value)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	return et131x_mii_write(adapter, reg, value);
}

/* et131x_mdio_reset - mii_bus reset callback: soft-reset the PHY via BMCR */
static int et131x_mdio_reset(struct mii_bus *bus)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	et131x_mii_write(adapter, MII_BMCR, BMCR_RESET);

	return 0;
}

/* et1310_phy_power_down - PHY power control
 * @adapter: device to control
 * @down: true for off/false for back on
 *
 * one hundred, ten, one thousand megs
 * How would you like to have your LAN accessed
 * Can't you see that this code processed
 * Phy power, phy power..
 */
static void et1310_phy_power_down(struct et131x_adapter *adapter, bool down)
{
	u16 data;

	/* Read-modify-write BMCR, toggling only the power-down bit */
	et131x_mii_read(adapter, MII_BMCR, &data);
	data &= ~BMCR_PDOWN;
	if (down)
		data |= BMCR_PDOWN;
	et131x_mii_write(adapter, MII_BMCR, data);
}

/* et131x_xcvr_init - Init the phy if we are setting it into force mode
 * @adapter: pointer to our private adapter structure
 *
 */
static void et131x_xcvr_init(struct et131x_adapter *adapter)
{
	u16 lcr2;

	/* Set the LED behavior such that LED 1 indicates speed (off =
	 * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates
	 * link and activity (on for link, blink off for activity).
	 *
	 * NOTE: Some customizations have been added here for specific
	 * vendors; The LED behavior is now determined by vendor data in the
	 * EEPROM. However, the above description is the default.
	 */
	if ((adapter->eeprom_data[1] & 0x4) == 0) {
		et131x_mii_read(adapter, PHY_LED_2, &lcr2);

		lcr2 &= (ET_LED2_LED_100TX | ET_LED2_LED_1000T);
		lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT);

		if ((adapter->eeprom_data[1] & 0x8) == 0)
			lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT);
		else
			lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT);

		et131x_mii_write(adapter, PHY_LED_2, lcr2);
	}
}

/* et131x_configure_global_regs - configure JAGCore global regs
 * @adapter: pointer to our adapter structure
 *
 * Used to configure the global registers on the JAGCore
 */
static void et131x_configure_global_regs(struct et131x_adapter *adapter)
{
	struct global_regs __iomem *regs = &adapter->regs->global;

	writel(0, &regs->rxq_start_addr);
	writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr);

	if (adapter->registry_jumbo_packet < 2048) {
		/* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word
		 * block of RAM that the driver can split between Tx
		 * and Rx as it desires. Our default is to split it
		 * 50/50:
		 */
		writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr);
		writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr);
	} else if (adapter->registry_jumbo_packet < 8192) {
		/* For jumbo packets > 2k but < 8k, split 50-50. */
		writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr);
		writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr);
	} else {
		/* 9216 is the only packet size greater than 8k that
		 * is available. The Tx buffer has to be big enough
		 * for one whole packet on the Tx side. We'll make
		 * the Tx 9408, and give the rest to Rx
		 */
		writel(0x01b3, &regs->rxq_end_addr);
		writel(0x01b4, &regs->txq_start_addr);
	}

	/* Initialize the loopback register. Disable all loopbacks. */
	writel(0, &regs->loopback);

	/* MSI Register */
	writel(0, &regs->msi_config);

	/* By default, disable the watchdog timer. It will be enabled when
	 * a packet is queued.
	 */
	writel(0, &regs->watchdog_timer);
}

/* et131x_config_rx_dma_regs - Start of Rx_DMA init sequence
 * @adapter: pointer to our adapter structure
 */
static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
{
	struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
	struct rx_ring *rx_local = &adapter->rx_ring;
	struct fbr_desc *fbr_entry;
	u32 entry;
	u32 psr_num_des;
	unsigned long flags;
	u8 id;

	/* Halt RXDMA to perform the reconfigure. */
	et131x_rx_dma_disable(adapter);

	/* Load the completion writeback physical address */
	writel(upper_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_hi);
	writel(lower_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_lo);

	memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));

	/* Set the address and parameters of the packet status ring into the
	 * 1310's registers
	 */
	writel(upper_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_hi);
	writel(lower_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_lo);
	writel(rx_local->psr_num_entries - 1, &rx_dma->psr_num_des);
	writel(0, &rx_dma->psr_full_offset);

	psr_num_des = readl(&rx_dma->psr_num_des) & ET_RXDMA_PSR_NUM_DES_MASK;
	writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
	       &rx_dma->psr_min_des);

	spin_lock_irqsave(&adapter->rcv_lock, flags);

	/* These local variables track the PSR in the adapter structure */
	rx_local->local_psr_full = 0;

	for (id = 0; id < NUM_FBRS; id++) {
		u32 __iomem *num_des;
		u32 __iomem *full_offset;
		u32 __iomem *min_des;
		u32 __iomem *base_hi;
		u32 __iomem *base_lo;

		if (id == 0) {
			num_des = &rx_dma->fbr0_num_des;
			full_offset = &rx_dma->fbr0_full_offset;
			min_des = &rx_dma->fbr0_min_des;
			base_hi = &rx_dma->fbr0_base_hi;
			base_lo = &rx_dma->fbr0_base_lo;
		} else {
			num_des = &rx_dma->fbr1_num_des;
			full_offset = &rx_dma->fbr1_full_offset;
			min_des = &rx_dma->fbr1_min_des;
			base_hi = &rx_dma->fbr1_base_hi;
			base_lo = &rx_dma->fbr1_base_lo;
		}

		/* Now's the best time to initialize FBR contents */
		fbr_entry =
		    (struct fbr_desc *) rx_local->fbr[id]->ring_virtaddr;
		for (entry = 0;
		     entry < rx_local->fbr[id]->num_entries; entry++) {
			fbr_entry->addr_hi = rx_local->fbr[id]->bus_high[entry];
			fbr_entry->addr_lo = rx_local->fbr[id]->bus_low[entry];
			fbr_entry->word2 = entry;
			fbr_entry++;
		}

		/* Set the address and parameters of Free buffer ring 1 and 0
		 * into the 1310's registers
		 */
		writel(upper_32_bits(rx_local->fbr[id]->ring_physaddr),
		       base_hi);
		writel(lower_32_bits(rx_local->fbr[id]->ring_physaddr),
		       base_lo);
		writel(rx_local->fbr[id]->num_entries - 1, num_des);
		writel(ET_DMA10_WRAP, full_offset);

		/* This variable tracks the free buffer ring 1 full position,
		 * so it has to match the above.
		 */
		rx_local->fbr[id]->local_full = ET_DMA10_WRAP;
		writel(((rx_local->fbr[id]->num_entries *
					LO_MARK_PERCENT_FOR_RX) / 100) - 1,
		       min_des);
	}

	/* Program the number of packets we will receive before generating an
	 * interrupt.
	 * For version B silicon, this value gets updated once autoneg is
	 * complete.
	 */
	writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);

	/* The "time_done" is not working correctly to coalesce interrupts
	 * after a given time period, but rather is giving us an interrupt
	 * regardless of whether we have received packets.
	 * This value gets updated once autoneg is complete.
	 */
	writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);

	spin_unlock_irqrestore(&adapter->rcv_lock, flags);
}

/* et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore.
 * @adapter: pointer to our private adapter structure
 *
 * Configure the transmit engine with the ring buffers we have created
 * and prepare it for use.
 */
static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
{
	struct txdma_regs __iomem *txdma = &adapter->regs->txdma;

	/* Load the hardware with the start of the transmit descriptor ring. */
	writel(upper_32_bits(adapter->tx_ring.tx_desc_ring_pa),
	       &txdma->pr_base_hi);
	writel(lower_32_bits(adapter->tx_ring.tx_desc_ring_pa),
	       &txdma->pr_base_lo);

	/* Initialise the transmit DMA engine */
	writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);

	/* Load the completion writeback physical address */
	writel(upper_32_bits(adapter->tx_ring.tx_status_pa),
	       &txdma->dma_wb_base_hi);
	writel(lower_32_bits(adapter->tx_ring.tx_status_pa),
	       &txdma->dma_wb_base_lo);

	*adapter->tx_ring.tx_status = 0;

	writel(0, &txdma->service_request);
	adapter->tx_ring.send_idx = 0;
}

/* et131x_adapter_setup - Set the adapter up as per cassini+ documentation
 * @adapter: pointer to our private adapter structure
 */
static void et131x_adapter_setup(struct et131x_adapter *adapter)
{
	/* Configure the JAGCore */
	et131x_configure_global_regs(adapter);

	et1310_config_mac_regs1(adapter);

	/* Configure the MMC registers */
	/* All we need to do is initialize the Memory Control Register */
	writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl);

	et1310_config_rxmac_regs(adapter);
	et1310_config_txmac_regs(adapter);

	et131x_config_rx_dma_regs(adapter);
1937 et131x_config_tx_dma_regs(adapter); 1938 1939 et1310_config_macstat_regs(adapter); 1940 1941 et1310_phy_power_down(adapter, 0); 1942 et131x_xcvr_init(adapter); 1943} 1944 1945/* et131x_soft_reset - Issue a soft reset to the hardware, complete for ET1310 1946 * @adapter: pointer to our private adapter structure 1947 */ 1948static void et131x_soft_reset(struct et131x_adapter *adapter) 1949{ 1950 u32 reg; 1951 1952 /* Disable MAC Core */ 1953 reg = ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET | 1954 ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC | 1955 ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC; 1956 writel(reg, &adapter->regs->mac.cfg1); 1957 1958 reg = ET_RESET_ALL; 1959 writel(reg, &adapter->regs->global.sw_reset); 1960 1961 reg = ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC | 1962 ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC; 1963 writel(reg, &adapter->regs->mac.cfg1); 1964 writel(0, &adapter->regs->mac.cfg1); 1965} 1966 1967/* et131x_enable_interrupts - enable interrupt 1968 * @adapter: et131x device 1969 * 1970 * Enable the appropriate interrupts on the ET131x according to our 1971 * configuration 1972 */ 1973static void et131x_enable_interrupts(struct et131x_adapter *adapter) 1974{ 1975 u32 mask; 1976 1977 /* Enable all global interrupts */ 1978 if (adapter->flowcontrol == FLOW_TXONLY || 1979 adapter->flowcontrol == FLOW_BOTH) 1980 mask = INT_MASK_ENABLE; 1981 else 1982 mask = INT_MASK_ENABLE_NO_FLOW; 1983 1984 writel(mask, &adapter->regs->global.int_mask); 1985} 1986 1987/* et131x_disable_interrupts - interrupt disable 1988 * @adapter: et131x device 1989 * 1990 * Block all interrupts from the et131x device at the device itself 1991 */ 1992static void et131x_disable_interrupts(struct et131x_adapter *adapter) 1993{ 1994 /* Disable all global interrupts */ 1995 writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask); 1996} 1997 1998/* et131x_tx_dma_disable - Stop of Tx_DMA on the ET1310 1999 * @adapter: pointer to our adapter 
structure 2000 */ 2001static void et131x_tx_dma_disable(struct et131x_adapter *adapter) 2002{ 2003 /* Setup the tramsmit dma configuration register */ 2004 writel(ET_TXDMA_CSR_HALT | ET_TXDMA_SNGL_EPKT, 2005 &adapter->regs->txdma.csr); 2006} 2007 2008/* et131x_enable_txrx - Enable tx/rx queues 2009 * @netdev: device to be enabled 2010 */ 2011static void et131x_enable_txrx(struct net_device *netdev) 2012{ 2013 struct et131x_adapter *adapter = netdev_priv(netdev); 2014 2015 /* Enable the Tx and Rx DMA engines (if not already enabled) */ 2016 et131x_rx_dma_enable(adapter); 2017 et131x_tx_dma_enable(adapter); 2018 2019 /* Enable device interrupts */ 2020 if (adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE) 2021 et131x_enable_interrupts(adapter); 2022 2023 /* We're ready to move some data, so start the queue */ 2024 netif_start_queue(netdev); 2025} 2026 2027/* et131x_disable_txrx - Disable tx/rx queues 2028 * @netdev: device to be disabled 2029 */ 2030static void et131x_disable_txrx(struct net_device *netdev) 2031{ 2032 struct et131x_adapter *adapter = netdev_priv(netdev); 2033 2034 /* First thing is to stop the queue */ 2035 netif_stop_queue(netdev); 2036 2037 /* Stop the Tx and Rx DMA engines */ 2038 et131x_rx_dma_disable(adapter); 2039 et131x_tx_dma_disable(adapter); 2040 2041 /* Disable device interrupts */ 2042 et131x_disable_interrupts(adapter); 2043} 2044 2045/* et131x_init_send - Initialize send data structures 2046 * @adapter: pointer to our private adapter structure 2047 */ 2048static void et131x_init_send(struct et131x_adapter *adapter) 2049{ 2050 struct tcb *tcb; 2051 u32 ct; 2052 struct tx_ring *tx_ring; 2053 2054 /* Setup some convenience pointers */ 2055 tx_ring = &adapter->tx_ring; 2056 tcb = adapter->tx_ring.tcb_ring; 2057 2058 tx_ring->tcb_qhead = tcb; 2059 2060 memset(tcb, 0, sizeof(struct tcb) * NUM_TCB); 2061 2062 /* Go through and set up each TCB */ 2063 for (ct = 0; ct++ < NUM_TCB; tcb++) 2064 /* Set the link pointer in HW TCB to the next TCB in the 
2065 * chain 2066 */ 2067 tcb->next = tcb + 1; 2068 2069 /* Set the tail pointer */ 2070 tcb--; 2071 tx_ring->tcb_qtail = tcb; 2072 tcb->next = NULL; 2073 /* Curr send queue should now be empty */ 2074 tx_ring->send_head = NULL; 2075 tx_ring->send_tail = NULL; 2076} 2077 2078/* et1310_enable_phy_coma - called when network cable is unplugged 2079 * @adapter: pointer to our adapter structure 2080 * 2081 * driver receive an phy status change interrupt while in D0 and check that 2082 * phy_status is down. 2083 * 2084 * -- gate off JAGCore; 2085 * -- set gigE PHY in Coma mode 2086 * -- wake on phy_interrupt; Perform software reset JAGCore, 2087 * re-initialize jagcore and gigE PHY 2088 * 2089 * Add D0-ASPM-PhyLinkDown Support: 2090 * -- while in D0, when there is a phy_interrupt indicating phy link 2091 * down status, call the MPSetPhyComa routine to enter this active 2092 * state power saving mode 2093 * -- while in D0-ASPM-PhyLinkDown mode, when there is a phy_interrupt 2094 * indicating linkup status, call the MPDisablePhyComa routine to 2095 * restore JAGCore and gigE PHY 2096 */ 2097static void et1310_enable_phy_coma(struct et131x_adapter *adapter) 2098{ 2099 unsigned long flags; 2100 u32 pmcsr; 2101 2102 pmcsr = readl(&adapter->regs->global.pm_csr); 2103 2104 /* Save the GbE PHY speed and duplex modes. Need to restore this 2105 * when cable is plugged back in 2106 */ 2107 /* TODO - when PM is re-enabled, check if we need to 2108 * perform a similar task as this - 2109 * adapter->pdown_speed = adapter->ai_force_speed; 2110 * adapter->pdown_duplex = adapter->ai_force_duplex; 2111 */ 2112 2113 /* Stop sending packets. 
*/ 2114 spin_lock_irqsave(&adapter->send_hw_lock, flags); 2115 adapter->flags |= FMP_ADAPTER_LOWER_POWER; 2116 spin_unlock_irqrestore(&adapter->send_hw_lock, flags); 2117 2118 /* Wait for outstanding Receive packets */ 2119 2120 et131x_disable_txrx(adapter->netdev); 2121 2122 /* Gate off JAGCore 3 clock domains */ 2123 pmcsr &= ~ET_PMCSR_INIT; 2124 writel(pmcsr, &adapter->regs->global.pm_csr); 2125 2126 /* Program gigE PHY in to Coma mode */ 2127 pmcsr |= ET_PM_PHY_SW_COMA; 2128 writel(pmcsr, &adapter->regs->global.pm_csr); 2129} 2130 2131/* et1310_disable_phy_coma - Disable the Phy Coma Mode 2132 * @adapter: pointer to our adapter structure 2133 */ 2134static void et1310_disable_phy_coma(struct et131x_adapter *adapter) 2135{ 2136 u32 pmcsr; 2137 2138 pmcsr = readl(&adapter->regs->global.pm_csr); 2139 2140 /* Disable phy_sw_coma register and re-enable JAGCore clocks */ 2141 pmcsr |= ET_PMCSR_INIT; 2142 pmcsr &= ~ET_PM_PHY_SW_COMA; 2143 writel(pmcsr, &adapter->regs->global.pm_csr); 2144 2145 /* Restore the GbE PHY speed and duplex modes; 2146 * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY 2147 */ 2148 /* TODO - when PM is re-enabled, check if we need to 2149 * perform a similar task as this - 2150 * adapter->ai_force_speed = adapter->pdown_speed; 2151 * adapter->ai_force_duplex = adapter->pdown_duplex; 2152 */ 2153 2154 /* Re-initialize the send structures */ 2155 et131x_init_send(adapter); 2156 2157 /* Bring the device back to the state it was during init prior to 2158 * autonegotiation being complete. This way, when we get the auto-neg 2159 * complete interrupt, we can complete init by calling ConfigMacREGS2. 2160 */ 2161 et131x_soft_reset(adapter); 2162 2163 /* setup et1310 as per the documentation ?? 
*/ 2164 et131x_adapter_setup(adapter); 2165 2166 /* Allow Tx to restart */ 2167 adapter->flags &= ~FMP_ADAPTER_LOWER_POWER; 2168 2169 et131x_enable_txrx(adapter->netdev); 2170} 2171 2172static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit) 2173{ 2174 u32 tmp_free_buff_ring = *free_buff_ring; 2175 tmp_free_buff_ring++; 2176 /* This works for all cases where limit < 1024. The 1023 case 2177 * works because 1023++ is 1024 which means the if condition is not 2178 * taken but the carry of the bit into the wrap bit toggles the wrap 2179 * value correctly 2180 */ 2181 if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) { 2182 tmp_free_buff_ring &= ~ET_DMA10_MASK; 2183 tmp_free_buff_ring ^= ET_DMA10_WRAP; 2184 } 2185 /* For the 1023 case */ 2186 tmp_free_buff_ring &= (ET_DMA10_MASK|ET_DMA10_WRAP); 2187 *free_buff_ring = tmp_free_buff_ring; 2188 return tmp_free_buff_ring; 2189} 2190 2191/* et131x_rx_dma_memory_alloc 2192 * @adapter: pointer to our private adapter structure 2193 * 2194 * Returns 0 on success and errno on failure (as defined in errno.h) 2195 * 2196 * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required, 2197 * and the Packet Status Ring. 2198 */ 2199static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter) 2200{ 2201 u8 id; 2202 u32 i, j; 2203 u32 bufsize; 2204 u32 pktstat_ringsize; 2205 u32 fbr_chunksize; 2206 struct rx_ring *rx_ring; 2207 2208 /* Setup some convenience pointers */ 2209 rx_ring = &adapter->rx_ring; 2210 2211 /* Alloc memory for the lookup table */ 2212 rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL); 2213 rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL); 2214 2215 /* The first thing we will do is configure the sizes of the buffer 2216 * rings. These will change based on jumbo packet support. 
Larger 2217 * jumbo packets increases the size of each entry in FBR0, and the 2218 * number of entries in FBR0, while at the same time decreasing the 2219 * number of entries in FBR1. 2220 * 2221 * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1 2222 * entries are huge in order to accommodate a "jumbo" frame, then it 2223 * will have less entries. Conversely, FBR1 will now be relied upon 2224 * to carry more "normal" frames, thus it's entry size also increases 2225 * and the number of entries goes up too (since it now carries 2226 * "small" + "regular" packets. 2227 * 2228 * In this scheme, we try to maintain 512 entries between the two 2229 * rings. Also, FBR1 remains a constant size - when it's size doubles 2230 * the number of entries halves. FBR0 increases in size, however. 2231 */ 2232 2233 if (adapter->registry_jumbo_packet < 2048) { 2234 rx_ring->fbr[0]->buffsize = 256; 2235 rx_ring->fbr[0]->num_entries = 512; 2236 rx_ring->fbr[1]->buffsize = 2048; 2237 rx_ring->fbr[1]->num_entries = 512; 2238 } else if (adapter->registry_jumbo_packet < 4096) { 2239 rx_ring->fbr[0]->buffsize = 512; 2240 rx_ring->fbr[0]->num_entries = 1024; 2241 rx_ring->fbr[1]->buffsize = 4096; 2242 rx_ring->fbr[1]->num_entries = 512; 2243 } else { 2244 rx_ring->fbr[0]->buffsize = 1024; 2245 rx_ring->fbr[0]->num_entries = 768; 2246 rx_ring->fbr[1]->buffsize = 16384; 2247 rx_ring->fbr[1]->num_entries = 128; 2248 } 2249 2250 adapter->rx_ring.psr_num_entries = 2251 adapter->rx_ring.fbr[0]->num_entries + 2252 adapter->rx_ring.fbr[1]->num_entries; 2253 2254 for (id = 0; id < NUM_FBRS; id++) { 2255 /* Allocate an area of memory for Free Buffer Ring */ 2256 bufsize = 2257 (sizeof(struct fbr_desc) * rx_ring->fbr[id]->num_entries); 2258 rx_ring->fbr[id]->ring_virtaddr = 2259 dma_alloc_coherent(&adapter->pdev->dev, 2260 bufsize, 2261 &rx_ring->fbr[id]->ring_physaddr, 2262 GFP_KERNEL); 2263 if (!rx_ring->fbr[id]->ring_virtaddr) { 2264 dev_err(&adapter->pdev->dev, 2265 "Cannot alloc memory 
for Free Buffer Ring %d\n", id); 2266 return -ENOMEM; 2267 } 2268 } 2269 2270 for (id = 0; id < NUM_FBRS; id++) { 2271 fbr_chunksize = (FBR_CHUNKS * rx_ring->fbr[id]->buffsize); 2272 2273 for (i = 0; 2274 i < (rx_ring->fbr[id]->num_entries / FBR_CHUNKS); i++) { 2275 dma_addr_t fbr_tmp_physaddr; 2276 2277 rx_ring->fbr[id]->mem_virtaddrs[i] = dma_alloc_coherent( 2278 &adapter->pdev->dev, fbr_chunksize, 2279 &rx_ring->fbr[id]->mem_physaddrs[i], 2280 GFP_KERNEL); 2281 2282 if (!rx_ring->fbr[id]->mem_virtaddrs[i]) { 2283 dev_err(&adapter->pdev->dev, 2284 "Could not alloc memory\n"); 2285 return -ENOMEM; 2286 } 2287 2288 /* See NOTE in "Save Physical Address" comment above */ 2289 fbr_tmp_physaddr = rx_ring->fbr[id]->mem_physaddrs[i]; 2290 2291 for (j = 0; j < FBR_CHUNKS; j++) { 2292 u32 index = (i * FBR_CHUNKS) + j; 2293 2294 /* Save the Virtual address of this index for 2295 * quick access later 2296 */ 2297 rx_ring->fbr[id]->virt[index] = 2298 (u8 *) rx_ring->fbr[id]->mem_virtaddrs[i] + 2299 (j * rx_ring->fbr[id]->buffsize); 2300 2301 /* now store the physical address in the 2302 * descriptor so the device can access it 2303 */ 2304 rx_ring->fbr[id]->bus_high[index] = 2305 upper_32_bits(fbr_tmp_physaddr); 2306 rx_ring->fbr[id]->bus_low[index] = 2307 lower_32_bits(fbr_tmp_physaddr); 2308 2309 fbr_tmp_physaddr += rx_ring->fbr[id]->buffsize; 2310 } 2311 } 2312 } 2313 2314 /* Allocate an area of memory for FIFO of Packet Status ring entries */ 2315 pktstat_ringsize = 2316 sizeof(struct pkt_stat_desc) * adapter->rx_ring.psr_num_entries; 2317 2318 rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev, 2319 pktstat_ringsize, 2320 &rx_ring->ps_ring_physaddr, 2321 GFP_KERNEL); 2322 2323 if (!rx_ring->ps_ring_virtaddr) { 2324 dev_err(&adapter->pdev->dev, 2325 "Cannot alloc memory for Packet Status Ring\n"); 2326 return -ENOMEM; 2327 } 2328 pr_info("Packet Status Ring %llx\n", 2329 (unsigned long long) rx_ring->ps_ring_physaddr); 2330 2331 /* NOTE : 
dma_alloc_coherent(), used above to alloc DMA regions, 2332 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses 2333 * are ever returned, make sure the high part is retrieved here before 2334 * storing the adjusted address. 2335 */ 2336 2337 /* Allocate an area of memory for writeback of status information */ 2338 rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev, 2339 sizeof(struct rx_status_block), 2340 &rx_ring->rx_status_bus, 2341 GFP_KERNEL); 2342 if (!rx_ring->rx_status_block) { 2343 dev_err(&adapter->pdev->dev, 2344 "Cannot alloc memory for Status Block\n"); 2345 return -ENOMEM; 2346 } 2347 rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD; 2348 pr_info("PRS %llx\n", (unsigned long long)rx_ring->rx_status_bus); 2349 2350 /* The RFDs are going to be put on lists later on, so initialize the 2351 * lists now. 2352 */ 2353 INIT_LIST_HEAD(&rx_ring->recv_list); 2354 return 0; 2355} 2356 2357/* et131x_rx_dma_memory_free - Free all memory allocated within this module. 
2358 * @adapter: pointer to our private adapter structure 2359 */ 2360static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter) 2361{ 2362 u8 id; 2363 u32 index; 2364 u32 bufsize; 2365 u32 pktstat_ringsize; 2366 struct rfd *rfd; 2367 struct rx_ring *rx_ring; 2368 2369 /* Setup some convenience pointers */ 2370 rx_ring = &adapter->rx_ring; 2371 2372 /* Free RFDs and associated packet descriptors */ 2373 WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd); 2374 2375 while (!list_empty(&rx_ring->recv_list)) { 2376 rfd = (struct rfd *) list_entry(rx_ring->recv_list.next, 2377 struct rfd, list_node); 2378 2379 list_del(&rfd->list_node); 2380 rfd->skb = NULL; 2381 kfree(rfd); 2382 } 2383 2384 /* Free Free Buffer Rings */ 2385 for (id = 0; id < NUM_FBRS; id++) { 2386 if (!rx_ring->fbr[id]->ring_virtaddr) 2387 continue; 2388 2389 /* First the packet memory */ 2390 for (index = 0; 2391 index < (rx_ring->fbr[id]->num_entries / FBR_CHUNKS); 2392 index++) { 2393 if (rx_ring->fbr[id]->mem_virtaddrs[index]) { 2394 bufsize = 2395 rx_ring->fbr[id]->buffsize * FBR_CHUNKS; 2396 2397 dma_free_coherent(&adapter->pdev->dev, 2398 bufsize, 2399 rx_ring->fbr[id]->mem_virtaddrs[index], 2400 rx_ring->fbr[id]->mem_physaddrs[index]); 2401 2402 rx_ring->fbr[id]->mem_virtaddrs[index] = NULL; 2403 } 2404 } 2405 2406 bufsize = 2407 sizeof(struct fbr_desc) * rx_ring->fbr[id]->num_entries; 2408 2409 dma_free_coherent(&adapter->pdev->dev, bufsize, 2410 rx_ring->fbr[id]->ring_virtaddr, 2411 rx_ring->fbr[id]->ring_physaddr); 2412 2413 rx_ring->fbr[id]->ring_virtaddr = NULL; 2414 } 2415 2416 /* Free Packet Status Ring */ 2417 if (rx_ring->ps_ring_virtaddr) { 2418 pktstat_ringsize = sizeof(struct pkt_stat_desc) * 2419 adapter->rx_ring.psr_num_entries; 2420 2421 dma_free_coherent(&adapter->pdev->dev, pktstat_ringsize, 2422 rx_ring->ps_ring_virtaddr, 2423 rx_ring->ps_ring_physaddr); 2424 2425 rx_ring->ps_ring_virtaddr = NULL; 2426 } 2427 2428 /* Free area of memory for the writeback of status 
information */ 2429 if (rx_ring->rx_status_block) { 2430 dma_free_coherent(&adapter->pdev->dev, 2431 sizeof(struct rx_status_block), 2432 rx_ring->rx_status_block, rx_ring->rx_status_bus); 2433 rx_ring->rx_status_block = NULL; 2434 } 2435 2436 /* Free the FBR Lookup Table */ 2437 kfree(rx_ring->fbr[0]); 2438 kfree(rx_ring->fbr[1]); 2439 2440 /* Reset Counters */ 2441 rx_ring->num_ready_recv = 0; 2442} 2443 2444/* et131x_init_recv - Initialize receive data structures. 2445 * @adapter: pointer to our private adapter structure 2446 * 2447 * Returns 0 on success and errno on failure (as defined in errno.h) 2448 */ 2449static int et131x_init_recv(struct et131x_adapter *adapter) 2450{ 2451 struct rfd *rfd; 2452 u32 rfdct; 2453 u32 numrfd = 0; 2454 struct rx_ring *rx_ring; 2455 2456 /* Setup some convenience pointers */ 2457 rx_ring = &adapter->rx_ring; 2458 2459 /* Setup each RFD */ 2460 for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) { 2461 rfd = kzalloc(sizeof(struct rfd), GFP_ATOMIC | GFP_DMA); 2462 if (!rfd) 2463 return -ENOMEM; 2464 2465 rfd->skb = NULL; 2466 2467 /* Add this RFD to the recv_list */ 2468 list_add_tail(&rfd->list_node, &rx_ring->recv_list); 2469 2470 /* Increment both the available RFD's, and the total RFD's. */ 2471 rx_ring->num_ready_recv++; 2472 numrfd++; 2473 } 2474 2475 return 0; 2476} 2477 2478/* et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate. 2479 * @adapter: pointer to our adapter structure 2480 */ 2481static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter) 2482{ 2483 struct phy_device *phydev = adapter->phydev; 2484 2485 if (!phydev) 2486 return; 2487 2488 /* For version B silicon, we do not use the RxDMA timer for 10 and 100 2489 * Mbits/s line rates. We do not enable and RxDMA interrupt coalescing. 
2490 */ 2491 if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) { 2492 writel(0, &adapter->regs->rxdma.max_pkt_time); 2493 writel(1, &adapter->regs->rxdma.num_pkt_done); 2494 } 2495} 2496 2497/* NICReturnRFD - Recycle a RFD and put it back onto the receive list 2498 * @adapter: pointer to our adapter 2499 * @rfd: pointer to the RFD 2500 */ 2501static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd) 2502{ 2503 struct rx_ring *rx_local = &adapter->rx_ring; 2504 struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma; 2505 u16 buff_index = rfd->bufferindex; 2506 u8 ring_index = rfd->ringindex; 2507 unsigned long flags; 2508 2509 /* We don't use any of the OOB data besides status. Otherwise, we 2510 * need to clean up OOB data 2511 */ 2512 if (buff_index < rx_local->fbr[ring_index]->num_entries) { 2513 u32 __iomem *offset; 2514 struct fbr_desc *next; 2515 2516 spin_lock_irqsave(&adapter->fbr_lock, flags); 2517 2518 if (ring_index == 0) 2519 offset = &rx_dma->fbr0_full_offset; 2520 else 2521 offset = &rx_dma->fbr1_full_offset; 2522 2523 next = (struct fbr_desc *) 2524 (rx_local->fbr[ring_index]->ring_virtaddr) + 2525 INDEX10(rx_local->fbr[ring_index]->local_full); 2526 2527 /* Handle the Free Buffer Ring advancement here. 
Write 2528 * the PA / Buffer Index for the returned buffer into 2529 * the oldest (next to be freed)FBR entry 2530 */ 2531 next->addr_hi = rx_local->fbr[ring_index]->bus_high[buff_index]; 2532 next->addr_lo = rx_local->fbr[ring_index]->bus_low[buff_index]; 2533 next->word2 = buff_index; 2534 2535 writel(bump_free_buff_ring( 2536 &rx_local->fbr[ring_index]->local_full, 2537 rx_local->fbr[ring_index]->num_entries - 1), 2538 offset); 2539 2540 spin_unlock_irqrestore(&adapter->fbr_lock, flags); 2541 } else { 2542 dev_err(&adapter->pdev->dev, 2543 "%s illegal Buffer Index returned\n", __func__); 2544 } 2545 2546 /* The processing on this RFD is done, so put it back on the tail of 2547 * our list 2548 */ 2549 spin_lock_irqsave(&adapter->rcv_lock, flags); 2550 list_add_tail(&rfd->list_node, &rx_local->recv_list); 2551 rx_local->num_ready_recv++; 2552 spin_unlock_irqrestore(&adapter->rcv_lock, flags); 2553 2554 WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd); 2555} 2556 2557/* nic_rx_pkts - Checks the hardware for available packets 2558 * @adapter: pointer to our adapter 2559 * 2560 * Returns rfd, a pointer to our MPRFD. 2561 * 2562 * Checks the hardware for available packets, using completion ring 2563 * If packets are available, it gets an RFD from the recv_list, attaches 2564 * the packet to it, puts the RFD in the RecvPendList, and also returns 2565 * the pointer to the RFD. 2566 */ 2567static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter) 2568{ 2569 struct rx_ring *rx_local = &adapter->rx_ring; 2570 struct rx_status_block *status; 2571 struct pkt_stat_desc *psr; 2572 struct rfd *rfd; 2573 u32 i; 2574 u8 *buf; 2575 unsigned long flags; 2576 struct list_head *element; 2577 u8 ring_index; 2578 u16 buff_index; 2579 u32 len; 2580 u32 word0; 2581 u32 word1; 2582 struct sk_buff *skb; 2583 2584 /* RX Status block is written by the DMA engine prior to every 2585 * interrupt. 
It contains the next to be used entry in the Packet 2586 * Status Ring, and also the two Free Buffer rings. 2587 */ 2588 status = rx_local->rx_status_block; 2589 word1 = status->word1 >> 16; /* Get the useful bits */ 2590 2591 /* Check the PSR and wrap bits do not match */ 2592 if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF)) 2593 return NULL; /* Looks like this ring is not updated yet */ 2594 2595 /* The packet status ring indicates that data is available. */ 2596 psr = (struct pkt_stat_desc *) (rx_local->ps_ring_virtaddr) + 2597 (rx_local->local_psr_full & 0xFFF); 2598 2599 /* Grab any information that is required once the PSR is advanced, 2600 * since we can no longer rely on the memory being accurate 2601 */ 2602 len = psr->word1 & 0xFFFF; 2603 ring_index = (psr->word1 >> 26) & 0x03; 2604 buff_index = (psr->word1 >> 16) & 0x3FF; 2605 word0 = psr->word0; 2606 2607 /* Indicate that we have used this PSR entry. */ 2608 /* FIXME wrap 12 */ 2609 add_12bit(&rx_local->local_psr_full, 1); 2610 if ( 2611 (rx_local->local_psr_full & 0xFFF) > rx_local->psr_num_entries - 1) { 2612 /* Clear psr full and toggle the wrap bit */ 2613 rx_local->local_psr_full &= ~0xFFF; 2614 rx_local->local_psr_full ^= 0x1000; 2615 } 2616 2617 writel(rx_local->local_psr_full, &adapter->regs->rxdma.psr_full_offset); 2618 2619 if (ring_index > 1 || 2620 buff_index > rx_local->fbr[ring_index]->num_entries - 1) { 2621 /* Illegal buffer or ring index cannot be used by S/W*/ 2622 dev_err(&adapter->pdev->dev, 2623 "NICRxPkts PSR Entry %d indicates length of %d and/or bad bi(%d)\n", 2624 rx_local->local_psr_full & 0xFFF, len, buff_index); 2625 return NULL; 2626 } 2627 2628 /* Get and fill the RFD. 
*/ 2629 spin_lock_irqsave(&adapter->rcv_lock, flags); 2630 2631 element = rx_local->recv_list.next; 2632 rfd = (struct rfd *) list_entry(element, struct rfd, list_node); 2633 2634 if (!rfd) { 2635 spin_unlock_irqrestore(&adapter->rcv_lock, flags); 2636 return NULL; 2637 } 2638 2639 list_del(&rfd->list_node); 2640 rx_local->num_ready_recv--; 2641 2642 spin_unlock_irqrestore(&adapter->rcv_lock, flags); 2643 2644 rfd->bufferindex = buff_index; 2645 rfd->ringindex = ring_index; 2646 2647 /* In V1 silicon, there is a bug which screws up filtering of runt 2648 * packets. Therefore runt packet filtering is disabled in the MAC and 2649 * the packets are dropped here. They are also counted here. 2650 */ 2651 if (len < (NIC_MIN_PACKET_SIZE + 4)) { 2652 adapter->stats.rx_other_errs++; 2653 len = 0; 2654 } 2655 2656 if (len == 0) { 2657 rfd->len = 0; 2658 goto out; 2659 } 2660 2661 /* Determine if this is a multicast packet coming in */ 2662 if ((word0 & ALCATEL_MULTICAST_PKT) && 2663 !(word0 & ALCATEL_BROADCAST_PKT)) { 2664 /* Promiscuous mode and Multicast mode are not mutually 2665 * exclusive as was first thought. I guess Promiscuous is just 2666 * considered a super-set of the other filters. Generally filter 2667 * is 0x2b when in promiscuous mode. 2668 */ 2669 if ((adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) 2670 && !(adapter->packet_filter & ET131X_PACKET_TYPE_PROMISCUOUS) 2671 && !(adapter->packet_filter & 2672 ET131X_PACKET_TYPE_ALL_MULTICAST)) { 2673 buf = rx_local->fbr[ring_index]->virt[buff_index]; 2674 2675 /* Loop through our list to see if the destination 2676 * address of this packet matches one in our list. 
2677 */ 2678 for (i = 0; i < adapter->multicast_addr_count; i++) { 2679 if (buf[0] == adapter->multicast_list[i][0] 2680 && buf[1] == adapter->multicast_list[i][1] 2681 && buf[2] == adapter->multicast_list[i][2] 2682 && buf[3] == adapter->multicast_list[i][3] 2683 && buf[4] == adapter->multicast_list[i][4] 2684 && buf[5] == adapter->multicast_list[i][5]) { 2685 break; 2686 } 2687 } 2688 2689 /* If our index is equal to the number of Multicast 2690 * address we have, then this means we did not find this 2691 * packet's matching address in our list. Set the len to 2692 * zero, so we free our RFD when we return from this 2693 * function. 2694 */ 2695 if (i == adapter->multicast_addr_count) 2696 len = 0; 2697 } 2698 2699 if (len > 0) 2700 adapter->stats.multicast_pkts_rcvd++; 2701 } else if (word0 & ALCATEL_BROADCAST_PKT) { 2702 adapter->stats.broadcast_pkts_rcvd++; 2703 } else { 2704 /* Not sure what this counter measures in promiscuous mode. 2705 * Perhaps we should check the MAC address to see if it is 2706 * directed to us in promiscuous mode. 2707 */ 2708 adapter->stats.unicast_pkts_rcvd++; 2709 } 2710 2711 if (len == 0) { 2712 rfd->len = 0; 2713 goto out; 2714 } 2715 2716 rfd->len = len; 2717 2718 skb = dev_alloc_skb(rfd->len + 2); 2719 if (!skb) { 2720 dev_err(&adapter->pdev->dev, "Couldn't alloc an SKB for Rx\n"); 2721 return NULL; 2722 } 2723 2724 adapter->net_stats.rx_bytes += rfd->len; 2725 2726 memcpy(skb_put(skb, rfd->len), 2727 rx_local->fbr[ring_index]->virt[buff_index], 2728 rfd->len); 2729 2730 skb->protocol = eth_type_trans(skb, adapter->netdev); 2731 skb->ip_summed = CHECKSUM_NONE; 2732 netif_rx_ni(skb); 2733 2734out: 2735 nic_return_rfd(adapter, rfd); 2736 return rfd; 2737} 2738 2739/* et131x_handle_recv_interrupt - Interrupt handler for receive processing 2740 * @adapter: pointer to our adapter 2741 * 2742 * Assumption, Rcv spinlock has been acquired. 
2743 */ 2744static void et131x_handle_recv_interrupt(struct et131x_adapter *adapter) 2745{ 2746 struct rfd *rfd = NULL; 2747 u32 count = 0; 2748 bool done = true; 2749 2750 /* Process up to available RFD's */ 2751 while (count < NUM_PACKETS_HANDLED) { 2752 if (list_empty(&adapter->rx_ring.recv_list)) { 2753 WARN_ON(adapter->rx_ring.num_ready_recv != 0); 2754 done = false; 2755 break; 2756 } 2757 2758 rfd = nic_rx_pkts(adapter); 2759 2760 if (rfd == NULL) 2761 break; 2762 2763 /* Do not receive any packets until a filter has been set. 2764 * Do not receive any packets until we have link. 2765 * If length is zero, return the RFD in order to advance the 2766 * Free buffer ring. 2767 */ 2768 if (!adapter->packet_filter || 2769 !netif_carrier_ok(adapter->netdev) || 2770 rfd->len == 0) 2771 continue; 2772 2773 /* Increment the number of packets we received */ 2774 adapter->net_stats.rx_packets++; 2775 2776 /* Set the status on the packet, either resources or success */ 2777 if (adapter->rx_ring.num_ready_recv < RFD_LOW_WATER_MARK) 2778 dev_warn(&adapter->pdev->dev, "RFD's are running out\n"); 2779 2780 count++; 2781 } 2782 2783 if (count == NUM_PACKETS_HANDLED || !done) { 2784 adapter->rx_ring.unfinished_receives = true; 2785 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO, 2786 &adapter->regs->global.watchdog_timer); 2787 } else 2788 /* Watchdog timer will disable itself if appropriate. */ 2789 adapter->rx_ring.unfinished_receives = false; 2790} 2791 2792/* et131x_tx_dma_memory_alloc 2793 * @adapter: pointer to our private adapter structure 2794 * 2795 * Returns 0 on success and errno on failure (as defined in errno.h). 2796 * 2797 * Allocates memory that will be visible both to the device and to the CPU. 2798 * The OS will pass us packets, pointers to which we will insert in the Tx 2799 * Descriptor queue. The device will read this queue to find the packets in 2800 * memory. The device will update the "status" in memory each time it xmits a 2801 * packet. 
2802 */ 2803static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter) 2804{ 2805 int desc_size = 0; 2806 struct tx_ring *tx_ring = &adapter->tx_ring; 2807 2808 /* Allocate memory for the TCB's (Transmit Control Block) */ 2809 adapter->tx_ring.tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb), 2810 GFP_ATOMIC | GFP_DMA); 2811 if (!adapter->tx_ring.tcb_ring) 2812 return -ENOMEM; 2813 2814 desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX); 2815 tx_ring->tx_desc_ring = 2816 (struct tx_desc *) dma_alloc_coherent(&adapter->pdev->dev, 2817 desc_size, 2818 &tx_ring->tx_desc_ring_pa, 2819 GFP_KERNEL); 2820 if (!adapter->tx_ring.tx_desc_ring) { 2821 dev_err(&adapter->pdev->dev, 2822 "Cannot alloc memory for Tx Ring\n"); 2823 return -ENOMEM; 2824 } 2825 2826 /* Save physical address 2827 * 2828 * NOTE: dma_alloc_coherent(), used above to alloc DMA regions, 2829 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses 2830 * are ever returned, make sure the high part is retrieved here before 2831 * storing the adjusted address. 2832 */ 2833 /* Allocate memory for the Tx status block */ 2834 tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev, 2835 sizeof(u32), 2836 &tx_ring->tx_status_pa, 2837 GFP_KERNEL); 2838 if (!adapter->tx_ring.tx_status_pa) { 2839 dev_err(&adapter->pdev->dev, 2840 "Cannot alloc memory for Tx status block\n"); 2841 return -ENOMEM; 2842 } 2843 return 0; 2844} 2845 2846/* et131x_tx_dma_memory_free - Free all memory allocated within this module 2847 * @adapter: pointer to our private adapter structure 2848 * 2849 * Returns 0 on success and errno on failure (as defined in errno.h). 
2850 */ 2851static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter) 2852{ 2853 int desc_size = 0; 2854 2855 if (adapter->tx_ring.tx_desc_ring) { 2856 /* Free memory relating to Tx rings here */ 2857 desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX); 2858 dma_free_coherent(&adapter->pdev->dev, 2859 desc_size, 2860 adapter->tx_ring.tx_desc_ring, 2861 adapter->tx_ring.tx_desc_ring_pa); 2862 adapter->tx_ring.tx_desc_ring = NULL; 2863 } 2864 2865 /* Free memory for the Tx status block */ 2866 if (adapter->tx_ring.tx_status) { 2867 dma_free_coherent(&adapter->pdev->dev, 2868 sizeof(u32), 2869 adapter->tx_ring.tx_status, 2870 adapter->tx_ring.tx_status_pa); 2871 2872 adapter->tx_ring.tx_status = NULL; 2873 } 2874 /* Free the memory for the tcb structures */ 2875 kfree(adapter->tx_ring.tcb_ring); 2876} 2877 2878/* nic_send_packet - NIC specific send handler for version B silicon. 2879 * @adapter: pointer to our adapter 2880 * @tcb: pointer to struct tcb 2881 * 2882 * Returns 0 or errno. 2883 */ 2884static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb) 2885{ 2886 u32 i; 2887 struct tx_desc desc[24]; /* 24 x 16 byte */ 2888 u32 frag = 0; 2889 u32 thiscopy, remainder; 2890 struct sk_buff *skb = tcb->skb; 2891 u32 nr_frags = skb_shinfo(skb)->nr_frags + 1; 2892 struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0]; 2893 unsigned long flags; 2894 struct phy_device *phydev = adapter->phydev; 2895 dma_addr_t dma_addr; 2896 2897 /* Part of the optimizations of this send routine restrict us to 2898 * sending 24 fragments at a pass. In practice we should never see 2899 * more than 5 fragments. 2900 * 2901 * NOTE: The older version of this function (below) can handle any 2902 * number of fragments. If needed, we can call this function, 2903 * although it is less efficient. 2904 */ 2905 2906 /* nr_frags should be no more than 18. 
*/ 2907 BUILD_BUG_ON(MAX_SKB_FRAGS + 1 > 23); 2908 2909 memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1)); 2910 2911 for (i = 0; i < nr_frags; i++) { 2912 /* If there is something in this element, lets get a 2913 * descriptor from the ring and get the necessary data 2914 */ 2915 if (i == 0) { 2916 /* If the fragments are smaller than a standard MTU, 2917 * then map them to a single descriptor in the Tx 2918 * Desc ring. However, if they're larger, as is 2919 * possible with support for jumbo packets, then 2920 * split them each across 2 descriptors. 2921 * 2922 * This will work until we determine why the hardware 2923 * doesn't seem to like large fragments. 2924 */ 2925 if (skb_headlen(skb) <= 1514) { 2926 /* Low 16bits are length, high is vlan and 2927 * unused currently so zero 2928 */ 2929 desc[frag].len_vlan = skb_headlen(skb); 2930 dma_addr = dma_map_single(&adapter->pdev->dev, 2931 skb->data, 2932 skb_headlen(skb), 2933 DMA_TO_DEVICE); 2934 desc[frag].addr_lo = lower_32_bits(dma_addr); 2935 desc[frag].addr_hi = upper_32_bits(dma_addr); 2936 frag++; 2937 } else { 2938 desc[frag].len_vlan = skb_headlen(skb) / 2; 2939 dma_addr = dma_map_single(&adapter->pdev->dev, 2940 skb->data, 2941 (skb_headlen(skb) / 2), 2942 DMA_TO_DEVICE); 2943 desc[frag].addr_lo = lower_32_bits(dma_addr); 2944 desc[frag].addr_hi = upper_32_bits(dma_addr); 2945 frag++; 2946 2947 desc[frag].len_vlan = skb_headlen(skb) / 2; 2948 dma_addr = dma_map_single(&adapter->pdev->dev, 2949 skb->data + 2950 (skb_headlen(skb) / 2), 2951 (skb_headlen(skb) / 2), 2952 DMA_TO_DEVICE); 2953 desc[frag].addr_lo = lower_32_bits(dma_addr); 2954 desc[frag].addr_hi = upper_32_bits(dma_addr); 2955 frag++; 2956 } 2957 } else { 2958 desc[frag].len_vlan = frags[i - 1].size; 2959 dma_addr = skb_frag_dma_map(&adapter->pdev->dev, 2960 &frags[i - 1], 2961 0, 2962 frags[i - 1].size, 2963 DMA_TO_DEVICE); 2964 desc[frag].addr_lo = lower_32_bits(dma_addr); 2965 desc[frag].addr_hi = upper_32_bits(dma_addr); 2966 frag++; 
2967 } 2968 } 2969 2970 if (phydev && phydev->speed == SPEED_1000) { 2971 if (++adapter->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) { 2972 /* Last element & Interrupt flag */ 2973 desc[frag - 1].flags = 2974 TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT; 2975 adapter->tx_ring.since_irq = 0; 2976 } else { /* Last element */ 2977 desc[frag - 1].flags = TXDESC_FLAG_LASTPKT; 2978 } 2979 } else 2980 desc[frag - 1].flags = 2981 TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT; 2982 2983 desc[0].flags |= TXDESC_FLAG_FIRSTPKT; 2984 2985 tcb->index_start = adapter->tx_ring.send_idx; 2986 tcb->stale = 0; 2987 2988 spin_lock_irqsave(&adapter->send_hw_lock, flags); 2989 2990 thiscopy = NUM_DESC_PER_RING_TX - INDEX10(adapter->tx_ring.send_idx); 2991 2992 if (thiscopy >= frag) { 2993 remainder = 0; 2994 thiscopy = frag; 2995 } else { 2996 remainder = frag - thiscopy; 2997 } 2998 2999 memcpy(adapter->tx_ring.tx_desc_ring + 3000 INDEX10(adapter->tx_ring.send_idx), desc, 3001 sizeof(struct tx_desc) * thiscopy); 3002 3003 add_10bit(&adapter->tx_ring.send_idx, thiscopy); 3004 3005 if (INDEX10(adapter->tx_ring.send_idx) == 0 || 3006 INDEX10(adapter->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) { 3007 adapter->tx_ring.send_idx &= ~ET_DMA10_MASK; 3008 adapter->tx_ring.send_idx ^= ET_DMA10_WRAP; 3009 } 3010 3011 if (remainder) { 3012 memcpy(adapter->tx_ring.tx_desc_ring, 3013 desc + thiscopy, 3014 sizeof(struct tx_desc) * remainder); 3015 3016 add_10bit(&adapter->tx_ring.send_idx, remainder); 3017 } 3018 3019 if (INDEX10(adapter->tx_ring.send_idx) == 0) { 3020 if (adapter->tx_ring.send_idx) 3021 tcb->index = NUM_DESC_PER_RING_TX - 1; 3022 else 3023 tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1); 3024 } else 3025 tcb->index = adapter->tx_ring.send_idx - 1; 3026 3027 spin_lock(&adapter->tcb_send_qlock); 3028 3029 if (adapter->tx_ring.send_tail) 3030 adapter->tx_ring.send_tail->next = tcb; 3031 else 3032 adapter->tx_ring.send_head = tcb; 3033 3034 adapter->tx_ring.send_tail = tcb; 3035 3036 
WARN_ON(tcb->next != NULL); 3037 3038 adapter->tx_ring.used++; 3039 3040 spin_unlock(&adapter->tcb_send_qlock); 3041 3042 /* Write the new write pointer back to the device. */ 3043 writel(adapter->tx_ring.send_idx, 3044 &adapter->regs->txdma.service_request); 3045 3046 /* For Gig only, we use Tx Interrupt coalescing. Enable the software 3047 * timer to wake us up if this packet isn't followed by N more. 3048 */ 3049 if (phydev && phydev->speed == SPEED_1000) { 3050 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO, 3051 &adapter->regs->global.watchdog_timer); 3052 } 3053 spin_unlock_irqrestore(&adapter->send_hw_lock, flags); 3054 3055 return 0; 3056} 3057 3058/* send_packet - Do the work to send a packet 3059 * @skb: the packet(s) to send 3060 * @adapter: a pointer to the device's private adapter structure 3061 * 3062 * Return 0 in almost all cases; non-zero value in extreme hard failure only. 3063 * 3064 * Assumption: Send spinlock has been acquired 3065 */ 3066static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter) 3067{ 3068 int status; 3069 struct tcb *tcb = NULL; 3070 u16 *shbufva; 3071 unsigned long flags; 3072 3073 /* All packets must have at least a MAC address and a protocol type */ 3074 if (skb->len < ETH_HLEN) 3075 return -EIO; 3076 3077 /* Get a TCB for this packet */ 3078 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); 3079 3080 tcb = adapter->tx_ring.tcb_qhead; 3081 3082 if (tcb == NULL) { 3083 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); 3084 return -ENOMEM; 3085 } 3086 3087 adapter->tx_ring.tcb_qhead = tcb->next; 3088 3089 if (adapter->tx_ring.tcb_qhead == NULL) 3090 adapter->tx_ring.tcb_qtail = NULL; 3091 3092 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); 3093 3094 tcb->skb = skb; 3095 3096 if (skb->data != NULL && skb_headlen(skb) >= 6) { 3097 shbufva = (u16 *) skb->data; 3098 3099 if ((shbufva[0] == 0xffff) && 3100 (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) 3101 tcb->flags |= FMP_DEST_BROAD; 
		/* NOTE(review): shbufva is a little-endian u16 view of the
		 * destination MAC.  Masking TWO low bits but comparing the
		 * result against 0x0001 means "bit 0 set AND bit 1 clear";
		 * the conventional multicast test checks bit 0 only
		 * (cf. is_multicast_ether_addr()) — confirm this is
		 * intentional before relying on it.
		 */
		else if ((shbufva[0] & 0x3) == 0x0001)
			tcb->flags |= FMP_DEST_MULTI;
	}

	tcb->next = NULL;

	/* Call the NIC specific send handler. */
	status = nic_send_packet(adapter, tcb);

	if (status != 0) {
		/* The hardware path refused the packet: return the TCB to
		 * the tail of the ready queue so it can be reused, and
		 * propagate the error to the caller.
		 */
		spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

		if (adapter->tx_ring.tcb_qtail)
			adapter->tx_ring.tcb_qtail->next = tcb;
		else
			/* Apparently ready Q is empty. */
			adapter->tx_ring.tcb_qhead = tcb;

		adapter->tx_ring.tcb_qtail = tcb;
		spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
		return status;
	}
	WARN_ON(adapter->tx_ring.used > NUM_TCB);
	return 0;
}

/* et131x_send_packets - This function is called by the OS to send packets
 * @skb: the packet(s) to send
 * @netdev: device on which to TX the above packet(s)
 *
 * Return 0 in almost all cases; non-zero value in extreme hard failure only
 */
static int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
{
	int status = 0;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	/* Send these packets
	 *
	 * NOTE: The Linux Tx entry point is only given one packet at a time
	 * to Tx, so the PacketCount and it's array used makes no sense here
	 */

	/* TCB is not available */
	if (adapter->tx_ring.used >= NUM_TCB) {
		/* NOTE: If there's an error on send, no need to queue the
		 * packet under Linux; if we just send an error up to the
		 * netif layer, it will resend the skb to us.
		 */
		status = -ENOMEM;
	} else {
		/* We need to see if the link is up; if it's not, make the
		 * netif layer think we're good and drop the packet
		 */
		if ((adapter->flags & FMP_ADAPTER_FAIL_SEND_MASK) ||
		    !netif_carrier_ok(netdev)) {
			dev_kfree_skb_any(skb);
			skb = NULL;

			adapter->net_stats.tx_dropped++;
		} else {
			status = send_packet(skb, adapter);
			if (status != 0 && status != -ENOMEM) {
				/* On any other error, make netif think we're
				 * OK and drop the packet
				 */
				dev_kfree_skb_any(skb);
				skb = NULL;
				adapter->net_stats.tx_dropped++;
			}
		}
	}
	return status;
}

/* free_send_packet - Recycle a struct tcb
 * @adapter: pointer to our adapter
 * @tcb: pointer to struct tcb
 *
 * Complete the packet if necessary
 * Assumption - Send spinlock has been acquired
 */
static inline void free_send_packet(struct et131x_adapter *adapter,
				    struct tcb *tcb)
{
	unsigned long flags;
	struct tx_desc *desc = NULL;
	struct net_device_stats *stats = &adapter->net_stats;
	u64 dma_addr;

	/* Account the completed packet against the right counter based on
	 * the destination-type flags recorded at send time.
	 */
	if (tcb->flags & FMP_DEST_BROAD)
		atomic_inc(&adapter->stats.broadcast_pkts_xmtd);
	else if (tcb->flags & FMP_DEST_MULTI)
		atomic_inc(&adapter->stats.multicast_pkts_xmtd);
	else
		atomic_inc(&adapter->stats.unicast_pkts_xmtd);

	if (tcb->skb) {
		stats->tx_bytes += tcb->skb->len;

		/* Iterate through the TX descriptors on the ring
		 * corresponding to this packet and umap the fragments
		 * they point to.  Walks from index_start up to (and
		 * including) tcb->index, reassembling each 64-bit DMA
		 * address from the descriptor's lo/hi halves.
		 */
		do {
			desc = (struct tx_desc *)
			       (adapter->tx_ring.tx_desc_ring +
			       INDEX10(tcb->index_start));

			dma_addr = desc->addr_lo;
			dma_addr |= (u64)desc->addr_hi << 32;

			dma_unmap_single(&adapter->pdev->dev,
					 dma_addr,
					 desc->len_vlan, DMA_TO_DEVICE);

			add_10bit(&tcb->index_start, 1);
			if (INDEX10(tcb->index_start) >=
			    NUM_DESC_PER_RING_TX) {
				/* Ring wrapped: reset the 10-bit index and
				 * toggle the wrap indicator bit.
				 */
				tcb->index_start &= ~ET_DMA10_MASK;
				tcb->index_start ^= ET_DMA10_WRAP;
			}
		} while (desc != (adapter->tx_ring.tx_desc_ring +
				INDEX10(tcb->index)));

		dev_kfree_skb_any(tcb->skb);
	}

	memset(tcb, 0, sizeof(struct tcb));

	/* Add the TCB to the Ready Q */
	spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

	adapter->net_stats.tx_packets++;

	if (adapter->tx_ring.tcb_qtail)
		adapter->tx_ring.tcb_qtail->next = tcb;
	else
		/* Apparently ready Q is empty. */
		adapter->tx_ring.tcb_qhead = tcb;

	adapter->tx_ring.tcb_qtail = tcb;

	spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
	/* NOTE(review): only meaningful if tx_ring.used is a signed type;
	 * if it is unsigned this WARN_ON can never fire — verify the
	 * declaration of tx_ring.used.
	 */
	WARN_ON(adapter->tx_ring.used < 0);
}

/* et131x_free_busy_send_packets - Free and complete the stopped active sends
 * @adapter: pointer to our adapter
 *
 * Assumption - Send spinlock has been acquired
 */
static void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
{
	struct tcb *tcb;
	unsigned long flags;
	u32 freed = 0;

	/* Any packets being sent?
	   Check the first TCB on the send list */
	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

	tcb = adapter->tx_ring.send_head;

	/* Pop each in-flight TCB off the send list and recycle it.  The
	 * send-queue lock is dropped around free_send_packet() because that
	 * helper takes the ready-queue lock itself and may sleep-free skbs.
	 */
	while (tcb != NULL && freed < NUM_TCB) {
		struct tcb *next = tcb->next;

		adapter->tx_ring.send_head = next;

		if (next == NULL)
			adapter->tx_ring.send_tail = NULL;

		adapter->tx_ring.used--;

		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);

		freed++;
		free_send_packet(adapter, tcb);

		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		tcb = adapter->tx_ring.send_head;
	}

	/* Freeing every single TCB suggests the send list was corrupted
	 * (it should never hold more than NUM_TCB entries).
	 */
	WARN_ON(freed == NUM_TCB);

	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);

	adapter->tx_ring.used = 0;
}

/* et131x_handle_send_interrupt - Interrupt handler for sending processing
 * @adapter: pointer to our adapter
 *
 * Re-claim the send resources, complete sends and get more to send from
 * the send wait queue.
 *
 * Assumption - Send spinlock has been acquired
 */
static void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
{
	unsigned long flags;
	u32 serviced;
	struct tcb *tcb;
	u32 index;

	/* Hardware write-back of how far the TX DMA engine has progressed;
	 * low 10 bits are the ring index, ET_DMA10_WRAP is the wrap flag.
	 */
	serviced = readl(&adapter->regs->txdma.new_service_complete);
	index = INDEX10(serviced);

	/* Has the ring wrapped?
	   Process any descriptors that do not have
	 * the same "wrap" indicator as the current completion indicator
	 */
	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

	tcb = adapter->tx_ring.send_head;

	/* First pass: complete TCBs whose wrap bit differs from the
	 * serviced pointer (i.e. descriptors left over from before the
	 * ring wrapped).
	 */
	while (tcb &&
	       ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
	       index < INDEX10(tcb->index)) {
		adapter->tx_ring.used--;
		adapter->tx_ring.send_head = tcb->next;
		if (tcb->next == NULL)
			adapter->tx_ring.send_tail = NULL;

		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
		free_send_packet(adapter, tcb);
		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		/* Goto the next packet */
		tcb = adapter->tx_ring.send_head;
	}
	/* Second pass: complete TCBs on the same wrap generation whose
	 * index the hardware has already moved past.
	 */
	while (tcb &&
	       !((serviced ^ tcb->index) & ET_DMA10_WRAP)
	       && index > (tcb->index & ET_DMA10_MASK)) {
		adapter->tx_ring.used--;
		adapter->tx_ring.send_head = tcb->next;
		if (tcb->next == NULL)
			adapter->tx_ring.send_tail = NULL;

		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
		free_send_packet(adapter, tcb);
		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		/* Goto the next packet */
		tcb = adapter->tx_ring.send_head;
	}

	/* Wake up the queue when we hit a low-water mark */
	if (adapter->tx_ring.used <= NUM_TCB / 3)
		netif_wake_queue(adapter->netdev);

	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
}

/* et131x_get_settings - ethtool get_settings hook; delegates to phylib. */
static int et131x_get_settings(struct net_device *netdev,
			       struct ethtool_cmd *cmd)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	return phy_ethtool_gset(adapter->phydev, cmd);
}

/* et131x_set_settings - ethtool set_settings hook; delegates to phylib. */
static int et131x_set_settings(struct net_device *netdev,
			       struct ethtool_cmd *cmd)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	return phy_ethtool_sset(adapter->phydev, cmd);
}

/* et131x_get_regs_len - size in bytes of the register dump buffer. */
static int et131x_get_regs_len(struct net_device *netdev)
{
#define ET131X_REGS_LEN 256
	return ET131X_REGS_LEN * sizeof(u32);
}

/* et131x_get_regs - ethtool register dump.
 * @netdev: net device whose registers to read
 * @regs: ethtool header; version is filled in here
 * @regs_data: caller buffer of et131x_get_regs_len() bytes
 *
 * Dumps PHY (via MDIO reads), global, TXDMA and RXDMA register values
 * into @regs_data as an array of u32, in a fixed order.
 */
static void et131x_get_regs(struct net_device *netdev,
			    struct ethtool_regs *regs, void *regs_data)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct address_map __iomem *aregs = adapter->regs;
	u32 *regs_buff = regs_data;
	u32 num = 0;
	u16 tmp;

	memset(regs_data, 0, et131x_get_regs_len(netdev));

	/* Pack a dump-format tag, the PCI revision and the device ID into
	 * the ethtool version word.
	 */
	regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
			adapter->pdev->device;

	/* PHY regs */
	et131x_mii_read(adapter, MII_BMCR, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_BMSR, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_PHYSID1, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_PHYSID2, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_ADVERTISE, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_LPA, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_EXPANSION, &tmp);
	regs_buff[num++] = tmp;
	/* Autoneg next page transmit reg */
	et131x_mii_read(adapter, 0x07, &tmp);
	regs_buff[num++] = tmp;
	/* Link partner next page reg */
	et131x_mii_read(adapter, 0x08, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_CTRL1000, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_STAT1000, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, 0x0b, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, 0x0c, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_MMD_CTRL, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_MMD_DATA, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, MII_ESTATUS, &tmp);
	regs_buff[num++] = tmp;

	et131x_mii_read(adapter, PHY_INDEX_REG, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_DATA_REG, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL + 1, &tmp);
	regs_buff[num++] = tmp;

	et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_CONFIG, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_PHY_CONTROL, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_PHY_STATUS, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_LED_1, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_LED_2, &tmp);
	regs_buff[num++] = tmp;

	/* Global regs */
	regs_buff[num++] = readl(&aregs->global.txq_start_addr);
	regs_buff[num++] = readl(&aregs->global.txq_end_addr);
	regs_buff[num++] = readl(&aregs->global.rxq_start_addr);
	regs_buff[num++] = readl(&aregs->global.rxq_end_addr);
	regs_buff[num++] = readl(&aregs->global.pm_csr);
	/* int_status is reported from the cached copy, not re-read, since
	 * reading the live register could consume pending interrupt state.
	 */
	regs_buff[num++] = adapter->stats.interrupt_status;
	regs_buff[num++] = readl(&aregs->global.int_mask);
	regs_buff[num++] = readl(&aregs->global.int_alias_clr_en);
	regs_buff[num++] = readl(&aregs->global.int_status_alias);
	regs_buff[num++] = readl(&aregs->global.sw_reset);
	regs_buff[num++] = readl(&aregs->global.slv_timer);
	regs_buff[num++] = readl(&aregs->global.msi_config);
	regs_buff[num++] = readl(&aregs->global.loopback);
	regs_buff[num++] = readl(&aregs->global.watchdog_timer);

	/* TXDMA regs */
	regs_buff[num++] = readl(&aregs->txdma.csr);
	regs_buff[num++] = readl(&aregs->txdma.pr_base_hi);
	regs_buff[num++] = readl(&aregs->txdma.pr_base_lo);
	regs_buff[num++] = readl(&aregs->txdma.pr_num_des);
	regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr);
	regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext);
	regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr);
	regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi);
	regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo);
	regs_buff[num++] = readl(&aregs->txdma.service_request);
	regs_buff[num++] = readl(&aregs->txdma.service_complete);
	regs_buff[num++] = readl(&aregs->txdma.cache_rd_index);
	regs_buff[num++] = readl(&aregs->txdma.cache_wr_index);
	regs_buff[num++] = readl(&aregs->txdma.tx_dma_error);
	regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt);
	regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt);
	regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt);
	regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt);
	regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt);
	regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt);
	regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt);
	regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt);
	regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt);
	regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt);
	regs_buff[num++] = readl(&aregs->txdma.new_service_complete);
	regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt);

	/* RXDMA regs */
	regs_buff[num++] = readl(&aregs->rxdma.csr);
	regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi);
	regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo);
	regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done);
	regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time);
	regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr);
	regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext);
	regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr);
	regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi);
	regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo);
	regs_buff[num++] = readl(&aregs->rxdma.psr_num_des);
	regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset);
	regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset);
	regs_buff[num++] = readl(&aregs->rxdma.psr_access_index);
	regs_buff[num++] = readl(&aregs->rxdma.psr_min_des);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des);
}

/* et131x_get_drvinfo - ethtool driver info (name, version, PCI slot). */
static void et131x_get_drvinfo(struct net_device *netdev,
			       struct ethtool_drvinfo *info)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
}

static struct ethtool_ops et131x_ethtool_ops = {
	.get_settings = et131x_get_settings,
	.set_settings = et131x_set_settings,
	.get_drvinfo = et131x_get_drvinfo,
	.get_regs_len = et131x_get_regs_len,
	.get_regs = et131x_get_regs,
	.get_link = ethtool_op_get_link,
};

/* et131x_hwaddr_init - set up the MAC Address
   on the ET1310
 * @adapter: pointer to our private adapter structure
 */
static void et131x_hwaddr_init(struct et131x_adapter *adapter)
{
	/* If have our default mac from init and no mac address from
	 * EEPROM then we need to generate the last octet and set it on the
	 * device
	 */
	if (is_zero_ether_addr(adapter->rom_addr)) {
		/* We need to randomly generate the last octet so we
		 * decrease our chances of setting the mac address to
		 * same as another one of our cards in the system
		 */
		get_random_bytes(&adapter->addr[5], 1);
		/* We have the default value in the register we are
		 * working with so we need to copy the current
		 * address into the permanent address
		 */
		memcpy(adapter->rom_addr,
		       adapter->addr, ETH_ALEN);
	} else {
		/* We do not have an override address, so set the
		 * current address to the permanent address and add
		 * it to the device
		 */
		memcpy(adapter->addr,
		       adapter->rom_addr, ETH_ALEN);
	}
}

/* et131x_pci_init - initial PCI setup
 * @adapter: pointer to our private adapter structure
 * @pdev: our PCI device
 *
 * Perform the initial setup of PCI registers and if possible initialise
 * the MAC address. At this point the I/O registers have yet to be mapped
 *
 * Returns 0 on success, -EIO on any PCI config access failure.
 */
static int et131x_pci_init(struct et131x_adapter *adapter,
			   struct pci_dev *pdev)
{
	u16 max_payload;
	int i, rc;

	rc = et131x_init_eeprom(adapter);
	if (rc < 0)
		goto out;

	if (!pci_is_pcie(pdev)) {
		dev_err(&pdev->dev, "Missing PCIe capabilities\n");
		goto err_out;
	}

	/* Let's set up the PORT LOGIC Register. */

	/* Program the Ack/Nak latency and replay timers */
	max_payload = pdev->pcie_mpss;

	/* Only MPS settings 0 (128B) and 1 (256B) need the tuned timer
	 * values; the acknak/replay tables are indexed by max_payload.
	 */
	if (max_payload < 2) {
		static const u16 acknak[2] = { 0x76, 0xD0 };
		static const u16 replay[2] = { 0x1E0, 0x2ED };

		if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK,
					  acknak[max_payload])) {
			dev_err(&pdev->dev,
				"Could not write PCI config space for ACK/NAK\n");
			goto err_out;
		}
		if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
					  replay[max_payload])) {
			dev_err(&pdev->dev,
				"Could not write PCI config space for Replay Timer\n");
			goto err_out;
		}
	}

	/* l0s and l1 latency timers. We are using default values.
	 * Representing 001 for L0s and 010 for L1
	 */
	if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
		dev_err(&pdev->dev,
			"Could not write PCI config space for Latency Timers\n");
		goto err_out;
	}

	/* Change the max read size to 2k */
	if (pcie_set_readrq(pdev, 2048)) {
		dev_err(&pdev->dev,
			"Couldn't change PCI config space for Max read size\n");
		goto err_out;
	}

	/* Get MAC address from config space if an eeprom exists, otherwise
	 * the MAC address there will not be valid
	 */
	if (!adapter->has_eeprom) {
		et131x_hwaddr_init(adapter);
		return 0;
	}

	for (i = 0; i < ETH_ALEN; i++) {
		if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
					 adapter->rom_addr + i)) {
			dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
			goto err_out;
		}
	}
	memcpy(adapter->addr, adapter->rom_addr, ETH_ALEN);
out:
	return rc;
err_out:
	rc = -EIO;
	goto out;
}

/* et131x_error_timer_handler
 * @data: timer-specific variable; here a pointer to our adapter structure
 *
 * The routine called when the error timer expires, to track the number of
 * recurring errors.
 */
static void et131x_error_timer_handler(unsigned long data)
{
	struct et131x_adapter *adapter = (struct et131x_adapter *) data;
	struct phy_device *phydev = adapter->phydev;

	if (et1310_in_phy_coma(adapter)) {
		/* Bring the device immediately out of coma, to
		 * prevent it from sleeping indefinitely, this
		 * mechanism could be improved!
		 */
		et1310_disable_phy_coma(adapter);
		adapter->boot_coma = 20;
	} else {
		et1310_update_macstat_host_counters(adapter);
	}

	/* Count timer ticks with no link; at tick 10 (below) the PHY is
	 * put into coma.  The counter saturates at 11 so coma is only
	 * triggered once per link-down episode.
	 */
	if (!phydev->link && adapter->boot_coma < 11)
		adapter->boot_coma++;

	if (adapter->boot_coma == 10) {
		if (!phydev->link) {
			if (!et1310_in_phy_coma(adapter)) {
				/* NOTE - This was originally a 'sync with
				 * interrupt'. How to do that under Linux?
				 */
				et131x_enable_interrupts(adapter);
				et1310_enable_phy_coma(adapter);
			}
		}
	}

	/* This is a periodic timer, so reschedule */
	mod_timer(&adapter->error_timer, jiffies + TX_ERROR_PERIOD * HZ / 1000);
}

/* et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx
 * @adapter: pointer to our private adapter structure
 */
static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
{
	/* Free DMA memory */
	et131x_tx_dma_memory_free(adapter);
	et131x_rx_dma_memory_free(adapter);
}

/* et131x_adapter_memory_alloc
 * @adapter: pointer to our private adapter structure
 *
 * Returns 0 on success, errno on failure (as defined in errno.h).
 *
 * Allocate all the memory blocks for send, receive and others.
 */
static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
{
	int status;

	/* Allocate memory for the Tx Ring */
	status = et131x_tx_dma_memory_alloc(adapter);
	if (status != 0) {
		dev_err(&adapter->pdev->dev,
			"et131x_tx_dma_memory_alloc FAILED\n");
		return status;
	}
	/* Receive buffer memory allocation */
	status = et131x_rx_dma_memory_alloc(adapter);
	if (status != 0) {
		dev_err(&adapter->pdev->dev,
			"et131x_rx_dma_memory_alloc FAILED\n");
		/* Unwind the Tx allocation made above */
		et131x_tx_dma_memory_free(adapter);
		return status;
	}

	/* Init receive data structures */
	status = et131x_init_recv(adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"et131x_init_recv FAILED\n");
		et131x_adapter_memory_free(adapter);
	}
	return status;
}

/* et131x_adjust_link - phylib callback invoked on PHY state changes.
 * @netdev: the net device whose link state changed
 *
 * Reconfigures MAC/PHY registers for the new speed and, on link loss,
 * flushes in-flight sends and soft-resets the device.
 */
static void et131x_adjust_link(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct phy_device *phydev = adapter->phydev;

	/* NOTE(review): phydev is re-tested for NULL several times below
	 * even though this outer condition already guarantees it is
	 * non-NULL — the inner checks are redundant but harmless.
	 */
	if (phydev && phydev->link != adapter->link) {
		/* Check to see if we are in coma mode and if
		 * so, disable it because we will not be able
		 * to read PHY values until we are out.
		 */
		if (et1310_in_phy_coma(adapter))
			et1310_disable_phy_coma(adapter);

		adapter->link = phydev->link;
		phy_print_status(phydev);

		if (phydev->link) {
			adapter->boot_coma = 20;
			if (phydev && phydev->speed == SPEED_10) {
				/* NOTE - Is there a way to query this without
				 * TruePHY?
				 * && TRU_QueryCoreType(adapter->hTruePhy, 0)==
				 * EMI_TRUEPHY_A13O) {
				 */
				u16 register18;

				et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
						&register18);
				et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
						 register18 | 0x4);
				et131x_mii_write(adapter, PHY_INDEX_REG,
						 register18 | 0x8402);
				et131x_mii_write(adapter, PHY_DATA_REG,
						 register18 | 511);
				et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
						 register18);
			}

			et1310_config_flow_control(adapter);

			/* For gigabit with jumbo frames, deepen the PHY TX
			 * FIFO.
			 */
			if (phydev && phydev->speed == SPEED_1000 &&
			    adapter->registry_jumbo_packet > 2048) {
				u16 reg;

				et131x_mii_read(adapter, PHY_CONFIG, &reg);
				reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
				reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
				et131x_mii_write(adapter, PHY_CONFIG, reg);
			}

			et131x_set_rx_dma_timer(adapter);
			et1310_config_mac_regs2(adapter);
		} else {
			adapter->boot_coma = 0;

			if (phydev->speed == SPEED_10) {
				/* NOTE - Is there a way to query this without
				 * TruePHY?
				 * && TRU_QueryCoreType(adapter->hTruePhy, 0) ==
				 * EMI_TRUEPHY_A13O)
				 */
				u16 register18;

				et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
						&register18);
				et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
						 register18 | 0x4);
				et131x_mii_write(adapter, PHY_INDEX_REG,
						 register18 | 0x8402);
				et131x_mii_write(adapter, PHY_DATA_REG,
						 register18 | 511);
				et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
						 register18);
			}

			/* Free the packets being actively sent & stopped */
			et131x_free_busy_send_packets(adapter);

			/* Re-initialize the send structures */
			et131x_init_send(adapter);

			/* Bring the device back to the state it was during
			 * init prior to autonegotiation being complete. This
			 * way, when we get the auto-neg complete interrupt,
			 * we can complete init by calling config_mac_regs2.
			 */
			et131x_soft_reset(adapter);

			/* Setup ET1310 as per the documentation */
			et131x_adapter_setup(adapter);

			/* perform reset of tx/rx */
			et131x_disable_txrx(netdev);
			et131x_enable_txrx(netdev);
		}

	}
}

/* et131x_mii_probe - find and attach the first PHY on our MDIO bus.
 * @netdev: net device to bind the PHY to
 *
 * Returns 0 on success, -ENODEV if no PHY exists, or the phy_connect()
 * error.  Restricts the advertised modes to what the ET131x supports
 * (gigabit only on non-FAST device IDs).
 */
static int et131x_mii_probe(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct phy_device *phydev = NULL;

	phydev = phy_find_first(adapter->mii_bus);
	if (!phydev) {
		dev_err(&adapter->pdev->dev, "no PHY found\n");
		return -ENODEV;
	}

	phydev = phy_connect(netdev, dev_name(&phydev->dev),
			     &et131x_adjust_link, PHY_INTERFACE_MODE_MII);

	if (IS_ERR(phydev)) {
		dev_err(&adapter->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	phydev->supported &= (SUPPORTED_10baseT_Half
			      | SUPPORTED_10baseT_Full
			      | SUPPORTED_100baseT_Half
			      | SUPPORTED_100baseT_Full
			      | SUPPORTED_Autoneg
			      | SUPPORTED_MII
			      | SUPPORTED_TP);

	if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST)
		phydev->supported |= SUPPORTED_1000baseT_Full;

	phydev->advertising = phydev->supported;
	adapter->phydev = phydev;

	dev_info(&adapter->pdev->dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
		 phydev->drv->name, dev_name(&phydev->dev));

	return 0;
}

/* et131x_adapter_init
 * @adapter: pointer to the private adapter struct
 * @pdev: pointer to the PCI device
 *
 * Initialize the data structures for the et131x_adapter object and link
 * them together with the platform provided device structures.
 */
static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
						  struct pci_dev *pdev)
{
	static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };

	struct et131x_adapter *adapter;

	/* Allocate private adapter struct and copy in relevant information */
	adapter = netdev_priv(netdev);
	adapter->pdev = pci_dev_get(pdev);	/* takes a ref; dropped in remove */
	adapter->netdev = netdev;

	/* Initialize spinlocks here */
	spin_lock_init(&adapter->lock);
	spin_lock_init(&adapter->tcb_send_qlock);
	spin_lock_init(&adapter->tcb_ready_qlock);
	spin_lock_init(&adapter->send_hw_lock);
	spin_lock_init(&adapter->rcv_lock);
	spin_lock_init(&adapter->fbr_lock);

	adapter->registry_jumbo_packet = 1514;	/* 1514-9216 */

	/* Set the MAC address to a default */
	memcpy(adapter->addr, default_mac, ETH_ALEN);

	return adapter;
}

/* et131x_pci_remove
 * @pdev: a pointer to the device's pci_dev structure
 *
 * Registered in the pci_driver structure, this function is called when the
 * PCI subsystem detects that a PCI device which matches the information
 * contained in the pci_device_id table has been removed.
 */
static void et131x_pci_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct et131x_adapter *adapter = netdev_priv(netdev);

	unregister_netdev(netdev);
	phy_disconnect(adapter->phydev);
	mdiobus_unregister(adapter->mii_bus);
	cancel_work_sync(&adapter->task);
	kfree(adapter->mii_bus->irq);
	mdiobus_free(adapter->mii_bus);

	et131x_adapter_memory_free(adapter);
	iounmap(adapter->regs);
	pci_dev_put(pdev);	/* drop the ref taken in et131x_adapter_init */

	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

/* et131x_up - Bring up a device for use.
 * @netdev: device to be opened
 */
static void et131x_up(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	et131x_enable_txrx(netdev);
	phy_start(adapter->phydev);
}

/* et131x_down - Bring down the device
 * @netdev: device to be brought down
 */
static void et131x_down(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	/* Save the timestamp for the TX watchdog, prevent a timeout */
	netdev->trans_start = jiffies;

	phy_stop(adapter->phydev);
	et131x_disable_txrx(netdev);
}

#ifdef CONFIG_PM_SLEEP
/* et131x_suspend - PM sleep hook; quiesce the device and save PCI state. */
static int et131x_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netif_running(netdev)) {
		netif_device_detach(netdev);
		et131x_down(netdev);
		pci_save_state(pdev);
	}

	return 0;
}

/* et131x_resume - PM resume hook; restore PCI state and restart. */
static int et131x_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netif_running(netdev)) {
		pci_restore_state(pdev);
		et131x_up(netdev);
		netif_device_attach(netdev);
	}

	return 0;
}

static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
#define ET131X_PM_OPS (&et131x_pm_ops)
#else
#define ET131X_PM_OPS NULL
#endif

/* et131x_isr - The Interrupt Service Routine for the driver.
 * @irq: the IRQ on which the interrupt was received.
 * @dev_id: device-specific info (here a pointer to a net_device struct)
 *
 * Returns a value indicating if the interrupt was handled.
4020 */ 4021static irqreturn_t et131x_isr(int irq, void *dev_id) 4022{ 4023 bool handled = true; 4024 struct net_device *netdev = (struct net_device *)dev_id; 4025 struct et131x_adapter *adapter = NULL; 4026 u32 status; 4027 4028 if (!netif_device_present(netdev)) { 4029 handled = false; 4030 goto out; 4031 } 4032 4033 adapter = netdev_priv(netdev); 4034 4035 /* If the adapter is in low power state, then it should not 4036 * recognize any interrupt 4037 */ 4038 4039 /* Disable Device Interrupts */ 4040 et131x_disable_interrupts(adapter); 4041 4042 /* Get a copy of the value in the interrupt status register 4043 * so we can process the interrupting section 4044 */ 4045 status = readl(&adapter->regs->global.int_status); 4046 4047 if (adapter->flowcontrol == FLOW_TXONLY || 4048 adapter->flowcontrol == FLOW_BOTH) { 4049 status &= ~INT_MASK_ENABLE; 4050 } else { 4051 status &= ~INT_MASK_ENABLE_NO_FLOW; 4052 } 4053 4054 /* Make sure this is our interrupt */ 4055 if (!status) { 4056 handled = false; 4057 et131x_enable_interrupts(adapter); 4058 goto out; 4059 } 4060 4061 /* This is our interrupt, so process accordingly */ 4062 4063 if (status & ET_INTR_WATCHDOG) { 4064 struct tcb *tcb = adapter->tx_ring.send_head; 4065 4066 if (tcb) 4067 if (++tcb->stale > 1) 4068 status |= ET_INTR_TXDMA_ISR; 4069 4070 if (adapter->rx_ring.unfinished_receives) 4071 status |= ET_INTR_RXDMA_XFR_DONE; 4072 else if (tcb == NULL) 4073 writel(0, &adapter->regs->global.watchdog_timer); 4074 4075 status &= ~ET_INTR_WATCHDOG; 4076 } 4077 4078 if (status == 0) { 4079 /* This interrupt has in some way been "handled" by 4080 * the ISR. Either it was a spurious Rx interrupt, or 4081 * it was a Tx interrupt that has been filtered by 4082 * the ISR. 4083 */ 4084 et131x_enable_interrupts(adapter); 4085 goto out; 4086 } 4087 4088 /* We need to save the interrupt status value for use in our 4089 * DPC. We will clear the software copy of that in that 4090 * routine. 
4091 */ 4092 adapter->stats.interrupt_status = status; 4093 4094 /* Schedule the ISR handler as a bottom-half task in the 4095 * kernel's tq_immediate queue, and mark the queue for 4096 * execution 4097 */ 4098 schedule_work(&adapter->task); 4099out: 4100 return IRQ_RETVAL(handled); 4101} 4102 4103/* et131x_isr_handler - The ISR handler 4104 * @p_adapter, a pointer to the device's private adapter structure 4105 * 4106 * scheduled to run in a deferred context by the ISR. This is where the ISR's 4107 * work actually gets done. 4108 */ 4109static void et131x_isr_handler(struct work_struct *work) 4110{ 4111 struct et131x_adapter *adapter = 4112 container_of(work, struct et131x_adapter, task); 4113 u32 status = adapter->stats.interrupt_status; 4114 struct address_map __iomem *iomem = adapter->regs; 4115 4116 /* These first two are by far the most common. Once handled, we clear 4117 * their two bits in the status word. If the word is now zero, we 4118 * exit. 4119 */ 4120 /* Handle all the completed Transmit interrupts */ 4121 if (status & ET_INTR_TXDMA_ISR) 4122 et131x_handle_send_interrupt(adapter); 4123 4124 /* Handle all the completed Receives interrupts */ 4125 if (status & ET_INTR_RXDMA_XFR_DONE) 4126 et131x_handle_recv_interrupt(adapter); 4127 4128 status &= 0xffffffd7; 4129 4130 if (!status) 4131 goto out; 4132 4133 /* Handle the TXDMA Error interrupt */ 4134 if (status & ET_INTR_TXDMA_ERR) { 4135 u32 txdma_err; 4136 4137 /* Following read also clears the register (COR) */ 4138 txdma_err = readl(&iomem->txdma.tx_dma_error); 4139 4140 dev_warn(&adapter->pdev->dev, 4141 "TXDMA_ERR interrupt, error = %d\n", 4142 txdma_err); 4143 } 4144 4145 /* Handle Free Buffer Ring 0 and 1 Low interrupt */ 4146 if (status & (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) { 4147 /* This indicates the number of unused buffers in RXDMA free 4148 * buffer ring 0 is <= the limit you programmed. Free buffer 4149 * resources need to be returned. 
Free buffers are consumed as 4150 * packets are passed from the network to the host. The host 4151 * becomes aware of the packets from the contents of the packet 4152 * status ring. This ring is queried when the packet done 4153 * interrupt occurs. Packets are then passed to the OS. When 4154 * the OS is done with the packets the resources can be 4155 * returned to the ET1310 for re-use. This interrupt is one 4156 * method of returning resources. 4157 */ 4158 4159 /* If the user has flow control on, then we will 4160 * send a pause packet, otherwise just exit 4161 */ 4162 if (adapter->flowcontrol == FLOW_TXONLY || 4163 adapter->flowcontrol == FLOW_BOTH) { 4164 u32 pm_csr; 4165 4166 /* Tell the device to send a pause packet via the back 4167 * pressure register (bp req and bp xon/xoff) 4168 */ 4169 pm_csr = readl(&iomem->global.pm_csr); 4170 if (!et1310_in_phy_coma(adapter)) 4171 writel(3, &iomem->txmac.bp_ctrl); 4172 } 4173 } 4174 4175 /* Handle Packet Status Ring Low Interrupt */ 4176 if (status & ET_INTR_RXDMA_STAT_LOW) { 4177 /* Same idea as with the two Free Buffer Rings. Packets going 4178 * from the network to the host each consume a free buffer 4179 * resource and a packet status resource. These resoures are 4180 * passed to the OS. When the OS is done with the resources, 4181 * they need to be returned to the ET1310. This is one method 4182 * of returning the resources. 4183 */ 4184 } 4185 4186 /* Handle RXDMA Error Interrupt */ 4187 if (status & ET_INTR_RXDMA_ERR) { 4188 /* The rxdma_error interrupt is sent when a time-out on a 4189 * request issued by the JAGCore has occurred or a completion is 4190 * returned with an un-successful status. In both cases the 4191 * request is considered complete. The JAGCore will 4192 * automatically re-try the request in question. Normally 4193 * information on events like these are sent to the host using 4194 * the "Advanced Error Reporting" capability. This interrupt is 4195 * another way of getting similar information. 
The only thing 4196 * required is to clear the interrupt by reading the ISR in the 4197 * global resources. The JAGCore will do a re-try on the 4198 * request. Normally you should never see this interrupt. If 4199 * you start to see this interrupt occurring frequently then 4200 * something bad has occurred. A reset might be the thing to do. 4201 */ 4202 /* TRAP();*/ 4203 4204 dev_warn(&adapter->pdev->dev, 4205 "RxDMA_ERR interrupt, error %x\n", 4206 readl(&iomem->txmac.tx_test)); 4207 } 4208 4209 /* Handle the Wake on LAN Event */ 4210 if (status & ET_INTR_WOL) { 4211 /* This is a secondary interrupt for wake on LAN. The driver 4212 * should never see this, if it does, something serious is 4213 * wrong. We will TRAP the message when we are in DBG mode, 4214 * otherwise we will ignore it. 4215 */ 4216 dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n"); 4217 } 4218 4219 /* Let's move on to the TxMac */ 4220 if (status & ET_INTR_TXMAC) { 4221 u32 err = readl(&iomem->txmac.err); 4222 4223 /* When any of the errors occur and TXMAC generates an 4224 * interrupt to report these errors, it usually means that 4225 * TXMAC has detected an error in the data stream retrieved 4226 * from the on-chip Tx Q. All of these errors are catastrophic 4227 * and TXMAC won't be able to recover data when these errors 4228 * occur. In a nutshell, the whole Tx path will have to be reset 4229 * and re-configured afterwards. 4230 */ 4231 dev_warn(&adapter->pdev->dev, 4232 "TXMAC interrupt, error 0x%08x\n", 4233 err); 4234 4235 /* If we are debugging, we want to see this error, otherwise we 4236 * just want the device to be reset and continue 4237 */ 4238 } 4239 4240 /* Handle RXMAC Interrupt */ 4241 if (status & ET_INTR_RXMAC) { 4242 /* These interrupts are catastrophic to the device, what we need 4243 * to do is disable the interrupts and set the flag to cause us 4244 * to reset so we can solve this issue. 
4245 */ 4246 /* MP_SET_FLAG( adapter, FMP_ADAPTER_HARDWARE_ERROR); */ 4247 4248 dev_warn(&adapter->pdev->dev, 4249 "RXMAC interrupt, error 0x%08x. Requesting reset\n", 4250 readl(&iomem->rxmac.err_reg)); 4251 4252 dev_warn(&adapter->pdev->dev, 4253 "Enable 0x%08x, Diag 0x%08x\n", 4254 readl(&iomem->rxmac.ctrl), 4255 readl(&iomem->rxmac.rxq_diag)); 4256 4257 /* If we are debugging, we want to see this error, otherwise we 4258 * just want the device to be reset and continue 4259 */ 4260 } 4261 4262 /* Handle MAC_STAT Interrupt */ 4263 if (status & ET_INTR_MAC_STAT) { 4264 /* This means at least one of the un-masked counters in the 4265 * MAC_STAT block has rolled over. Use this to maintain the top, 4266 * software managed bits of the counter(s). 4267 */ 4268 et1310_handle_macstat_interrupt(adapter); 4269 } 4270 4271 /* Handle SLV Timeout Interrupt */ 4272 if (status & ET_INTR_SLV_TIMEOUT) { 4273 /* This means a timeout has occurred on a read or write request 4274 * to one of the JAGCore registers. The Global Resources block 4275 * has terminated the request and on a read request, returned a 4276 * "fake" value. The most likely reasons are: Bad Address or the 4277 * addressed module is in a power-down state and can't respond. 4278 */ 4279 } 4280out: 4281 et131x_enable_interrupts(adapter); 4282} 4283 4284/* et131x_stats - Return the current device statistics. 
4285 * @netdev: device whose stats are being queried 4286 * 4287 * Returns 0 on success, errno on failure (as defined in errno.h) 4288 */ 4289static struct net_device_stats *et131x_stats(struct net_device *netdev) 4290{ 4291 struct et131x_adapter *adapter = netdev_priv(netdev); 4292 struct net_device_stats *stats = &adapter->net_stats; 4293 struct ce_stats *devstat = &adapter->stats; 4294 4295 stats->rx_errors = devstat->rx_length_errs + 4296 devstat->rx_align_errs + 4297 devstat->rx_crc_errs + 4298 devstat->rx_code_violations + 4299 devstat->rx_other_errs; 4300 stats->tx_errors = devstat->tx_max_pkt_errs; 4301 stats->multicast = devstat->multicast_pkts_rcvd; 4302 stats->collisions = devstat->tx_collisions; 4303 4304 stats->rx_length_errors = devstat->rx_length_errs; 4305 stats->rx_over_errors = devstat->rx_overflows; 4306 stats->rx_crc_errors = devstat->rx_crc_errs; 4307 4308 /* NOTE: These stats don't have corresponding values in CE_STATS, 4309 * so we're going to have to update these directly from within the 4310 * TX/RX code 4311 */ 4312 /* stats->rx_bytes = 20; devstat->; */ 4313 /* stats->tx_bytes = 20; devstat->; */ 4314 /* stats->rx_dropped = devstat->; */ 4315 /* stats->tx_dropped = devstat->; */ 4316 4317 /* NOTE: Not used, can't find analogous statistics */ 4318 /* stats->rx_frame_errors = devstat->; */ 4319 /* stats->rx_fifo_errors = devstat->; */ 4320 /* stats->rx_missed_errors = devstat->; */ 4321 4322 /* stats->tx_aborted_errors = devstat->; */ 4323 /* stats->tx_carrier_errors = devstat->; */ 4324 /* stats->tx_fifo_errors = devstat->; */ 4325 /* stats->tx_heartbeat_errors = devstat->; */ 4326 /* stats->tx_window_errors = devstat->; */ 4327 return stats; 4328} 4329 4330/* et131x_open - Open the device for use. 
4331 * @netdev: device to be opened 4332 * 4333 * Returns 0 on success, errno on failure (as defined in errno.h) 4334 */ 4335static int et131x_open(struct net_device *netdev) 4336{ 4337 struct et131x_adapter *adapter = netdev_priv(netdev); 4338 struct pci_dev *pdev = adapter->pdev; 4339 unsigned int irq = pdev->irq; 4340 int result; 4341 4342 /* Start the timer to track NIC errors */ 4343 init_timer(&adapter->error_timer); 4344 adapter->error_timer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000; 4345 adapter->error_timer.function = et131x_error_timer_handler; 4346 adapter->error_timer.data = (unsigned long)adapter; 4347 add_timer(&adapter->error_timer); 4348 4349 result = request_irq(irq, et131x_isr, 4350 IRQF_SHARED, netdev->name, netdev); 4351 if (result) { 4352 dev_err(&pdev->dev, "could not register IRQ %d\n", irq); 4353 return result; 4354 } 4355 4356 adapter->flags |= FMP_ADAPTER_INTERRUPT_IN_USE; 4357 4358 et131x_up(netdev); 4359 4360 return result; 4361} 4362 4363/* et131x_close - Close the device 4364 * @netdev: device to be closed 4365 * 4366 * Returns 0 on success, errno on failure (as defined in errno.h) 4367 */ 4368static int et131x_close(struct net_device *netdev) 4369{ 4370 struct et131x_adapter *adapter = netdev_priv(netdev); 4371 4372 et131x_down(netdev); 4373 4374 adapter->flags &= ~FMP_ADAPTER_INTERRUPT_IN_USE; 4375 free_irq(adapter->pdev->irq, netdev); 4376 4377 /* Stop the error timer */ 4378 return del_timer_sync(&adapter->error_timer); 4379} 4380 4381/* et131x_ioctl - The I/O Control handler for the driver 4382 * @netdev: device on which the control request is being made 4383 * @reqbuf: a pointer to the IOCTL request buffer 4384 * @cmd: the IOCTL command code 4385 * 4386 * Returns 0 on success, errno on failure (as defined in errno.h) 4387 */ 4388static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf, 4389 int cmd) 4390{ 4391 struct et131x_adapter *adapter = netdev_priv(netdev); 4392 4393 if (!adapter->phydev) 4394 return 
-EINVAL; 4395 4396 return phy_mii_ioctl(adapter->phydev, reqbuf, cmd); 4397} 4398 4399/* et131x_set_packet_filter - Configures the Rx Packet filtering on the device 4400 * @adapter: pointer to our private adapter structure 4401 * 4402 * FIXME: lot of dups with MAC code 4403 * 4404 * Returns 0 on success, errno on failure 4405 */ 4406static int et131x_set_packet_filter(struct et131x_adapter *adapter) 4407{ 4408 int filter = adapter->packet_filter; 4409 int status = 0; 4410 u32 ctrl; 4411 u32 pf_ctrl; 4412 4413 ctrl = readl(&adapter->regs->rxmac.ctrl); 4414 pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl); 4415 4416 /* Default to disabled packet filtering. Enable it in the individual 4417 * case statements that require the device to filter something 4418 */ 4419 ctrl |= 0x04; 4420 4421 /* Set us to be in promiscuous mode so we receive everything, this 4422 * is also true when we get a packet filter of 0 4423 */ 4424 if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0) 4425 pf_ctrl &= ~7; /* Clear filter bits */ 4426 else { 4427 /* Set us up with Multicast packet filtering. Three cases are 4428 * possible - (1) we have a multi-cast list, (2) we receive ALL 4429 * multicast entries or (3) we receive none. 
4430 */ 4431 if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST) 4432 pf_ctrl &= ~2; /* Multicast filter bit */ 4433 else { 4434 et1310_setup_device_for_multicast(adapter); 4435 pf_ctrl |= 2; 4436 ctrl &= ~0x04; 4437 } 4438 4439 /* Set us up with Unicast packet filtering */ 4440 if (filter & ET131X_PACKET_TYPE_DIRECTED) { 4441 et1310_setup_device_for_unicast(adapter); 4442 pf_ctrl |= 4; 4443 ctrl &= ~0x04; 4444 } 4445 4446 /* Set us up with Broadcast packet filtering */ 4447 if (filter & ET131X_PACKET_TYPE_BROADCAST) { 4448 pf_ctrl |= 1; /* Broadcast filter bit */ 4449 ctrl &= ~0x04; 4450 } else 4451 pf_ctrl &= ~1; 4452 4453 /* Setup the receive mac configuration registers - Packet 4454 * Filter control + the enable / disable for packet filter 4455 * in the control reg. 4456 */ 4457 writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl); 4458 writel(ctrl, &adapter->regs->rxmac.ctrl); 4459 } 4460 return status; 4461} 4462 4463/* et131x_multicast - The handler to configure multicasting on the interface 4464 * @netdev: a pointer to a net_device struct representing the device 4465 */ 4466static void et131x_multicast(struct net_device *netdev) 4467{ 4468 struct et131x_adapter *adapter = netdev_priv(netdev); 4469 int packet_filter; 4470 unsigned long flags; 4471 struct netdev_hw_addr *ha; 4472 int i; 4473 4474 spin_lock_irqsave(&adapter->lock, flags); 4475 4476 /* Before we modify the platform-independent filter flags, store them 4477 * locally. This allows us to determine if anything's changed and if 4478 * we even need to bother the hardware 4479 */ 4480 packet_filter = adapter->packet_filter; 4481 4482 /* Clear the 'multicast' flag locally; because we only have a single 4483 * flag to check multicast, and multiple multicast addresses can be 4484 * set, this is the easiest way to determine if more than one 4485 * multicast address is being set. 
4486 */ 4487 packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST; 4488 4489 /* Check the net_device flags and set the device independent flags 4490 * accordingly 4491 */ 4492 4493 if (netdev->flags & IFF_PROMISC) 4494 adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS; 4495 else 4496 adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS; 4497 4498 if (netdev->flags & IFF_ALLMULTI) 4499 adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST; 4500 4501 if (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST) 4502 adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST; 4503 4504 if (netdev_mc_count(netdev) < 1) { 4505 adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST; 4506 adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST; 4507 } else 4508 adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST; 4509 4510 /* Set values in the private adapter struct */ 4511 i = 0; 4512 netdev_for_each_mc_addr(ha, netdev) { 4513 if (i == NIC_MAX_MCAST_LIST) 4514 break; 4515 memcpy(adapter->multicast_list[i++], ha->addr, ETH_ALEN); 4516 } 4517 adapter->multicast_addr_count = i; 4518 4519 /* Are the new flags different from the previous ones? If not, then no 4520 * action is required 4521 * 4522 * NOTE - This block will always update the multicast_list with the 4523 * hardware, even if the addresses aren't the same. 
4524 */ 4525 if (packet_filter != adapter->packet_filter) { 4526 /* Call the device's filter function */ 4527 et131x_set_packet_filter(adapter); 4528 } 4529 spin_unlock_irqrestore(&adapter->lock, flags); 4530} 4531 4532/* et131x_tx - The handler to tx a packet on the device 4533 * @skb: data to be Tx'd 4534 * @netdev: device on which data is to be Tx'd 4535 * 4536 * Returns 0 on success, errno on failure (as defined in errno.h) 4537 */ 4538static int et131x_tx(struct sk_buff *skb, struct net_device *netdev) 4539{ 4540 int status = 0; 4541 struct et131x_adapter *adapter = netdev_priv(netdev); 4542 4543 /* stop the queue if it's getting full */ 4544 if (adapter->tx_ring.used >= NUM_TCB - 1 && 4545 !netif_queue_stopped(netdev)) 4546 netif_stop_queue(netdev); 4547 4548 /* Save the timestamp for the TX timeout watchdog */ 4549 netdev->trans_start = jiffies; 4550 4551 /* Call the device-specific data Tx routine */ 4552 status = et131x_send_packets(skb, netdev); 4553 4554 /* Check status and manage the netif queue if necessary */ 4555 if (status != 0) { 4556 if (status == -ENOMEM) 4557 status = NETDEV_TX_BUSY; 4558 else 4559 status = NETDEV_TX_OK; 4560 } 4561 return status; 4562} 4563 4564/* et131x_tx_timeout - Timeout handler 4565 * @netdev: a pointer to a net_device struct representing the device 4566 * 4567 * The handler called when a Tx request times out. The timeout period is 4568 * specified by the 'tx_timeo" element in the net_device structure (see 4569 * et131x_alloc_device() to see how this value is set). 4570 */ 4571static void et131x_tx_timeout(struct net_device *netdev) 4572{ 4573 struct et131x_adapter *adapter = netdev_priv(netdev); 4574 struct tcb *tcb; 4575 unsigned long flags; 4576 4577 /* If the device is closed, ignore the timeout */ 4578 if (~(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE)) 4579 return; 4580 4581 /* Any nonrecoverable hardware error? 
4582 * Checks adapter->flags for any failure in phy reading 4583 */ 4584 if (adapter->flags & FMP_ADAPTER_NON_RECOVER_ERROR) 4585 return; 4586 4587 /* Hardware failure? */ 4588 if (adapter->flags & FMP_ADAPTER_HARDWARE_ERROR) { 4589 dev_err(&adapter->pdev->dev, "hardware error - reset\n"); 4590 return; 4591 } 4592 4593 /* Is send stuck? */ 4594 spin_lock_irqsave(&adapter->tcb_send_qlock, flags); 4595 4596 tcb = adapter->tx_ring.send_head; 4597 4598 if (tcb != NULL) { 4599 tcb->count++; 4600 4601 if (tcb->count > NIC_SEND_HANG_THRESHOLD) { 4602 spin_unlock_irqrestore(&adapter->tcb_send_qlock, 4603 flags); 4604 4605 dev_warn(&adapter->pdev->dev, 4606 "Send stuck - reset. tcb->WrIndex %x, flags 0x%08x\n", 4607 tcb->index, 4608 tcb->flags); 4609 4610 adapter->net_stats.tx_errors++; 4611 4612 /* perform reset of tx/rx */ 4613 et131x_disable_txrx(netdev); 4614 et131x_enable_txrx(netdev); 4615 return; 4616 } 4617 } 4618 4619 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); 4620} 4621 4622/* et131x_change_mtu - The handler called to change the MTU for the device 4623 * @netdev: device whose MTU is to be changed 4624 * @new_mtu: the desired MTU 4625 * 4626 * Returns 0 on success, errno on failure (as defined in errno.h) 4627 */ 4628static int et131x_change_mtu(struct net_device *netdev, int new_mtu) 4629{ 4630 int result = 0; 4631 struct et131x_adapter *adapter = netdev_priv(netdev); 4632 4633 /* Make sure the requested MTU is valid */ 4634 if (new_mtu < 64 || new_mtu > 9216) 4635 return -EINVAL; 4636 4637 et131x_disable_txrx(netdev); 4638 et131x_handle_send_interrupt(adapter); 4639 et131x_handle_recv_interrupt(adapter); 4640 4641 /* Set the new MTU */ 4642 netdev->mtu = new_mtu; 4643 4644 /* Free Rx DMA memory */ 4645 et131x_adapter_memory_free(adapter); 4646 4647 /* Set the config parameter for Jumbo Packet support */ 4648 adapter->registry_jumbo_packet = new_mtu + 14; 4649 et131x_soft_reset(adapter); 4650 4651 /* Alloc and init Rx DMA memory */ 4652 result = 
et131x_adapter_memory_alloc(adapter); 4653 if (result != 0) { 4654 dev_warn(&adapter->pdev->dev, 4655 "Change MTU failed; couldn't re-alloc DMA memory\n"); 4656 return result; 4657 } 4658 4659 et131x_init_send(adapter); 4660 4661 et131x_hwaddr_init(adapter); 4662 memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN); 4663 4664 /* Init the device with the new settings */ 4665 et131x_adapter_setup(adapter); 4666 4667 et131x_enable_txrx(netdev); 4668 4669 return result; 4670} 4671 4672/* et131x_set_mac_addr - handler to change the MAC address for the device 4673 * @netdev: device whose MAC is to be changed 4674 * @new_mac: the desired MAC address 4675 * 4676 * Returns 0 on success, errno on failure (as defined in errno.h) 4677 * 4678 * IMPLEMENTED BY : blux http://berndlux.de 22.01.2007 21:14 4679 */ 4680static int et131x_set_mac_addr(struct net_device *netdev, void *new_mac) 4681{ 4682 int result = 0; 4683 struct et131x_adapter *adapter = netdev_priv(netdev); 4684 struct sockaddr *address = new_mac; 4685 4686 /* begin blux */ 4687 4688 if (adapter == NULL) 4689 return -ENODEV; 4690 4691 /* Make sure the requested MAC is valid */ 4692 if (!is_valid_ether_addr(address->sa_data)) 4693 return -EADDRNOTAVAIL; 4694 4695 et131x_disable_txrx(netdev); 4696 et131x_handle_send_interrupt(adapter); 4697 et131x_handle_recv_interrupt(adapter); 4698 4699 /* Set the new MAC */ 4700 /* netdev->set_mac_address = &new_mac; */ 4701 4702 memcpy(netdev->dev_addr, address->sa_data, netdev->addr_len); 4703 4704 netdev_info(netdev, "Setting MAC address to %pM\n", 4705 netdev->dev_addr); 4706 4707 /* Free Rx DMA memory */ 4708 et131x_adapter_memory_free(adapter); 4709 4710 et131x_soft_reset(adapter); 4711 4712 /* Alloc and init Rx DMA memory */ 4713 result = et131x_adapter_memory_alloc(adapter); 4714 if (result != 0) { 4715 dev_err(&adapter->pdev->dev, 4716 "Change MAC failed; couldn't re-alloc DMA memory\n"); 4717 return result; 4718 } 4719 4720 et131x_init_send(adapter); 4721 4722 
et131x_hwaddr_init(adapter); 4723 4724 /* Init the device with the new settings */ 4725 et131x_adapter_setup(adapter); 4726 4727 et131x_enable_txrx(netdev); 4728 4729 return result; 4730} 4731 4732static const struct net_device_ops et131x_netdev_ops = { 4733 .ndo_open = et131x_open, 4734 .ndo_stop = et131x_close, 4735 .ndo_start_xmit = et131x_tx, 4736 .ndo_set_rx_mode = et131x_multicast, 4737 .ndo_tx_timeout = et131x_tx_timeout, 4738 .ndo_change_mtu = et131x_change_mtu, 4739 .ndo_set_mac_address = et131x_set_mac_addr, 4740 .ndo_validate_addr = eth_validate_addr, 4741 .ndo_get_stats = et131x_stats, 4742 .ndo_do_ioctl = et131x_ioctl, 4743}; 4744 4745/* et131x_pci_setup - Perform device initialization 4746 * @pdev: a pointer to the device's pci_dev structure 4747 * @ent: this device's entry in the pci_device_id table 4748 * 4749 * Returns 0 on success, errno on failure (as defined in errno.h) 4750 * 4751 * Registered in the pci_driver structure, this function is called when the 4752 * PCI subsystem finds a new PCI device which matches the information 4753 * contained in the pci_device_id table. This routine is the equivalent to 4754 * a device insertion routine. 
4755 */ 4756static int et131x_pci_setup(struct pci_dev *pdev, 4757 const struct pci_device_id *ent) 4758{ 4759 struct net_device *netdev; 4760 struct et131x_adapter *adapter; 4761 int rc; 4762 int ii; 4763 4764 rc = pci_enable_device(pdev); 4765 if (rc < 0) { 4766 dev_err(&pdev->dev, "pci_enable_device() failed\n"); 4767 goto out; 4768 } 4769 4770 /* Perform some basic PCI checks */ 4771 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 4772 dev_err(&pdev->dev, "Can't find PCI device's base address\n"); 4773 rc = -ENODEV; 4774 goto err_disable; 4775 } 4776 4777 rc = pci_request_regions(pdev, DRIVER_NAME); 4778 if (rc < 0) { 4779 dev_err(&pdev->dev, "Can't get PCI resources\n"); 4780 goto err_disable; 4781 } 4782 4783 pci_set_master(pdev); 4784 4785 /* Check the DMA addressing support of this device */ 4786 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) && 4787 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { 4788 dev_err(&pdev->dev, "No usable DMA addressing method\n"); 4789 rc = -EIO; 4790 goto err_release_res; 4791 } 4792 4793 /* Allocate netdev and private adapter structs */ 4794 netdev = alloc_etherdev(sizeof(struct et131x_adapter)); 4795 if (!netdev) { 4796 dev_err(&pdev->dev, "Couldn't alloc netdev struct\n"); 4797 rc = -ENOMEM; 4798 goto err_release_res; 4799 } 4800 4801 netdev->watchdog_timeo = ET131X_TX_TIMEOUT; 4802 netdev->netdev_ops = &et131x_netdev_ops; 4803 4804 SET_NETDEV_DEV(netdev, &pdev->dev); 4805 SET_ETHTOOL_OPS(netdev, &et131x_ethtool_ops); 4806 4807 adapter = et131x_adapter_init(netdev, pdev); 4808 4809 rc = et131x_pci_init(adapter, pdev); 4810 if (rc < 0) 4811 goto err_free_dev; 4812 4813 /* Map the bus-relative registers to system virtual memory */ 4814 adapter->regs = pci_ioremap_bar(pdev, 0); 4815 if (!adapter->regs) { 4816 dev_err(&pdev->dev, "Cannot map device registers\n"); 4817 rc = -ENOMEM; 4818 goto err_free_dev; 4819 } 4820 4821 /* If Phy COMA mode was enabled when we went down, disable it here. 
*/ 4822 writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr); 4823 4824 /* Issue a global reset to the et1310 */ 4825 et131x_soft_reset(adapter); 4826 4827 /* Disable all interrupts (paranoid) */ 4828 et131x_disable_interrupts(adapter); 4829 4830 /* Allocate DMA memory */ 4831 rc = et131x_adapter_memory_alloc(adapter); 4832 if (rc < 0) { 4833 dev_err(&pdev->dev, "Could not alloc adapater memory (DMA)\n"); 4834 goto err_iounmap; 4835 } 4836 4837 /* Init send data structures */ 4838 et131x_init_send(adapter); 4839 4840 /* Set up the task structure for the ISR's deferred handler */ 4841 INIT_WORK(&adapter->task, et131x_isr_handler); 4842 4843 /* Copy address into the net_device struct */ 4844 memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN); 4845 4846 /* Init variable for counting how long we do not have link status */ 4847 adapter->boot_coma = 0; 4848 et1310_disable_phy_coma(adapter); 4849 4850 rc = -ENOMEM; 4851 4852 /* Setup the mii_bus struct */ 4853 adapter->mii_bus = mdiobus_alloc(); 4854 if (!adapter->mii_bus) { 4855 dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n"); 4856 goto err_mem_free; 4857 } 4858 4859 adapter->mii_bus->name = "et131x_eth_mii"; 4860 snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x", 4861 (adapter->pdev->bus->number << 8) | adapter->pdev->devfn); 4862 adapter->mii_bus->priv = netdev; 4863 adapter->mii_bus->read = et131x_mdio_read; 4864 adapter->mii_bus->write = et131x_mdio_write; 4865 adapter->mii_bus->reset = et131x_mdio_reset; 4866 adapter->mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), 4867 GFP_KERNEL); 4868 if (!adapter->mii_bus->irq) 4869 goto err_mdio_free; 4870 4871 for (ii = 0; ii < PHY_MAX_ADDR; ii++) 4872 adapter->mii_bus->irq[ii] = PHY_POLL; 4873 4874 rc = mdiobus_register(adapter->mii_bus); 4875 if (rc < 0) { 4876 dev_err(&pdev->dev, "failed to register MII bus\n"); 4877 goto err_mdio_free_irq; 4878 } 4879 4880 rc = et131x_mii_probe(netdev); 4881 if (rc < 0) { 4882 dev_err(&pdev->dev, "failed to probe MII 
bus\n"); 4883 goto err_mdio_unregister; 4884 } 4885 4886 /* Setup et1310 as per the documentation */ 4887 et131x_adapter_setup(adapter); 4888 4889 /* We can enable interrupts now 4890 * 4891 * NOTE - Because registration of interrupt handler is done in the 4892 * device's open(), defer enabling device interrupts to that 4893 * point 4894 */ 4895 4896 /* Register the net_device struct with the Linux network layer */ 4897 rc = register_netdev(netdev); 4898 if (rc < 0) { 4899 dev_err(&pdev->dev, "register_netdev() failed\n"); 4900 goto err_phy_disconnect; 4901 } 4902 4903 /* Register the net_device struct with the PCI subsystem. Save a copy 4904 * of the PCI config space for this device now that the device has 4905 * been initialized, just in case it needs to be quickly restored. 4906 */ 4907 pci_set_drvdata(pdev, netdev); 4908out: 4909 return rc; 4910 4911err_phy_disconnect: 4912 phy_disconnect(adapter->phydev); 4913err_mdio_unregister: 4914 mdiobus_unregister(adapter->mii_bus); 4915err_mdio_free_irq: 4916 kfree(adapter->mii_bus->irq); 4917err_mdio_free: 4918 mdiobus_free(adapter->mii_bus); 4919err_mem_free: 4920 et131x_adapter_memory_free(adapter); 4921err_iounmap: 4922 iounmap(adapter->regs); 4923err_free_dev: 4924 pci_dev_put(pdev); 4925 free_netdev(netdev); 4926err_release_res: 4927 pci_release_regions(pdev); 4928err_disable: 4929 pci_disable_device(pdev); 4930 goto out; 4931} 4932 4933static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = { 4934 { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL}, 4935 { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL}, 4936 {0,} 4937}; 4938MODULE_DEVICE_TABLE(pci, et131x_pci_table); 4939 4940static struct pci_driver et131x_driver = { 4941 .name = DRIVER_NAME, 4942 .id_table = et131x_pci_table, 4943 .probe = et131x_pci_setup, 4944 .remove = et131x_pci_remove, 4945 .driver.pm = ET131X_PM_OPS, 4946}; 4947 4948module_pci_driver(et131x_driver);