Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
mv643xx_eth driver source, at v2.6.29-rc3 (2814 lines, 67 kB)
/*
 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
 *
 * Based on the 64360 driver from:
 *	Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
 *		      Rabeeh Khoury <rabeeh@marvell.com>
 *
 * Copyright (C) 2003 PMC-Sierra, Inc.,
 *	written by Manish Lachwani
 *
 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
 *
 * Copyright (C) 2004-2006 MontaVista Software, Inc.
 *			   Dale Farnsworth <dale@farnsworth.org>
 *
 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
 *				     <sjhill@realitydiluted.com>
 *
 * Copyright (C) 2007-2008 Marvell Semiconductor
 *			   Lennert Buytenhek <buytenh@marvell.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */

#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/phy.h>
#include <linux/mv643xx_eth.h>
#include <linux/io.h>
#include <linux/types.h>
#include <asm/system.h>

static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.4";


/*
 * Registers shared between all ports.
 */
#define PHY_ADDR			0x0000
#define SMI_REG				0x0004
#define  SMI_BUSY			0x10000000
#define  SMI_READ_VALID			0x08000000
#define  SMI_OPCODE_READ		0x04000000
#define  SMI_OPCODE_WRITE		0x00000000
#define ERR_INT_CAUSE			0x0080
#define  ERR_INT_SMI_DONE		0x00000010
#define ERR_INT_MASK			0x0084
#define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
#define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
#define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
#define WINDOW_BAR_ENABLE		0x0290
#define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))
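/*
 * Worked example (editorial illustration): the base/size registers of
 * each MBUS window are laid out with an 8-byte stride, so WINDOW_BASE(1)
 * expands to 0x0200 + (1 << 3) = 0x0208 and WINDOW_SIZE(1) to 0x020c,
 * while the protect registers use a 16-byte stride, giving
 * WINDOW_PROTECT(1) = 0x02a4.
 */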
/*
 * Main per-port registers.  These live at offset 0x0400 for
 * port #0, 0x0800 for port #1, and 0x0c00 for port #2.
 */
#define PORT_CONFIG			0x0000
#define  UNICAST_PROMISCUOUS_MODE	0x00000001
#define PORT_CONFIG_EXT			0x0004
#define MAC_ADDR_LOW			0x0014
#define MAC_ADDR_HIGH			0x0018
#define SDMA_CONFIG			0x001c
#define PORT_SERIAL_CONTROL		0x003c
#define PORT_STATUS			0x0044
#define  TX_FIFO_EMPTY			0x00000400
#define  TX_IN_PROGRESS			0x00000080
#define  PORT_SPEED_MASK		0x00000030
#define  PORT_SPEED_1000		0x00000010
#define  PORT_SPEED_100			0x00000020
#define  PORT_SPEED_10			0x00000000
#define  FLOW_CONTROL_ENABLED		0x00000008
#define  FULL_DUPLEX			0x00000004
#define  LINK_UP			0x00000002
#define TXQ_COMMAND			0x0048
#define TXQ_FIX_PRIO_CONF		0x004c
#define TX_BW_RATE			0x0050
#define TX_BW_MTU			0x0058
#define TX_BW_BURST			0x005c
#define INT_CAUSE			0x0060
#define  INT_TX_END			0x07f80000
#define  INT_RX				0x000003fc
#define  INT_EXT			0x00000002
#define INT_CAUSE_EXT			0x0064
#define  INT_EXT_LINK_PHY		0x00110000
#define  INT_EXT_TX			0x000000ff
#define INT_MASK			0x0068
#define INT_MASK_EXT			0x006c
#define TX_FIFO_URGENT_THRESHOLD	0x0074
#define TXQ_FIX_PRIO_CONF_MOVED		0x00dc
#define TX_BW_RATE_MOVED		0x00e0
#define TX_BW_MTU_MOVED			0x00e8
#define TX_BW_BURST_MOVED		0x00ec
#define RXQ_CURRENT_DESC_PTR(q)		(0x020c + ((q) << 4))
#define RXQ_COMMAND			0x0280
#define TXQ_CURRENT_DESC_PTR(q)		(0x02c0 + ((q) << 2))
#define TXQ_BW_TOKENS(q)		(0x0300 + ((q) << 4))
#define TXQ_BW_CONF(q)			(0x0304 + ((q) << 4))
#define TXQ_BW_WRR_CONF(q)		(0x0308 + ((q) << 4))

/*
 * Misc per-port registers.
 */
#define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
#define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
#define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
#define UNICAST_TABLE(p)		(0x1600 + ((p) << 10))


/*
 * SDMA configuration register.
 */
#define RX_BURST_SIZE_4_64BIT		(2 << 1)
#define RX_BURST_SIZE_16_64BIT		(4 << 1)
#define BLM_RX_NO_SWAP			(1 << 4)
#define BLM_TX_NO_SWAP			(1 << 5)
#define TX_BURST_SIZE_4_64BIT		(2 << 22)
#define TX_BURST_SIZE_16_64BIT		(4 << 22)

#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		(RX_BURST_SIZE_4_64BIT	|	\
		 TX_BURST_SIZE_4_64BIT)
#elif defined(__LITTLE_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		(RX_BURST_SIZE_4_64BIT	|	\
		 BLM_RX_NO_SWAP		|	\
		 BLM_TX_NO_SWAP		|	\
		 TX_BURST_SIZE_4_64BIT)
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif


/*
 * Port serial control register.
 */
#define SET_MII_SPEED_TO_100		(1 << 24)
#define SET_GMII_SPEED_TO_1000		(1 << 23)
#define SET_FULL_DUPLEX_MODE		(1 << 21)
#define MAX_RX_PACKET_9700BYTE		(5 << 17)
#define DISABLE_AUTO_NEG_SPEED_GMII	(1 << 13)
#define DO_NOT_FORCE_LINK_FAIL		(1 << 10)
#define SERIAL_PORT_CONTROL_RESERVED	(1 << 9)
#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL	(1 << 3)
#define DISABLE_AUTO_NEG_FOR_DUPLEX	(1 << 2)
#define FORCE_LINK_PASS			(1 << 1)
#define SERIAL_PORT_ENABLE		(1 << 0)

#define DEFAULT_RX_QUEUE_SIZE		128
#define DEFAULT_TX_QUEUE_SIZE		256
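/*
 * Worked example (editorial illustration): on a little-endian host
 * PORT_SDMA_CONFIG_DEFAULT_VALUE above evaluates to
 * (2 << 1) | (1 << 4) | (1 << 5) | (2 << 22) = 0x00800034, i.e. 4x64-bit
 * bursts in both directions with the DMA engine's byte swapping turned
 * off; on big-endian it is 0x00800004, leaving byte swapping enabled.
 */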
/*
 * RX/TX descriptors.
 */
#if defined(__BIG_ENDIAN)
struct rx_desc {
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u16 buf_size;		/* Buffer size				*/
	u32 cmd_sts;		/* Descriptor command status		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
};

struct tx_desc {
	u16 byte_cnt;		/* buffer byte count			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u32 cmd_sts;		/* Command/status field			*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor */
};
#elif defined(__LITTLE_ENDIAN)
struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status		*/
	u16 buf_size;		/* Buffer size				*/
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
};

struct tx_desc {
	u32 cmd_sts;		/* Command/status field			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u16 byte_cnt;		/* buffer byte count			*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor */
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

/* RX & TX descriptor command */
#define BUFFER_OWNED_BY_DMA		0x80000000

/* RX & TX descriptor status */
#define ERROR_SUMMARY			0x00000001

/* RX descriptor status */
#define LAYER_4_CHECKSUM_OK		0x40000000
#define RX_ENABLE_INTERRUPT		0x20000000
#define RX_FIRST_DESC			0x08000000
#define RX_LAST_DESC			0x04000000

/* TX descriptor command */
#define TX_ENABLE_INTERRUPT		0x00800000
#define GEN_CRC				0x00400000
#define TX_FIRST_DESC			0x00200000
#define TX_LAST_DESC			0x00100000
#define ZERO_PADDING			0x00080000
#define GEN_IP_V4_CHECKSUM		0x00040000
#define GEN_TCP_UDP_CHECKSUM		0x00020000
#define UDP_FRAME			0x00010000
#define MAC_HDR_EXTRA_4_BYTES		0x00008000
#define MAC_HDR_EXTRA_8_BYTES		0x00000200

#define TX_IHL_SHIFT			11
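/*
 * Editorial note on descriptor ownership: BUFFER_OWNED_BY_DMA is the
 * handover bit for both rings.  The driver fills in a descriptor and
 * then sets this bit (behind a wmb()) to pass the buffer to the
 * hardware; the hardware clears it again on completion, which is what
 * rxq_process() and txq_reclaim() below test before touching a
 * descriptor.
 */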
/* global *******************************************************************/
struct mv643xx_eth_shared_private {
	/*
	 * Ethernet controller base address.
	 */
	void __iomem *base;

	/*
	 * Points at the right SMI instance to use.
	 */
	struct mv643xx_eth_shared_private *smi;

	/*
	 * Provides access to local SMI interface.
	 */
	struct mii_bus *smi_bus;

	/*
	 * If we have access to the error interrupt pin (which is
	 * somewhat misnamed as it not only reflects internal errors
	 * but also reflects SMI completion), use that to wait for
	 * SMI access completion instead of polling the SMI busy bit.
	 */
	int err_interrupt;
	wait_queue_head_t smi_busy_wait;

	/*
	 * Per-port MBUS window access register value.
	 */
	u32 win_protect;

	/*
	 * Hardware-specific parameters.
	 */
	unsigned int t_clk;
	int extended_rx_coal_limit;
	int tx_bw_control;
};

#define TX_BW_CONTROL_ABSENT		0
#define TX_BW_CONTROL_OLD_LAYOUT	1
#define TX_BW_CONTROL_NEW_LAYOUT	2


/* per-port *****************************************************************/
struct mib_counters {
	u64 good_octets_received;
	u32 bad_octets_received;
	u32 internal_mac_transmit_err;
	u32 good_frames_received;
	u32 bad_frames_received;
	u32 broadcast_frames_received;
	u32 multicast_frames_received;
	u32 frames_64_octets;
	u32 frames_65_to_127_octets;
	u32 frames_128_to_255_octets;
	u32 frames_256_to_511_octets;
	u32 frames_512_to_1023_octets;
	u32 frames_1024_to_max_octets;
	u64 good_octets_sent;
	u32 good_frames_sent;
	u32 excessive_collision;
	u32 multicast_frames_sent;
	u32 broadcast_frames_sent;
	u32 unrec_mac_control_received;
	u32 fc_sent;
	u32 good_fc_received;
	u32 bad_fc_received;
	u32 undersize_received;
	u32 fragments_received;
	u32 oversize_received;
	u32 jabber_received;
	u32 mac_receive_error;
	u32 bad_crc_event;
	u32 collision;
	u32 late_collision;
};

struct rx_queue {
	int index;

	int rx_ring_size;

	int rx_desc_count;
	int rx_curr_desc;
	int rx_used_desc;

	struct rx_desc *rx_desc_area;
	dma_addr_t rx_desc_dma;
	int rx_desc_area_size;
	struct sk_buff **rx_skb;
};

struct tx_queue {
	int index;

	int tx_ring_size;

	int tx_desc_count;
	int tx_curr_desc;
	int tx_used_desc;

	struct tx_desc *tx_desc_area;
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;

	struct sk_buff_head tx_skb;

	unsigned long tx_packets;
	unsigned long tx_bytes;
	unsigned long tx_dropped;
};
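/*
 * Editorial note: in struct mv643xx_eth_private below, the work_tx,
 * work_tx_end, work_rx, work_rx_refill and work_rx_oom fields are
 * per-queue bitmasks (bit n corresponds to queue n, hence the u8 type
 * and the eight-entry rxq[]/txq[] arrays); work_link is a plain flag.
 * The interrupt handler only sets bits here; the actual work is done
 * from the NAPI poller.
 */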
struct mv643xx_eth_private {
	struct mv643xx_eth_shared_private *shared;
	void __iomem *base;
	int port_num;

	struct net_device *dev;

	struct phy_device *phy;

	struct timer_list mib_counters_timer;
	spinlock_t mib_counters_lock;
	struct mib_counters mib_counters;

	struct work_struct tx_timeout_task;

	struct napi_struct napi;
	u8 work_link;
	u8 work_tx;
	u8 work_tx_end;
	u8 work_rx;
	u8 work_rx_refill;
	u8 work_rx_oom;

	int skb_size;
	struct sk_buff_head rx_recycle;

	/*
	 * RX state.
	 */
	int default_rx_ring_size;
	unsigned long rx_desc_sram_addr;
	int rx_desc_sram_size;
	int rxq_count;
	struct timer_list rx_oom;
	struct rx_queue rxq[8];

	/*
	 * TX state.
	 */
	int default_tx_ring_size;
	unsigned long tx_desc_sram_addr;
	int tx_desc_sram_size;
	int txq_count;
	struct tx_queue txq[8];
};


/* port register accessors **************************************************/
static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->shared->base + offset);
}

static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->base + offset);
}

static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->shared->base + offset);
}

static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->base + offset);
}


/* rxq/txq helper functions *************************************************/
static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
{
	return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
}

static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
{
	return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
}

static void rxq_enable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
}

static void rxq_disable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	u8 mask = 1 << rxq->index;

	wrlp(mp, RXQ_COMMAND, mask << 8);
	while (rdlp(mp, RXQ_COMMAND) & mask)
		udelay(10);
}

static void txq_reset_hw_ptr(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u32 addr;

	addr = (u32)txq->tx_desc_dma;
	addr += txq->tx_curr_desc * sizeof(struct tx_desc);
	wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr);
}

static void txq_enable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	wrlp(mp, TXQ_COMMAND, 1 << txq->index);
}

static void txq_disable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u8 mask = 1 << txq->index;

	wrlp(mp, TXQ_COMMAND, mask << 8);
	while (rdlp(mp, TXQ_COMMAND) & mask)
		udelay(10);
}

static void txq_maybe_wake(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);

	if (netif_tx_queue_stopped(nq)) {
		__netif_tx_lock(nq, smp_processor_id());
		if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
			netif_tx_wake_queue(nq);
		__netif_tx_unlock(nq);
	}
}
/* rx napi ******************************************************************/
static int rxq_process(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	struct net_device_stats *stats = &mp->dev->stats;
	int rx;

	rx = 0;
	while (rx < budget && rxq->rx_desc_count) {
		struct rx_desc *rx_desc;
		unsigned int cmd_sts;
		struct sk_buff *skb;
		u16 byte_cnt;

		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];

		cmd_sts = rx_desc->cmd_sts;
		if (cmd_sts & BUFFER_OWNED_BY_DMA)
			break;
		rmb();

		skb = rxq->rx_skb[rxq->rx_curr_desc];
		rxq->rx_skb[rxq->rx_curr_desc] = NULL;

		rxq->rx_curr_desc++;
		if (rxq->rx_curr_desc == rxq->rx_ring_size)
			rxq->rx_curr_desc = 0;

		dma_unmap_single(NULL, rx_desc->buf_ptr,
				 rx_desc->buf_size, DMA_FROM_DEVICE);
		rxq->rx_desc_count--;
		rx++;

		mp->work_rx_refill |= 1 << rxq->index;

		byte_cnt = rx_desc->byte_cnt;

		/*
		 * Update statistics.
		 *
		 * Note that the descriptor byte count includes 2 dummy
		 * bytes automatically inserted by the hardware at the
		 * start of the packet (which we don't count), and a 4
		 * byte CRC at the end of the packet (which we do count).
		 */
		stats->rx_packets++;
		stats->rx_bytes += byte_cnt - 2;

		/*
		 * In case we received a packet without first / last bits
		 * on, or the error summary bit is set, the packet needs
		 * to be dropped.
		 */
		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY))
			!= (RX_FIRST_DESC | RX_LAST_DESC))
			goto err;

		/*
		 * The -4 is for the CRC in the trailer of the
		 * received packet
		 */
		skb_put(skb, byte_cnt - 2 - 4);

		if (cmd_sts & LAYER_4_CHECKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->protocol = eth_type_trans(skb, mp->dev);
		netif_receive_skb(skb);

		continue;

err:
		stats->rx_dropped++;

		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
		    (RX_FIRST_DESC | RX_LAST_DESC)) {
			if (net_ratelimit())
				dev_printk(KERN_ERR, &mp->dev->dev,
					   "received packet spanning "
					   "multiple descriptors\n");
		}

		if (cmd_sts & ERROR_SUMMARY)
			stats->rx_errors++;

		dev_kfree_skb(skb);
	}

	if (rx < budget)
		mp->work_rx &= ~(1 << rxq->index);

	return rx;
}
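/*
 * Worked example of the byte accounting above (editorial): a
 * minimum-size 60-byte frame arrives with its 4-byte FCS appended and
 * 2 bytes of hardware padding prepended, so the descriptor reports
 * byte_cnt = 66.  The rx_bytes counter is credited with 64 (frame plus
 * FCS), and skb_put(skb, byte_cnt - 2 - 4) leaves exactly the 60 bytes
 * of frame data in the skb.
 */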
static int rxq_refill(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int refilled;

	refilled = 0;
	while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
		struct sk_buff *skb;
		int unaligned;
		int rx;
		struct rx_desc *rx_desc;

		skb = __skb_dequeue(&mp->rx_recycle);
		if (skb == NULL)
			skb = dev_alloc_skb(mp->skb_size +
					    dma_get_cache_alignment() - 1);

		if (skb == NULL) {
			mp->work_rx_oom |= 1 << rxq->index;
			goto oom;
		}

		unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
		if (unaligned)
			skb_reserve(skb, dma_get_cache_alignment() - unaligned);

		refilled++;
		rxq->rx_desc_count++;

		rx = rxq->rx_used_desc++;
		if (rxq->rx_used_desc == rxq->rx_ring_size)
			rxq->rx_used_desc = 0;

		rx_desc = rxq->rx_desc_area + rx;

		rx_desc->buf_ptr = dma_map_single(NULL, skb->data,
						  mp->skb_size, DMA_FROM_DEVICE);
		rx_desc->buf_size = mp->skb_size;
		rxq->rx_skb[rx] = skb;
		wmb();
		rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
		wmb();

		/*
		 * The hardware automatically prepends 2 bytes of
		 * dummy data to each received packet, so that the
		 * IP header ends up 16-byte aligned.
		 */
		skb_reserve(skb, 2);
	}

	if (refilled < budget)
		mp->work_rx_refill &= ~(1 << rxq->index);

oom:
	return refilled;
}


/* tx ***********************************************************************/
static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
{
	int frag;

	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
		skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
		if (fragp->size <= 8 && fragp->page_offset & 7)
			return 1;
	}

	return 0;
}

static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag;

	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag_t *this_frag;
		int tx_index;
		struct tx_desc *desc;

		this_frag = &skb_shinfo(skb)->frags[frag];
		tx_index = txq->tx_curr_desc++;
		if (txq->tx_curr_desc == txq->tx_ring_size)
			txq->tx_curr_desc = 0;
		desc = &txq->tx_desc_area[tx_index];

		/*
		 * The last fragment will generate an interrupt
		 * which will free the skb on TX completion.
		 */
		if (frag == nr_frags - 1) {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA |
					ZERO_PADDING | TX_LAST_DESC |
					TX_ENABLE_INTERRUPT;
		} else {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA;
		}

		desc->l4i_chk = 0;
		desc->byte_cnt = this_frag->size;
		desc->buf_ptr = dma_map_page(NULL, this_frag->page,
					     this_frag->page_offset,
					     this_frag->size,
					     DMA_TO_DEVICE);
	}
}

static inline __be16 sum16_as_be(__sum16 sum)
{
	return (__force __be16)sum;
}
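/*
 * Editorial note on the checksum offload path in txq_submit_skb()
 * below: the hardware can only checksum IPv4 frames whose L2 header is
 * 14, 18, 22 or 26 bytes long, i.e. an Ethernet header plus 0, 4, 8 or
 * 12 bytes of tags.  tag_bytes is that extra length, so the test
 * "tag_bytes & ~12" is non-zero exactly when the frame falls outside
 * those four cases and the driver must fall back to a software
 * checksum via skb_checksum_help().
 */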
static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int tx_index;
	struct tx_desc *desc;
	u32 cmd_sts;
	u16 l4i_chk;
	int length;

	cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
	l4i_chk = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int tag_bytes;

		BUG_ON(skb->protocol != htons(ETH_P_IP) &&
		       skb->protocol != htons(ETH_P_8021Q));

		tag_bytes = (void *)ip_hdr(skb) - (void *)skb->data - ETH_HLEN;
		if (unlikely(tag_bytes & ~12)) {
			if (skb_checksum_help(skb) == 0)
				goto no_csum;
			kfree_skb(skb);
			return 1;
		}

		if (tag_bytes & 4)
			cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
		if (tag_bytes & 8)
			cmd_sts |= MAC_HDR_EXTRA_8_BYTES;

		cmd_sts |= GEN_TCP_UDP_CHECKSUM |
			   GEN_IP_V4_CHECKSUM |
			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;

		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			cmd_sts |= UDP_FRAME;
			l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
			break;
		case IPPROTO_TCP:
			l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
			break;
		default:
			BUG();
		}
	} else {
no_csum:
		/* Errata BTS #50, IHL must be 5 if no HW checksum */
		cmd_sts |= 5 << TX_IHL_SHIFT;
	}

	tx_index = txq->tx_curr_desc++;
	if (txq->tx_curr_desc == txq->tx_ring_size)
		txq->tx_curr_desc = 0;
	desc = &txq->tx_desc_area[tx_index];

	if (nr_frags) {
		txq_submit_frag_skb(txq, skb);
		length = skb_headlen(skb);
	} else {
		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
		length = skb->len;
	}

	desc->l4i_chk = l4i_chk;
	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);

	__skb_queue_tail(&txq->tx_skb, skb);

	/* ensure all other descriptors are written before first cmd_sts */
	wmb();
	desc->cmd_sts = cmd_sts;

	/* clear TX_END status */
	mp->work_tx_end &= ~(1 << txq->index);

	/* ensure all descriptors are written before poking hardware */
	wmb();
	txq_enable(txq);

	txq->tx_desc_count += nr_frags + 1;

	return 0;
}

static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int queue;
	struct tx_queue *txq;
	struct netdev_queue *nq;

	queue = skb_get_queue_mapping(skb);
	txq = mp->txq + queue;
	nq = netdev_get_tx_queue(dev, queue);

	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
		txq->tx_dropped++;
		dev_printk(KERN_DEBUG, &dev->dev,
			   "failed to linearize skb with tiny "
			   "unaligned fragment\n");
		return NETDEV_TX_BUSY;
	}

	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
		if (net_ratelimit())
			dev_printk(KERN_ERR, &dev->dev, "tx queue full?!\n");
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (!txq_submit_skb(txq, skb)) {
		int entries_left;

		txq->tx_bytes += skb->len;
		txq->tx_packets++;
		dev->trans_start = jiffies;

		entries_left = txq->tx_ring_size - txq->tx_desc_count;
		if (entries_left < MAX_SKB_FRAGS + 1)
			netif_tx_stop_queue(nq);
	}

	return NETDEV_TX_OK;
}
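/*
 * Editorial note on flow control: the transmit path keeps
 * MAX_SKB_FRAGS + 1 descriptors in reserve, the worst case a single
 * skb can consume (one head descriptor plus one per page fragment).
 * mv643xx_eth_xmit() above stops the queue when the reserve is
 * reached, and txq_maybe_wake() restarts it once reclaim has freed at
 * least that many descriptors, which is why the "tx queue full?!"
 * branch is not expected to trigger in normal operation.
 */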
/* tx napi ******************************************************************/
static void txq_kick(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
	u32 hw_desc_ptr;
	u32 expected_ptr;

	__netif_tx_lock(nq, smp_processor_id());

	if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
		goto out;

	hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index));
	expected_ptr = (u32)txq->tx_desc_dma +
				txq->tx_curr_desc * sizeof(struct tx_desc);

	if (hw_desc_ptr != expected_ptr)
		txq_enable(txq);

out:
	__netif_tx_unlock(nq);

	mp->work_tx_end &= ~(1 << txq->index);
}

static int txq_reclaim(struct tx_queue *txq, int budget, int force)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
	int reclaimed;

	__netif_tx_lock(nq, smp_processor_id());

	reclaimed = 0;
	while (reclaimed < budget && txq->tx_desc_count > 0) {
		int tx_index;
		struct tx_desc *desc;
		u32 cmd_sts;
		struct sk_buff *skb;

		tx_index = txq->tx_used_desc;
		desc = &txq->tx_desc_area[tx_index];
		cmd_sts = desc->cmd_sts;

		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
			if (!force)
				break;
			desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
		}

		txq->tx_used_desc = tx_index + 1;
		if (txq->tx_used_desc == txq->tx_ring_size)
			txq->tx_used_desc = 0;

		reclaimed++;
		txq->tx_desc_count--;

		skb = NULL;
		if (cmd_sts & TX_LAST_DESC)
			skb = __skb_dequeue(&txq->tx_skb);

		if (cmd_sts & ERROR_SUMMARY) {
			dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n");
			mp->dev->stats.tx_errors++;
		}

		if (cmd_sts & TX_FIRST_DESC) {
			dma_unmap_single(NULL, desc->buf_ptr,
					 desc->byte_cnt, DMA_TO_DEVICE);
		} else {
			dma_unmap_page(NULL, desc->buf_ptr,
				       desc->byte_cnt, DMA_TO_DEVICE);
		}

		if (skb != NULL) {
			if (skb_queue_len(&mp->rx_recycle) <
					mp->default_rx_ring_size &&
			    skb_recycle_check(skb, mp->skb_size +
					dma_get_cache_alignment() - 1))
				__skb_queue_head(&mp->rx_recycle, skb);
			else
				dev_kfree_skb(skb);
		}
	}

	__netif_tx_unlock(nq);

	if (reclaimed < budget)
		mp->work_tx &= ~(1 << txq->index);

	return reclaimed;
}


/* tx rate control **********************************************************/
/*
 * Set total maximum TX rate (shared by all TX queues for this port)
 * to 'rate' bits per second, with a maximum burst of 'burst' bytes.
 */
static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
{
	int token_rate;
	int mtu;
	int bucket_size;

	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	mtu = (mp->dev->mtu + 255) >> 8;
	if (mtu > 63)
		mtu = 63;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		wrlp(mp, TX_BW_RATE, token_rate);
		wrlp(mp, TX_BW_MTU, mtu);
		wrlp(mp, TX_BW_BURST, bucket_size);
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		wrlp(mp, TX_BW_RATE_MOVED, token_rate);
		wrlp(mp, TX_BW_MTU_MOVED, mtu);
		wrlp(mp, TX_BW_BURST_MOVED, bucket_size);
		break;
	}
}

static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int token_rate;
	int bucket_size;

	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
	wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
}
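/*
 * Worked example of the token-rate arithmetic above (editorial),
 * assuming a hypothetical t_clk of 133 MHz -- the real value is
 * board-specific platform data: for rate = 1000000000 (1 Gb/s),
 * token_rate = ((1000000000 / 1000) * 64) / (133000000 / 1000) ~= 481,
 * comfortably below the 1023 hardware ceiling, while a 16 MB burst
 * gives bucket_size = (16777216 + 255) >> 8 = 65536, clamped to 65535.
 */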
static void txq_set_fixed_prio_mode(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off;
	u32 val;

	/*
	 * Turn on fixed priority mode.
	 */
	off = 0;
	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		off = TXQ_FIX_PRIO_CONF;
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		off = TXQ_FIX_PRIO_CONF_MOVED;
		break;
	}

	if (off) {
		val = rdlp(mp, off);
		val |= 1 << txq->index;
		wrlp(mp, off, val);
	}
}

static void txq_set_wrr(struct tx_queue *txq, int weight)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off;
	u32 val;

	/*
	 * Turn off fixed priority mode.
	 */
	off = 0;
	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		off = TXQ_FIX_PRIO_CONF;
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		off = TXQ_FIX_PRIO_CONF_MOVED;
		break;
	}

	if (off) {
		val = rdlp(mp, off);
		val &= ~(1 << txq->index);
		wrlp(mp, off, val);

		/*
		 * Configure WRR weight for this queue.
		 */

		val = rdlp(mp, off);
		val = (val & ~0xff) | (weight & 0xff);
		wrlp(mp, TXQ_BW_WRR_CONF(txq->index), val);
	}
}


/* mii management interface *************************************************/
static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id)
{
	struct mv643xx_eth_shared_private *msp = dev_id;

	if (readl(msp->base + ERR_INT_CAUSE) & ERR_INT_SMI_DONE) {
		writel(~ERR_INT_SMI_DONE, msp->base + ERR_INT_CAUSE);
		wake_up(&msp->smi_busy_wait);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int smi_is_done(struct mv643xx_eth_shared_private *msp)
{
	return !(readl(msp->base + SMI_REG) & SMI_BUSY);
}

static int smi_wait_ready(struct mv643xx_eth_shared_private *msp)
{
	if (msp->err_interrupt == NO_IRQ) {
		int i;

		for (i = 0; !smi_is_done(msp); i++) {
			if (i == 10)
				return -ETIMEDOUT;
			msleep(10);
		}

		return 0;
	}

	if (!smi_is_done(msp)) {
		wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp),
				   msecs_to_jiffies(100));
		if (!smi_is_done(msp))
			return -ETIMEDOUT;
	}

	return 0;
}

static int smi_bus_read(struct mii_bus *bus, int addr, int reg)
{
	struct mv643xx_eth_shared_private *msp = bus->priv;
	void __iomem *smi_reg = msp->base + SMI_REG;
	int ret;

	if (smi_wait_ready(msp)) {
		printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);

	if (smi_wait_ready(msp)) {
		printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	ret = readl(smi_reg);
	if (!(ret & SMI_READ_VALID)) {
		printk(KERN_WARNING "mv643xx_eth: SMI bus read not valid\n");
		return -ENODEV;
	}

	return ret & 0xffff;
}

static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct mv643xx_eth_shared_private *msp = bus->priv;
	void __iomem *smi_reg = msp->base + SMI_REG;

	if (smi_wait_ready(msp)) {
		printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	writel(SMI_OPCODE_WRITE | (reg << 21) |
		(addr << 16) | (val & 0xffff), smi_reg);

	if (smi_wait_ready(msp)) {
		printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}


/* statistics ***************************************************************/
static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	unsigned long tx_packets = 0;
	unsigned long tx_bytes = 0;
	unsigned long tx_dropped = 0;
	int i;

	for (i = 0; i < mp->txq_count; i++) {
		struct tx_queue *txq = mp->txq + i;

		tx_packets += txq->tx_packets;
		tx_bytes += txq->tx_bytes;
		tx_dropped += txq->tx_dropped;
	}

	stats->tx_packets = tx_packets;
	stats->tx_bytes = tx_bytes;
	stats->tx_dropped = tx_dropped;

	return stats;
}

static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
{
	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
}
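/*
 * Editorial note: the hardware MIB counters in this controller family
 * are read-to-clear, which is why mib_counters_clear() below simply
 * reads each of the 32 registers in the 0x80-byte counter block and
 * discards the result, while mib_counters_update() accumulates the
 * deltas into struct mib_counters under mib_counters_lock.
 */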
static void mib_counters_clear(struct mv643xx_eth_private *mp)
{
	int i;

	for (i = 0; i < 0x80; i += 4)
		mib_read(mp, i);
}

static void mib_counters_update(struct mv643xx_eth_private *mp)
{
	struct mib_counters *p = &mp->mib_counters;

	spin_lock(&mp->mib_counters_lock);
	p->good_octets_received += mib_read(mp, 0x00);
	p->good_octets_received += (u64)mib_read(mp, 0x04) << 32;
	p->bad_octets_received += mib_read(mp, 0x08);
	p->internal_mac_transmit_err += mib_read(mp, 0x0c);
	p->good_frames_received += mib_read(mp, 0x10);
	p->bad_frames_received += mib_read(mp, 0x14);
	p->broadcast_frames_received += mib_read(mp, 0x18);
	p->multicast_frames_received += mib_read(mp, 0x1c);
	p->frames_64_octets += mib_read(mp, 0x20);
	p->frames_65_to_127_octets += mib_read(mp, 0x24);
	p->frames_128_to_255_octets += mib_read(mp, 0x28);
	p->frames_256_to_511_octets += mib_read(mp, 0x2c);
	p->frames_512_to_1023_octets += mib_read(mp, 0x30);
	p->frames_1024_to_max_octets += mib_read(mp, 0x34);
	p->good_octets_sent += mib_read(mp, 0x38);
	p->good_octets_sent += (u64)mib_read(mp, 0x3c) << 32;
	p->good_frames_sent += mib_read(mp, 0x40);
	p->excessive_collision += mib_read(mp, 0x44);
	p->multicast_frames_sent += mib_read(mp, 0x48);
	p->broadcast_frames_sent += mib_read(mp, 0x4c);
	p->unrec_mac_control_received += mib_read(mp, 0x50);
	p->fc_sent += mib_read(mp, 0x54);
	p->good_fc_received += mib_read(mp, 0x58);
	p->bad_fc_received += mib_read(mp, 0x5c);
	p->undersize_received += mib_read(mp, 0x60);
	p->fragments_received += mib_read(mp, 0x64);
	p->oversize_received += mib_read(mp, 0x68);
	p->jabber_received += mib_read(mp, 0x6c);
	p->mac_receive_error += mib_read(mp, 0x70);
	p->bad_crc_event += mib_read(mp, 0x74);
	p->collision += mib_read(mp, 0x78);
	p->late_collision += mib_read(mp, 0x7c);
	spin_unlock(&mp->mib_counters_lock);

	mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}

static void mib_counters_timer_wrapper(unsigned long _mp)
{
	struct mv643xx_eth_private *mp = (void *)_mp;

	mib_counters_update(mp);
}


/* ethtool ******************************************************************/
struct mv643xx_eth_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int netdev_off;
	int mp_off;
};

#define SSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct net_device_stats, m),		\
	  offsetof(struct net_device, stats.m), -1 }

#define MIBSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct mib_counters, m),		\
	  -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }
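/*
 * Editorial note: SSTAT entries are fetched from struct net_device's
 * embedded stats (netdev_off >= 0, mp_off == -1), while MIBSTAT
 * entries come from the driver-private mib_counters copy
 * (netdev_off == -1).  The sign of netdev_off is what
 * mv643xx_eth_get_ethtool_stats() keys on when picking the base
 * pointer for each entry.
 */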
static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
	SSTAT(rx_packets),
	SSTAT(tx_packets),
	SSTAT(rx_bytes),
	SSTAT(tx_bytes),
	SSTAT(rx_errors),
	SSTAT(tx_errors),
	SSTAT(rx_dropped),
	SSTAT(tx_dropped),
	MIBSTAT(good_octets_received),
	MIBSTAT(bad_octets_received),
	MIBSTAT(internal_mac_transmit_err),
	MIBSTAT(good_frames_received),
	MIBSTAT(bad_frames_received),
	MIBSTAT(broadcast_frames_received),
	MIBSTAT(multicast_frames_received),
	MIBSTAT(frames_64_octets),
	MIBSTAT(frames_65_to_127_octets),
	MIBSTAT(frames_128_to_255_octets),
	MIBSTAT(frames_256_to_511_octets),
	MIBSTAT(frames_512_to_1023_octets),
	MIBSTAT(frames_1024_to_max_octets),
	MIBSTAT(good_octets_sent),
	MIBSTAT(good_frames_sent),
	MIBSTAT(excessive_collision),
	MIBSTAT(multicast_frames_sent),
	MIBSTAT(broadcast_frames_sent),
	MIBSTAT(unrec_mac_control_received),
	MIBSTAT(fc_sent),
	MIBSTAT(good_fc_received),
	MIBSTAT(bad_fc_received),
	MIBSTAT(undersize_received),
	MIBSTAT(fragments_received),
	MIBSTAT(oversize_received),
	MIBSTAT(jabber_received),
	MIBSTAT(mac_receive_error),
	MIBSTAT(bad_crc_event),
	MIBSTAT(collision),
	MIBSTAT(late_collision),
};

static int
mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int err;

	err = phy_read_status(mp->phy);
	if (err == 0)
		err = phy_ethtool_gset(mp->phy, cmd);

	/*
	 * The MAC does not support 1000baseT_Half.
	 */
	cmd->supported &= ~SUPPORTED_1000baseT_Half;
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return err;
}

static int
mv643xx_eth_get_settings_phyless(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 port_status;

	port_status = rdlp(mp, PORT_STATUS);

	cmd->supported = SUPPORTED_MII;
	cmd->advertising = ADVERTISED_MII;
	switch (port_status & PORT_SPEED_MASK) {
	case PORT_SPEED_10:
		cmd->speed = SPEED_10;
		break;
	case PORT_SPEED_100:
		cmd->speed = SPEED_100;
		break;
	case PORT_SPEED_1000:
		cmd->speed = SPEED_1000;
		break;
	default:
		cmd->speed = -1;
		break;
	}
	cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
	cmd->port = PORT_MII;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_DISABLE;
	cmd->maxtxpkt = 1;
	cmd->maxrxpkt = 1;

	return 0;
}
static int
mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	/*
	 * The MAC does not support 1000baseT_Half.
	 */
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return phy_ethtool_sset(mp->phy, cmd);
}

static int
mv643xx_eth_set_settings_phyless(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	return -EINVAL;
}

static void mv643xx_eth_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *drvinfo)
{
	strncpy(drvinfo->driver, mv643xx_eth_driver_name, 32);
	strncpy(drvinfo->version, mv643xx_eth_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, "platform", 32);
	drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
}

static int mv643xx_eth_nway_reset(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	return genphy_restart_aneg(mp->phy);
}

static int mv643xx_eth_nway_reset_phyless(struct net_device *dev)
{
	return -EINVAL;
}

static u32 mv643xx_eth_get_link(struct net_device *dev)
{
	return !!netif_carrier_ok(dev);
}

static void mv643xx_eth_get_strings(struct net_device *dev,
				    uint32_t stringset, uint8_t *data)
{
	int i;

	if (stringset == ETH_SS_STATS) {
		for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
				mv643xx_eth_stats[i].stat_string,
				ETH_GSTRING_LEN);
		}
	}
}
static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
					  struct ethtool_stats *stats,
					  uint64_t *data)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int i;

	mv643xx_eth_get_stats(dev);
	mib_counters_update(mp);

	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
		const struct mv643xx_eth_stats *stat;
		void *p;

		stat = mv643xx_eth_stats + i;

		if (stat->netdev_off >= 0)
			p = ((void *)mp->dev) + stat->netdev_off;
		else
			p = ((void *)mp) + stat->mp_off;

		data[i] = (stat->sizeof_stat == 8) ?
				*(uint64_t *)p : *(uint32_t *)p;
	}
}

static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(mv643xx_eth_stats);

	return -EOPNOTSUPP;
}

static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
	.get_settings		= mv643xx_eth_get_settings,
	.set_settings		= mv643xx_eth_set_settings,
	.get_drvinfo		= mv643xx_eth_get_drvinfo,
	.nway_reset		= mv643xx_eth_nway_reset,
	.get_link		= mv643xx_eth_get_link,
	.set_sg			= ethtool_op_set_sg,
	.get_strings		= mv643xx_eth_get_strings,
	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
	.get_sset_count		= mv643xx_eth_get_sset_count,
};

static const struct ethtool_ops mv643xx_eth_ethtool_ops_phyless = {
	.get_settings		= mv643xx_eth_get_settings_phyless,
	.set_settings		= mv643xx_eth_set_settings_phyless,
	.get_drvinfo		= mv643xx_eth_get_drvinfo,
	.nway_reset		= mv643xx_eth_nway_reset_phyless,
	.get_link		= mv643xx_eth_get_link,
	.set_sg			= ethtool_op_set_sg,
	.get_strings		= mv643xx_eth_get_strings,
	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
	.get_sset_count		= mv643xx_eth_get_sset_count,
};


/* address handling *********************************************************/
static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH);
	unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW);

	addr[0] = (mac_h >> 24) & 0xff;
	addr[1] = (mac_h >> 16) & 0xff;
	addr[2] = (mac_h >> 8) & 0xff;
	addr[3] = mac_h & 0xff;
	addr[4] = (mac_l >> 8) & 0xff;
	addr[5] = mac_l & 0xff;
}

static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	wrlp(mp, MAC_ADDR_HIGH,
	     (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]);
	wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]);
}

static u32 uc_addr_filter_mask(struct net_device *dev)
{
	struct dev_addr_list *uc_ptr;
	u32 nibbles;

	if (dev->flags & IFF_PROMISC)
		return 0;

	nibbles = 1 << (dev->dev_addr[5] & 0x0f);
	for (uc_ptr = dev->uc_list; uc_ptr != NULL; uc_ptr = uc_ptr->next) {
		if (memcmp(dev->dev_addr, uc_ptr->da_addr, 5))
			return 0;
		if ((dev->dev_addr[5] ^ uc_ptr->da_addr[5]) & 0xf0)
			return 0;

		nibbles |= 1 << (uc_ptr->da_addr[5] & 0x0f);
	}

	return nibbles;
}

static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 port_config;
	u32 nibbles;
	int i;

	uc_addr_set(mp, dev->dev_addr);

	port_config = rdlp(mp, PORT_CONFIG);
	nibbles = uc_addr_filter_mask(dev);
	if (!nibbles) {
		port_config |= UNICAST_PROMISCUOUS_MODE;
		wrlp(mp, PORT_CONFIG, port_config);
		return;
	}

	for (i = 0; i < 16; i += 4) {
		int off = UNICAST_TABLE(mp->port_num) + i;
		u32 v;

		v = 0;
		if (nibbles & 1)
			v |= 0x00000001;
		if (nibbles & 2)
			v |= 0x00000100;
		if (nibbles & 4)
			v |= 0x00010000;
		if (nibbles & 8)
			v |= 0x01000000;
		nibbles >>= 4;

		wrl(mp, off, v);
	}

	port_config &= ~UNICAST_PROMISCUOUS_MODE;
	wrlp(mp, PORT_CONFIG, port_config);
}
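/*
 * Editorial example of the unicast filter logic above, using a
 * hypothetical address: the hardware can match up to 16 unicast
 * addresses that differ only in the low nibble of the last byte.  If
 * dev_addr is 00:50:43:ab:cd:e4 and 00:50:43:ab:cd:e9 is also
 * configured, uc_addr_filter_mask() returns (1 << 4) | (1 << 9) and
 * only those two table entries are enabled; any extra address outside
 * that 16-address block forces unicast promiscuous mode instead.
 */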
static int addr_crc(unsigned char *addr)
{
	int crc = 0;
	int i;

	for (i = 0; i < 6; i++) {
		int j;

		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--) {
			if (crc & (0x100 << j))
				crc ^= 0x107 << j;
		}
	}

	return crc;
}

static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 *mc_spec;
	u32 *mc_other;
	struct dev_addr_list *addr;
	int i;

	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		int port_num;
		u32 accept;
		int i;

oom:
		port_num = mp->port_num;
		accept = 0x01010101;
		for (i = 0; i < 0x100; i += 4) {
			wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept);
			wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept);
		}
		return;
	}

	mc_spec = kmalloc(0x200, GFP_KERNEL);
	if (mc_spec == NULL)
		goto oom;
	mc_other = mc_spec + (0x100 >> 2);

	memset(mc_spec, 0, 0x100);
	memset(mc_other, 0, 0x100);

	for (addr = dev->mc_list; addr != NULL; addr = addr->next) {
		u8 *a = addr->da_addr;
		u32 *table;
		int entry;

		if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
			table = mc_spec;
			entry = a[5];
		} else {
			table = mc_other;
			entry = addr_crc(a);
		}

		table[entry >> 2] |= 1 << (8 * (entry & 3));
	}

	for (i = 0; i < 0x100; i += 4) {
		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, mc_spec[i >> 2]);
		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, mc_other[i >> 2]);
	}

	kfree(mc_spec);
}

static void mv643xx_eth_set_rx_mode(struct net_device *dev)
{
	mv643xx_eth_program_unicast_filter(dev);
	mv643xx_eth_program_multicast_filter(dev);
}

static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sa = addr;

	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);

	netif_addr_lock_bh(dev);
	mv643xx_eth_program_unicast_filter(dev);
	netif_addr_unlock_bh(dev);

	return 0;
}
/* rx/tx queue initialisation ***********************************************/
static int rxq_init(struct mv643xx_eth_private *mp, int index)
{
	struct rx_queue *rxq = mp->rxq + index;
	struct rx_desc *rx_desc;
	int size;
	int i;

	rxq->index = index;

	rxq->rx_ring_size = mp->default_rx_ring_size;

	rxq->rx_desc_count = 0;
	rxq->rx_curr_desc = 0;
	rxq->rx_used_desc = 0;

	size = rxq->rx_ring_size * sizeof(struct rx_desc);

	if (index == 0 && size <= mp->rx_desc_sram_size) {
		rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
						mp->rx_desc_sram_size);
		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
	} else {
		rxq->rx_desc_area = dma_alloc_coherent(NULL, size,
							&rxq->rx_desc_dma,
							GFP_KERNEL);
	}

	if (rxq->rx_desc_area == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate rx ring (%d bytes)\n", size);
		goto out;
	}
	memset(rxq->rx_desc_area, 0, size);

	rxq->rx_desc_area_size = size;
	rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
								GFP_KERNEL);
	if (rxq->rx_skb == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate rx skb ring\n");
		goto out_free;
	}

	rx_desc = (struct rx_desc *)rxq->rx_desc_area;
	for (i = 0; i < rxq->rx_ring_size; i++) {
		int nexti;

		nexti = i + 1;
		if (nexti == rxq->rx_ring_size)
			nexti = 0;

		rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
					nexti * sizeof(struct rx_desc);
	}

	return 0;


out_free:
	if (index == 0 && size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(NULL, size,
				  rxq->rx_desc_area,
				  rxq->rx_desc_dma);

out:
	return -ENOMEM;
}

static void rxq_deinit(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int i;

	rxq_disable(rxq);

	for (i = 0; i < rxq->rx_ring_size; i++) {
		if (rxq->rx_skb[i]) {
			dev_kfree_skb(rxq->rx_skb[i]);
			rxq->rx_desc_count--;
		}
	}

	if (rxq->rx_desc_count) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "error freeing rx ring -- %d skbs stuck\n",
			   rxq->rx_desc_count);
	}

	if (rxq->index == 0 &&
	    rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(NULL, rxq->rx_desc_area_size,
				  rxq->rx_desc_area, rxq->rx_desc_dma);

	kfree(rxq->rx_skb);
}

static int txq_init(struct mv643xx_eth_private *mp, int index)
{
	struct tx_queue *txq = mp->txq + index;
	struct tx_desc *tx_desc;
	int size;
	int i;

	txq->index = index;

	txq->tx_ring_size = mp->default_tx_ring_size;

	txq->tx_desc_count = 0;
	txq->tx_curr_desc = 0;
	txq->tx_used_desc = 0;

	size = txq->tx_ring_size * sizeof(struct tx_desc);

	if (index == 0 && size <= mp->tx_desc_sram_size) {
		txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
						mp->tx_desc_sram_size);
		txq->tx_desc_dma = mp->tx_desc_sram_addr;
	} else {
		txq->tx_desc_area = dma_alloc_coherent(NULL, size,
							&txq->tx_desc_dma,
							GFP_KERNEL);
	}

	if (txq->tx_desc_area == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate tx ring (%d bytes)\n", size);
		return -ENOMEM;
	}
	memset(txq->tx_desc_area, 0, size);

	txq->tx_desc_area_size = size;

	tx_desc = (struct tx_desc *)txq->tx_desc_area;
	for (i = 0; i < txq->tx_ring_size; i++) {
		struct tx_desc *txd = tx_desc + i;
		int nexti;

		nexti = i + 1;
		if (nexti == txq->tx_ring_size)
			nexti = 0;

		txd->cmd_sts = 0;
		txd->next_desc_ptr = txq->tx_desc_dma +
					nexti * sizeof(struct tx_desc);
	}

	skb_queue_head_init(&txq->tx_skb);

	return 0;
}

static void txq_deinit(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);

	txq_disable(txq);
	txq_reclaim(txq, txq->tx_ring_size, 1);

	BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);

	if (txq->index == 0 &&
	    txq->tx_desc_area_size <= mp->tx_desc_sram_size)
		iounmap(txq->tx_desc_area);
	else
		dma_free_coherent(NULL, txq->tx_desc_area_size,
				  txq->tx_desc_area, txq->tx_desc_dma);
}
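/*
 * Editorial notes on the init paths above: both rings are circular
 * (each descriptor's next_desc_ptr points at its successor, the last
 * one back at the first), and queue 0's ring may be placed in on-chip
 * SRAM via ioremap() when the platform provides a large enough SRAM
 * window; all other queues fall back to coherent DMA memory.
 */

/*
 * Editorial guide to the event demux in mv643xx_eth_collect_events()
 * below: INT_TX_END occupies bits 19-26 of INT_CAUSE and INT_RX bits
 * 2-9, so the ">> 19" and ">> 2" shifts line queue 0 up with bit 0 of
 * the per-queue work masks.  The TXQ_COMMAND read-back keeps only
 * TX-end events for queues whose enable bit the hardware has already
 * cleared, i.e. queues that have genuinely stopped.
 * mv643xx_eth_poll() then drains these masks highest-numbered queue
 * first (fls(queue_mask) - 1) in slices of at most 16 units of work,
 * re-checking for newly raised events once all recorded work is done.
 */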
/* netdev ops and related ***************************************************/
static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
{
	u32 int_cause;
	u32 int_cause_ext;

	int_cause = rdlp(mp, INT_CAUSE) & (INT_TX_END | INT_RX | INT_EXT);
	if (int_cause == 0)
		return 0;

	int_cause_ext = 0;
	if (int_cause & INT_EXT)
		int_cause_ext = rdlp(mp, INT_CAUSE_EXT);

	int_cause &= INT_TX_END | INT_RX;
	if (int_cause) {
		wrlp(mp, INT_CAUSE, ~int_cause);
		mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
				~(rdlp(mp, TXQ_COMMAND) & 0xff);
		mp->work_rx |= (int_cause & INT_RX) >> 2;
	}

	int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX;
	if (int_cause_ext) {
		wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext);
		if (int_cause_ext & INT_EXT_LINK_PHY)
			mp->work_link = 1;
		mp->work_tx |= int_cause_ext & INT_EXT_TX;
	}

	return 1;
}

static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (unlikely(!mv643xx_eth_collect_events(mp)))
		return IRQ_NONE;

	wrlp(mp, INT_MASK, 0);
	napi_schedule(&mp->napi);

	return IRQ_HANDLED;
}

static void handle_link_event(struct mv643xx_eth_private *mp)
{
	struct net_device *dev = mp->dev;
	u32 port_status;
	int speed;
	int duplex;
	int fc;

	port_status = rdlp(mp, PORT_STATUS);
	if (!(port_status & LINK_UP)) {
		if (netif_carrier_ok(dev)) {
			int i;

			printk(KERN_INFO "%s: link down\n", dev->name);

			netif_carrier_off(dev);

			for (i = 0; i < mp->txq_count; i++) {
				struct tx_queue *txq = mp->txq + i;

				txq_reclaim(txq, txq->tx_ring_size, 1);
				txq_reset_hw_ptr(txq);
			}
		}
		return;
	}

	switch (port_status & PORT_SPEED_MASK) {
	case PORT_SPEED_10:
		speed = 10;
		break;
	case PORT_SPEED_100:
		speed = 100;
		break;
	case PORT_SPEED_1000:
		speed = 1000;
		break;
	default:
		speed = -1;
		break;
	}
	duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
	fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;

	printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
			 "flow control %sabled\n", dev->name,
			 speed, duplex ? "full" : "half",
			 fc ? "en" : "dis");

	if (!netif_carrier_ok(dev))
		netif_carrier_on(dev);
}

static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
{
	struct mv643xx_eth_private *mp;
	int work_done;

	mp = container_of(napi, struct mv643xx_eth_private, napi);

	mp->work_rx_refill |= mp->work_rx_oom;
	mp->work_rx_oom = 0;

	work_done = 0;
	while (work_done < budget) {
		u8 queue_mask;
		int queue;
		int work_tbd;

		if (mp->work_link) {
			mp->work_link = 0;
			handle_link_event(mp);
			continue;
		}

		queue_mask = mp->work_tx | mp->work_tx_end |
				mp->work_rx | mp->work_rx_refill;
		if (!queue_mask) {
			if (mv643xx_eth_collect_events(mp))
				continue;
			break;
		}

		queue = fls(queue_mask) - 1;
		queue_mask = 1 << queue;

		work_tbd = budget - work_done;
		if (work_tbd > 16)
			work_tbd = 16;

		if (mp->work_tx_end & queue_mask) {
			txq_kick(mp->txq + queue);
		} else if (mp->work_tx & queue_mask) {
			work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
			txq_maybe_wake(mp->txq + queue);
		} else if (mp->work_rx & queue_mask) {
			work_done += rxq_process(mp->rxq + queue, work_tbd);
		} else if (mp->work_rx_refill & queue_mask) {
			work_done += rxq_refill(mp->rxq + queue, work_tbd);
		} else {
			BUG();
		}
	}

	if (work_done < budget) {
		if (mp->work_rx_oom)
			mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
		napi_complete(napi);
		wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);
	}

	return work_done;
}
static inline void oom_timer_wrapper(unsigned long data)
{
	struct mv643xx_eth_private *mp = (void *)data;

	napi_schedule(&mp->napi);
}

static void phy_reset(struct mv643xx_eth_private *mp)
{
	int data;

	data = phy_read(mp->phy, MII_BMCR);
	if (data < 0)
		return;

	data |= BMCR_RESET;
	if (phy_write(mp->phy, MII_BMCR, data) < 0)
		return;

	do {
		data = phy_read(mp->phy, MII_BMCR);
	} while (data >= 0 && data & BMCR_RESET);
}

static void port_start(struct mv643xx_eth_private *mp)
{
	u32 pscr;
	int i;

	/*
	 * Perform PHY reset, if there is a PHY.
	 */
	if (mp->phy != NULL) {
		struct ethtool_cmd cmd;

		mv643xx_eth_get_settings(mp->dev, &cmd);
		phy_reset(mp);
		mv643xx_eth_set_settings(mp->dev, &cmd);
	}

	/*
	 * Configure basic link parameters.
	 */
	pscr = rdlp(mp, PORT_SERIAL_CONTROL);

	pscr |= SERIAL_PORT_ENABLE;
	wrlp(mp, PORT_SERIAL_CONTROL, pscr);

	pscr |= DO_NOT_FORCE_LINK_FAIL;
	if (mp->phy == NULL)
		pscr |= FORCE_LINK_PASS;
	wrlp(mp, PORT_SERIAL_CONTROL, pscr);

	wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);

	/*
	 * Configure TX path and queues.
	 */
	tx_set_rate(mp, 1000000000, 16777216);
	for (i = 0; i < mp->txq_count; i++) {
		struct tx_queue *txq = mp->txq + i;

		txq_reset_hw_ptr(txq);
		txq_set_rate(txq, 1000000000, 16777216);
		txq_set_fixed_prio_mode(txq);
	}

	/*
	 * Add configured unicast address to address filter table.
	 */
	mv643xx_eth_program_unicast_filter(mp->dev);

	/*
	 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
	 * frames to RX queue #0, and include the pseudo-header when
	 * calculating receive checksums.
	 */
	wrlp(mp, PORT_CONFIG, 0x02000000);

	/*
	 * Treat BPDUs as normal multicasts, and disable partition mode.
	 */
	wrlp(mp, PORT_CONFIG_EXT, 0x00000000);

	/*
	 * Enable the receive queues.
	 */
	for (i = 0; i < mp->rxq_count; i++) {
		struct rx_queue *rxq = mp->rxq + i;
		u32 addr;

		addr = (u32)rxq->rx_desc_dma;
		addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
		wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr);

		rxq_enable(rxq);
	}
}

static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
{
	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;
	u32 val;

	val = rdlp(mp, SDMA_CONFIG);
	if (mp->shared->extended_rx_coal_limit) {
		if (coal > 0xffff)
			coal = 0xffff;
		val &= ~0x023fff80;
		val |= (coal & 0x8000) << 10;
		val |= (coal & 0x7fff) << 7;
	} else {
		if (coal > 0x3fff)
			coal = 0x3fff;
		val &= ~0x003fff00;
		val |= (coal & 0x3fff) << 8;
	}
	wrlp(mp, SDMA_CONFIG, val);
}

static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
{
	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;

	if (coal > 0x3fff)
		coal = 0x3fff;
	wrlp(mp, TX_FIFO_URGENT_THRESHOLD, (coal & 0x3fff) << 4);
}
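/*
 * Worked example for the coalescing helpers above (editorial), again
 * assuming a hypothetical 133 MHz t_clk: a requested delay of 100 usec
 * gives coal = (133 * 100) / 64 = 207 ticks, well inside both the
 * 0x3fff standard limit and the 0xffff extended limit.  A delay of 0,
 * as used by mv643xx_eth_open(), requests immediate interrupts, i.e.
 * no coalescing.
 */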
2110 */ 2111 mp->skb_size = (skb_size + 7) & ~7; 2112} 2113 2114static int mv643xx_eth_open(struct net_device *dev) 2115{ 2116 struct mv643xx_eth_private *mp = netdev_priv(dev); 2117 int err; 2118 int i; 2119 2120 wrlp(mp, INT_CAUSE, 0); 2121 wrlp(mp, INT_CAUSE_EXT, 0); 2122 rdlp(mp, INT_CAUSE_EXT); 2123 2124 err = request_irq(dev->irq, mv643xx_eth_irq, 2125 IRQF_SHARED, dev->name, dev); 2126 if (err) { 2127 dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n"); 2128 return -EAGAIN; 2129 } 2130 2131 mv643xx_eth_recalc_skb_size(mp); 2132 2133 napi_enable(&mp->napi); 2134 2135 skb_queue_head_init(&mp->rx_recycle); 2136 2137 for (i = 0; i < mp->rxq_count; i++) { 2138 err = rxq_init(mp, i); 2139 if (err) { 2140 while (--i >= 0) 2141 rxq_deinit(mp->rxq + i); 2142 goto out; 2143 } 2144 2145 rxq_refill(mp->rxq + i, INT_MAX); 2146 } 2147 2148 if (mp->work_rx_oom) { 2149 mp->rx_oom.expires = jiffies + (HZ / 10); 2150 add_timer(&mp->rx_oom); 2151 } 2152 2153 for (i = 0; i < mp->txq_count; i++) { 2154 err = txq_init(mp, i); 2155 if (err) { 2156 while (--i >= 0) 2157 txq_deinit(mp->txq + i); 2158 goto out_free; 2159 } 2160 } 2161 2162 netif_carrier_off(dev); 2163 2164 port_start(mp); 2165 2166 set_rx_coal(mp, 0); 2167 set_tx_coal(mp, 0); 2168 2169 wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX); 2170 wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT); 2171 2172 return 0; 2173 2174 2175out_free: 2176 for (i = 0; i < mp->rxq_count; i++) 2177 rxq_deinit(mp->rxq + i); 2178out: 2179 free_irq(dev->irq, dev); 2180 2181 return err; 2182} 2183 2184static void port_reset(struct mv643xx_eth_private *mp) 2185{ 2186 unsigned int data; 2187 int i; 2188 2189 for (i = 0; i < mp->rxq_count; i++) 2190 rxq_disable(mp->rxq + i); 2191 for (i = 0; i < mp->txq_count; i++) 2192 txq_disable(mp->txq + i); 2193 2194 while (1) { 2195 u32 ps = rdlp(mp, PORT_STATUS); 2196 2197 if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY) 2198 break; 2199 udelay(10); 2200 } 2201 2202 /* Reset the Enable bit in the Configuration Register */ 2203 data = rdlp(mp, PORT_SERIAL_CONTROL); 2204 data &= ~(SERIAL_PORT_ENABLE | 2205 DO_NOT_FORCE_LINK_FAIL | 2206 FORCE_LINK_PASS); 2207 wrlp(mp, PORT_SERIAL_CONTROL, data); 2208} 2209 2210static int mv643xx_eth_stop(struct net_device *dev) 2211{ 2212 struct mv643xx_eth_private *mp = netdev_priv(dev); 2213 int i; 2214 2215 wrlp(mp, INT_MASK_EXT, 0x00000000); 2216 wrlp(mp, INT_MASK, 0x00000000); 2217 rdlp(mp, INT_MASK); 2218 2219 del_timer_sync(&mp->mib_counters_timer); 2220 2221 napi_disable(&mp->napi); 2222 2223 del_timer_sync(&mp->rx_oom); 2224 2225 netif_carrier_off(dev); 2226 2227 free_irq(dev->irq, dev); 2228 2229 port_reset(mp); 2230 mv643xx_eth_get_stats(dev); 2231 mib_counters_update(mp); 2232 2233 skb_queue_purge(&mp->rx_recycle); 2234 2235 for (i = 0; i < mp->rxq_count; i++) 2236 rxq_deinit(mp->rxq + i); 2237 for (i = 0; i < mp->txq_count; i++) 2238 txq_deinit(mp->txq + i); 2239 2240 return 0; 2241} 2242 2243static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2244{ 2245 struct mv643xx_eth_private *mp = netdev_priv(dev); 2246 2247 if (mp->phy != NULL) 2248 return phy_mii_ioctl(mp->phy, if_mii(ifr), cmd); 2249 2250 return -EOPNOTSUPP; 2251} 2252 2253static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) 2254{ 2255 struct mv643xx_eth_private *mp = netdev_priv(dev); 2256 2257 if (new_mtu < 64 || new_mtu > 9500) 2258 return -EINVAL; 2259 2260 dev->mtu = new_mtu; 2261 mv643xx_eth_recalc_skb_size(mp); 2262 tx_set_rate(mp, 1000000000, 16777216); 
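/*
 * Reprogram the TX bandwidth shaper for the new frame size: the rate
 * and burst arguments match the 1 Gb/s / 2^24 values used in
 * port_start(), and the MTU-dependent part of the shaper setup (see
 * TX_BW_MTU) presumably needs to track the new MTU.
 */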
2263 2264 if (!netif_running(dev)) 2265 return 0; 2266 2267 /* 2268 * Stop and then re-open the interface. This will allocate RX 2269 * skbs of the new MTU. 2270 * There is a possible danger that the open will not succeed, 2271 * due to memory being full. 2272 */ 2273 mv643xx_eth_stop(dev); 2274 if (mv643xx_eth_open(dev)) { 2275 dev_printk(KERN_ERR, &dev->dev, 2276 "fatal error on re-opening device after " 2277 "MTU change\n"); 2278 } 2279 2280 return 0; 2281} 2282 2283static void tx_timeout_task(struct work_struct *ugly) 2284{ 2285 struct mv643xx_eth_private *mp; 2286 2287 mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task); 2288 if (netif_running(mp->dev)) { 2289 netif_tx_stop_all_queues(mp->dev); 2290 port_reset(mp); 2291 port_start(mp); 2292 netif_tx_wake_all_queues(mp->dev); 2293 } 2294} 2295 2296static void mv643xx_eth_tx_timeout(struct net_device *dev) 2297{ 2298 struct mv643xx_eth_private *mp = netdev_priv(dev); 2299 2300 dev_printk(KERN_INFO, &dev->dev, "tx timeout\n"); 2301 2302 schedule_work(&mp->tx_timeout_task); 2303} 2304 2305#ifdef CONFIG_NET_POLL_CONTROLLER 2306static void mv643xx_eth_netpoll(struct net_device *dev) 2307{ 2308 struct mv643xx_eth_private *mp = netdev_priv(dev); 2309 2310 wrlp(mp, INT_MASK, 0x00000000); 2311 rdlp(mp, INT_MASK); 2312 2313 mv643xx_eth_irq(dev->irq, dev); 2314 2315 wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT); 2316} 2317#endif 2318 2319 2320/* platform glue ************************************************************/ 2321static void 2322mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp, 2323 struct mbus_dram_target_info *dram) 2324{ 2325 void __iomem *base = msp->base; 2326 u32 win_enable; 2327 u32 win_protect; 2328 int i; 2329 2330 for (i = 0; i < 6; i++) { 2331 writel(0, base + WINDOW_BASE(i)); 2332 writel(0, base + WINDOW_SIZE(i)); 2333 if (i < 4) 2334 writel(0, base + WINDOW_REMAP_HIGH(i)); 2335 } 2336 2337 win_enable = 0x3f; 2338 win_protect = 0; 2339 2340 for (i = 0; i < dram->num_cs; i++) { 2341 struct mbus_dram_window *cs = dram->cs + i; 2342 2343 writel((cs->base & 0xffff0000) | 2344 (cs->mbus_attr << 8) | 2345 dram->mbus_dram_target_id, base + WINDOW_BASE(i)); 2346 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); 2347 2348 win_enable &= ~(1 << i); 2349 win_protect |= 3 << (2 * i); 2350 } 2351 2352 writel(win_enable, base + WINDOW_BAR_ENABLE); 2353 msp->win_protect = win_protect; 2354} 2355 2356static void infer_hw_params(struct mv643xx_eth_shared_private *msp) 2357{ 2358 /* 2359 * Check whether we have a 14-bit coal limit field in bits 2360 * [21:8], or a 16-bit coal limit in bits [25,21:7] of the 2361 * SDMA config register. 2362 */ 2363 writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG); 2364 if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000) 2365 msp->extended_rx_coal_limit = 1; 2366 else 2367 msp->extended_rx_coal_limit = 0; 2368 2369 /* 2370 * Check whether the MAC supports TX rate control, and if 2371 * yes, whether its associated registers are in the old or 2372 * the new place. 
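 * Both probes below use the same trick: write a value to the register
 * at the candidate offset and read it back; if the bits stick, a real
 * register exists at that location.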
2373 */ 2374 writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED); 2375 if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) { 2376 msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT; 2377 } else { 2378 writel(7, msp->base + 0x0400 + TX_BW_RATE); 2379 if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7) 2380 msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT; 2381 else 2382 msp->tx_bw_control = TX_BW_CONTROL_ABSENT; 2383 } 2384} 2385 2386static int mv643xx_eth_shared_probe(struct platform_device *pdev) 2387{ 2388 static int mv643xx_eth_version_printed; 2389 struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; 2390 struct mv643xx_eth_shared_private *msp; 2391 struct resource *res; 2392 int ret; 2393 2394 if (!mv643xx_eth_version_printed++) 2395 printk(KERN_NOTICE "MV-643xx 10/100/1000 ethernet " 2396 "driver version %s\n", mv643xx_eth_driver_version); 2397 2398 ret = -EINVAL; 2399 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2400 if (res == NULL) 2401 goto out; 2402 2403 ret = -ENOMEM; 2404 msp = kmalloc(sizeof(*msp), GFP_KERNEL); 2405 if (msp == NULL) 2406 goto out; 2407 memset(msp, 0, sizeof(*msp)); 2408 2409 msp->base = ioremap(res->start, res->end - res->start + 1); 2410 if (msp->base == NULL) 2411 goto out_free; 2412 2413 /* 2414 * Set up and register SMI bus. 2415 */ 2416 if (pd == NULL || pd->shared_smi == NULL) { 2417 msp->smi_bus = mdiobus_alloc(); 2418 if (msp->smi_bus == NULL) 2419 goto out_unmap; 2420 2421 msp->smi_bus->priv = msp; 2422 msp->smi_bus->name = "mv643xx_eth smi"; 2423 msp->smi_bus->read = smi_bus_read; 2424 msp->smi_bus->write = smi_bus_write; 2425 snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id); 2426 msp->smi_bus->parent = &pdev->dev; 2427 msp->smi_bus->phy_mask = 0xffffffff; 2428 if (mdiobus_register(msp->smi_bus) < 0) 2429 goto out_free_mii_bus; 2430 msp->smi = msp; 2431 } else { 2432 msp->smi = platform_get_drvdata(pd->shared_smi); 2433 } 2434 2435 msp->err_interrupt = NO_IRQ; 2436 init_waitqueue_head(&msp->smi_busy_wait); 2437 2438 /* 2439 * Check whether the error interrupt is hooked up. 2440 */ 2441 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 2442 if (res != NULL) { 2443 int err; 2444 2445 err = request_irq(res->start, mv643xx_eth_err_irq, 2446 IRQF_SHARED, "mv643xx_eth", msp); 2447 if (!err) { 2448 writel(ERR_INT_SMI_DONE, msp->base + ERR_INT_MASK); 2449 msp->err_interrupt = res->start; 2450 } 2451 } 2452 2453 /* 2454 * (Re-)program MBUS remapping windows if we are asked to. 2455 */ 2456 if (pd != NULL && pd->dram != NULL) 2457 mv643xx_eth_conf_mbus_windows(msp, pd->dram); 2458 2459 /* 2460 * Detect hardware parameters. 2461 */ 2462 msp->t_clk = (pd != NULL && pd->t_clk != 0) ?
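/* board-supplied clock when given, else assume the 133 MHz default */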
pd->t_clk : 133000000; 2463 infer_hw_params(msp); 2464 2465 platform_set_drvdata(pdev, msp); 2466 2467 return 0; 2468 2469out_free_mii_bus: 2470 mdiobus_free(msp->smi_bus); 2471out_unmap: 2472 iounmap(msp->base); 2473out_free: 2474 kfree(msp); 2475out: 2476 return ret; 2477} 2478 2479static int mv643xx_eth_shared_remove(struct platform_device *pdev) 2480{ 2481 struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev); 2482 struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; 2483 2484 if (pd == NULL || pd->shared_smi == NULL) { 2485 mdiobus_unregister(msp->smi_bus); 2486 mdiobus_free(msp->smi_bus); 2487 } 2488 if (msp->err_interrupt != NO_IRQ) 2489 free_irq(msp->err_interrupt, msp); 2490 iounmap(msp->base); 2491 kfree(msp); 2492 2493 return 0; 2494} 2495 2496static struct platform_driver mv643xx_eth_shared_driver = { 2497 .probe = mv643xx_eth_shared_probe, 2498 .remove = mv643xx_eth_shared_remove, 2499 .driver = { 2500 .name = MV643XX_ETH_SHARED_NAME, 2501 .owner = THIS_MODULE, 2502 }, 2503}; 2504 2505static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr) 2506{ 2507 int addr_shift = 5 * mp->port_num; 2508 u32 data; 2509 2510 data = rdl(mp, PHY_ADDR); 2511 data &= ~(0x1f << addr_shift); 2512 data |= (phy_addr & 0x1f) << addr_shift; 2513 wrl(mp, PHY_ADDR, data); 2514} 2515 2516static int phy_addr_get(struct mv643xx_eth_private *mp) 2517{ 2518 unsigned int data; 2519 2520 data = rdl(mp, PHY_ADDR); 2521 2522 return (data >> (5 * mp->port_num)) & 0x1f; 2523} 2524 2525static void set_params(struct mv643xx_eth_private *mp, 2526 struct mv643xx_eth_platform_data *pd) 2527{ 2528 struct net_device *dev = mp->dev; 2529 2530 if (is_valid_ether_addr(pd->mac_addr)) 2531 memcpy(dev->dev_addr, pd->mac_addr, 6); 2532 else 2533 uc_addr_get(mp, dev->dev_addr); 2534 2535 mp->default_rx_ring_size = DEFAULT_RX_QUEUE_SIZE; 2536 if (pd->rx_queue_size) 2537 mp->default_rx_ring_size = pd->rx_queue_size; 2538 mp->rx_desc_sram_addr = pd->rx_sram_addr; 2539 mp->rx_desc_sram_size = pd->rx_sram_size; 2540 2541 mp->rxq_count = pd->rx_queue_count ? : 1; 2542 2543 mp->default_tx_ring_size = DEFAULT_TX_QUEUE_SIZE; 2544 if (pd->tx_queue_size) 2545 mp->default_tx_ring_size = pd->tx_queue_size; 2546 mp->tx_desc_sram_addr = pd->tx_sram_addr; 2547 mp->tx_desc_sram_size = pd->tx_sram_size; 2548 2549 mp->txq_count = pd->tx_queue_count ? 
: 1; 2550} 2551 2552static struct phy_device *phy_scan(struct mv643xx_eth_private *mp, 2553 int phy_addr) 2554{ 2555 struct mii_bus *bus = mp->shared->smi->smi_bus; 2556 struct phy_device *phydev; 2557 int start; 2558 int num; 2559 int i; 2560 2561 if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) { 2562 start = phy_addr_get(mp) & 0x1f; 2563 num = 32; 2564 } else { 2565 start = phy_addr & 0x1f; 2566 num = 1; 2567 } 2568 2569 phydev = NULL; 2570 for (i = 0; i < num; i++) { 2571 int addr = (start + i) & 0x1f; 2572 2573 if (bus->phy_map[addr] == NULL) 2574 mdiobus_scan(bus, addr); 2575 2576 if (phydev == NULL) { 2577 phydev = bus->phy_map[addr]; 2578 if (phydev != NULL) 2579 phy_addr_set(mp, addr); 2580 } 2581 } 2582 2583 return phydev; 2584} 2585 2586static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex) 2587{ 2588 struct phy_device *phy = mp->phy; 2589 2590 phy_reset(mp); 2591 2592 phy_attach(mp->dev, phy->dev.bus_id, 0, PHY_INTERFACE_MODE_GMII); 2593 2594 if (speed == 0) { 2595 phy->autoneg = AUTONEG_ENABLE; 2596 phy->speed = 0; 2597 phy->duplex = 0; 2598 phy->advertising = phy->supported | ADVERTISED_Autoneg; 2599 } else { 2600 phy->autoneg = AUTONEG_DISABLE; 2601 phy->advertising = 0; 2602 phy->speed = speed; 2603 phy->duplex = duplex; 2604 } 2605 phy_start_aneg(phy); 2606} 2607 2608static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex) 2609{ 2610 u32 pscr; 2611 2612 pscr = rdlp(mp, PORT_SERIAL_CONTROL); 2613 if (pscr & SERIAL_PORT_ENABLE) { 2614 pscr &= ~SERIAL_PORT_ENABLE; 2615 wrlp(mp, PORT_SERIAL_CONTROL, pscr); 2616 } 2617 2618 pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED; 2619 if (mp->phy == NULL) { 2620 pscr |= DISABLE_AUTO_NEG_SPEED_GMII; 2621 if (speed == SPEED_1000) 2622 pscr |= SET_GMII_SPEED_TO_1000; 2623 else if (speed == SPEED_100) 2624 pscr |= SET_MII_SPEED_TO_100; 2625 2626 pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL; 2627 2628 pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX; 2629 if (duplex == DUPLEX_FULL) 2630 pscr |= SET_FULL_DUPLEX_MODE; 2631 } 2632 2633 wrlp(mp, PORT_SERIAL_CONTROL, pscr); 2634} 2635 2636static int mv643xx_eth_probe(struct platform_device *pdev) 2637{ 2638 struct mv643xx_eth_platform_data *pd; 2639 struct mv643xx_eth_private *mp; 2640 struct net_device *dev; 2641 struct resource *res; 2642 int err; 2643 2644 pd = pdev->dev.platform_data; 2645 if (pd == NULL) { 2646 dev_printk(KERN_ERR, &pdev->dev, 2647 "no mv643xx_eth_platform_data\n"); 2648 return -ENODEV; 2649 } 2650 2651 if (pd->shared == NULL) { 2652 dev_printk(KERN_ERR, &pdev->dev, 2653 "no mv643xx_eth_platform_data->shared\n"); 2654 return -ENODEV; 2655 } 2656 2657 dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8); 2658 if (!dev) 2659 return -ENOMEM; 2660 2661 mp = netdev_priv(dev); 2662 platform_set_drvdata(pdev, mp); 2663 2664 mp->shared = platform_get_drvdata(pd->shared); 2665 mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10); 2666 mp->port_num = pd->port_number; 2667 2668 mp->dev = dev; 2669 2670 set_params(mp, pd); 2671 dev->real_num_tx_queues = mp->txq_count; 2672 2673 if (pd->phy_addr != MV643XX_ETH_PHY_NONE) 2674 mp->phy = phy_scan(mp, pd->phy_addr); 2675 2676 if (mp->phy != NULL) { 2677 phy_init(mp, pd->speed, pd->duplex); 2678 SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops); 2679 } else { 2680 SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops_phyless); 2681 } 2682 2683 init_pscr(mp, pd->speed, pd->duplex); 2684 2685 2686 mib_counters_clear(mp); 2687 2688 init_timer(&mp->mib_counters_timer); 2689 mp->mib_counters_timer.data = 
(unsigned long)mp; 2690 mp->mib_counters_timer.function = mib_counters_timer_wrapper; 2691 mp->mib_counters_timer.expires = jiffies + 30 * HZ; 2692 add_timer(&mp->mib_counters_timer); 2693 2694 spin_lock_init(&mp->mib_counters_lock); 2695 2696 INIT_WORK(&mp->tx_timeout_task, tx_timeout_task); 2697 2698 netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128); 2699 2700 init_timer(&mp->rx_oom); 2701 mp->rx_oom.data = (unsigned long)mp; 2702 mp->rx_oom.function = oom_timer_wrapper; 2703 2704 2705 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 2706 BUG_ON(!res); 2707 dev->irq = res->start; 2708 2709 dev->get_stats = mv643xx_eth_get_stats; 2710 dev->hard_start_xmit = mv643xx_eth_xmit; 2711 dev->open = mv643xx_eth_open; 2712 dev->stop = mv643xx_eth_stop; 2713 dev->set_rx_mode = mv643xx_eth_set_rx_mode; 2714 dev->set_mac_address = mv643xx_eth_set_mac_address; 2715 dev->do_ioctl = mv643xx_eth_ioctl; 2716 dev->change_mtu = mv643xx_eth_change_mtu; 2717 dev->tx_timeout = mv643xx_eth_tx_timeout; 2718#ifdef CONFIG_NET_POLL_CONTROLLER 2719 dev->poll_controller = mv643xx_eth_netpoll; 2720#endif 2721 dev->watchdog_timeo = 2 * HZ; 2722 dev->base_addr = 0; 2723 2724 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM; 2725 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM; 2726 2727 SET_NETDEV_DEV(dev, &pdev->dev); 2728 2729 if (mp->shared->win_protect) 2730 wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect); 2731 2732 err = register_netdev(dev); 2733 if (err) 2734 goto out; 2735 2736 dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %pM\n", 2737 mp->port_num, dev->dev_addr); 2738 2739 if (mp->tx_desc_sram_size > 0) 2740 dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n"); 2741 2742 return 0; 2743 2744out: 2745 free_netdev(dev); 2746 2747 return err; 2748} 2749 2750static int mv643xx_eth_remove(struct platform_device *pdev) 2751{ 2752 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); 2753 2754 unregister_netdev(mp->dev); 2755 if (mp->phy != NULL) 2756 phy_detach(mp->phy); 2757 flush_scheduled_work(); 2758 free_netdev(mp->dev); 2759 2760 platform_set_drvdata(pdev, NULL); 2761 2762 return 0; 2763} 2764 2765static void mv643xx_eth_shutdown(struct platform_device *pdev) 2766{ 2767 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); 2768 2769 /* Mask all interrupts on ethernet port */ 2770 wrlp(mp, INT_MASK, 0); 2771 rdlp(mp, INT_MASK); 2772 2773 if (netif_running(mp->dev)) 2774 port_reset(mp); 2775} 2776 2777static struct platform_driver mv643xx_eth_driver = { 2778 .probe = mv643xx_eth_probe, 2779 .remove = mv643xx_eth_remove, 2780 .shutdown = mv643xx_eth_shutdown, 2781 .driver = { 2782 .name = MV643XX_ETH_NAME, 2783 .owner = THIS_MODULE, 2784 }, 2785}; 2786 2787static int __init mv643xx_eth_init_module(void) 2788{ 2789 int rc; 2790 2791 rc = platform_driver_register(&mv643xx_eth_shared_driver); 2792 if (!rc) { 2793 rc = platform_driver_register(&mv643xx_eth_driver); 2794 if (rc) 2795 platform_driver_unregister(&mv643xx_eth_shared_driver); 2796 } 2797 2798 return rc; 2799} 2800module_init(mv643xx_eth_init_module); 2801 2802static void __exit mv643xx_eth_cleanup_module(void) 2803{ 2804 platform_driver_unregister(&mv643xx_eth_driver); 2805 platform_driver_unregister(&mv643xx_eth_shared_driver); 2806} 2807module_exit(mv643xx_eth_cleanup_module); 2808 2809MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, " 2810 "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek"); 2811MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX"); 
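/*
 * The MODULE_ALIAS entries below allow udev/modprobe to autoload this
 * module when either the shared or the per-port platform device is
 * registered.
 */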
2812 MODULE_LICENSE("GPL"); 2813 MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME); 2814 MODULE_ALIAS("platform:" MV643XX_ETH_NAME);
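/*
 * Usage sketch (illustrative only, not part of this driver): the probe
 * routines above expect board code to register one shared device plus
 * one platform device per port, carrying the platform data consumed by
 * set_params() and mv643xx_eth_probe().  All addresses, IRQ numbers and
 * the MAC/PHY values below are hypothetical placeholders.
 */
#if 0
static struct resource mv643xx_eth_shared_resources[] = {
	{
		.start	= 0xf1072000,	/* hypothetical register base */
		.end	= 0xf1073fff,
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device mv643xx_eth_shared_device = {
	.name		= MV643XX_ETH_SHARED_NAME,
	.id		= 0,
	.num_resources	= ARRAY_SIZE(mv643xx_eth_shared_resources),
	.resource	= mv643xx_eth_shared_resources,
};

static struct mv643xx_eth_platform_data eth_port0_pd = {
	.shared		= &mv643xx_eth_shared_device,
	.port_number	= 0,
	.phy_addr	= MV643XX_ETH_PHY_ADDR(8),	/* hypothetical */
	.mac_addr	= { 0x00, 0x50, 0x43, 0x00, 0x00, 0x01 },
};

static struct resource eth_port0_resources[] = {
	{
		.start	= 46,		/* hypothetical port IRQ */
		.end	= 46,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device eth_port0_device = {
	.name		= MV643XX_ETH_NAME,
	.id		= 0,
	.num_resources	= ARRAY_SIZE(eth_port0_resources),
	.resource	= eth_port0_resources,
	.dev		= {
		.platform_data = &eth_port0_pd,
	},
};

static struct platform_device *eth_devices[] __initdata = {
	&mv643xx_eth_shared_device,
	&eth_port0_device,
};

/* board init code would then call platform_add_devices(eth_devices,
   ARRAY_SIZE(eth_devices)); */
#endif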