mv643xx_eth.c, from the Linux kernel mirror (git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git) at v2.6.32; excerpt, the file continues past this point.
/*
 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
 *
 * Based on the 64360 driver from:
 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
 *		      Rabeeh Khoury <rabeeh@marvell.com>
 *
 * Copyright (C) 2003 PMC-Sierra, Inc.,
 *	written by Manish Lachwani
 *
 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
 *
 * Copyright (C) 2004-2006 MontaVista Software, Inc.
 *			   Dale Farnsworth <dale@farnsworth.org>
 *
 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
 *				     <sjhill@realitydiluted.com>
 *
 * Copyright (C) 2007-2008 Marvell Semiconductor
 *			   Lennert Buytenhek <buytenh@marvell.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */

#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/phy.h>
#include <linux/mv643xx_eth.h>
#include <linux/io.h>
#include <linux/types.h>
#include <linux/inet_lro.h>
#include <asm/system.h>
#include <linux/list.h>

static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.4";


/*
 * Registers shared between all ports.
 */
#define PHY_ADDR			0x0000
#define SMI_REG				0x0004
#define  SMI_BUSY			0x10000000
#define  SMI_READ_VALID			0x08000000
#define  SMI_OPCODE_READ		0x04000000
#define  SMI_OPCODE_WRITE		0x00000000
#define ERR_INT_CAUSE			0x0080
#define  ERR_INT_SMI_DONE		0x00000010
#define ERR_INT_MASK			0x0084
#define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
#define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
#define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
#define WINDOW_BAR_ENABLE		0x0290
#define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))

/*
 * Main per-port registers.  These live at offset 0x0400 for
 * port #0, 0x0800 for port #1, and 0x0c00 for port #2.
 */
#define PORT_CONFIG			0x0000
#define  UNICAST_PROMISCUOUS_MODE	0x00000001
#define PORT_CONFIG_EXT			0x0004
#define MAC_ADDR_LOW			0x0014
#define MAC_ADDR_HIGH			0x0018
#define SDMA_CONFIG			0x001c
#define  TX_BURST_SIZE_16_64BIT		0x01000000
#define  TX_BURST_SIZE_4_64BIT		0x00800000
#define  BLM_TX_NO_SWAP			0x00000020
#define  BLM_RX_NO_SWAP			0x00000010
#define  RX_BURST_SIZE_16_64BIT		0x00000008
#define  RX_BURST_SIZE_4_64BIT		0x00000004
#define PORT_SERIAL_CONTROL		0x003c
#define  SET_MII_SPEED_TO_100		0x01000000
#define  SET_GMII_SPEED_TO_1000		0x00800000
#define  SET_FULL_DUPLEX_MODE		0x00200000
#define  MAX_RX_PACKET_9700BYTE		0x000a0000
#define  DISABLE_AUTO_NEG_SPEED_GMII	0x00002000
#define  DO_NOT_FORCE_LINK_FAIL		0x00000400
#define  SERIAL_PORT_CONTROL_RESERVED	0x00000200
#define  DISABLE_AUTO_NEG_FOR_FLOW_CTRL	0x00000008
#define  DISABLE_AUTO_NEG_FOR_DUPLEX	0x00000004
#define  FORCE_LINK_PASS		0x00000002
#define  SERIAL_PORT_ENABLE		0x00000001
#define PORT_STATUS			0x0044
#define  TX_FIFO_EMPTY			0x00000400
#define  TX_IN_PROGRESS			0x00000080
#define  PORT_SPEED_MASK		0x00000030
#define  PORT_SPEED_1000		0x00000010
#define  PORT_SPEED_100			0x00000020
#define  PORT_SPEED_10			0x00000000
#define  FLOW_CONTROL_ENABLED		0x00000008
#define  FULL_DUPLEX			0x00000004
#define  LINK_UP			0x00000002
#define TXQ_COMMAND			0x0048
#define TXQ_FIX_PRIO_CONF		0x004c
#define TX_BW_RATE			0x0050
#define TX_BW_MTU			0x0058
#define TX_BW_BURST			0x005c
#define INT_CAUSE			0x0060
#define  INT_TX_END			0x07f80000
#define  INT_TX_END_0			0x00080000
#define  INT_RX				0x000003fc
#define  INT_RX_0			0x00000004
#define  INT_EXT			0x00000002
#define INT_CAUSE_EXT			0x0064
#define  INT_EXT_LINK_PHY		0x00110000
#define  INT_EXT_TX			0x000000ff
#define INT_MASK			0x0068
#define INT_MASK_EXT			0x006c
#define TX_FIFO_URGENT_THRESHOLD	0x0074
#define TXQ_FIX_PRIO_CONF_MOVED		0x00dc
#define TX_BW_RATE_MOVED		0x00e0
#define TX_BW_MTU_MOVED			0x00e8
#define TX_BW_BURST_MOVED		0x00ec
#define RXQ_CURRENT_DESC_PTR(q)		(0x020c + ((q) << 4))
#define RXQ_COMMAND			0x0280
#define TXQ_CURRENT_DESC_PTR(q)		(0x02c0 + ((q) << 2))
#define TXQ_BW_TOKENS(q)		(0x0300 + ((q) << 4))
#define TXQ_BW_CONF(q)			(0x0304 + ((q) << 4))
#define TXQ_BW_WRR_CONF(q)		(0x0308 + ((q) << 4))

/*
 * Misc per-port registers.
 */
#define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
#define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
#define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
#define UNICAST_TABLE(p)		(0x1600 + ((p) << 10))


/*
 * SDMA configuration register default value.
 */
#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		(RX_BURST_SIZE_4_64BIT	|	\
		 TX_BURST_SIZE_4_64BIT)
#elif defined(__LITTLE_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		(RX_BURST_SIZE_4_64BIT	|	\
		 BLM_RX_NO_SWAP		|	\
		 BLM_TX_NO_SWAP		|	\
		 TX_BURST_SIZE_4_64BIT)
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif


/*
 * Misc definitions.
 */
#define DEFAULT_RX_QUEUE_SIZE	128
#define DEFAULT_TX_QUEUE_SIZE	256
#define SKB_DMA_REALIGN		((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)


/*
 * RX/TX descriptors.
 */
#if defined(__BIG_ENDIAN)
struct rx_desc {
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u16 buf_size;		/* Buffer size				*/
	u32 cmd_sts;		/* Descriptor command status		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
};

struct tx_desc {
	u16 byte_cnt;		/* buffer byte count			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u32 cmd_sts;		/* Command/status field			*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor */
};
#elif defined(__LITTLE_ENDIAN)
struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status		*/
	u16 buf_size;		/* Buffer size				*/
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
};

struct tx_desc {
	u32 cmd_sts;		/* Command/status field			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u16 byte_cnt;		/* buffer byte count			*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor */
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

/* RX & TX descriptor command */
#define BUFFER_OWNED_BY_DMA		0x80000000

/* RX & TX descriptor status */
#define ERROR_SUMMARY			0x00000001

/* RX descriptor status */
#define LAYER_4_CHECKSUM_OK		0x40000000
#define RX_ENABLE_INTERRUPT		0x20000000
#define RX_FIRST_DESC			0x08000000
#define RX_LAST_DESC			0x04000000
#define RX_IP_HDR_OK			0x02000000
#define RX_PKT_IS_IPV4			0x01000000
#define RX_PKT_IS_ETHERNETV2		0x00800000
#define RX_PKT_LAYER4_TYPE_MASK		0x00600000
#define RX_PKT_LAYER4_TYPE_TCP_IPV4	0x00000000
#define RX_PKT_IS_VLAN_TAGGED		0x00080000

/* TX descriptor command */
#define TX_ENABLE_INTERRUPT		0x00800000
#define GEN_CRC				0x00400000
#define TX_FIRST_DESC			0x00200000
#define TX_LAST_DESC			0x00100000
#define ZERO_PADDING			0x00080000
#define GEN_IP_V4_CHECKSUM		0x00040000
#define GEN_TCP_UDP_CHECKSUM		0x00020000
#define UDP_FRAME			0x00010000
#define MAC_HDR_EXTRA_4_BYTES		0x00008000
#define MAC_HDR_EXTRA_8_BYTES		0x00000200

#define TX_IHL_SHIFT			11
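
/*
 * Descriptor ownership, summarized from the code below:
 * BUFFER_OWNED_BY_DMA in cmd_sts is the handshake bit between the CPU
 * and the SDMA engine.  The CPU fills in all other descriptor fields,
 * issues a write barrier, then sets the bit to hand the descriptor to
 * hardware; the hardware clears it on completion.  A sketch of the
 * refill side (see rxq_refill()):
 *
 *	rx_desc->buf_ptr = dma_map_single(dev, skb->data, size,
 *					  DMA_FROM_DEVICE);
 *	rx_desc->buf_size = size;
 *	wmb();
 *	rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
 *
 * Both rxq_process() and txq_reclaim() test the bit before touching a
 * descriptor again.
 */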

/* global *******************************************************************/
struct mv643xx_eth_shared_private {
	/*
	 * Ethernet controller base address.
	 */
	void __iomem *base;

	/*
	 * Points at the right SMI instance to use.
	 */
	struct mv643xx_eth_shared_private *smi;

	/*
	 * Provides access to local SMI interface.
	 */
	struct mii_bus *smi_bus;

	/*
	 * If we have access to the error interrupt pin (which is
	 * somewhat misnamed as it not only reflects internal errors
	 * but also reflects SMI completion), use that to wait for
	 * SMI access completion instead of polling the SMI busy bit.
	 */
	int err_interrupt;
	wait_queue_head_t smi_busy_wait;

	/*
	 * Per-port MBUS window access register value.
	 */
	u32 win_protect;

	/*
	 * Hardware-specific parameters.
	 */
	unsigned int t_clk;
	int extended_rx_coal_limit;
	int tx_bw_control;
};

#define TX_BW_CONTROL_ABSENT		0
#define TX_BW_CONTROL_OLD_LAYOUT	1
#define TX_BW_CONTROL_NEW_LAYOUT	2

static int mv643xx_eth_open(struct net_device *dev);
static int mv643xx_eth_stop(struct net_device *dev);


/* per-port *****************************************************************/
struct mib_counters {
	u64 good_octets_received;
	u32 bad_octets_received;
	u32 internal_mac_transmit_err;
	u32 good_frames_received;
	u32 bad_frames_received;
	u32 broadcast_frames_received;
	u32 multicast_frames_received;
	u32 frames_64_octets;
	u32 frames_65_to_127_octets;
	u32 frames_128_to_255_octets;
	u32 frames_256_to_511_octets;
	u32 frames_512_to_1023_octets;
	u32 frames_1024_to_max_octets;
	u64 good_octets_sent;
	u32 good_frames_sent;
	u32 excessive_collision;
	u32 multicast_frames_sent;
	u32 broadcast_frames_sent;
	u32 unrec_mac_control_received;
	u32 fc_sent;
	u32 good_fc_received;
	u32 bad_fc_received;
	u32 undersize_received;
	u32 fragments_received;
	u32 oversize_received;
	u32 jabber_received;
	u32 mac_receive_error;
	u32 bad_crc_event;
	u32 collision;
	u32 late_collision;
};

struct lro_counters {
	u32 lro_aggregated;
	u32 lro_flushed;
	u32 lro_no_desc;
};

struct rx_queue {
	int index;

	int rx_ring_size;

	int rx_desc_count;
	int rx_curr_desc;
	int rx_used_desc;

	struct rx_desc *rx_desc_area;
	dma_addr_t rx_desc_dma;
	int rx_desc_area_size;
	struct sk_buff **rx_skb;

	struct net_lro_mgr lro_mgr;
	struct net_lro_desc lro_arr[8];
};

struct tx_queue {
	int index;

	int tx_ring_size;

	int tx_desc_count;
	int tx_curr_desc;
	int tx_used_desc;

	struct tx_desc *tx_desc_area;
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;

	struct sk_buff_head tx_skb;

	unsigned long tx_packets;
	unsigned long tx_bytes;
	unsigned long tx_dropped;
};

struct mv643xx_eth_private {
	struct mv643xx_eth_shared_private *shared;
	void __iomem *base;
	int port_num;

	struct net_device *dev;

	struct phy_device *phy;

	struct timer_list mib_counters_timer;
	spinlock_t mib_counters_lock;
	struct mib_counters mib_counters;

	struct lro_counters lro_counters;

	struct work_struct tx_timeout_task;

	struct napi_struct napi;
	u32 int_mask;
	u8 oom;
	u8 work_link;
	u8 work_tx;
	u8 work_tx_end;
	u8 work_rx;
	u8 work_rx_refill;

	int skb_size;
	struct sk_buff_head rx_recycle;

	/*
	 * RX state.
	 */
	int rx_ring_size;
	unsigned long rx_desc_sram_addr;
	int rx_desc_sram_size;
	int rxq_count;
	struct timer_list rx_oom;
	struct rx_queue rxq[8];

	/*
	 * TX state.
	 */
	int tx_ring_size;
	unsigned long tx_desc_sram_addr;
	int tx_desc_sram_size;
	int txq_count;
	struct tx_queue txq[8];
};
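
/*
 * Instance layout, for orientation: there is one
 * mv643xx_eth_shared_private per silicon block (the shared register
 * file plus the SMI interface), and one mv643xx_eth_private per port,
 * with the per-port registers at base + 0x0400/0x0800/0x0c00 as noted
 * above.  Each port drives up to eight RX and eight TX queues; the u8
 * work_* fields are bitmasks with one bit per queue, set by
 * mv643xx_eth_collect_events() and consumed by the NAPI poller in
 * mv643xx_eth_poll().
 */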

/* port register accessors **************************************************/
static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->shared->base + offset);
}

static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->base + offset);
}

static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->shared->base + offset);
}

static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->base + offset);
}


/* rxq/txq helper functions *************************************************/
static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
{
	return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
}

static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
{
	return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
}

static void rxq_enable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
}

static void rxq_disable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	u8 mask = 1 << rxq->index;

	wrlp(mp, RXQ_COMMAND, mask << 8);
	while (rdlp(mp, RXQ_COMMAND) & mask)
		udelay(10);
}

static void txq_reset_hw_ptr(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u32 addr;

	addr = (u32)txq->tx_desc_dma;
	addr += txq->tx_curr_desc * sizeof(struct tx_desc);
	wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr);
}

static void txq_enable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	wrlp(mp, TXQ_COMMAND, 1 << txq->index);
}

static void txq_disable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u8 mask = 1 << txq->index;

	wrlp(mp, TXQ_COMMAND, mask << 8);
	while (rdlp(mp, TXQ_COMMAND) & mask)
		udelay(10);
}

static void txq_maybe_wake(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);

	if (netif_tx_queue_stopped(nq)) {
		__netif_tx_lock(nq, smp_processor_id());
		if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
			netif_tx_wake_queue(nq);
		__netif_tx_unlock(nq);
	}
}
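
/*
 * The RXQ_COMMAND/TXQ_COMMAND registers use the low byte as per-queue
 * "enable" bits and the next byte as "disable request" bits, which is
 * why rxq_disable()/txq_disable() write (1 << index) << 8 and then
 * spin until the corresponding enable bit reads back as zero.  For
 * example, disabling TX queue 2 amounts to:
 *
 *	wrlp(mp, TXQ_COMMAND, (1 << 2) << 8);
 *	while (rdlp(mp, TXQ_COMMAND) & (1 << 2))
 *		udelay(10);
 */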

/* rx napi ******************************************************************/
static int
mv643xx_get_skb_header(struct sk_buff *skb, void **iphdr, void **tcph,
		       u64 *hdr_flags, void *priv)
{
	unsigned long cmd_sts = (unsigned long)priv;

	/*
	 * Make sure that this packet is Ethernet II, is not VLAN
	 * tagged, is IPv4, has a valid IP header, and is TCP.
	 */
	if ((cmd_sts & (RX_IP_HDR_OK | RX_PKT_IS_IPV4 |
		       RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_MASK |
		       RX_PKT_IS_VLAN_TAGGED)) !=
	    (RX_IP_HDR_OK | RX_PKT_IS_IPV4 |
	     RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_TCP_IPV4))
		return -1;

	skb_reset_network_header(skb);
	skb_set_transport_header(skb, ip_hdrlen(skb));
	*iphdr = ip_hdr(skb);
	*tcph = tcp_hdr(skb);
	*hdr_flags = LRO_IPV4 | LRO_TCP;

	return 0;
}

static int rxq_process(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	struct net_device_stats *stats = &mp->dev->stats;
	int lro_flush_needed;
	int rx;

	lro_flush_needed = 0;
	rx = 0;
	while (rx < budget && rxq->rx_desc_count) {
		struct rx_desc *rx_desc;
		unsigned int cmd_sts;
		struct sk_buff *skb;
		u16 byte_cnt;

		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];

		cmd_sts = rx_desc->cmd_sts;
		if (cmd_sts & BUFFER_OWNED_BY_DMA)
			break;
		rmb();

		skb = rxq->rx_skb[rxq->rx_curr_desc];
		rxq->rx_skb[rxq->rx_curr_desc] = NULL;

		rxq->rx_curr_desc++;
		if (rxq->rx_curr_desc == rxq->rx_ring_size)
			rxq->rx_curr_desc = 0;

		dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr,
				 rx_desc->buf_size, DMA_FROM_DEVICE);
		rxq->rx_desc_count--;
		rx++;

		mp->work_rx_refill |= 1 << rxq->index;

		byte_cnt = rx_desc->byte_cnt;

		/*
		 * Update statistics.
		 *
		 * Note that the descriptor byte count includes 2 dummy
		 * bytes automatically inserted by the hardware at the
		 * start of the packet (which we don't count), and a 4
		 * byte CRC at the end of the packet (which we do count).
		 */
		stats->rx_packets++;
		stats->rx_bytes += byte_cnt - 2;

		/*
		 * In case we received a packet without first / last bits
		 * on, or the error summary bit is set, the packet needs
		 * to be dropped.
		 */
		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY))
			!= (RX_FIRST_DESC | RX_LAST_DESC))
			goto err;

		/*
		 * The -4 is for the CRC in the trailer of the
		 * received packet
		 */
		skb_put(skb, byte_cnt - 2 - 4);

		if (cmd_sts & LAYER_4_CHECKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->protocol = eth_type_trans(skb, mp->dev);

		if (skb->dev->features & NETIF_F_LRO &&
		    skb->ip_summed == CHECKSUM_UNNECESSARY) {
			lro_receive_skb(&rxq->lro_mgr, skb, (void *)cmd_sts);
			lro_flush_needed = 1;
		} else
			netif_receive_skb(skb);

		continue;

err:
		stats->rx_dropped++;

		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
		    (RX_FIRST_DESC | RX_LAST_DESC)) {
			if (net_ratelimit())
				dev_printk(KERN_ERR, &mp->dev->dev,
					   "received packet spanning "
					   "multiple descriptors\n");
		}

		if (cmd_sts & ERROR_SUMMARY)
			stats->rx_errors++;

		dev_kfree_skb(skb);
	}

	if (lro_flush_needed)
		lro_flush_all(&rxq->lro_mgr);

	if (rx < budget)
		mp->work_rx &= ~(1 << rxq->index);

	return rx;
}

static int rxq_refill(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int refilled;

	refilled = 0;
	while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
		struct sk_buff *skb;
		int rx;
		struct rx_desc *rx_desc;

		skb = __skb_dequeue(&mp->rx_recycle);
		if (skb == NULL)
			skb = dev_alloc_skb(mp->skb_size);

		if (skb == NULL) {
			mp->oom = 1;
			goto oom;
		}

		if (SKB_DMA_REALIGN)
			skb_reserve(skb, SKB_DMA_REALIGN);

		refilled++;
		rxq->rx_desc_count++;

		rx = rxq->rx_used_desc++;
		if (rxq->rx_used_desc == rxq->rx_ring_size)
			rxq->rx_used_desc = 0;

		rx_desc = rxq->rx_desc_area + rx;

		rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
						  skb->data, mp->skb_size,
						  DMA_FROM_DEVICE);
		rx_desc->buf_size = mp->skb_size;
		rxq->rx_skb[rx] = skb;
		wmb();
		rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
		wmb();

		/*
		 * The hardware automatically prepends 2 bytes of
		 * dummy data to each received packet, so that the
		 * IP header ends up 16-byte aligned.
		 */
		skb_reserve(skb, 2);
	}

	if (refilled < budget)
		mp->work_rx_refill &= ~(1 << rxq->index);

oom:
	return refilled;
}
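
/*
 * RX byte accounting, by example: for a minimum-size 64-byte frame on
 * the wire (60 bytes of data plus 4 bytes of FCS), the hardware
 * prepends 2 bytes of dummy data, so the descriptor reports
 * byte_cnt = 66.  rxq_process() then counts byte_cnt - 2 = 64 bytes
 * in rx_bytes (FCS included) and hands skb_put(skb, byte_cnt - 2 - 4)
 * = 60 bytes of actual frame data up the stack.
 */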

/* tx ***********************************************************************/
static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
{
	int frag;

	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
		skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
		if (fragp->size <= 8 && fragp->page_offset & 7)
			return 1;
	}

	return 0;
}

static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag;

	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag_t *this_frag;
		int tx_index;
		struct tx_desc *desc;

		this_frag = &skb_shinfo(skb)->frags[frag];
		tx_index = txq->tx_curr_desc++;
		if (txq->tx_curr_desc == txq->tx_ring_size)
			txq->tx_curr_desc = 0;
		desc = &txq->tx_desc_area[tx_index];

		/*
		 * The last fragment will generate an interrupt
		 * which will free the skb on TX completion.
		 */
		if (frag == nr_frags - 1) {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA |
					ZERO_PADDING | TX_LAST_DESC |
					TX_ENABLE_INTERRUPT;
		} else {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA;
		}

		desc->l4i_chk = 0;
		desc->byte_cnt = this_frag->size;
		desc->buf_ptr = dma_map_page(mp->dev->dev.parent,
					     this_frag->page,
					     this_frag->page_offset,
					     this_frag->size, DMA_TO_DEVICE);
	}
}

static inline __be16 sum16_as_be(__sum16 sum)
{
	return (__force __be16)sum;
}

static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int tx_index;
	struct tx_desc *desc;
	u32 cmd_sts;
	u16 l4i_chk;
	int length;

	cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
	l4i_chk = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int tag_bytes;

		BUG_ON(skb->protocol != htons(ETH_P_IP) &&
		       skb->protocol != htons(ETH_P_8021Q));

		tag_bytes = (void *)ip_hdr(skb) - (void *)skb->data - ETH_HLEN;
		if (unlikely(tag_bytes & ~12)) {
			if (skb_checksum_help(skb) == 0)
				goto no_csum;
			kfree_skb(skb);
			return 1;
		}

		if (tag_bytes & 4)
			cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
		if (tag_bytes & 8)
			cmd_sts |= MAC_HDR_EXTRA_8_BYTES;

		cmd_sts |= GEN_TCP_UDP_CHECKSUM |
			   GEN_IP_V4_CHECKSUM |
			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;

		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			cmd_sts |= UDP_FRAME;
			l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
			break;
		case IPPROTO_TCP:
			l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
			break;
		default:
			BUG();
		}
	} else {
no_csum:
		/* Errata BTS #50, IHL must be 5 if no HW checksum */
		cmd_sts |= 5 << TX_IHL_SHIFT;
	}

	tx_index = txq->tx_curr_desc++;
	if (txq->tx_curr_desc == txq->tx_ring_size)
		txq->tx_curr_desc = 0;
	desc = &txq->tx_desc_area[tx_index];

	if (nr_frags) {
		txq_submit_frag_skb(txq, skb);
		length = skb_headlen(skb);
	} else {
		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
		length = skb->len;
	}

	desc->l4i_chk = l4i_chk;
	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data,
				       length, DMA_TO_DEVICE);

	__skb_queue_tail(&txq->tx_skb, skb);

	/* ensure all other descriptors are written before first cmd_sts */
	wmb();
	desc->cmd_sts = cmd_sts;

	/* clear TX_END status */
	mp->work_tx_end &= ~(1 << txq->index);

	/* ensure all descriptors are written before poking hardware */
	wmb();
	txq_enable(txq);

	txq->tx_desc_count += nr_frags + 1;

	return 0;
}

static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int queue;
	struct tx_queue *txq;
	struct netdev_queue *nq;

	queue = skb_get_queue_mapping(skb);
	txq = mp->txq + queue;
	nq = netdev_get_tx_queue(dev, queue);

	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
		txq->tx_dropped++;
		dev_printk(KERN_DEBUG, &dev->dev,
			   "failed to linearize skb with tiny "
			   "unaligned fragment\n");
		return NETDEV_TX_BUSY;
	}

	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
		if (net_ratelimit())
			dev_printk(KERN_ERR, &dev->dev, "tx queue full?!\n");
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (!txq_submit_skb(txq, skb)) {
		int entries_left;

		txq->tx_bytes += skb->len;
		txq->tx_packets++;
		dev->trans_start = jiffies;

		entries_left = txq->tx_ring_size - txq->tx_desc_count;
		if (entries_left < MAX_SKB_FRAGS + 1)
			netif_tx_stop_queue(nq);
	}

	return NETDEV_TX_OK;
}
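
/*
 * TX checksum offload and VLAN tags, by example: tag_bytes in
 * txq_submit_skb() is the number of bytes between the end of the
 * plain 14-byte Ethernet header and the IP header.  A single 802.1Q
 * tag gives tag_bytes = 4 (sets MAC_HDR_EXTRA_4_BYTES), a double tag
 * gives 8 (sets MAC_HDR_EXTRA_8_BYTES); any value outside
 * {0, 4, 8, 12} fails the (tag_bytes & ~12) test and falls back to
 * software checksumming via skb_checksum_help().
 */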

/* tx napi ******************************************************************/
static void txq_kick(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
	u32 hw_desc_ptr;
	u32 expected_ptr;

	__netif_tx_lock(nq, smp_processor_id());

	if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
		goto out;

	hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index));
	expected_ptr = (u32)txq->tx_desc_dma +
				txq->tx_curr_desc * sizeof(struct tx_desc);

	if (hw_desc_ptr != expected_ptr)
		txq_enable(txq);

out:
	__netif_tx_unlock(nq);

	mp->work_tx_end &= ~(1 << txq->index);
}

static int txq_reclaim(struct tx_queue *txq, int budget, int force)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
	int reclaimed;

	__netif_tx_lock(nq, smp_processor_id());

	reclaimed = 0;
	while (reclaimed < budget && txq->tx_desc_count > 0) {
		int tx_index;
		struct tx_desc *desc;
		u32 cmd_sts;
		struct sk_buff *skb;

		tx_index = txq->tx_used_desc;
		desc = &txq->tx_desc_area[tx_index];
		cmd_sts = desc->cmd_sts;

		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
			if (!force)
				break;
			desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
		}

		txq->tx_used_desc = tx_index + 1;
		if (txq->tx_used_desc == txq->tx_ring_size)
			txq->tx_used_desc = 0;

		reclaimed++;
		txq->tx_desc_count--;

		skb = NULL;
		if (cmd_sts & TX_LAST_DESC)
			skb = __skb_dequeue(&txq->tx_skb);

		if (cmd_sts & ERROR_SUMMARY) {
			dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n");
			mp->dev->stats.tx_errors++;
		}

		if (cmd_sts & TX_FIRST_DESC) {
			dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
					 desc->byte_cnt, DMA_TO_DEVICE);
		} else {
			dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr,
				       desc->byte_cnt, DMA_TO_DEVICE);
		}

		if (skb != NULL) {
			if (skb_queue_len(&mp->rx_recycle) <
					mp->rx_ring_size &&
			    skb_recycle_check(skb, mp->skb_size))
				__skb_queue_head(&mp->rx_recycle, skb);
			else
				dev_kfree_skb(skb);
		}
	}

	__netif_tx_unlock(nq);

	if (reclaimed < budget)
		mp->work_tx &= ~(1 << txq->index);

	return reclaimed;
}


/* tx rate control **********************************************************/
/*
 * Set total maximum TX rate (shared by all TX queues for this port)
 * to 'rate' bits per second, with a maximum burst of 'burst' bytes.
 */
static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
{
	int token_rate;
	int mtu;
	int bucket_size;

	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	mtu = (mp->dev->mtu + 255) >> 8;
	if (mtu > 63)
		mtu = 63;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		wrlp(mp, TX_BW_RATE, token_rate);
		wrlp(mp, TX_BW_MTU, mtu);
		wrlp(mp, TX_BW_BURST, bucket_size);
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		wrlp(mp, TX_BW_RATE_MOVED, token_rate);
		wrlp(mp, TX_BW_MTU_MOVED, mtu);
		wrlp(mp, TX_BW_BURST_MOVED, bucket_size);
		break;
	}
}

static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int token_rate;
	int bucket_size;

	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
	wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
}

static void txq_set_fixed_prio_mode(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off;
	u32 val;

	/*
	 * Turn on fixed priority mode.
	 */
	off = 0;
	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		off = TXQ_FIX_PRIO_CONF;
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		off = TXQ_FIX_PRIO_CONF_MOVED;
		break;
	}

	if (off) {
		val = rdlp(mp, off);
		val |= 1 << txq->index;
		wrlp(mp, off, val);
	}
}
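
/*
 * Token bucket arithmetic, by example (assuming, for illustration,
 * t_clk = 133 MHz): for rate = 100000000 (100 Mb/s),
 *
 *	token_rate = ((100000000 / 1000) * 64) / (133000000 / 1000)
 *		   = 6400000 / 133000 = 48
 *
 * i.e. 48 tokens per 64 t_clk cycles, clamped to the 10-bit field
 * maximum of 1023.  The MTU field counts 256-byte units
 * ((1500 + 255) >> 8 = 6 for a standard MTU), and bucket_size
 * likewise counts 256-byte units, clamped to 65535.
 */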

/* mii management interface *************************************************/
static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id)
{
	struct mv643xx_eth_shared_private *msp = dev_id;

	if (readl(msp->base + ERR_INT_CAUSE) & ERR_INT_SMI_DONE) {
		writel(~ERR_INT_SMI_DONE, msp->base + ERR_INT_CAUSE);
		wake_up(&msp->smi_busy_wait);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int smi_is_done(struct mv643xx_eth_shared_private *msp)
{
	return !(readl(msp->base + SMI_REG) & SMI_BUSY);
}

static int smi_wait_ready(struct mv643xx_eth_shared_private *msp)
{
	if (msp->err_interrupt == NO_IRQ) {
		int i;

		for (i = 0; !smi_is_done(msp); i++) {
			if (i == 10)
				return -ETIMEDOUT;
			msleep(10);
		}

		return 0;
	}

	if (!smi_is_done(msp)) {
		wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp),
				   msecs_to_jiffies(100));
		if (!smi_is_done(msp))
			return -ETIMEDOUT;
	}

	return 0;
}

static int smi_bus_read(struct mii_bus *bus, int addr, int reg)
{
	struct mv643xx_eth_shared_private *msp = bus->priv;
	void __iomem *smi_reg = msp->base + SMI_REG;
	int ret;

	if (smi_wait_ready(msp)) {
		printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);

	if (smi_wait_ready(msp)) {
		printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	ret = readl(smi_reg);
	if (!(ret & SMI_READ_VALID)) {
		printk(KERN_WARNING "mv643xx_eth: SMI bus read not valid\n");
		return -ENODEV;
	}

	return ret & 0xffff;
}

static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct mv643xx_eth_shared_private *msp = bus->priv;
	void __iomem *smi_reg = msp->base + SMI_REG;

	if (smi_wait_ready(msp)) {
		printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	writel(SMI_OPCODE_WRITE | (reg << 21) |
		(addr << 16) | (val & 0xffff), smi_reg);

	if (smi_wait_ready(msp)) {
		printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}


/* statistics ***************************************************************/
static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	unsigned long tx_packets = 0;
	unsigned long tx_bytes = 0;
	unsigned long tx_dropped = 0;
	int i;

	for (i = 0; i < mp->txq_count; i++) {
		struct tx_queue *txq = mp->txq + i;

		tx_packets += txq->tx_packets;
		tx_bytes += txq->tx_bytes;
		tx_dropped += txq->tx_dropped;
	}

	stats->tx_packets = tx_packets;
	stats->tx_bytes = tx_bytes;
	stats->tx_dropped = tx_dropped;

	return stats;
}

static void mv643xx_eth_grab_lro_stats(struct mv643xx_eth_private *mp)
{
	u32 lro_aggregated = 0;
	u32 lro_flushed = 0;
	u32 lro_no_desc = 0;
	int i;

	for (i = 0; i < mp->rxq_count; i++) {
		struct rx_queue *rxq = mp->rxq + i;

		lro_aggregated += rxq->lro_mgr.stats.aggregated;
		lro_flushed += rxq->lro_mgr.stats.flushed;
		lro_no_desc += rxq->lro_mgr.stats.no_desc;
	}

	mp->lro_counters.lro_aggregated = lro_aggregated;
	mp->lro_counters.lro_flushed = lro_flushed;
	mp->lro_counters.lro_no_desc = lro_no_desc;
}

static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
{
	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
}

static void mib_counters_clear(struct mv643xx_eth_private *mp)
{
	int i;

	for (i = 0; i < 0x80; i += 4)
		mib_read(mp, i);
}

static void mib_counters_update(struct mv643xx_eth_private *mp)
{
	struct mib_counters *p = &mp->mib_counters;

	spin_lock_bh(&mp->mib_counters_lock);
	p->good_octets_received += mib_read(mp, 0x00);
	p->bad_octets_received += mib_read(mp, 0x08);
	p->internal_mac_transmit_err += mib_read(mp, 0x0c);
	p->good_frames_received += mib_read(mp, 0x10);
	p->bad_frames_received += mib_read(mp, 0x14);
	p->broadcast_frames_received += mib_read(mp, 0x18);
	p->multicast_frames_received += mib_read(mp, 0x1c);
	p->frames_64_octets += mib_read(mp, 0x20);
	p->frames_65_to_127_octets += mib_read(mp, 0x24);
	p->frames_128_to_255_octets += mib_read(mp, 0x28);
	p->frames_256_to_511_octets += mib_read(mp, 0x2c);
	p->frames_512_to_1023_octets += mib_read(mp, 0x30);
	p->frames_1024_to_max_octets += mib_read(mp, 0x34);
	p->good_octets_sent += mib_read(mp, 0x38);
	p->good_frames_sent += mib_read(mp, 0x40);
	p->excessive_collision += mib_read(mp, 0x44);
	p->multicast_frames_sent += mib_read(mp, 0x48);
	p->broadcast_frames_sent += mib_read(mp, 0x4c);
	p->unrec_mac_control_received += mib_read(mp, 0x50);
	p->fc_sent += mib_read(mp, 0x54);
	p->good_fc_received += mib_read(mp, 0x58);
	p->bad_fc_received += mib_read(mp, 0x5c);
	p->undersize_received += mib_read(mp, 0x60);
	p->fragments_received += mib_read(mp, 0x64);
	p->oversize_received += mib_read(mp, 0x68);
	p->jabber_received += mib_read(mp, 0x6c);
	p->mac_receive_error += mib_read(mp, 0x70);
	p->bad_crc_event += mib_read(mp, 0x74);
	p->collision += mib_read(mp, 0x78);
	p->late_collision += mib_read(mp, 0x7c);
	spin_unlock_bh(&mp->mib_counters_lock);

	mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}

static void mib_counters_timer_wrapper(unsigned long _mp)
{
	struct mv643xx_eth_private *mp = (void *)_mp;

	mib_counters_update(mp);
}
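
/*
 * The hardware MIB counters are 32-bit and clear on read (which is
 * how mib_counters_clear() clears them), so mib_counters_update()
 * accumulates them into the wider software counters and re-arms
 * itself every 30 seconds via mod_timer() above.  At 1 Gb/s a 32-bit
 * octet counter can wrap in roughly 2^32 / 125000000 ~= 34 seconds,
 * hence the short resync interval.
 */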

/* interrupt coalescing *****************************************************/
/*
 * Hardware coalescing parameters are set in units of 64 t_clk
 * cycles.  I.e.:
 *
 *	coal_delay_in_usec = 64000000 * register_value / t_clk_rate
 *
 *	register_value = coal_delay_in_usec * t_clk_rate / 64000000
 *
 * In the ->set*() methods, we round the computed register value
 * to the nearest integer.
 */
static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
{
	u32 val = rdlp(mp, SDMA_CONFIG);
	u64 temp;

	if (mp->shared->extended_rx_coal_limit)
		temp = ((val & 0x02000000) >> 10) | ((val & 0x003fff80) >> 7);
	else
		temp = (val & 0x003fff00) >> 8;

	temp *= 64000000;
	do_div(temp, mp->shared->t_clk);

	return (unsigned int)temp;
}

static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
{
	u64 temp;
	u32 val;

	temp = (u64)usec * mp->shared->t_clk;
	temp += 31999999;
	do_div(temp, 64000000);

	val = rdlp(mp, SDMA_CONFIG);
	if (mp->shared->extended_rx_coal_limit) {
		if (temp > 0xffff)
			temp = 0xffff;
		val &= ~0x023fff80;
		val |= (temp & 0x8000) << 10;
		val |= (temp & 0x7fff) << 7;
	} else {
		if (temp > 0x3fff)
			temp = 0x3fff;
		val &= ~0x003fff00;
		val |= (temp & 0x3fff) << 8;
	}
	wrlp(mp, SDMA_CONFIG, val);
}

static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
{
	u64 temp;

	temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
	temp *= 64000000;
	do_div(temp, mp->shared->t_clk);

	return (unsigned int)temp;
}

static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
{
	u64 temp;

	temp = (u64)usec * mp->shared->t_clk;
	temp += 31999999;
	do_div(temp, 64000000);

	if (temp > 0x3fff)
		temp = 0x3fff;

	wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4);
}
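
/*
 * Coalescing register arithmetic, by example (again assuming
 * t_clk = 133 MHz): set_rx_coal(mp, 250) computes
 *
 *	(250 * 133000000 + 31999999) / 64000000 = 520
 *
 * so 520 lands in the coalescing field, and get_rx_coal() maps it
 * back to 520 * 64000000 / 133000000 ~= 250 usec.  The +31999999
 * term implements round-to-nearest for the division by 64000000.
 */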

/* ethtool ******************************************************************/
struct mv643xx_eth_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int netdev_off;
	int mp_off;
};

#define SSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct net_device_stats, m),		\
	  offsetof(struct net_device, stats.m), -1 }

#define MIBSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct mib_counters, m),		\
	  -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }

#define LROSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct lro_counters, m),		\
	  -1, offsetof(struct mv643xx_eth_private, lro_counters.m) }

static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
	SSTAT(rx_packets),
	SSTAT(tx_packets),
	SSTAT(rx_bytes),
	SSTAT(tx_bytes),
	SSTAT(rx_errors),
	SSTAT(tx_errors),
	SSTAT(rx_dropped),
	SSTAT(tx_dropped),
	MIBSTAT(good_octets_received),
	MIBSTAT(bad_octets_received),
	MIBSTAT(internal_mac_transmit_err),
	MIBSTAT(good_frames_received),
	MIBSTAT(bad_frames_received),
	MIBSTAT(broadcast_frames_received),
	MIBSTAT(multicast_frames_received),
	MIBSTAT(frames_64_octets),
	MIBSTAT(frames_65_to_127_octets),
	MIBSTAT(frames_128_to_255_octets),
	MIBSTAT(frames_256_to_511_octets),
	MIBSTAT(frames_512_to_1023_octets),
	MIBSTAT(frames_1024_to_max_octets),
	MIBSTAT(good_octets_sent),
	MIBSTAT(good_frames_sent),
	MIBSTAT(excessive_collision),
	MIBSTAT(multicast_frames_sent),
	MIBSTAT(broadcast_frames_sent),
	MIBSTAT(unrec_mac_control_received),
	MIBSTAT(fc_sent),
	MIBSTAT(good_fc_received),
	MIBSTAT(bad_fc_received),
	MIBSTAT(undersize_received),
	MIBSTAT(fragments_received),
	MIBSTAT(oversize_received),
	MIBSTAT(jabber_received),
	MIBSTAT(mac_receive_error),
	MIBSTAT(bad_crc_event),
	MIBSTAT(collision),
	MIBSTAT(late_collision),
	LROSTAT(lro_aggregated),
	LROSTAT(lro_flushed),
	LROSTAT(lro_no_desc),
};

static int
mv643xx_eth_get_settings_phy(struct mv643xx_eth_private *mp,
			     struct ethtool_cmd *cmd)
{
	int err;

	err = phy_read_status(mp->phy);
	if (err == 0)
		err = phy_ethtool_gset(mp->phy, cmd);

	/*
	 * The MAC does not support 1000baseT_Half.
	 */
	cmd->supported &= ~SUPPORTED_1000baseT_Half;
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return err;
}

static int
mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp,
				 struct ethtool_cmd *cmd)
{
	u32 port_status;

	port_status = rdlp(mp, PORT_STATUS);

	cmd->supported = SUPPORTED_MII;
	cmd->advertising = ADVERTISED_MII;
	switch (port_status & PORT_SPEED_MASK) {
	case PORT_SPEED_10:
		cmd->speed = SPEED_10;
		break;
	case PORT_SPEED_100:
		cmd->speed = SPEED_100;
		break;
	case PORT_SPEED_1000:
		cmd->speed = SPEED_1000;
		break;
	default:
		cmd->speed = -1;
		break;
	}
	cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
	cmd->port = PORT_MII;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_DISABLE;
	cmd->maxtxpkt = 1;
	cmd->maxrxpkt = 1;

	return 0;
}

static int
mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (mp->phy != NULL)
		return mv643xx_eth_get_settings_phy(mp, cmd);
	else
		return mv643xx_eth_get_settings_phyless(mp, cmd);
}

static int
mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (mp->phy == NULL)
		return -EINVAL;

	/*
	 * The MAC does not support 1000baseT_Half.
	 */
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return phy_ethtool_sset(mp->phy, cmd);
}

static void mv643xx_eth_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *drvinfo)
{
	strncpy(drvinfo->driver, mv643xx_eth_driver_name, 32);
	strncpy(drvinfo->version, mv643xx_eth_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, "platform", 32);
	drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
}

static int mv643xx_eth_nway_reset(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (mp->phy == NULL)
		return -EINVAL;

	return genphy_restart_aneg(mp->phy);
}

static u32 mv643xx_eth_get_link(struct net_device *dev)
{
	return !!netif_carrier_ok(dev);
}

static int
mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	ec->rx_coalesce_usecs = get_rx_coal(mp);
	ec->tx_coalesce_usecs = get_tx_coal(mp);

	return 0;
}

static int
mv643xx_eth_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	set_rx_coal(mp, ec->rx_coalesce_usecs);
	set_tx_coal(mp, ec->tx_coalesce_usecs);

	return 0;
}

static void
mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	er->rx_max_pending = 4096;
	er->tx_max_pending = 4096;
	er->rx_mini_max_pending = 0;
	er->rx_jumbo_max_pending = 0;

	er->rx_pending = mp->rx_ring_size;
	er->tx_pending = mp->tx_ring_size;
	er->rx_mini_pending = 0;
	er->rx_jumbo_pending = 0;
}

static int
mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (er->rx_mini_pending || er->rx_jumbo_pending)
		return -EINVAL;

	mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
	mp->tx_ring_size = er->tx_pending < 4096 ? er->tx_pending : 4096;

	if (netif_running(dev)) {
		mv643xx_eth_stop(dev);
		if (mv643xx_eth_open(dev)) {
			dev_printk(KERN_ERR, &dev->dev,
				   "fatal error on re-opening device after "
				   "ring param change\n");
			return -ENOMEM;
		}
	}

	return 0;
}

static u32
mv643xx_eth_get_rx_csum(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	return !!(rdlp(mp, PORT_CONFIG) & 0x02000000);
}

static int
mv643xx_eth_set_rx_csum(struct net_device *dev, u32 rx_csum)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);

	return 0;
}

static void mv643xx_eth_get_strings(struct net_device *dev,
				    uint32_t stringset, uint8_t *data)
{
	int i;

	if (stringset == ETH_SS_STATS) {
		for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       mv643xx_eth_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
	}
}

static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
					  struct ethtool_stats *stats,
					  uint64_t *data)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int i;

	mv643xx_eth_get_stats(dev);
	mib_counters_update(mp);
	mv643xx_eth_grab_lro_stats(mp);

	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
		const struct mv643xx_eth_stats *stat;
		void *p;

		stat = mv643xx_eth_stats + i;

		if (stat->netdev_off >= 0)
			p = ((void *)mp->dev) + stat->netdev_off;
		else
			p = ((void *)mp) + stat->mp_off;

		data[i] = (stat->sizeof_stat == 8) ?
				*(uint64_t *)p : *(uint32_t *)p;
	}
}

static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(mv643xx_eth_stats);

	return -EOPNOTSUPP;
}

static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
	.get_settings		= mv643xx_eth_get_settings,
	.set_settings		= mv643xx_eth_set_settings,
	.get_drvinfo		= mv643xx_eth_get_drvinfo,
	.nway_reset		= mv643xx_eth_nway_reset,
	.get_link		= mv643xx_eth_get_link,
	.get_coalesce		= mv643xx_eth_get_coalesce,
	.set_coalesce		= mv643xx_eth_set_coalesce,
	.get_ringparam		= mv643xx_eth_get_ringparam,
	.set_ringparam		= mv643xx_eth_set_ringparam,
	.get_rx_csum		= mv643xx_eth_get_rx_csum,
	.set_rx_csum		= mv643xx_eth_set_rx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.get_strings		= mv643xx_eth_get_strings,
	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
	.get_flags		= ethtool_op_get_flags,
	.set_flags		= ethtool_op_set_flags,
	.get_sset_count		= mv643xx_eth_get_sset_count,
};

/* address handling *********************************************************/
static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH);
	unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW);

	addr[0] = (mac_h >> 24) & 0xff;
	addr[1] = (mac_h >> 16) & 0xff;
	addr[2] = (mac_h >> 8) & 0xff;
	addr[3] = mac_h & 0xff;
	addr[4] = (mac_l >> 8) & 0xff;
	addr[5] = mac_l & 0xff;
}

static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	wrlp(mp, MAC_ADDR_HIGH,
	     (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]);
	wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]);
}

static u32 uc_addr_filter_mask(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	u32 nibbles;

	if (dev->flags & IFF_PROMISC)
		return 0;

	nibbles = 1 << (dev->dev_addr[5] & 0x0f);
	list_for_each_entry(ha, &dev->uc.list, list) {
		if (memcmp(dev->dev_addr, ha->addr, 5))
			return 0;
		if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
			return 0;

		nibbles |= 1 << (ha->addr[5] & 0x0f);
	}

	return nibbles;
}

static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 port_config;
	u32 nibbles;
	int i;

	uc_addr_set(mp, dev->dev_addr);

	port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE;

	nibbles = uc_addr_filter_mask(dev);
	if (!nibbles) {
		port_config |= UNICAST_PROMISCUOUS_MODE;
		nibbles = 0xffff;
	}

	for (i = 0; i < 16; i += 4) {
		int off = UNICAST_TABLE(mp->port_num) + i;
		u32 v;

		v = 0;
		if (nibbles & 1)
			v |= 0x00000001;
		if (nibbles & 2)
			v |= 0x00000100;
		if (nibbles & 4)
			v |= 0x00010000;
		if (nibbles & 8)
			v |= 0x01000000;
		nibbles >>= 4;

		wrl(mp, off, v);
	}

	wrlp(mp, PORT_CONFIG, port_config);
}

static int addr_crc(unsigned char *addr)
{
	int crc = 0;
	int i;

	for (i = 0; i < 6; i++) {
		int j;

		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--) {
			if (crc & (0x100 << j))
				crc ^= 0x107 << j;
		}
	}

	return crc;
}

static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 *mc_spec;
	u32 *mc_other;
	struct dev_addr_list *addr;
	int i;

	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		int port_num;
		u32 accept;

oom:
		port_num = mp->port_num;
		accept = 0x01010101;
		for (i = 0; i < 0x100; i += 4) {
			wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept);
			wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept);
		}
		return;
	}

	mc_spec = kmalloc(0x200, GFP_ATOMIC);
	if (mc_spec == NULL)
		goto oom;
	mc_other = mc_spec + (0x100 >> 2);

	memset(mc_spec, 0, 0x100);
	memset(mc_other, 0, 0x100);

	for (addr = dev->mc_list; addr != NULL; addr = addr->next) {
		u8 *a = addr->da_addr;
		u32 *table;
		int entry;

		if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
			table = mc_spec;
			entry = a[5];
		} else {
			table = mc_other;
			entry = addr_crc(a);
		}

		table[entry >> 2] |= 1 << (8 * (entry & 3));
	}

	for (i = 0; i < 0x100; i += 4) {
		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, mc_spec[i >> 2]);
		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, mc_other[i >> 2]);
	}

	kfree(mc_spec);
}

static void mv643xx_eth_set_rx_mode(struct net_device *dev)
{
	mv643xx_eth_program_unicast_filter(dev);
	mv643xx_eth_program_multicast_filter(dev);
}

static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sa = addr;

	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);

	netif_addr_lock_bh(dev);
	mv643xx_eth_program_unicast_filter(dev);
	netif_addr_unlock_bh(dev);

	return 0;
}
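
/*
 * Filtering tables, by example: the unicast table matches addresses
 * that differ from dev_addr only in the low nibble of the last byte.
 * With dev_addr ending in 0xa4, uc_addr_filter_mask() sets nibble
 * bit 4; adding a secondary address ending in 0xa7 also sets bit 7,
 * and mv643xx_eth_program_unicast_filter() expands the 16-bit nibble
 * mask into the 16-entry table, one byte per nibble value.  Multicast
 * addresses of the form 01:00:5e:00:00:xx index the special table
 * directly by xx; all others are hashed into the other table with
 * addr_crc(), a bitwise CRC-8 over the 6 address bytes using
 * polynomial 0x107 (x^8 + x^2 + x + 1).
 */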

/* rx/tx queue initialisation ***********************************************/
static int rxq_init(struct mv643xx_eth_private *mp, int index)
{
	struct rx_queue *rxq = mp->rxq + index;
	struct rx_desc *rx_desc;
	int size;
	int i;

	rxq->index = index;

	rxq->rx_ring_size = mp->rx_ring_size;

	rxq->rx_desc_count = 0;
	rxq->rx_curr_desc = 0;
	rxq->rx_used_desc = 0;

	size = rxq->rx_ring_size * sizeof(struct rx_desc);

	if (index == 0 && size <= mp->rx_desc_sram_size) {
		rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
					    mp->rx_desc_sram_size);
		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
	} else {
		rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
						       size, &rxq->rx_desc_dma,
						       GFP_KERNEL);
	}

	if (rxq->rx_desc_area == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate rx ring (%d bytes)\n", size);
		goto out;
	}
	memset(rxq->rx_desc_area, 0, size);

	rxq->rx_desc_area_size = size;
	rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
			      GFP_KERNEL);
	if (rxq->rx_skb == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate rx skb ring\n");
		goto out_free;
	}

	rx_desc = (struct rx_desc *)rxq->rx_desc_area;
	for (i = 0; i < rxq->rx_ring_size; i++) {
		int nexti;

		nexti = i + 1;
		if (nexti == rxq->rx_ring_size)
			nexti = 0;

		rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
					   nexti * sizeof(struct rx_desc);
	}

	rxq->lro_mgr.dev = mp->dev;
	memset(&rxq->lro_mgr.stats, 0, sizeof(rxq->lro_mgr.stats));
	rxq->lro_mgr.features = LRO_F_NAPI;
	rxq->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
	rxq->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
	rxq->lro_mgr.max_desc = ARRAY_SIZE(rxq->lro_arr);
	rxq->lro_mgr.max_aggr = 32;
	rxq->lro_mgr.frag_align_pad = 0;
	rxq->lro_mgr.lro_arr = rxq->lro_arr;
	rxq->lro_mgr.get_skb_header = mv643xx_get_skb_header;

	memset(&rxq->lro_arr, 0, sizeof(rxq->lro_arr));

	return 0;


out_free:
	if (index == 0 && size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(mp->dev->dev.parent, size,
				  rxq->rx_desc_area,
				  rxq->rx_desc_dma);

out:
	return -ENOMEM;
}

static void rxq_deinit(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int i;

	rxq_disable(rxq);

	for (i = 0; i < rxq->rx_ring_size; i++) {
		if (rxq->rx_skb[i]) {
			dev_kfree_skb(rxq->rx_skb[i]);
			rxq->rx_desc_count--;
		}
	}

	if (rxq->rx_desc_count) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "error freeing rx ring -- %d skbs stuck\n",
			   rxq->rx_desc_count);
	}

	if (rxq->index == 0 &&
	    rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size,
				  rxq->rx_desc_area, rxq->rx_desc_dma);

	kfree(rxq->rx_skb);
}

static int txq_init(struct mv643xx_eth_private *mp, int index)
{
	struct tx_queue *txq = mp->txq + index;
	struct tx_desc *tx_desc;
	int size;
	int i;

	txq->index = index;

	txq->tx_ring_size = mp->tx_ring_size;

	txq->tx_desc_count = 0;
	txq->tx_curr_desc = 0;
	txq->tx_used_desc = 0;

	size = txq->tx_ring_size * sizeof(struct tx_desc);

	if (index == 0 && size <= mp->tx_desc_sram_size) {
		txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
					    mp->tx_desc_sram_size);
		txq->tx_desc_dma = mp->tx_desc_sram_addr;
	} else {
		txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
						       size, &txq->tx_desc_dma,
						       GFP_KERNEL);
	}

	if (txq->tx_desc_area == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate tx ring (%d bytes)\n", size);
		return -ENOMEM;
	}
	memset(txq->tx_desc_area, 0, size);

	txq->tx_desc_area_size = size;

	tx_desc = (struct tx_desc *)txq->tx_desc_area;
	for (i = 0; i < txq->tx_ring_size; i++) {
		struct tx_desc *txd = tx_desc + i;
		int nexti;

		nexti = i + 1;
		if (nexti == txq->tx_ring_size)
			nexti = 0;

		txd->cmd_sts = 0;
		txd->next_desc_ptr = txq->tx_desc_dma +
					nexti * sizeof(struct tx_desc);
	}

	skb_queue_head_init(&txq->tx_skb);

	return 0;
}

static void txq_deinit(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);

	txq_disable(txq);
	txq_reclaim(txq, txq->tx_ring_size, 1);

	BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);

	if (txq->index == 0 &&
	    txq->tx_desc_area_size <= mp->tx_desc_sram_size)
		iounmap(txq->tx_desc_area);
	else
		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
				  txq->tx_desc_area, txq->tx_desc_dma);
}
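
/*
 * Both rings are chained into a circle at init time: descriptor i's
 * next_desc_ptr holds the bus address of descriptor (i + 1) %
 * ring_size, so for a 4-entry ring at DMA address B, descriptor 3
 * points back to B itself.  The SDMA engine follows these links on
 * its own; the driver only rewrites the hardware's current-descriptor
 * pointer when a queue is (re)started (txq_reset_hw_ptr(),
 * port_start()).
 */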
txq->tx_ring_size; i++) { 1992 struct tx_desc *txd = tx_desc + i; 1993 int nexti; 1994 1995 nexti = i + 1; 1996 if (nexti == txq->tx_ring_size) 1997 nexti = 0; 1998 1999 txd->cmd_sts = 0; 2000 txd->next_desc_ptr = txq->tx_desc_dma + 2001 nexti * sizeof(struct tx_desc); 2002 } 2003 2004 skb_queue_head_init(&txq->tx_skb); 2005 2006 return 0; 2007} 2008 2009static void txq_deinit(struct tx_queue *txq) 2010{ 2011 struct mv643xx_eth_private *mp = txq_to_mp(txq); 2012 2013 txq_disable(txq); 2014 txq_reclaim(txq, txq->tx_ring_size, 1); 2015 2016 BUG_ON(txq->tx_used_desc != txq->tx_curr_desc); 2017 2018 if (txq->index == 0 && 2019 txq->tx_desc_area_size <= mp->tx_desc_sram_size) 2020 iounmap(txq->tx_desc_area); 2021 else 2022 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, 2023 txq->tx_desc_area, txq->tx_desc_dma); 2024} 2025 2026 2027/* netdev ops and related ***************************************************/ 2028static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp) 2029{ 2030 u32 int_cause; 2031 u32 int_cause_ext; 2032 2033 int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask; 2034 if (int_cause == 0) 2035 return 0; 2036 2037 int_cause_ext = 0; 2038 if (int_cause & INT_EXT) { 2039 int_cause &= ~INT_EXT; 2040 int_cause_ext = rdlp(mp, INT_CAUSE_EXT); 2041 } 2042 2043 if (int_cause) { 2044 wrlp(mp, INT_CAUSE, ~int_cause); 2045 mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) & 2046 ~(rdlp(mp, TXQ_COMMAND) & 0xff); 2047 mp->work_rx |= (int_cause & INT_RX) >> 2; 2048 } 2049 2050 int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX; 2051 if (int_cause_ext) { 2052 wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext); 2053 if (int_cause_ext & INT_EXT_LINK_PHY) 2054 mp->work_link = 1; 2055 mp->work_tx |= int_cause_ext & INT_EXT_TX; 2056 } 2057 2058 return 1; 2059} 2060 2061static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id) 2062{ 2063 struct net_device *dev = (struct net_device *)dev_id; 2064 struct mv643xx_eth_private *mp = netdev_priv(dev); 2065 2066 if (unlikely(!mv643xx_eth_collect_events(mp))) 2067 return IRQ_NONE; 2068 2069 wrlp(mp, INT_MASK, 0); 2070 napi_schedule(&mp->napi); 2071 2072 return IRQ_HANDLED; 2073} 2074 2075static void handle_link_event(struct mv643xx_eth_private *mp) 2076{ 2077 struct net_device *dev = mp->dev; 2078 u32 port_status; 2079 int speed; 2080 int duplex; 2081 int fc; 2082 2083 port_status = rdlp(mp, PORT_STATUS); 2084 if (!(port_status & LINK_UP)) { 2085 if (netif_carrier_ok(dev)) { 2086 int i; 2087 2088 printk(KERN_INFO "%s: link down\n", dev->name); 2089 2090 netif_carrier_off(dev); 2091 2092 for (i = 0; i < mp->txq_count; i++) { 2093 struct tx_queue *txq = mp->txq + i; 2094 2095 txq_reclaim(txq, txq->tx_ring_size, 1); 2096 txq_reset_hw_ptr(txq); 2097 } 2098 } 2099 return; 2100 } 2101 2102 switch (port_status & PORT_SPEED_MASK) { 2103 case PORT_SPEED_10: 2104 speed = 10; 2105 break; 2106 case PORT_SPEED_100: 2107 speed = 100; 2108 break; 2109 case PORT_SPEED_1000: 2110 speed = 1000; 2111 break; 2112 default: 2113 speed = -1; 2114 break; 2115 } 2116 duplex = (port_status & FULL_DUPLEX) ? 1 : 0; 2117 fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0; 2118 2119 printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, " 2120 "flow control %sabled\n", dev->name, 2121 speed, duplex ? "full" : "half", 2122 fc ? 
"en" : "dis"); 2123 2124 if (!netif_carrier_ok(dev)) 2125 netif_carrier_on(dev); 2126} 2127 2128static int mv643xx_eth_poll(struct napi_struct *napi, int budget) 2129{ 2130 struct mv643xx_eth_private *mp; 2131 int work_done; 2132 2133 mp = container_of(napi, struct mv643xx_eth_private, napi); 2134 2135 if (unlikely(mp->oom)) { 2136 mp->oom = 0; 2137 del_timer(&mp->rx_oom); 2138 } 2139 2140 work_done = 0; 2141 while (work_done < budget) { 2142 u8 queue_mask; 2143 int queue; 2144 int work_tbd; 2145 2146 if (mp->work_link) { 2147 mp->work_link = 0; 2148 handle_link_event(mp); 2149 work_done++; 2150 continue; 2151 } 2152 2153 queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx; 2154 if (likely(!mp->oom)) 2155 queue_mask |= mp->work_rx_refill; 2156 2157 if (!queue_mask) { 2158 if (mv643xx_eth_collect_events(mp)) 2159 continue; 2160 break; 2161 } 2162 2163 queue = fls(queue_mask) - 1; 2164 queue_mask = 1 << queue; 2165 2166 work_tbd = budget - work_done; 2167 if (work_tbd > 16) 2168 work_tbd = 16; 2169 2170 if (mp->work_tx_end & queue_mask) { 2171 txq_kick(mp->txq + queue); 2172 } else if (mp->work_tx & queue_mask) { 2173 work_done += txq_reclaim(mp->txq + queue, work_tbd, 0); 2174 txq_maybe_wake(mp->txq + queue); 2175 } else if (mp->work_rx & queue_mask) { 2176 work_done += rxq_process(mp->rxq + queue, work_tbd); 2177 } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) { 2178 work_done += rxq_refill(mp->rxq + queue, work_tbd); 2179 } else { 2180 BUG(); 2181 } 2182 } 2183 2184 if (work_done < budget) { 2185 if (mp->oom) 2186 mod_timer(&mp->rx_oom, jiffies + (HZ / 10)); 2187 napi_complete(napi); 2188 wrlp(mp, INT_MASK, mp->int_mask); 2189 } 2190 2191 return work_done; 2192} 2193 2194static inline void oom_timer_wrapper(unsigned long data) 2195{ 2196 struct mv643xx_eth_private *mp = (void *)data; 2197 2198 napi_schedule(&mp->napi); 2199} 2200 2201static void phy_reset(struct mv643xx_eth_private *mp) 2202{ 2203 int data; 2204 2205 data = phy_read(mp->phy, MII_BMCR); 2206 if (data < 0) 2207 return; 2208 2209 data |= BMCR_RESET; 2210 if (phy_write(mp->phy, MII_BMCR, data) < 0) 2211 return; 2212 2213 do { 2214 data = phy_read(mp->phy, MII_BMCR); 2215 } while (data >= 0 && data & BMCR_RESET); 2216} 2217 2218static void port_start(struct mv643xx_eth_private *mp) 2219{ 2220 u32 pscr; 2221 int i; 2222 2223 /* 2224 * Perform PHY reset, if there is a PHY. 2225 */ 2226 if (mp->phy != NULL) { 2227 struct ethtool_cmd cmd; 2228 2229 mv643xx_eth_get_settings(mp->dev, &cmd); 2230 phy_reset(mp); 2231 mv643xx_eth_set_settings(mp->dev, &cmd); 2232 } 2233 2234 /* 2235 * Configure basic link parameters. 2236 */ 2237 pscr = rdlp(mp, PORT_SERIAL_CONTROL); 2238 2239 pscr |= SERIAL_PORT_ENABLE; 2240 wrlp(mp, PORT_SERIAL_CONTROL, pscr); 2241 2242 pscr |= DO_NOT_FORCE_LINK_FAIL; 2243 if (mp->phy == NULL) 2244 pscr |= FORCE_LINK_PASS; 2245 wrlp(mp, PORT_SERIAL_CONTROL, pscr); 2246 2247 /* 2248 * Configure TX path and queues. 2249 */ 2250 tx_set_rate(mp, 1000000000, 16777216); 2251 for (i = 0; i < mp->txq_count; i++) { 2252 struct tx_queue *txq = mp->txq + i; 2253 2254 txq_reset_hw_ptr(txq); 2255 txq_set_rate(txq, 1000000000, 16777216); 2256 txq_set_fixed_prio_mode(txq); 2257 } 2258 2259 /* 2260 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast 2261 * frames to RX queue #0, and include the pseudo-header when 2262 * calculating receive checksums. 2263 */ 2264 wrlp(mp, PORT_CONFIG, 0x02000000); 2265 2266 /* 2267 * Treat BPDUs as normal multicasts, and disable partition mode. 
2268 */ 2269 wrlp(mp, PORT_CONFIG_EXT, 0x00000000); 2270 2271 /* 2272 * Add configured unicast addresses to address filter table. 2273 */ 2274 mv643xx_eth_program_unicast_filter(mp->dev); 2275 2276 /* 2277 * Enable the receive queues. 2278 */ 2279 for (i = 0; i < mp->rxq_count; i++) { 2280 struct rx_queue *rxq = mp->rxq + i; 2281 u32 addr; 2282 2283 addr = (u32)rxq->rx_desc_dma; 2284 addr += rxq->rx_curr_desc * sizeof(struct rx_desc); 2285 wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr); 2286 2287 rxq_enable(rxq); 2288 } 2289} 2290 2291static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp) 2292{ 2293 int skb_size; 2294 2295 /* 2296 * Reserve 2+14 bytes for an ethernet header (the hardware 2297 * automatically prepends 2 bytes of dummy data to each 2298 * received packet), 16 bytes for up to four VLAN tags, and 2299 * 4 bytes for the trailing FCS -- 36 bytes total. 2300 */ 2301 skb_size = mp->dev->mtu + 36; 2302 2303 /* 2304 * Make sure that the skb size is a multiple of 8 bytes, as 2305 * the lower three bits of the receive descriptor's buffer 2306 * size field are ignored by the hardware. 2307 */ 2308 mp->skb_size = (skb_size + 7) & ~7; 2309 2310 /* 2311 * If NET_SKB_PAD is smaller than a cache line, 2312 * netdev_alloc_skb() will cause skb->data to be misaligned 2313 * to a cache line boundary. If this is the case, include 2314 * some extra space to allow re-aligning the data area. 2315 */ 2316 mp->skb_size += SKB_DMA_REALIGN; 2317} 2318 2319static int mv643xx_eth_open(struct net_device *dev) 2320{ 2321 struct mv643xx_eth_private *mp = netdev_priv(dev); 2322 int err; 2323 int i; 2324 2325 wrlp(mp, INT_CAUSE, 0); 2326 wrlp(mp, INT_CAUSE_EXT, 0); 2327 rdlp(mp, INT_CAUSE_EXT); 2328 2329 err = request_irq(dev->irq, mv643xx_eth_irq, 2330 IRQF_SHARED, dev->name, dev); 2331 if (err) { 2332 dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n"); 2333 return -EAGAIN; 2334 } 2335 2336 mv643xx_eth_recalc_skb_size(mp); 2337 2338 napi_enable(&mp->napi); 2339 2340 skb_queue_head_init(&mp->rx_recycle); 2341 2342 mp->int_mask = INT_EXT; 2343 2344 for (i = 0; i < mp->rxq_count; i++) { 2345 err = rxq_init(mp, i); 2346 if (err) { 2347 while (--i >= 0) 2348 rxq_deinit(mp->rxq + i); 2349 goto out; 2350 } 2351 2352 rxq_refill(mp->rxq + i, INT_MAX); 2353 mp->int_mask |= INT_RX_0 << i; 2354 } 2355 2356 if (mp->oom) { 2357 mp->rx_oom.expires = jiffies + (HZ / 10); 2358 add_timer(&mp->rx_oom); 2359 } 2360 2361 for (i = 0; i < mp->txq_count; i++) { 2362 err = txq_init(mp, i); 2363 if (err) { 2364 while (--i >= 0) 2365 txq_deinit(mp->txq + i); 2366 goto out_free; 2367 } 2368 mp->int_mask |= INT_TX_END_0 << i; 2369 } 2370 2371 port_start(mp); 2372 2373 wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX); 2374 wrlp(mp, INT_MASK, mp->int_mask); 2375 2376 return 0; 2377 2378 2379out_free: 2380 for (i = 0; i < mp->rxq_count; i++) 2381 rxq_deinit(mp->rxq + i); 2382out: 2383 free_irq(dev->irq, dev); 2384 2385 return err; 2386} 2387 2388static void port_reset(struct mv643xx_eth_private *mp) 2389{ 2390 unsigned int data; 2391 int i; 2392 2393 for (i = 0; i < mp->rxq_count; i++) 2394 rxq_disable(mp->rxq + i); 2395 for (i = 0; i < mp->txq_count; i++) 2396 txq_disable(mp->txq + i); 2397 2398 while (1) { 2399 u32 ps = rdlp(mp, PORT_STATUS); 2400 2401 if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY) 2402 break; 2403 udelay(10); 2404 } 2405 2406 /* Reset the Enable bit in the Configuration Register */ 2407 data = rdlp(mp, PORT_SERIAL_CONTROL); 2408 data &= ~(SERIAL_PORT_ENABLE | 2409 DO_NOT_FORCE_LINK_FAIL 
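/*
 * Worked example for mv643xx_eth_recalc_skb_size() above
 * (illustration only, not driver code): with the standard MTU of
 * 1500, skb_size = 1500 + 36 = 1536, already a multiple of 8, so
 * "(skb_size + 7) & ~7" leaves it unchanged; an MTU of 1522 gives
 * 1558, which rounds up to 1560.  SKB_DMA_REALIGN is then added on
 * top in either case.
 */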
| 2410 FORCE_LINK_PASS); 2411 wrlp(mp, PORT_SERIAL_CONTROL, data); 2412} 2413 2414static int mv643xx_eth_stop(struct net_device *dev) 2415{ 2416 struct mv643xx_eth_private *mp = netdev_priv(dev); 2417 int i; 2418 2419 wrlp(mp, INT_MASK_EXT, 0x00000000); 2420 wrlp(mp, INT_MASK, 0x00000000); 2421 rdlp(mp, INT_MASK); 2422 2423 napi_disable(&mp->napi); 2424 2425 del_timer_sync(&mp->rx_oom); 2426 2427 netif_carrier_off(dev); 2428 2429 free_irq(dev->irq, dev); 2430 2431 port_reset(mp); 2432 mv643xx_eth_get_stats(dev); 2433 mib_counters_update(mp); 2434 del_timer_sync(&mp->mib_counters_timer); 2435 2436 skb_queue_purge(&mp->rx_recycle); 2437 2438 for (i = 0; i < mp->rxq_count; i++) 2439 rxq_deinit(mp->rxq + i); 2440 for (i = 0; i < mp->txq_count; i++) 2441 txq_deinit(mp->txq + i); 2442 2443 return 0; 2444} 2445 2446static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2447{ 2448 struct mv643xx_eth_private *mp = netdev_priv(dev); 2449 2450 if (mp->phy != NULL) 2451 return phy_mii_ioctl(mp->phy, if_mii(ifr), cmd); 2452 2453 return -EOPNOTSUPP; 2454} 2455 2456static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) 2457{ 2458 struct mv643xx_eth_private *mp = netdev_priv(dev); 2459 2460 if (new_mtu < 64 || new_mtu > 9500) 2461 return -EINVAL; 2462 2463 dev->mtu = new_mtu; 2464 mv643xx_eth_recalc_skb_size(mp); 2465 tx_set_rate(mp, 1000000000, 16777216); 2466 2467 if (!netif_running(dev)) 2468 return 0; 2469 2470 /* 2471 * Stop and then re-open the interface. This will allocate RX 2472 * skbs of the new MTU. 2473 * There is a possible danger that the open will not succeed, 2474 * due to memory being full. 2475 */ 2476 mv643xx_eth_stop(dev); 2477 if (mv643xx_eth_open(dev)) { 2478 dev_printk(KERN_ERR, &dev->dev, 2479 "fatal error on re-opening device after " 2480 "MTU change\n"); 2481 } 2482 2483 return 0; 2484} 2485 2486static void tx_timeout_task(struct work_struct *ugly) 2487{ 2488 struct mv643xx_eth_private *mp; 2489 2490 mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task); 2491 if (netif_running(mp->dev)) { 2492 netif_tx_stop_all_queues(mp->dev); 2493 port_reset(mp); 2494 port_start(mp); 2495 netif_tx_wake_all_queues(mp->dev); 2496 } 2497} 2498 2499static void mv643xx_eth_tx_timeout(struct net_device *dev) 2500{ 2501 struct mv643xx_eth_private *mp = netdev_priv(dev); 2502 2503 dev_printk(KERN_INFO, &dev->dev, "tx timeout\n"); 2504 2505 schedule_work(&mp->tx_timeout_task); 2506} 2507 2508#ifdef CONFIG_NET_POLL_CONTROLLER 2509static void mv643xx_eth_netpoll(struct net_device *dev) 2510{ 2511 struct mv643xx_eth_private *mp = netdev_priv(dev); 2512 2513 wrlp(mp, INT_MASK, 0x00000000); 2514 rdlp(mp, INT_MASK); 2515 2516 mv643xx_eth_irq(dev->irq, dev); 2517 2518 wrlp(mp, INT_MASK, mp->int_mask); 2519} 2520#endif 2521 2522 2523/* platform glue ************************************************************/ 2524static void 2525mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp, 2526 struct mbus_dram_target_info *dram) 2527{ 2528 void __iomem *base = msp->base; 2529 u32 win_enable; 2530 u32 win_protect; 2531 int i; 2532 2533 for (i = 0; i < 6; i++) { 2534 writel(0, base + WINDOW_BASE(i)); 2535 writel(0, base + WINDOW_SIZE(i)); 2536 if (i < 4) 2537 writel(0, base + WINDOW_REMAP_HIGH(i)); 2538 } 2539 2540 win_enable = 0x3f; 2541 win_protect = 0; 2542 2543 for (i = 0; i < dram->num_cs; i++) { 2544 struct mbus_dram_window *cs = dram->cs + i; 2545 2546 writel((cs->base & 0xffff0000) | 2547 (cs->mbus_attr << 8) | 2548 
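/*
 * Window base layout (illustration): bits 31:16 hold the window
 * base address, bits 15:8 the mbus attribute, and the low byte the
 * DRAM target id.  E.g. a chip select at 0x00000000 with attribute
 * 0x0e and target 0 is written as 0x00000e00, and a 256 MB window
 * size is encoded as (0x10000000 - 1) & 0xffff0000 = 0x0fff0000.
 */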
dram->mbus_dram_target_id, base + WINDOW_BASE(i)); 2549 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); 2550 2551 win_enable &= ~(1 << i); 2552 win_protect |= 3 << (2 * i); 2553 } 2554 2555 writel(win_enable, base + WINDOW_BAR_ENABLE); 2556 msp->win_protect = win_protect; 2557} 2558 2559static void infer_hw_params(struct mv643xx_eth_shared_private *msp) 2560{ 2561 /* 2562 * Check whether we have a 14-bit coal limit field in bits 2563 * [21:8], or a 16-bit coal limit in bits [25,21:7] of the 2564 * SDMA config register. 2565 */ 2566 writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG); 2567 if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000) 2568 msp->extended_rx_coal_limit = 1; 2569 else 2570 msp->extended_rx_coal_limit = 0; 2571 2572 /* 2573 * Check whether the MAC supports TX rate control, and if 2574 * yes, whether its associated registers are in the old or 2575 * the new place. 2576 */ 2577 writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED); 2578 if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) { 2579 msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT; 2580 } else { 2581 writel(7, msp->base + 0x0400 + TX_BW_RATE); 2582 if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7) 2583 msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT; 2584 else 2585 msp->tx_bw_control = TX_BW_CONTROL_ABSENT; 2586 } 2587} 2588 2589static int mv643xx_eth_shared_probe(struct platform_device *pdev) 2590{ 2591 static int mv643xx_eth_version_printed; 2592 struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; 2593 struct mv643xx_eth_shared_private *msp; 2594 struct resource *res; 2595 int ret; 2596 2597 if (!mv643xx_eth_version_printed++) 2598 printk(KERN_NOTICE "MV-643xx 10/100/1000 ethernet " 2599 "driver version %s\n", mv643xx_eth_driver_version); 2600 2601 ret = -EINVAL; 2602 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2603 if (res == NULL) 2604 goto out; 2605 2606 ret = -ENOMEM; 2607 msp = kmalloc(sizeof(*msp), GFP_KERNEL); 2608 if (msp == NULL) 2609 goto out; 2610 memset(msp, 0, sizeof(*msp)); 2611 2612 msp->base = ioremap(res->start, res->end - res->start + 1); 2613 if (msp->base == NULL) 2614 goto out_free; 2615 2616 /* 2617 * Set up and register SMI bus. 2618 */ 2619 if (pd == NULL || pd->shared_smi == NULL) { 2620 msp->smi_bus = mdiobus_alloc(); 2621 if (msp->smi_bus == NULL) 2622 goto out_unmap; 2623 2624 msp->smi_bus->priv = msp; 2625 msp->smi_bus->name = "mv643xx_eth smi"; 2626 msp->smi_bus->read = smi_bus_read; 2627 msp->smi_bus->write = smi_bus_write; 2628 snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id); 2629 msp->smi_bus->parent = &pdev->dev; 2630 msp->smi_bus->phy_mask = 0xffffffff; 2631 if (mdiobus_register(msp->smi_bus) < 0) 2632 goto out_free_mii_bus; 2633 msp->smi = msp; 2634 } else { 2635 msp->smi = platform_get_drvdata(pd->shared_smi); 2636 } 2637 2638 msp->err_interrupt = NO_IRQ; 2639 init_waitqueue_head(&msp->smi_busy_wait); 2640 2641 /* 2642 * Check whether the error interrupt is hooked up. 2643 */ 2644 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 2645 if (res != NULL) { 2646 int err; 2647 2648 err = request_irq(res->start, mv643xx_eth_err_irq, 2649 IRQF_SHARED, "mv643xx_eth", msp); 2650 if (!err) { 2651 writel(ERR_INT_SMI_DONE, msp->base + ERR_INT_MASK); 2652 msp->err_interrupt = res->start; 2653 } 2654 } 2655 2656 /* 2657 * (Re-)program MBUS remapping windows if we are asked to. 2658 */ 2659 if (pd != NULL && pd->dram != NULL) 2660 mv643xx_eth_conf_mbus_windows(msp, pd->dram); 2661 2662 /* 2663 * Detect hardware parameters.
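 *
 * (infer_hw_params() above uses a write-then-read-back probe:
 * e.g. it sets bit 25 of port 0's SDMA config register and, if the
 * bit sticks, concludes the silicon has the extended 16-bit rx coal
 * limit field.  The same trick distinguishes the old, new and
 * absent TX bandwidth-control register layouts.)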
2664 */ 2665 msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000; 2666 infer_hw_params(msp); 2667 2668 platform_set_drvdata(pdev, msp); 2669 2670 return 0; 2671 2672out_free_mii_bus: 2673 mdiobus_free(msp->smi_bus); 2674out_unmap: 2675 iounmap(msp->base); 2676out_free: 2677 kfree(msp); 2678out: 2679 return ret; 2680} 2681 2682static int mv643xx_eth_shared_remove(struct platform_device *pdev) 2683{ 2684 struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev); 2685 struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; 2686 2687 if (pd == NULL || pd->shared_smi == NULL) { 2688 mdiobus_unregister(msp->smi_bus); 2689 mdiobus_free(msp->smi_bus); 2690 } 2691 if (msp->err_interrupt != NO_IRQ) 2692 free_irq(msp->err_interrupt, msp); 2693 iounmap(msp->base); 2694 kfree(msp); 2695 2696 return 0; 2697} 2698 2699static struct platform_driver mv643xx_eth_shared_driver = { 2700 .probe = mv643xx_eth_shared_probe, 2701 .remove = mv643xx_eth_shared_remove, 2702 .driver = { 2703 .name = MV643XX_ETH_SHARED_NAME, 2704 .owner = THIS_MODULE, 2705 }, 2706}; 2707 2708static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr) 2709{ 2710 int addr_shift = 5 * mp->port_num; 2711 u32 data; 2712 2713 data = rdl(mp, PHY_ADDR); 2714 data &= ~(0x1f << addr_shift); 2715 data |= (phy_addr & 0x1f) << addr_shift; 2716 wrl(mp, PHY_ADDR, data); 2717} 2718 2719static int phy_addr_get(struct mv643xx_eth_private *mp) 2720{ 2721 unsigned int data; 2722 2723 data = rdl(mp, PHY_ADDR); 2724 2725 return (data >> (5 * mp->port_num)) & 0x1f; 2726} 2727 2728static void set_params(struct mv643xx_eth_private *mp, 2729 struct mv643xx_eth_platform_data *pd) 2730{ 2731 struct net_device *dev = mp->dev; 2732 2733 if (is_valid_ether_addr(pd->mac_addr)) 2734 memcpy(dev->dev_addr, pd->mac_addr, 6); 2735 else 2736 uc_addr_get(mp, dev->dev_addr); 2737 2738 mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE; 2739 if (pd->rx_queue_size) 2740 mp->rx_ring_size = pd->rx_queue_size; 2741 mp->rx_desc_sram_addr = pd->rx_sram_addr; 2742 mp->rx_desc_sram_size = pd->rx_sram_size; 2743 2744 mp->rxq_count = pd->rx_queue_count ? : 1; 2745 2746 mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE; 2747 if (pd->tx_queue_size) 2748 mp->tx_ring_size = pd->tx_queue_size; 2749 mp->tx_desc_sram_addr = pd->tx_sram_addr; 2750 mp->tx_desc_sram_size = pd->tx_sram_size; 2751 2752 mp->txq_count = pd->tx_queue_count ? 
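/*
 * "x ? : 1" is the GNU C shorthand for "x ? x : 1", i.e. fall back
 * to a single queue when the platform data leaves the count at
 * zero.  Relatedly, phy_addr_set() above packs one 5-bit PHY
 * address per port into PHY_ADDR; port 2's field, for example,
 * occupies bits 14:10.
 */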
: 1; 2753} 2754 2755static struct phy_device *phy_scan(struct mv643xx_eth_private *mp, 2756 int phy_addr) 2757{ 2758 struct mii_bus *bus = mp->shared->smi->smi_bus; 2759 struct phy_device *phydev; 2760 int start; 2761 int num; 2762 int i; 2763 2764 if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) { 2765 start = phy_addr_get(mp) & 0x1f; 2766 num = 32; 2767 } else { 2768 start = phy_addr & 0x1f; 2769 num = 1; 2770 } 2771 2772 phydev = NULL; 2773 for (i = 0; i < num; i++) { 2774 int addr = (start + i) & 0x1f; 2775 2776 if (bus->phy_map[addr] == NULL) 2777 mdiobus_scan(bus, addr); 2778 2779 if (phydev == NULL) { 2780 phydev = bus->phy_map[addr]; 2781 if (phydev != NULL) 2782 phy_addr_set(mp, addr); 2783 } 2784 } 2785 2786 return phydev; 2787} 2788 2789static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex) 2790{ 2791 struct phy_device *phy = mp->phy; 2792 2793 phy_reset(mp); 2794 2795 phy_attach(mp->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_GMII); 2796 2797 if (speed == 0) { 2798 phy->autoneg = AUTONEG_ENABLE; 2799 phy->speed = 0; 2800 phy->duplex = 0; 2801 phy->advertising = phy->supported | ADVERTISED_Autoneg; 2802 } else { 2803 phy->autoneg = AUTONEG_DISABLE; 2804 phy->advertising = 0; 2805 phy->speed = speed; 2806 phy->duplex = duplex; 2807 } 2808 phy_start_aneg(phy); 2809} 2810 2811static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex) 2812{ 2813 u32 pscr; 2814 2815 pscr = rdlp(mp, PORT_SERIAL_CONTROL); 2816 if (pscr & SERIAL_PORT_ENABLE) { 2817 pscr &= ~SERIAL_PORT_ENABLE; 2818 wrlp(mp, PORT_SERIAL_CONTROL, pscr); 2819 } 2820 2821 pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED; 2822 if (mp->phy == NULL) { 2823 pscr |= DISABLE_AUTO_NEG_SPEED_GMII; 2824 if (speed == SPEED_1000) 2825 pscr |= SET_GMII_SPEED_TO_1000; 2826 else if (speed == SPEED_100) 2827 pscr |= SET_MII_SPEED_TO_100; 2828 2829 pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL; 2830 2831 pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX; 2832 if (duplex == DUPLEX_FULL) 2833 pscr |= SET_FULL_DUPLEX_MODE; 2834 } 2835 2836 wrlp(mp, PORT_SERIAL_CONTROL, pscr); 2837} 2838 2839static const struct net_device_ops mv643xx_eth_netdev_ops = { 2840 .ndo_open = mv643xx_eth_open, 2841 .ndo_stop = mv643xx_eth_stop, 2842 .ndo_start_xmit = mv643xx_eth_xmit, 2843 .ndo_set_rx_mode = mv643xx_eth_set_rx_mode, 2844 .ndo_set_mac_address = mv643xx_eth_set_mac_address, 2845 .ndo_do_ioctl = mv643xx_eth_ioctl, 2846 .ndo_change_mtu = mv643xx_eth_change_mtu, 2847 .ndo_tx_timeout = mv643xx_eth_tx_timeout, 2848 .ndo_get_stats = mv643xx_eth_get_stats, 2849#ifdef CONFIG_NET_POLL_CONTROLLER 2850 .ndo_poll_controller = mv643xx_eth_netpoll, 2851#endif 2852}; 2853 2854static int mv643xx_eth_probe(struct platform_device *pdev) 2855{ 2856 struct mv643xx_eth_platform_data *pd; 2857 struct mv643xx_eth_private *mp; 2858 struct net_device *dev; 2859 struct resource *res; 2860 int err; 2861 2862 pd = pdev->dev.platform_data; 2863 if (pd == NULL) { 2864 dev_printk(KERN_ERR, &pdev->dev, 2865 "no mv643xx_eth_platform_data\n"); 2866 return -ENODEV; 2867 } 2868 2869 if (pd->shared == NULL) { 2870 dev_printk(KERN_ERR, &pdev->dev, 2871 "no mv643xx_eth_platform_data->shared\n"); 2872 return -ENODEV; 2873 } 2874 2875 dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8); 2876 if (!dev) 2877 return -ENOMEM; 2878 2879 mp = netdev_priv(dev); 2880 platform_set_drvdata(pdev, mp); 2881 2882 mp->shared = platform_get_drvdata(pd->shared); 2883 mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10); 2884 mp->port_num = pd->port_number; 
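/*
 * Worked example: the per-port register base computed just above is
 * shared->base + 0x0400 + (port_number << 10), i.e. 0x0400, 0x0800
 * and 0x0c00 for ports 0, 1 and 2 respectively.
 */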
2885 2886 mp->dev = dev; 2887 2888 set_params(mp, pd); 2889 dev->real_num_tx_queues = mp->txq_count; 2890 2891 if (pd->phy_addr != MV643XX_ETH_PHY_NONE) 2892 mp->phy = phy_scan(mp, pd->phy_addr); 2893 2894 if (mp->phy != NULL) 2895 phy_init(mp, pd->speed, pd->duplex); 2896 2897 SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops); 2898 2899 init_pscr(mp, pd->speed, pd->duplex); 2900 2901 2902 mib_counters_clear(mp); 2903 2904 init_timer(&mp->mib_counters_timer); 2905 mp->mib_counters_timer.data = (unsigned long)mp; 2906 mp->mib_counters_timer.function = mib_counters_timer_wrapper; 2907 mp->mib_counters_timer.expires = jiffies + 30 * HZ; 2908 add_timer(&mp->mib_counters_timer); 2909 2910 spin_lock_init(&mp->mib_counters_lock); 2911 2912 INIT_WORK(&mp->tx_timeout_task, tx_timeout_task); 2913 2914 netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128); 2915 2916 init_timer(&mp->rx_oom); 2917 mp->rx_oom.data = (unsigned long)mp; 2918 mp->rx_oom.function = oom_timer_wrapper; 2919 2920 2921 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 2922 BUG_ON(!res); 2923 dev->irq = res->start; 2924 2925 dev->netdev_ops = &mv643xx_eth_netdev_ops; 2926 2927 dev->watchdog_timeo = 2 * HZ; 2928 dev->base_addr = 0; 2929 2930 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM; 2931 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM; 2932 2933 SET_NETDEV_DEV(dev, &pdev->dev); 2934 2935 if (mp->shared->win_protect) 2936 wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect); 2937 2938 netif_carrier_off(dev); 2939 2940 wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE); 2941 2942 set_rx_coal(mp, 250); 2943 set_tx_coal(mp, 0); 2944 2945 err = register_netdev(dev); 2946 if (err) 2947 goto out; 2948 2949 dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %pM\n", 2950 mp->port_num, dev->dev_addr); 2951 2952 if (mp->tx_desc_sram_size > 0) 2953 dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n"); 2954 2955 return 0; 2956 2957out: 2958 free_netdev(dev); 2959 2960 return err; 2961} 2962 2963static int mv643xx_eth_remove(struct platform_device *pdev) 2964{ 2965 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); 2966 2967 unregister_netdev(mp->dev); 2968 if (mp->phy != NULL) 2969 phy_detach(mp->phy); 2970 flush_scheduled_work(); 2971 free_netdev(mp->dev); 2972 2973 platform_set_drvdata(pdev, NULL); 2974 2975 return 0; 2976} 2977 2978static void mv643xx_eth_shutdown(struct platform_device *pdev) 2979{ 2980 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); 2981 2982 /* Mask all interrupts on ethernet port */ 2983 wrlp(mp, INT_MASK, 0); 2984 rdlp(mp, INT_MASK); 2985 2986 if (netif_running(mp->dev)) 2987 port_reset(mp); 2988} 2989 2990static struct platform_driver mv643xx_eth_driver = { 2991 .probe = mv643xx_eth_probe, 2992 .remove = mv643xx_eth_remove, 2993 .shutdown = mv643xx_eth_shutdown, 2994 .driver = { 2995 .name = MV643XX_ETH_NAME, 2996 .owner = THIS_MODULE, 2997 }, 2998}; 2999 3000static int __init mv643xx_eth_init_module(void) 3001{ 3002 int rc; 3003 3004 rc = platform_driver_register(&mv643xx_eth_shared_driver); 3005 if (!rc) { 3006 rc = platform_driver_register(&mv643xx_eth_driver); 3007 if (rc) 3008 platform_driver_unregister(&mv643xx_eth_shared_driver); 3009 } 3010 3011 return rc; 3012} 3013module_init(mv643xx_eth_init_module); 3014 3015static void __exit mv643xx_eth_cleanup_module(void) 3016{ 3017 platform_driver_unregister(&mv643xx_eth_driver); 3018 platform_driver_unregister(&mv643xx_eth_shared_driver); 3019} 3020module_exit(mv643xx_eth_cleanup_module); 3021 
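/*
 * A minimal, hypothetical sketch of the platform glue a board file
 * would supply (names and values illustrative only, not taken from
 * any real board):
 *
 *	static struct mv643xx_eth_platform_data eth0_pd = {
 *		.shared		= &board_eth_shared_pdev,
 *		.port_number	= 0,
 *		.phy_addr	= MV643XX_ETH_PHY_ADDR(8),
 *	};
 *
 * The shared device must register first: mv643xx_eth_probe()
 * resolves pd->shared via platform_get_drvdata(), which is only
 * valid once mv643xx_eth_shared_probe() has run -- hence the
 * ordering enforced in mv643xx_eth_init_module() above.
 */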
3022MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, " 3023 "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek"); 3024MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX"); 3025MODULE_LICENSE("GPL"); 3026MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME); 3027MODULE_ALIAS("platform:" MV643XX_ETH_NAME);