Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.31-rc9 3061 lines 73 kB view raw
1/* 2 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports 3 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com> 4 * 5 * Based on the 64360 driver from: 6 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il> 7 * Rabeeh Khoury <rabeeh@marvell.com> 8 * 9 * Copyright (C) 2003 PMC-Sierra, Inc., 10 * written by Manish Lachwani 11 * 12 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org> 13 * 14 * Copyright (C) 2004-2006 MontaVista Software, Inc. 15 * Dale Farnsworth <dale@farnsworth.org> 16 * 17 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com> 18 * <sjhill@realitydiluted.com> 19 * 20 * Copyright (C) 2007-2008 Marvell Semiconductor 21 * Lennert Buytenhek <buytenh@marvell.com> 22 * 23 * This program is free software; you can redistribute it and/or 24 * modify it under the terms of the GNU General Public License 25 * as published by the Free Software Foundation; either version 2 26 * of the License, or (at your option) any later version. 27 * 28 * This program is distributed in the hope that it will be useful, 29 * but WITHOUT ANY WARRANTY; without even the implied warranty of 30 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 31 * GNU General Public License for more details. 32 * 33 * You should have received a copy of the GNU General Public License 34 * along with this program; if not, write to the Free Software 35 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 36 */ 37 38#include <linux/init.h> 39#include <linux/dma-mapping.h> 40#include <linux/in.h> 41#include <linux/ip.h> 42#include <linux/tcp.h> 43#include <linux/udp.h> 44#include <linux/etherdevice.h> 45#include <linux/delay.h> 46#include <linux/ethtool.h> 47#include <linux/platform_device.h> 48#include <linux/module.h> 49#include <linux/kernel.h> 50#include <linux/spinlock.h> 51#include <linux/workqueue.h> 52#include <linux/phy.h> 53#include <linux/mv643xx_eth.h> 54#include <linux/io.h> 55#include <linux/types.h> 56#include <linux/inet_lro.h> 57#include <asm/system.h> 58#include <linux/list.h> 59 60static char mv643xx_eth_driver_name[] = "mv643xx_eth"; 61static char mv643xx_eth_driver_version[] = "1.4"; 62 63 64/* 65 * Registers shared between all ports. 66 */ 67#define PHY_ADDR 0x0000 68#define SMI_REG 0x0004 69#define SMI_BUSY 0x10000000 70#define SMI_READ_VALID 0x08000000 71#define SMI_OPCODE_READ 0x04000000 72#define SMI_OPCODE_WRITE 0x00000000 73#define ERR_INT_CAUSE 0x0080 74#define ERR_INT_SMI_DONE 0x00000010 75#define ERR_INT_MASK 0x0084 76#define WINDOW_BASE(w) (0x0200 + ((w) << 3)) 77#define WINDOW_SIZE(w) (0x0204 + ((w) << 3)) 78#define WINDOW_REMAP_HIGH(w) (0x0280 + ((w) << 2)) 79#define WINDOW_BAR_ENABLE 0x0290 80#define WINDOW_PROTECT(w) (0x0294 + ((w) << 4)) 81 82/* 83 * Main per-port registers. These live at offset 0x0400 for 84 * port #0, 0x0800 for port #1, and 0x0c00 for port #2. 85 */ 86#define PORT_CONFIG 0x0000 87#define UNICAST_PROMISCUOUS_MODE 0x00000001 88#define PORT_CONFIG_EXT 0x0004 89#define MAC_ADDR_LOW 0x0014 90#define MAC_ADDR_HIGH 0x0018 91#define SDMA_CONFIG 0x001c 92#define TX_BURST_SIZE_16_64BIT 0x01000000 93#define TX_BURST_SIZE_4_64BIT 0x00800000 94#define BLM_TX_NO_SWAP 0x00000020 95#define BLM_RX_NO_SWAP 0x00000010 96#define RX_BURST_SIZE_16_64BIT 0x00000008 97#define RX_BURST_SIZE_4_64BIT 0x00000004 98#define PORT_SERIAL_CONTROL 0x003c 99#define SET_MII_SPEED_TO_100 0x01000000 100#define SET_GMII_SPEED_TO_1000 0x00800000 101#define SET_FULL_DUPLEX_MODE 0x00200000 102#define MAX_RX_PACKET_9700BYTE 0x000a0000 103#define DISABLE_AUTO_NEG_SPEED_GMII 0x00002000 104#define DO_NOT_FORCE_LINK_FAIL 0x00000400 105#define SERIAL_PORT_CONTROL_RESERVED 0x00000200 106#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL 0x00000008 107#define DISABLE_AUTO_NEG_FOR_DUPLEX 0x00000004 108#define FORCE_LINK_PASS 0x00000002 109#define SERIAL_PORT_ENABLE 0x00000001 110#define PORT_STATUS 0x0044 111#define TX_FIFO_EMPTY 0x00000400 112#define TX_IN_PROGRESS 0x00000080 113#define PORT_SPEED_MASK 0x00000030 114#define PORT_SPEED_1000 0x00000010 115#define PORT_SPEED_100 0x00000020 116#define PORT_SPEED_10 0x00000000 117#define FLOW_CONTROL_ENABLED 0x00000008 118#define FULL_DUPLEX 0x00000004 119#define LINK_UP 0x00000002 120#define TXQ_COMMAND 0x0048 121#define TXQ_FIX_PRIO_CONF 0x004c 122#define TX_BW_RATE 0x0050 123#define TX_BW_MTU 0x0058 124#define TX_BW_BURST 0x005c 125#define INT_CAUSE 0x0060 126#define INT_TX_END 0x07f80000 127#define INT_TX_END_0 0x00080000 128#define INT_RX 0x000003fc 129#define INT_RX_0 0x00000004 130#define INT_EXT 0x00000002 131#define INT_CAUSE_EXT 0x0064 132#define INT_EXT_LINK_PHY 0x00110000 133#define INT_EXT_TX 0x000000ff 134#define INT_MASK 0x0068 135#define INT_MASK_EXT 0x006c 136#define TX_FIFO_URGENT_THRESHOLD 0x0074 137#define TXQ_FIX_PRIO_CONF_MOVED 0x00dc 138#define TX_BW_RATE_MOVED 0x00e0 139#define TX_BW_MTU_MOVED 0x00e8 140#define TX_BW_BURST_MOVED 0x00ec 141#define RXQ_CURRENT_DESC_PTR(q) (0x020c + ((q) << 4)) 142#define RXQ_COMMAND 0x0280 143#define TXQ_CURRENT_DESC_PTR(q) (0x02c0 + ((q) << 2)) 144#define TXQ_BW_TOKENS(q) (0x0300 + ((q) << 4)) 145#define TXQ_BW_CONF(q) (0x0304 + ((q) << 4)) 146#define TXQ_BW_WRR_CONF(q) (0x0308 + ((q) << 4)) 147 148/* 149 * Misc per-port registers. 150 */ 151#define MIB_COUNTERS(p) (0x1000 + ((p) << 7)) 152#define SPECIAL_MCAST_TABLE(p) (0x1400 + ((p) << 10)) 153#define OTHER_MCAST_TABLE(p) (0x1500 + ((p) << 10)) 154#define UNICAST_TABLE(p) (0x1600 + ((p) << 10)) 155 156 157/* 158 * SDMA configuration register default value. 159 */ 160#if defined(__BIG_ENDIAN) 161#define PORT_SDMA_CONFIG_DEFAULT_VALUE \ 162 (RX_BURST_SIZE_4_64BIT | \ 163 TX_BURST_SIZE_4_64BIT) 164#elif defined(__LITTLE_ENDIAN) 165#define PORT_SDMA_CONFIG_DEFAULT_VALUE \ 166 (RX_BURST_SIZE_4_64BIT | \ 167 BLM_RX_NO_SWAP | \ 168 BLM_TX_NO_SWAP | \ 169 TX_BURST_SIZE_4_64BIT) 170#else 171#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined 172#endif 173 174 175/* 176 * Misc definitions. 177 */ 178#define DEFAULT_RX_QUEUE_SIZE 128 179#define DEFAULT_TX_QUEUE_SIZE 256 180#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES) 181 182 183/* 184 * RX/TX descriptors. 185 */ 186#if defined(__BIG_ENDIAN) 187struct rx_desc { 188 u16 byte_cnt; /* Descriptor buffer byte count */ 189 u16 buf_size; /* Buffer size */ 190 u32 cmd_sts; /* Descriptor command status */ 191 u32 next_desc_ptr; /* Next descriptor pointer */ 192 u32 buf_ptr; /* Descriptor buffer pointer */ 193}; 194 195struct tx_desc { 196 u16 byte_cnt; /* buffer byte count */ 197 u16 l4i_chk; /* CPU provided TCP checksum */ 198 u32 cmd_sts; /* Command/status field */ 199 u32 next_desc_ptr; /* Pointer to next descriptor */ 200 u32 buf_ptr; /* pointer to buffer for this descriptor*/ 201}; 202#elif defined(__LITTLE_ENDIAN) 203struct rx_desc { 204 u32 cmd_sts; /* Descriptor command status */ 205 u16 buf_size; /* Buffer size */ 206 u16 byte_cnt; /* Descriptor buffer byte count */ 207 u32 buf_ptr; /* Descriptor buffer pointer */ 208 u32 next_desc_ptr; /* Next descriptor pointer */ 209}; 210 211struct tx_desc { 212 u32 cmd_sts; /* Command/status field */ 213 u16 l4i_chk; /* CPU provided TCP checksum */ 214 u16 byte_cnt; /* buffer byte count */ 215 u32 buf_ptr; /* pointer to buffer for this descriptor*/ 216 u32 next_desc_ptr; /* Pointer to next descriptor */ 217}; 218#else 219#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined 220#endif 221 222/* RX & TX descriptor command */ 223#define BUFFER_OWNED_BY_DMA 0x80000000 224 225/* RX & TX descriptor status */ 226#define ERROR_SUMMARY 0x00000001 227 228/* RX descriptor status */ 229#define LAYER_4_CHECKSUM_OK 0x40000000 230#define RX_ENABLE_INTERRUPT 0x20000000 231#define RX_FIRST_DESC 0x08000000 232#define RX_LAST_DESC 0x04000000 233#define RX_IP_HDR_OK 0x02000000 234#define RX_PKT_IS_IPV4 0x01000000 235#define RX_PKT_IS_ETHERNETV2 0x00800000 236#define RX_PKT_LAYER4_TYPE_MASK 0x00600000 237#define RX_PKT_LAYER4_TYPE_TCP_IPV4 0x00000000 238#define RX_PKT_IS_VLAN_TAGGED 0x00080000 239 240/* TX descriptor command */ 241#define TX_ENABLE_INTERRUPT 0x00800000 242#define GEN_CRC 0x00400000 243#define TX_FIRST_DESC 0x00200000 244#define TX_LAST_DESC 0x00100000 245#define ZERO_PADDING 0x00080000 246#define GEN_IP_V4_CHECKSUM 0x00040000 247#define GEN_TCP_UDP_CHECKSUM 0x00020000 248#define UDP_FRAME 0x00010000 249#define MAC_HDR_EXTRA_4_BYTES 0x00008000 250#define MAC_HDR_EXTRA_8_BYTES 0x00000200 251 252#define TX_IHL_SHIFT 11 253 254 255/* global *******************************************************************/ 256struct mv643xx_eth_shared_private { 257 /* 258 * Ethernet controller base address. 259 */ 260 void __iomem *base; 261 262 /* 263 * Points at the right SMI instance to use. 264 */ 265 struct mv643xx_eth_shared_private *smi; 266 267 /* 268 * Provides access to local SMI interface. 269 */ 270 struct mii_bus *smi_bus; 271 272 /* 273 * If we have access to the error interrupt pin (which is 274 * somewhat misnamed as it not only reflects internal errors 275 * but also reflects SMI completion), use that to wait for 276 * SMI access completion instead of polling the SMI busy bit. 277 */ 278 int err_interrupt; 279 wait_queue_head_t smi_busy_wait; 280 281 /* 282 * Per-port MBUS window access register value. 283 */ 284 u32 win_protect; 285 286 /* 287 * Hardware-specific parameters. 288 */ 289 unsigned int t_clk; 290 int extended_rx_coal_limit; 291 int tx_bw_control; 292}; 293 294#define TX_BW_CONTROL_ABSENT 0 295#define TX_BW_CONTROL_OLD_LAYOUT 1 296#define TX_BW_CONTROL_NEW_LAYOUT 2 297 298static int mv643xx_eth_open(struct net_device *dev); 299static int mv643xx_eth_stop(struct net_device *dev); 300 301 302/* per-port *****************************************************************/ 303struct mib_counters { 304 u64 good_octets_received; 305 u32 bad_octets_received; 306 u32 internal_mac_transmit_err; 307 u32 good_frames_received; 308 u32 bad_frames_received; 309 u32 broadcast_frames_received; 310 u32 multicast_frames_received; 311 u32 frames_64_octets; 312 u32 frames_65_to_127_octets; 313 u32 frames_128_to_255_octets; 314 u32 frames_256_to_511_octets; 315 u32 frames_512_to_1023_octets; 316 u32 frames_1024_to_max_octets; 317 u64 good_octets_sent; 318 u32 good_frames_sent; 319 u32 excessive_collision; 320 u32 multicast_frames_sent; 321 u32 broadcast_frames_sent; 322 u32 unrec_mac_control_received; 323 u32 fc_sent; 324 u32 good_fc_received; 325 u32 bad_fc_received; 326 u32 undersize_received; 327 u32 fragments_received; 328 u32 oversize_received; 329 u32 jabber_received; 330 u32 mac_receive_error; 331 u32 bad_crc_event; 332 u32 collision; 333 u32 late_collision; 334}; 335 336struct lro_counters { 337 u32 lro_aggregated; 338 u32 lro_flushed; 339 u32 lro_no_desc; 340}; 341 342struct rx_queue { 343 int index; 344 345 int rx_ring_size; 346 347 int rx_desc_count; 348 int rx_curr_desc; 349 int rx_used_desc; 350 351 struct rx_desc *rx_desc_area; 352 dma_addr_t rx_desc_dma; 353 int rx_desc_area_size; 354 struct sk_buff **rx_skb; 355 356 struct net_lro_mgr lro_mgr; 357 struct net_lro_desc lro_arr[8]; 358}; 359 360struct tx_queue { 361 int index; 362 363 int tx_ring_size; 364 365 int tx_desc_count; 366 int tx_curr_desc; 367 int tx_used_desc; 368 369 struct tx_desc *tx_desc_area; 370 dma_addr_t tx_desc_dma; 371 int tx_desc_area_size; 372 373 struct sk_buff_head tx_skb; 374 375 unsigned long tx_packets; 376 unsigned long tx_bytes; 377 unsigned long tx_dropped; 378}; 379 380struct mv643xx_eth_private { 381 struct mv643xx_eth_shared_private *shared; 382 void __iomem *base; 383 int port_num; 384 385 struct net_device *dev; 386 387 struct phy_device *phy; 388 389 struct timer_list mib_counters_timer; 390 spinlock_t mib_counters_lock; 391 struct mib_counters mib_counters; 392 393 struct lro_counters lro_counters; 394 395 struct work_struct tx_timeout_task; 396 397 struct napi_struct napi; 398 u32 int_mask; 399 u8 oom; 400 u8 work_link; 401 u8 work_tx; 402 u8 work_tx_end; 403 u8 work_rx; 404 u8 work_rx_refill; 405 406 int skb_size; 407 struct sk_buff_head rx_recycle; 408 409 /* 410 * RX state. 411 */ 412 int rx_ring_size; 413 unsigned long rx_desc_sram_addr; 414 int rx_desc_sram_size; 415 int rxq_count; 416 struct timer_list rx_oom; 417 struct rx_queue rxq[8]; 418 419 /* 420 * TX state. 421 */ 422 int tx_ring_size; 423 unsigned long tx_desc_sram_addr; 424 int tx_desc_sram_size; 425 int txq_count; 426 struct tx_queue txq[8]; 427}; 428 429 430/* port register accessors **************************************************/ 431static inline u32 rdl(struct mv643xx_eth_private *mp, int offset) 432{ 433 return readl(mp->shared->base + offset); 434} 435 436static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset) 437{ 438 return readl(mp->base + offset); 439} 440 441static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data) 442{ 443 writel(data, mp->shared->base + offset); 444} 445 446static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data) 447{ 448 writel(data, mp->base + offset); 449} 450 451 452/* rxq/txq helper functions *************************************************/ 453static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq) 454{ 455 return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]); 456} 457 458static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq) 459{ 460 return container_of(txq, struct mv643xx_eth_private, txq[txq->index]); 461} 462 463static void rxq_enable(struct rx_queue *rxq) 464{ 465 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); 466 wrlp(mp, RXQ_COMMAND, 1 << rxq->index); 467} 468 469static void rxq_disable(struct rx_queue *rxq) 470{ 471 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); 472 u8 mask = 1 << rxq->index; 473 474 wrlp(mp, RXQ_COMMAND, mask << 8); 475 while (rdlp(mp, RXQ_COMMAND) & mask) 476 udelay(10); 477} 478 479static void txq_reset_hw_ptr(struct tx_queue *txq) 480{ 481 struct mv643xx_eth_private *mp = txq_to_mp(txq); 482 u32 addr; 483 484 addr = (u32)txq->tx_desc_dma; 485 addr += txq->tx_curr_desc * sizeof(struct tx_desc); 486 wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr); 487} 488 489static void txq_enable(struct tx_queue *txq) 490{ 491 struct mv643xx_eth_private *mp = txq_to_mp(txq); 492 wrlp(mp, TXQ_COMMAND, 1 << txq->index); 493} 494 495static void txq_disable(struct tx_queue *txq) 496{ 497 struct mv643xx_eth_private *mp = txq_to_mp(txq); 498 u8 mask = 1 << txq->index; 499 500 wrlp(mp, TXQ_COMMAND, mask << 8); 501 while (rdlp(mp, TXQ_COMMAND) & mask) 502 udelay(10); 503} 504 505static void txq_maybe_wake(struct tx_queue *txq) 506{ 507 struct mv643xx_eth_private *mp = txq_to_mp(txq); 508 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); 509 510 if (netif_tx_queue_stopped(nq)) { 511 __netif_tx_lock(nq, smp_processor_id()); 512 if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1) 513 netif_tx_wake_queue(nq); 514 __netif_tx_unlock(nq); 515 } 516} 517 518 519/* rx napi ******************************************************************/ 520static int 521mv643xx_get_skb_header(struct sk_buff *skb, void **iphdr, void **tcph, 522 u64 *hdr_flags, void *priv) 523{ 524 unsigned long cmd_sts = (unsigned long)priv; 525 526 /* 527 * Make sure that this packet is Ethernet II, is not VLAN 528 * tagged, is IPv4, has a valid IP header, and is TCP. 529 */ 530 if ((cmd_sts & (RX_IP_HDR_OK | RX_PKT_IS_IPV4 | 531 RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_MASK | 532 RX_PKT_IS_VLAN_TAGGED)) != 533 (RX_IP_HDR_OK | RX_PKT_IS_IPV4 | 534 RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_TCP_IPV4)) 535 return -1; 536 537 skb_reset_network_header(skb); 538 skb_set_transport_header(skb, ip_hdrlen(skb)); 539 *iphdr = ip_hdr(skb); 540 *tcph = tcp_hdr(skb); 541 *hdr_flags = LRO_IPV4 | LRO_TCP; 542 543 return 0; 544} 545 546static int rxq_process(struct rx_queue *rxq, int budget) 547{ 548 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); 549 struct net_device_stats *stats = &mp->dev->stats; 550 int lro_flush_needed; 551 int rx; 552 553 lro_flush_needed = 0; 554 rx = 0; 555 while (rx < budget && rxq->rx_desc_count) { 556 struct rx_desc *rx_desc; 557 unsigned int cmd_sts; 558 struct sk_buff *skb; 559 u16 byte_cnt; 560 561 rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc]; 562 563 cmd_sts = rx_desc->cmd_sts; 564 if (cmd_sts & BUFFER_OWNED_BY_DMA) 565 break; 566 rmb(); 567 568 skb = rxq->rx_skb[rxq->rx_curr_desc]; 569 rxq->rx_skb[rxq->rx_curr_desc] = NULL; 570 571 rxq->rx_curr_desc++; 572 if (rxq->rx_curr_desc == rxq->rx_ring_size) 573 rxq->rx_curr_desc = 0; 574 575 dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr, 576 rx_desc->buf_size, DMA_FROM_DEVICE); 577 rxq->rx_desc_count--; 578 rx++; 579 580 mp->work_rx_refill |= 1 << rxq->index; 581 582 byte_cnt = rx_desc->byte_cnt; 583 584 /* 585 * Update statistics. 586 * 587 * Note that the descriptor byte count includes 2 dummy 588 * bytes automatically inserted by the hardware at the 589 * start of the packet (which we don't count), and a 4 590 * byte CRC at the end of the packet (which we do count). 591 */ 592 stats->rx_packets++; 593 stats->rx_bytes += byte_cnt - 2; 594 595 /* 596 * In case we received a packet without first / last bits 597 * on, or the error summary bit is set, the packet needs 598 * to be dropped. 599 */ 600 if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY)) 601 != (RX_FIRST_DESC | RX_LAST_DESC)) 602 goto err; 603 604 /* 605 * The -4 is for the CRC in the trailer of the 606 * received packet 607 */ 608 skb_put(skb, byte_cnt - 2 - 4); 609 610 if (cmd_sts & LAYER_4_CHECKSUM_OK) 611 skb->ip_summed = CHECKSUM_UNNECESSARY; 612 skb->protocol = eth_type_trans(skb, mp->dev); 613 614 if (skb->dev->features & NETIF_F_LRO && 615 skb->ip_summed == CHECKSUM_UNNECESSARY) { 616 lro_receive_skb(&rxq->lro_mgr, skb, (void *)cmd_sts); 617 lro_flush_needed = 1; 618 } else 619 netif_receive_skb(skb); 620 621 continue; 622 623err: 624 stats->rx_dropped++; 625 626 if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) != 627 (RX_FIRST_DESC | RX_LAST_DESC)) { 628 if (net_ratelimit()) 629 dev_printk(KERN_ERR, &mp->dev->dev, 630 "received packet spanning " 631 "multiple descriptors\n"); 632 } 633 634 if (cmd_sts & ERROR_SUMMARY) 635 stats->rx_errors++; 636 637 dev_kfree_skb(skb); 638 } 639 640 if (lro_flush_needed) 641 lro_flush_all(&rxq->lro_mgr); 642 643 if (rx < budget) 644 mp->work_rx &= ~(1 << rxq->index); 645 646 return rx; 647} 648 649static int rxq_refill(struct rx_queue *rxq, int budget) 650{ 651 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); 652 int refilled; 653 654 refilled = 0; 655 while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) { 656 struct sk_buff *skb; 657 int rx; 658 struct rx_desc *rx_desc; 659 660 skb = __skb_dequeue(&mp->rx_recycle); 661 if (skb == NULL) 662 skb = dev_alloc_skb(mp->skb_size); 663 664 if (skb == NULL) { 665 mp->oom = 1; 666 goto oom; 667 } 668 669 if (SKB_DMA_REALIGN) 670 skb_reserve(skb, SKB_DMA_REALIGN); 671 672 refilled++; 673 rxq->rx_desc_count++; 674 675 rx = rxq->rx_used_desc++; 676 if (rxq->rx_used_desc == rxq->rx_ring_size) 677 rxq->rx_used_desc = 0; 678 679 rx_desc = rxq->rx_desc_area + rx; 680 681 rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent, 682 skb->data, mp->skb_size, 683 DMA_FROM_DEVICE); 684 rx_desc->buf_size = mp->skb_size; 685 rxq->rx_skb[rx] = skb; 686 wmb(); 687 rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT; 688 wmb(); 689 690 /* 691 * The hardware automatically prepends 2 bytes of 692 * dummy data to each received packet, so that the 693 * IP header ends up 16-byte aligned. 694 */ 695 skb_reserve(skb, 2); 696 } 697 698 if (refilled < budget) 699 mp->work_rx_refill &= ~(1 << rxq->index); 700 701oom: 702 return refilled; 703} 704 705 706/* tx ***********************************************************************/ 707static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb) 708{ 709 int frag; 710 711 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { 712 skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag]; 713 if (fragp->size <= 8 && fragp->page_offset & 7) 714 return 1; 715 } 716 717 return 0; 718} 719 720static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) 721{ 722 struct mv643xx_eth_private *mp = txq_to_mp(txq); 723 int nr_frags = skb_shinfo(skb)->nr_frags; 724 int frag; 725 726 for (frag = 0; frag < nr_frags; frag++) { 727 skb_frag_t *this_frag; 728 int tx_index; 729 struct tx_desc *desc; 730 731 this_frag = &skb_shinfo(skb)->frags[frag]; 732 tx_index = txq->tx_curr_desc++; 733 if (txq->tx_curr_desc == txq->tx_ring_size) 734 txq->tx_curr_desc = 0; 735 desc = &txq->tx_desc_area[tx_index]; 736 737 /* 738 * The last fragment will generate an interrupt 739 * which will free the skb on TX completion. 740 */ 741 if (frag == nr_frags - 1) { 742 desc->cmd_sts = BUFFER_OWNED_BY_DMA | 743 ZERO_PADDING | TX_LAST_DESC | 744 TX_ENABLE_INTERRUPT; 745 } else { 746 desc->cmd_sts = BUFFER_OWNED_BY_DMA; 747 } 748 749 desc->l4i_chk = 0; 750 desc->byte_cnt = this_frag->size; 751 desc->buf_ptr = dma_map_page(mp->dev->dev.parent, 752 this_frag->page, 753 this_frag->page_offset, 754 this_frag->size, DMA_TO_DEVICE); 755 } 756} 757 758static inline __be16 sum16_as_be(__sum16 sum) 759{ 760 return (__force __be16)sum; 761} 762 763static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb) 764{ 765 struct mv643xx_eth_private *mp = txq_to_mp(txq); 766 int nr_frags = skb_shinfo(skb)->nr_frags; 767 int tx_index; 768 struct tx_desc *desc; 769 u32 cmd_sts; 770 u16 l4i_chk; 771 int length; 772 773 cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA; 774 l4i_chk = 0; 775 776 if (skb->ip_summed == CHECKSUM_PARTIAL) { 777 int tag_bytes; 778 779 BUG_ON(skb->protocol != htons(ETH_P_IP) && 780 skb->protocol != htons(ETH_P_8021Q)); 781 782 tag_bytes = (void *)ip_hdr(skb) - (void *)skb->data - ETH_HLEN; 783 if (unlikely(tag_bytes & ~12)) { 784 if (skb_checksum_help(skb) == 0) 785 goto no_csum; 786 kfree_skb(skb); 787 return 1; 788 } 789 790 if (tag_bytes & 4) 791 cmd_sts |= MAC_HDR_EXTRA_4_BYTES; 792 if (tag_bytes & 8) 793 cmd_sts |= MAC_HDR_EXTRA_8_BYTES; 794 795 cmd_sts |= GEN_TCP_UDP_CHECKSUM | 796 GEN_IP_V4_CHECKSUM | 797 ip_hdr(skb)->ihl << TX_IHL_SHIFT; 798 799 switch (ip_hdr(skb)->protocol) { 800 case IPPROTO_UDP: 801 cmd_sts |= UDP_FRAME; 802 l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check)); 803 break; 804 case IPPROTO_TCP: 805 l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check)); 806 break; 807 default: 808 BUG(); 809 } 810 } else { 811no_csum: 812 /* Errata BTS #50, IHL must be 5 if no HW checksum */ 813 cmd_sts |= 5 << TX_IHL_SHIFT; 814 } 815 816 tx_index = txq->tx_curr_desc++; 817 if (txq->tx_curr_desc == txq->tx_ring_size) 818 txq->tx_curr_desc = 0; 819 desc = &txq->tx_desc_area[tx_index]; 820 821 if (nr_frags) { 822 txq_submit_frag_skb(txq, skb); 823 length = skb_headlen(skb); 824 } else { 825 cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT; 826 length = skb->len; 827 } 828 829 desc->l4i_chk = l4i_chk; 830 desc->byte_cnt = length; 831 desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data, 832 length, DMA_TO_DEVICE); 833 834 __skb_queue_tail(&txq->tx_skb, skb); 835 836 /* ensure all other descriptors are written before first cmd_sts */ 837 wmb(); 838 desc->cmd_sts = cmd_sts; 839 840 /* clear TX_END status */ 841 mp->work_tx_end &= ~(1 << txq->index); 842 843 /* ensure all descriptors are written before poking hardware */ 844 wmb(); 845 txq_enable(txq); 846 847 txq->tx_desc_count += nr_frags + 1; 848 849 return 0; 850} 851 852static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev) 853{ 854 struct mv643xx_eth_private *mp = netdev_priv(dev); 855 int queue; 856 struct tx_queue *txq; 857 struct netdev_queue *nq; 858 859 queue = skb_get_queue_mapping(skb); 860 txq = mp->txq + queue; 861 nq = netdev_get_tx_queue(dev, queue); 862 863 if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) { 864 txq->tx_dropped++; 865 dev_printk(KERN_DEBUG, &dev->dev, 866 "failed to linearize skb with tiny " 867 "unaligned fragment\n"); 868 return NETDEV_TX_BUSY; 869 } 870 871 if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) { 872 if (net_ratelimit()) 873 dev_printk(KERN_ERR, &dev->dev, "tx queue full?!\n"); 874 kfree_skb(skb); 875 return NETDEV_TX_OK; 876 } 877 878 if (!txq_submit_skb(txq, skb)) { 879 int entries_left; 880 881 txq->tx_bytes += skb->len; 882 txq->tx_packets++; 883 dev->trans_start = jiffies; 884 885 entries_left = txq->tx_ring_size - txq->tx_desc_count; 886 if (entries_left < MAX_SKB_FRAGS + 1) 887 netif_tx_stop_queue(nq); 888 } 889 890 return NETDEV_TX_OK; 891} 892 893 894/* tx napi ******************************************************************/ 895static void txq_kick(struct tx_queue *txq) 896{ 897 struct mv643xx_eth_private *mp = txq_to_mp(txq); 898 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); 899 u32 hw_desc_ptr; 900 u32 expected_ptr; 901 902 __netif_tx_lock(nq, smp_processor_id()); 903 904 if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index)) 905 goto out; 906 907 hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index)); 908 expected_ptr = (u32)txq->tx_desc_dma + 909 txq->tx_curr_desc * sizeof(struct tx_desc); 910 911 if (hw_desc_ptr != expected_ptr) 912 txq_enable(txq); 913 914out: 915 __netif_tx_unlock(nq); 916 917 mp->work_tx_end &= ~(1 << txq->index); 918} 919 920static int txq_reclaim(struct tx_queue *txq, int budget, int force) 921{ 922 struct mv643xx_eth_private *mp = txq_to_mp(txq); 923 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); 924 int reclaimed; 925 926 __netif_tx_lock(nq, smp_processor_id()); 927 928 reclaimed = 0; 929 while (reclaimed < budget && txq->tx_desc_count > 0) { 930 int tx_index; 931 struct tx_desc *desc; 932 u32 cmd_sts; 933 struct sk_buff *skb; 934 935 tx_index = txq->tx_used_desc; 936 desc = &txq->tx_desc_area[tx_index]; 937 cmd_sts = desc->cmd_sts; 938 939 if (cmd_sts & BUFFER_OWNED_BY_DMA) { 940 if (!force) 941 break; 942 desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA; 943 } 944 945 txq->tx_used_desc = tx_index + 1; 946 if (txq->tx_used_desc == txq->tx_ring_size) 947 txq->tx_used_desc = 0; 948 949 reclaimed++; 950 txq->tx_desc_count--; 951 952 skb = NULL; 953 if (cmd_sts & TX_LAST_DESC) 954 skb = __skb_dequeue(&txq->tx_skb); 955 956 if (cmd_sts & ERROR_SUMMARY) { 957 dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n"); 958 mp->dev->stats.tx_errors++; 959 } 960 961 if (cmd_sts & TX_FIRST_DESC) { 962 dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr, 963 desc->byte_cnt, DMA_TO_DEVICE); 964 } else { 965 dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr, 966 desc->byte_cnt, DMA_TO_DEVICE); 967 } 968 969 if (skb != NULL) { 970 if (skb_queue_len(&mp->rx_recycle) < 971 mp->rx_ring_size && 972 skb_recycle_check(skb, mp->skb_size)) 973 __skb_queue_head(&mp->rx_recycle, skb); 974 else 975 dev_kfree_skb(skb); 976 } 977 } 978 979 __netif_tx_unlock(nq); 980 981 if (reclaimed < budget) 982 mp->work_tx &= ~(1 << txq->index); 983 984 return reclaimed; 985} 986 987 988/* tx rate control **********************************************************/ 989/* 990 * Set total maximum TX rate (shared by all TX queues for this port) 991 * to 'rate' bits per second, with a maximum burst of 'burst' bytes. 992 */ 993static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst) 994{ 995 int token_rate; 996 int mtu; 997 int bucket_size; 998 999 token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000); 1000 if (token_rate > 1023) 1001 token_rate = 1023; 1002 1003 mtu = (mp->dev->mtu + 255) >> 8; 1004 if (mtu > 63) 1005 mtu = 63; 1006 1007 bucket_size = (burst + 255) >> 8; 1008 if (bucket_size > 65535) 1009 bucket_size = 65535; 1010 1011 switch (mp->shared->tx_bw_control) { 1012 case TX_BW_CONTROL_OLD_LAYOUT: 1013 wrlp(mp, TX_BW_RATE, token_rate); 1014 wrlp(mp, TX_BW_MTU, mtu); 1015 wrlp(mp, TX_BW_BURST, bucket_size); 1016 break; 1017 case TX_BW_CONTROL_NEW_LAYOUT: 1018 wrlp(mp, TX_BW_RATE_MOVED, token_rate); 1019 wrlp(mp, TX_BW_MTU_MOVED, mtu); 1020 wrlp(mp, TX_BW_BURST_MOVED, bucket_size); 1021 break; 1022 } 1023} 1024 1025static void txq_set_rate(struct tx_queue *txq, int rate, int burst) 1026{ 1027 struct mv643xx_eth_private *mp = txq_to_mp(txq); 1028 int token_rate; 1029 int bucket_size; 1030 1031 token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000); 1032 if (token_rate > 1023) 1033 token_rate = 1023; 1034 1035 bucket_size = (burst + 255) >> 8; 1036 if (bucket_size > 65535) 1037 bucket_size = 65535; 1038 1039 wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14); 1040 wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate); 1041} 1042 1043static void txq_set_fixed_prio_mode(struct tx_queue *txq) 1044{ 1045 struct mv643xx_eth_private *mp = txq_to_mp(txq); 1046 int off; 1047 u32 val; 1048 1049 /* 1050 * Turn on fixed priority mode. 1051 */ 1052 off = 0; 1053 switch (mp->shared->tx_bw_control) { 1054 case TX_BW_CONTROL_OLD_LAYOUT: 1055 off = TXQ_FIX_PRIO_CONF; 1056 break; 1057 case TX_BW_CONTROL_NEW_LAYOUT: 1058 off = TXQ_FIX_PRIO_CONF_MOVED; 1059 break; 1060 } 1061 1062 if (off) { 1063 val = rdlp(mp, off); 1064 val |= 1 << txq->index; 1065 wrlp(mp, off, val); 1066 } 1067} 1068 1069static void txq_set_wrr(struct tx_queue *txq, int weight) 1070{ 1071 struct mv643xx_eth_private *mp = txq_to_mp(txq); 1072 int off; 1073 u32 val; 1074 1075 /* 1076 * Turn off fixed priority mode. 1077 */ 1078 off = 0; 1079 switch (mp->shared->tx_bw_control) { 1080 case TX_BW_CONTROL_OLD_LAYOUT: 1081 off = TXQ_FIX_PRIO_CONF; 1082 break; 1083 case TX_BW_CONTROL_NEW_LAYOUT: 1084 off = TXQ_FIX_PRIO_CONF_MOVED; 1085 break; 1086 } 1087 1088 if (off) { 1089 val = rdlp(mp, off); 1090 val &= ~(1 << txq->index); 1091 wrlp(mp, off, val); 1092 1093 /* 1094 * Configure WRR weight for this queue. 1095 */ 1096 1097 val = rdlp(mp, off); 1098 val = (val & ~0xff) | (weight & 0xff); 1099 wrlp(mp, TXQ_BW_WRR_CONF(txq->index), val); 1100 } 1101} 1102 1103 1104/* mii management interface *************************************************/ 1105static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id) 1106{ 1107 struct mv643xx_eth_shared_private *msp = dev_id; 1108 1109 if (readl(msp->base + ERR_INT_CAUSE) & ERR_INT_SMI_DONE) { 1110 writel(~ERR_INT_SMI_DONE, msp->base + ERR_INT_CAUSE); 1111 wake_up(&msp->smi_busy_wait); 1112 return IRQ_HANDLED; 1113 } 1114 1115 return IRQ_NONE; 1116} 1117 1118static int smi_is_done(struct mv643xx_eth_shared_private *msp) 1119{ 1120 return !(readl(msp->base + SMI_REG) & SMI_BUSY); 1121} 1122 1123static int smi_wait_ready(struct mv643xx_eth_shared_private *msp) 1124{ 1125 if (msp->err_interrupt == NO_IRQ) { 1126 int i; 1127 1128 for (i = 0; !smi_is_done(msp); i++) { 1129 if (i == 10) 1130 return -ETIMEDOUT; 1131 msleep(10); 1132 } 1133 1134 return 0; 1135 } 1136 1137 if (!smi_is_done(msp)) { 1138 wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp), 1139 msecs_to_jiffies(100)); 1140 if (!smi_is_done(msp)) 1141 return -ETIMEDOUT; 1142 } 1143 1144 return 0; 1145} 1146 1147static int smi_bus_read(struct mii_bus *bus, int addr, int reg) 1148{ 1149 struct mv643xx_eth_shared_private *msp = bus->priv; 1150 void __iomem *smi_reg = msp->base + SMI_REG; 1151 int ret; 1152 1153 if (smi_wait_ready(msp)) { 1154 printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n"); 1155 return -ETIMEDOUT; 1156 } 1157 1158 writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg); 1159 1160 if (smi_wait_ready(msp)) { 1161 printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n"); 1162 return -ETIMEDOUT; 1163 } 1164 1165 ret = readl(smi_reg); 1166 if (!(ret & SMI_READ_VALID)) { 1167 printk(KERN_WARNING "mv643xx_eth: SMI bus read not valid\n"); 1168 return -ENODEV; 1169 } 1170 1171 return ret & 0xffff; 1172} 1173 1174static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val) 1175{ 1176 struct mv643xx_eth_shared_private *msp = bus->priv; 1177 void __iomem *smi_reg = msp->base + SMI_REG; 1178 1179 if (smi_wait_ready(msp)) { 1180 printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n"); 1181 return -ETIMEDOUT; 1182 } 1183 1184 writel(SMI_OPCODE_WRITE | (reg << 21) | 1185 (addr << 16) | (val & 0xffff), smi_reg); 1186 1187 if (smi_wait_ready(msp)) { 1188 printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n"); 1189 return -ETIMEDOUT; 1190 } 1191 1192 return 0; 1193} 1194 1195 1196/* statistics ***************************************************************/ 1197static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev) 1198{ 1199 struct mv643xx_eth_private *mp = netdev_priv(dev); 1200 struct net_device_stats *stats = &dev->stats; 1201 unsigned long tx_packets = 0; 1202 unsigned long tx_bytes = 0; 1203 unsigned long tx_dropped = 0; 1204 int i; 1205 1206 for (i = 0; i < mp->txq_count; i++) { 1207 struct tx_queue *txq = mp->txq + i; 1208 1209 tx_packets += txq->tx_packets; 1210 tx_bytes += txq->tx_bytes; 1211 tx_dropped += txq->tx_dropped; 1212 } 1213 1214 stats->tx_packets = tx_packets; 1215 stats->tx_bytes = tx_bytes; 1216 stats->tx_dropped = tx_dropped; 1217 1218 return stats; 1219} 1220 1221static void mv643xx_eth_grab_lro_stats(struct mv643xx_eth_private *mp) 1222{ 1223 u32 lro_aggregated = 0; 1224 u32 lro_flushed = 0; 1225 u32 lro_no_desc = 0; 1226 int i; 1227 1228 for (i = 0; i < mp->rxq_count; i++) { 1229 struct rx_queue *rxq = mp->rxq + i; 1230 1231 lro_aggregated += rxq->lro_mgr.stats.aggregated; 1232 lro_flushed += rxq->lro_mgr.stats.flushed; 1233 lro_no_desc += rxq->lro_mgr.stats.no_desc; 1234 } 1235 1236 mp->lro_counters.lro_aggregated = lro_aggregated; 1237 mp->lro_counters.lro_flushed = lro_flushed; 1238 mp->lro_counters.lro_no_desc = lro_no_desc; 1239} 1240 1241static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset) 1242{ 1243 return rdl(mp, MIB_COUNTERS(mp->port_num) + offset); 1244} 1245 1246static void mib_counters_clear(struct mv643xx_eth_private *mp) 1247{ 1248 int i; 1249 1250 for (i = 0; i < 0x80; i += 4) 1251 mib_read(mp, i); 1252} 1253 1254static void mib_counters_update(struct mv643xx_eth_private *mp) 1255{ 1256 struct mib_counters *p = &mp->mib_counters; 1257 1258 spin_lock_bh(&mp->mib_counters_lock); 1259 p->good_octets_received += mib_read(mp, 0x00); 1260 p->bad_octets_received += mib_read(mp, 0x08); 1261 p->internal_mac_transmit_err += mib_read(mp, 0x0c); 1262 p->good_frames_received += mib_read(mp, 0x10); 1263 p->bad_frames_received += mib_read(mp, 0x14); 1264 p->broadcast_frames_received += mib_read(mp, 0x18); 1265 p->multicast_frames_received += mib_read(mp, 0x1c); 1266 p->frames_64_octets += mib_read(mp, 0x20); 1267 p->frames_65_to_127_octets += mib_read(mp, 0x24); 1268 p->frames_128_to_255_octets += mib_read(mp, 0x28); 1269 p->frames_256_to_511_octets += mib_read(mp, 0x2c); 1270 p->frames_512_to_1023_octets += mib_read(mp, 0x30); 1271 p->frames_1024_to_max_octets += mib_read(mp, 0x34); 1272 p->good_octets_sent += mib_read(mp, 0x38); 1273 p->good_frames_sent += mib_read(mp, 0x40); 1274 p->excessive_collision += mib_read(mp, 0x44); 1275 p->multicast_frames_sent += mib_read(mp, 0x48); 1276 p->broadcast_frames_sent += mib_read(mp, 0x4c); 1277 p->unrec_mac_control_received += mib_read(mp, 0x50); 1278 p->fc_sent += mib_read(mp, 0x54); 1279 p->good_fc_received += mib_read(mp, 0x58); 1280 p->bad_fc_received += mib_read(mp, 0x5c); 1281 p->undersize_received += mib_read(mp, 0x60); 1282 p->fragments_received += mib_read(mp, 0x64); 1283 p->oversize_received += mib_read(mp, 0x68); 1284 p->jabber_received += mib_read(mp, 0x6c); 1285 p->mac_receive_error += mib_read(mp, 0x70); 1286 p->bad_crc_event += mib_read(mp, 0x74); 1287 p->collision += mib_read(mp, 0x78); 1288 p->late_collision += mib_read(mp, 0x7c); 1289 spin_unlock_bh(&mp->mib_counters_lock); 1290 1291 mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ); 1292} 1293 1294static void mib_counters_timer_wrapper(unsigned long _mp) 1295{ 1296 struct mv643xx_eth_private *mp = (void *)_mp; 1297 1298 mib_counters_update(mp); 1299} 1300 1301 1302/* interrupt coalescing *****************************************************/ 1303/* 1304 * Hardware coalescing parameters are set in units of 64 t_clk 1305 * cycles. I.e.: 1306 * 1307 * coal_delay_in_usec = 64000000 * register_value / t_clk_rate 1308 * 1309 * register_value = coal_delay_in_usec * t_clk_rate / 64000000 1310 * 1311 * In the ->set*() methods, we round the computed register value 1312 * to the nearest integer. 1313 */ 1314static unsigned int get_rx_coal(struct mv643xx_eth_private *mp) 1315{ 1316 u32 val = rdlp(mp, SDMA_CONFIG); 1317 u64 temp; 1318 1319 if (mp->shared->extended_rx_coal_limit) 1320 temp = ((val & 0x02000000) >> 10) | ((val & 0x003fff80) >> 7); 1321 else 1322 temp = (val & 0x003fff00) >> 8; 1323 1324 temp *= 64000000; 1325 do_div(temp, mp->shared->t_clk); 1326 1327 return (unsigned int)temp; 1328} 1329 1330static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec) 1331{ 1332 u64 temp; 1333 u32 val; 1334 1335 temp = (u64)usec * mp->shared->t_clk; 1336 temp += 31999999; 1337 do_div(temp, 64000000); 1338 1339 val = rdlp(mp, SDMA_CONFIG); 1340 if (mp->shared->extended_rx_coal_limit) { 1341 if (temp > 0xffff) 1342 temp = 0xffff; 1343 val &= ~0x023fff80; 1344 val |= (temp & 0x8000) << 10; 1345 val |= (temp & 0x7fff) << 7; 1346 } else { 1347 if (temp > 0x3fff) 1348 temp = 0x3fff; 1349 val &= ~0x003fff00; 1350 val |= (temp & 0x3fff) << 8; 1351 } 1352 wrlp(mp, SDMA_CONFIG, val); 1353} 1354 1355static unsigned int get_tx_coal(struct mv643xx_eth_private *mp) 1356{ 1357 u64 temp; 1358 1359 temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4; 1360 temp *= 64000000; 1361 do_div(temp, mp->shared->t_clk); 1362 1363 return (unsigned int)temp; 1364} 1365 1366static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec) 1367{ 1368 u64 temp; 1369 1370 temp = (u64)usec * mp->shared->t_clk; 1371 temp += 31999999; 1372 do_div(temp, 64000000); 1373 1374 if (temp > 0x3fff) 1375 temp = 0x3fff; 1376 1377 wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4); 1378} 1379 1380 1381/* ethtool ******************************************************************/ 1382struct mv643xx_eth_stats { 1383 char stat_string[ETH_GSTRING_LEN]; 1384 int sizeof_stat; 1385 int netdev_off; 1386 int mp_off; 1387}; 1388 1389#define SSTAT(m) \ 1390 { #m, FIELD_SIZEOF(struct net_device_stats, m), \ 1391 offsetof(struct net_device, stats.m), -1 } 1392 1393#define MIBSTAT(m) \ 1394 { #m, FIELD_SIZEOF(struct mib_counters, m), \ 1395 -1, offsetof(struct mv643xx_eth_private, mib_counters.m) } 1396 1397#define LROSTAT(m) \ 1398 { #m, FIELD_SIZEOF(struct lro_counters, m), \ 1399 -1, offsetof(struct mv643xx_eth_private, lro_counters.m) } 1400 1401static const struct mv643xx_eth_stats mv643xx_eth_stats[] = { 1402 SSTAT(rx_packets), 1403 SSTAT(tx_packets), 1404 SSTAT(rx_bytes), 1405 SSTAT(tx_bytes), 1406 SSTAT(rx_errors), 1407 SSTAT(tx_errors), 1408 SSTAT(rx_dropped), 1409 SSTAT(tx_dropped), 1410 MIBSTAT(good_octets_received), 1411 MIBSTAT(bad_octets_received), 1412 MIBSTAT(internal_mac_transmit_err), 1413 MIBSTAT(good_frames_received), 1414 MIBSTAT(bad_frames_received), 1415 MIBSTAT(broadcast_frames_received), 1416 MIBSTAT(multicast_frames_received), 1417 MIBSTAT(frames_64_octets), 1418 MIBSTAT(frames_65_to_127_octets), 1419 MIBSTAT(frames_128_to_255_octets), 1420 MIBSTAT(frames_256_to_511_octets), 1421 MIBSTAT(frames_512_to_1023_octets), 1422 MIBSTAT(frames_1024_to_max_octets), 1423 MIBSTAT(good_octets_sent), 1424 MIBSTAT(good_frames_sent), 1425 MIBSTAT(excessive_collision), 1426 MIBSTAT(multicast_frames_sent), 1427 MIBSTAT(broadcast_frames_sent), 1428 MIBSTAT(unrec_mac_control_received), 1429 MIBSTAT(fc_sent), 1430 MIBSTAT(good_fc_received), 1431 MIBSTAT(bad_fc_received), 1432 MIBSTAT(undersize_received), 1433 MIBSTAT(fragments_received), 1434 MIBSTAT(oversize_received), 1435 MIBSTAT(jabber_received), 1436 MIBSTAT(mac_receive_error), 1437 MIBSTAT(bad_crc_event), 1438 MIBSTAT(collision), 1439 MIBSTAT(late_collision), 1440 LROSTAT(lro_aggregated), 1441 LROSTAT(lro_flushed), 1442 LROSTAT(lro_no_desc), 1443}; 1444 1445static int 1446mv643xx_eth_get_settings_phy(struct mv643xx_eth_private *mp, 1447 struct ethtool_cmd *cmd) 1448{ 1449 int err; 1450 1451 err = phy_read_status(mp->phy); 1452 if (err == 0) 1453 err = phy_ethtool_gset(mp->phy, cmd); 1454 1455 /* 1456 * The MAC does not support 1000baseT_Half. 1457 */ 1458 cmd->supported &= ~SUPPORTED_1000baseT_Half; 1459 cmd->advertising &= ~ADVERTISED_1000baseT_Half; 1460 1461 return err; 1462} 1463 1464static int 1465mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp, 1466 struct ethtool_cmd *cmd) 1467{ 1468 u32 port_status; 1469 1470 port_status = rdlp(mp, PORT_STATUS); 1471 1472 cmd->supported = SUPPORTED_MII; 1473 cmd->advertising = ADVERTISED_MII; 1474 switch (port_status & PORT_SPEED_MASK) { 1475 case PORT_SPEED_10: 1476 cmd->speed = SPEED_10; 1477 break; 1478 case PORT_SPEED_100: 1479 cmd->speed = SPEED_100; 1480 break; 1481 case PORT_SPEED_1000: 1482 cmd->speed = SPEED_1000; 1483 break; 1484 default: 1485 cmd->speed = -1; 1486 break; 1487 } 1488 cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF; 1489 cmd->port = PORT_MII; 1490 cmd->phy_address = 0; 1491 cmd->transceiver = XCVR_INTERNAL; 1492 cmd->autoneg = AUTONEG_DISABLE; 1493 cmd->maxtxpkt = 1; 1494 cmd->maxrxpkt = 1; 1495 1496 return 0; 1497} 1498 1499static int 1500mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1501{ 1502 struct mv643xx_eth_private *mp = netdev_priv(dev); 1503 1504 if (mp->phy != NULL) 1505 return mv643xx_eth_get_settings_phy(mp, cmd); 1506 else 1507 return mv643xx_eth_get_settings_phyless(mp, cmd); 1508} 1509 1510static int 1511mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1512{ 1513 struct mv643xx_eth_private *mp = netdev_priv(dev); 1514 1515 if (mp->phy == NULL) 1516 return -EINVAL; 1517 1518 /* 1519 * The MAC does not support 1000baseT_Half. 1520 */ 1521 cmd->advertising &= ~ADVERTISED_1000baseT_Half; 1522 1523 return phy_ethtool_sset(mp->phy, cmd); 1524} 1525 1526static void mv643xx_eth_get_drvinfo(struct net_device *dev, 1527 struct ethtool_drvinfo *drvinfo) 1528{ 1529 strncpy(drvinfo->driver, mv643xx_eth_driver_name, 32); 1530 strncpy(drvinfo->version, mv643xx_eth_driver_version, 32); 1531 strncpy(drvinfo->fw_version, "N/A", 32); 1532 strncpy(drvinfo->bus_info, "platform", 32); 1533 drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats); 1534} 1535 1536static int mv643xx_eth_nway_reset(struct net_device *dev) 1537{ 1538 struct mv643xx_eth_private *mp = netdev_priv(dev); 1539 1540 if (mp->phy == NULL) 1541 return -EINVAL; 1542 1543 return genphy_restart_aneg(mp->phy); 1544} 1545 1546static u32 mv643xx_eth_get_link(struct net_device *dev) 1547{ 1548 return !!netif_carrier_ok(dev); 1549} 1550 1551static int 1552mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) 1553{ 1554 struct mv643xx_eth_private *mp = netdev_priv(dev); 1555 1556 ec->rx_coalesce_usecs = get_rx_coal(mp); 1557 ec->tx_coalesce_usecs = get_tx_coal(mp); 1558 1559 return 0; 1560} 1561 1562static int 1563mv643xx_eth_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) 1564{ 1565 struct mv643xx_eth_private *mp = netdev_priv(dev); 1566 1567 set_rx_coal(mp, ec->rx_coalesce_usecs); 1568 set_tx_coal(mp, ec->tx_coalesce_usecs); 1569 1570 return 0; 1571} 1572 1573static void 1574mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er) 1575{ 1576 struct mv643xx_eth_private *mp = netdev_priv(dev); 1577 1578 er->rx_max_pending = 4096; 1579 er->tx_max_pending = 4096; 1580 er->rx_mini_max_pending = 0; 1581 er->rx_jumbo_max_pending = 0; 1582 1583 er->rx_pending = mp->rx_ring_size; 1584 er->tx_pending = mp->tx_ring_size; 1585 er->rx_mini_pending = 0; 1586 er->rx_jumbo_pending = 0; 1587} 1588 1589static int 1590mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er) 1591{ 1592 struct mv643xx_eth_private *mp = netdev_priv(dev); 1593 1594 if (er->rx_mini_pending || er->rx_jumbo_pending) 1595 return -EINVAL; 1596 1597 mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096; 1598 mp->tx_ring_size = er->tx_pending < 4096 ? er->tx_pending : 4096; 1599 1600 if (netif_running(dev)) { 1601 mv643xx_eth_stop(dev); 1602 if (mv643xx_eth_open(dev)) { 1603 dev_printk(KERN_ERR, &dev->dev, 1604 "fatal error on re-opening device after " 1605 "ring param change\n"); 1606 return -ENOMEM; 1607 } 1608 } 1609 1610 return 0; 1611} 1612 1613static u32 1614mv643xx_eth_get_rx_csum(struct net_device *dev) 1615{ 1616 struct mv643xx_eth_private *mp = netdev_priv(dev); 1617 1618 return !!(rdlp(mp, PORT_CONFIG) & 0x02000000); 1619} 1620 1621static int 1622mv643xx_eth_set_rx_csum(struct net_device *dev, u32 rx_csum) 1623{ 1624 struct mv643xx_eth_private *mp = netdev_priv(dev); 1625 1626 wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000); 1627 1628 return 0; 1629} 1630 1631static void mv643xx_eth_get_strings(struct net_device *dev, 1632 uint32_t stringset, uint8_t *data) 1633{ 1634 int i; 1635 1636 if (stringset == ETH_SS_STATS) { 1637 for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) { 1638 memcpy(data + i * ETH_GSTRING_LEN, 1639 mv643xx_eth_stats[i].stat_string, 1640 ETH_GSTRING_LEN); 1641 } 1642 } 1643} 1644 1645static void mv643xx_eth_get_ethtool_stats(struct net_device *dev, 1646 struct ethtool_stats *stats, 1647 uint64_t *data) 1648{ 1649 struct mv643xx_eth_private *mp = netdev_priv(dev); 1650 int i; 1651 1652 mv643xx_eth_get_stats(dev); 1653 mib_counters_update(mp); 1654 mv643xx_eth_grab_lro_stats(mp); 1655 1656 for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) { 1657 const struct mv643xx_eth_stats *stat; 1658 void *p; 1659 1660 stat = mv643xx_eth_stats + i; 1661 1662 if (stat->netdev_off >= 0) 1663 p = ((void *)mp->dev) + stat->netdev_off; 1664 else 1665 p = ((void *)mp) + stat->mp_off; 1666 1667 data[i] = (stat->sizeof_stat == 8) ? 1668 *(uint64_t *)p : *(uint32_t *)p; 1669 } 1670} 1671 1672static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset) 1673{ 1674 if (sset == ETH_SS_STATS) 1675 return ARRAY_SIZE(mv643xx_eth_stats); 1676 1677 return -EOPNOTSUPP; 1678} 1679 1680static const struct ethtool_ops mv643xx_eth_ethtool_ops = { 1681 .get_settings = mv643xx_eth_get_settings, 1682 .set_settings = mv643xx_eth_set_settings, 1683 .get_drvinfo = mv643xx_eth_get_drvinfo, 1684 .nway_reset = mv643xx_eth_nway_reset, 1685 .get_link = mv643xx_eth_get_link, 1686 .get_coalesce = mv643xx_eth_get_coalesce, 1687 .set_coalesce = mv643xx_eth_set_coalesce, 1688 .get_ringparam = mv643xx_eth_get_ringparam, 1689 .set_ringparam = mv643xx_eth_set_ringparam, 1690 .get_rx_csum = mv643xx_eth_get_rx_csum, 1691 .set_rx_csum = mv643xx_eth_set_rx_csum, 1692 .set_tx_csum = ethtool_op_set_tx_csum, 1693 .set_sg = ethtool_op_set_sg, 1694 .get_strings = mv643xx_eth_get_strings, 1695 .get_ethtool_stats = mv643xx_eth_get_ethtool_stats, 1696 .get_flags = ethtool_op_get_flags, 1697 .set_flags = ethtool_op_set_flags, 1698 .get_sset_count = mv643xx_eth_get_sset_count, 1699}; 1700 1701 1702/* address handling *********************************************************/ 1703static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr) 1704{ 1705 unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH); 1706 unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW); 1707 1708 addr[0] = (mac_h >> 24) & 0xff; 1709 addr[1] = (mac_h >> 16) & 0xff; 1710 addr[2] = (mac_h >> 8) & 0xff; 1711 addr[3] = mac_h & 0xff; 1712 addr[4] = (mac_l >> 8) & 0xff; 1713 addr[5] = mac_l & 0xff; 1714} 1715 1716static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr) 1717{ 1718 wrlp(mp, MAC_ADDR_HIGH, 1719 (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]); 1720 wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]); 1721} 1722 1723static u32 uc_addr_filter_mask(struct net_device *dev) 1724{ 1725 struct netdev_hw_addr *ha; 1726 u32 nibbles; 1727 1728 if (dev->flags & IFF_PROMISC) 1729 return 0; 1730 1731 nibbles = 1 << (dev->dev_addr[5] & 0x0f); 1732 list_for_each_entry(ha, &dev->uc.list, list) { 1733 if (memcmp(dev->dev_addr, ha->addr, 5)) 1734 return 0; 1735 if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0) 1736 return 0; 1737 1738 nibbles |= 1 << (ha->addr[5] & 0x0f); 1739 } 1740 1741 return nibbles; 1742} 1743 1744static void mv643xx_eth_program_unicast_filter(struct net_device *dev) 1745{ 1746 struct mv643xx_eth_private *mp = netdev_priv(dev); 1747 u32 port_config; 1748 u32 nibbles; 1749 int i; 1750 1751 uc_addr_set(mp, dev->dev_addr); 1752 1753 port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE; 1754 1755 nibbles = uc_addr_filter_mask(dev); 1756 if (!nibbles) { 1757 port_config |= UNICAST_PROMISCUOUS_MODE; 1758 nibbles = 0xffff; 1759 } 1760 1761 for (i = 0; i < 16; i += 4) { 1762 int off = UNICAST_TABLE(mp->port_num) + i; 1763 u32 v; 1764 1765 v = 0; 1766 if (nibbles & 1) 1767 v |= 0x00000001; 1768 if (nibbles & 2) 1769 v |= 0x00000100; 1770 if (nibbles & 4) 1771 v |= 0x00010000; 1772 if (nibbles & 8) 1773 v |= 0x01000000; 1774 nibbles >>= 4; 1775 1776 wrl(mp, off, v); 1777 } 1778 1779 wrlp(mp, PORT_CONFIG, port_config); 1780} 1781 1782static int addr_crc(unsigned char *addr) 1783{ 1784 int crc = 0; 1785 int i; 1786 1787 for (i = 0; i < 6; i++) { 1788 int j; 1789 1790 crc = (crc ^ addr[i]) << 8; 1791 for (j = 7; j >= 0; j--) { 1792 if (crc & (0x100 << j)) 1793 crc ^= 0x107 << j; 1794 } 1795 } 1796 1797 return crc; 1798} 1799 1800static void mv643xx_eth_program_multicast_filter(struct net_device *dev) 1801{ 1802 struct mv643xx_eth_private *mp = netdev_priv(dev); 1803 u32 *mc_spec; 1804 u32 *mc_other; 1805 struct dev_addr_list *addr; 1806 int i; 1807 1808 if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { 1809 int port_num; 1810 u32 accept; 1811 1812oom: 1813 port_num = mp->port_num; 1814 accept = 0x01010101; 1815 for (i = 0; i < 0x100; i += 4) { 1816 wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept); 1817 wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept); 1818 } 1819 return; 1820 } 1821 1822 mc_spec = kmalloc(0x200, GFP_ATOMIC); 1823 if (mc_spec == NULL) 1824 goto oom; 1825 mc_other = mc_spec + (0x100 >> 2); 1826 1827 memset(mc_spec, 0, 0x100); 1828 memset(mc_other, 0, 0x100); 1829 1830 for (addr = dev->mc_list; addr != NULL; addr = addr->next) { 1831 u8 *a = addr->da_addr; 1832 u32 *table; 1833 int entry; 1834 1835 if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) { 1836 table = mc_spec; 1837 entry = a[5]; 1838 } else { 1839 table = mc_other; 1840 entry = addr_crc(a); 1841 } 1842 1843 table[entry >> 2] |= 1 << (8 * (entry & 3)); 1844 } 1845 1846 for (i = 0; i < 0x100; i += 4) { 1847 wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, mc_spec[i >> 2]); 1848 wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, mc_other[i >> 2]); 1849 } 1850 1851 kfree(mc_spec); 1852} 1853 1854static void mv643xx_eth_set_rx_mode(struct net_device *dev) 1855{ 1856 mv643xx_eth_program_unicast_filter(dev); 1857 mv643xx_eth_program_multicast_filter(dev); 1858} 1859 1860static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr) 1861{ 1862 struct sockaddr *sa = addr; 1863 1864 memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); 1865 1866 netif_addr_lock_bh(dev); 1867 mv643xx_eth_program_unicast_filter(dev); 1868 netif_addr_unlock_bh(dev); 1869 1870 return 0; 1871} 1872 1873 1874/* rx/tx queue initialisation ***********************************************/ 1875static int rxq_init(struct mv643xx_eth_private *mp, int index) 1876{ 1877 struct rx_queue *rxq = mp->rxq + index; 1878 struct rx_desc *rx_desc; 1879 int size; 1880 int i; 1881 1882 rxq->index = index; 1883 1884 rxq->rx_ring_size = mp->rx_ring_size; 1885 1886 rxq->rx_desc_count = 0; 1887 rxq->rx_curr_desc = 0; 1888 rxq->rx_used_desc = 0; 1889 1890 size = rxq->rx_ring_size * sizeof(struct rx_desc); 1891 1892 if (index == 0 && size <= mp->rx_desc_sram_size) { 1893 rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr, 1894 mp->rx_desc_sram_size); 1895 rxq->rx_desc_dma = mp->rx_desc_sram_addr; 1896 } else { 1897 rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, 1898 size, &rxq->rx_desc_dma, 1899 GFP_KERNEL); 1900 } 1901 1902 if (rxq->rx_desc_area == NULL) { 1903 dev_printk(KERN_ERR, &mp->dev->dev, 1904 "can't allocate rx ring (%d bytes)\n", size); 1905 goto out; 1906 } 1907 memset(rxq->rx_desc_area, 0, size); 1908 1909 rxq->rx_desc_area_size = size; 1910 rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb), 1911 GFP_KERNEL); 1912 if (rxq->rx_skb == NULL) { 1913 dev_printk(KERN_ERR, &mp->dev->dev, 1914 "can't allocate rx skb ring\n"); 1915 goto out_free; 1916 } 1917 1918 rx_desc = (struct rx_desc *)rxq->rx_desc_area; 1919 for (i = 0; i < rxq->rx_ring_size; i++) { 1920 int nexti; 1921 1922 nexti = i + 1; 1923 if (nexti == rxq->rx_ring_size) 1924 nexti = 0; 1925 1926 rx_desc[i].next_desc_ptr = rxq->rx_desc_dma + 1927 nexti * sizeof(struct rx_desc); 1928 } 1929 1930 rxq->lro_mgr.dev = mp->dev; 1931 memset(&rxq->lro_mgr.stats, 0, sizeof(rxq->lro_mgr.stats)); 1932 rxq->lro_mgr.features = LRO_F_NAPI; 1933 rxq->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; 1934 rxq->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; 1935 rxq->lro_mgr.max_desc = ARRAY_SIZE(rxq->lro_arr); 1936 rxq->lro_mgr.max_aggr = 32; 1937 rxq->lro_mgr.frag_align_pad = 0; 1938 rxq->lro_mgr.lro_arr = rxq->lro_arr; 1939 rxq->lro_mgr.get_skb_header = mv643xx_get_skb_header; 1940 1941 memset(&rxq->lro_arr, 0, sizeof(rxq->lro_arr)); 1942 1943 return 0; 1944 1945 1946out_free: 1947 if (index == 0 && size <= mp->rx_desc_sram_size) 1948 iounmap(rxq->rx_desc_area); 1949 else 1950 dma_free_coherent(mp->dev->dev.parent, size, 1951 rxq->rx_desc_area, 1952 rxq->rx_desc_dma); 1953 1954out: 1955 return -ENOMEM; 1956} 1957 1958static void rxq_deinit(struct rx_queue *rxq) 1959{ 1960 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); 1961 int i; 1962 1963 rxq_disable(rxq); 1964 1965 for (i = 0; i < rxq->rx_ring_size; i++) { 1966 if (rxq->rx_skb[i]) { 1967 dev_kfree_skb(rxq->rx_skb[i]); 1968 rxq->rx_desc_count--; 1969 } 1970 } 1971 1972 if (rxq->rx_desc_count) { 1973 dev_printk(KERN_ERR, &mp->dev->dev, 1974 "error freeing rx ring -- %d skbs stuck\n", 1975 rxq->rx_desc_count); 1976 } 1977 1978 if (rxq->index == 0 && 1979 rxq->rx_desc_area_size <= mp->rx_desc_sram_size) 1980 iounmap(rxq->rx_desc_area); 1981 else 1982 dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size, 1983 rxq->rx_desc_area, rxq->rx_desc_dma); 1984 1985 kfree(rxq->rx_skb); 1986} 1987 1988static int txq_init(struct mv643xx_eth_private *mp, int index) 1989{ 1990 struct tx_queue *txq = mp->txq + index; 1991 struct tx_desc *tx_desc; 1992 int size; 1993 int i; 1994 1995 txq->index = index; 1996 1997 txq->tx_ring_size = mp->tx_ring_size; 1998 1999 txq->tx_desc_count = 0; 2000 txq->tx_curr_desc = 0; 2001 txq->tx_used_desc = 0; 2002 2003 size = txq->tx_ring_size * sizeof(struct tx_desc); 2004 2005 if (index == 0 && size <= mp->tx_desc_sram_size) { 2006 txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr, 2007 mp->tx_desc_sram_size); 2008 txq->tx_desc_dma = mp->tx_desc_sram_addr; 2009 } else { 2010 txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, 2011 size, &txq->tx_desc_dma, 2012 GFP_KERNEL); 2013 } 2014 2015 if (txq->tx_desc_area == NULL) { 2016 dev_printk(KERN_ERR, &mp->dev->dev, 2017 "can't allocate tx ring (%d bytes)\n", size); 2018 return -ENOMEM; 2019 } 2020 memset(txq->tx_desc_area, 0, size); 2021 2022 txq->tx_desc_area_size = size; 2023 2024 tx_desc = (struct tx_desc *)txq->tx_desc_area; 2025 for (i = 0; i < txq->tx_ring_size; i++) { 2026 struct tx_desc *txd = tx_desc + i; 2027 int nexti; 2028 2029 nexti = i + 1; 2030 if (nexti == txq->tx_ring_size) 2031 nexti = 0; 2032 2033 txd->cmd_sts = 0; 2034 txd->next_desc_ptr = txq->tx_desc_dma + 2035 nexti * sizeof(struct tx_desc); 2036 } 2037 2038 skb_queue_head_init(&txq->tx_skb); 2039 2040 return 0; 2041} 2042 2043static void txq_deinit(struct tx_queue *txq) 2044{ 2045 struct mv643xx_eth_private *mp = txq_to_mp(txq); 2046 2047 txq_disable(txq); 2048 txq_reclaim(txq, txq->tx_ring_size, 1); 2049 2050 BUG_ON(txq->tx_used_desc != txq->tx_curr_desc); 2051 2052 if (txq->index == 0 && 2053 txq->tx_desc_area_size <= mp->tx_desc_sram_size) 2054 iounmap(txq->tx_desc_area); 2055 else 2056 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, 2057 txq->tx_desc_area, txq->tx_desc_dma); 2058} 2059 2060 2061/* netdev ops and related ***************************************************/ 2062static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp) 2063{ 2064 u32 int_cause; 2065 u32 int_cause_ext; 2066 2067 int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask; 2068 if (int_cause == 0) 2069 return 0; 2070 2071 int_cause_ext = 0; 2072 if (int_cause & INT_EXT) { 2073 int_cause &= ~INT_EXT; 2074 int_cause_ext = rdlp(mp, INT_CAUSE_EXT); 2075 } 2076 2077 if (int_cause) { 2078 wrlp(mp, INT_CAUSE, ~int_cause); 2079 mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) & 2080 ~(rdlp(mp, TXQ_COMMAND) & 0xff); 2081 mp->work_rx |= (int_cause & INT_RX) >> 2; 2082 } 2083 2084 int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX; 2085 if (int_cause_ext) { 2086 wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext); 2087 if (int_cause_ext & INT_EXT_LINK_PHY) 2088 mp->work_link = 1; 2089 mp->work_tx |= int_cause_ext & INT_EXT_TX; 2090 } 2091 2092 return 1; 2093} 2094 2095static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id) 2096{ 2097 struct net_device *dev = (struct net_device *)dev_id; 2098 struct mv643xx_eth_private *mp = netdev_priv(dev); 2099 2100 if (unlikely(!mv643xx_eth_collect_events(mp))) 2101 return IRQ_NONE; 2102 2103 wrlp(mp, INT_MASK, 0); 2104 napi_schedule(&mp->napi); 2105 2106 return IRQ_HANDLED; 2107} 2108 2109static void handle_link_event(struct mv643xx_eth_private *mp) 2110{ 2111 struct net_device *dev = mp->dev; 2112 u32 port_status; 2113 int speed; 2114 int duplex; 2115 int fc; 2116 2117 port_status = rdlp(mp, PORT_STATUS); 2118 if (!(port_status & LINK_UP)) { 2119 if (netif_carrier_ok(dev)) { 2120 int i; 2121 2122 printk(KERN_INFO "%s: link down\n", dev->name); 2123 2124 netif_carrier_off(dev); 2125 2126 for (i = 0; i < mp->txq_count; i++) { 2127 struct tx_queue *txq = mp->txq + i; 2128 2129 txq_reclaim(txq, txq->tx_ring_size, 1); 2130 txq_reset_hw_ptr(txq); 2131 } 2132 } 2133 return; 2134 } 2135 2136 switch (port_status & PORT_SPEED_MASK) { 2137 case PORT_SPEED_10: 2138 speed = 10; 2139 break; 2140 case PORT_SPEED_100: 2141 speed = 100; 2142 break; 2143 case PORT_SPEED_1000: 2144 speed = 1000; 2145 break; 2146 default: 2147 speed = -1; 2148 break; 2149 } 2150 duplex = (port_status & FULL_DUPLEX) ? 1 : 0; 2151 fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0; 2152 2153 printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, " 2154 "flow control %sabled\n", dev->name, 2155 speed, duplex ? "full" : "half", 2156 fc ? "en" : "dis"); 2157 2158 if (!netif_carrier_ok(dev)) 2159 netif_carrier_on(dev); 2160} 2161 2162static int mv643xx_eth_poll(struct napi_struct *napi, int budget) 2163{ 2164 struct mv643xx_eth_private *mp; 2165 int work_done; 2166 2167 mp = container_of(napi, struct mv643xx_eth_private, napi); 2168 2169 if (unlikely(mp->oom)) { 2170 mp->oom = 0; 2171 del_timer(&mp->rx_oom); 2172 } 2173 2174 work_done = 0; 2175 while (work_done < budget) { 2176 u8 queue_mask; 2177 int queue; 2178 int work_tbd; 2179 2180 if (mp->work_link) { 2181 mp->work_link = 0; 2182 handle_link_event(mp); 2183 work_done++; 2184 continue; 2185 } 2186 2187 queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx; 2188 if (likely(!mp->oom)) 2189 queue_mask |= mp->work_rx_refill; 2190 2191 if (!queue_mask) { 2192 if (mv643xx_eth_collect_events(mp)) 2193 continue; 2194 break; 2195 } 2196 2197 queue = fls(queue_mask) - 1; 2198 queue_mask = 1 << queue; 2199 2200 work_tbd = budget - work_done; 2201 if (work_tbd > 16) 2202 work_tbd = 16; 2203 2204 if (mp->work_tx_end & queue_mask) { 2205 txq_kick(mp->txq + queue); 2206 } else if (mp->work_tx & queue_mask) { 2207 work_done += txq_reclaim(mp->txq + queue, work_tbd, 0); 2208 txq_maybe_wake(mp->txq + queue); 2209 } else if (mp->work_rx & queue_mask) { 2210 work_done += rxq_process(mp->rxq + queue, work_tbd); 2211 } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) { 2212 work_done += rxq_refill(mp->rxq + queue, work_tbd); 2213 } else { 2214 BUG(); 2215 } 2216 } 2217 2218 if (work_done < budget) { 2219 if (mp->oom) 2220 mod_timer(&mp->rx_oom, jiffies + (HZ / 10)); 2221 napi_complete(napi); 2222 wrlp(mp, INT_MASK, mp->int_mask); 2223 } 2224 2225 return work_done; 2226} 2227 2228static inline void oom_timer_wrapper(unsigned long data) 2229{ 2230 struct mv643xx_eth_private *mp = (void *)data; 2231 2232 napi_schedule(&mp->napi); 2233} 2234 2235static void phy_reset(struct mv643xx_eth_private *mp) 2236{ 2237 int data; 2238 2239 data = phy_read(mp->phy, MII_BMCR); 2240 if (data < 0) 2241 return; 2242 2243 data |= BMCR_RESET; 2244 if (phy_write(mp->phy, MII_BMCR, data) < 0) 2245 return; 2246 2247 do { 2248 data = phy_read(mp->phy, MII_BMCR); 2249 } while (data >= 0 && data & BMCR_RESET); 2250} 2251 2252static void port_start(struct mv643xx_eth_private *mp) 2253{ 2254 u32 pscr; 2255 int i; 2256 2257 /* 2258 * Perform PHY reset, if there is a PHY. 2259 */ 2260 if (mp->phy != NULL) { 2261 struct ethtool_cmd cmd; 2262 2263 mv643xx_eth_get_settings(mp->dev, &cmd); 2264 phy_reset(mp); 2265 mv643xx_eth_set_settings(mp->dev, &cmd); 2266 } 2267 2268 /* 2269 * Configure basic link parameters. 2270 */ 2271 pscr = rdlp(mp, PORT_SERIAL_CONTROL); 2272 2273 pscr |= SERIAL_PORT_ENABLE; 2274 wrlp(mp, PORT_SERIAL_CONTROL, pscr); 2275 2276 pscr |= DO_NOT_FORCE_LINK_FAIL; 2277 if (mp->phy == NULL) 2278 pscr |= FORCE_LINK_PASS; 2279 wrlp(mp, PORT_SERIAL_CONTROL, pscr); 2280 2281 /* 2282 * Configure TX path and queues. 2283 */ 2284 tx_set_rate(mp, 1000000000, 16777216); 2285 for (i = 0; i < mp->txq_count; i++) { 2286 struct tx_queue *txq = mp->txq + i; 2287 2288 txq_reset_hw_ptr(txq); 2289 txq_set_rate(txq, 1000000000, 16777216); 2290 txq_set_fixed_prio_mode(txq); 2291 } 2292 2293 /* 2294 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast 2295 * frames to RX queue #0, and include the pseudo-header when 2296 * calculating receive checksums. 2297 */ 2298 wrlp(mp, PORT_CONFIG, 0x02000000); 2299 2300 /* 2301 * Treat BPDUs as normal multicasts, and disable partition mode. 2302 */ 2303 wrlp(mp, PORT_CONFIG_EXT, 0x00000000); 2304 2305 /* 2306 * Add configured unicast addresses to address filter table. 2307 */ 2308 mv643xx_eth_program_unicast_filter(mp->dev); 2309 2310 /* 2311 * Enable the receive queues. 2312 */ 2313 for (i = 0; i < mp->rxq_count; i++) { 2314 struct rx_queue *rxq = mp->rxq + i; 2315 u32 addr; 2316 2317 addr = (u32)rxq->rx_desc_dma; 2318 addr += rxq->rx_curr_desc * sizeof(struct rx_desc); 2319 wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr); 2320 2321 rxq_enable(rxq); 2322 } 2323} 2324 2325static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp) 2326{ 2327 int skb_size; 2328 2329 /* 2330 * Reserve 2+14 bytes for an ethernet header (the hardware 2331 * automatically prepends 2 bytes of dummy data to each 2332 * received packet), 16 bytes for up to four VLAN tags, and 2333 * 4 bytes for the trailing FCS -- 36 bytes total. 2334 */ 2335 skb_size = mp->dev->mtu + 36; 2336 2337 /* 2338 * Make sure that the skb size is a multiple of 8 bytes, as 2339 * the lower three bits of the receive descriptor's buffer 2340 * size field are ignored by the hardware. 2341 */ 2342 mp->skb_size = (skb_size + 7) & ~7; 2343 2344 /* 2345 * If NET_SKB_PAD is smaller than a cache line, 2346 * netdev_alloc_skb() will cause skb->data to be misaligned 2347 * to a cache line boundary. If this is the case, include 2348 * some extra space to allow re-aligning the data area. 2349 */ 2350 mp->skb_size += SKB_DMA_REALIGN; 2351} 2352 2353static int mv643xx_eth_open(struct net_device *dev) 2354{ 2355 struct mv643xx_eth_private *mp = netdev_priv(dev); 2356 int err; 2357 int i; 2358 2359 wrlp(mp, INT_CAUSE, 0); 2360 wrlp(mp, INT_CAUSE_EXT, 0); 2361 rdlp(mp, INT_CAUSE_EXT); 2362 2363 err = request_irq(dev->irq, mv643xx_eth_irq, 2364 IRQF_SHARED, dev->name, dev); 2365 if (err) { 2366 dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n"); 2367 return -EAGAIN; 2368 } 2369 2370 mv643xx_eth_recalc_skb_size(mp); 2371 2372 napi_enable(&mp->napi); 2373 2374 skb_queue_head_init(&mp->rx_recycle); 2375 2376 mp->int_mask = INT_EXT; 2377 2378 for (i = 0; i < mp->rxq_count; i++) { 2379 err = rxq_init(mp, i); 2380 if (err) { 2381 while (--i >= 0) 2382 rxq_deinit(mp->rxq + i); 2383 goto out; 2384 } 2385 2386 rxq_refill(mp->rxq + i, INT_MAX); 2387 mp->int_mask |= INT_RX_0 << i; 2388 } 2389 2390 if (mp->oom) { 2391 mp->rx_oom.expires = jiffies + (HZ / 10); 2392 add_timer(&mp->rx_oom); 2393 } 2394 2395 for (i = 0; i < mp->txq_count; i++) { 2396 err = txq_init(mp, i); 2397 if (err) { 2398 while (--i >= 0) 2399 txq_deinit(mp->txq + i); 2400 goto out_free; 2401 } 2402 mp->int_mask |= INT_TX_END_0 << i; 2403 } 2404 2405 port_start(mp); 2406 2407 wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX); 2408 wrlp(mp, INT_MASK, mp->int_mask); 2409 2410 return 0; 2411 2412 2413out_free: 2414 for (i = 0; i < mp->rxq_count; i++) 2415 rxq_deinit(mp->rxq + i); 2416out: 2417 free_irq(dev->irq, dev); 2418 2419 return err; 2420} 2421 2422static void port_reset(struct mv643xx_eth_private *mp) 2423{ 2424 unsigned int data; 2425 int i; 2426 2427 for (i = 0; i < mp->rxq_count; i++) 2428 rxq_disable(mp->rxq + i); 2429 for (i = 0; i < mp->txq_count; i++) 2430 txq_disable(mp->txq + i); 2431 2432 while (1) { 2433 u32 ps = rdlp(mp, PORT_STATUS); 2434 2435 if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY) 2436 break; 2437 udelay(10); 2438 } 2439 2440 /* Reset the Enable bit in the Configuration Register */ 2441 data = rdlp(mp, PORT_SERIAL_CONTROL); 2442 data &= ~(SERIAL_PORT_ENABLE | 2443 DO_NOT_FORCE_LINK_FAIL | 2444 FORCE_LINK_PASS); 2445 wrlp(mp, PORT_SERIAL_CONTROL, data); 2446} 2447 2448static int mv643xx_eth_stop(struct net_device *dev) 2449{ 2450 struct mv643xx_eth_private *mp = netdev_priv(dev); 2451 int i; 2452 2453 wrlp(mp, INT_MASK_EXT, 0x00000000); 2454 wrlp(mp, INT_MASK, 0x00000000); 2455 rdlp(mp, INT_MASK); 2456 2457 napi_disable(&mp->napi); 2458 2459 del_timer_sync(&mp->rx_oom); 2460 2461 netif_carrier_off(dev); 2462 2463 free_irq(dev->irq, dev); 2464 2465 port_reset(mp); 2466 mv643xx_eth_get_stats(dev); 2467 mib_counters_update(mp); 2468 del_timer_sync(&mp->mib_counters_timer); 2469 2470 skb_queue_purge(&mp->rx_recycle); 2471 2472 for (i = 0; i < mp->rxq_count; i++) 2473 rxq_deinit(mp->rxq + i); 2474 for (i = 0; i < mp->txq_count; i++) 2475 txq_deinit(mp->txq + i); 2476 2477 return 0; 2478} 2479 2480static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2481{ 2482 struct mv643xx_eth_private *mp = netdev_priv(dev); 2483 2484 if (mp->phy != NULL) 2485 return phy_mii_ioctl(mp->phy, if_mii(ifr), cmd); 2486 2487 return -EOPNOTSUPP; 2488} 2489 2490static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) 2491{ 2492 struct mv643xx_eth_private *mp = netdev_priv(dev); 2493 2494 if (new_mtu < 64 || new_mtu > 9500) 2495 return -EINVAL; 2496 2497 dev->mtu = new_mtu; 2498 mv643xx_eth_recalc_skb_size(mp); 2499 tx_set_rate(mp, 1000000000, 16777216); 2500 2501 if (!netif_running(dev)) 2502 return 0; 2503 2504 /* 2505 * Stop and then re-open the interface. This will allocate RX 2506 * skbs of the new MTU. 2507 * There is a possible danger that the open will not succeed, 2508 * due to memory being full. 2509 */ 2510 mv643xx_eth_stop(dev); 2511 if (mv643xx_eth_open(dev)) { 2512 dev_printk(KERN_ERR, &dev->dev, 2513 "fatal error on re-opening device after " 2514 "MTU change\n"); 2515 } 2516 2517 return 0; 2518} 2519 2520static void tx_timeout_task(struct work_struct *ugly) 2521{ 2522 struct mv643xx_eth_private *mp; 2523 2524 mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task); 2525 if (netif_running(mp->dev)) { 2526 netif_tx_stop_all_queues(mp->dev); 2527 port_reset(mp); 2528 port_start(mp); 2529 netif_tx_wake_all_queues(mp->dev); 2530 } 2531} 2532 2533static void mv643xx_eth_tx_timeout(struct net_device *dev) 2534{ 2535 struct mv643xx_eth_private *mp = netdev_priv(dev); 2536 2537 dev_printk(KERN_INFO, &dev->dev, "tx timeout\n"); 2538 2539 schedule_work(&mp->tx_timeout_task); 2540} 2541 2542#ifdef CONFIG_NET_POLL_CONTROLLER 2543static void mv643xx_eth_netpoll(struct net_device *dev) 2544{ 2545 struct mv643xx_eth_private *mp = netdev_priv(dev); 2546 2547 wrlp(mp, INT_MASK, 0x00000000); 2548 rdlp(mp, INT_MASK); 2549 2550 mv643xx_eth_irq(dev->irq, dev); 2551 2552 wrlp(mp, INT_MASK, mp->int_mask); 2553} 2554#endif 2555 2556 2557/* platform glue ************************************************************/ 2558static void 2559mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp, 2560 struct mbus_dram_target_info *dram) 2561{ 2562 void __iomem *base = msp->base; 2563 u32 win_enable; 2564 u32 win_protect; 2565 int i; 2566 2567 for (i = 0; i < 6; i++) { 2568 writel(0, base + WINDOW_BASE(i)); 2569 writel(0, base + WINDOW_SIZE(i)); 2570 if (i < 4) 2571 writel(0, base + WINDOW_REMAP_HIGH(i)); 2572 } 2573 2574 win_enable = 0x3f; 2575 win_protect = 0; 2576 2577 for (i = 0; i < dram->num_cs; i++) { 2578 struct mbus_dram_window *cs = dram->cs + i; 2579 2580 writel((cs->base & 0xffff0000) | 2581 (cs->mbus_attr << 8) | 2582 dram->mbus_dram_target_id, base + WINDOW_BASE(i)); 2583 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); 2584 2585 win_enable &= ~(1 << i); 2586 win_protect |= 3 << (2 * i); 2587 } 2588 2589 writel(win_enable, base + WINDOW_BAR_ENABLE); 2590 msp->win_protect = win_protect; 2591} 2592 2593static void infer_hw_params(struct mv643xx_eth_shared_private *msp) 2594{ 2595 /* 2596 * Check whether we have a 14-bit coal limit field in bits 2597 * [21:8], or a 16-bit coal limit in bits [25,21:7] of the 2598 * SDMA config register. 2599 */ 2600 writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG); 2601 if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000) 2602 msp->extended_rx_coal_limit = 1; 2603 else 2604 msp->extended_rx_coal_limit = 0; 2605 2606 /* 2607 * Check whether the MAC supports TX rate control, and if 2608 * yes, whether its associated registers are in the old or 2609 * the new place. 2610 */ 2611 writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED); 2612 if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) { 2613 msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT; 2614 } else { 2615 writel(7, msp->base + 0x0400 + TX_BW_RATE); 2616 if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7) 2617 msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT; 2618 else 2619 msp->tx_bw_control = TX_BW_CONTROL_ABSENT; 2620 } 2621} 2622 2623static int mv643xx_eth_shared_probe(struct platform_device *pdev) 2624{ 2625 static int mv643xx_eth_version_printed; 2626 struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; 2627 struct mv643xx_eth_shared_private *msp; 2628 struct resource *res; 2629 int ret; 2630 2631 if (!mv643xx_eth_version_printed++) 2632 printk(KERN_NOTICE "MV-643xx 10/100/1000 ethernet " 2633 "driver version %s\n", mv643xx_eth_driver_version); 2634 2635 ret = -EINVAL; 2636 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2637 if (res == NULL) 2638 goto out; 2639 2640 ret = -ENOMEM; 2641 msp = kmalloc(sizeof(*msp), GFP_KERNEL); 2642 if (msp == NULL) 2643 goto out; 2644 memset(msp, 0, sizeof(*msp)); 2645 2646 msp->base = ioremap(res->start, res->end - res->start + 1); 2647 if (msp->base == NULL) 2648 goto out_free; 2649 2650 /* 2651 * Set up and register SMI bus. 2652 */ 2653 if (pd == NULL || pd->shared_smi == NULL) { 2654 msp->smi_bus = mdiobus_alloc(); 2655 if (msp->smi_bus == NULL) 2656 goto out_unmap; 2657 2658 msp->smi_bus->priv = msp; 2659 msp->smi_bus->name = "mv643xx_eth smi"; 2660 msp->smi_bus->read = smi_bus_read; 2661 msp->smi_bus->write = smi_bus_write, 2662 snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id); 2663 msp->smi_bus->parent = &pdev->dev; 2664 msp->smi_bus->phy_mask = 0xffffffff; 2665 if (mdiobus_register(msp->smi_bus) < 0) 2666 goto out_free_mii_bus; 2667 msp->smi = msp; 2668 } else { 2669 msp->smi = platform_get_drvdata(pd->shared_smi); 2670 } 2671 2672 msp->err_interrupt = NO_IRQ; 2673 init_waitqueue_head(&msp->smi_busy_wait); 2674 2675 /* 2676 * Check whether the error interrupt is hooked up. 2677 */ 2678 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 2679 if (res != NULL) { 2680 int err; 2681 2682 err = request_irq(res->start, mv643xx_eth_err_irq, 2683 IRQF_SHARED, "mv643xx_eth", msp); 2684 if (!err) { 2685 writel(ERR_INT_SMI_DONE, msp->base + ERR_INT_MASK); 2686 msp->err_interrupt = res->start; 2687 } 2688 } 2689 2690 /* 2691 * (Re-)program MBUS remapping windows if we are asked to. 2692 */ 2693 if (pd != NULL && pd->dram != NULL) 2694 mv643xx_eth_conf_mbus_windows(msp, pd->dram); 2695 2696 /* 2697 * Detect hardware parameters. 2698 */ 2699 msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000; 2700 infer_hw_params(msp); 2701 2702 platform_set_drvdata(pdev, msp); 2703 2704 return 0; 2705 2706out_free_mii_bus: 2707 mdiobus_free(msp->smi_bus); 2708out_unmap: 2709 iounmap(msp->base); 2710out_free: 2711 kfree(msp); 2712out: 2713 return ret; 2714} 2715 2716static int mv643xx_eth_shared_remove(struct platform_device *pdev) 2717{ 2718 struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev); 2719 struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; 2720 2721 if (pd == NULL || pd->shared_smi == NULL) { 2722 mdiobus_unregister(msp->smi_bus); 2723 mdiobus_free(msp->smi_bus); 2724 } 2725 if (msp->err_interrupt != NO_IRQ) 2726 free_irq(msp->err_interrupt, msp); 2727 iounmap(msp->base); 2728 kfree(msp); 2729 2730 return 0; 2731} 2732 2733static struct platform_driver mv643xx_eth_shared_driver = { 2734 .probe = mv643xx_eth_shared_probe, 2735 .remove = mv643xx_eth_shared_remove, 2736 .driver = { 2737 .name = MV643XX_ETH_SHARED_NAME, 2738 .owner = THIS_MODULE, 2739 }, 2740}; 2741 2742static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr) 2743{ 2744 int addr_shift = 5 * mp->port_num; 2745 u32 data; 2746 2747 data = rdl(mp, PHY_ADDR); 2748 data &= ~(0x1f << addr_shift); 2749 data |= (phy_addr & 0x1f) << addr_shift; 2750 wrl(mp, PHY_ADDR, data); 2751} 2752 2753static int phy_addr_get(struct mv643xx_eth_private *mp) 2754{ 2755 unsigned int data; 2756 2757 data = rdl(mp, PHY_ADDR); 2758 2759 return (data >> (5 * mp->port_num)) & 0x1f; 2760} 2761 2762static void set_params(struct mv643xx_eth_private *mp, 2763 struct mv643xx_eth_platform_data *pd) 2764{ 2765 struct net_device *dev = mp->dev; 2766 2767 if (is_valid_ether_addr(pd->mac_addr)) 2768 memcpy(dev->dev_addr, pd->mac_addr, 6); 2769 else 2770 uc_addr_get(mp, dev->dev_addr); 2771 2772 mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE; 2773 if (pd->rx_queue_size) 2774 mp->rx_ring_size = pd->rx_queue_size; 2775 mp->rx_desc_sram_addr = pd->rx_sram_addr; 2776 mp->rx_desc_sram_size = pd->rx_sram_size; 2777 2778 mp->rxq_count = pd->rx_queue_count ? : 1; 2779 2780 mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE; 2781 if (pd->tx_queue_size) 2782 mp->tx_ring_size = pd->tx_queue_size; 2783 mp->tx_desc_sram_addr = pd->tx_sram_addr; 2784 mp->tx_desc_sram_size = pd->tx_sram_size; 2785 2786 mp->txq_count = pd->tx_queue_count ? : 1; 2787} 2788 2789static struct phy_device *phy_scan(struct mv643xx_eth_private *mp, 2790 int phy_addr) 2791{ 2792 struct mii_bus *bus = mp->shared->smi->smi_bus; 2793 struct phy_device *phydev; 2794 int start; 2795 int num; 2796 int i; 2797 2798 if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) { 2799 start = phy_addr_get(mp) & 0x1f; 2800 num = 32; 2801 } else { 2802 start = phy_addr & 0x1f; 2803 num = 1; 2804 } 2805 2806 phydev = NULL; 2807 for (i = 0; i < num; i++) { 2808 int addr = (start + i) & 0x1f; 2809 2810 if (bus->phy_map[addr] == NULL) 2811 mdiobus_scan(bus, addr); 2812 2813 if (phydev == NULL) { 2814 phydev = bus->phy_map[addr]; 2815 if (phydev != NULL) 2816 phy_addr_set(mp, addr); 2817 } 2818 } 2819 2820 return phydev; 2821} 2822 2823static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex) 2824{ 2825 struct phy_device *phy = mp->phy; 2826 2827 phy_reset(mp); 2828 2829 phy_attach(mp->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_GMII); 2830 2831 if (speed == 0) { 2832 phy->autoneg = AUTONEG_ENABLE; 2833 phy->speed = 0; 2834 phy->duplex = 0; 2835 phy->advertising = phy->supported | ADVERTISED_Autoneg; 2836 } else { 2837 phy->autoneg = AUTONEG_DISABLE; 2838 phy->advertising = 0; 2839 phy->speed = speed; 2840 phy->duplex = duplex; 2841 } 2842 phy_start_aneg(phy); 2843} 2844 2845static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex) 2846{ 2847 u32 pscr; 2848 2849 pscr = rdlp(mp, PORT_SERIAL_CONTROL); 2850 if (pscr & SERIAL_PORT_ENABLE) { 2851 pscr &= ~SERIAL_PORT_ENABLE; 2852 wrlp(mp, PORT_SERIAL_CONTROL, pscr); 2853 } 2854 2855 pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED; 2856 if (mp->phy == NULL) { 2857 pscr |= DISABLE_AUTO_NEG_SPEED_GMII; 2858 if (speed == SPEED_1000) 2859 pscr |= SET_GMII_SPEED_TO_1000; 2860 else if (speed == SPEED_100) 2861 pscr |= SET_MII_SPEED_TO_100; 2862 2863 pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL; 2864 2865 pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX; 2866 if (duplex == DUPLEX_FULL) 2867 pscr |= SET_FULL_DUPLEX_MODE; 2868 } 2869 2870 wrlp(mp, PORT_SERIAL_CONTROL, pscr); 2871} 2872 2873static const struct net_device_ops mv643xx_eth_netdev_ops = { 2874 .ndo_open = mv643xx_eth_open, 2875 .ndo_stop = mv643xx_eth_stop, 2876 .ndo_start_xmit = mv643xx_eth_xmit, 2877 .ndo_set_rx_mode = mv643xx_eth_set_rx_mode, 2878 .ndo_set_mac_address = mv643xx_eth_set_mac_address, 2879 .ndo_do_ioctl = mv643xx_eth_ioctl, 2880 .ndo_change_mtu = mv643xx_eth_change_mtu, 2881 .ndo_tx_timeout = mv643xx_eth_tx_timeout, 2882 .ndo_get_stats = mv643xx_eth_get_stats, 2883#ifdef CONFIG_NET_POLL_CONTROLLER 2884 .ndo_poll_controller = mv643xx_eth_netpoll, 2885#endif 2886}; 2887 2888static int mv643xx_eth_probe(struct platform_device *pdev) 2889{ 2890 struct mv643xx_eth_platform_data *pd; 2891 struct mv643xx_eth_private *mp; 2892 struct net_device *dev; 2893 struct resource *res; 2894 int err; 2895 2896 pd = pdev->dev.platform_data; 2897 if (pd == NULL) { 2898 dev_printk(KERN_ERR, &pdev->dev, 2899 "no mv643xx_eth_platform_data\n"); 2900 return -ENODEV; 2901 } 2902 2903 if (pd->shared == NULL) { 2904 dev_printk(KERN_ERR, &pdev->dev, 2905 "no mv643xx_eth_platform_data->shared\n"); 2906 return -ENODEV; 2907 } 2908 2909 dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8); 2910 if (!dev) 2911 return -ENOMEM; 2912 2913 mp = netdev_priv(dev); 2914 platform_set_drvdata(pdev, mp); 2915 2916 mp->shared = platform_get_drvdata(pd->shared); 2917 mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10); 2918 mp->port_num = pd->port_number; 2919 2920 mp->dev = dev; 2921 2922 set_params(mp, pd); 2923 dev->real_num_tx_queues = mp->txq_count; 2924 2925 if (pd->phy_addr != MV643XX_ETH_PHY_NONE) 2926 mp->phy = phy_scan(mp, pd->phy_addr); 2927 2928 if (mp->phy != NULL) 2929 phy_init(mp, pd->speed, pd->duplex); 2930 2931 SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops); 2932 2933 init_pscr(mp, pd->speed, pd->duplex); 2934 2935 2936 mib_counters_clear(mp); 2937 2938 init_timer(&mp->mib_counters_timer); 2939 mp->mib_counters_timer.data = (unsigned long)mp; 2940 mp->mib_counters_timer.function = mib_counters_timer_wrapper; 2941 mp->mib_counters_timer.expires = jiffies + 30 * HZ; 2942 add_timer(&mp->mib_counters_timer); 2943 2944 spin_lock_init(&mp->mib_counters_lock); 2945 2946 INIT_WORK(&mp->tx_timeout_task, tx_timeout_task); 2947 2948 netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128); 2949 2950 init_timer(&mp->rx_oom); 2951 mp->rx_oom.data = (unsigned long)mp; 2952 mp->rx_oom.function = oom_timer_wrapper; 2953 2954 2955 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 2956 BUG_ON(!res); 2957 dev->irq = res->start; 2958 2959 dev->netdev_ops = &mv643xx_eth_netdev_ops; 2960 2961 dev->watchdog_timeo = 2 * HZ; 2962 dev->base_addr = 0; 2963 2964 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM; 2965 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM; 2966 2967 SET_NETDEV_DEV(dev, &pdev->dev); 2968 2969 if (mp->shared->win_protect) 2970 wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect); 2971 2972 netif_carrier_off(dev); 2973 2974 wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE); 2975 2976 set_rx_coal(mp, 250); 2977 set_tx_coal(mp, 0); 2978 2979 err = register_netdev(dev); 2980 if (err) 2981 goto out; 2982 2983 dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %pM\n", 2984 mp->port_num, dev->dev_addr); 2985 2986 if (mp->tx_desc_sram_size > 0) 2987 dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n"); 2988 2989 return 0; 2990 2991out: 2992 free_netdev(dev); 2993 2994 return err; 2995} 2996 2997static int mv643xx_eth_remove(struct platform_device *pdev) 2998{ 2999 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); 3000 3001 unregister_netdev(mp->dev); 3002 if (mp->phy != NULL) 3003 phy_detach(mp->phy); 3004 flush_scheduled_work(); 3005 free_netdev(mp->dev); 3006 3007 platform_set_drvdata(pdev, NULL); 3008 3009 return 0; 3010} 3011 3012static void mv643xx_eth_shutdown(struct platform_device *pdev) 3013{ 3014 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); 3015 3016 /* Mask all interrupts on ethernet port */ 3017 wrlp(mp, INT_MASK, 0); 3018 rdlp(mp, INT_MASK); 3019 3020 if (netif_running(mp->dev)) 3021 port_reset(mp); 3022} 3023 3024static struct platform_driver mv643xx_eth_driver = { 3025 .probe = mv643xx_eth_probe, 3026 .remove = mv643xx_eth_remove, 3027 .shutdown = mv643xx_eth_shutdown, 3028 .driver = { 3029 .name = MV643XX_ETH_NAME, 3030 .owner = THIS_MODULE, 3031 }, 3032}; 3033 3034static int __init mv643xx_eth_init_module(void) 3035{ 3036 int rc; 3037 3038 rc = platform_driver_register(&mv643xx_eth_shared_driver); 3039 if (!rc) { 3040 rc = platform_driver_register(&mv643xx_eth_driver); 3041 if (rc) 3042 platform_driver_unregister(&mv643xx_eth_shared_driver); 3043 } 3044 3045 return rc; 3046} 3047module_init(mv643xx_eth_init_module); 3048 3049static void __exit mv643xx_eth_cleanup_module(void) 3050{ 3051 platform_driver_unregister(&mv643xx_eth_driver); 3052 platform_driver_unregister(&mv643xx_eth_shared_driver); 3053} 3054module_exit(mv643xx_eth_cleanup_module); 3055 3056MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, " 3057 "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek"); 3058MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX"); 3059MODULE_LICENSE("GPL"); 3060MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME); 3061MODULE_ALIAS("platform:" MV643XX_ETH_NAME);