Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.39-rc4, 3030 lines, 73 kB
/*
 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
 *
 * Based on the 64360 driver from:
 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
 *		      Rabeeh Khoury <rabeeh@marvell.com>
 *
 * Copyright (C) 2003 PMC-Sierra, Inc.,
 *	written by Manish Lachwani
 *
 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
 *
 * Copyright (C) 2004-2006 MontaVista Software, Inc.
 *			   Dale Farnsworth <dale@farnsworth.org>
 *
 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
 *				     <sjhill@realitydiluted.com>
 *
 * Copyright (C) 2007-2008 Marvell Semiconductor
 *			   Lennert Buytenhek <buytenh@marvell.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/phy.h>
#include <linux/mv643xx_eth.h>
#include <linux/io.h>
#include <linux/types.h>
#include <linux/inet_lro.h>
#include <linux/slab.h>
#include <asm/system.h>

static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.4";


/*
 * Registers shared between all ports.
 */
#define PHY_ADDR			0x0000
#define SMI_REG				0x0004
#define  SMI_BUSY			0x10000000
#define  SMI_READ_VALID			0x08000000
#define  SMI_OPCODE_READ		0x04000000
#define  SMI_OPCODE_WRITE		0x00000000
#define ERR_INT_CAUSE			0x0080
#define  ERR_INT_SMI_DONE		0x00000010
#define ERR_INT_MASK			0x0084
#define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
#define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
#define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
#define WINDOW_BAR_ENABLE		0x0290
#define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))

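/*
 * SMI frame layout, as used by smi_bus_read()/smi_bus_write() below:
 * the read/write opcode lives in bit 26, the register number in bits
 * 25:21, the PHY address in bits 20:16, and the 16-bit data in bits
 * 15:0.  SMI_BUSY and SMI_READ_VALID report transaction state.
 */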
/*
 * Main per-port registers.  These live at offset 0x0400 for
 * port #0, 0x0800 for port #1, and 0x0c00 for port #2.
 */
#define PORT_CONFIG			0x0000
#define  UNICAST_PROMISCUOUS_MODE	0x00000001
#define PORT_CONFIG_EXT			0x0004
#define MAC_ADDR_LOW			0x0014
#define MAC_ADDR_HIGH			0x0018
#define SDMA_CONFIG			0x001c
#define  TX_BURST_SIZE_16_64BIT		0x01000000
#define  TX_BURST_SIZE_4_64BIT		0x00800000
#define  BLM_TX_NO_SWAP			0x00000020
#define  BLM_RX_NO_SWAP			0x00000010
#define  RX_BURST_SIZE_16_64BIT		0x00000008
#define  RX_BURST_SIZE_4_64BIT		0x00000004
#define PORT_SERIAL_CONTROL		0x003c
#define  SET_MII_SPEED_TO_100		0x01000000
#define  SET_GMII_SPEED_TO_1000		0x00800000
#define  SET_FULL_DUPLEX_MODE		0x00200000
#define  MAX_RX_PACKET_9700BYTE		0x000a0000
#define  DISABLE_AUTO_NEG_SPEED_GMII	0x00002000
#define  DO_NOT_FORCE_LINK_FAIL		0x00000400
#define  SERIAL_PORT_CONTROL_RESERVED	0x00000200
#define  DISABLE_AUTO_NEG_FOR_FLOW_CTRL	0x00000008
#define  DISABLE_AUTO_NEG_FOR_DUPLEX	0x00000004
#define  FORCE_LINK_PASS		0x00000002
#define  SERIAL_PORT_ENABLE		0x00000001
#define PORT_STATUS			0x0044
#define  TX_FIFO_EMPTY			0x00000400
#define  TX_IN_PROGRESS			0x00000080
#define  PORT_SPEED_MASK		0x00000030
#define  PORT_SPEED_1000		0x00000010
#define  PORT_SPEED_100			0x00000020
#define  PORT_SPEED_10			0x00000000
#define  FLOW_CONTROL_ENABLED		0x00000008
#define  FULL_DUPLEX			0x00000004
#define  LINK_UP			0x00000002
#define TXQ_COMMAND			0x0048
#define TXQ_FIX_PRIO_CONF		0x004c
#define TX_BW_RATE			0x0050
#define TX_BW_MTU			0x0058
#define TX_BW_BURST			0x005c
#define INT_CAUSE			0x0060
#define  INT_TX_END			0x07f80000
#define  INT_TX_END_0			0x00080000
#define  INT_RX				0x000003fc
#define  INT_RX_0			0x00000004
#define  INT_EXT			0x00000002
#define INT_CAUSE_EXT			0x0064
#define  INT_EXT_LINK_PHY		0x00110000
#define  INT_EXT_TX			0x000000ff
#define INT_MASK			0x0068
#define INT_MASK_EXT			0x006c
#define TX_FIFO_URGENT_THRESHOLD	0x0074
#define TXQ_FIX_PRIO_CONF_MOVED		0x00dc
#define TX_BW_RATE_MOVED		0x00e0
#define TX_BW_MTU_MOVED			0x00e8
#define TX_BW_BURST_MOVED		0x00ec
#define RXQ_CURRENT_DESC_PTR(q)		(0x020c + ((q) << 4))
#define RXQ_COMMAND			0x0280
#define TXQ_CURRENT_DESC_PTR(q)		(0x02c0 + ((q) << 2))
#define TXQ_BW_TOKENS(q)		(0x0300 + ((q) << 4))
#define TXQ_BW_CONF(q)			(0x0304 + ((q) << 4))
#define TXQ_BW_WRR_CONF(q)		(0x0308 + ((q) << 4))

/*
 * Misc per-port registers.
 */
#define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
#define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
#define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
#define UNICAST_TABLE(p)		(0x1600 + ((p) << 10))


/*
 * SDMA configuration register default value.
 */
#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		(RX_BURST_SIZE_4_64BIT	|	\
		 TX_BURST_SIZE_4_64BIT)
#elif defined(__LITTLE_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		(RX_BURST_SIZE_4_64BIT	|	\
		 BLM_RX_NO_SWAP		|	\
		 BLM_TX_NO_SWAP		|	\
		 TX_BURST_SIZE_4_64BIT)
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif


/*
 * Misc definitions.
 */
#define DEFAULT_RX_QUEUE_SIZE	128
#define DEFAULT_TX_QUEUE_SIZE	256
#define SKB_DMA_REALIGN		((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)


/*
 * RX/TX descriptors.
 */
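/*
 * The descriptor field order differs between the big- and
 * little-endian layouts below: the two u16 fields within a 32-bit
 * word, and the words within each pair, appear swapped.  This
 * matches the SDMA byte-lane swap configuration selected via
 * BLM_RX_NO_SWAP/BLM_TX_NO_SWAP in PORT_SDMA_CONFIG_DEFAULT_VALUE
 * above.
 */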
#if defined(__BIG_ENDIAN)
struct rx_desc {
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u16 buf_size;		/* Buffer size				*/
	u32 cmd_sts;		/* Descriptor command status		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
};

struct tx_desc {
	u16 byte_cnt;		/* buffer byte count			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u32 cmd_sts;		/* Command/status field			*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
};
#elif defined(__LITTLE_ENDIAN)
struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status		*/
	u16 buf_size;		/* Buffer size				*/
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
};

struct tx_desc {
	u32 cmd_sts;		/* Command/status field			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u16 byte_cnt;		/* buffer byte count			*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

/* RX & TX descriptor command */
#define BUFFER_OWNED_BY_DMA		0x80000000

/* RX & TX descriptor status */
#define ERROR_SUMMARY			0x00000001

/* RX descriptor status */
#define LAYER_4_CHECKSUM_OK		0x40000000
#define RX_ENABLE_INTERRUPT		0x20000000
#define RX_FIRST_DESC			0x08000000
#define RX_LAST_DESC			0x04000000
#define RX_IP_HDR_OK			0x02000000
#define RX_PKT_IS_IPV4			0x01000000
#define RX_PKT_IS_ETHERNETV2		0x00800000
#define RX_PKT_LAYER4_TYPE_MASK		0x00600000
#define RX_PKT_LAYER4_TYPE_TCP_IPV4	0x00000000
#define RX_PKT_IS_VLAN_TAGGED		0x00080000

/* TX descriptor command */
#define TX_ENABLE_INTERRUPT		0x00800000
#define GEN_CRC				0x00400000
#define TX_FIRST_DESC			0x00200000
#define TX_LAST_DESC			0x00100000
#define ZERO_PADDING			0x00080000
#define GEN_IP_V4_CHECKSUM		0x00040000
#define GEN_TCP_UDP_CHECKSUM		0x00020000
#define UDP_FRAME			0x00010000
#define MAC_HDR_EXTRA_4_BYTES		0x00008000
#define MAC_HDR_EXTRA_8_BYTES		0x00000200

#define TX_IHL_SHIFT			11


/* global *******************************************************************/
struct mv643xx_eth_shared_private {
	/*
	 * Ethernet controller base address.
	 */
	void __iomem *base;

	/*
	 * Points at the right SMI instance to use.
	 */
	struct mv643xx_eth_shared_private *smi;

	/*
	 * Provides access to local SMI interface.
	 */
	struct mii_bus *smi_bus;

	/*
	 * If we have access to the error interrupt pin (which is
	 * somewhat misnamed as it not only reflects internal errors
	 * but also reflects SMI completion), use that to wait for
	 * SMI access completion instead of polling the SMI busy bit.
	 */
	int err_interrupt;
	wait_queue_head_t smi_busy_wait;

	/*
	 * Per-port MBUS window access register value.
	 */
	u32 win_protect;

	/*
	 * Hardware-specific parameters.
	 */
	unsigned int t_clk;
	int extended_rx_coal_limit;
	int tx_bw_control;
	int tx_csum_limit;
};

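/*
 * tx_bw_control records which TX rate limit register layout this
 * silicon provides: no rate limiting at all, the original offsets
 * (TX_BW_RATE and friends), or the relocated "_MOVED" offsets
 * defined above.
 */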
#define TX_BW_CONTROL_ABSENT		0
#define TX_BW_CONTROL_OLD_LAYOUT	1
#define TX_BW_CONTROL_NEW_LAYOUT	2

static int mv643xx_eth_open(struct net_device *dev);
static int mv643xx_eth_stop(struct net_device *dev);


/* per-port *****************************************************************/
struct mib_counters {
	u64 good_octets_received;
	u32 bad_octets_received;
	u32 internal_mac_transmit_err;
	u32 good_frames_received;
	u32 bad_frames_received;
	u32 broadcast_frames_received;
	u32 multicast_frames_received;
	u32 frames_64_octets;
	u32 frames_65_to_127_octets;
	u32 frames_128_to_255_octets;
	u32 frames_256_to_511_octets;
	u32 frames_512_to_1023_octets;
	u32 frames_1024_to_max_octets;
	u64 good_octets_sent;
	u32 good_frames_sent;
	u32 excessive_collision;
	u32 multicast_frames_sent;
	u32 broadcast_frames_sent;
	u32 unrec_mac_control_received;
	u32 fc_sent;
	u32 good_fc_received;
	u32 bad_fc_received;
	u32 undersize_received;
	u32 fragments_received;
	u32 oversize_received;
	u32 jabber_received;
	u32 mac_receive_error;
	u32 bad_crc_event;
	u32 collision;
	u32 late_collision;
};

struct lro_counters {
	u32 lro_aggregated;
	u32 lro_flushed;
	u32 lro_no_desc;
};

struct rx_queue {
	int index;

	int rx_ring_size;

	int rx_desc_count;
	int rx_curr_desc;
	int rx_used_desc;

	struct rx_desc *rx_desc_area;
	dma_addr_t rx_desc_dma;
	int rx_desc_area_size;
	struct sk_buff **rx_skb;

	struct net_lro_mgr lro_mgr;
	struct net_lro_desc lro_arr[8];
};

struct tx_queue {
	int index;

	int tx_ring_size;

	int tx_desc_count;
	int tx_curr_desc;
	int tx_used_desc;

	struct tx_desc *tx_desc_area;
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;

	struct sk_buff_head tx_skb;

	unsigned long tx_packets;
	unsigned long tx_bytes;
	unsigned long tx_dropped;
};

struct mv643xx_eth_private {
	struct mv643xx_eth_shared_private *shared;
	void __iomem *base;
	int port_num;

	struct net_device *dev;

	struct phy_device *phy;

	struct timer_list mib_counters_timer;
	spinlock_t mib_counters_lock;
	struct mib_counters mib_counters;

	struct lro_counters lro_counters;

	struct work_struct tx_timeout_task;

	struct napi_struct napi;
	u32 int_mask;
	u8 oom;
	u8 work_link;
	u8 work_tx;
	u8 work_tx_end;
	u8 work_rx;
	u8 work_rx_refill;

	int skb_size;
	struct sk_buff_head rx_recycle;

	/*
	 * RX state.
	 */
	int rx_ring_size;
	unsigned long rx_desc_sram_addr;
	int rx_desc_sram_size;
	int rxq_count;
	struct timer_list rx_oom;
	struct rx_queue rxq[8];

	/*
	 * TX state.
	 */
	int tx_ring_size;
	unsigned long tx_desc_sram_addr;
	int tx_desc_sram_size;
	int txq_count;
	struct tx_queue txq[8];
};


/* port register accessors **************************************************/
static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->shared->base + offset);
}

static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->base + offset);
}

static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->shared->base + offset);
}

static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->base + offset);
}


/* rxq/txq helper functions *************************************************/
static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
{
	return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
}

static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
{
	return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
}

static void rxq_enable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
}

static void rxq_disable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	u8 mask = 1 << rxq->index;

	wrlp(mp, RXQ_COMMAND, mask << 8);
	while (rdlp(mp, RXQ_COMMAND) & mask)
		udelay(10);
}

static void txq_reset_hw_ptr(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u32 addr;

	addr = (u32)txq->tx_desc_dma;
	addr += txq->tx_curr_desc * sizeof(struct tx_desc);
	wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr);
}

static void txq_enable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	wrlp(mp, TXQ_COMMAND, 1 << txq->index);
}

static void txq_disable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u8 mask = 1 << txq->index;

	wrlp(mp, TXQ_COMMAND, mask << 8);
	while (rdlp(mp, TXQ_COMMAND) & mask)
		udelay(10);
}

static void txq_maybe_wake(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);

	if (netif_tx_queue_stopped(nq)) {
		__netif_tx_lock(nq, smp_processor_id());
		if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
			netif_tx_wake_queue(nq);
		__netif_tx_unlock(nq);
	}
}


/* rx napi ******************************************************************/
static int
mv643xx_get_skb_header(struct sk_buff *skb, void **iphdr, void **tcph,
		       u64 *hdr_flags, void *priv)
{
	unsigned long cmd_sts = (unsigned long)priv;

	/*
	 * Make sure that this packet is Ethernet II, is not VLAN
	 * tagged, is IPv4, has a valid IP header, and is TCP.
	 */
	if ((cmd_sts & (RX_IP_HDR_OK | RX_PKT_IS_IPV4 |
			RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_MASK |
			RX_PKT_IS_VLAN_TAGGED)) !=
	    (RX_IP_HDR_OK | RX_PKT_IS_IPV4 |
	     RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_TCP_IPV4))
		return -1;

	skb_reset_network_header(skb);
	skb_set_transport_header(skb, ip_hdrlen(skb));
	*iphdr = ip_hdr(skb);
	*tcph = tcp_hdr(skb);
	*hdr_flags = LRO_IPV4 | LRO_TCP;

	return 0;
}

static int rxq_process(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	struct net_device_stats *stats = &mp->dev->stats;
	int lro_flush_needed;
	int rx;

	lro_flush_needed = 0;
	rx = 0;
	while (rx < budget && rxq->rx_desc_count) {
		struct rx_desc *rx_desc;
		unsigned int cmd_sts;
		struct sk_buff *skb;
		u16 byte_cnt;

		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];

		cmd_sts = rx_desc->cmd_sts;
		if (cmd_sts & BUFFER_OWNED_BY_DMA)
			break;
		rmb();

		skb = rxq->rx_skb[rxq->rx_curr_desc];
		rxq->rx_skb[rxq->rx_curr_desc] = NULL;

		rxq->rx_curr_desc++;
		if (rxq->rx_curr_desc == rxq->rx_ring_size)
			rxq->rx_curr_desc = 0;

		dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr,
				 rx_desc->buf_size, DMA_FROM_DEVICE);
		rxq->rx_desc_count--;
		rx++;

		mp->work_rx_refill |= 1 << rxq->index;

		byte_cnt = rx_desc->byte_cnt;

		/*
		 * Update statistics.
		 *
		 * Note that the descriptor byte count includes 2 dummy
		 * bytes automatically inserted by the hardware at the
		 * start of the packet (which we don't count), and a 4
		 * byte CRC at the end of the packet (which we do count).
		 */
		stats->rx_packets++;
		stats->rx_bytes += byte_cnt - 2;

		/*
		 * In case we received a packet without first / last bits
		 * on, or the error summary bit is set, the packet needs
		 * to be dropped.
		 */
		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY))
			!= (RX_FIRST_DESC | RX_LAST_DESC))
			goto err;

		/*
		 * The -4 is for the CRC in the trailer of the
		 * received packet
		 */
		skb_put(skb, byte_cnt - 2 - 4);

		if (cmd_sts & LAYER_4_CHECKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->protocol = eth_type_trans(skb, mp->dev);

		if (skb->dev->features & NETIF_F_LRO &&
		    skb->ip_summed == CHECKSUM_UNNECESSARY) {
			lro_receive_skb(&rxq->lro_mgr, skb, (void *)cmd_sts);
			lro_flush_needed = 1;
		} else
			netif_receive_skb(skb);

		continue;

err:
		stats->rx_dropped++;

		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
			(RX_FIRST_DESC | RX_LAST_DESC)) {
			if (net_ratelimit())
				netdev_err(mp->dev,
					   "received packet spanning multiple descriptors\n");
		}

		if (cmd_sts & ERROR_SUMMARY)
			stats->rx_errors++;

		dev_kfree_skb(skb);
	}

	if (lro_flush_needed)
		lro_flush_all(&rxq->lro_mgr);

	if (rx < budget)
		mp->work_rx &= ~(1 << rxq->index);

	return rx;
}

static int rxq_refill(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int refilled;

	refilled = 0;
	while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
		struct sk_buff *skb;
		int rx;
		struct rx_desc *rx_desc;
		int size;

		skb = __skb_dequeue(&mp->rx_recycle);
		if (skb == NULL)
			skb = dev_alloc_skb(mp->skb_size);

		if (skb == NULL) {
			mp->oom = 1;
			goto oom;
		}

		if (SKB_DMA_REALIGN)
			skb_reserve(skb, SKB_DMA_REALIGN);

		refilled++;
		rxq->rx_desc_count++;

		rx = rxq->rx_used_desc++;
		if (rxq->rx_used_desc == rxq->rx_ring_size)
			rxq->rx_used_desc = 0;

		rx_desc = rxq->rx_desc_area + rx;

		size = skb->end - skb->data;
		rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
						  skb->data, size,
						  DMA_FROM_DEVICE);
		rx_desc->buf_size = size;
		rxq->rx_skb[rx] = skb;
		wmb();
		rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
		wmb();

		/*
		 * The hardware automatically prepends 2 bytes of
		 * dummy data to each received packet, so that the
		 * IP header ends up 16-byte aligned.
		 */
		skb_reserve(skb, 2);
	}

	if (refilled < budget)
		mp->work_rx_refill &= ~(1 << rxq->index);

oom:
	return refilled;
}


/* tx ***********************************************************************/
static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
{
	int frag;

	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
		skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
		if (fragp->size <= 8 && fragp->page_offset & 7)
			return 1;
	}

	return 0;
}

static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag;

	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag_t *this_frag;
		int tx_index;
		struct tx_desc *desc;

		this_frag = &skb_shinfo(skb)->frags[frag];
		tx_index = txq->tx_curr_desc++;
		if (txq->tx_curr_desc == txq->tx_ring_size)
			txq->tx_curr_desc = 0;
		desc = &txq->tx_desc_area[tx_index];

		/*
		 * The last fragment will generate an interrupt
		 * which will free the skb on TX completion.
		 */
		if (frag == nr_frags - 1) {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA |
					ZERO_PADDING | TX_LAST_DESC |
					TX_ENABLE_INTERRUPT;
		} else {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA;
		}

		desc->l4i_chk = 0;
		desc->byte_cnt = this_frag->size;
		desc->buf_ptr = dma_map_page(mp->dev->dev.parent,
					     this_frag->page,
					     this_frag->page_offset,
					     this_frag->size, DMA_TO_DEVICE);
	}
}

static inline __be16 sum16_as_be(__sum16 sum)
{
	return (__force __be16)sum;
}

static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int tx_index;
	struct tx_desc *desc;
	u32 cmd_sts;
	u16 l4i_chk;
	int length;

	cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
	l4i_chk = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int hdr_len;
		int tag_bytes;

		BUG_ON(skb->protocol != htons(ETH_P_IP) &&
		       skb->protocol != htons(ETH_P_8021Q));

		hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
		tag_bytes = hdr_len - ETH_HLEN;
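		/*
		 * With hardware checksum generation the MAC can skip
		 * only 0, 4, 8 or 12 bytes of extra (VLAN tag) header,
		 * so any tag_bytes value with bits outside of 12 set,
		 * as well as frames longer than tx_csum_limit, must
		 * fall back to software checksumming below.
		 */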
		if (skb->len - hdr_len > mp->shared->tx_csum_limit ||
		    unlikely(tag_bytes & ~12)) {
			if (skb_checksum_help(skb) == 0)
				goto no_csum;
			kfree_skb(skb);
			return 1;
		}

		if (tag_bytes & 4)
			cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
		if (tag_bytes & 8)
			cmd_sts |= MAC_HDR_EXTRA_8_BYTES;

		cmd_sts |= GEN_TCP_UDP_CHECKSUM |
			   GEN_IP_V4_CHECKSUM |
			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;

		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			cmd_sts |= UDP_FRAME;
			l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
			break;
		case IPPROTO_TCP:
			l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
			break;
		default:
			BUG();
		}
	} else {
no_csum:
		/* Errata BTS #50, IHL must be 5 if no HW checksum */
		cmd_sts |= 5 << TX_IHL_SHIFT;
	}

	tx_index = txq->tx_curr_desc++;
	if (txq->tx_curr_desc == txq->tx_ring_size)
		txq->tx_curr_desc = 0;
	desc = &txq->tx_desc_area[tx_index];

	if (nr_frags) {
		txq_submit_frag_skb(txq, skb);
		length = skb_headlen(skb);
	} else {
		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
		length = skb->len;
	}

	desc->l4i_chk = l4i_chk;
	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data,
				       length, DMA_TO_DEVICE);

	__skb_queue_tail(&txq->tx_skb, skb);

	/* ensure all other descriptors are written before first cmd_sts */
	wmb();
	desc->cmd_sts = cmd_sts;

	/* clear TX_END status */
	mp->work_tx_end &= ~(1 << txq->index);

	/* ensure all descriptors are written before poking hardware */
	wmb();
	txq_enable(txq);

	txq->tx_desc_count += nr_frags + 1;

	return 0;
}

static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int queue;
	struct tx_queue *txq;
	struct netdev_queue *nq;

	queue = skb_get_queue_mapping(skb);
	txq = mp->txq + queue;
	nq = netdev_get_tx_queue(dev, queue);

	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
		txq->tx_dropped++;
		netdev_printk(KERN_DEBUG, dev,
			      "failed to linearize skb with tiny unaligned fragment\n");
		return NETDEV_TX_BUSY;
	}

	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
		if (net_ratelimit())
			netdev_err(dev, "tx queue full?!\n");
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (!txq_submit_skb(txq, skb)) {
		int entries_left;

		txq->tx_bytes += skb->len;
		txq->tx_packets++;

		entries_left = txq->tx_ring_size - txq->tx_desc_count;
		if (entries_left < MAX_SKB_FRAGS + 1)
			netif_tx_stop_queue(nq);
	}

	return NETDEV_TX_OK;
}


/* tx napi ******************************************************************/
static void txq_kick(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
	u32 hw_desc_ptr;
	u32 expected_ptr;

	__netif_tx_lock(nq, smp_processor_id());

	if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
		goto out;

	hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index));
	expected_ptr = (u32)txq->tx_desc_dma +
				txq->tx_curr_desc * sizeof(struct tx_desc);

	if (hw_desc_ptr != expected_ptr)
		txq_enable(txq);

out:
	__netif_tx_unlock(nq);

	mp->work_tx_end &= ~(1 << txq->index);
}

static int txq_reclaim(struct tx_queue *txq, int budget, int force)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
	int reclaimed;

	__netif_tx_lock(nq, smp_processor_id());

	reclaimed = 0;
	while (reclaimed < budget && txq->tx_desc_count > 0) {
		int tx_index;
		struct tx_desc *desc;
		u32 cmd_sts;
		struct sk_buff *skb;

		tx_index = txq->tx_used_desc;
		desc = &txq->tx_desc_area[tx_index];
		cmd_sts = desc->cmd_sts;

		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
			if (!force)
				break;
			desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
		}

		txq->tx_used_desc = tx_index + 1;
		if (txq->tx_used_desc == txq->tx_ring_size)
			txq->tx_used_desc = 0;

		reclaimed++;
		txq->tx_desc_count--;

		skb = NULL;
		if (cmd_sts & TX_LAST_DESC)
			skb = __skb_dequeue(&txq->tx_skb);

		if (cmd_sts & ERROR_SUMMARY) {
			netdev_info(mp->dev, "tx error\n");
			mp->dev->stats.tx_errors++;
		}

		if (cmd_sts & TX_FIRST_DESC) {
			dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
					 desc->byte_cnt, DMA_TO_DEVICE);
		} else {
			dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr,
				       desc->byte_cnt, DMA_TO_DEVICE);
		}

		if (skb != NULL) {
			if (skb_queue_len(&mp->rx_recycle) <
					mp->rx_ring_size &&
			    skb_recycle_check(skb, mp->skb_size))
				__skb_queue_head(&mp->rx_recycle, skb);
			else
				dev_kfree_skb(skb);
		}
	}

	__netif_tx_unlock(nq);

	if (reclaimed < budget)
		mp->work_tx &= ~(1 << txq->index);

	return reclaimed;
}


/* tx rate control **********************************************************/
/*
 * Set total maximum TX rate (shared by all TX queues for this port)
 * to 'rate' bits per second, with a maximum burst of 'burst' bytes.
 */
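/*
 * Worked example: with t_clk = 133000000 and rate = 1000000000
 * (1 Gb/s), token_rate = ((1000000000 / 1000) * 64) / 133000,
 * i.e. roughly 481, comfortably below the 1023 cap.  Both mtu
 * and bucket_size are programmed in units of 256 bytes, hence
 * the "+ 255) >> 8" rounding below.
 */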
static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
{
	int token_rate;
	int mtu;
	int bucket_size;

	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	mtu = (mp->dev->mtu + 255) >> 8;
	if (mtu > 63)
		mtu = 63;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		wrlp(mp, TX_BW_RATE, token_rate);
		wrlp(mp, TX_BW_MTU, mtu);
		wrlp(mp, TX_BW_BURST, bucket_size);
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		wrlp(mp, TX_BW_RATE_MOVED, token_rate);
		wrlp(mp, TX_BW_MTU_MOVED, mtu);
		wrlp(mp, TX_BW_BURST_MOVED, bucket_size);
		break;
	}
}

static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int token_rate;
	int bucket_size;

	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
	wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
}

static void txq_set_fixed_prio_mode(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off;
	u32 val;

	/*
	 * Turn on fixed priority mode.
	 */
	off = 0;
	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		off = TXQ_FIX_PRIO_CONF;
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		off = TXQ_FIX_PRIO_CONF_MOVED;
		break;
	}

	if (off) {
		val = rdlp(mp, off);
		val |= 1 << txq->index;
		wrlp(mp, off, val);
	}
}


/* mii management interface *************************************************/
static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id)
{
	struct mv643xx_eth_shared_private *msp = dev_id;

	if (readl(msp->base + ERR_INT_CAUSE) & ERR_INT_SMI_DONE) {
		writel(~ERR_INT_SMI_DONE, msp->base + ERR_INT_CAUSE);
		wake_up(&msp->smi_busy_wait);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int smi_is_done(struct mv643xx_eth_shared_private *msp)
{
	return !(readl(msp->base + SMI_REG) & SMI_BUSY);
}

static int smi_wait_ready(struct mv643xx_eth_shared_private *msp)
{
	if (msp->err_interrupt == NO_IRQ) {
		int i;

		for (i = 0; !smi_is_done(msp); i++) {
			if (i == 10)
				return -ETIMEDOUT;
			msleep(10);
		}

		return 0;
	}

	if (!smi_is_done(msp)) {
		wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp),
				   msecs_to_jiffies(100));
		if (!smi_is_done(msp))
			return -ETIMEDOUT;
	}

	return 0;
}

static int smi_bus_read(struct mii_bus *bus, int addr, int reg)
{
	struct mv643xx_eth_shared_private *msp = bus->priv;
	void __iomem *smi_reg = msp->base + SMI_REG;
	int ret;

	if (smi_wait_ready(msp)) {
		pr_warn("SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);

	if (smi_wait_ready(msp)) {
		pr_warn("SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	ret = readl(smi_reg);
	if (!(ret & SMI_READ_VALID)) {
		pr_warn("SMI bus read not valid\n");
		return -ENODEV;
	}

	return ret & 0xffff;
}

static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct mv643xx_eth_shared_private *msp = bus->priv;
	void __iomem *smi_reg = msp->base + SMI_REG;

	if (smi_wait_ready(msp)) {
		pr_warn("SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	writel(SMI_OPCODE_WRITE | (reg << 21) |
		(addr << 16) | (val & 0xffff), smi_reg);

	if (smi_wait_ready(msp)) {
		pr_warn("SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}


/* statistics ***************************************************************/
static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	unsigned long tx_packets = 0;
	unsigned long tx_bytes = 0;
	unsigned long tx_dropped = 0;
	int i;

	for (i = 0; i < mp->txq_count; i++) {
		struct tx_queue *txq = mp->txq + i;

		tx_packets += txq->tx_packets;
		tx_bytes += txq->tx_bytes;
		tx_dropped += txq->tx_dropped;
	}

	stats->tx_packets = tx_packets;
	stats->tx_bytes = tx_bytes;
	stats->tx_dropped = tx_dropped;

	return stats;
}

static void mv643xx_eth_grab_lro_stats(struct mv643xx_eth_private *mp)
{
	u32 lro_aggregated = 0;
	u32 lro_flushed = 0;
	u32 lro_no_desc = 0;
	int i;

	for (i = 0; i < mp->rxq_count; i++) {
		struct rx_queue *rxq = mp->rxq + i;

		lro_aggregated += rxq->lro_mgr.stats.aggregated;
		lro_flushed += rxq->lro_mgr.stats.flushed;
		lro_no_desc += rxq->lro_mgr.stats.no_desc;
	}

	mp->lro_counters.lro_aggregated = lro_aggregated;
	mp->lro_counters.lro_flushed = lro_flushed;
	mp->lro_counters.lro_no_desc = lro_no_desc;
}

static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
{
	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
}

static void mib_counters_clear(struct mv643xx_eth_private *mp)
{
	int i;

	for (i = 0; i < 0x80; i += 4)
		mib_read(mp, i);
}

static void mib_counters_update(struct mv643xx_eth_private *mp)
{
	struct mib_counters *p = &mp->mib_counters;

	spin_lock_bh(&mp->mib_counters_lock);
	p->good_octets_received += mib_read(mp, 0x00);
	p->bad_octets_received += mib_read(mp, 0x08);
	p->internal_mac_transmit_err += mib_read(mp, 0x0c);
	p->good_frames_received += mib_read(mp, 0x10);
	p->bad_frames_received += mib_read(mp, 0x14);
	p->broadcast_frames_received += mib_read(mp, 0x18);
	p->multicast_frames_received += mib_read(mp, 0x1c);
	p->frames_64_octets += mib_read(mp, 0x20);
	p->frames_65_to_127_octets += mib_read(mp, 0x24);
	p->frames_128_to_255_octets += mib_read(mp, 0x28);
	p->frames_256_to_511_octets += mib_read(mp, 0x2c);
	p->frames_512_to_1023_octets += mib_read(mp, 0x30);
	p->frames_1024_to_max_octets += mib_read(mp, 0x34);
	p->good_octets_sent += mib_read(mp, 0x38);
	p->good_frames_sent += mib_read(mp, 0x40);
	p->excessive_collision += mib_read(mp, 0x44);
	p->multicast_frames_sent += mib_read(mp, 0x48);
	p->broadcast_frames_sent += mib_read(mp, 0x4c);
	p->unrec_mac_control_received += mib_read(mp, 0x50);
	p->fc_sent += mib_read(mp, 0x54);
	p->good_fc_received += mib_read(mp, 0x58);
	p->bad_fc_received += mib_read(mp, 0x5c);
	p->undersize_received += mib_read(mp, 0x60);
	p->fragments_received += mib_read(mp, 0x64);
	p->oversize_received += mib_read(mp, 0x68);
	p->jabber_received += mib_read(mp, 0x6c);
	p->mac_receive_error += mib_read(mp, 0x70);
	p->bad_crc_event += mib_read(mp, 0x74);
	p->collision += mib_read(mp, 0x78);
	p->late_collision += mib_read(mp, 0x7c);
	spin_unlock_bh(&mp->mib_counters_lock);

	mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}

static void mib_counters_timer_wrapper(unsigned long _mp)
{
	struct mv643xx_eth_private *mp = (void *)_mp;

	mib_counters_update(mp);
}


/* interrupt coalescing *****************************************************/
/*
 * Hardware coalescing parameters are set in units of 64 t_clk
 * cycles.  I.e.:
 *
 *	coal_delay_in_usec = 64000000 * register_value / t_clk_rate
 *
 *	register_value = coal_delay_in_usec * t_clk_rate / 64000000
 *
 * In the ->set*() methods, we round the computed register value
 * to the nearest integer.
 */
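/*
 * Worked example: for t_clk = 133000000 and a requested delay of
 * 100 usec, register_value = 100 * 133000000 / 64000000, which the
 * "+ 31999999" below rounds to the nearest integer, 208.
 */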
static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
{
	u32 val = rdlp(mp, SDMA_CONFIG);
	u64 temp;

	if (mp->shared->extended_rx_coal_limit)
		temp = ((val & 0x02000000) >> 10) | ((val & 0x003fff80) >> 7);
	else
		temp = (val & 0x003fff00) >> 8;

	temp *= 64000000;
	do_div(temp, mp->shared->t_clk);

	return (unsigned int)temp;
}

static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
{
	u64 temp;
	u32 val;

	temp = (u64)usec * mp->shared->t_clk;
	temp += 31999999;
	do_div(temp, 64000000);

	val = rdlp(mp, SDMA_CONFIG);
	if (mp->shared->extended_rx_coal_limit) {
		if (temp > 0xffff)
			temp = 0xffff;
		val &= ~0x023fff80;
		val |= (temp & 0x8000) << 10;
		val |= (temp & 0x7fff) << 7;
	} else {
		if (temp > 0x3fff)
			temp = 0x3fff;
		val &= ~0x003fff00;
		val |= (temp & 0x3fff) << 8;
	}
	wrlp(mp, SDMA_CONFIG, val);
}

static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
{
	u64 temp;

	temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
	temp *= 64000000;
	do_div(temp, mp->shared->t_clk);

	return (unsigned int)temp;
}

static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
{
	u64 temp;

	temp = (u64)usec * mp->shared->t_clk;
	temp += 31999999;
	do_div(temp, 64000000);

	if (temp > 0x3fff)
		temp = 0x3fff;

	wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4);
}


/* ethtool ******************************************************************/
struct mv643xx_eth_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int netdev_off;
	int mp_off;
};

#define SSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct net_device_stats, m),		\
	  offsetof(struct net_device, stats.m), -1 }

#define MIBSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct mib_counters, m),		\
	  -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }

#define LROSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct lro_counters, m),		\
	  -1, offsetof(struct mv643xx_eth_private, lro_counters.m) }

static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
	SSTAT(rx_packets),
	SSTAT(tx_packets),
	SSTAT(rx_bytes),
	SSTAT(tx_bytes),
	SSTAT(rx_errors),
	SSTAT(tx_errors),
	SSTAT(rx_dropped),
	SSTAT(tx_dropped),
	MIBSTAT(good_octets_received),
	MIBSTAT(bad_octets_received),
	MIBSTAT(internal_mac_transmit_err),
	MIBSTAT(good_frames_received),
	MIBSTAT(bad_frames_received),
	MIBSTAT(broadcast_frames_received),
	MIBSTAT(multicast_frames_received),
	MIBSTAT(frames_64_octets),
	MIBSTAT(frames_65_to_127_octets),
	MIBSTAT(frames_128_to_255_octets),
	MIBSTAT(frames_256_to_511_octets),
	MIBSTAT(frames_512_to_1023_octets),
	MIBSTAT(frames_1024_to_max_octets),
	MIBSTAT(good_octets_sent),
	MIBSTAT(good_frames_sent),
	MIBSTAT(excessive_collision),
	MIBSTAT(multicast_frames_sent),
	MIBSTAT(broadcast_frames_sent),
	MIBSTAT(unrec_mac_control_received),
	MIBSTAT(fc_sent),
	MIBSTAT(good_fc_received),
	MIBSTAT(bad_fc_received),
	MIBSTAT(undersize_received),
	MIBSTAT(fragments_received),
	MIBSTAT(oversize_received),
	MIBSTAT(jabber_received),
	MIBSTAT(mac_receive_error),
	MIBSTAT(bad_crc_event),
	MIBSTAT(collision),
	MIBSTAT(late_collision),
	LROSTAT(lro_aggregated),
	LROSTAT(lro_flushed),
	LROSTAT(lro_no_desc),
};

static int
mv643xx_eth_get_settings_phy(struct mv643xx_eth_private *mp,
			     struct ethtool_cmd *cmd)
{
	int err;

	err = phy_read_status(mp->phy);
	if (err == 0)
		err = phy_ethtool_gset(mp->phy, cmd);

	/*
	 * The MAC does not support 1000baseT_Half.
	 */
	cmd->supported &= ~SUPPORTED_1000baseT_Half;
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return err;
}

static int
mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp,
				 struct ethtool_cmd *cmd)
{
	u32 port_status;

	port_status = rdlp(mp, PORT_STATUS);

	cmd->supported = SUPPORTED_MII;
	cmd->advertising = ADVERTISED_MII;
	switch (port_status & PORT_SPEED_MASK) {
	case PORT_SPEED_10:
		cmd->speed = SPEED_10;
		break;
	case PORT_SPEED_100:
		cmd->speed = SPEED_100;
		break;
	case PORT_SPEED_1000:
		cmd->speed = SPEED_1000;
		break;
	default:
		cmd->speed = -1;
		break;
	}
	cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
	cmd->port = PORT_MII;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_DISABLE;
	cmd->maxtxpkt = 1;
	cmd->maxrxpkt = 1;

	return 0;
}

static int
mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (mp->phy != NULL)
		return mv643xx_eth_get_settings_phy(mp, cmd);
	else
		return mv643xx_eth_get_settings_phyless(mp, cmd);
}

static int
mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (mp->phy == NULL)
		return -EINVAL;

	/*
	 * The MAC does not support 1000baseT_Half.
	 */
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return phy_ethtool_sset(mp->phy, cmd);
}

static void mv643xx_eth_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *drvinfo)
{
	strncpy(drvinfo->driver, mv643xx_eth_driver_name, 32);
	strncpy(drvinfo->version, mv643xx_eth_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, "platform", 32);
	drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
}

static int mv643xx_eth_nway_reset(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (mp->phy == NULL)
		return -EINVAL;

	return genphy_restart_aneg(mp->phy);
}

static int
mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	ec->rx_coalesce_usecs = get_rx_coal(mp);
	ec->tx_coalesce_usecs = get_tx_coal(mp);

	return 0;
}

static int
mv643xx_eth_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	set_rx_coal(mp, ec->rx_coalesce_usecs);
	set_tx_coal(mp, ec->tx_coalesce_usecs);

	return 0;
}

static void
mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	er->rx_max_pending = 4096;
	er->tx_max_pending = 4096;
	er->rx_mini_max_pending = 0;
	er->rx_jumbo_max_pending = 0;

	er->rx_pending = mp->rx_ring_size;
	er->tx_pending = mp->tx_ring_size;
	er->rx_mini_pending = 0;
	er->rx_jumbo_pending = 0;
}

static int
mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (er->rx_mini_pending || er->rx_jumbo_pending)
		return -EINVAL;

	mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
	mp->tx_ring_size = er->tx_pending < 4096 ? er->tx_pending : 4096;

	if (netif_running(dev)) {
		mv643xx_eth_stop(dev);
		if (mv643xx_eth_open(dev)) {
			netdev_err(dev,
				   "fatal error on re-opening device after ring param change\n");
			return -ENOMEM;
		}
	}

	return 0;
}

static u32
mv643xx_eth_get_rx_csum(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	return !!(rdlp(mp, PORT_CONFIG) & 0x02000000);
}

static int
mv643xx_eth_set_rx_csum(struct net_device *dev, u32 rx_csum)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);

	return 0;
}

static void mv643xx_eth_get_strings(struct net_device *dev,
				    uint32_t stringset, uint8_t *data)
{
	int i;

	if (stringset == ETH_SS_STATS) {
		for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
				mv643xx_eth_stats[i].stat_string,
				ETH_GSTRING_LEN);
		}
	}
}

static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
					  struct ethtool_stats *stats,
					  uint64_t *data)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int i;

	mv643xx_eth_get_stats(dev);
	mib_counters_update(mp);
	mv643xx_eth_grab_lro_stats(mp);

	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
		const struct mv643xx_eth_stats *stat;
		void *p;

		stat = mv643xx_eth_stats + i;

		if (stat->netdev_off >= 0)
			p = ((void *)mp->dev) + stat->netdev_off;
		else
			p = ((void *)mp) + stat->mp_off;

		data[i] = (stat->sizeof_stat == 8) ?
				*(uint64_t *)p : *(uint32_t *)p;
	}
}

static int mv643xx_eth_set_flags(struct net_device *dev, u32 data)
{
	return ethtool_op_set_flags(dev, data, ETH_FLAG_LRO);
}

static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(mv643xx_eth_stats);

	return -EOPNOTSUPP;
}

static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
	.get_settings		= mv643xx_eth_get_settings,
	.set_settings		= mv643xx_eth_set_settings,
	.get_drvinfo		= mv643xx_eth_get_drvinfo,
	.nway_reset		= mv643xx_eth_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= mv643xx_eth_get_coalesce,
	.set_coalesce		= mv643xx_eth_set_coalesce,
	.get_ringparam		= mv643xx_eth_get_ringparam,
	.set_ringparam		= mv643xx_eth_set_ringparam,
	.get_rx_csum		= mv643xx_eth_get_rx_csum,
	.set_rx_csum		= mv643xx_eth_set_rx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.get_strings		= mv643xx_eth_get_strings,
	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
	.get_flags		= ethtool_op_get_flags,
	.set_flags		= mv643xx_eth_set_flags,
	.get_sset_count		= mv643xx_eth_get_sset_count,
};


/* address handling *********************************************************/
static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH);
	unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW);

	addr[0] = (mac_h >> 24) & 0xff;
	addr[1] = (mac_h >> 16) & 0xff;
	addr[2] = (mac_h >> 8) & 0xff;
	addr[3] = mac_h & 0xff;
	addr[4] = (mac_l >> 8) & 0xff;
	addr[5] = mac_l & 0xff;
}

static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	wrlp(mp, MAC_ADDR_HIGH,
	     (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]);
	wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]);
}

static u32 uc_addr_filter_mask(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	u32 nibbles;

	if (dev->flags & IFF_PROMISC)
		return 0;

	nibbles = 1 << (dev->dev_addr[5] & 0x0f);
	netdev_for_each_uc_addr(ha, dev) {
		if (memcmp(dev->dev_addr, ha->addr, 5))
			return 0;
		if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
			return 0;

		nibbles |= 1 << (ha->addr[5] & 0x0f);
	}

	return nibbles;
}
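/*
 * Hardware unicast filtering is only usable when every secondary
 * unicast address matches dev_addr in its first 44 bits; the mask
 * computed above then has one bit set per distinct low nibble of
 * the last address byte, indexing the 16-entry UNICAST_TABLE.
 * E.g. addresses ending in 0x05 and 0x0b yield
 * nibbles = (1 << 5) | (1 << 11).
 */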

static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 port_config;
	u32 nibbles;
	int i;

	uc_addr_set(mp, dev->dev_addr);

	port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE;

	nibbles = uc_addr_filter_mask(dev);
	if (!nibbles) {
		port_config |= UNICAST_PROMISCUOUS_MODE;
		nibbles = 0xffff;
	}

	for (i = 0; i < 16; i += 4) {
		int off = UNICAST_TABLE(mp->port_num) + i;
		u32 v;

		v = 0;
		if (nibbles & 1)
			v |= 0x00000001;
		if (nibbles & 2)
			v |= 0x00000100;
		if (nibbles & 4)
			v |= 0x00010000;
		if (nibbles & 8)
			v |= 0x01000000;
		nibbles >>= 4;

		wrl(mp, off, v);
	}

	wrlp(mp, PORT_CONFIG, port_config);
}
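/*
 * Multicast filtering uses two 256-bin hash tables: addresses of
 * the form 01:00:5e:00:00:xx index SPECIAL_MCAST_TABLE directly by
 * their last byte, while all other addresses index OTHER_MCAST_TABLE
 * by the 8-bit CRC (polynomial 0x107) computed by addr_crc() below.
 */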

static int addr_crc(unsigned char *addr)
{
	int crc = 0;
	int i;

	for (i = 0; i < 6; i++) {
		int j;

		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--) {
			if (crc & (0x100 << j))
				crc ^= 0x107 << j;
		}
	}

	return crc;
}

static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 *mc_spec;
	u32 *mc_other;
	struct netdev_hw_addr *ha;
	int i;

	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		int port_num;
		u32 accept;

oom:
		port_num = mp->port_num;
		accept = 0x01010101;
		for (i = 0; i < 0x100; i += 4) {
			wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept);
			wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept);
		}
		return;
	}

	mc_spec = kmalloc(0x200, GFP_ATOMIC);
	if (mc_spec == NULL)
		goto oom;
	mc_other = mc_spec + (0x100 >> 2);

	memset(mc_spec, 0, 0x100);
	memset(mc_other, 0, 0x100);

	netdev_for_each_mc_addr(ha, dev) {
		u8 *a = ha->addr;
		u32 *table;
		int entry;

		if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
			table = mc_spec;
			entry = a[5];
		} else {
			table = mc_other;
			entry = addr_crc(a);
		}

		table[entry >> 2] |= 1 << (8 * (entry & 3));
	}

	for (i = 0; i < 0x100; i += 4) {
		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, mc_spec[i >> 2]);
		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, mc_other[i >> 2]);
	}

	kfree(mc_spec);
}

static void mv643xx_eth_set_rx_mode(struct net_device *dev)
{
	mv643xx_eth_program_unicast_filter(dev);
	mv643xx_eth_program_multicast_filter(dev);
}

static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sa = addr;

	if (!is_valid_ether_addr(sa->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);

	netif_addr_lock_bh(dev);
	mv643xx_eth_program_unicast_filter(dev);
	netif_addr_unlock_bh(dev);

	return 0;
}


/* rx/tx queue initialisation ***********************************************/
static int rxq_init(struct mv643xx_eth_private *mp, int index)
{
	struct rx_queue *rxq = mp->rxq + index;
	struct rx_desc *rx_desc;
	int size;
	int i;

	rxq->index = index;

	rxq->rx_ring_size = mp->rx_ring_size;

	rxq->rx_desc_count = 0;
	rxq->rx_curr_desc = 0;
	rxq->rx_used_desc = 0;

	size = rxq->rx_ring_size * sizeof(struct rx_desc);

	if (index == 0 && size <= mp->rx_desc_sram_size) {
		rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
						mp->rx_desc_sram_size);
		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
	} else {
		rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
						       size, &rxq->rx_desc_dma,
						       GFP_KERNEL);
	}

	if (rxq->rx_desc_area == NULL) {
		netdev_err(mp->dev,
			   "can't allocate rx ring (%d bytes)\n", size);
		goto out;
	}
	memset(rxq->rx_desc_area, 0, size);

	rxq->rx_desc_area_size = size;
	rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
								GFP_KERNEL);
	if (rxq->rx_skb == NULL) {
		netdev_err(mp->dev, "can't allocate rx skb ring\n");
		goto out_free;
	}

	rx_desc = (struct rx_desc *)rxq->rx_desc_area;
	for (i = 0; i < rxq->rx_ring_size; i++) {
		int nexti;

		nexti = i + 1;
		if (nexti == rxq->rx_ring_size)
			nexti = 0;

		rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
					nexti * sizeof(struct rx_desc);
	}

	rxq->lro_mgr.dev = mp->dev;
	memset(&rxq->lro_mgr.stats, 0, sizeof(rxq->lro_mgr.stats));
	rxq->lro_mgr.features = LRO_F_NAPI;
	rxq->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
	rxq->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
	rxq->lro_mgr.max_desc = ARRAY_SIZE(rxq->lro_arr);
	rxq->lro_mgr.max_aggr = 32;
	rxq->lro_mgr.frag_align_pad = 0;
	rxq->lro_mgr.lro_arr = rxq->lro_arr;
	rxq->lro_mgr.get_skb_header = mv643xx_get_skb_header;

	memset(&rxq->lro_arr, 0, sizeof(rxq->lro_arr));

	return 0;


out_free:
	if (index == 0 && size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(mp->dev->dev.parent, size,
				  rxq->rx_desc_area,
				  rxq->rx_desc_dma);

out:
	return -ENOMEM;
}

static void rxq_deinit(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int i;

	rxq_disable(rxq);

	for (i = 0; i < rxq->rx_ring_size; i++) {
		if (rxq->rx_skb[i]) {
			dev_kfree_skb(rxq->rx_skb[i]);
			rxq->rx_desc_count--;
		}
	}

	if (rxq->rx_desc_count) {
		netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n",
			   rxq->rx_desc_count);
	}

	if (rxq->index == 0 &&
	    rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size,
				  rxq->rx_desc_area, rxq->rx_desc_dma);

	kfree(rxq->rx_skb);
}
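/*
 * Like rxq_init() above, txq_init() below links the descriptors
 * into a circular list via next_desc_ptr, so the SDMA engine wraps
 * from the last ring entry back to the first on its own.
 */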

static int txq_init(struct mv643xx_eth_private *mp, int index)
{
	struct tx_queue *txq = mp->txq + index;
	struct tx_desc *tx_desc;
	int size;
	int i;

	txq->index = index;

	txq->tx_ring_size = mp->tx_ring_size;

	txq->tx_desc_count = 0;
	txq->tx_curr_desc = 0;
	txq->tx_used_desc = 0;

	size = txq->tx_ring_size * sizeof(struct tx_desc);

	if (index == 0 && size <= mp->tx_desc_sram_size) {
		txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
						mp->tx_desc_sram_size);
		txq->tx_desc_dma = mp->tx_desc_sram_addr;
	} else {
		txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
						       size, &txq->tx_desc_dma,
						       GFP_KERNEL);
	}

	if (txq->tx_desc_area == NULL) {
		netdev_err(mp->dev,
			   "can't allocate tx ring (%d bytes)\n", size);
		return -ENOMEM;
	}
	memset(txq->tx_desc_area, 0, size);

	txq->tx_desc_area_size = size;

	tx_desc = (struct tx_desc *)txq->tx_desc_area;
	for (i = 0; i < txq->tx_ring_size; i++) {
		struct tx_desc *txd = tx_desc + i;
		int nexti;

		nexti = i + 1;
		if (nexti == txq->tx_ring_size)
			nexti = 0;

		txd->cmd_sts = 0;
		txd->next_desc_ptr = txq->tx_desc_dma +
					nexti * sizeof(struct tx_desc);
	}

	skb_queue_head_init(&txq->tx_skb);

	return 0;
}

static void txq_deinit(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);

	txq_disable(txq);
	txq_reclaim(txq, txq->tx_ring_size, 1);

	BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);

	if (txq->index == 0 &&
	    txq->tx_desc_area_size <= mp->tx_desc_sram_size)
		iounmap(txq->tx_desc_area);
	else
		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
				  txq->tx_desc_area, txq->tx_desc_dma);
}


/* netdev ops and related ***************************************************/
static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
{
	u32 int_cause;
	u32 int_cause_ext;

	int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask;
	if (int_cause == 0)
		return 0;

	int_cause_ext = 0;
	if (int_cause & INT_EXT) {
		int_cause &= ~INT_EXT;
		int_cause_ext = rdlp(mp, INT_CAUSE_EXT);
	}

	if (int_cause) {
		wrlp(mp, INT_CAUSE, ~int_cause);
		mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
				~(rdlp(mp, TXQ_COMMAND) & 0xff);
		mp->work_rx |= (int_cause & INT_RX) >> 2;
	}

	int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX;
	if (int_cause_ext) {
		wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext);
		if (int_cause_ext & INT_EXT_LINK_PHY)
			mp->work_link = 1;
		mp->work_tx |= int_cause_ext & INT_EXT_TX;
	}

	return 1;
}

static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (unlikely(!mv643xx_eth_collect_events(mp)))
		return IRQ_NONE;

	wrlp(mp, INT_MASK, 0);
	napi_schedule(&mp->napi);

	return IRQ_HANDLED;
}

static void handle_link_event(struct mv643xx_eth_private *mp)
{
	struct net_device *dev = mp->dev;
	u32 port_status;
	int speed;
	int duplex;
	int fc;

	port_status = rdlp(mp, PORT_STATUS);
	if (!(port_status & LINK_UP)) {
		if (netif_carrier_ok(dev)) {
			int i;

			netdev_info(dev, "link down\n");

			netif_carrier_off(dev);

			for (i = 0; i < mp->txq_count; i++) {
				struct tx_queue *txq = mp->txq + i;

				txq_reclaim(txq, txq->tx_ring_size, 1);
				txq_reset_hw_ptr(txq);
			}
		}
		return;
	}

	switch (port_status & PORT_SPEED_MASK) {
	case PORT_SPEED_10:
		speed = 10;
		break;
	case PORT_SPEED_100:
		speed = 100;
		break;
	case PORT_SPEED_1000:
		speed = 1000;
		break;
	default:
		speed = -1;
		break;
	}
	duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
	fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;

	netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n",
		    speed, duplex ? "full" : "half", fc ? "en" : "dis");
"en" : "dis"); 2126 2127 if (!netif_carrier_ok(dev)) 2128 netif_carrier_on(dev); 2129} 2130 2131static int mv643xx_eth_poll(struct napi_struct *napi, int budget) 2132{ 2133 struct mv643xx_eth_private *mp; 2134 int work_done; 2135 2136 mp = container_of(napi, struct mv643xx_eth_private, napi); 2137 2138 if (unlikely(mp->oom)) { 2139 mp->oom = 0; 2140 del_timer(&mp->rx_oom); 2141 } 2142 2143 work_done = 0; 2144 while (work_done < budget) { 2145 u8 queue_mask; 2146 int queue; 2147 int work_tbd; 2148 2149 if (mp->work_link) { 2150 mp->work_link = 0; 2151 handle_link_event(mp); 2152 work_done++; 2153 continue; 2154 } 2155 2156 queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx; 2157 if (likely(!mp->oom)) 2158 queue_mask |= mp->work_rx_refill; 2159 2160 if (!queue_mask) { 2161 if (mv643xx_eth_collect_events(mp)) 2162 continue; 2163 break; 2164 } 2165 2166 queue = fls(queue_mask) - 1; 2167 queue_mask = 1 << queue; 2168 2169 work_tbd = budget - work_done; 2170 if (work_tbd > 16) 2171 work_tbd = 16; 2172 2173 if (mp->work_tx_end & queue_mask) { 2174 txq_kick(mp->txq + queue); 2175 } else if (mp->work_tx & queue_mask) { 2176 work_done += txq_reclaim(mp->txq + queue, work_tbd, 0); 2177 txq_maybe_wake(mp->txq + queue); 2178 } else if (mp->work_rx & queue_mask) { 2179 work_done += rxq_process(mp->rxq + queue, work_tbd); 2180 } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) { 2181 work_done += rxq_refill(mp->rxq + queue, work_tbd); 2182 } else { 2183 BUG(); 2184 } 2185 } 2186 2187 if (work_done < budget) { 2188 if (mp->oom) 2189 mod_timer(&mp->rx_oom, jiffies + (HZ / 10)); 2190 napi_complete(napi); 2191 wrlp(mp, INT_MASK, mp->int_mask); 2192 } 2193 2194 return work_done; 2195} 2196 2197static inline void oom_timer_wrapper(unsigned long data) 2198{ 2199 struct mv643xx_eth_private *mp = (void *)data; 2200 2201 napi_schedule(&mp->napi); 2202} 2203 2204static void phy_reset(struct mv643xx_eth_private *mp) 2205{ 2206 int data; 2207 2208 data = phy_read(mp->phy, MII_BMCR); 2209 if (data < 0) 2210 return; 2211 2212 data |= BMCR_RESET; 2213 if (phy_write(mp->phy, MII_BMCR, data) < 0) 2214 return; 2215 2216 do { 2217 data = phy_read(mp->phy, MII_BMCR); 2218 } while (data >= 0 && data & BMCR_RESET); 2219} 2220 2221static void port_start(struct mv643xx_eth_private *mp) 2222{ 2223 u32 pscr; 2224 int i; 2225 2226 /* 2227 * Perform PHY reset, if there is a PHY. 2228 */ 2229 if (mp->phy != NULL) { 2230 struct ethtool_cmd cmd; 2231 2232 mv643xx_eth_get_settings(mp->dev, &cmd); 2233 phy_reset(mp); 2234 mv643xx_eth_set_settings(mp->dev, &cmd); 2235 } 2236 2237 /* 2238 * Configure basic link parameters. 2239 */ 2240 pscr = rdlp(mp, PORT_SERIAL_CONTROL); 2241 2242 pscr |= SERIAL_PORT_ENABLE; 2243 wrlp(mp, PORT_SERIAL_CONTROL, pscr); 2244 2245 pscr |= DO_NOT_FORCE_LINK_FAIL; 2246 if (mp->phy == NULL) 2247 pscr |= FORCE_LINK_PASS; 2248 wrlp(mp, PORT_SERIAL_CONTROL, pscr); 2249 2250 /* 2251 * Configure TX path and queues. 2252 */ 2253 tx_set_rate(mp, 1000000000, 16777216); 2254 for (i = 0; i < mp->txq_count; i++) { 2255 struct tx_queue *txq = mp->txq + i; 2256 2257 txq_reset_hw_ptr(txq); 2258 txq_set_rate(txq, 1000000000, 16777216); 2259 txq_set_fixed_prio_mode(txq); 2260 } 2261 2262 /* 2263 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast 2264 * frames to RX queue #0, and include the pseudo-header when 2265 * calculating receive checksums. 2266 */ 2267 wrlp(mp, PORT_CONFIG, 0x02000000); 2268 2269 /* 2270 * Treat BPDUs as normal multicasts, and disable partition mode. 
static void port_start(struct mv643xx_eth_private *mp)
{
	u32 pscr;
	int i;

	/*
	 * Perform PHY reset, if there is a PHY.
	 */
	if (mp->phy != NULL) {
		struct ethtool_cmd cmd;

		mv643xx_eth_get_settings(mp->dev, &cmd);
		phy_reset(mp);
		mv643xx_eth_set_settings(mp->dev, &cmd);
	}

	/*
	 * Configure basic link parameters.
	 */
	pscr = rdlp(mp, PORT_SERIAL_CONTROL);

	pscr |= SERIAL_PORT_ENABLE;
	wrlp(mp, PORT_SERIAL_CONTROL, pscr);

	pscr |= DO_NOT_FORCE_LINK_FAIL;
	if (mp->phy == NULL)
		pscr |= FORCE_LINK_PASS;
	wrlp(mp, PORT_SERIAL_CONTROL, pscr);

	/*
	 * Configure TX path and queues.
	 */
	tx_set_rate(mp, 1000000000, 16777216);
	for (i = 0; i < mp->txq_count; i++) {
		struct tx_queue *txq = mp->txq + i;

		txq_reset_hw_ptr(txq);
		txq_set_rate(txq, 1000000000, 16777216);
		txq_set_fixed_prio_mode(txq);
	}

	/*
	 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
	 * frames to RX queue #0, and include the pseudo-header when
	 * calculating receive checksums.
	 */
	wrlp(mp, PORT_CONFIG, 0x02000000);

	/*
	 * Treat BPDUs as normal multicasts, and disable partition mode.
	 */
	wrlp(mp, PORT_CONFIG_EXT, 0x00000000);

	/*
	 * Add configured unicast addresses to address filter table.
	 */
	mv643xx_eth_program_unicast_filter(mp->dev);

	/*
	 * Enable the receive queues.
	 */
	for (i = 0; i < mp->rxq_count; i++) {
		struct rx_queue *rxq = mp->rxq + i;
		u32 addr;

		addr = (u32)rxq->rx_desc_dma;
		addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
		wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr);

		rxq_enable(rxq);
	}
}

static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
{
	int skb_size;

	/*
	 * Reserve 2+14 bytes for an ethernet header (the hardware
	 * automatically prepends 2 bytes of dummy data to each
	 * received packet), 16 bytes for up to four VLAN tags, and
	 * 4 bytes for the trailing FCS -- 36 bytes total.
	 */
	skb_size = mp->dev->mtu + 36;

	/*
	 * Make sure that the skb size is a multiple of 8 bytes, as
	 * the lower three bits of the receive descriptor's buffer
	 * size field are ignored by the hardware.
	 */
	mp->skb_size = (skb_size + 7) & ~7;

	/*
	 * If NET_SKB_PAD is smaller than a cache line,
	 * netdev_alloc_skb() will cause skb->data to be misaligned
	 * to a cache line boundary.  If this is the case, include
	 * some extra space to allow re-aligning the data area.
	 */
	mp->skb_size += SKB_DMA_REALIGN;
}

static int mv643xx_eth_open(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int err;
	int i;

	wrlp(mp, INT_CAUSE, 0);
	wrlp(mp, INT_CAUSE_EXT, 0);
	rdlp(mp, INT_CAUSE_EXT);

	err = request_irq(dev->irq, mv643xx_eth_irq,
			  IRQF_SHARED, dev->name, dev);
	if (err) {
		netdev_err(dev, "can't assign irq\n");
		return -EAGAIN;
	}

	mv643xx_eth_recalc_skb_size(mp);

	napi_enable(&mp->napi);

	skb_queue_head_init(&mp->rx_recycle);

	mp->int_mask = INT_EXT;

	for (i = 0; i < mp->rxq_count; i++) {
		err = rxq_init(mp, i);
		if (err) {
			while (--i >= 0)
				rxq_deinit(mp->rxq + i);
			goto out;
		}

		rxq_refill(mp->rxq + i, INT_MAX);
		mp->int_mask |= INT_RX_0 << i;
	}

	if (mp->oom) {
		mp->rx_oom.expires = jiffies + (HZ / 10);
		add_timer(&mp->rx_oom);
	}

	for (i = 0; i < mp->txq_count; i++) {
		err = txq_init(mp, i);
		if (err) {
			while (--i >= 0)
				txq_deinit(mp->txq + i);
			goto out_free;
		}
		mp->int_mask |= INT_TX_END_0 << i;
	}

	port_start(mp);

	wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
	wrlp(mp, INT_MASK, mp->int_mask);

	return 0;


out_free:
	for (i = 0; i < mp->rxq_count; i++)
		rxq_deinit(mp->rxq + i);
out:
	free_irq(dev->irq, dev);

	return err;
}
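/*
 * Quiesce the port: disable all RX and TX queues, wait for the TX
 * path to drain (TX_IN_PROGRESS clear and TX_FIFO_EMPTY set in
 * PORT_STATUS), then clear the enable and link-forcing bits in the
 * serial control register.
 */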
static void port_reset(struct mv643xx_eth_private *mp)
{
	unsigned int data;
	int i;

	for (i = 0; i < mp->rxq_count; i++)
		rxq_disable(mp->rxq + i);
	for (i = 0; i < mp->txq_count; i++)
		txq_disable(mp->txq + i);

	while (1) {
		u32 ps = rdlp(mp, PORT_STATUS);

		if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
			break;
		udelay(10);
	}

	/* Reset the Enable bit in the Configuration Register */
	data = rdlp(mp, PORT_SERIAL_CONTROL);
	data &= ~(SERIAL_PORT_ENABLE |
		  DO_NOT_FORCE_LINK_FAIL |
		  FORCE_LINK_PASS);
	wrlp(mp, PORT_SERIAL_CONTROL, data);
}

static int mv643xx_eth_stop(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int i;

	wrlp(mp, INT_MASK_EXT, 0x00000000);
	wrlp(mp, INT_MASK, 0x00000000);
	rdlp(mp, INT_MASK);

	napi_disable(&mp->napi);

	del_timer_sync(&mp->rx_oom);

	netif_carrier_off(dev);

	free_irq(dev->irq, dev);

	port_reset(mp);
	mv643xx_eth_get_stats(dev);
	mib_counters_update(mp);
	del_timer_sync(&mp->mib_counters_timer);

	skb_queue_purge(&mp->rx_recycle);

	for (i = 0; i < mp->rxq_count; i++)
		rxq_deinit(mp->rxq + i);
	for (i = 0; i < mp->txq_count; i++)
		txq_deinit(mp->txq + i);

	return 0;
}

static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (mp->phy != NULL)
		return phy_mii_ioctl(mp->phy, ifr, cmd);

	return -EOPNOTSUPP;
}

static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (new_mtu < 64 || new_mtu > 9500)
		return -EINVAL;

	dev->mtu = new_mtu;
	mv643xx_eth_recalc_skb_size(mp);
	tx_set_rate(mp, 1000000000, 16777216);

	if (!netif_running(dev))
		return 0;

	/*
	 * Stop and then re-open the interface.  This will allocate RX
	 * skbs of the new MTU.
	 * There is a possible danger that the open will not succeed,
	 * due to memory being full.
	 */
	mv643xx_eth_stop(dev);
	if (mv643xx_eth_open(dev)) {
		netdev_err(dev,
			   "fatal error on re-opening device after MTU change\n");
	}

	return 0;
}

static void tx_timeout_task(struct work_struct *ugly)
{
	struct mv643xx_eth_private *mp;

	mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
	if (netif_running(mp->dev)) {
		netif_tx_stop_all_queues(mp->dev);
		port_reset(mp);
		port_start(mp);
		netif_tx_wake_all_queues(mp->dev);
	}
}

static void mv643xx_eth_tx_timeout(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	netdev_info(dev, "tx timeout\n");

	schedule_work(&mp->tx_timeout_task);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mv643xx_eth_netpoll(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	wrlp(mp, INT_MASK, 0x00000000);
	rdlp(mp, INT_MASK);

	mv643xx_eth_irq(dev->irq, dev);

	wrlp(mp, INT_MASK, mp->int_mask);
}
#endif


/* platform glue ************************************************************/
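/*
 * Program one MBUS address decoding window per DRAM chip select:
 * WINDOW_BASE takes the 64 KiB-aligned base together with the
 * attribute and target ID, WINDOW_SIZE the (size - 1) mask.  Each
 * used window has its bit cleared in win_enable before the write to
 * WINDOW_BAR_ENABLE (bits left set presumably keep the unused
 * windows disabled), and win_protect accumulates a 2-bit access code
 * per window, which the per-port probe later writes to that port's
 * WINDOW_PROTECT register.
 */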
& 0xffff0000, base + WINDOW_SIZE(i)); 2552 2553 win_enable &= ~(1 << i); 2554 win_protect |= 3 << (2 * i); 2555 } 2556 2557 writel(win_enable, base + WINDOW_BAR_ENABLE); 2558 msp->win_protect = win_protect; 2559} 2560 2561static void infer_hw_params(struct mv643xx_eth_shared_private *msp) 2562{ 2563 /* 2564 * Check whether we have a 14-bit coal limit field in bits 2565 * [21:8], or a 16-bit coal limit in bits [25,21:7] of the 2566 * SDMA config register. 2567 */ 2568 writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG); 2569 if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000) 2570 msp->extended_rx_coal_limit = 1; 2571 else 2572 msp->extended_rx_coal_limit = 0; 2573 2574 /* 2575 * Check whether the MAC supports TX rate control, and if 2576 * yes, whether its associated registers are in the old or 2577 * the new place. 2578 */ 2579 writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED); 2580 if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) { 2581 msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT; 2582 } else { 2583 writel(7, msp->base + 0x0400 + TX_BW_RATE); 2584 if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7) 2585 msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT; 2586 else 2587 msp->tx_bw_control = TX_BW_CONTROL_ABSENT; 2588 } 2589} 2590 2591static int mv643xx_eth_shared_probe(struct platform_device *pdev) 2592{ 2593 static int mv643xx_eth_version_printed; 2594 struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; 2595 struct mv643xx_eth_shared_private *msp; 2596 struct resource *res; 2597 int ret; 2598 2599 if (!mv643xx_eth_version_printed++) 2600 pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n", 2601 mv643xx_eth_driver_version); 2602 2603 ret = -EINVAL; 2604 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2605 if (res == NULL) 2606 goto out; 2607 2608 ret = -ENOMEM; 2609 msp = kzalloc(sizeof(*msp), GFP_KERNEL); 2610 if (msp == NULL) 2611 goto out; 2612 2613 msp->base = ioremap(res->start, res->end - res->start + 1); 2614 if (msp->base == NULL) 2615 goto out_free; 2616 2617 /* 2618 * Set up and register SMI bus. 2619 */ 2620 if (pd == NULL || pd->shared_smi == NULL) { 2621 msp->smi_bus = mdiobus_alloc(); 2622 if (msp->smi_bus == NULL) 2623 goto out_unmap; 2624 2625 msp->smi_bus->priv = msp; 2626 msp->smi_bus->name = "mv643xx_eth smi"; 2627 msp->smi_bus->read = smi_bus_read; 2628 msp->smi_bus->write = smi_bus_write, 2629 snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id); 2630 msp->smi_bus->parent = &pdev->dev; 2631 msp->smi_bus->phy_mask = 0xffffffff; 2632 if (mdiobus_register(msp->smi_bus) < 0) 2633 goto out_free_mii_bus; 2634 msp->smi = msp; 2635 } else { 2636 msp->smi = platform_get_drvdata(pd->shared_smi); 2637 } 2638 2639 msp->err_interrupt = NO_IRQ; 2640 init_waitqueue_head(&msp->smi_busy_wait); 2641 2642 /* 2643 * Check whether the error interrupt is hooked up. 2644 */ 2645 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 2646 if (res != NULL) { 2647 int err; 2648 2649 err = request_irq(res->start, mv643xx_eth_err_irq, 2650 IRQF_SHARED, "mv643xx_eth", msp); 2651 if (!err) { 2652 writel(ERR_INT_SMI_DONE, msp->base + ERR_INT_MASK); 2653 msp->err_interrupt = res->start; 2654 } 2655 } 2656 2657 /* 2658 * (Re-)program MBUS remapping windows if we are asked to. 2659 */ 2660 if (pd != NULL && pd->dram != NULL) 2661 mv643xx_eth_conf_mbus_windows(msp, pd->dram); 2662 2663 /* 2664 * Detect hardware parameters. 2665 */ 2666 msp->t_clk = (pd != NULL && pd->t_clk != 0) ? 
static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
{
	/*
	 * Check whether we have a 14-bit coal limit field in bits
	 * [21:8], or a 16-bit coal limit in bits [25,21:7] of the
	 * SDMA config register.
	 */
	writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG);
	if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000)
		msp->extended_rx_coal_limit = 1;
	else
		msp->extended_rx_coal_limit = 0;

	/*
	 * Check whether the MAC supports TX rate control, and if
	 * yes, whether its associated registers are in the old or
	 * the new place.
	 */
	writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED);
	if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) {
		msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT;
	} else {
		writel(7, msp->base + 0x0400 + TX_BW_RATE);
		if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7)
			msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT;
		else
			msp->tx_bw_control = TX_BW_CONTROL_ABSENT;
	}
}

static int mv643xx_eth_shared_probe(struct platform_device *pdev)
{
	static int mv643xx_eth_version_printed;
	struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
	struct mv643xx_eth_shared_private *msp;
	struct resource *res;
	int ret;

	if (!mv643xx_eth_version_printed++)
		pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
			  mv643xx_eth_driver_version);

	ret = -EINVAL;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		goto out;

	ret = -ENOMEM;
	msp = kzalloc(sizeof(*msp), GFP_KERNEL);
	if (msp == NULL)
		goto out;

	msp->base = ioremap(res->start, res->end - res->start + 1);
	if (msp->base == NULL)
		goto out_free;

	/*
	 * Set up and register SMI bus.
	 */
	if (pd == NULL || pd->shared_smi == NULL) {
		msp->smi_bus = mdiobus_alloc();
		if (msp->smi_bus == NULL)
			goto out_unmap;

		msp->smi_bus->priv = msp;
		msp->smi_bus->name = "mv643xx_eth smi";
		msp->smi_bus->read = smi_bus_read;
		msp->smi_bus->write = smi_bus_write;
		snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
		msp->smi_bus->parent = &pdev->dev;
		msp->smi_bus->phy_mask = 0xffffffff;
		if (mdiobus_register(msp->smi_bus) < 0)
			goto out_free_mii_bus;
		msp->smi = msp;
	} else {
		msp->smi = platform_get_drvdata(pd->shared_smi);
	}

	msp->err_interrupt = NO_IRQ;
	init_waitqueue_head(&msp->smi_busy_wait);

	/*
	 * Check whether the error interrupt is hooked up.
	 */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (res != NULL) {
		int err;

		err = request_irq(res->start, mv643xx_eth_err_irq,
				  IRQF_SHARED, "mv643xx_eth", msp);
		if (!err) {
			writel(ERR_INT_SMI_DONE, msp->base + ERR_INT_MASK);
			msp->err_interrupt = res->start;
		}
	}

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (pd != NULL && pd->dram != NULL)
		mv643xx_eth_conf_mbus_windows(msp, pd->dram);

	/*
	 * Detect hardware parameters.
	 */
	msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
	msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
					pd->tx_csum_limit : 9 * 1024;
	infer_hw_params(msp);

	platform_set_drvdata(pdev, msp);

	return 0;

out_free_mii_bus:
	mdiobus_free(msp->smi_bus);
out_unmap:
	iounmap(msp->base);
out_free:
	kfree(msp);
out:
	return ret;
}

static int mv643xx_eth_shared_remove(struct platform_device *pdev)
{
	struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
	struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;

	if (pd == NULL || pd->shared_smi == NULL) {
		mdiobus_unregister(msp->smi_bus);
		mdiobus_free(msp->smi_bus);
	}
	if (msp->err_interrupt != NO_IRQ)
		free_irq(msp->err_interrupt, msp);
	iounmap(msp->base);
	kfree(msp);

	return 0;
}

static struct platform_driver mv643xx_eth_shared_driver = {
	.probe = mv643xx_eth_shared_probe,
	.remove = mv643xx_eth_shared_remove,
	.driver = {
		.name = MV643XX_ETH_SHARED_NAME,
		.owner = THIS_MODULE,
	},
};

static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
{
	int addr_shift = 5 * mp->port_num;
	u32 data;

	data = rdl(mp, PHY_ADDR);
	data &= ~(0x1f << addr_shift);
	data |= (phy_addr & 0x1f) << addr_shift;
	wrl(mp, PHY_ADDR, data);
}

static int phy_addr_get(struct mv643xx_eth_private *mp)
{
	unsigned int data;

	data = rdl(mp, PHY_ADDR);

	return (data >> (5 * mp->port_num)) & 0x1f;
}

static void set_params(struct mv643xx_eth_private *mp,
		       struct mv643xx_eth_platform_data *pd)
{
	struct net_device *dev = mp->dev;

	if (is_valid_ether_addr(pd->mac_addr))
		memcpy(dev->dev_addr, pd->mac_addr, 6);
	else
		uc_addr_get(mp, dev->dev_addr);

	mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
	if (pd->rx_queue_size)
		mp->rx_ring_size = pd->rx_queue_size;
	mp->rx_desc_sram_addr = pd->rx_sram_addr;
	mp->rx_desc_sram_size = pd->rx_sram_size;

	mp->rxq_count = pd->rx_queue_count ? : 1;

	mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
	if (pd->tx_queue_size)
		mp->tx_ring_size = pd->tx_queue_size;
	mp->tx_desc_sram_addr = pd->tx_sram_addr;
	mp->tx_desc_sram_size = pd->tx_sram_size;

	mp->txq_count = pd->tx_queue_count ? : 1;
}
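/*
 * Find the PHY for this port.  With MV643XX_ETH_PHY_ADDR_DEFAULT we
 * take the address currently programmed into the PHY_ADDR register
 * (5 bits per port) as a hint and probe all 32 SMI addresses
 * starting from it; otherwise only the address supplied by the
 * platform data is probed.
 */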
static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
				   int phy_addr)
{
	struct mii_bus *bus = mp->shared->smi->smi_bus;
	struct phy_device *phydev;
	int start;
	int num;
	int i;

	if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) {
		start = phy_addr_get(mp) & 0x1f;
		num = 32;
	} else {
		start = phy_addr & 0x1f;
		num = 1;
	}

	phydev = NULL;
	for (i = 0; i < num; i++) {
		int addr = (start + i) & 0x1f;

		if (bus->phy_map[addr] == NULL)
			mdiobus_scan(bus, addr);

		if (phydev == NULL) {
			phydev = bus->phy_map[addr];
			if (phydev != NULL)
				phy_addr_set(mp, addr);
		}
	}

	return phydev;
}

static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
{
	struct phy_device *phy = mp->phy;

	phy_reset(mp);

	phy_attach(mp->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_GMII);

	if (speed == 0) {
		phy->autoneg = AUTONEG_ENABLE;
		phy->speed = 0;
		phy->duplex = 0;
		phy->advertising = phy->supported | ADVERTISED_Autoneg;
	} else {
		phy->autoneg = AUTONEG_DISABLE;
		phy->advertising = 0;
		phy->speed = speed;
		phy->duplex = duplex;
	}
	phy_start_aneg(phy);
}

static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
{
	u32 pscr;

	pscr = rdlp(mp, PORT_SERIAL_CONTROL);
	if (pscr & SERIAL_PORT_ENABLE) {
		pscr &= ~SERIAL_PORT_ENABLE;
		wrlp(mp, PORT_SERIAL_CONTROL, pscr);
	}

	pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
	if (mp->phy == NULL) {
		pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
		if (speed == SPEED_1000)
			pscr |= SET_GMII_SPEED_TO_1000;
		else if (speed == SPEED_100)
			pscr |= SET_MII_SPEED_TO_100;

		pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL;

		pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX;
		if (duplex == DUPLEX_FULL)
			pscr |= SET_FULL_DUPLEX_MODE;
	}

	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
}

static const struct net_device_ops mv643xx_eth_netdev_ops = {
	.ndo_open		= mv643xx_eth_open,
	.ndo_stop		= mv643xx_eth_stop,
	.ndo_start_xmit		= mv643xx_eth_xmit,
	.ndo_set_rx_mode	= mv643xx_eth_set_rx_mode,
	.ndo_set_mac_address	= mv643xx_eth_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= mv643xx_eth_ioctl,
	.ndo_change_mtu		= mv643xx_eth_change_mtu,
	.ndo_tx_timeout		= mv643xx_eth_tx_timeout,
	.ndo_get_stats		= mv643xx_eth_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mv643xx_eth_netpoll,
#endif
};
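/*
 * Per-port probe.  Each port's register block occupies 0x400 bytes
 * inside the shared region, so its base is computed below as
 * shared->base + 0x0400 + (port_number << 10), i.e. consecutive
 * 1 KiB blocks starting at offset 0x0400.
 */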
static int mv643xx_eth_probe(struct platform_device *pdev)
{
	struct mv643xx_eth_platform_data *pd;
	struct mv643xx_eth_private *mp;
	struct net_device *dev;
	struct resource *res;
	int err;

	pd = pdev->dev.platform_data;
	if (pd == NULL) {
		dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
		return -ENODEV;
	}

	if (pd->shared == NULL) {
		dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n");
		return -ENODEV;
	}

	dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8);
	if (!dev)
		return -ENOMEM;

	mp = netdev_priv(dev);
	platform_set_drvdata(pdev, mp);

	mp->shared = platform_get_drvdata(pd->shared);
	mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10);
	mp->port_num = pd->port_number;

	mp->dev = dev;

	set_params(mp, pd);
	netif_set_real_num_tx_queues(dev, mp->txq_count);
	netif_set_real_num_rx_queues(dev, mp->rxq_count);

	if (pd->phy_addr != MV643XX_ETH_PHY_NONE)
		mp->phy = phy_scan(mp, pd->phy_addr);

	if (mp->phy != NULL)
		phy_init(mp, pd->speed, pd->duplex);

	SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);

	init_pscr(mp, pd->speed, pd->duplex);


	mib_counters_clear(mp);

	init_timer(&mp->mib_counters_timer);
	mp->mib_counters_timer.data = (unsigned long)mp;
	mp->mib_counters_timer.function = mib_counters_timer_wrapper;
	mp->mib_counters_timer.expires = jiffies + 30 * HZ;
	add_timer(&mp->mib_counters_timer);

	spin_lock_init(&mp->mib_counters_lock);

	INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);

	netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128);

	init_timer(&mp->rx_oom);
	mp->rx_oom.data = (unsigned long)mp;
	mp->rx_oom.function = oom_timer_wrapper;


	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	BUG_ON(!res);
	dev->irq = res->start;

	dev->netdev_ops = &mv643xx_eth_netdev_ops;

	dev->watchdog_timeo = 2 * HZ;
	dev->base_addr = 0;

	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM;

	SET_NETDEV_DEV(dev, &pdev->dev);

	if (mp->shared->win_protect)
		wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);

	netif_carrier_off(dev);

	wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);

	set_rx_coal(mp, 250);
	set_tx_coal(mp, 0);

	err = register_netdev(dev);
	if (err)
		goto out;

	netdev_notice(dev, "port %d with MAC address %pM\n",
		      mp->port_num, dev->dev_addr);

	if (mp->tx_desc_sram_size > 0)
		netdev_notice(dev, "configured with sram\n");

	return 0;

out:
	free_netdev(dev);

	return err;
}

static int mv643xx_eth_remove(struct platform_device *pdev)
{
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);

	unregister_netdev(mp->dev);
	if (mp->phy != NULL)
		phy_detach(mp->phy);
	cancel_work_sync(&mp->tx_timeout_task);
	free_netdev(mp->dev);

	platform_set_drvdata(pdev, NULL);

	return 0;
}

static void mv643xx_eth_shutdown(struct platform_device *pdev)
{
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);

	/* Mask all interrupts on ethernet port */
	wrlp(mp, INT_MASK, 0);
	rdlp(mp, INT_MASK);

	if (netif_running(mp->dev))
		port_reset(mp);
}

static struct platform_driver mv643xx_eth_driver = {
	.probe = mv643xx_eth_probe,
	.remove = mv643xx_eth_remove,
	.shutdown = mv643xx_eth_shutdown,
	.driver = {
		.name = MV643XX_ETH_NAME,
		.owner = THIS_MODULE,
	},
};
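/*
 * Module init/exit.  The shared (per-controller) driver must be
 * registered before the per-port driver, since each port's probe
 * looks up its controller through platform_get_drvdata(pd->shared);
 * tear-down happens in the reverse order.
 */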
static int __init mv643xx_eth_init_module(void)
{
	int rc;

	rc = platform_driver_register(&mv643xx_eth_shared_driver);
	if (!rc) {
		rc = platform_driver_register(&mv643xx_eth_driver);
		if (rc)
			platform_driver_unregister(&mv643xx_eth_shared_driver);
	}

	return rc;
}
module_init(mv643xx_eth_init_module);

static void __exit mv643xx_eth_cleanup_module(void)
{
	platform_driver_unregister(&mv643xx_eth_driver);
	platform_driver_unregister(&mv643xx_eth_shared_driver);
}
module_exit(mv643xx_eth_cleanup_module);

MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, "
	      "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek");
MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
MODULE_ALIAS("platform:" MV643XX_ETH_NAME);