Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/net/gianfar.c at v2.6.34-rc2 (3078 lines, 82 kB)

/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors.  The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed).  In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets).  The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in.  The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted.  Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
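
/*
 * Illustrative sketch (not part of this file): the RX ring walk that the
 * theory-of-operation comment above describes, reduced to its core.  All
 * names here are hypothetical stand-ins, not the driver's own structures.
 */
#if 0
struct demo_bd {
	unsigned short status;	/* EMPTY/WRAP flag bits, owned by hardware */
	unsigned short length;
	unsigned int bufPtr;	/* physical address of the data buffer */
};

#define DEMO_BD_EMPTY	0x8000	/* hardware still owns this descriptor */
#define DEMO_BD_WRAP	0x2000	/* last descriptor: wrap back to base */

/* Process filled descriptors starting at *cur until one is still EMPTY,
 * or until the NAPI-style budget runs out.  Returns packets handled. */
static int demo_rx_walk(struct demo_bd *base, struct demo_bd **cur, int budget)
{
	struct demo_bd *bdp = *cur;
	int howmany = 0;

	while (!(bdp->status & DEMO_BD_EMPTY) && howmany < budget) {
		howmany++;
		/* ... pass the buffer up the stack, attach a fresh skb ... */
		bdp->status |= DEMO_BD_EMPTY;	/* hand it back to hardware */
		bdp = (bdp->status & DEMO_BD_WRAP) ? base : bdp + 1;
	}
	*cur = bdp;	/* remember where to resume next time */
	return howmany;
}
#endif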

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>

#include "gianfar.h"
#include "fsl_pq_mdio.h"

#define TX_TIMEOUT      (1*HZ)
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct of_device *ofdev,
		const struct of_device_id *match);
static int gfar_remove(struct of_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int amount_pull);
static void gfar_vlan_rx_register(struct net_device *netdev,
		struct vlan_group *grp);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	eieio();

	bdp->lstatus = lstatus;
}
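
/*
 * Illustrative note (an assumption inferred from how lstatus is used in
 * this file, not a quote of gianfar.h): "lstatus" packs the 16 status/flag
 * bits and the 16-bit buffer length into one 32-bit word, so a descriptor
 * can be published to hardware with a single store after the eieio()
 * barrier above.
 */
#if 0
#define DEMO_BD_LFLAG(flags)	((u32)(flags) << 16)	/* flags, upper half */
#define DEMO_BD_LENGTH_MASK	0x0000ffff		/* length, lower half */

static u32 demo_pack_lstatus(u16 flags, u16 length)
{
	return DEMO_BD_LFLAG(flags) | (length & DEMO_BD_LENGTH_MASK);
}
#endif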

static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
					pr_err("%s: Can't allocate RX buffers\n",
							ndev->name);
					goto err_rxalloc_fail;
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}

	}

	return 0;

err_rxalloc_fail:
	free_skb_resources(priv);
	return -ENOMEM;
}

static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->ofdev->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
			sizeof(struct txbd8) * priv->total_tx_ring_size +
			sizeof(struct rxbd8) * priv->total_rx_ring_size,
			&addr, GFP_KERNEL);
	if (!vaddr) {
		if (netif_msg_ifup(priv))
			pr_err("%s: Could not allocate buffer descriptors!\n",
			       ndev->name);
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = (struct txbd8 *) vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = (struct rxbd8 *) vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
				tx_queue->tx_ring_size, GFP_KERNEL);
		if (!tx_queue->tx_skbuff) {
			if (netif_msg_ifup(priv))
				pr_err("%s: Could not allocate tx_skbuff\n",
						ndev->name);
			goto cleanup;
		}

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
				rx_queue->rx_ring_size, GFP_KERNEL);

		if (!rx_queue->rx_skbuff) {
			if (netif_msg_ifup(priv))
				pr_err("%s: Could not allocate rx_skbuff\n",
				       ndev->name);
			goto cleanup;
		}

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}
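
/*
 * Illustrative sketch (hypothetical names, not the driver's): the
 * allocation pattern above in miniature.  One dma_alloc_coherent() call
 * backs every ring; the CPU pointer and the DMA handle are then advanced
 * in lockstep so each queue sees a matching (virtual, physical) pair.
 * Freeing later needs only the total size plus the first ring's base and
 * DMA handle, since those denote the whole block.
 */
#if 0
static void *demo_carve(void **vaddr, dma_addr_t *addr, size_t ring_bytes,
			dma_addr_t *ring_dma)
{
	void *ring_cpu = *vaddr;

	*ring_dma = *addr;	/* what the hardware is told */
	*vaddr += ring_bytes;	/* advance both views together */
	*addr += ring_bytes;
	return ring_cpu;	/* what the CPU uses */
}
#endif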

static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

static void gfar_init_mac(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;
	u32 tctrl = 0;
	u32 attrs = 0;

	/* write the tx/rx base registers */
	gfar_init_tx_rx_base(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN;
		/* Program the RIR0 reg with the required distribution */
		gfar_write(&regs->rir0, DEFAULT_RIR0);
	}

	if (priv->rx_csum_enable)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(ndev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* keep vlan related bits if it's enabled */
	if (priv->vlgrp) {
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
		tctrl |= TCTRL_VLINS;
	}

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);

	if (ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	tctrl |= TCTRL_TXSCHED_PRIO;

	gfar_write(&regs->tctrl, tctrl);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}

static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct netdev_queue *txq;
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i = 0;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		txq = netdev_get_tx_queue(dev, i);
		tx_bytes += txq->tx_bytes;
		tx_packets += txq->tx_packets;
	}

	dev->stats.tx_bytes = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_multicast_list = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_vlan_rx_register = gfar_vlan_rx_register,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];

void lock_rx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_lock(&priv->rx_queue[i]->rxlock);
}

void lock_tx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_rx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_unlock(&priv->rx_queue[i]->rxlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return priv->vlgrp || priv->rx_csum_enable;
}

static void free_tx_pointers(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void free_rx_pointers(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void disable_napi(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_grps; i++)
		napi_disable(&priv->gfargrp[i].napi);
}

static void enable_napi(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_grps; i++)
		napi_enable(&priv->gfargrp[i].napi);
}

static int gfar_parse_group(struct device_node *np,
		struct gfar_private *priv, const char *model)
{
	u32 *queue_mask;
	u64 addr, size;

	addr = of_translate_address(np,
			of_get_address(np, 0, &size, NULL));
	priv->gfargrp[priv->num_grps].regs = ioremap(addr, size);

	if (!priv->gfargrp[priv->num_grps].regs)
		return -ENOMEM;

	priv->gfargrp[priv->num_grps].interruptTransmit =
			irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		priv->gfargrp[priv->num_grps].interruptReceive =
			irq_of_parse_and_map(np, 1);
		priv->gfargrp[priv->num_grps].interruptError =
			irq_of_parse_and_map(np, 2);
		if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 ||
			priv->gfargrp[priv->num_grps].interruptReceive < 0 ||
			priv->gfargrp[priv->num_grps].interruptError < 0) {
			return -EINVAL;
		}
	}

	priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
	priv->gfargrp[priv->num_grps].priv = priv;
	spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
	if (priv->mode == MQ_MG_MODE) {
		queue_mask = (u32 *)of_get_property(np,
					"fsl,rx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].rx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
		queue_mask = (u32 *)of_get_property(np,
					"fsl,tx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].tx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
	} else {
		priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
		priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
	}
	priv->num_grps++;

	return 0;
}

static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->node;
	struct device_node *child = NULL;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	/* parse the num of tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	num_tx_qs = tx_queues ? *tx_queues : 1;

	if (num_tx_qs > MAX_TX_QS) {
		printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
				num_tx_qs, MAX_TX_QS);
		printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
	num_rx_qs = rx_queues ? *rx_queues : 1;

	if (num_rx_qs > MAX_RX_QS) {
		printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
				num_rx_qs, MAX_RX_QS);
		printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->node = ofdev->node;
	priv->ndev = dev;

	dev->num_tx_queues = num_tx_qs;
	dev->real_num_tx_queues = num_tx_qs;
	priv->num_tx_queues = num_tx_qs;
	priv->num_rx_queues = num_rx_qs;
	priv->num_grps = 0x0;

	model = of_get_property(np, "model", NULL);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (of_device_is_compatible(np, "fsl,etsec2")) {
		priv->mode = MQ_MG_MODE;
		for_each_child_of_node(np, child) {
			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else {
		priv->mode = SQ_SG_MODE;
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	for (i = 0; i < priv->num_tx_queues; i++)
		priv->tx_queue[i] = NULL;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->rx_queue[i] = NULL;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = (struct gfar_priv_tx_q *)kmalloc(
				sizeof (struct gfar_priv_tx_q), GFP_KERNEL);
		if (!priv->tx_queue[i]) {
			err = -ENOMEM;
			goto tx_alloc_failed;
		}
		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = dev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = (struct gfar_priv_rx_q *)kmalloc(
					sizeof (struct gfar_priv_rx_q), GFP_KERNEL);
		if (!priv->rx_queue[i]) {
			err = -ENOMEM;
			goto rx_alloc_failed;
		}
		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = dev;
		spin_lock_init(&(priv->rx_queue[i]->rxlock));
	}


	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR;
	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR |
			FSL_GIANFAR_DEV_HAS_PADDING |
			FSL_GIANFAR_DEV_HAS_CSUM |
			FSL_GIANFAR_DEV_HAS_VLAN |
			FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id.  The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

rx_alloc_failed:
	free_rx_pointers(priv);
tx_alloc_failed:
	free_tx_pointers(priv);
err_grp_init:
	unmap_group_regs(priv);
	free_netdev(dev);
	return err;
}
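
/*
 * For orientation, a sketch of the sort of device-tree node gfar_of_init()
 * consumes.  This fragment is illustrative only (names and values invented
 * here, not copied from a real .dts):
 *
 *	ethernet@24000 {
 *		model = "eTSEC";
 *		compatible = "fsl,etsec2";
 *		fsl,num_tx_queues = <8>;
 *		fsl,num_rx_queues = <8>;
 *		local-mac-address = [ 00 04 9f 00 00 01 ];
 *		phy-handle = <&phy0>;
 *		tbi-handle = <&tbi0>;
 *		phy-connection-type = "rgmii-id";
 *		fsl,magic-packet;
 *	};
 *
 * With "fsl,etsec2" the driver walks the node's children, one per
 * interrupt group, each optionally carrying fsl,rx-bit-map and
 * fsl,tx-bit-map masks that assign queues to that group.
 */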

/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
}

static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
	unsigned int new_bit_map = 0x0;
	int mask = 0x1 << (max_qs - 1), i;

	for (i = 0; i < max_qs; i++) {
		if (bit_map & mask)
			new_bit_map = new_bit_map + (1 << i);
		mask = mask >> 0x1;
	}
	return new_bit_map;
}
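
/*
 * Worked example (illustrative): reverse_bitmap() mirrors a mask so that
 * bit 0 of the result corresponds to the most significant of the max_qs
 * input bits.  With max_qs = 8:
 *
 *	reverse_bitmap(0x80, 8) == 0x01	(MSB, i.e. queue 0, becomes bit 0)
 *	reverse_bitmap(0x03, 8) == 0xC0
 *
 * This is what lets for_each_set_bit(), which scans from bit 0 upward,
 * visit queue 0 first even though the hardware convention puts queue 0
 * at the mask's MSB.
 */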

static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
		u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	ftp_rqfpr[rqfar] = rqfpr;
	ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	ftp_rqfpr[rqfar] = rqfpr;
	ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	ftp_rqfcr[rqfar] = rqfcr;
	ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	ftp_rqfcr[rqfar] = rqfcr;
	ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	ftp_rqfcr[rqfar] = rqfcr;
	ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		ftp_rqfcr[i] = rqfcr;
		ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct of_device *ofdev,
		const struct of_device_id *match)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct gfar __iomem *regs = NULL;
	int err = 0, i, grp_idx = 0;
	int len_devname;
	u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
	u32 isrg = 0;
	u32 __iomem *baddr;

	err = gfar_of_init(ofdev, &dev);

	if (err)
		return err;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->node = ofdev->node;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	dev_set_drvdata(&ofdev->dev, priv);
	regs = priv->gfargrp[0].regs;

	/* Stop the DMA engine now, in case it was running before
	 * (The firmware could have used it, and left it running). */
	gfar_halt(dev);

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(2);

	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	gfar_write(&regs->maccfg2, MACCFG2_INIT_SETTINGS);

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) regs;

	SET_NETDEV_DEV(dev, &ofdev->dev);

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->mtu = 1500;
	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;

	/* Register for napi ... We are registering NAPI for each grp */
	for (i = 0; i < priv->num_grps; i++)
		netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		priv->rx_csum_enable = 1;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
	} else
		priv->rx_csum_enable = 0;

	priv->vlgrp = NULL;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN)
		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	if (dev->features & NETIF_F_IP_CSUM)
		dev->hard_header_len += GMAC_FCB_LEN;

	/* Program the isrg regs only if number of grps > 1 */
	if (priv->num_grps > 1) {
		baddr = &regs->isrg0;
		for (i = 0; i < priv->num_grps; i++) {
			isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
			isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
			gfar_write(baddr, isrg);
			baddr++;
			isrg = 0x0;
		}
	}

	/* Need to reverse the bit maps as bit_map's MSB is q0,
	 * but for_each_set_bit parses from right to left, which
	 * basically reverses the queue numbers */
	for (i = 0; i < priv->num_grps; i++) {
		priv->gfargrp[i].tx_bit_map = reverse_bitmap(
				priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
		priv->gfargrp[i].rx_bit_map = reverse_bitmap(
				priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
	}

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups */
	for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
		priv->gfargrp[grp_idx].num_rx_queues = 0x0;
		for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
				priv->num_rx_queues) {
			priv->gfargrp[grp_idx].num_rx_queues++;
			priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
			rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
			rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		}
		priv->gfargrp[grp_idx].num_tx_queues = 0x0;
		for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
				priv->num_tx_queues) {
			priv->gfargrp[grp_idx].num_tx_queues++;
			priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
			tstat = tstat | (TSTAT_CLEAR_THALT >> i);
			tqueue = tqueue | (TQUEUE_EN0 >> i);
		}
		priv->gfargrp[grp_idx].rstat = rstat;
		priv->gfargrp[grp_idx].tstat = tstat;
		rstat = tstat = 0;
	}

	gfar_write(&regs->rqueue, rqueue);
	gfar_write(&regs->tqueue, tqueue);

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

	/* Initializing some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* enable filer if using multiple RX queues */
	if (priv->num_rx_queues > 1)
		priv->rx_filer_enable = 1;

	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
				dev->name);
		goto register_fail;
	}

	device_init_wakeup(&dev->dev,
		priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	/* fill out IRQ number and name fields */
	len_devname = strlen(dev->name);
	for (i = 0; i < priv->num_grps; i++) {
		strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
				len_devname);
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
				"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_tx[
				strlen(priv->gfargrp[i].int_name_tx)] = i+48;
			strncpy(&priv->gfargrp[i].int_name_tx[strlen(
					priv->gfargrp[i].int_name_tx)],
				"_tx", sizeof("_tx") + 1);

			strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
					len_devname);
			strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
					"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_rx[
				strlen(priv->gfargrp[i].int_name_rx)] = i+48;
			strncpy(&priv->gfargrp[i].int_name_rx[strlen(
					priv->gfargrp[i].int_name_rx)],
				"_rx", sizeof("_rx") + 1);

			strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
					len_devname);
			strncpy(&priv->gfargrp[i].int_name_er[len_devname],
					"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_er[strlen(
					priv->gfargrp[i].int_name_er)] = i+48;
			strncpy(&priv->gfargrp[i].int_name_er[strlen(
					priv->gfargrp[i].int_name_er)],
				"_er", sizeof("_er") + 1);
		} else
			priv->gfargrp[i].int_name_tx[len_devname] = '\0';
	}

	/* Initialize the filer table */
	gfar_init_filer_table(priv);

	/* Create all the sysfs files */
	gfar_init_sysfs(dev);

	/* Print out the device info */
	printk(KERN_INFO DEVICE_NAME "%pM\n", dev->name, dev->dev_addr);

	/* Even more device info helps when determining which kernel
	 * provided which set of benchmarks. */
	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
	for (i = 0; i < priv->num_rx_queues; i++)
		printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n",
			dev->name, i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n",
			dev->name, i, priv->tx_queue[i]->tx_ring_size);

	return 0;

register_fail:
	unmap_group_regs(priv);
	free_tx_pointers(priv);
	free_rx_pointers(priv);
	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);
	free_netdev(dev);
	return err;
}

static int gfar_remove(struct of_device *ofdev)
{
	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);

	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);

	dev_set_drvdata(&ofdev->dev, NULL);

	unregister_netdev(priv->ndev);
	unmap_group_regs(priv);
	free_netdev(priv->ndev);

	return 0;
}

#ifdef CONFIG_PM

static int gfar_suspend(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;

	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	netif_device_detach(ndev);

	if (netif_running(ndev)) {

		local_irq_save(flags);
		lock_tx_qs(priv);
		lock_rx_qs(priv);

		gfar_halt_nodisable(ndev);

		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
		tempval = gfar_read(&regs->maccfg1);

		tempval &= ~MACCFG1_TX_EN;

		if (!magic_packet)
			tempval &= ~MACCFG1_RX_EN;

		gfar_write(&regs->maccfg1, tempval);

		unlock_rx_qs(priv);
		unlock_tx_qs(priv);
		local_irq_restore(flags);

		disable_napi(priv);

		if (magic_packet) {
			/* Enable interrupt on Magic Packet */
			gfar_write(&regs->imask, IMASK_MAG);

			/* Enable Magic Packet mode */
			tempval = gfar_read(&regs->maccfg2);
			tempval |= MACCFG2_MPEN;
			gfar_write(&regs->maccfg2, tempval);
		} else {
			phy_stop(priv->phydev);
		}
	}

	return 0;
}

static int gfar_resume(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;
	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);
		return 0;
	}

	if (!magic_packet && priv->phydev)
		phy_start(priv->phydev);

	/* Disable Magic Packet mode, in case something
	 * else woke us up.
	 */
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);

	tempval = gfar_read(&regs->maccfg2);
	tempval &= ~MACCFG2_MPEN;
	gfar_write(&regs->maccfg2, tempval);

	gfar_start(ndev);

	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);

	netif_device_attach(ndev);

	enable_napi(priv);

	return 0;
}

static int gfar_restore(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!netif_running(ndev))
		return 0;

	gfar_init_bds(ndev);
	init_registers(ndev);
	gfar_set_mac_address(ndev);
	gfar_init_mac(ndev);
	gfar_start(ndev);

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	if (priv->phydev)
		phy_start(priv->phydev);

	netif_device_attach(ndev);
	enable_napi(priv);

	return 0;
}

static struct dev_pm_ops gfar_pm_ops = {
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,
};

#define GFAR_PM_OPS (&gfar_pm_ops)

static int gfar_legacy_suspend(struct of_device *ofdev, pm_message_t state)
{
	return gfar_suspend(&ofdev->dev);
}

static int gfar_legacy_resume(struct of_device *ofdev)
{
	return gfar_resume(&ofdev->dev);
}

#else

#define GFAR_PM_OPS NULL
#define gfar_legacy_suspend NULL
#define gfar_legacy_resume NULL

#endif

/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 ecntrl;

	ecntrl = gfar_read(&regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE)
			return PHY_INTERFACE_MODE_RMII;
		else {
			phy_interface_t interface = priv->interface;

			/*
			 * This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}


/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		SUPPORTED_1000baseT_Full : 0;
	phy_interface_t interface;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	interface = gfar_get_interface(dev);

	priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
				      interface);
	if (!priv->phydev)
		priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
							 interface);
	if (!priv->phydev) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	/* Remove any features not supported by the controller */
	priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	priv->phydev->advertising = priv->phydev->supported;

	return 0;
}

/*
 * Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip.  We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register.  We assume
 * that the TBIPA register is valid.  Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *tbiphy;

	if (!priv->tbi_node) {
		dev_warn(&dev->dev, "error: SGMII mode requires that the "
				"device tree specify a tbi-handle\n");
		return;
	}

	tbiphy = of_phy_find_device(priv->tbi_node);
	if (!tbiphy) {
		dev_err(&dev->dev, "error: Could not get TBI device\n");
		return;
	}

	/*
	 * If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
	 * everything for us?  Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
		return;

	/* Single clk mode, mii mode off (for serdes communication) */
	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	phy_write(tbiphy, MII_ADVERTISE,
			ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
			ADVERTISE_1000XPSE_ASYM);

	phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
			BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
}

static void init_registers(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	int i = 0;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	/* Init hash registers to zero */
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);

	/* Zero out the rmon mib registers if it has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize the max receive buffer length */
	gfar_write(&regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
}


/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	u32 tempval;
	int i = 0;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Mask all interrupts */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);

		/* Clear all interrupts */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&regs->dmactrl);
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
	    != (DMACTRL_GRS | DMACTRL_GTS)) {
		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
		gfar_write(&regs->dmactrl, tempval);

		while (!(gfar_read(&regs->ievent) &
			 (IEVENT_GRSC | IEVENT_GTSC)))
			cpu_relax();
	}
}

/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	gfar_halt_nodisable(dev);

	/* Disable Rx and Tx */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}

static void free_grp_irqs(struct gfar_priv_grp *grp)
{
	free_irq(grp->interruptError, grp);
	free_irq(grp->interruptTransmit, grp);
	free_irq(grp->interruptReceive, grp);
}

void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	int i;

	phy_stop(priv->phydev);


	/* Lock it down */
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);

	gfar_halt(dev);

	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);

	/* Free the IRQs */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++)
			free_grp_irqs(&priv->gfargrp[i]);
	} else {
		for (i = 0; i < priv->num_grps; i++)
			free_irq(priv->gfargrp[i].interruptTransmit,
					&priv->gfargrp[i]);
	}

	free_skb_resources(priv);
}

static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
	struct txbd8 *txbdp;
	struct gfar_private *priv = netdev_priv(tx_queue->dev);
	int i, j;

	txbdp = tx_queue->tx_bd_base;

	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		if (!tx_queue->tx_skbuff[i])
			continue;

		dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
				txbdp->length, DMA_TO_DEVICE);
		txbdp->lstatus = 0;
		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
				j++) {
			txbdp++;
			dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
					txbdp->length, DMA_TO_DEVICE);
		}
		txbdp++;
		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
		tx_queue->tx_skbuff[i] = NULL;
	}
	kfree(tx_queue->tx_skbuff);
}

static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
	struct rxbd8 *rxbdp;
	struct gfar_private *priv = netdev_priv(rx_queue->dev);
	int i;

	rxbdp = rx_queue->rx_bd_base;

	for (i = 0; i < rx_queue->rx_ring_size; i++) {
		if (rx_queue->rx_skbuff[i]) {
			dma_unmap_single(&priv->ofdev->dev,
					rxbdp->bufPtr, priv->rx_buffer_size,
					DMA_FROM_DEVICE);
			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
			rx_queue->rx_skbuff[i] = NULL;
		}
		rxbdp->lstatus = 0;
		rxbdp->bufPtr = 0;
		rxbdp++;
	}
	kfree(rx_queue->rx_skbuff);
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff */
static void free_skb_resources(struct gfar_private *priv)
{
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		if (tx_queue->tx_skbuff)
			free_skb_tx_queue(tx_queue);
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		if (rx_queue->rx_skbuff)
			free_skb_rx_queue(rx_queue);
	}

	dma_free_coherent(&priv->ofdev->dev,
			sizeof(struct txbd8) * priv->total_tx_ring_size +
			sizeof(struct rxbd8) * priv->total_rx_ring_size,
			priv->tx_queue[0]->tx_bd_base,
			priv->tx_queue[0]->tx_bd_dma_base);
}

void gfar_start(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	int i = 0;

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear THLT/RHLT, so that the DMA starts polling now */
		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}

	dev->trans_start = jiffies;
}

void gfar_configure_coalescing(struct gfar_private *priv,
	unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i = 0;

	/* Backward compatible case ---- even if we enable
	 * multiple queues, there's only single reg to program
	 */
	gfar_write(&regs->txic, 0);
	if (likely(priv->tx_queue[0]->txcoalescing))
		gfar_write(&regs->txic, priv->tx_queue[0]->txic);

	gfar_write(&regs->rxic, 0);
	if (unlikely(priv->rx_queue[0]->rxcoalescing))
		gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);

	if (priv->mode == MQ_MG_MODE) {
		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			if (likely(priv->tx_queue[i]->txcoalescing)) {
				gfar_write(baddr + i, 0);
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
			}
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			if (likely(priv->rx_queue[i]->rxcoalescing)) {
				gfar_write(baddr + i, 0);
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
			}
		}
	}
}

static int register_grp_irqs(struct gfar_priv_grp *grp)
{
	struct gfar_private *priv = grp->priv;
	struct net_device *dev = priv->ndev;
	int err;

	/* If the device has multiple interrupts, register for
	 * them.  Otherwise, only register for the one */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive */
		if ((err = request_irq(grp->interruptError, gfar_error, 0,
				grp->int_name_er, grp)) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, grp->interruptError);

			goto err_irq_fail;
		}

		if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
				0, grp->int_name_tx, grp)) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, grp->interruptTransmit);
			goto tx_irq_fail;
		}

		if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
				grp->int_name_rx, grp)) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, grp->interruptReceive);
			goto rx_irq_fail;
		}
	} else {
		if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
				grp->int_name_tx, grp)) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, grp->interruptTransmit);
			goto err_irq_fail;
		}
	}

	return 0;

rx_irq_fail:
	free_irq(grp->interruptTransmit, grp);
tx_irq_fail:
	free_irq(grp->interruptError, grp);
err_irq_fail:
	return err;

}

/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = NULL;
	int err, i, j;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	err = gfar_alloc_skb_resources(ndev);
	if (err)
		return err;

	gfar_init_mac(ndev);

	for (i = 0; i < priv->num_grps; i++) {
		err = register_grp_irqs(&priv->gfargrp[i]);
		if (err) {
			for (j = 0; j < i; j++)
				free_grp_irqs(&priv->gfargrp[j]);
			goto irq_fail;
		}
	}

	/* Start the controller */
	gfar_start(ndev);

	phy_start(priv->phydev);

	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	return 0;

irq_fail:
	free_skb_resources(priv);
	return err;
}

/* Called when something needs to use the ethernet device
 * Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	enable_napi(priv);

	skb_queue_head_init(&priv->rx_recycle);

	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);

	if (err) {
		disable_napi(priv);
		return err;
	}

	err = startup_gfar(dev);
	if (err) {
		disable_napi(priv);
		return err;
	}

	netif_tx_start_all_queues(dev);

	device_set_wakeup_enable(&dev->dev, priv->wol_en);

	return err;
}

static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
{
	u8 flags = 0;

	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload.  We set it to checksum, using a pseudo-header
	 * we provide
	 */
	flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is
	 * And provide the already calculated phcs */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = udp_hdr(skb)->check;
	} else
		fcb->phcs = tcp_hdr(skb)->check;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr */
	fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}
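
/*
 * Worked example (illustrative, assuming a plain Ethernet II frame and an
 * IPv4 header with no options): after gfar_add_fcb() pushes the 8-byte
 * FCB, skb->data points at the FCB, so skb_network_offset() is
 * GMAC_FCB_LEN + 14 (the Ethernet header).  That makes
 *
 *	l3os = (GMAC_FCB_LEN + 14) - GMAC_FCB_LEN = 14
 *	l4os = skb_network_header_len() = 20	(IPv4 header, no options)
 *
 * i.e. the offsets the hardware needs are measured from the end of the
 * FCB, not from skb->data.
 */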
/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct netdev_queue *txq;
	struct gfar __iomem *regs = NULL;
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp, *txbdp_start, *base;
	u32 lstatus;
	int i, rq = 0;
	u32 bufaddr;
	unsigned long flags;
	unsigned int nr_frags, length;

	rq = skb->queue_mapping;
	tx_queue = priv->tx_queue[rq];
	txq = netdev_get_tx_queue(dev, rq);
	base = tx_queue->tx_bd_base;
	regs = tx_queue->grp->regs;

	/* make space for additional header when fcb is needed */
	if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
			(priv->vlgrp && vlan_tx_tag_present(skb))) &&
			(skb_headroom(skb) < GMAC_FCB_LEN)) {
		struct sk_buff *skb_new;

		skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN);
		if (!skb_new) {
			dev->stats.tx_errors++;
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		kfree_skb(skb);
		skb = skb_new;
	}

	/* total number of fragments in the SKB */
	nr_frags = skb_shinfo(skb)->nr_frags;

	/* check if there is space to queue this packet */
	if ((nr_frags + 1) > tx_queue->num_txbdfree) {
		/* no space, stop the queue */
		netif_tx_stop_queue(txq);
		dev->stats.tx_fifo_errors++;
		return NETDEV_TX_BUSY;
	}

	/* Update transmit stats */
	txq->tx_bytes += skb->len;
	txq->tx_packets++;

	txbdp = txbdp_start = tx_queue->cur_tx;

	if (nr_frags == 0) {
		lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
	} else {
		/* Place the fragment addresses and lengths into the TxBDs */
		for (i = 0; i < nr_frags; i++) {
			/* Point at the next BD, wrapping as needed */
			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);

			length = skb_shinfo(skb)->frags[i].size;

			lstatus = txbdp->lstatus | length |
				BD_LFLAG(TXBD_READY);

			/* Handle the last BD specially */
			if (i == nr_frags - 1)
				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

			bufaddr = dma_map_page(&priv->ofdev->dev,
					skb_shinfo(skb)->frags[i].page,
					skb_shinfo(skb)->frags[i].page_offset,
					length,
					DMA_TO_DEVICE);

			/* set the TxBD length and buffer pointer */
			txbdp->bufPtr = bufaddr;
			txbdp->lstatus = lstatus;
		}

		lstatus = txbdp_start->lstatus;
	}

	/* Set up checksumming */
	if (CHECKSUM_PARTIAL == skb->ip_summed) {
		fcb = gfar_add_fcb(skb);
		lstatus |= BD_LFLAG(TXBD_TOE);
		gfar_tx_checksum(skb, fcb);
	}

	if (priv->vlgrp && vlan_tx_tag_present(skb)) {
		if (unlikely(NULL == fcb)) {
			fcb = gfar_add_fcb(skb);
			lstatus |= BD_LFLAG(TXBD_TOE);
		}

		gfar_tx_vlan(skb, fcb);
	}

	/* setup the TxBD length and buffer pointer for the first BD */
	txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
			skb_headlen(skb), DMA_TO_DEVICE);

	lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);

	/*
	 * We can work in parallel with gfar_clean_tx_ring(), except
	 * when modifying num_txbdfree.  Note that we didn't grab the lock
	 * when we were reading num_txbdfree and checking for available
	 * space; that's because outside of this function it can only grow,
	 * and once we've got the needed space, it cannot suddenly disappear.
	 *
	 * The lock also protects us from gfar_error(), which can modify
	 * regs->tstat and thus retrigger the transfers, which is why we
	 * also must grab the lock before setting the ready bit for the
	 * first to-be-transmitted BD.
	 */
	spin_lock_irqsave(&tx_queue->txlock, flags);

	/*
	 * The powerpc-specific eieio() is used, as wmb() has too strong
	 * semantics (it requires synchronization between cacheable and
	 * uncacheable mappings, which eieio doesn't provide and which we
	 * don't need), thus requiring a more expensive sync instruction.  At
	 * some point, the set of architecture-independent barrier functions
	 * should be expanded to include weaker barriers.
	 */
	eieio();

	txbdp_start->lstatus = lstatus;

	eieio(); /* force lstatus write before tx_skbuff */

	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;

	/* Update the current skb pointer to the next entry we will use
	 * (wrapping if necessary) */
	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
		TX_RING_MOD_MASK(tx_queue->tx_ring_size);

	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);

	/* reduce TxBD free count */
	tx_queue->num_txbdfree -= (nr_frags + 1);

	dev->trans_start = jiffies;

	/* If the next BD still needs to be cleaned up, then the bds
	   are full.  We need to tell the kernel to stop sending us stuff. */
	if (!tx_queue->num_txbdfree) {
		netif_tx_stop_queue(txq);

		dev->stats.tx_fifo_errors++;
	}

	/* Tell the DMA to go go go */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);

	/* Unlock priv */
	spin_unlock_irqrestore(&tx_queue->txlock, flags);

	return NETDEV_TX_OK;
}
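/*
 * Editorial worked example (not from the original source): an skb with
 * two page fragments needs nr_frags + 1 = 3 TxBDs -- one for the linear
 * part and one per fragment.  gfar_start_xmit() above therefore refuses
 * the packet unless num_txbdfree >= 3, and gfar_clean_tx_ring() later
 * returns all three descriptors in one num_txbdfree += frags + 1 step.
 */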
/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	disable_napi(priv);

	skb_queue_purge(&priv->rx_recycle);
	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	netif_tx_stop_all_queues(dev);

	return 0;
}

/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

/* Enables and disables VLAN insertion/extraction */
static void gfar_vlan_rx_register(struct net_device *dev,
		struct vlan_group *grp)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	unsigned long flags;
	u32 tempval;

	regs = priv->gfargrp[0].regs;
	local_irq_save(flags);
	lock_rx_qs(priv);

	priv->vlgrp = grp;

	if (grp) {
		/* Enable VLAN tag insertion */
		tempval = gfar_read(&regs->tctrl);
		tempval |= TCTRL_VLINS;

		gfar_write(&regs->tctrl, tempval);

		/* Enable VLAN tag extraction */
		tempval = gfar_read(&regs->rctrl);
		tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Disable VLAN tag insertion */
		tempval = gfar_read(&regs->tctrl);
		tempval &= ~TCTRL_VLINS;
		gfar_write(&regs->tctrl, tempval);

		/* Disable VLAN tag extraction */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~RCTRL_VLEX;
		/* If parse is no longer required, then disable parser */
		if (tempval & RCTRL_REQ_PARSER)
			tempval |= RCTRL_PRSDEP_INIT;
		else
			tempval &= ~RCTRL_PRSDEP_INIT;
		gfar_write(&regs->rctrl, tempval);
	}

	gfar_change_mtu(dev, dev->mtu);

	unlock_rx_qs(priv);
	local_irq_restore(flags);
}
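/*
 * Editorial worked example (constants assumed from gianfar.h, not
 * restated in this file): for new_mtu = 1500 with VLAN enabled and an
 * 8-byte FCB, frame_size below becomes 1500 + 14 (ETH_HLEN) +
 * 4 (VLAN_HLEN) + 8 (GMAC_FCB_LEN) + padding.  With padding = 2 that is
 * 1528, which the rounding step turns into (1528 & ~511) + 512 = 1536,
 * assuming INCREMENTAL_BUFFER_SIZE is 512.
 */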
static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	int tempsize, tempval;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int oldsize = priv->rx_buffer_size;
	int frame_size = new_mtu + ETH_HLEN;

	if (priv->vlgrp)
		frame_size += VLAN_HLEN;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: Invalid MTU setting\n",
					dev->name);
		return -EINVAL;
	}

	if (gfar_uses_fcb(priv))
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	tempsize =
	    (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
	    INCREMENTAL_BUFFER_SIZE;

	/* Only stop and start the controller if it isn't already
	 * stopped, and we changed something */
	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		stop_gfar(dev);

	priv->rx_buffer_size = tempsize;

	dev->mtu = new_mtu;

	gfar_write(&regs->mrblr, priv->rx_buffer_size);
	gfar_write(&regs->maxfrm, priv->rx_buffer_size);

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length */
	tempval = gfar_read(&regs->maccfg2);

	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
	else
		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

	gfar_write(&regs->maccfg2, tempval);

	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		startup_gfar(dev);

	return 0;
}

/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
			reset_task);
	struct net_device *dev = priv->ndev;

	if (dev->flags & IFF_UP) {
		netif_tx_stop_all_queues(dev);
		stop_gfar(dev);
		startup_gfar(dev);
		netif_tx_start_all_queues(dev);
	}

	netif_tx_schedule_all(dev);
}

static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}
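/*
 * Editorial note (not from the original source): gfar_clean_tx_ring()
 * below walks the ring from dirty_tx (the oldest descriptor still owned
 * by software) toward cur_tx, unmapping one BD for the linear part of
 * each skb plus one per fragment -- exactly mirroring what
 * gfar_start_xmit() set up.
 */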
/* Interrupt Handler for Transmit complete */
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
	struct net_device *dev = tx_queue->dev;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *bdp;
	struct txbd8 *lbdp = NULL;
	struct txbd8 *base = tx_queue->tx_bd_base;
	struct sk_buff *skb;
	int skb_dirtytx;
	int tx_ring_size = tx_queue->tx_ring_size;
	int frags = 0;
	int i;
	int howmany = 0;
	u32 lstatus;

	rx_queue = priv->rx_queue[tx_queue->qindex];
	bdp = tx_queue->dirty_tx;
	skb_dirtytx = tx_queue->skb_dirtytx;

	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
		unsigned long flags;

		frags = skb_shinfo(skb)->nr_frags;
		lbdp = skip_txbd(bdp, frags, base, tx_ring_size);

		lstatus = lbdp->lstatus;

		/* Only clean completed frames */
		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
				(lstatus & BD_LENGTH_MASK))
			break;

		dma_unmap_single(&priv->ofdev->dev,
				bdp->bufPtr,
				bdp->length,
				DMA_TO_DEVICE);

		bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
		bdp = next_txbd(bdp, base, tx_ring_size);

		for (i = 0; i < frags; i++) {
			dma_unmap_page(&priv->ofdev->dev,
					bdp->bufPtr,
					bdp->length,
					DMA_TO_DEVICE);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next_txbd(bdp, base, tx_ring_size);
		}

		/*
		 * If there's room in the queue (limit it to rx_ring_size)
		 * we add this skb back into the pool, if it's the right size
		 */
		if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
				skb_recycle_check(skb, priv->rx_buffer_size +
					RXBUF_ALIGNMENT))
			__skb_queue_head(&priv->rx_recycle, skb);
		else
			dev_kfree_skb_any(skb);

		tx_queue->tx_skbuff[skb_dirtytx] = NULL;

		skb_dirtytx = (skb_dirtytx + 1) &
			TX_RING_MOD_MASK(tx_ring_size);

		howmany++;
		spin_lock_irqsave(&tx_queue->txlock, flags);
		tx_queue->num_txbdfree += frags + 1;
		spin_unlock_irqrestore(&tx_queue->txlock, flags);
	}

	/* If we freed a buffer, we can restart transmission, if necessary */
	if (__netif_subqueue_stopped(dev, tx_queue->qindex) &&
			tx_queue->num_txbdfree)
		netif_wake_subqueue(dev, tx_queue->qindex);

	/* Update dirty indicators */
	tx_queue->skb_dirtytx = skb_dirtytx;
	tx_queue->dirty_tx = bdp;

	return howmany;
}

static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
{
	unsigned long flags;

	spin_lock_irqsave(&gfargrp->grplock, flags);
	if (napi_schedule_prep(&gfargrp->napi)) {
		gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
		__napi_schedule(&gfargrp->napi);
	} else {
		/*
		 * Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
	}
	spin_unlock_irqrestore(&gfargrp->grplock, flags);
}

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
	return IRQ_HANDLED;
}
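/*
 * Editorial note (not from the original source): the rx_recycle list
 * populated by gfar_clean_tx_ring() above is the same one that
 * gfar_new_skb() below dequeues from, so suitably sized transmit
 * buffers are reused as receive buffers instead of going back to the
 * allocator.
 */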
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		struct sk_buff *skb)
{
	struct net_device *dev = rx_queue->dev;
	struct gfar_private *priv = netdev_priv(dev);
	dma_addr_t buf;

	buf = dma_map_single(&priv->ofdev->dev, skb->data,
			priv->rx_buffer_size, DMA_FROM_DEVICE);
	gfar_init_rxbdp(rx_queue, bdp, buf);
}

struct sk_buff *gfar_new_skb(struct net_device *dev)
{
	unsigned int alignamount;
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;

	skb = __skb_dequeue(&priv->rx_recycle);
	if (!skb)
		skb = netdev_alloc_skb(dev,
				priv->rx_buffer_size + RXBUF_ALIGNMENT);

	if (!skb)
		return NULL;

	alignamount = RXBUF_ALIGNMENT -
		(((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1));

	/* We need the data buffer to be aligned properly.  We will
	 * reserve as many bytes as needed to align the data properly
	 */
	skb_reserve(skb, alignamount);

	return skb;
}

static inline void count_errors(unsigned short status, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors
	 * matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

		estats->rx_trunc++;

		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			estats->rx_large++;
		else
			estats->rx_short++;
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		estats->rx_nonoctet++;
	}
	if (status & RXBD_CRCERR) {
		estats->rx_crcerr++;
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		estats->rx_overrun++;
		stats->rx_crc_errors++;
	}
}

irqreturn_t gfar_receive(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
	return IRQ_HANDLED;
}

static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary.  Otherwise, it is */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
}

/* gfar_process_frame() -- handle one incoming packet if skb
 * isn't NULL. */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int amount_pull)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	int ret;

	/* fcb is at the beginning if it exists */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb */
	/* Remove the padded bytes, if there are any */
	if (amount_pull) {
		skb_record_rx_queue(skb, fcb->rq);
		skb_pull(skb, amount_pull);
	}

	if (priv->rx_csum_enable)
		gfar_rx_checksum(skb, fcb);

	/* Tell the skb what kind of packet this is */
	skb->protocol = eth_type_trans(skb, dev);

	/* Send the packet up the stack */
	if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
		ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp, fcb->vlctl);
	else
		ret = netif_receive_skb(skb);

	if (NET_RX_DROP == ret)
		priv->extra_stats.kernel_dropped++;

	return 0;
}
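/*
 * Editorial worked example (padding value assumed, not from the original
 * source): when the controller prepends an 8-byte FCB and the driver
 * uses 2 bytes of alignment padding, amount_pull below is 8 + 2 = 10,
 * so every received frame has 10 bytes stripped before the Ethernet
 * header is parsed.
 */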
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached.  Returns the number
 * of frames handled
 */
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
	struct net_device *dev = rx_queue->dev;
	struct rxbd8 *bdp, *base;
	struct sk_buff *skb;
	int pkt_len;
	int amount_pull;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = rx_queue->cur_rx;
	base = rx_queue->rx_bd_base;

	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
		priv->padding;

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		struct sk_buff *newskb;
		rmb();

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev);

		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];

		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
				priv->rx_buffer_size, DMA_FROM_DEVICE);

		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
				bdp->status & RXBD_ERR)) {
			count_errors(bdp->status, dev);

			if (unlikely(!newskb))
				newskb = skb;
			else if (skb) {
				/*
				 * We need to reset ->data to what it
				 * was before gfar_new_skb() re-aligned
				 * it to an RXBUF_ALIGNMENT boundary
				 * before we put the skb back on the
				 * recycle list.
				 */
				skb->data = skb->head + NET_SKB_PAD;
				__skb_queue_head(&priv->rx_recycle, skb);
			}
		} else {
			/* Increment the number of packets */
			rx_queue->stats.rx_packets++;
			howmany++;

			if (likely(skb)) {
				/* Remove the FCS from the packet length */
				pkt_len = bdp->length - ETH_FCS_LEN;
				skb_put(skb, pkt_len);
				rx_queue->stats.rx_bytes += pkt_len;
				skb_record_rx_queue(skb, rx_queue->qindex);
				gfar_process_frame(dev, skb, amount_pull);
			} else {
				if (netif_msg_rx_err(priv))
					printk(KERN_WARNING
					       "%s: Missing skb!\n", dev->name);
				rx_queue->stats.rx_dropped++;
				priv->extra_stats.rx_skbmissing++;
			}
		}

		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;

		/* Setup the new bdp */
		gfar_new_rxbdp(rx_queue, bdp, newskb);

		/* Update to the next pointer */
		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);

		/* update to point at the next skb */
		rx_queue->skb_currx =
			(rx_queue->skb_currx + 1) &
			RX_RING_MOD_MASK(rx_queue->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	rx_queue->cur_rx = bdp;

	return howmany;
}
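/*
 * Editorial worked example (not from the original source): in
 * gfar_poll() below, a NAPI budget of 64 across two RX queues starts as
 * 32 per queue.  If queue 0 only has 10 frames pending, its unused 22
 * units are folded into left_over_budget and handed to the still-busy
 * queue on the next pass of the while loop.
 */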
static int gfar_poll(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp = container_of(napi,
			struct gfar_priv_grp, napi);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
	int tx_cleaned = 0, i, left_over_budget = budget;
	unsigned long serviced_queues = 0;
	int num_queues = 0;

	num_queues = gfargrp->num_rx_queues;
	budget_per_queue = budget / num_queues;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived */
	gfar_write(&regs->ievent, IEVENT_RTX_MASK);

	while (num_queues && left_over_budget) {
		budget_per_queue = left_over_budget / num_queues;
		left_over_budget = 0;

		for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
			if (test_bit(i, &serviced_queues))
				continue;
			rx_queue = priv->rx_queue[i];
			tx_queue = priv->tx_queue[rx_queue->qindex];

			tx_cleaned += gfar_clean_tx_ring(tx_queue);
			rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
					budget_per_queue);
			rx_cleaned += rx_cleaned_per_queue;
			if (rx_cleaned_per_queue < budget_per_queue) {
				left_over_budget = left_over_budget +
					(budget_per_queue -
					 rx_cleaned_per_queue);
				set_bit(i, &serviced_queues);
				num_queues--;
			}
		}
	}

	if (tx_cleaned)
		return budget;

	if (rx_cleaned < budget) {
		napi_complete(napi);

		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		gfar_write(&regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer */
		/* Otherwise, clear it */
		gfar_configure_coalescing(priv,
				gfargrp->rx_bit_map, gfargrp->tx_bit_map);
	}

	return rx_cleaned;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i = 0;

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++) {
			disable_irq(priv->gfargrp[i].interruptTransmit);
			disable_irq(priv->gfargrp[i].interruptReceive);
			disable_irq(priv->gfargrp[i].interruptError);
			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
					&priv->gfargrp[i]);
			enable_irq(priv->gfargrp[i].interruptError);
			enable_irq(priv->gfargrp[i].interruptReceive);
			enable_irq(priv->gfargrp[i].interruptTransmit);
		}
	} else {
		for (i = 0; i < priv->num_grps; i++) {
			disable_irq(priv->gfargrp[i].interruptTransmit);
			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
					&priv->gfargrp[i]);
			enable_irq(priv->gfargrp[i].interruptTransmit);
		}
	}
}
#endif

/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;

	/* Save ievent for future reference */
	u32 events = gfar_read(&gfargrp->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, grp_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, grp_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, grp_id);

	return IRQ_HANDLED;
}
/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;

	local_irq_save(flags);
	lock_tx_qs(priv);

	if (phydev->link) {
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);

				ecntrl &= ~(ECNTRL_R100);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				if (netif_msg_link(priv))
					printk(KERN_WARNING
					       "%s: Ack! Speed (%d) is not 10/100/1000!\n",
					       dev->name, phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);
	unlock_tx_qs(priv);
	local_irq_restore(flags);
}
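/*
 * Editorial summary (derived from adjust_link() above, not from the
 * original source): the speed cases map onto the registers like this --
 * 1000 Mbit selects the GMII interface bits in MACCFG2 and clears
 * ECNTRL_R100; 100 and 10 Mbit both select MII, with ECNTRL_R100 set
 * only for 100 Mbit so the reduced-pin modes can tell the two apart.
 */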
/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed) */
static void gfar_set_multi(struct net_device *dev)
{
	struct dev_mc_list *mc_ptr;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (netdev_mc_empty(dev))
			return;

		/* Parse the list, and set the appropriate bits */
		netdev_for_each_mc_addr(mc_ptr, dev) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx,
						mc_ptr->dmi_addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
		}
	}
}

/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255.  This means that the 3 most significant bits of the
 * hash index indicate which gaddr register to use, and the 5
 * other bits indicate which bit (assuming an IBM numbering
 * scheme, which for PowerPC (tm) is usually the case) in the
 * register holds the entry. */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(MAC_ADDR_LEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31 - whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}
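/*
 * Editorial worked example (not from the original source): with an
 * 8-bit hash_width, a CRC result whose top 8 bits are 0b10110110
 * splits into whichreg = 0b101 = 5 and whichbit = 0b10110 = 22, so the
 * address lands in gaddr5, bit 22 counted from the MSB -- i.e. the
 * write in gfar_set_hash_for_addr() above sets 1 << (31 - 22) = 1 << 9
 * in that register.
 */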
/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int idx;
	char tmpbuf[MAC_ADDR_LEN];
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num*2;

	/* Now copy it into the mac registers backwards, cuz */
	/* little endian is silly */
	for (idx = 0; idx < MAC_ADDR_LEN; idx++)
		tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];

	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
}
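/*
 * Editorial worked example (hypothetical address, not from the original
 * source): for addr = 00:04:9f:01:02:03 the loop above produces
 * tmpbuf = {03, 02, 01, 9f, 04, 00}, so on big-endian PowerPC the first
 * write stores 0x0302019f and the second puts 0x0400 in its upper
 * bytes -- the reversed order the MAC station address registers expect.
 * (Note the second u32 read extends two bytes past the 6-byte buffer;
 * only its top half carries address data.)
 */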
/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv = gfargrp->priv;
	struct net_device *dev = priv->ndev;

	/* Save ievent for future reference */
	u32 events = gfar_read(&regs->ievent);

	/* Clear IEVENT */
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	/* Hmm... */
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
		       dev->name, events, gfar_read(&regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			unsigned long flags;

			if (netif_msg_tx_err(priv))
				printk(KERN_DEBUG "%s: TX FIFO underrun, "
				       "packet dropped.\n", dev->name);
			dev->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			local_irq_save(flags);
			lock_tx_qs(priv);

			/* Reactivate the Tx Queues */
			gfar_write(&regs->tstat, gfargrp->tstat);

			unlock_tx_qs(priv);
			local_irq_restore(flags);
		}
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, grp_id);

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
			       dev->name, gfar_read(&regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: bus error\n", dev->name);
	}
	if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
		printk(KERN_DEBUG "%s: control frame\n", dev->name);

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
	}
	return IRQ_HANDLED;
}

static struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);

/* Structure for a device driver */
static struct of_platform_driver gfar_driver = {
	.name = "fsl-gianfar",
	.match_table = gfar_match,

	.probe = gfar_probe,
	.remove = gfar_remove,
	.suspend = gfar_legacy_suspend,
	.resume = gfar_legacy_resume,
	.driver.pm = GFAR_PM_OPS,
};

static int __init gfar_init(void)
{
	return of_register_platform_driver(&gfar_driver);
}

static void __exit gfar_exit(void)
{
	of_unregister_platform_driver(&gfar_driver);
}

module_init(gfar_init);
module_exit(gfar_exit);