/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
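 *
 * As an illustrative sketch only (not code from this driver; the names
 * ring, next_to_clean, OWNED_BY_HW, WRAP and reclaim() are hypothetical),
 * walking such a ring to clean up descriptors the hardware has finished
 * with reduces to a loop of this shape:
 *
 *	bdp = ring->next_to_clean;
 *	while (!(bdp->status & OWNED_BY_HW)) {	- hardware is done with it
 *		reclaim(bdp);			- free the skb or refill the buffer
 *		bdp = (bdp->status & WRAP) ? ring->base : bdp + 1;
 *	}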
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>

#include "gianfar.h"
#include "fsl_pq_mdio.h"

#define TX_TIMEOUT      (1*HZ)
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct of_device *ofdev,
		const struct of_device_id *match);
static int gfar_remove(struct of_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int amount_pull);
static void gfar_vlan_rx_register(struct net_device *netdev,
		struct vlan_group *grp);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
153 dma_addr_t buf) 154{ 155 u32 lstatus; 156 157 bdp->bufPtr = buf; 158 159 lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT); 160 if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1) 161 lstatus |= BD_LFLAG(RXBD_WRAP); 162 163 eieio(); 164 165 bdp->lstatus = lstatus; 166} 167 168static int gfar_init_bds(struct net_device *ndev) 169{ 170 struct gfar_private *priv = netdev_priv(ndev); 171 struct gfar_priv_tx_q *tx_queue = NULL; 172 struct gfar_priv_rx_q *rx_queue = NULL; 173 struct txbd8 *txbdp; 174 struct rxbd8 *rxbdp; 175 int i, j; 176 177 for (i = 0; i < priv->num_tx_queues; i++) { 178 tx_queue = priv->tx_queue[i]; 179 /* Initialize some variables in our dev structure */ 180 tx_queue->num_txbdfree = tx_queue->tx_ring_size; 181 tx_queue->dirty_tx = tx_queue->tx_bd_base; 182 tx_queue->cur_tx = tx_queue->tx_bd_base; 183 tx_queue->skb_curtx = 0; 184 tx_queue->skb_dirtytx = 0; 185 186 /* Initialize Transmit Descriptor Ring */ 187 txbdp = tx_queue->tx_bd_base; 188 for (j = 0; j < tx_queue->tx_ring_size; j++) { 189 txbdp->lstatus = 0; 190 txbdp->bufPtr = 0; 191 txbdp++; 192 } 193 194 /* Set the last descriptor in the ring to indicate wrap */ 195 txbdp--; 196 txbdp->status |= TXBD_WRAP; 197 } 198 199 for (i = 0; i < priv->num_rx_queues; i++) { 200 rx_queue = priv->rx_queue[i]; 201 rx_queue->cur_rx = rx_queue->rx_bd_base; 202 rx_queue->skb_currx = 0; 203 rxbdp = rx_queue->rx_bd_base; 204 205 for (j = 0; j < rx_queue->rx_ring_size; j++) { 206 struct sk_buff *skb = rx_queue->rx_skbuff[j]; 207 208 if (skb) { 209 gfar_init_rxbdp(rx_queue, rxbdp, 210 rxbdp->bufPtr); 211 } else { 212 skb = gfar_new_skb(ndev); 213 if (!skb) { 214 pr_err("%s: Can't allocate RX buffers\n", 215 ndev->name); 216 goto err_rxalloc_fail; 217 } 218 rx_queue->rx_skbuff[j] = skb; 219 220 gfar_new_rxbdp(rx_queue, rxbdp, skb); 221 } 222 223 rxbdp++; 224 } 225 226 } 227 228 return 0; 229 230err_rxalloc_fail: 231 free_skb_resources(priv); 232 return -ENOMEM; 233} 234 235static int gfar_alloc_skb_resources(struct net_device *ndev) 236{ 237 void *vaddr; 238 dma_addr_t addr; 239 int i, j, k; 240 struct gfar_private *priv = netdev_priv(ndev); 241 struct device *dev = &priv->ofdev->dev; 242 struct gfar_priv_tx_q *tx_queue = NULL; 243 struct gfar_priv_rx_q *rx_queue = NULL; 244 245 priv->total_tx_ring_size = 0; 246 for (i = 0; i < priv->num_tx_queues; i++) 247 priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size; 248 249 priv->total_rx_ring_size = 0; 250 for (i = 0; i < priv->num_rx_queues; i++) 251 priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size; 252 253 /* Allocate memory for the buffer descriptors */ 254 vaddr = dma_alloc_coherent(dev, 255 sizeof(struct txbd8) * priv->total_tx_ring_size + 256 sizeof(struct rxbd8) * priv->total_rx_ring_size, 257 &addr, GFP_KERNEL); 258 if (!vaddr) { 259 if (netif_msg_ifup(priv)) 260 pr_err("%s: Could not allocate buffer descriptors!\n", 261 ndev->name); 262 return -ENOMEM; 263 } 264 265 for (i = 0; i < priv->num_tx_queues; i++) { 266 tx_queue = priv->tx_queue[i]; 267 tx_queue->tx_bd_base = (struct txbd8 *) vaddr; 268 tx_queue->tx_bd_dma_base = addr; 269 tx_queue->dev = ndev; 270 /* enet DMA only understands physical addresses */ 271 addr += sizeof(struct txbd8) *tx_queue->tx_ring_size; 272 vaddr += sizeof(struct txbd8) *tx_queue->tx_ring_size; 273 } 274 275 /* Start the rx descriptor ring where the tx ring leaves off */ 276 for (i = 0; i < priv->num_rx_queues; i++) { 277 rx_queue = priv->rx_queue[i]; 278 rx_queue->rx_bd_base = (struct rxbd8 *) vaddr; 279 
rx_queue->rx_bd_dma_base = addr; 280 rx_queue->dev = ndev; 281 addr += sizeof (struct rxbd8) * rx_queue->rx_ring_size; 282 vaddr += sizeof (struct rxbd8) * rx_queue->rx_ring_size; 283 } 284 285 /* Setup the skbuff rings */ 286 for (i = 0; i < priv->num_tx_queues; i++) { 287 tx_queue = priv->tx_queue[i]; 288 tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) * 289 tx_queue->tx_ring_size, GFP_KERNEL); 290 if (!tx_queue->tx_skbuff) { 291 if (netif_msg_ifup(priv)) 292 pr_err("%s: Could not allocate tx_skbuff\n", 293 ndev->name); 294 goto cleanup; 295 } 296 297 for (k = 0; k < tx_queue->tx_ring_size; k++) 298 tx_queue->tx_skbuff[k] = NULL; 299 } 300 301 for (i = 0; i < priv->num_rx_queues; i++) { 302 rx_queue = priv->rx_queue[i]; 303 rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) * 304 rx_queue->rx_ring_size, GFP_KERNEL); 305 306 if (!rx_queue->rx_skbuff) { 307 if (netif_msg_ifup(priv)) 308 pr_err("%s: Could not allocate rx_skbuff\n", 309 ndev->name); 310 goto cleanup; 311 } 312 313 for (j = 0; j < rx_queue->rx_ring_size; j++) 314 rx_queue->rx_skbuff[j] = NULL; 315 } 316 317 if (gfar_init_bds(ndev)) 318 goto cleanup; 319 320 return 0; 321 322cleanup: 323 free_skb_resources(priv); 324 return -ENOMEM; 325} 326 327static void gfar_init_tx_rx_base(struct gfar_private *priv) 328{ 329 struct gfar __iomem *regs = priv->gfargrp[0].regs; 330 u32 __iomem *baddr; 331 int i; 332 333 baddr = &regs->tbase0; 334 for(i = 0; i < priv->num_tx_queues; i++) { 335 gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base); 336 baddr += 2; 337 } 338 339 baddr = &regs->rbase0; 340 for(i = 0; i < priv->num_rx_queues; i++) { 341 gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base); 342 baddr += 2; 343 } 344} 345 346static void gfar_init_mac(struct net_device *ndev) 347{ 348 struct gfar_private *priv = netdev_priv(ndev); 349 struct gfar __iomem *regs = priv->gfargrp[0].regs; 350 u32 rctrl = 0; 351 u32 tctrl = 0; 352 u32 attrs = 0; 353 354 /* write the tx/rx base registers */ 355 gfar_init_tx_rx_base(priv); 356 357 /* Configure the coalescing support */ 358 gfar_configure_coalescing(priv, 0xFF, 0xFF); 359 360 if (priv->rx_filer_enable) { 361 rctrl |= RCTRL_FILREN; 362 /* Program the RIR0 reg with the required distribution */ 363 gfar_write(&regs->rir0, DEFAULT_RIR0); 364 } 365 366 if (priv->rx_csum_enable) 367 rctrl |= RCTRL_CHECKSUMMING; 368 369 if (priv->extended_hash) { 370 rctrl |= RCTRL_EXTHASH; 371 372 gfar_clear_exact_match(ndev); 373 rctrl |= RCTRL_EMEN; 374 } 375 376 if (priv->padding) { 377 rctrl &= ~RCTRL_PAL_MASK; 378 rctrl |= RCTRL_PADDING(priv->padding); 379 } 380 381 /* keep vlan related bits if it's enabled */ 382 if (priv->vlgrp) { 383 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; 384 tctrl |= TCTRL_VLINS; 385 } 386 387 /* Init rctrl based on our settings */ 388 gfar_write(&regs->rctrl, rctrl); 389 390 if (ndev->features & NETIF_F_IP_CSUM) 391 tctrl |= TCTRL_INIT_CSUM; 392 393 tctrl |= TCTRL_TXSCHED_PRIO; 394 395 gfar_write(&regs->tctrl, tctrl); 396 397 /* Set the extraction length and index */ 398 attrs = ATTRELI_EL(priv->rx_stash_size) | 399 ATTRELI_EI(priv->rx_stash_index); 400 401 gfar_write(&regs->attreli, attrs); 402 403 /* Start with defaults, and add stashing or locking 404 * depending on the approprate variables */ 405 attrs = ATTR_INIT_SETTINGS; 406 407 if (priv->bd_stash_en) 408 attrs |= ATTR_BDSTASH; 409 410 if (priv->rx_stash_size != 0) 411 attrs |= ATTR_BUFSTASH; 412 413 gfar_write(&regs->attr, attrs); 414 415 gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold); 416 
gfar_write(&regs->fifo_tx_starve, priv->fifo_starve); 417 gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off); 418} 419 420static struct net_device_stats *gfar_get_stats(struct net_device *dev) 421{ 422 struct gfar_private *priv = netdev_priv(dev); 423 struct netdev_queue *txq; 424 unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0; 425 unsigned long tx_packets = 0, tx_bytes = 0; 426 int i = 0; 427 428 for (i = 0; i < priv->num_rx_queues; i++) { 429 rx_packets += priv->rx_queue[i]->stats.rx_packets; 430 rx_bytes += priv->rx_queue[i]->stats.rx_bytes; 431 rx_dropped += priv->rx_queue[i]->stats.rx_dropped; 432 } 433 434 dev->stats.rx_packets = rx_packets; 435 dev->stats.rx_bytes = rx_bytes; 436 dev->stats.rx_dropped = rx_dropped; 437 438 for (i = 0; i < priv->num_tx_queues; i++) { 439 txq = netdev_get_tx_queue(dev, i); 440 tx_bytes += txq->tx_bytes; 441 tx_packets += txq->tx_packets; 442 } 443 444 dev->stats.tx_bytes = tx_bytes; 445 dev->stats.tx_packets = tx_packets; 446 447 return &dev->stats; 448} 449 450static const struct net_device_ops gfar_netdev_ops = { 451 .ndo_open = gfar_enet_open, 452 .ndo_start_xmit = gfar_start_xmit, 453 .ndo_stop = gfar_close, 454 .ndo_change_mtu = gfar_change_mtu, 455 .ndo_set_multicast_list = gfar_set_multi, 456 .ndo_tx_timeout = gfar_timeout, 457 .ndo_do_ioctl = gfar_ioctl, 458 .ndo_select_queue = gfar_select_queue, 459 .ndo_get_stats = gfar_get_stats, 460 .ndo_vlan_rx_register = gfar_vlan_rx_register, 461 .ndo_set_mac_address = eth_mac_addr, 462 .ndo_validate_addr = eth_validate_addr, 463#ifdef CONFIG_NET_POLL_CONTROLLER 464 .ndo_poll_controller = gfar_netpoll, 465#endif 466}; 467 468unsigned int ftp_rqfpr[MAX_FILER_IDX + 1]; 469unsigned int ftp_rqfcr[MAX_FILER_IDX + 1]; 470 471void lock_rx_qs(struct gfar_private *priv) 472{ 473 int i = 0x0; 474 475 for (i = 0; i < priv->num_rx_queues; i++) 476 spin_lock(&priv->rx_queue[i]->rxlock); 477} 478 479void lock_tx_qs(struct gfar_private *priv) 480{ 481 int i = 0x0; 482 483 for (i = 0; i < priv->num_tx_queues; i++) 484 spin_lock(&priv->tx_queue[i]->txlock); 485} 486 487void unlock_rx_qs(struct gfar_private *priv) 488{ 489 int i = 0x0; 490 491 for (i = 0; i < priv->num_rx_queues; i++) 492 spin_unlock(&priv->rx_queue[i]->rxlock); 493} 494 495void unlock_tx_qs(struct gfar_private *priv) 496{ 497 int i = 0x0; 498 499 for (i = 0; i < priv->num_tx_queues; i++) 500 spin_unlock(&priv->tx_queue[i]->txlock); 501} 502 503/* Returns 1 if incoming frames use an FCB */ 504static inline int gfar_uses_fcb(struct gfar_private *priv) 505{ 506 return priv->vlgrp || priv->rx_csum_enable; 507} 508 509u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb) 510{ 511 return skb_get_queue_mapping(skb); 512} 513static void free_tx_pointers(struct gfar_private *priv) 514{ 515 int i = 0; 516 517 for (i = 0; i < priv->num_tx_queues; i++) 518 kfree(priv->tx_queue[i]); 519} 520 521static void free_rx_pointers(struct gfar_private *priv) 522{ 523 int i = 0; 524 525 for (i = 0; i < priv->num_rx_queues; i++) 526 kfree(priv->rx_queue[i]); 527} 528 529static void unmap_group_regs(struct gfar_private *priv) 530{ 531 int i = 0; 532 533 for (i = 0; i < MAXGROUPS; i++) 534 if (priv->gfargrp[i].regs) 535 iounmap(priv->gfargrp[i].regs); 536} 537 538static void disable_napi(struct gfar_private *priv) 539{ 540 int i = 0; 541 542 for (i = 0; i < priv->num_grps; i++) 543 napi_disable(&priv->gfargrp[i].napi); 544} 545 546static void enable_napi(struct gfar_private *priv) 547{ 548 int i = 0; 549 550 for (i = 0; i < priv->num_grps; 
i++) 551 napi_enable(&priv->gfargrp[i].napi); 552} 553 554static int gfar_parse_group(struct device_node *np, 555 struct gfar_private *priv, const char *model) 556{ 557 u32 *queue_mask; 558 u64 addr, size; 559 560 addr = of_translate_address(np, 561 of_get_address(np, 0, &size, NULL)); 562 priv->gfargrp[priv->num_grps].regs = ioremap(addr, size); 563 564 if (!priv->gfargrp[priv->num_grps].regs) 565 return -ENOMEM; 566 567 priv->gfargrp[priv->num_grps].interruptTransmit = 568 irq_of_parse_and_map(np, 0); 569 570 /* If we aren't the FEC we have multiple interrupts */ 571 if (model && strcasecmp(model, "FEC")) { 572 priv->gfargrp[priv->num_grps].interruptReceive = 573 irq_of_parse_and_map(np, 1); 574 priv->gfargrp[priv->num_grps].interruptError = 575 irq_of_parse_and_map(np,2); 576 if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 || 577 priv->gfargrp[priv->num_grps].interruptReceive < 0 || 578 priv->gfargrp[priv->num_grps].interruptError < 0) { 579 return -EINVAL; 580 } 581 } 582 583 priv->gfargrp[priv->num_grps].grp_id = priv->num_grps; 584 priv->gfargrp[priv->num_grps].priv = priv; 585 spin_lock_init(&priv->gfargrp[priv->num_grps].grplock); 586 if(priv->mode == MQ_MG_MODE) { 587 queue_mask = (u32 *)of_get_property(np, 588 "fsl,rx-bit-map", NULL); 589 priv->gfargrp[priv->num_grps].rx_bit_map = 590 queue_mask ? *queue_mask :(DEFAULT_MAPPING >> priv->num_grps); 591 queue_mask = (u32 *)of_get_property(np, 592 "fsl,tx-bit-map", NULL); 593 priv->gfargrp[priv->num_grps].tx_bit_map = 594 queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps); 595 } else { 596 priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF; 597 priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF; 598 } 599 priv->num_grps++; 600 601 return 0; 602} 603 604static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev) 605{ 606 const char *model; 607 const char *ctype; 608 const void *mac_addr; 609 int err = 0, i; 610 struct net_device *dev = NULL; 611 struct gfar_private *priv = NULL; 612 struct device_node *np = ofdev->node; 613 struct device_node *child = NULL; 614 const u32 *stash; 615 const u32 *stash_len; 616 const u32 *stash_idx; 617 unsigned int num_tx_qs, num_rx_qs; 618 u32 *tx_queues, *rx_queues; 619 620 if (!np || !of_device_is_available(np)) 621 return -ENODEV; 622 623 /* parse the num of tx and rx queues */ 624 tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL); 625 num_tx_qs = tx_queues ? *tx_queues : 1; 626 627 if (num_tx_qs > MAX_TX_QS) { 628 printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n", 629 num_tx_qs, MAX_TX_QS); 630 printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n"); 631 return -EINVAL; 632 } 633 634 rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL); 635 num_rx_qs = rx_queues ? 
*rx_queues : 1; 636 637 if (num_rx_qs > MAX_RX_QS) { 638 printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n", 639 num_tx_qs, MAX_TX_QS); 640 printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n"); 641 return -EINVAL; 642 } 643 644 *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs); 645 dev = *pdev; 646 if (NULL == dev) 647 return -ENOMEM; 648 649 priv = netdev_priv(dev); 650 priv->node = ofdev->node; 651 priv->ndev = dev; 652 653 dev->num_tx_queues = num_tx_qs; 654 dev->real_num_tx_queues = num_tx_qs; 655 priv->num_tx_queues = num_tx_qs; 656 priv->num_rx_queues = num_rx_qs; 657 priv->num_grps = 0x0; 658 659 model = of_get_property(np, "model", NULL); 660 661 for (i = 0; i < MAXGROUPS; i++) 662 priv->gfargrp[i].regs = NULL; 663 664 /* Parse and initialize group specific information */ 665 if (of_device_is_compatible(np, "fsl,etsec2")) { 666 priv->mode = MQ_MG_MODE; 667 for_each_child_of_node(np, child) { 668 err = gfar_parse_group(child, priv, model); 669 if (err) 670 goto err_grp_init; 671 } 672 } else { 673 priv->mode = SQ_SG_MODE; 674 err = gfar_parse_group(np, priv, model); 675 if(err) 676 goto err_grp_init; 677 } 678 679 for (i = 0; i < priv->num_tx_queues; i++) 680 priv->tx_queue[i] = NULL; 681 for (i = 0; i < priv->num_rx_queues; i++) 682 priv->rx_queue[i] = NULL; 683 684 for (i = 0; i < priv->num_tx_queues; i++) { 685 priv->tx_queue[i] = (struct gfar_priv_tx_q *)kmalloc( 686 sizeof (struct gfar_priv_tx_q), GFP_KERNEL); 687 if (!priv->tx_queue[i]) { 688 err = -ENOMEM; 689 goto tx_alloc_failed; 690 } 691 priv->tx_queue[i]->tx_skbuff = NULL; 692 priv->tx_queue[i]->qindex = i; 693 priv->tx_queue[i]->dev = dev; 694 spin_lock_init(&(priv->tx_queue[i]->txlock)); 695 } 696 697 for (i = 0; i < priv->num_rx_queues; i++) { 698 priv->rx_queue[i] = (struct gfar_priv_rx_q *)kmalloc( 699 sizeof (struct gfar_priv_rx_q), GFP_KERNEL); 700 if (!priv->rx_queue[i]) { 701 err = -ENOMEM; 702 goto rx_alloc_failed; 703 } 704 priv->rx_queue[i]->rx_skbuff = NULL; 705 priv->rx_queue[i]->qindex = i; 706 priv->rx_queue[i]->dev = dev; 707 spin_lock_init(&(priv->rx_queue[i]->rxlock)); 708 } 709 710 711 stash = of_get_property(np, "bd-stash", NULL); 712 713 if (stash) { 714 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING; 715 priv->bd_stash_en = 1; 716 } 717 718 stash_len = of_get_property(np, "rx-stash-len", NULL); 719 720 if (stash_len) 721 priv->rx_stash_size = *stash_len; 722 723 stash_idx = of_get_property(np, "rx-stash-idx", NULL); 724 725 if (stash_idx) 726 priv->rx_stash_index = *stash_idx; 727 728 if (stash_len || stash_idx) 729 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING; 730 731 mac_addr = of_get_mac_address(np); 732 if (mac_addr) 733 memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN); 734 735 if (model && !strcasecmp(model, "TSEC")) 736 priv->device_flags = 737 FSL_GIANFAR_DEV_HAS_GIGABIT | 738 FSL_GIANFAR_DEV_HAS_COALESCE | 739 FSL_GIANFAR_DEV_HAS_RMON | 740 FSL_GIANFAR_DEV_HAS_MULTI_INTR; 741 if (model && !strcasecmp(model, "eTSEC")) 742 priv->device_flags = 743 FSL_GIANFAR_DEV_HAS_GIGABIT | 744 FSL_GIANFAR_DEV_HAS_COALESCE | 745 FSL_GIANFAR_DEV_HAS_RMON | 746 FSL_GIANFAR_DEV_HAS_MULTI_INTR | 747 FSL_GIANFAR_DEV_HAS_PADDING | 748 FSL_GIANFAR_DEV_HAS_CSUM | 749 FSL_GIANFAR_DEV_HAS_VLAN | 750 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | 751 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH; 752 753 ctype = of_get_property(np, "phy-connection-type", NULL); 754 755 /* We only care about rgmii-id. 
The rest are autodetected */ 756 if (ctype && !strcmp(ctype, "rgmii-id")) 757 priv->interface = PHY_INTERFACE_MODE_RGMII_ID; 758 else 759 priv->interface = PHY_INTERFACE_MODE_MII; 760 761 if (of_get_property(np, "fsl,magic-packet", NULL)) 762 priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET; 763 764 priv->phy_node = of_parse_phandle(np, "phy-handle", 0); 765 766 /* Find the TBI PHY. If it's not there, we don't support SGMII */ 767 priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0); 768 769 return 0; 770 771rx_alloc_failed: 772 free_rx_pointers(priv); 773tx_alloc_failed: 774 free_tx_pointers(priv); 775err_grp_init: 776 unmap_group_regs(priv); 777 free_netdev(dev); 778 return err; 779} 780 781/* Ioctl MII Interface */ 782static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 783{ 784 struct gfar_private *priv = netdev_priv(dev); 785 786 if (!netif_running(dev)) 787 return -EINVAL; 788 789 if (!priv->phydev) 790 return -ENODEV; 791 792 return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd); 793} 794 795static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs) 796{ 797 unsigned int new_bit_map = 0x0; 798 int mask = 0x1 << (max_qs - 1), i; 799 for (i = 0; i < max_qs; i++) { 800 if (bit_map & mask) 801 new_bit_map = new_bit_map + (1 << i); 802 mask = mask >> 0x1; 803 } 804 return new_bit_map; 805} 806 807static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar, 808 u32 class) 809{ 810 u32 rqfpr = FPR_FILER_MASK; 811 u32 rqfcr = 0x0; 812 813 rqfar--; 814 rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT; 815 ftp_rqfpr[rqfar] = rqfpr; 816 ftp_rqfcr[rqfar] = rqfcr; 817 gfar_write_filer(priv, rqfar, rqfcr, rqfpr); 818 819 rqfar--; 820 rqfcr = RQFCR_CMP_NOMATCH; 821 ftp_rqfpr[rqfar] = rqfpr; 822 ftp_rqfcr[rqfar] = rqfcr; 823 gfar_write_filer(priv, rqfar, rqfcr, rqfpr); 824 825 rqfar--; 826 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND; 827 rqfpr = class; 828 ftp_rqfcr[rqfar] = rqfcr; 829 ftp_rqfpr[rqfar] = rqfpr; 830 gfar_write_filer(priv, rqfar, rqfcr, rqfpr); 831 832 rqfar--; 833 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND; 834 rqfpr = class; 835 ftp_rqfcr[rqfar] = rqfcr; 836 ftp_rqfpr[rqfar] = rqfpr; 837 gfar_write_filer(priv, rqfar, rqfcr, rqfpr); 838 839 return rqfar; 840} 841 842static void gfar_init_filer_table(struct gfar_private *priv) 843{ 844 int i = 0x0; 845 u32 rqfar = MAX_FILER_IDX; 846 u32 rqfcr = 0x0; 847 u32 rqfpr = FPR_FILER_MASK; 848 849 /* Default rule */ 850 rqfcr = RQFCR_CMP_MATCH; 851 ftp_rqfcr[rqfar] = rqfcr; 852 ftp_rqfpr[rqfar] = rqfpr; 853 gfar_write_filer(priv, rqfar, rqfcr, rqfpr); 854 855 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6); 856 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP); 857 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP); 858 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4); 859 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP); 860 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP); 861 862 /* cur_filer_idx indicated the fisrt non-masked rule */ 863 priv->cur_filer_idx = rqfar; 864 865 /* Rest are masked rules */ 866 rqfcr = RQFCR_CMP_NOMATCH; 867 for (i = 0; i < rqfar; i++) { 868 ftp_rqfcr[i] = rqfcr; 869 ftp_rqfpr[i] = rqfpr; 870 gfar_write_filer(priv, i, rqfcr, rqfpr); 871 } 872} 873 874/* Set up the ethernet device structure, private data, 875 * and anything else we need before we start */ 876static int gfar_probe(struct of_device *ofdev, 877 const struct 
of_device_id *match) 878{ 879 u32 tempval; 880 struct net_device *dev = NULL; 881 struct gfar_private *priv = NULL; 882 struct gfar __iomem *regs = NULL; 883 int err = 0, i, grp_idx = 0; 884 int len_devname; 885 u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0; 886 u32 isrg = 0; 887 u32 __iomem *baddr; 888 889 err = gfar_of_init(ofdev, &dev); 890 891 if (err) 892 return err; 893 894 priv = netdev_priv(dev); 895 priv->ndev = dev; 896 priv->ofdev = ofdev; 897 priv->node = ofdev->node; 898 SET_NETDEV_DEV(dev, &ofdev->dev); 899 900 spin_lock_init(&priv->bflock); 901 INIT_WORK(&priv->reset_task, gfar_reset_task); 902 903 dev_set_drvdata(&ofdev->dev, priv); 904 regs = priv->gfargrp[0].regs; 905 906 /* Stop the DMA engine now, in case it was running before */ 907 /* (The firmware could have used it, and left it running). */ 908 gfar_halt(dev); 909 910 /* Reset MAC layer */ 911 gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET); 912 913 /* We need to delay at least 3 TX clocks */ 914 udelay(2); 915 916 tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); 917 gfar_write(&regs->maccfg1, tempval); 918 919 /* Initialize MACCFG2. */ 920 gfar_write(&regs->maccfg2, MACCFG2_INIT_SETTINGS); 921 922 /* Initialize ECNTRL */ 923 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS); 924 925 /* Set the dev->base_addr to the gfar reg region */ 926 dev->base_addr = (unsigned long) regs; 927 928 SET_NETDEV_DEV(dev, &ofdev->dev); 929 930 /* Fill in the dev structure */ 931 dev->watchdog_timeo = TX_TIMEOUT; 932 dev->mtu = 1500; 933 dev->netdev_ops = &gfar_netdev_ops; 934 dev->ethtool_ops = &gfar_ethtool_ops; 935 936 /* Register for napi ...We are registering NAPI for each grp */ 937 for (i = 0; i < priv->num_grps; i++) 938 netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT); 939 940 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { 941 priv->rx_csum_enable = 1; 942 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA; 943 } else 944 priv->rx_csum_enable = 0; 945 946 priv->vlgrp = NULL; 947 948 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) 949 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 950 951 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { 952 priv->extended_hash = 1; 953 priv->hash_width = 9; 954 955 priv->hash_regs[0] = &regs->igaddr0; 956 priv->hash_regs[1] = &regs->igaddr1; 957 priv->hash_regs[2] = &regs->igaddr2; 958 priv->hash_regs[3] = &regs->igaddr3; 959 priv->hash_regs[4] = &regs->igaddr4; 960 priv->hash_regs[5] = &regs->igaddr5; 961 priv->hash_regs[6] = &regs->igaddr6; 962 priv->hash_regs[7] = &regs->igaddr7; 963 priv->hash_regs[8] = &regs->gaddr0; 964 priv->hash_regs[9] = &regs->gaddr1; 965 priv->hash_regs[10] = &regs->gaddr2; 966 priv->hash_regs[11] = &regs->gaddr3; 967 priv->hash_regs[12] = &regs->gaddr4; 968 priv->hash_regs[13] = &regs->gaddr5; 969 priv->hash_regs[14] = &regs->gaddr6; 970 priv->hash_regs[15] = &regs->gaddr7; 971 972 } else { 973 priv->extended_hash = 0; 974 priv->hash_width = 8; 975 976 priv->hash_regs[0] = &regs->gaddr0; 977 priv->hash_regs[1] = &regs->gaddr1; 978 priv->hash_regs[2] = &regs->gaddr2; 979 priv->hash_regs[3] = &regs->gaddr3; 980 priv->hash_regs[4] = &regs->gaddr4; 981 priv->hash_regs[5] = &regs->gaddr5; 982 priv->hash_regs[6] = &regs->gaddr6; 983 priv->hash_regs[7] = &regs->gaddr7; 984 } 985 986 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING) 987 priv->padding = DEFAULT_PADDING; 988 else 989 priv->padding = 0; 990 991 if (dev->features & NETIF_F_IP_CSUM) 992 dev->hard_header_len += GMAC_FCB_LEN; 993 994 /* 
Program the isrg regs only if number of grps > 1 */ 995 if (priv->num_grps > 1) { 996 baddr = &regs->isrg0; 997 for (i = 0; i < priv->num_grps; i++) { 998 isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX); 999 isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX); 1000 gfar_write(baddr, isrg); 1001 baddr++; 1002 isrg = 0x0; 1003 } 1004 } 1005 1006 /* Need to reverse the bit maps as bit_map's MSB is q0 1007 * but, for_each_bit parses from right to left, which 1008 * basically reverses the queue numbers */ 1009 for (i = 0; i< priv->num_grps; i++) { 1010 priv->gfargrp[i].tx_bit_map = reverse_bitmap( 1011 priv->gfargrp[i].tx_bit_map, MAX_TX_QS); 1012 priv->gfargrp[i].rx_bit_map = reverse_bitmap( 1013 priv->gfargrp[i].rx_bit_map, MAX_RX_QS); 1014 } 1015 1016 /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values, 1017 * also assign queues to groups */ 1018 for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) { 1019 priv->gfargrp[grp_idx].num_rx_queues = 0x0; 1020 for_each_bit(i, &priv->gfargrp[grp_idx].rx_bit_map, 1021 priv->num_rx_queues) { 1022 priv->gfargrp[grp_idx].num_rx_queues++; 1023 priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx]; 1024 rstat = rstat | (RSTAT_CLEAR_RHALT >> i); 1025 rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i); 1026 } 1027 priv->gfargrp[grp_idx].num_tx_queues = 0x0; 1028 for_each_bit (i, &priv->gfargrp[grp_idx].tx_bit_map, 1029 priv->num_tx_queues) { 1030 priv->gfargrp[grp_idx].num_tx_queues++; 1031 priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx]; 1032 tstat = tstat | (TSTAT_CLEAR_THALT >> i); 1033 tqueue = tqueue | (TQUEUE_EN0 >> i); 1034 } 1035 priv->gfargrp[grp_idx].rstat = rstat; 1036 priv->gfargrp[grp_idx].tstat = tstat; 1037 rstat = tstat =0; 1038 } 1039 1040 gfar_write(&regs->rqueue, rqueue); 1041 gfar_write(&regs->tqueue, tqueue); 1042 1043 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; 1044 1045 /* Initializing some of the rx/tx queue level parameters */ 1046 for (i = 0; i < priv->num_tx_queues; i++) { 1047 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE; 1048 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE; 1049 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE; 1050 priv->tx_queue[i]->txic = DEFAULT_TXIC; 1051 } 1052 1053 for (i = 0; i < priv->num_rx_queues; i++) { 1054 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE; 1055 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE; 1056 priv->rx_queue[i]->rxic = DEFAULT_RXIC; 1057 } 1058 1059 /* enable filer if using multiple RX queues*/ 1060 if(priv->num_rx_queues > 1) 1061 priv->rx_filer_enable = 1; 1062 /* Enable most messages by default */ 1063 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; 1064 1065 /* Carrier starts down, phylib will bring it up */ 1066 netif_carrier_off(dev); 1067 1068 err = register_netdev(dev); 1069 1070 if (err) { 1071 printk(KERN_ERR "%s: Cannot register net device, aborting.\n", 1072 dev->name); 1073 goto register_fail; 1074 } 1075 1076 device_init_wakeup(&dev->dev, 1077 priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1078 1079 /* fill out IRQ number and name fields */ 1080 len_devname = strlen(dev->name); 1081 for (i = 0; i < priv->num_grps; i++) { 1082 strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name, 1083 len_devname); 1084 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 1085 strncpy(&priv->gfargrp[i].int_name_tx[len_devname], 1086 "_g", sizeof("_g")); 1087 priv->gfargrp[i].int_name_tx[ 1088 strlen(priv->gfargrp[i].int_name_tx)] = i+48; 1089 strncpy(&priv->gfargrp[i].int_name_tx[strlen( 1090 priv->gfargrp[i].int_name_tx)], 
1091 "_tx", sizeof("_tx") + 1); 1092 1093 strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name, 1094 len_devname); 1095 strncpy(&priv->gfargrp[i].int_name_rx[len_devname], 1096 "_g", sizeof("_g")); 1097 priv->gfargrp[i].int_name_rx[ 1098 strlen(priv->gfargrp[i].int_name_rx)] = i+48; 1099 strncpy(&priv->gfargrp[i].int_name_rx[strlen( 1100 priv->gfargrp[i].int_name_rx)], 1101 "_rx", sizeof("_rx") + 1); 1102 1103 strncpy(&priv->gfargrp[i].int_name_er[0], dev->name, 1104 len_devname); 1105 strncpy(&priv->gfargrp[i].int_name_er[len_devname], 1106 "_g", sizeof("_g")); 1107 priv->gfargrp[i].int_name_er[strlen( 1108 priv->gfargrp[i].int_name_er)] = i+48; 1109 strncpy(&priv->gfargrp[i].int_name_er[strlen(\ 1110 priv->gfargrp[i].int_name_er)], 1111 "_er", sizeof("_er") + 1); 1112 } else 1113 priv->gfargrp[i].int_name_tx[len_devname] = '\0'; 1114 } 1115 1116 /* Initialize the filer table */ 1117 gfar_init_filer_table(priv); 1118 1119 /* Create all the sysfs files */ 1120 gfar_init_sysfs(dev); 1121 1122 /* Print out the device info */ 1123 printk(KERN_INFO DEVICE_NAME "%pM\n", dev->name, dev->dev_addr); 1124 1125 /* Even more device info helps when determining which kernel */ 1126 /* provided which set of benchmarks. */ 1127 printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); 1128 for (i = 0; i < priv->num_rx_queues; i++) 1129 printk(KERN_INFO "%s: :RX BD ring size for Q[%d]: %d\n", 1130 dev->name, i, priv->rx_queue[i]->rx_ring_size); 1131 for(i = 0; i < priv->num_tx_queues; i++) 1132 printk(KERN_INFO "%s:TX BD ring size for Q[%d]: %d\n", 1133 dev->name, i, priv->tx_queue[i]->tx_ring_size); 1134 1135 return 0; 1136 1137register_fail: 1138 unmap_group_regs(priv); 1139 free_tx_pointers(priv); 1140 free_rx_pointers(priv); 1141 if (priv->phy_node) 1142 of_node_put(priv->phy_node); 1143 if (priv->tbi_node) 1144 of_node_put(priv->tbi_node); 1145 free_netdev(dev); 1146 return err; 1147} 1148 1149static int gfar_remove(struct of_device *ofdev) 1150{ 1151 struct gfar_private *priv = dev_get_drvdata(&ofdev->dev); 1152 1153 if (priv->phy_node) 1154 of_node_put(priv->phy_node); 1155 if (priv->tbi_node) 1156 of_node_put(priv->tbi_node); 1157 1158 dev_set_drvdata(&ofdev->dev, NULL); 1159 1160 unregister_netdev(priv->ndev); 1161 unmap_group_regs(priv); 1162 free_netdev(priv->ndev); 1163 1164 return 0; 1165} 1166 1167#ifdef CONFIG_PM 1168 1169static int gfar_suspend(struct device *dev) 1170{ 1171 struct gfar_private *priv = dev_get_drvdata(dev); 1172 struct net_device *ndev = priv->ndev; 1173 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1174 unsigned long flags; 1175 u32 tempval; 1176 1177 int magic_packet = priv->wol_en && 1178 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1179 1180 netif_device_detach(ndev); 1181 1182 if (netif_running(ndev)) { 1183 1184 local_irq_save(flags); 1185 lock_tx_qs(priv); 1186 lock_rx_qs(priv); 1187 1188 gfar_halt_nodisable(ndev); 1189 1190 /* Disable Tx, and Rx if wake-on-LAN is disabled. 
*/ 1191 tempval = gfar_read(&regs->maccfg1); 1192 1193 tempval &= ~MACCFG1_TX_EN; 1194 1195 if (!magic_packet) 1196 tempval &= ~MACCFG1_RX_EN; 1197 1198 gfar_write(&regs->maccfg1, tempval); 1199 1200 unlock_rx_qs(priv); 1201 unlock_tx_qs(priv); 1202 local_irq_restore(flags); 1203 1204 disable_napi(priv); 1205 1206 if (magic_packet) { 1207 /* Enable interrupt on Magic Packet */ 1208 gfar_write(&regs->imask, IMASK_MAG); 1209 1210 /* Enable Magic Packet mode */ 1211 tempval = gfar_read(&regs->maccfg2); 1212 tempval |= MACCFG2_MPEN; 1213 gfar_write(&regs->maccfg2, tempval); 1214 } else { 1215 phy_stop(priv->phydev); 1216 } 1217 } 1218 1219 return 0; 1220} 1221 1222static int gfar_resume(struct device *dev) 1223{ 1224 struct gfar_private *priv = dev_get_drvdata(dev); 1225 struct net_device *ndev = priv->ndev; 1226 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1227 unsigned long flags; 1228 u32 tempval; 1229 int magic_packet = priv->wol_en && 1230 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1231 1232 if (!netif_running(ndev)) { 1233 netif_device_attach(ndev); 1234 return 0; 1235 } 1236 1237 if (!magic_packet && priv->phydev) 1238 phy_start(priv->phydev); 1239 1240 /* Disable Magic Packet mode, in case something 1241 * else woke us up. 1242 */ 1243 local_irq_save(flags); 1244 lock_tx_qs(priv); 1245 lock_rx_qs(priv); 1246 1247 tempval = gfar_read(&regs->maccfg2); 1248 tempval &= ~MACCFG2_MPEN; 1249 gfar_write(&regs->maccfg2, tempval); 1250 1251 gfar_start(ndev); 1252 1253 unlock_rx_qs(priv); 1254 unlock_tx_qs(priv); 1255 local_irq_restore(flags); 1256 1257 netif_device_attach(ndev); 1258 1259 enable_napi(priv); 1260 1261 return 0; 1262} 1263 1264static int gfar_restore(struct device *dev) 1265{ 1266 struct gfar_private *priv = dev_get_drvdata(dev); 1267 struct net_device *ndev = priv->ndev; 1268 1269 if (!netif_running(ndev)) 1270 return 0; 1271 1272 gfar_init_bds(ndev); 1273 init_registers(ndev); 1274 gfar_set_mac_address(ndev); 1275 gfar_init_mac(ndev); 1276 gfar_start(ndev); 1277 1278 priv->oldlink = 0; 1279 priv->oldspeed = 0; 1280 priv->oldduplex = -1; 1281 1282 if (priv->phydev) 1283 phy_start(priv->phydev); 1284 1285 netif_device_attach(ndev); 1286 enable_napi(priv); 1287 1288 return 0; 1289} 1290 1291static struct dev_pm_ops gfar_pm_ops = { 1292 .suspend = gfar_suspend, 1293 .resume = gfar_resume, 1294 .freeze = gfar_suspend, 1295 .thaw = gfar_resume, 1296 .restore = gfar_restore, 1297}; 1298 1299#define GFAR_PM_OPS (&gfar_pm_ops) 1300 1301static int gfar_legacy_suspend(struct of_device *ofdev, pm_message_t state) 1302{ 1303 return gfar_suspend(&ofdev->dev); 1304} 1305 1306static int gfar_legacy_resume(struct of_device *ofdev) 1307{ 1308 return gfar_resume(&ofdev->dev); 1309} 1310 1311#else 1312 1313#define GFAR_PM_OPS NULL 1314#define gfar_legacy_suspend NULL 1315#define gfar_legacy_resume NULL 1316 1317#endif 1318 1319/* Reads the controller's registers to determine what interface 1320 * connects it to the PHY. 
1321 */ 1322static phy_interface_t gfar_get_interface(struct net_device *dev) 1323{ 1324 struct gfar_private *priv = netdev_priv(dev); 1325 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1326 u32 ecntrl; 1327 1328 ecntrl = gfar_read(&regs->ecntrl); 1329 1330 if (ecntrl & ECNTRL_SGMII_MODE) 1331 return PHY_INTERFACE_MODE_SGMII; 1332 1333 if (ecntrl & ECNTRL_TBI_MODE) { 1334 if (ecntrl & ECNTRL_REDUCED_MODE) 1335 return PHY_INTERFACE_MODE_RTBI; 1336 else 1337 return PHY_INTERFACE_MODE_TBI; 1338 } 1339 1340 if (ecntrl & ECNTRL_REDUCED_MODE) { 1341 if (ecntrl & ECNTRL_REDUCED_MII_MODE) 1342 return PHY_INTERFACE_MODE_RMII; 1343 else { 1344 phy_interface_t interface = priv->interface; 1345 1346 /* 1347 * This isn't autodetected right now, so it must 1348 * be set by the device tree or platform code. 1349 */ 1350 if (interface == PHY_INTERFACE_MODE_RGMII_ID) 1351 return PHY_INTERFACE_MODE_RGMII_ID; 1352 1353 return PHY_INTERFACE_MODE_RGMII; 1354 } 1355 } 1356 1357 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) 1358 return PHY_INTERFACE_MODE_GMII; 1359 1360 return PHY_INTERFACE_MODE_MII; 1361} 1362 1363 1364/* Initializes driver's PHY state, and attaches to the PHY. 1365 * Returns 0 on success. 1366 */ 1367static int init_phy(struct net_device *dev) 1368{ 1369 struct gfar_private *priv = netdev_priv(dev); 1370 uint gigabit_support = 1371 priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ? 1372 SUPPORTED_1000baseT_Full : 0; 1373 phy_interface_t interface; 1374 1375 priv->oldlink = 0; 1376 priv->oldspeed = 0; 1377 priv->oldduplex = -1; 1378 1379 interface = gfar_get_interface(dev); 1380 1381 priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, 1382 interface); 1383 if (!priv->phydev) 1384 priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link, 1385 interface); 1386 if (!priv->phydev) { 1387 dev_err(&dev->dev, "could not attach to PHY\n"); 1388 return -ENODEV; 1389 } 1390 1391 if (interface == PHY_INTERFACE_MODE_SGMII) 1392 gfar_configure_serdes(dev); 1393 1394 /* Remove any features not supported by the controller */ 1395 priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support); 1396 priv->phydev->advertising = priv->phydev->supported; 1397 1398 return 0; 1399} 1400 1401/* 1402 * Initialize TBI PHY interface for communicating with the 1403 * SERDES lynx PHY on the chip. We communicate with this PHY 1404 * through the MDIO bus on each controller, treating it as a 1405 * "normal" PHY at the address found in the TBIPA register. We assume 1406 * that the TBIPA register is valid. Either the MDIO bus code will set 1407 * it to a value that doesn't conflict with other PHYs on the bus, or the 1408 * value doesn't matter, as there are no other PHYs on the bus. 1409 */ 1410static void gfar_configure_serdes(struct net_device *dev) 1411{ 1412 struct gfar_private *priv = netdev_priv(dev); 1413 struct phy_device *tbiphy; 1414 1415 if (!priv->tbi_node) { 1416 dev_warn(&dev->dev, "error: SGMII mode requires that the " 1417 "device tree specify a tbi-handle\n"); 1418 return; 1419 } 1420 1421 tbiphy = of_phy_find_device(priv->tbi_node); 1422 if (!tbiphy) { 1423 dev_err(&dev->dev, "error: Could not get TBI device\n"); 1424 return; 1425 } 1426 1427 /* 1428 * If the link is already up, we must already be ok, and don't need to 1429 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured 1430 * everything for us? Resetting it takes the link down and requires 1431 * several seconds for it to come back. 
1432 */ 1433 if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) 1434 return; 1435 1436 /* Single clk mode, mii mode off(for serdes communication) */ 1437 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); 1438 1439 phy_write(tbiphy, MII_ADVERTISE, 1440 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | 1441 ADVERTISE_1000XPSE_ASYM); 1442 1443 phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE | 1444 BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000); 1445} 1446 1447static void init_registers(struct net_device *dev) 1448{ 1449 struct gfar_private *priv = netdev_priv(dev); 1450 struct gfar __iomem *regs = NULL; 1451 int i = 0; 1452 1453 for (i = 0; i < priv->num_grps; i++) { 1454 regs = priv->gfargrp[i].regs; 1455 /* Clear IEVENT */ 1456 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR); 1457 1458 /* Initialize IMASK */ 1459 gfar_write(&regs->imask, IMASK_INIT_CLEAR); 1460 } 1461 1462 regs = priv->gfargrp[0].regs; 1463 /* Init hash registers to zero */ 1464 gfar_write(&regs->igaddr0, 0); 1465 gfar_write(&regs->igaddr1, 0); 1466 gfar_write(&regs->igaddr2, 0); 1467 gfar_write(&regs->igaddr3, 0); 1468 gfar_write(&regs->igaddr4, 0); 1469 gfar_write(&regs->igaddr5, 0); 1470 gfar_write(&regs->igaddr6, 0); 1471 gfar_write(&regs->igaddr7, 0); 1472 1473 gfar_write(&regs->gaddr0, 0); 1474 gfar_write(&regs->gaddr1, 0); 1475 gfar_write(&regs->gaddr2, 0); 1476 gfar_write(&regs->gaddr3, 0); 1477 gfar_write(&regs->gaddr4, 0); 1478 gfar_write(&regs->gaddr5, 0); 1479 gfar_write(&regs->gaddr6, 0); 1480 gfar_write(&regs->gaddr7, 0); 1481 1482 /* Zero out the rmon mib registers if it has them */ 1483 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { 1484 memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib)); 1485 1486 /* Mask off the CAM interrupts */ 1487 gfar_write(&regs->rmon.cam1, 0xffffffff); 1488 gfar_write(&regs->rmon.cam2, 0xffffffff); 1489 } 1490 1491 /* Initialize the max receive buffer length */ 1492 gfar_write(&regs->mrblr, priv->rx_buffer_size); 1493 1494 /* Initialize the Minimum Frame Length Register */ 1495 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS); 1496} 1497 1498 1499/* Halt the receive and transmit queues */ 1500static void gfar_halt_nodisable(struct net_device *dev) 1501{ 1502 struct gfar_private *priv = netdev_priv(dev); 1503 struct gfar __iomem *regs = NULL; 1504 u32 tempval; 1505 int i = 0; 1506 1507 for (i = 0; i < priv->num_grps; i++) { 1508 regs = priv->gfargrp[i].regs; 1509 /* Mask all interrupts */ 1510 gfar_write(&regs->imask, IMASK_INIT_CLEAR); 1511 1512 /* Clear all interrupts */ 1513 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR); 1514 } 1515 1516 regs = priv->gfargrp[0].regs; 1517 /* Stop the DMA, and wait for it to stop */ 1518 tempval = gfar_read(&regs->dmactrl); 1519 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) 1520 != (DMACTRL_GRS | DMACTRL_GTS)) { 1521 tempval |= (DMACTRL_GRS | DMACTRL_GTS); 1522 gfar_write(&regs->dmactrl, tempval); 1523 1524 while (!(gfar_read(&regs->ievent) & 1525 (IEVENT_GRSC | IEVENT_GTSC))) 1526 cpu_relax(); 1527 } 1528} 1529 1530/* Halt the receive and transmit queues */ 1531void gfar_halt(struct net_device *dev) 1532{ 1533 struct gfar_private *priv = netdev_priv(dev); 1534 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1535 u32 tempval; 1536 1537 gfar_halt_nodisable(dev); 1538 1539 /* Disable Rx and Tx */ 1540 tempval = gfar_read(&regs->maccfg1); 1541 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); 1542 gfar_write(&regs->maccfg1, tempval); 1543} 1544 1545static void free_grp_irqs(struct gfar_priv_grp *grp) 1546{ 1547 free_irq(grp->interruptError, grp); 1548 
free_irq(grp->interruptTransmit, grp); 1549 free_irq(grp->interruptReceive, grp); 1550} 1551 1552void stop_gfar(struct net_device *dev) 1553{ 1554 struct gfar_private *priv = netdev_priv(dev); 1555 unsigned long flags; 1556 int i; 1557 1558 phy_stop(priv->phydev); 1559 1560 1561 /* Lock it down */ 1562 local_irq_save(flags); 1563 lock_tx_qs(priv); 1564 lock_rx_qs(priv); 1565 1566 gfar_halt(dev); 1567 1568 unlock_rx_qs(priv); 1569 unlock_tx_qs(priv); 1570 local_irq_restore(flags); 1571 1572 /* Free the IRQs */ 1573 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 1574 for (i = 0; i < priv->num_grps; i++) 1575 free_grp_irqs(&priv->gfargrp[i]); 1576 } else { 1577 for (i = 0; i < priv->num_grps; i++) 1578 free_irq(priv->gfargrp[i].interruptTransmit, 1579 &priv->gfargrp[i]); 1580 } 1581 1582 free_skb_resources(priv); 1583} 1584 1585static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) 1586{ 1587 struct txbd8 *txbdp; 1588 struct gfar_private *priv = netdev_priv(tx_queue->dev); 1589 int i, j; 1590 1591 txbdp = tx_queue->tx_bd_base; 1592 1593 for (i = 0; i < tx_queue->tx_ring_size; i++) { 1594 if (!tx_queue->tx_skbuff[i]) 1595 continue; 1596 1597 dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr, 1598 txbdp->length, DMA_TO_DEVICE); 1599 txbdp->lstatus = 0; 1600 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; 1601 j++) { 1602 txbdp++; 1603 dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr, 1604 txbdp->length, DMA_TO_DEVICE); 1605 } 1606 txbdp++; 1607 dev_kfree_skb_any(tx_queue->tx_skbuff[i]); 1608 tx_queue->tx_skbuff[i] = NULL; 1609 } 1610 kfree(tx_queue->tx_skbuff); 1611} 1612 1613static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) 1614{ 1615 struct rxbd8 *rxbdp; 1616 struct gfar_private *priv = netdev_priv(rx_queue->dev); 1617 int i; 1618 1619 rxbdp = rx_queue->rx_bd_base; 1620 1621 for (i = 0; i < rx_queue->rx_ring_size; i++) { 1622 if (rx_queue->rx_skbuff[i]) { 1623 dma_unmap_single(&priv->ofdev->dev, 1624 rxbdp->bufPtr, priv->rx_buffer_size, 1625 DMA_FROM_DEVICE); 1626 dev_kfree_skb_any(rx_queue->rx_skbuff[i]); 1627 rx_queue->rx_skbuff[i] = NULL; 1628 } 1629 rxbdp->lstatus = 0; 1630 rxbdp->bufPtr = 0; 1631 rxbdp++; 1632 } 1633 kfree(rx_queue->rx_skbuff); 1634} 1635 1636/* If there are any tx skbs or rx skbs still around, free them. 
1637 * Then free tx_skbuff and rx_skbuff */ 1638static void free_skb_resources(struct gfar_private *priv) 1639{ 1640 struct gfar_priv_tx_q *tx_queue = NULL; 1641 struct gfar_priv_rx_q *rx_queue = NULL; 1642 int i; 1643 1644 /* Go through all the buffer descriptors and free their data buffers */ 1645 for (i = 0; i < priv->num_tx_queues; i++) { 1646 tx_queue = priv->tx_queue[i]; 1647 if(!tx_queue->tx_skbuff) 1648 free_skb_tx_queue(tx_queue); 1649 } 1650 1651 for (i = 0; i < priv->num_rx_queues; i++) { 1652 rx_queue = priv->rx_queue[i]; 1653 if(!rx_queue->rx_skbuff) 1654 free_skb_rx_queue(rx_queue); 1655 } 1656 1657 dma_free_coherent(&priv->ofdev->dev, 1658 sizeof(struct txbd8) * priv->total_tx_ring_size + 1659 sizeof(struct rxbd8) * priv->total_rx_ring_size, 1660 priv->tx_queue[0]->tx_bd_base, 1661 priv->tx_queue[0]->tx_bd_dma_base); 1662} 1663 1664void gfar_start(struct net_device *dev) 1665{ 1666 struct gfar_private *priv = netdev_priv(dev); 1667 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1668 u32 tempval; 1669 int i = 0; 1670 1671 /* Enable Rx and Tx in MACCFG1 */ 1672 tempval = gfar_read(&regs->maccfg1); 1673 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN); 1674 gfar_write(&regs->maccfg1, tempval); 1675 1676 /* Initialize DMACTRL to have WWR and WOP */ 1677 tempval = gfar_read(&regs->dmactrl); 1678 tempval |= DMACTRL_INIT_SETTINGS; 1679 gfar_write(&regs->dmactrl, tempval); 1680 1681 /* Make sure we aren't stopped */ 1682 tempval = gfar_read(&regs->dmactrl); 1683 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); 1684 gfar_write(&regs->dmactrl, tempval); 1685 1686 for (i = 0; i < priv->num_grps; i++) { 1687 regs = priv->gfargrp[i].regs; 1688 /* Clear THLT/RHLT, so that the DMA starts polling now */ 1689 gfar_write(&regs->tstat, priv->gfargrp[i].tstat); 1690 gfar_write(&regs->rstat, priv->gfargrp[i].rstat); 1691 /* Unmask the interrupts we look for */ 1692 gfar_write(&regs->imask, IMASK_DEFAULT); 1693 } 1694 1695 dev->trans_start = jiffies; 1696} 1697 1698void gfar_configure_coalescing(struct gfar_private *priv, 1699 unsigned long tx_mask, unsigned long rx_mask) 1700{ 1701 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1702 u32 __iomem *baddr; 1703 int i = 0; 1704 1705 /* Backward compatible case ---- even if we enable 1706 * multiple queues, there's only single reg to program 1707 */ 1708 gfar_write(&regs->txic, 0); 1709 if(likely(priv->tx_queue[0]->txcoalescing)) 1710 gfar_write(&regs->txic, priv->tx_queue[0]->txic); 1711 1712 gfar_write(&regs->rxic, 0); 1713 if(unlikely(priv->rx_queue[0]->rxcoalescing)) 1714 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic); 1715 1716 if (priv->mode == MQ_MG_MODE) { 1717 baddr = &regs->txic0; 1718 for_each_bit (i, &tx_mask, priv->num_tx_queues) { 1719 if (likely(priv->tx_queue[i]->txcoalescing)) { 1720 gfar_write(baddr + i, 0); 1721 gfar_write(baddr + i, priv->tx_queue[i]->txic); 1722 } 1723 } 1724 1725 baddr = &regs->rxic0; 1726 for_each_bit (i, &rx_mask, priv->num_rx_queues) { 1727 if (likely(priv->rx_queue[i]->rxcoalescing)) { 1728 gfar_write(baddr + i, 0); 1729 gfar_write(baddr + i, priv->rx_queue[i]->rxic); 1730 } 1731 } 1732 } 1733} 1734 1735static int register_grp_irqs(struct gfar_priv_grp *grp) 1736{ 1737 struct gfar_private *priv = grp->priv; 1738 struct net_device *dev = priv->ndev; 1739 int err; 1740 1741 /* If the device has multiple interrupts, register for 1742 * them. 
Otherwise, only register for the one */ 1743 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 1744 /* Install our interrupt handlers for Error, 1745 * Transmit, and Receive */ 1746 if ((err = request_irq(grp->interruptError, gfar_error, 0, 1747 grp->int_name_er,grp)) < 0) { 1748 if (netif_msg_intr(priv)) 1749 printk(KERN_ERR "%s: Can't get IRQ %d\n", 1750 dev->name, grp->interruptError); 1751 1752 goto err_irq_fail; 1753 } 1754 1755 if ((err = request_irq(grp->interruptTransmit, gfar_transmit, 1756 0, grp->int_name_tx, grp)) < 0) { 1757 if (netif_msg_intr(priv)) 1758 printk(KERN_ERR "%s: Can't get IRQ %d\n", 1759 dev->name, grp->interruptTransmit); 1760 goto tx_irq_fail; 1761 } 1762 1763 if ((err = request_irq(grp->interruptReceive, gfar_receive, 0, 1764 grp->int_name_rx, grp)) < 0) { 1765 if (netif_msg_intr(priv)) 1766 printk(KERN_ERR "%s: Can't get IRQ %d\n", 1767 dev->name, grp->interruptReceive); 1768 goto rx_irq_fail; 1769 } 1770 } else { 1771 if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0, 1772 grp->int_name_tx, grp)) < 0) { 1773 if (netif_msg_intr(priv)) 1774 printk(KERN_ERR "%s: Can't get IRQ %d\n", 1775 dev->name, grp->interruptTransmit); 1776 goto err_irq_fail; 1777 } 1778 } 1779 1780 return 0; 1781 1782rx_irq_fail: 1783 free_irq(grp->interruptTransmit, grp); 1784tx_irq_fail: 1785 free_irq(grp->interruptError, grp); 1786err_irq_fail: 1787 return err; 1788 1789} 1790 1791/* Bring the controller up and running */ 1792int startup_gfar(struct net_device *ndev) 1793{ 1794 struct gfar_private *priv = netdev_priv(ndev); 1795 struct gfar __iomem *regs = NULL; 1796 int err, i, j; 1797 1798 for (i = 0; i < priv->num_grps; i++) { 1799 regs= priv->gfargrp[i].regs; 1800 gfar_write(&regs->imask, IMASK_INIT_CLEAR); 1801 } 1802 1803 regs= priv->gfargrp[0].regs; 1804 err = gfar_alloc_skb_resources(ndev); 1805 if (err) 1806 return err; 1807 1808 gfar_init_mac(ndev); 1809 1810 for (i = 0; i < priv->num_grps; i++) { 1811 err = register_grp_irqs(&priv->gfargrp[i]); 1812 if (err) { 1813 for (j = 0; j < i; j++) 1814 free_grp_irqs(&priv->gfargrp[j]); 1815 goto irq_fail; 1816 } 1817 } 1818 1819 /* Start the controller */ 1820 gfar_start(ndev); 1821 1822 phy_start(priv->phydev); 1823 1824 gfar_configure_coalescing(priv, 0xFF, 0xFF); 1825 1826 return 0; 1827 1828irq_fail: 1829 free_skb_resources(priv); 1830 return err; 1831} 1832 1833/* Called when something needs to use the ethernet device */ 1834/* Returns 0 for success. */ 1835static int gfar_enet_open(struct net_device *dev) 1836{ 1837 struct gfar_private *priv = netdev_priv(dev); 1838 int err; 1839 1840 enable_napi(priv); 1841 1842 skb_queue_head_init(&priv->rx_recycle); 1843 1844 /* Initialize a bunch of registers */ 1845 init_registers(dev); 1846 1847 gfar_set_mac_address(dev); 1848 1849 err = init_phy(dev); 1850 1851 if (err) { 1852 disable_napi(priv); 1853 return err; 1854 } 1855 1856 err = startup_gfar(dev); 1857 if (err) { 1858 disable_napi(priv); 1859 return err; 1860 } 1861 1862 netif_tx_start_all_queues(dev); 1863 1864 device_set_wakeup_enable(&dev->dev, priv->wol_en); 1865 1866 return err; 1867} 1868 1869static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb) 1870{ 1871 struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN); 1872 1873 memset(fcb, 0, GMAC_FCB_LEN); 1874 1875 return fcb; 1876} 1877 1878static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb) 1879{ 1880 u8 flags = 0; 1881 1882 /* If we're here, it's a IP packet with a TCP or UDP 1883 * payload. 
We set it to checksum, using a pseudo-header 1884 * we provide 1885 */ 1886 flags = TXFCB_DEFAULT; 1887 1888 /* Tell the controller what the protocol is */ 1889 /* And provide the already calculated phcs */ 1890 if (ip_hdr(skb)->protocol == IPPROTO_UDP) { 1891 flags |= TXFCB_UDP; 1892 fcb->phcs = udp_hdr(skb)->check; 1893 } else 1894 fcb->phcs = tcp_hdr(skb)->check; 1895 1896 /* l3os is the distance between the start of the 1897 * frame (skb->data) and the start of the IP hdr. 1898 * l4os is the distance between the start of the 1899 * l3 hdr and the l4 hdr */ 1900 fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN); 1901 fcb->l4os = skb_network_header_len(skb); 1902 1903 fcb->flags = flags; 1904} 1905 1906void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb) 1907{ 1908 fcb->flags |= TXFCB_VLN; 1909 fcb->vlctl = vlan_tx_tag_get(skb); 1910} 1911 1912static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride, 1913 struct txbd8 *base, int ring_size) 1914{ 1915 struct txbd8 *new_bd = bdp + stride; 1916 1917 return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd; 1918} 1919 1920static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base, 1921 int ring_size) 1922{ 1923 return skip_txbd(bdp, 1, base, ring_size); 1924} 1925 1926/* This is called by the kernel when a frame is ready for transmission. */ 1927/* It is pointed to by the dev->hard_start_xmit function pointer */ 1928static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) 1929{ 1930 struct gfar_private *priv = netdev_priv(dev); 1931 struct gfar_priv_tx_q *tx_queue = NULL; 1932 struct netdev_queue *txq; 1933 struct gfar __iomem *regs = NULL; 1934 struct txfcb *fcb = NULL; 1935 struct txbd8 *txbdp, *txbdp_start, *base; 1936 u32 lstatus; 1937 int i, rq = 0; 1938 u32 bufaddr; 1939 unsigned long flags; 1940 unsigned int nr_frags, length; 1941 1942 1943 rq = skb->queue_mapping; 1944 tx_queue = priv->tx_queue[rq]; 1945 txq = netdev_get_tx_queue(dev, rq); 1946 base = tx_queue->tx_bd_base; 1947 regs = tx_queue->grp->regs; 1948 1949 /* make space for additional header when fcb is needed */ 1950 if (((skb->ip_summed == CHECKSUM_PARTIAL) || 1951 (priv->vlgrp && vlan_tx_tag_present(skb))) && 1952 (skb_headroom(skb) < GMAC_FCB_LEN)) { 1953 struct sk_buff *skb_new; 1954 1955 skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN); 1956 if (!skb_new) { 1957 dev->stats.tx_errors++; 1958 kfree_skb(skb); 1959 return NETDEV_TX_OK; 1960 } 1961 kfree_skb(skb); 1962 skb = skb_new; 1963 } 1964 1965 /* total number of fragments in the SKB */ 1966 nr_frags = skb_shinfo(skb)->nr_frags; 1967 1968 /* check if there is space to queue this packet */ 1969 if ((nr_frags+1) > tx_queue->num_txbdfree) { 1970 /* no space, stop the queue */ 1971 netif_tx_stop_queue(txq); 1972 dev->stats.tx_fifo_errors++; 1973 return NETDEV_TX_BUSY; 1974 } 1975 1976 /* Update transmit stats */ 1977 txq->tx_bytes += skb->len; 1978 txq->tx_packets ++; 1979 1980 txbdp = txbdp_start = tx_queue->cur_tx; 1981 1982 if (nr_frags == 0) { 1983 lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); 1984 } else { 1985 /* Place the fragment addresses and lengths into the TxBDs */ 1986 for (i = 0; i < nr_frags; i++) { 1987 /* Point at the next BD, wrapping as needed */ 1988 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); 1989 1990 length = skb_shinfo(skb)->frags[i].size; 1991 1992 lstatus = txbdp->lstatus | length | 1993 BD_LFLAG(TXBD_READY); 1994 1995 /* Handle the last BD specially */ 1996 if (i == nr_frags - 1) 1997 
lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); 1998 1999 bufaddr = dma_map_page(&priv->ofdev->dev, 2000 skb_shinfo(skb)->frags[i].page, 2001 skb_shinfo(skb)->frags[i].page_offset, 2002 length, 2003 DMA_TO_DEVICE); 2004 2005 /* set the TxBD length and buffer pointer */ 2006 txbdp->bufPtr = bufaddr; 2007 txbdp->lstatus = lstatus; 2008 } 2009 2010 lstatus = txbdp_start->lstatus; 2011 } 2012 2013 /* Set up checksumming */ 2014 if (CHECKSUM_PARTIAL == skb->ip_summed) { 2015 fcb = gfar_add_fcb(skb); 2016 lstatus |= BD_LFLAG(TXBD_TOE); 2017 gfar_tx_checksum(skb, fcb); 2018 } 2019 2020 if (priv->vlgrp && vlan_tx_tag_present(skb)) { 2021 if (unlikely(NULL == fcb)) { 2022 fcb = gfar_add_fcb(skb); 2023 lstatus |= BD_LFLAG(TXBD_TOE); 2024 } 2025 2026 gfar_tx_vlan(skb, fcb); 2027 } 2028 2029 /* setup the TxBD length and buffer pointer for the first BD */ 2030 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb; 2031 txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data, 2032 skb_headlen(skb), DMA_TO_DEVICE); 2033 2034 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); 2035 2036 /* 2037 * We can work in parallel with gfar_clean_tx_ring(), except 2038 * when modifying num_txbdfree. Note that we didn't grab the lock 2039 * when we were reading the num_txbdfree and checking for available 2040 * space, that's because outside of this function it can only grow, 2041 * and once we've got needed space, it cannot suddenly disappear. 2042 * 2043 * The lock also protects us from gfar_error(), which can modify 2044 * regs->tstat and thus retrigger the transfers, which is why we 2045 * also must grab the lock before setting ready bit for the first 2046 * to be transmitted BD. 2047 */ 2048 spin_lock_irqsave(&tx_queue->txlock, flags); 2049 2050 /* 2051 * The powerpc-specific eieio() is used, as wmb() has too strong 2052 * semantics (it requires synchronization between cacheable and 2053 * uncacheable mappings, which eieio doesn't provide and which we 2054 * don't need), thus requiring a more expensive sync instruction. At 2055 * some point, the set of architecture-independent barrier functions 2056 * should be expanded to include weaker barriers. 2057 */ 2058 eieio(); 2059 2060 txbdp_start->lstatus = lstatus; 2061 2062 /* Update the current skb pointer to the next entry we will use 2063 * (wrapping if necessary) */ 2064 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) & 2065 TX_RING_MOD_MASK(tx_queue->tx_ring_size); 2066 2067 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); 2068 2069 /* reduce TxBD free count */ 2070 tx_queue->num_txbdfree -= (nr_frags + 1); 2071 2072 dev->trans_start = jiffies; 2073 2074 /* If the next BD still needs to be cleaned up, then the bds 2075 are full. We need to tell the kernel to stop sending us stuff. 
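	   gfar_clean_tx_ring() wakes the queue back up once it has reclaimed
	   enough descriptors for num_txbdfree to become non-zero again.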
*/ 2076 if (!tx_queue->num_txbdfree) { 2077 netif_tx_stop_queue(txq); 2078 2079 dev->stats.tx_fifo_errors++; 2080 } 2081 2082 /* Tell the DMA to go go go */ 2083 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex); 2084 2085 /* Unlock priv */ 2086 spin_unlock_irqrestore(&tx_queue->txlock, flags); 2087 2088 return NETDEV_TX_OK; 2089} 2090 2091/* Stops the kernel queue, and halts the controller */ 2092static int gfar_close(struct net_device *dev) 2093{ 2094 struct gfar_private *priv = netdev_priv(dev); 2095 2096 disable_napi(priv); 2097 2098 skb_queue_purge(&priv->rx_recycle); 2099 cancel_work_sync(&priv->reset_task); 2100 stop_gfar(dev); 2101 2102 /* Disconnect from the PHY */ 2103 phy_disconnect(priv->phydev); 2104 priv->phydev = NULL; 2105 2106 netif_tx_stop_all_queues(dev); 2107 2108 return 0; 2109} 2110 2111/* Changes the mac address if the controller is not running. */ 2112static int gfar_set_mac_address(struct net_device *dev) 2113{ 2114 gfar_set_mac_for_addr(dev, 0, dev->dev_addr); 2115 2116 return 0; 2117} 2118 2119 2120/* Enables and disables VLAN insertion/extraction */ 2121static void gfar_vlan_rx_register(struct net_device *dev, 2122 struct vlan_group *grp) 2123{ 2124 struct gfar_private *priv = netdev_priv(dev); 2125 struct gfar __iomem *regs = NULL; 2126 unsigned long flags; 2127 u32 tempval; 2128 2129 regs = priv->gfargrp[0].regs; 2130 local_irq_save(flags); 2131 lock_rx_qs(priv); 2132 2133 priv->vlgrp = grp; 2134 2135 if (grp) { 2136 /* Enable VLAN tag insertion */ 2137 tempval = gfar_read(&regs->tctrl); 2138 tempval |= TCTRL_VLINS; 2139 2140 gfar_write(&regs->tctrl, tempval); 2141 2142 /* Enable VLAN tag extraction */ 2143 tempval = gfar_read(&regs->rctrl); 2144 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT); 2145 gfar_write(&regs->rctrl, tempval); 2146 } else { 2147 /* Disable VLAN tag insertion */ 2148 tempval = gfar_read(&regs->tctrl); 2149 tempval &= ~TCTRL_VLINS; 2150 gfar_write(&regs->tctrl, tempval); 2151 2152 /* Disable VLAN tag extraction */ 2153 tempval = gfar_read(&regs->rctrl); 2154 tempval &= ~RCTRL_VLEX; 2155 /* If parse is no longer required, then disable parser */ 2156 if (tempval & RCTRL_REQ_PARSER) 2157 tempval |= RCTRL_PRSDEP_INIT; 2158 else 2159 tempval &= ~RCTRL_PRSDEP_INIT; 2160 gfar_write(&regs->rctrl, tempval); 2161 } 2162 2163 gfar_change_mtu(dev, dev->mtu); 2164 2165 unlock_rx_qs(priv); 2166 local_irq_restore(flags); 2167} 2168 2169static int gfar_change_mtu(struct net_device *dev, int new_mtu) 2170{ 2171 int tempsize, tempval; 2172 struct gfar_private *priv = netdev_priv(dev); 2173 struct gfar __iomem *regs = priv->gfargrp[0].regs; 2174 int oldsize = priv->rx_buffer_size; 2175 int frame_size = new_mtu + ETH_HLEN; 2176 2177 if (priv->vlgrp) 2178 frame_size += VLAN_HLEN; 2179 2180 if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) { 2181 if (netif_msg_drv(priv)) 2182 printk(KERN_ERR "%s: Invalid MTU setting\n", 2183 dev->name); 2184 return -EINVAL; 2185 } 2186 2187 if (gfar_uses_fcb(priv)) 2188 frame_size += GMAC_FCB_LEN; 2189 2190 frame_size += priv->padding; 2191 2192 tempsize = 2193 (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) + 2194 INCREMENTAL_BUFFER_SIZE; 2195 2196 /* Only stop and start the controller if it isn't already 2197 * stopped, and we changed something */ 2198 if ((oldsize != tempsize) && (dev->flags & IFF_UP)) 2199 stop_gfar(dev); 2200 2201 priv->rx_buffer_size = tempsize; 2202 2203 dev->mtu = new_mtu; 2204 2205 gfar_write(&regs->mrblr, priv->rx_buffer_size); 2206 gfar_write(&regs->maxfrm, priv->rx_buffer_size); 
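	/* Illustrative example (assuming INCREMENTAL_BUFFER_SIZE is 512):
	 * a frame_size of 1522 rounds up to an rx_buffer_size of 1536, and
	 * that rounded value is what MRBLR and MAXFRM now hold. */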
2207 2208 /* If the mtu is larger than the max size for standard 2209 * ethernet frames (ie, a jumbo frame), then set maccfg2 2210 * to allow huge frames, and to check the length */ 2211 tempval = gfar_read(&regs->maccfg2); 2212 2213 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE) 2214 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); 2215 else 2216 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); 2217 2218 gfar_write(&regs->maccfg2, tempval); 2219 2220 if ((oldsize != tempsize) && (dev->flags & IFF_UP)) 2221 startup_gfar(dev); 2222 2223 return 0; 2224} 2225 2226/* gfar_reset_task gets scheduled when a packet has not been 2227 * transmitted after a set amount of time. 2228 * For now, assume that clearing out all the structures, and 2229 * starting over will fix the problem. 2230 */ 2231static void gfar_reset_task(struct work_struct *work) 2232{ 2233 struct gfar_private *priv = container_of(work, struct gfar_private, 2234 reset_task); 2235 struct net_device *dev = priv->ndev; 2236 2237 if (dev->flags & IFF_UP) { 2238 netif_tx_stop_all_queues(dev); 2239 stop_gfar(dev); 2240 startup_gfar(dev); 2241 netif_tx_start_all_queues(dev); 2242 } 2243 2244 netif_tx_schedule_all(dev); 2245} 2246 2247static void gfar_timeout(struct net_device *dev) 2248{ 2249 struct gfar_private *priv = netdev_priv(dev); 2250 2251 dev->stats.tx_errors++; 2252 schedule_work(&priv->reset_task); 2253} 2254 2255/* Interrupt Handler for Transmit complete */ 2256static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) 2257{ 2258 struct net_device *dev = tx_queue->dev; 2259 struct gfar_private *priv = netdev_priv(dev); 2260 struct gfar_priv_rx_q *rx_queue = NULL; 2261 struct txbd8 *bdp; 2262 struct txbd8 *lbdp = NULL; 2263 struct txbd8 *base = tx_queue->tx_bd_base; 2264 struct sk_buff *skb; 2265 int skb_dirtytx; 2266 int tx_ring_size = tx_queue->tx_ring_size; 2267 int frags = 0; 2268 int i; 2269 int howmany = 0; 2270 u32 lstatus; 2271 2272 rx_queue = priv->rx_queue[tx_queue->qindex]; 2273 bdp = tx_queue->dirty_tx; 2274 skb_dirtytx = tx_queue->skb_dirtytx; 2275 2276 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) { 2277 unsigned long flags; 2278 2279 frags = skb_shinfo(skb)->nr_frags; 2280 lbdp = skip_txbd(bdp, frags, base, tx_ring_size); 2281 2282 lstatus = lbdp->lstatus; 2283 2284 /* Only clean completed frames */ 2285 if ((lstatus & BD_LFLAG(TXBD_READY)) && 2286 (lstatus & BD_LENGTH_MASK)) 2287 break; 2288 2289 dma_unmap_single(&priv->ofdev->dev, 2290 bdp->bufPtr, 2291 bdp->length, 2292 DMA_TO_DEVICE); 2293 2294 bdp->lstatus &= BD_LFLAG(TXBD_WRAP); 2295 bdp = next_txbd(bdp, base, tx_ring_size); 2296 2297 for (i = 0; i < frags; i++) { 2298 dma_unmap_page(&priv->ofdev->dev, 2299 bdp->bufPtr, 2300 bdp->length, 2301 DMA_TO_DEVICE); 2302 bdp->lstatus &= BD_LFLAG(TXBD_WRAP); 2303 bdp = next_txbd(bdp, base, tx_ring_size); 2304 } 2305 2306 /* 2307 * If there's room in the queue (limit it to rx_buffer_size) 2308 * we add this skb back into the pool, if it's the right size 2309 */ 2310 if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size && 2311 skb_recycle_check(skb, priv->rx_buffer_size + 2312 RXBUF_ALIGNMENT)) 2313 __skb_queue_head(&priv->rx_recycle, skb); 2314 else 2315 dev_kfree_skb_any(skb); 2316 2317 tx_queue->tx_skbuff[skb_dirtytx] = NULL; 2318 2319 skb_dirtytx = (skb_dirtytx + 1) & 2320 TX_RING_MOD_MASK(tx_ring_size); 2321 2322 howmany++; 2323 spin_lock_irqsave(&tx_queue->txlock, flags); 2324 tx_queue->num_txbdfree += frags + 1; 2325 spin_unlock_irqrestore(&tx_queue->txlock, flags); 2326 } 2327 
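	/* This is the only place num_txbdfree is increased, and it happens
	 * under txlock; gfar_start_xmit() relies on that when it checks for
	 * free descriptors without taking the lock. */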
2328 /* If we freed a buffer, we can restart transmission, if necessary */ 2329 if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree) 2330 netif_wake_subqueue(dev, tx_queue->qindex); 2331 2332 /* Update dirty indicators */ 2333 tx_queue->skb_dirtytx = skb_dirtytx; 2334 tx_queue->dirty_tx = bdp; 2335 2336 return howmany; 2337} 2338 2339static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp) 2340{ 2341 unsigned long flags; 2342 2343 spin_lock_irqsave(&gfargrp->grplock, flags); 2344 if (napi_schedule_prep(&gfargrp->napi)) { 2345 gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED); 2346 __napi_schedule(&gfargrp->napi); 2347 } else { 2348 /* 2349 * Clear IEVENT, so interrupts aren't called again 2350 * because of the packets that have already arrived. 2351 */ 2352 gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK); 2353 } 2354 spin_unlock_irqrestore(&gfargrp->grplock, flags); 2355 2356} 2357 2358/* Interrupt Handler for Transmit complete */ 2359static irqreturn_t gfar_transmit(int irq, void *grp_id) 2360{ 2361 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); 2362 return IRQ_HANDLED; 2363} 2364 2365static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, 2366 struct sk_buff *skb) 2367{ 2368 struct net_device *dev = rx_queue->dev; 2369 struct gfar_private *priv = netdev_priv(dev); 2370 dma_addr_t buf; 2371 2372 buf = dma_map_single(&priv->ofdev->dev, skb->data, 2373 priv->rx_buffer_size, DMA_FROM_DEVICE); 2374 gfar_init_rxbdp(rx_queue, bdp, buf); 2375} 2376 2377 2378struct sk_buff * gfar_new_skb(struct net_device *dev) 2379{ 2380 unsigned int alignamount; 2381 struct gfar_private *priv = netdev_priv(dev); 2382 struct sk_buff *skb = NULL; 2383 2384 skb = __skb_dequeue(&priv->rx_recycle); 2385 if (!skb) 2386 skb = netdev_alloc_skb(dev, 2387 priv->rx_buffer_size + RXBUF_ALIGNMENT); 2388 2389 if (!skb) 2390 return NULL; 2391 2392 alignamount = RXBUF_ALIGNMENT - 2393 (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)); 2394 2395 /* We need the data buffer to be aligned properly. 
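 * The allocation above added RXBUF_ALIGNMENT spare bytes so that the
 * reserve below always fits within the buffer.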
We will reserve 2396 * as many bytes as needed to align the data properly 2397 */ 2398 skb_reserve(skb, alignamount); 2399 2400 return skb; 2401} 2402 2403static inline void count_errors(unsigned short status, struct net_device *dev) 2404{ 2405 struct gfar_private *priv = netdev_priv(dev); 2406 struct net_device_stats *stats = &dev->stats; 2407 struct gfar_extra_stats *estats = &priv->extra_stats; 2408 2409 /* If the packet was truncated, none of the other errors 2410 * matter */ 2411 if (status & RXBD_TRUNCATED) { 2412 stats->rx_length_errors++; 2413 2414 estats->rx_trunc++; 2415 2416 return; 2417 } 2418 /* Count the errors, if there were any */ 2419 if (status & (RXBD_LARGE | RXBD_SHORT)) { 2420 stats->rx_length_errors++; 2421 2422 if (status & RXBD_LARGE) 2423 estats->rx_large++; 2424 else 2425 estats->rx_short++; 2426 } 2427 if (status & RXBD_NONOCTET) { 2428 stats->rx_frame_errors++; 2429 estats->rx_nonoctet++; 2430 } 2431 if (status & RXBD_CRCERR) { 2432 estats->rx_crcerr++; 2433 stats->rx_crc_errors++; 2434 } 2435 if (status & RXBD_OVERRUN) { 2436 estats->rx_overrun++; 2437 stats->rx_crc_errors++; 2438 } 2439} 2440 2441irqreturn_t gfar_receive(int irq, void *grp_id) 2442{ 2443 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); 2444 return IRQ_HANDLED; 2445} 2446 2447static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) 2448{ 2449 /* If valid headers were found, and valid sums 2450 * were verified, then we tell the kernel that no 2451 * checksumming is necessary. Otherwise, it is */ 2452 if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU)) 2453 skb->ip_summed = CHECKSUM_UNNECESSARY; 2454 else 2455 skb->ip_summed = CHECKSUM_NONE; 2456} 2457 2458 2459/* gfar_process_frame() -- handle one incoming packet if skb 2460 * isn't NULL. */ 2461static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, 2462 int amount_pull) 2463{ 2464 struct gfar_private *priv = netdev_priv(dev); 2465 struct rxfcb *fcb = NULL; 2466 2467 int ret; 2468 2469 /* fcb is at the beginning if exists */ 2470 fcb = (struct rxfcb *)skb->data; 2471 2472 /* Remove the FCB from the skb */ 2473 skb_set_queue_mapping(skb, fcb->rq); 2474 /* Remove the padded bytes, if there are any */ 2475 if (amount_pull) 2476 skb_pull(skb, amount_pull); 2477 2478 if (priv->rx_csum_enable) 2479 gfar_rx_checksum(skb, fcb); 2480 2481 /* Tell the skb what kind of packet this is */ 2482 skb->protocol = eth_type_trans(skb, dev); 2483 2484 /* Send the packet up the stack */ 2485 if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN))) 2486 ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp, fcb->vlctl); 2487 else 2488 ret = netif_receive_skb(skb); 2489 2490 if (NET_RX_DROP == ret) 2491 priv->extra_stats.kernel_dropped++; 2492 2493 return 0; 2494} 2495 2496/* gfar_clean_rx_ring() -- Processes each frame in the rx ring 2497 * until the budget/quota has been reached. Returns the number 2498 * of frames handled 2499 */ 2500int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) 2501{ 2502 struct net_device *dev = rx_queue->dev; 2503 struct rxbd8 *bdp, *base; 2504 struct sk_buff *skb; 2505 int pkt_len; 2506 int amount_pull; 2507 int howmany = 0; 2508 struct gfar_private *priv = netdev_priv(dev); 2509 2510 /* Get the first full descriptor */ 2511 bdp = rx_queue->cur_rx; 2512 base = rx_queue->rx_bd_base; 2513 2514 amount_pull = (gfar_uses_fcb(priv) ? 
GMAC_FCB_LEN : 0) + 2515 priv->padding; 2516 2517 while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) { 2518 struct sk_buff *newskb; 2519 rmb(); 2520 2521 /* Add another skb for the future */ 2522 newskb = gfar_new_skb(dev); 2523 2524 skb = rx_queue->rx_skbuff[rx_queue->skb_currx]; 2525 2526 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, 2527 priv->rx_buffer_size, DMA_FROM_DEVICE); 2528 2529 /* We drop the frame if we failed to allocate a new buffer */ 2530 if (unlikely(!newskb || !(bdp->status & RXBD_LAST) || 2531 bdp->status & RXBD_ERR)) { 2532 count_errors(bdp->status, dev); 2533 2534 if (unlikely(!newskb)) 2535 newskb = skb; 2536 else if (skb) { 2537 /* 2538 * We need to reset ->data to what it 2539 * was before gfar_new_skb() re-aligned 2540 * it to an RXBUF_ALIGNMENT boundary 2541 * before we put the skb back on the 2542 * recycle list. 2543 */ 2544 skb->data = skb->head + NET_SKB_PAD; 2545 __skb_queue_head(&priv->rx_recycle, skb); 2546 } 2547 } else { 2548 /* Increment the number of packets */ 2549 rx_queue->stats.rx_packets++; 2550 howmany++; 2551 2552 if (likely(skb)) { 2553 pkt_len = bdp->length - ETH_FCS_LEN; 2554 /* Remove the FCS from the packet length */ 2555 skb_put(skb, pkt_len); 2556 rx_queue->stats.rx_bytes += pkt_len; 2557 2558 gfar_process_frame(dev, skb, amount_pull); 2559 2560 } else { 2561 if (netif_msg_rx_err(priv)) 2562 printk(KERN_WARNING 2563 "%s: Missing skb!\n", dev->name); 2564 rx_queue->stats.rx_dropped++; 2565 priv->extra_stats.rx_skbmissing++; 2566 } 2567 2568 } 2569 2570 rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb; 2571 2572 /* Setup the new bdp */ 2573 gfar_new_rxbdp(rx_queue, bdp, newskb); 2574 2575 /* Update to the next pointer */ 2576 bdp = next_bd(bdp, base, rx_queue->rx_ring_size); 2577 2578 /* update to point at the next skb */ 2579 rx_queue->skb_currx = 2580 (rx_queue->skb_currx + 1) & 2581 RX_RING_MOD_MASK(rx_queue->rx_ring_size); 2582 } 2583 2584 /* Update the current rxbd pointer to be the next one */ 2585 rx_queue->cur_rx = bdp; 2586 2587 return howmany; 2588} 2589 2590static int gfar_poll(struct napi_struct *napi, int budget) 2591{ 2592 struct gfar_priv_grp *gfargrp = container_of(napi, 2593 struct gfar_priv_grp, napi); 2594 struct gfar_private *priv = gfargrp->priv; 2595 struct gfar __iomem *regs = gfargrp->regs; 2596 struct gfar_priv_tx_q *tx_queue = NULL; 2597 struct gfar_priv_rx_q *rx_queue = NULL; 2598 int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0; 2599 int tx_cleaned = 0, i, left_over_budget = budget; 2600 unsigned long serviced_queues = 0; 2601 int num_queues = 0; 2602 2603 num_queues = gfargrp->num_rx_queues; 2604 budget_per_queue = budget/num_queues; 2605 2606 /* Clear IEVENT, so interrupts aren't called again 2607 * because of the packets that have already arrived */ 2608 gfar_write(&regs->ievent, IEVENT_RTX_MASK); 2609 2610 while (num_queues && left_over_budget) { 2611 2612 budget_per_queue = left_over_budget/num_queues; 2613 left_over_budget = 0; 2614 2615 for_each_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { 2616 if (test_bit(i, &serviced_queues)) 2617 continue; 2618 rx_queue = priv->rx_queue[i]; 2619 tx_queue = priv->tx_queue[rx_queue->qindex]; 2620 2621 tx_cleaned += gfar_clean_tx_ring(tx_queue); 2622 rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue, 2623 budget_per_queue); 2624 rx_cleaned += rx_cleaned_per_queue; 2625 if(rx_cleaned_per_queue < budget_per_queue) { 2626 left_over_budget = left_over_budget + 2627 (budget_per_queue - rx_cleaned_per_queue); 2628 set_bit(i, 
&serviced_queues); 2629 num_queues--; 2630 } 2631 } 2632 } 2633 2634 if (tx_cleaned) 2635 return budget; 2636 2637 if (rx_cleaned < budget) { 2638 napi_complete(napi); 2639 2640 /* Clear the halt bit in RSTAT */ 2641 gfar_write(&regs->rstat, gfargrp->rstat); 2642 2643 gfar_write(&regs->imask, IMASK_DEFAULT); 2644 2645 /* If we are coalescing interrupts, update the timer */ 2646 /* Otherwise, clear it */ 2647 gfar_configure_coalescing(priv, 2648 gfargrp->rx_bit_map, gfargrp->tx_bit_map); 2649 } 2650 2651 return rx_cleaned; 2652} 2653 2654#ifdef CONFIG_NET_POLL_CONTROLLER 2655/* 2656 * Polling 'interrupt' - used by things like netconsole to send skbs 2657 * without having to re-enable interrupts. It's not called while 2658 * the interrupt routine is executing. 2659 */ 2660static void gfar_netpoll(struct net_device *dev) 2661{ 2662 struct gfar_private *priv = netdev_priv(dev); 2663 int i = 0; 2664 2665 /* If the device has multiple interrupts, run tx/rx */ 2666 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 2667 for (i = 0; i < priv->num_grps; i++) { 2668 disable_irq(priv->gfargrp[i].interruptTransmit); 2669 disable_irq(priv->gfargrp[i].interruptReceive); 2670 disable_irq(priv->gfargrp[i].interruptError); 2671 gfar_interrupt(priv->gfargrp[i].interruptTransmit, 2672 &priv->gfargrp[i]); 2673 enable_irq(priv->gfargrp[i].interruptError); 2674 enable_irq(priv->gfargrp[i].interruptReceive); 2675 enable_irq(priv->gfargrp[i].interruptTransmit); 2676 } 2677 } else { 2678 for (i = 0; i < priv->num_grps; i++) { 2679 disable_irq(priv->gfargrp[i].interruptTransmit); 2680 gfar_interrupt(priv->gfargrp[i].interruptTransmit, 2681 &priv->gfargrp[i]); 2682 enable_irq(priv->gfargrp[i].interruptTransmit); 2683 } 2684 } 2685} 2686#endif 2687 2688/* The interrupt handler for devices with one interrupt */ 2689static irqreturn_t gfar_interrupt(int irq, void *grp_id) 2690{ 2691 struct gfar_priv_grp *gfargrp = grp_id; 2692 2693 /* Save ievent for future reference */ 2694 u32 events = gfar_read(&gfargrp->regs->ievent); 2695 2696 /* Check for reception */ 2697 if (events & IEVENT_RX_MASK) 2698 gfar_receive(irq, grp_id); 2699 2700 /* Check for transmit completion */ 2701 if (events & IEVENT_TX_MASK) 2702 gfar_transmit(irq, grp_id); 2703 2704 /* Check for errors */ 2705 if (events & IEVENT_ERR_MASK) 2706 gfar_error(irq, grp_id); 2707 2708 return IRQ_HANDLED; 2709} 2710 2711/* Called every time the controller might need to be made 2712 * aware of new link state. The PHY code conveys this 2713 * information through variables in the phydev structure, and this 2714 * function converts those variables into the appropriate 2715 * register values, and can bring down the device if needed. 2716 */ 2717static void adjust_link(struct net_device *dev) 2718{ 2719 struct gfar_private *priv = netdev_priv(dev); 2720 struct gfar __iomem *regs = priv->gfargrp[0].regs; 2721 unsigned long flags; 2722 struct phy_device *phydev = priv->phydev; 2723 int new_state = 0; 2724 2725 local_irq_save(flags); 2726 lock_tx_qs(priv); 2727 2728 if (phydev->link) { 2729 u32 tempval = gfar_read(&regs->maccfg2); 2730 u32 ecntrl = gfar_read(&regs->ecntrl); 2731 2732 /* Now we make sure that we can be in full duplex mode. 2733 * If not, we operate in half-duplex mode. 
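		 * MACCFG2 and ECNTRL were read into locals above; the duplex
		 * and speed fields are adjusted in those copies and written
		 * back to the hardware only once, further below.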
*/ 2734 if (phydev->duplex != priv->oldduplex) { 2735 new_state = 1; 2736 if (!(phydev->duplex)) 2737 tempval &= ~(MACCFG2_FULL_DUPLEX); 2738 else 2739 tempval |= MACCFG2_FULL_DUPLEX; 2740 2741 priv->oldduplex = phydev->duplex; 2742 } 2743 2744 if (phydev->speed != priv->oldspeed) { 2745 new_state = 1; 2746 switch (phydev->speed) { 2747 case 1000: 2748 tempval = 2749 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); 2750 2751 ecntrl &= ~(ECNTRL_R100); 2752 break; 2753 case 100: 2754 case 10: 2755 tempval = 2756 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); 2757 2758 /* Reduced mode distinguishes 2759 * between 10 and 100 */ 2760 if (phydev->speed == SPEED_100) 2761 ecntrl |= ECNTRL_R100; 2762 else 2763 ecntrl &= ~(ECNTRL_R100); 2764 break; 2765 default: 2766 if (netif_msg_link(priv)) 2767 printk(KERN_WARNING 2768 "%s: Ack! Speed (%d) is not 10/100/1000!\n", 2769 dev->name, phydev->speed); 2770 break; 2771 } 2772 2773 priv->oldspeed = phydev->speed; 2774 } 2775 2776 gfar_write(&regs->maccfg2, tempval); 2777 gfar_write(&regs->ecntrl, ecntrl); 2778 2779 if (!priv->oldlink) { 2780 new_state = 1; 2781 priv->oldlink = 1; 2782 } 2783 } else if (priv->oldlink) { 2784 new_state = 1; 2785 priv->oldlink = 0; 2786 priv->oldspeed = 0; 2787 priv->oldduplex = -1; 2788 } 2789 2790 if (new_state && netif_msg_link(priv)) 2791 phy_print_status(phydev); 2792 unlock_tx_qs(priv); 2793 local_irq_restore(flags); 2794} 2795 2796/* Update the hash table based on the current list of multicast 2797 * addresses we subscribe to. Also, change the promiscuity of 2798 * the device based on the flags (this function is called 2799 * whenever dev->flags is changed */ 2800static void gfar_set_multi(struct net_device *dev) 2801{ 2802 struct dev_mc_list *mc_ptr; 2803 struct gfar_private *priv = netdev_priv(dev); 2804 struct gfar __iomem *regs = priv->gfargrp[0].regs; 2805 u32 tempval; 2806 2807 if (dev->flags & IFF_PROMISC) { 2808 /* Set RCTRL to PROM */ 2809 tempval = gfar_read(&regs->rctrl); 2810 tempval |= RCTRL_PROM; 2811 gfar_write(&regs->rctrl, tempval); 2812 } else { 2813 /* Set RCTRL to not PROM */ 2814 tempval = gfar_read(&regs->rctrl); 2815 tempval &= ~(RCTRL_PROM); 2816 gfar_write(&regs->rctrl, tempval); 2817 } 2818 2819 if (dev->flags & IFF_ALLMULTI) { 2820 /* Set the hash to rx all multicast frames */ 2821 gfar_write(&regs->igaddr0, 0xffffffff); 2822 gfar_write(&regs->igaddr1, 0xffffffff); 2823 gfar_write(&regs->igaddr2, 0xffffffff); 2824 gfar_write(&regs->igaddr3, 0xffffffff); 2825 gfar_write(&regs->igaddr4, 0xffffffff); 2826 gfar_write(&regs->igaddr5, 0xffffffff); 2827 gfar_write(&regs->igaddr6, 0xffffffff); 2828 gfar_write(&regs->igaddr7, 0xffffffff); 2829 gfar_write(&regs->gaddr0, 0xffffffff); 2830 gfar_write(&regs->gaddr1, 0xffffffff); 2831 gfar_write(&regs->gaddr2, 0xffffffff); 2832 gfar_write(&regs->gaddr3, 0xffffffff); 2833 gfar_write(&regs->gaddr4, 0xffffffff); 2834 gfar_write(&regs->gaddr5, 0xffffffff); 2835 gfar_write(&regs->gaddr6, 0xffffffff); 2836 gfar_write(&regs->gaddr7, 0xffffffff); 2837 } else { 2838 int em_num; 2839 int idx; 2840 2841 /* zero out the hash */ 2842 gfar_write(&regs->igaddr0, 0x0); 2843 gfar_write(&regs->igaddr1, 0x0); 2844 gfar_write(&regs->igaddr2, 0x0); 2845 gfar_write(&regs->igaddr3, 0x0); 2846 gfar_write(&regs->igaddr4, 0x0); 2847 gfar_write(&regs->igaddr5, 0x0); 2848 gfar_write(&regs->igaddr6, 0x0); 2849 gfar_write(&regs->igaddr7, 0x0); 2850 gfar_write(&regs->gaddr0, 0x0); 2851 gfar_write(&regs->gaddr1, 0x0); 2852 gfar_write(&regs->gaddr2, 0x0); 2853 gfar_write(&regs->gaddr3, 0x0); 
2854 	gfar_write(&regs->gaddr4, 0x0);
2855 	gfar_write(&regs->gaddr5, 0x0);
2856 	gfar_write(&regs->gaddr6, 0x0);
2857 	gfar_write(&regs->gaddr7, 0x0);
2858
2859 	/* If we have extended hash tables, we need to
2860 	 * clear the exact match registers to prepare for
2861 	 * setting them */
2862 	if (priv->extended_hash) {
2863 		em_num = GFAR_EM_NUM + 1;
2864 		gfar_clear_exact_match(dev);
2865 		idx = 1;
2866 	} else {
2867 		idx = 0;
2868 		em_num = 0;
2869 	}
2870
2871 	if (dev->mc_count == 0)
2872 		return;
2873
2874 	/* Parse the list, and set the appropriate bits */
2875 	for (mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
2876 		if (idx < em_num) {
2877 			gfar_set_mac_for_addr(dev, idx,
2878 					mc_ptr->dmi_addr);
2879 			idx++;
2880 		} else
2881 			gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
2882 	}
2883 	}
2884
2885 	return;
2886 }
2887
2888
2889 /* Clears each of the exact match registers to zero, so they
2890  * don't interfere with normal reception */
2891 static void gfar_clear_exact_match(struct net_device *dev)
2892 {
2893 	int idx;
2894 	u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
2895
2896 	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
2897 		gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
2898 }
2899
2900 /* Set the appropriate hash bit for the given addr */
2901 /* The algorithm works like so:
2902  * 1) Take the Destination Address (i.e. the multicast address), and
2903  * do a CRC on it (little endian), and reverse the bits of the
2904  * result.
2905  * 2) Use the 8 most significant bits as a hash into a 256-entry
2906  * table. The table is controlled through 8 32-bit registers:
2907  * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
2908  * entry 255. This means that the 3 most significant bits of the
2909  * hash index select which gaddr register to use, and the 5 other
2910  * bits indicate which bit (assuming an IBM numbering scheme, which
2911  * for PowerPC (tm) is usually the case) in the register holds
2912  * the entry.
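 *
 * Worked example (illustrative values only): with an 8-bit hash width
 * (the non-extended-hash case), if the 8 most significant bits of the
 * result come out as 0xA3 (binary 101 00011), then whichreg = 0b101 = 5
 * and whichbit = 0b00011 = 3, so bit (31 - 3) = 28 of gaddr5 gets set.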
*/ 2913static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr) 2914{ 2915 u32 tempval; 2916 struct gfar_private *priv = netdev_priv(dev); 2917 u32 result = ether_crc(MAC_ADDR_LEN, addr); 2918 int width = priv->hash_width; 2919 u8 whichbit = (result >> (32 - width)) & 0x1f; 2920 u8 whichreg = result >> (32 - width + 5); 2921 u32 value = (1 << (31-whichbit)); 2922 2923 tempval = gfar_read(priv->hash_regs[whichreg]); 2924 tempval |= value; 2925 gfar_write(priv->hash_regs[whichreg], tempval); 2926 2927 return; 2928} 2929 2930 2931/* There are multiple MAC Address register pairs on some controllers 2932 * This function sets the numth pair to a given address 2933 */ 2934static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr) 2935{ 2936 struct gfar_private *priv = netdev_priv(dev); 2937 struct gfar __iomem *regs = priv->gfargrp[0].regs; 2938 int idx; 2939 char tmpbuf[MAC_ADDR_LEN]; 2940 u32 tempval; 2941 u32 __iomem *macptr = &regs->macstnaddr1; 2942 2943 macptr += num*2; 2944 2945 /* Now copy it into the mac registers backwards, cuz */ 2946 /* little endian is silly */ 2947 for (idx = 0; idx < MAC_ADDR_LEN; idx++) 2948 tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx]; 2949 2950 gfar_write(macptr, *((u32 *) (tmpbuf))); 2951 2952 tempval = *((u32 *) (tmpbuf + 4)); 2953 2954 gfar_write(macptr+1, tempval); 2955} 2956 2957/* GFAR error interrupt handler */ 2958static irqreturn_t gfar_error(int irq, void *grp_id) 2959{ 2960 struct gfar_priv_grp *gfargrp = grp_id; 2961 struct gfar __iomem *regs = gfargrp->regs; 2962 struct gfar_private *priv= gfargrp->priv; 2963 struct net_device *dev = priv->ndev; 2964 2965 /* Save ievent for future reference */ 2966 u32 events = gfar_read(&regs->ievent); 2967 2968 /* Clear IEVENT */ 2969 gfar_write(&regs->ievent, events & IEVENT_ERR_MASK); 2970 2971 /* Magic Packet is not an error. */ 2972 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && 2973 (events & IEVENT_MAG)) 2974 events &= ~IEVENT_MAG; 2975 2976 /* Hmm... 
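	 * Report IEVENT and IMASK for whatever is left over, provided
	 * error messages are enabled.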
*/ 2977 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv)) 2978 printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n", 2979 dev->name, events, gfar_read(&regs->imask)); 2980 2981 /* Update the error counters */ 2982 if (events & IEVENT_TXE) { 2983 dev->stats.tx_errors++; 2984 2985 if (events & IEVENT_LC) 2986 dev->stats.tx_window_errors++; 2987 if (events & IEVENT_CRL) 2988 dev->stats.tx_aborted_errors++; 2989 if (events & IEVENT_XFUN) { 2990 unsigned long flags; 2991 2992 if (netif_msg_tx_err(priv)) 2993 printk(KERN_DEBUG "%s: TX FIFO underrun, " 2994 "packet dropped.\n", dev->name); 2995 dev->stats.tx_dropped++; 2996 priv->extra_stats.tx_underrun++; 2997 2998 local_irq_save(flags); 2999 lock_tx_qs(priv); 3000 3001 /* Reactivate the Tx Queues */ 3002 gfar_write(&regs->tstat, gfargrp->tstat); 3003 3004 unlock_tx_qs(priv); 3005 local_irq_restore(flags); 3006 } 3007 if (netif_msg_tx_err(priv)) 3008 printk(KERN_DEBUG "%s: Transmit Error\n", dev->name); 3009 } 3010 if (events & IEVENT_BSY) { 3011 dev->stats.rx_errors++; 3012 priv->extra_stats.rx_bsy++; 3013 3014 gfar_receive(irq, grp_id); 3015 3016 if (netif_msg_rx_err(priv)) 3017 printk(KERN_DEBUG "%s: busy error (rstat: %x)\n", 3018 dev->name, gfar_read(&regs->rstat)); 3019 } 3020 if (events & IEVENT_BABR) { 3021 dev->stats.rx_errors++; 3022 priv->extra_stats.rx_babr++; 3023 3024 if (netif_msg_rx_err(priv)) 3025 printk(KERN_DEBUG "%s: babbling RX error\n", dev->name); 3026 } 3027 if (events & IEVENT_EBERR) { 3028 priv->extra_stats.eberr++; 3029 if (netif_msg_rx_err(priv)) 3030 printk(KERN_DEBUG "%s: bus error\n", dev->name); 3031 } 3032 if ((events & IEVENT_RXC) && netif_msg_rx_status(priv)) 3033 printk(KERN_DEBUG "%s: control frame\n", dev->name); 3034 3035 if (events & IEVENT_BABT) { 3036 priv->extra_stats.tx_babt++; 3037 if (netif_msg_tx_err(priv)) 3038 printk(KERN_DEBUG "%s: babbling TX error\n", dev->name); 3039 } 3040 return IRQ_HANDLED; 3041} 3042 3043static struct of_device_id gfar_match[] = 3044{ 3045 { 3046 .type = "network", 3047 .compatible = "gianfar", 3048 }, 3049 { 3050 .compatible = "fsl,etsec2", 3051 }, 3052 {}, 3053}; 3054MODULE_DEVICE_TABLE(of, gfar_match); 3055 3056/* Structure for a device driver */ 3057static struct of_platform_driver gfar_driver = { 3058 .name = "fsl-gianfar", 3059 .match_table = gfar_match, 3060 3061 .probe = gfar_probe, 3062 .remove = gfar_remove, 3063 .suspend = gfar_legacy_suspend, 3064 .resume = gfar_legacy_resume, 3065 .driver.pm = GFAR_PM_OPS, 3066}; 3067 3068static int __init gfar_init(void) 3069{ 3070 return of_register_platform_driver(&gfar_driver); 3071} 3072 3073static void __exit gfar_exit(void) 3074{ 3075 of_unregister_platform_driver(&gfar_driver); 3076} 3077 3078module_init(gfar_init); 3079module_exit(gfar_exit); 3080
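/*
 * For reference only -- a rough sketch of the kind of device tree node
 * this driver binds against. The authoritative format is the board's
 * .dts file and the Freescale TSEC/eTSEC binding documentation; the
 * values below are placeholders, not taken from any real board:
 *
 *	ethernet@24000 {
 *		device_type = "network";
 *		compatible = "gianfar";
 *		model = "eTSEC";
 *		reg = <0x24000 0x1000>;
 *		local-mac-address = [ 00 00 00 00 00 00 ];
 *		interrupts = <29 2 30 2 34 2>;
 *		interrupt-parent = <&mpic>;
 *		phy-handle = <&phy0>;
 *	};
 */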