/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 *
 * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through platform_device.  Structures which
 * define the configuration needed by the board are defined in a
 * board structure in arch/ppc/platforms (though I do not
 * discount the possibility that other architectures could one
 * day be supported).
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors.  The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed).  In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit.  Without NAPI, the packet(s) will be handled
 * immediately.  Both methods will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets).  The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in.  The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted.  Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>

#include "gianfar.h"
#include "gianfar_mii.h"

#define TX_TIMEOUT (1*HZ)
#define SKB_ALLOC_TIMEOUT 1000000
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

#ifdef CONFIG_GFAR_NAPI
#define RECEIVE(x) netif_receive_skb(x)
#else
#define RECEIVE(x) netif_rx(x)
#endif

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp);
static struct net_device_stats *gfar_get_stats(struct net_device *dev);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs);
static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs);
static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *pdev);
static int gfar_remove(struct platform_device *pdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
#ifdef CONFIG_GFAR_NAPI
static int gfar_poll(struct net_device *dev, int *budget);
#endif
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
static void gfar_vlan_rx_register(struct net_device *netdev,
		struct vlan_group *grp);
static void gfar_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
void gfar_halt(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);

extern struct ethtool_ops gfar_ethtool_ops;

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return (priv->vlan_enable || priv->rx_csum_enable);
}

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct platform_device *pdev)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct gianfar_platform_data *einfo;
	struct resource *r;
	int idx;
	int err = 0;

	einfo = (struct gianfar_platform_data *) pdev->dev.platform_data;

	if (NULL == einfo) {
		printk(KERN_ERR "gfar %d: Missing additional data!\n",
		       pdev->id);

		return -ENODEV;
	}

	/* Create an ethernet device instance */
	dev = alloc_etherdev(sizeof (*priv));

	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);

	/* Set the info in the priv to the current info */
	priv->einfo = einfo;

	/* fill out IRQ fields */
	if (einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		priv->interruptTransmit = platform_get_irq_byname(pdev, "tx");
		priv->interruptReceive = platform_get_irq_byname(pdev, "rx");
		priv->interruptError = platform_get_irq_byname(pdev, "error");

		if (priv->interruptTransmit < 0
		    || priv->interruptReceive < 0
		    || priv->interruptError < 0)
			goto regs_fail;
	} else {
		priv->interruptTransmit = platform_get_irq(pdev, 0);

		if (priv->interruptTransmit < 0)
			goto regs_fail;
	}

	/* get a pointer to the register memory */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->regs = ioremap(r->start, sizeof (struct gfar));

	if (NULL == priv->regs) {
		err = -ENOMEM;
		goto regs_fail;
	}

	spin_lock_init(&priv->txlock);
	spin_lock_init(&priv->rxlock);

	platform_set_drvdata(pdev, dev);

	/* Stop the DMA engine now, in case it was running before */
	/* (The firmware could have used it, and left it running). */
	/* To do this, we write Graceful Receive Stop and Graceful */
	/* Transmit Stop, and then wait until the corresponding bits */
	/* in IEVENT indicate the stops have completed. */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&priv->regs->dmactrl, tempval);

	tempval = gfar_read(&priv->regs->dmactrl);
	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&priv->regs->dmactrl, tempval);

	while (!(gfar_read(&priv->regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC)))
		cpu_relax();

	/* Reset MAC layer */
	gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);

	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
	gfar_write(&priv->regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS);

	/* Initialize ECNTRL */
	gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Copy the station address into the dev structure, */
	memcpy(dev->dev_addr, einfo->mac_addr, MAC_ADDR_LEN);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) (priv->regs);

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* Fill in the dev structure */
	dev->open = gfar_enet_open;
	dev->hard_start_xmit = gfar_start_xmit;
	dev->tx_timeout = gfar_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_GFAR_NAPI
	dev->poll = gfar_poll;
	dev->weight = GFAR_DEV_WEIGHT;
#endif
	dev->stop = gfar_close;
	dev->get_stats = gfar_get_stats;
	dev->change_mtu = gfar_change_mtu;
	dev->mtu = 1500;
	dev->set_multicast_list = gfar_set_multi;

	dev->ethtool_ops = &gfar_ethtool_ops;

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		priv->rx_csum_enable = 1;
		dev->features |= NETIF_F_IP_CSUM;
	} else
		priv->rx_csum_enable = 0;

	priv->vlgrp = NULL;

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->vlan_rx_register = gfar_vlan_rx_register;
		dev->vlan_rx_kill_vid = gfar_vlan_rx_kill_vid;

		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

		priv->vlan_enable = 1;
	}

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &priv->regs->igaddr0;
		priv->hash_regs[1] = &priv->regs->igaddr1;
		priv->hash_regs[2] = &priv->regs->igaddr2;
		priv->hash_regs[3] = &priv->regs->igaddr3;
		priv->hash_regs[4] = &priv->regs->igaddr4;
		priv->hash_regs[5] = &priv->regs->igaddr5;
		priv->hash_regs[6] = &priv->regs->igaddr6;
		priv->hash_regs[7] = &priv->regs->igaddr7;
		priv->hash_regs[8] = &priv->regs->gaddr0;
		priv->hash_regs[9] = &priv->regs->gaddr1;
		priv->hash_regs[10] = &priv->regs->gaddr2;
		priv->hash_regs[11] = &priv->regs->gaddr3;
		priv->hash_regs[12] = &priv->regs->gaddr4;
		priv->hash_regs[13] = &priv->regs->gaddr5;
		priv->hash_regs[14] = &priv->regs->gaddr6;
		priv->hash_regs[15] = &priv->regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &priv->regs->gaddr0;
		priv->hash_regs[1] = &priv->regs->gaddr1;
		priv->hash_regs[2] = &priv->regs->gaddr2;
		priv->hash_regs[3] = &priv->regs->gaddr3;
		priv->hash_regs[4] = &priv->regs->gaddr4;
		priv->hash_regs[5] = &priv->regs->gaddr5;
		priv->hash_regs[6] = &priv->regs->gaddr6;
		priv->hash_regs[7] = &priv->regs->gaddr7;
	}

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	if (dev->features & NETIF_F_IP_CSUM)
		dev->hard_header_len += GMAC_FCB_LEN;

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
	priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
	priv->rx_ring_size = DEFAULT_RX_RING_SIZE;

	priv->txcoalescing = DEFAULT_TX_COALESCE;
	priv->txcount = DEFAULT_TXCOUNT;
	priv->txtime = DEFAULT_TXTIME;
	priv->rxcoalescing = DEFAULT_RX_COALESCE;
	priv->rxcount = DEFAULT_RXCOUNT;
	priv->rxtime = DEFAULT_RXTIME;
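
	/* Enable most messages by default: (NETIF_MSG_IFUP << 1) - 1
	 * sets every NETIF_MSG_* bit up to and including
	 * NETIF_MSG_IFUP, leaving the noisier categories above it
	 * disabled. */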
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

	err = register_netdev(dev);

	if (err) {
		printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
		       dev->name);
		goto register_fail;
	}

	/* Create all the sysfs files */
	gfar_init_sysfs(dev);

	/* Print out the device info */
	printk(KERN_INFO DEVICE_NAME, dev->name);
	for (idx = 0; idx < 6; idx++)
		printk("%2.2x%c", dev->dev_addr[idx], idx == 5 ? ' ' : ':');
	printk("\n");

	/* Even more device info helps when determining which kernel */
	/* provided which set of benchmarks. */
#ifdef CONFIG_GFAR_NAPI
	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
#else
	printk(KERN_INFO "%s: Running with NAPI disabled\n", dev->name);
#endif
	printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
	       dev->name, priv->rx_ring_size, priv->tx_ring_size);

	return 0;

register_fail:
	iounmap(priv->regs);
regs_fail:
	free_netdev(dev);
	return err;
}

static int gfar_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct gfar_private *priv = netdev_priv(dev);

	platform_set_drvdata(pdev, NULL);

	iounmap(priv->regs);
	free_netdev(dev);

	return 0;
}


/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		SUPPORTED_1000baseT_Full : 0;
	struct phy_device *phydev;
	char phy_id[BUS_ID_SIZE];

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, priv->einfo->bus_id,
			priv->einfo->phy_id);

	phydev = phy_connect(dev, phy_id, &adjust_link, 0);

	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	/* Remove any features not supported by the controller */
	phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;

	return 0;
}

static void init_registers(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR);

	/* Initialize IMASK */
	gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);

	/* Init hash registers to zero */
	gfar_write(&priv->regs->igaddr0, 0);
	gfar_write(&priv->regs->igaddr1, 0);
	gfar_write(&priv->regs->igaddr2, 0);
	gfar_write(&priv->regs->igaddr3, 0);
	gfar_write(&priv->regs->igaddr4, 0);
	gfar_write(&priv->regs->igaddr5, 0);
	gfar_write(&priv->regs->igaddr6, 0);
	gfar_write(&priv->regs->igaddr7, 0);

	gfar_write(&priv->regs->gaddr0, 0);
	gfar_write(&priv->regs->gaddr1, 0);
	gfar_write(&priv->regs->gaddr2, 0);
	gfar_write(&priv->regs->gaddr3, 0);
	gfar_write(&priv->regs->gaddr4, 0);
	gfar_write(&priv->regs->gaddr5, 0);
	gfar_write(&priv->regs->gaddr6, 0);
	gfar_write(&priv->regs->gaddr7, 0);

	/* Zero out the rmon mib registers if it has them */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&priv->regs->rmon.cam1, 0xffffffff);
		gfar_write(&priv->regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize the max receive buffer length */
	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);

	/* Assign the TBI an address which won't conflict with the PHYs */
	gfar_write(&priv->regs->tbipa, TBIPA_VALUE);
}


/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	/* Mask all interrupts */
	gfar_write(&regs->imask, IMASK_INIT_CLEAR);

	/* Clear all interrupts */
	gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&priv->regs->dmactrl);
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
	    != (DMACTRL_GRS | DMACTRL_GTS)) {
		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
		gfar_write(&priv->regs->dmactrl, tempval);

		while (!(gfar_read(&priv->regs->ievent) &
			 (IEVENT_GRSC | IEVENT_GTSC)))
			cpu_relax();
	}

	/* Disable Rx and Tx */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}

void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	unsigned long flags;

	phy_stop(priv->phydev);

	/* Lock it down */
	spin_lock_irqsave(&priv->txlock, flags);
	spin_lock(&priv->rxlock);

	gfar_halt(dev);

	spin_unlock(&priv->rxlock);
	spin_unlock_irqrestore(&priv->txlock, flags);

	/* Free the IRQs */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		free_irq(priv->interruptError, dev);
		free_irq(priv->interruptTransmit, dev);
		free_irq(priv->interruptReceive, dev);
	} else {
		free_irq(priv->interruptTransmit, dev);
	}

	free_skb_resources(priv);
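
	/* tbase0 still holds the bus address that startup_gfar()
	 * programmed into the controller, so read it back to use as
	 * the DMA handle when freeing the descriptor rings. */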
	dma_free_coherent(NULL,
			sizeof(struct txbd8)*priv->tx_ring_size
			+ sizeof(struct rxbd8)*priv->rx_ring_size,
			priv->tx_bd_base,
			gfar_read(&regs->tbase0));
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff */
static void free_skb_resources(struct gfar_private *priv)
{
	struct rxbd8 *rxbdp;
	struct txbd8 *txbdp;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	txbdp = priv->tx_bd_base;

	for (i = 0; i < priv->tx_ring_size; i++) {

		if (priv->tx_skbuff[i]) {
			dma_unmap_single(NULL, txbdp->bufPtr,
					txbdp->length,
					DMA_TO_DEVICE);
			dev_kfree_skb_any(priv->tx_skbuff[i]);
			priv->tx_skbuff[i] = NULL;
		}
	}

	kfree(priv->tx_skbuff);

	rxbdp = priv->rx_bd_base;

	/* rx_skbuff is not guaranteed to be allocated, so only
	 * free it and its contents if it is allocated */
	if(priv->rx_skbuff != NULL) {
		for (i = 0; i < priv->rx_ring_size; i++) {
			if (priv->rx_skbuff[i]) {
				dma_unmap_single(NULL, rxbdp->bufPtr,
						priv->rx_buffer_size,
						DMA_FROM_DEVICE);

				dev_kfree_skb_any(priv->rx_skbuff[i]);
				priv->rx_skbuff[i] = NULL;
			}

			rxbdp->status = 0;
			rxbdp->length = 0;
			rxbdp->bufPtr = 0;

			rxbdp++;
		}

		kfree(priv->rx_skbuff);
	}
}

void gfar_start(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&priv->regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&priv->regs->dmactrl, tempval);

	/* Clear THLT/RHLT, so that the DMA starts polling now */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
	gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT);

	/* Unmask the interrupts we look for */
	gfar_write(&regs->imask, IMASK_DEFAULT);
}

/* Bring the controller up and running */
int startup_gfar(struct net_device *dev)
{
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	dma_addr_t addr;
	unsigned long vaddr;
	int i;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	int err = 0;
	u32 rctrl = 0;
	u32 attrs = 0;

	gfar_write(&regs->imask, IMASK_INIT_CLEAR);

	/* Allocate memory for the buffer descriptors */
	vaddr = (unsigned long) dma_alloc_coherent(NULL,
			sizeof (struct txbd8) * priv->tx_ring_size +
			sizeof (struct rxbd8) * priv->rx_ring_size,
			&addr, GFP_KERNEL);

	if (vaddr == 0) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
			       dev->name);
		return -ENOMEM;
	}

	priv->tx_bd_base = (struct txbd8 *) vaddr;

	/* enet DMA only understands physical addresses */
	gfar_write(&regs->tbase0, addr);

	/* Start the rx descriptor ring where the tx ring leaves off */
	addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
	vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size;
	priv->rx_bd_base = (struct rxbd8 *) vaddr;
	gfar_write(&regs->rbase0, addr);

	/* Setup the skbuff rings */
	priv->tx_skbuff =
	    (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
					priv->tx_ring_size, GFP_KERNEL);

	if (NULL == priv->tx_skbuff) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
			       dev->name);
		err = -ENOMEM;
		goto tx_skb_fail;
	}

	for (i = 0; i < priv->tx_ring_size; i++)
		priv->tx_skbuff[i] = NULL;
	priv->rx_skbuff =
	    (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
					priv->rx_ring_size, GFP_KERNEL);

	if (NULL == priv->rx_skbuff) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
			       dev->name);
		err = -ENOMEM;
		goto rx_skb_fail;
	}

	for (i = 0; i < priv->rx_ring_size; i++)
		priv->rx_skbuff[i] = NULL;

	/* Initialize some variables in our dev structure */
	priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
	priv->cur_rx = priv->rx_bd_base;
	priv->skb_curtx = priv->skb_dirtytx = 0;
	priv->skb_currx = 0;

	/* Initialize Transmit Descriptor Ring */
	txbdp = priv->tx_bd_base;
	for (i = 0; i < priv->tx_ring_size; i++) {
		txbdp->status = 0;
		txbdp->length = 0;
		txbdp->bufPtr = 0;
		txbdp++;
	}

	/* Set the last descriptor in the ring to indicate wrap */
	txbdp--;
	txbdp->status |= TXBD_WRAP;

	rxbdp = priv->rx_bd_base;
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct sk_buff *skb = NULL;

		rxbdp->status = 0;

		skb = gfar_new_skb(dev, rxbdp);

		priv->rx_skbuff[i] = skb;

		rxbdp++;
	}

	/* Set the last descriptor in the ring to wrap */
	rxbdp--;
	rxbdp->status |= RXBD_WRAP;

	/* If the device has multiple interrupts, register for
	 * them.  Otherwise, only register for the one */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive */
		if (request_irq(priv->interruptError, gfar_error,
				0, "enet_error", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
				       dev->name, priv->interruptError);

			err = -1;
			goto err_irq_fail;
		}

		if (request_irq(priv->interruptTransmit, gfar_transmit,
				0, "enet_tx", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
				       dev->name, priv->interruptTransmit);

			err = -1;

			goto tx_irq_fail;
		}

		if (request_irq(priv->interruptReceive, gfar_receive,
				0, "enet_rx", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
				       dev->name, priv->interruptReceive);

			err = -1;
			goto rx_irq_fail;
		}
	} else {
		if (request_irq(priv->interruptTransmit, gfar_interrupt,
				0, "gfar_interrupt", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
				       dev->name, priv->interruptError);

			err = -1;
			goto err_irq_fail;
		}
	}

	phy_start(priv->phydev);

	/* Configure the coalescing support */
	if (priv->txcoalescing)
		gfar_write(&regs->txic,
			   mk_ic_value(priv->txcount, priv->txtime));
	else
		gfar_write(&regs->txic, 0);

	if (priv->rxcoalescing)
		gfar_write(&regs->rxic,
			   mk_ic_value(priv->rxcount, priv->rxtime));
	else
		gfar_write(&regs->rxic, 0);

	if (priv->rx_csum_enable)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(dev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->vlan_enable)
		rctrl |= RCTRL_VLAN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Init rctrl based on our settings */
	gfar_write(&priv->regs->rctrl, rctrl);

	if (dev->features & NETIF_F_IP_CSUM)
		gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&priv->regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&priv->regs->attr, attrs);

	gfar_write(&priv->regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&priv->regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&priv->regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);

	/* Start the controller */
	gfar_start(dev);

	return 0;

rx_irq_fail:
	free_irq(priv->interruptTransmit, dev);
tx_irq_fail:
	free_irq(priv->interruptError, dev);
err_irq_fail:
rx_skb_fail:
	free_skb_resources(priv);
tx_skb_fail:
	dma_free_coherent(NULL,
			sizeof(struct txbd8)*priv->tx_ring_size
			+ sizeof(struct rxbd8)*priv->rx_ring_size,
			priv->tx_bd_base,
			gfar_read(&regs->tbase0));

	return err;
}

/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
	int err;

	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);

	if(err)
		return err;

	err = startup_gfar(dev);

	netif_start_queue(dev);

	return err;
}
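
/* Prepend a frame control block to the skb.  skb_push() moves
 * skb->data back by GMAC_FCB_LEN; gfar_probe() grew
 * dev->hard_header_len by the same amount when TX checksum offload
 * is available, so the headroom should already be reserved. */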
static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb, struct txbd8 *bdp)
{
	struct txfcb *fcb = (struct txfcb *)skb_push (skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
{
	u8 flags = 0;

	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload.  We set it to checksum, using a pseudo-header
	 * we provide
	 */
	flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is */
	/* And provide the already calculated phcs */
	if (skb->nh.iph->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = skb->h.uh->check;
	} else
		fcb->phcs = skb->h.th->check;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr */
	fcb->l3os = (u16)(skb->nh.raw - skb->data - GMAC_FCB_LEN);
	fcb->l4os = (u16)(skb->h.raw - skb->nh.raw);

	fcb->flags = flags;
}

void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = vlan_tx_tag_get(skb);
}

/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp;
	u16 status;
	unsigned long flags;

	/* Update transmit stats */
	priv->stats.tx_bytes += skb->len;

	/* Lock priv now */
	spin_lock_irqsave(&priv->txlock, flags);

	/* Point at the first free tx descriptor */
	txbdp = priv->cur_tx;

	/* Clear all but the WRAP status flags */
	status = txbdp->status & TXBD_WRAP;

	/* Set up checksumming */
	if (likely((dev->features & NETIF_F_IP_CSUM)
			&& (CHECKSUM_HW == skb->ip_summed))) {
		fcb = gfar_add_fcb(skb, txbdp);
		status |= TXBD_TOE;
		gfar_tx_checksum(skb, fcb);
	}

	if (priv->vlan_enable &&
	    unlikely(priv->vlgrp && vlan_tx_tag_present(skb))) {
		if (unlikely(NULL == fcb)) {
			fcb = gfar_add_fcb(skb, txbdp);
			status |= TXBD_TOE;
		}

		gfar_tx_vlan(skb, fcb);
	}

	/* Set buffer length and pointer */
	txbdp->length = skb->len;
	txbdp->bufPtr = dma_map_single(NULL, skb->data,
			skb->len, DMA_TO_DEVICE);

	/* Save the skb pointer so we can free it later */
	priv->tx_skbuff[priv->skb_curtx] = skb;

	/* Update the current skb pointer (wrapping if this was the last) */
	priv->skb_curtx =
	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);

	/* Flag the BD as interrupt-causing */
	status |= TXBD_INTERRUPT;

	/* Flag the BD as ready to go, last in frame, and */
	/* in need of CRC */
	status |= (TXBD_READY | TXBD_LAST | TXBD_CRC);

	dev->trans_start = jiffies;

	txbdp->status = status;

	/* If this was the last BD in the ring, the next one */
	/* is at the beginning of the ring */
	if (txbdp->status & TXBD_WRAP)
		txbdp = priv->tx_bd_base;
	else
		txbdp++;

	/* If the next BD still needs to be cleaned up, then the bds
	   are full.  We need to tell the kernel to stop sending us stuff. */
	if (txbdp == priv->dirty_tx) {
		netif_stop_queue(dev);

		priv->stats.tx_fifo_errors++;
	}

	/* Update the current txbd to the next one */
	priv->cur_tx = txbdp;

	/* Tell the DMA to go go go */
	gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);

	/* Unlock priv */
	spin_unlock_irqrestore(&priv->txlock, flags);

	return 0;
}

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	netif_stop_queue(dev);

	return 0;
}

/* returns a net_device_stats structure pointer */
static struct net_device_stats * gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	return &(priv->stats);
}

/* Changes the mac address if the controller is not running. */
int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}


/* Enables and disables VLAN insertion/extraction */
static void gfar_vlan_rx_register(struct net_device *dev,
		struct vlan_group *grp)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	u32 tempval;

	spin_lock_irqsave(&priv->rxlock, flags);

	priv->vlgrp = grp;

	if (grp) {
		/* Enable VLAN tag insertion */
		tempval = gfar_read(&priv->regs->tctrl);
		tempval |= TCTRL_VLINS;

		gfar_write(&priv->regs->tctrl, tempval);

		/* Enable VLAN tag extraction */
		tempval = gfar_read(&priv->regs->rctrl);
		tempval |= RCTRL_VLEX;
		gfar_write(&priv->regs->rctrl, tempval);
	} else {
		/* Disable VLAN tag insertion */
		tempval = gfar_read(&priv->regs->tctrl);
		tempval &= ~TCTRL_VLINS;
		gfar_write(&priv->regs->tctrl, tempval);

		/* Disable VLAN tag extraction */
		tempval = gfar_read(&priv->regs->rctrl);
		tempval &= ~RCTRL_VLEX;
		gfar_write(&priv->regs->rctrl, tempval);
	}

	spin_unlock_irqrestore(&priv->rxlock, flags);
}


static void gfar_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&priv->rxlock, flags);

	if (priv->vlgrp)
		priv->vlgrp->vlan_devices[vid] = NULL;

	spin_unlock_irqrestore(&priv->rxlock, flags);
}


static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	int tempsize, tempval;
	struct gfar_private *priv = netdev_priv(dev);
	int oldsize = priv->rx_buffer_size;
	int frame_size = new_mtu + ETH_HLEN;

	if (priv->vlan_enable)
		frame_size += VLAN_ETH_HLEN;

	if (gfar_uses_fcb(priv))
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: Invalid MTU setting\n",
			       dev->name);
		return -EINVAL;
	}
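
	/* Round the buffer size up to the next multiple of
	 * INCREMENTAL_BUFFER_SIZE. */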
	tempsize =
	    (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
	    INCREMENTAL_BUFFER_SIZE;

	/* Only stop and start the controller if it isn't already
	 * stopped, and we changed something */
	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		stop_gfar(dev);

	priv->rx_buffer_size = tempsize;

	dev->mtu = new_mtu;

	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
	gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length */
	tempval = gfar_read(&priv->regs->maccfg2);

	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
	else
		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

	gfar_write(&priv->regs->maccfg2, tempval);

	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		startup_gfar(dev);

	return 0;
}

/* gfar_timeout gets called when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem. */
static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	priv->stats.tx_errors++;

	if (dev->flags & IFF_UP) {
		stop_gfar(dev);
		startup_gfar(dev);
	}

	netif_schedule(dev);
}

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct gfar_private *priv = netdev_priv(dev);
	struct txbd8 *bdp;

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);

	/* Lock priv */
	spin_lock(&priv->txlock);
	bdp = priv->dirty_tx;
	while ((bdp->status & TXBD_READY) == 0) {
		/* If dirty_tx and cur_tx are the same, then either the */
		/* ring is empty or full now (it could only be full in the beginning, */
		/* obviously).  If it is empty, we are done. */
		if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
			break;

		priv->stats.tx_packets++;

		/* Deferred means some collisions occurred during transmit, */
		/* but we eventually sent the packet. */
		if (bdp->status & TXBD_DEF)
			priv->stats.collisions++;

		/* Free the sk buffer associated with this TxBD */
		dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);
		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
		priv->skb_dirtytx =
		    (priv->skb_dirtytx +
		     1) & TX_RING_MOD_MASK(priv->tx_ring_size);

		/* update bdp to point at next bd in the ring (wrapping if necessary) */
		if (bdp->status & TXBD_WRAP)
			bdp = priv->tx_bd_base;
		else
			bdp++;

		/* Move dirty_tx to be the next bd */
		priv->dirty_tx = bdp;

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	} /* while ((bdp->status & TXBD_READY) == 0) */

	/* If we are coalescing the interrupts, reset the timer */
	/* Otherwise, clear it */
	if (priv->txcoalescing)
		gfar_write(&priv->regs->txic,
			   mk_ic_value(priv->txcount, priv->txtime));
	else
		gfar_write(&priv->regs->txic, 0);

	spin_unlock(&priv->txlock);

	return IRQ_HANDLED;
}

struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
{
	unsigned int alignamount;
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;
	unsigned int timeout = SKB_ALLOC_TIMEOUT;

	/* We have to allocate the skb, so keep trying till we succeed */
	while ((!skb) && timeout--)
		skb = dev_alloc_skb(priv->rx_buffer_size + RXBUF_ALIGNMENT);

	if (NULL == skb)
		return NULL;

	alignamount = RXBUF_ALIGNMENT -
		(((unsigned) skb->data) & (RXBUF_ALIGNMENT - 1));

	/* We need the data buffer to be aligned properly.  We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb, alignamount);

	skb->dev = dev;

	bdp->bufPtr = dma_map_single(NULL, skb->data,
			priv->rx_buffer_size, DMA_FROM_DEVICE);

	bdp->length = 0;

	/* Mark the buffer empty */
	bdp->status |= (RXBD_EMPTY | RXBD_INTERRUPT);

	return skb;
}

static inline void count_errors(unsigned short status, struct gfar_private *priv)
{
	struct net_device_stats *stats = &priv->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors
	 * matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

		estats->rx_trunc++;

		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			estats->rx_large++;
		else
			estats->rx_short++;
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		estats->rx_nonoctet++;
	}
	if (status & RXBD_CRCERR) {
		estats->rx_crcerr++;
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		estats->rx_overrun++;
		stats->rx_crc_errors++;
	}
}

irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct gfar_private *priv = netdev_priv(dev);
#ifdef CONFIG_GFAR_NAPI
	u32 tempval;
#else
	unsigned long flags;
#endif

	/* Clear IEVENT, so rx interrupt isn't called again
	 * because of this interrupt */
	gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);

	/* support NAPI */
#ifdef CONFIG_GFAR_NAPI
	if (netif_rx_schedule_prep(dev)) {
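		/* Mask further RX interrupts while NAPI polls;
		 * gfar_poll() writes IMASK_DEFAULT to re-enable them
		 * once the ring has been drained. */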
		tempval = gfar_read(&priv->regs->imask);
		tempval &= IMASK_RX_DISABLED;
		gfar_write(&priv->regs->imask, tempval);

		__netif_rx_schedule(dev);
	} else {
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
			       dev->name, gfar_read(&priv->regs->ievent),
			       gfar_read(&priv->regs->imask));
	}
#else

	spin_lock_irqsave(&priv->rxlock, flags);
	gfar_clean_rx_ring(dev, priv->rx_ring_size);

	/* If we are coalescing interrupts, update the timer */
	/* Otherwise, clear it */
	if (priv->rxcoalescing)
		gfar_write(&priv->regs->rxic,
			   mk_ic_value(priv->rxcount, priv->rxtime));
	else
		gfar_write(&priv->regs->rxic, 0);

	spin_unlock_irqrestore(&priv->rxlock, flags);
#endif

	return IRQ_HANDLED;
}

static inline int gfar_rx_vlan(struct sk_buff *skb,
		struct vlan_group *vlgrp, unsigned short vlctl)
{
#ifdef CONFIG_GFAR_NAPI
	return vlan_hwaccel_receive_skb(skb, vlgrp, vlctl);
#else
	return vlan_hwaccel_rx(skb, vlgrp, vlctl);
#endif
}

static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary.  Otherwise, it is */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
}


static inline struct rxfcb *gfar_get_fcb(struct sk_buff *skb)
{
	struct rxfcb *fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb */
	skb_pull(skb, GMAC_FCB_LEN);

	return fcb;
}

/* gfar_process_frame() -- handle one incoming packet if skb
 * isn't NULL. */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int length)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	if (NULL == skb) {
		if (netif_msg_rx_err(priv))
			printk(KERN_WARNING "%s: Missing skb!!.\n", dev->name);
		priv->stats.rx_dropped++;
		priv->extra_stats.rx_skbmissing++;
	} else {
		int ret;

		/* Prep the skb for the packet */
		skb_put(skb, length);

		/* Grab the FCB if there is one */
		if (gfar_uses_fcb(priv))
			fcb = gfar_get_fcb(skb);

		/* Remove the padded bytes, if there are any */
		if (priv->padding)
			skb_pull(skb, priv->padding);

		if (priv->rx_csum_enable)
			gfar_rx_checksum(skb, fcb);

		/* Tell the skb what kind of packet this is */
		skb->protocol = eth_type_trans(skb, dev);

		/* Send the packet up the stack */
		if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
			ret = gfar_rx_vlan(skb, priv->vlgrp, fcb->vlctl);
		else
			ret = RECEIVE(skb);

		if (NET_RX_DROP == ret)
			priv->extra_stats.kernel_dropped++;
	}

	return 0;
}

/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached.  Returns the number
 * of frames handled
 */
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
{
	struct rxbd8 *bdp;
	struct sk_buff *skb;
	u16 pkt_len;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = priv->cur_rx;

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		skb = priv->rx_skbuff[priv->skb_currx];

		if (!(bdp->status &
		      (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET
		       | RXBD_CRCERR | RXBD_OVERRUN | RXBD_TRUNCATED))) {
			/* Increment the number of packets */
			priv->stats.rx_packets++;
			howmany++;

			/* Remove the FCS from the packet length */
			pkt_len = bdp->length - 4;

			gfar_process_frame(dev, skb, pkt_len);

			priv->stats.rx_bytes += pkt_len;
		} else {
			count_errors(bdp->status, priv);

			if (skb)
				dev_kfree_skb_any(skb);

			priv->rx_skbuff[priv->skb_currx] = NULL;
		}

		dev->last_rx = jiffies;

		/* Clear the status flags for this buffer */
		bdp->status &= ~RXBD_STATS;

		/* Add another skb for the future */
		skb = gfar_new_skb(dev, bdp);
		priv->rx_skbuff[priv->skb_currx] = skb;

		/* Update to the next pointer */
		if (bdp->status & RXBD_WRAP)
			bdp = priv->rx_bd_base;
		else
			bdp++;

		/* update to point at the next skb */
		priv->skb_currx =
		    (priv->skb_currx +
		     1) & RX_RING_MOD_MASK(priv->rx_ring_size);

	}

	/* Update the current rxbd pointer to be the next one */
	priv->cur_rx = bdp;

	return howmany;
}

#ifdef CONFIG_GFAR_NAPI
static int gfar_poll(struct net_device *dev, int *budget)
{
	int howmany;
	struct gfar_private *priv = netdev_priv(dev);
	int rx_work_limit = *budget;

	if (rx_work_limit > dev->quota)
		rx_work_limit = dev->quota;
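
	/* Process up to rx_work_limit frames, then charge whatever
	 * was actually handled against both the device quota and the
	 * global budget, as the NAPI contract requires. */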
	howmany = gfar_clean_rx_ring(dev, rx_work_limit);

	dev->quota -= howmany;
	rx_work_limit -= howmany;
	*budget -= howmany;

	if (rx_work_limit > 0) {
		netif_rx_complete(dev);

		/* Clear the halt bit in RSTAT */
		gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);

		gfar_write(&priv->regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer */
		/* Otherwise, clear it */
		if (priv->rxcoalescing)
			gfar_write(&priv->regs->rxic,
				   mk_ic_value(priv->rxcount, priv->rxtime));
		else
			gfar_write(&priv->regs->rxic, 0);
	}

	/* Return 1 if there's more work to do */
	return (rx_work_limit > 0) ? 0 : 1;
}
#endif

/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Save ievent for future reference */
	u32 events = gfar_read(&priv->regs->ievent);

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, events);

	/* Check for reception */
	if ((events & IEVENT_RXF0) || (events & IEVENT_RXB0))
		gfar_receive(irq, dev_id, regs);

	/* Check for transmit completion */
	if ((events & IEVENT_TXF) || (events & IEVENT_TXB))
		gfar_transmit(irq, dev_id, regs);

	/* Update error statistics */
	if (events & IEVENT_TXE) {
		priv->stats.tx_errors++;

		if (events & IEVENT_LC)
			priv->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			priv->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			if (netif_msg_tx_err(priv))
				printk(KERN_WARNING "%s: tx underrun. dropped packet\n",
				       dev->name);
			priv->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			/* Reactivate the Tx Queues */
			gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
		}
	}
	if (events & IEVENT_BSY) {
		priv->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, dev_id, regs);

#ifndef CONFIG_GFAR_NAPI
		/* Clear the halt bit in RSTAT */
		gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
#endif

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n",
			       dev->name,
			       gfar_read(&priv->regs->rstat));
	}
	if (events & IEVENT_BABR) {
		priv->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: babbling error\n", dev->name);
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: EBERR\n", dev->name);
	}
	if ((events & IEVENT_RXC) && (netif_msg_rx_err(priv)))
		printk(KERN_DEBUG "%s: control frame\n", dev->name);

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: babt error\n", dev->name);
	}

	return IRQ_HANDLED;
}

/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	unsigned long flags;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;

	spin_lock_irqsave(&priv->txlock, flags);
	if (phydev->link) {
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				if (netif_msg_link(priv))
					printk(KERN_WARNING
					       "%s: Ack! Speed (%d) is not 10/100/1000!\n",
					       dev->name, phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
			netif_schedule(dev);
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	spin_unlock_irqrestore(&priv->txlock, flags);
}

/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed) */
static void gfar_set_multi(struct net_device *dev)
{
	struct dev_mc_list *mc_ptr;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	if(dev->flags & IFF_PROMISC) {
		if (netif_msg_drv(priv))
			printk(KERN_INFO "%s: Entering promiscuous mode.\n",
			       dev->name);
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if(dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if(dev->mc_count == 0)
			return;

		/* Parse the list, and set the appropriate bits */
		for(mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx,
						mc_ptr->dmi_addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
		}
	}

	return;
}


/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	u8 zero_arr[MAC_ADDR_LEN] = {0,0,0,0,0,0};

	for(idx = 1;idx < GFAR_EM_NUM + 1;idx++)
		gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255.  This means that the 3 most significant bits in the
 * hash index which gaddr register to use, and the 5 other bits
 * indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry.
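 *
 * For example, with a hash width of 8 the top 8 CRC bits form the
 * index: bits 7-5 select which of the 8 registers to use (whichreg
 * below) and bits 4-0 select the bit within it (whichbit, counted
 * from the MSB).  With the extended hash (width 9), the igaddr
 * registers double the table to 512 entries.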
 */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(MAC_ADDR_LEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31-whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);

	return;
}


/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	int idx;
	char tmpbuf[MAC_ADDR_LEN];
	u32 tempval;
	u32 __iomem *macptr = &priv->regs->macstnaddr1;
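
	/* Each exact-match entry occupies a pair of 32-bit registers,
	 * so entry num lives two registers past macstnaddr1. */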
	macptr += num*2;

	/* Now copy it into the mac registers backwards, cuz */
	/* little endian is silly */
	for (idx = 0; idx < MAC_ADDR_LEN; idx++)
		tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];

	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
}

/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Save ievent for future reference */
	u32 events = gfar_read(&priv->regs->ievent);

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, IEVENT_ERR_MASK);

	/* Hmm... */
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
		       dev->name, events, gfar_read(&priv->regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		priv->stats.tx_errors++;

		if (events & IEVENT_LC)
			priv->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			priv->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			if (netif_msg_tx_err(priv))
				printk(KERN_DEBUG "%s: underrun.  packet dropped.\n",
				       dev->name);
			priv->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			/* Reactivate the Tx Queues */
			gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
		}
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
	}
	if (events & IEVENT_BSY) {
		priv->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, dev_id, regs);

#ifndef CONFIG_GFAR_NAPI
		/* Clear the halt bit in RSTAT */
		gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
#endif

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n",
			       dev->name,
			       gfar_read(&priv->regs->rstat));
	}
	if (events & IEVENT_BABR) {
		priv->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: babbling error\n", dev->name);
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: EBERR\n", dev->name);
	}
	if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
		printk(KERN_DEBUG "%s: control frame\n", dev->name);

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: babt error\n", dev->name);
	}
	return IRQ_HANDLED;
}

/* Structure for a device driver */
static struct platform_driver gfar_driver = {
	.probe = gfar_probe,
	.remove = gfar_remove,
	.driver	= {
		.name = "fsl-gianfar",
	},
};

static int __init gfar_init(void)
{
	int err = gfar_mdio_init();

	if (err)
		return err;

	err = platform_driver_register(&gfar_driver);

	if (err)
		gfar_mdio_exit();

	return err;
}

static void __exit gfar_exit(void)
{
	platform_driver_unregister(&gfar_driver);
	gfar_mdio_exit();
}

module_init(gfar_init);
module_exit(gfar_exit);