/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 *
 * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through platform_device.  Structures which
 * define the configuration needed by the board are defined in a
 * board structure in arch/ppc/platforms (though I do not
 * discount the possibility that other architectures could one
 * day be supported.)
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors.  The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed).  In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit.  Without NAPI, the packet(s) will be handled
 * immediately.  Both methods will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets).  The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in.  The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted.  Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
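/* Illustrative sketch (not driver code): the ring walk described above,
 * in outline.  process_bd() is a hypothetical stand-in for the real
 * per-descriptor work done in gfar_clean_rx_ring() below.
 *
 *	struct rxbd8 *bdp = priv->cur_rx;
 *
 *	while (!(bdp->status & RXBD_EMPTY)) {
 *		process_bd(bdp);		// handle one received frame
 *		if (bdp->status & RXBD_WRAP)	// last BD: wrap to the start
 *			bdp = priv->rx_bd_base;
 *		else
 *			bdp++;
 *	}
 */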
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>

#include "gianfar.h"
#include "gianfar_mii.h"

#define TX_TIMEOUT	(1*HZ)
#define SKB_ALLOC_TIMEOUT 1000000
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

#ifdef CONFIG_GFAR_NAPI
#define RECEIVE(x) netif_receive_skb(x)
#else
#define RECEIVE(x) netif_rx(x)
#endif

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp);
static struct net_device_stats *gfar_get_stats(struct net_device *dev);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs);
static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs);
static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *pdev);
static int gfar_remove(struct platform_device *pdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
#ifdef CONFIG_GFAR_NAPI
static int gfar_poll(struct net_device *dev, int *budget);
#endif
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
static void gfar_vlan_rx_register(struct net_device *netdev,
		struct vlan_group *grp);
static void gfar_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
void gfar_halt(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);

extern struct ethtool_ops gfar_ethtool_ops;

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return (priv->vlan_enable || priv->rx_csum_enable);
}
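/* A note on FCBs, inferred from the code below: when gfar_uses_fcb()
 * returns 1, the controller prepends a frame control block (struct rxfcb,
 * GMAC_FCB_LEN bytes) to every received frame, carrying checksum and VLAN
 * results; gfar_get_fcb() strips it before the skb goes up the stack.  On
 * transmit, gfar_add_fcb() likewise pushes a struct txfcb when the
 * controller should insert checksums or VLAN tags. */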
/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct platform_device *pdev)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct gianfar_platform_data *einfo;
	struct resource *r;
	int idx;
	int err = 0;

	einfo = (struct gianfar_platform_data *) pdev->dev.platform_data;

	if (NULL == einfo) {
		printk(KERN_ERR "gfar %d: Missing additional data!\n",
		       pdev->id);

		return -ENODEV;
	}

	/* Create an ethernet device instance */
	dev = alloc_etherdev(sizeof (*priv));

	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);

	/* Set the info in the priv to the current info */
	priv->einfo = einfo;

	/* fill out IRQ fields */
	if (einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		priv->interruptTransmit = platform_get_irq_byname(pdev, "tx");
		priv->interruptReceive = platform_get_irq_byname(pdev, "rx");
		priv->interruptError = platform_get_irq_byname(pdev, "error");
		if (priv->interruptTransmit < 0 || priv->interruptReceive < 0 || priv->interruptError < 0)
			goto regs_fail;
	} else {
		priv->interruptTransmit = platform_get_irq(pdev, 0);
		if (priv->interruptTransmit < 0)
			goto regs_fail;
	}

	/* get a pointer to the register memory */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->regs = ioremap(r->start, sizeof (struct gfar));

	if (NULL == priv->regs) {
		err = -ENOMEM;
		goto regs_fail;
	}

	spin_lock_init(&priv->lock);

	platform_set_drvdata(pdev, dev);

	/* Stop the DMA engine now, in case it was running before */
	/* (The firmware could have used it, and left it running). */
	/* To do this, we write Graceful Receive Stop and Graceful */
	/* Transmit Stop, and then wait until the corresponding bits */
	/* in IEVENT indicate the stops have completed. */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&priv->regs->dmactrl, tempval);

	tempval = gfar_read(&priv->regs->dmactrl);
	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&priv->regs->dmactrl, tempval);

	while (!(gfar_read(&priv->regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC)))
		cpu_relax();

	/* Reset MAC layer */
	gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);

	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
	gfar_write(&priv->regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS);

	/* Initialize ECNTRL */
	gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Copy the station address into the dev structure, */
	memcpy(dev->dev_addr, einfo->mac_addr, MAC_ADDR_LEN);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) (priv->regs);

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* Fill in the dev structure */
	dev->open = gfar_enet_open;
	dev->hard_start_xmit = gfar_start_xmit;
	dev->tx_timeout = gfar_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_GFAR_NAPI
	dev->poll = gfar_poll;
	dev->weight = GFAR_DEV_WEIGHT;
#endif
	dev->stop = gfar_close;
	dev->get_stats = gfar_get_stats;
	dev->change_mtu = gfar_change_mtu;
	dev->mtu = 1500;
	dev->set_multicast_list = gfar_set_multi;

	dev->ethtool_ops = &gfar_ethtool_ops;

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		priv->rx_csum_enable = 1;
		dev->features |= NETIF_F_IP_CSUM;
	} else
		priv->rx_csum_enable = 0;

	priv->vlgrp = NULL;

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->vlan_rx_register = gfar_vlan_rx_register;
		dev->vlan_rx_kill_vid = gfar_vlan_rx_kill_vid;

		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

		priv->vlan_enable = 1;
	}

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &priv->regs->igaddr0;
		priv->hash_regs[1] = &priv->regs->igaddr1;
		priv->hash_regs[2] = &priv->regs->igaddr2;
		priv->hash_regs[3] = &priv->regs->igaddr3;
		priv->hash_regs[4] = &priv->regs->igaddr4;
		priv->hash_regs[5] = &priv->regs->igaddr5;
		priv->hash_regs[6] = &priv->regs->igaddr6;
		priv->hash_regs[7] = &priv->regs->igaddr7;
		priv->hash_regs[8] = &priv->regs->gaddr0;
		priv->hash_regs[9] = &priv->regs->gaddr1;
		priv->hash_regs[10] = &priv->regs->gaddr2;
		priv->hash_regs[11] = &priv->regs->gaddr3;
		priv->hash_regs[12] = &priv->regs->gaddr4;
		priv->hash_regs[13] = &priv->regs->gaddr5;
		priv->hash_regs[14] = &priv->regs->gaddr6;
		priv->hash_regs[15] = &priv->regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &priv->regs->gaddr0;
		priv->hash_regs[1] = &priv->regs->gaddr1;
		priv->hash_regs[2] = &priv->regs->gaddr2;
		priv->hash_regs[3] = &priv->regs->gaddr3;
		priv->hash_regs[4] = &priv->regs->gaddr4;
		priv->hash_regs[5] = &priv->regs->gaddr5;
		priv->hash_regs[6] = &priv->regs->gaddr6;
		priv->hash_regs[7] = &priv->regs->gaddr7;
	}

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	if (dev->features & NETIF_F_IP_CSUM)
		dev->hard_header_len += GMAC_FCB_LEN;

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
	priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
	priv->rx_ring_size = DEFAULT_RX_RING_SIZE;

	priv->txcoalescing = DEFAULT_TX_COALESCE;
	priv->txcount = DEFAULT_TXCOUNT;
	priv->txtime = DEFAULT_TXTIME;
	priv->rxcoalescing = DEFAULT_RX_COALESCE;
	priv->rxcount = DEFAULT_RXCOUNT;
	priv->rxtime = DEFAULT_RXTIME;

	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
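	/* Illustrative arithmetic (value from netdevice.h assumed here):
	 * NETIF_MSG_IFUP is 0x0020, so (NETIF_MSG_IFUP << 1) - 1 = 0x003f,
	 * i.e. every message class from NETIF_MSG_DRV up to and including
	 * NETIF_MSG_IFUP is on, and everything above is off. */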
printk(KERN_ERR "%s: Cannot register net device, aborting.\n", 349 dev->name); 350 goto register_fail; 351 } 352 353 /* Create all the sysfs files */ 354 gfar_init_sysfs(dev); 355 356 /* Print out the device info */ 357 printk(KERN_INFO DEVICE_NAME, dev->name); 358 for (idx = 0; idx < 6; idx++) 359 printk("%2.2x%c", dev->dev_addr[idx], idx == 5 ? ' ' : ':'); 360 printk("\n"); 361 362 /* Even more device info helps when determining which kernel */ 363 /* provided which set of benchmarks. */ 364#ifdef CONFIG_GFAR_NAPI 365 printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); 366#else 367 printk(KERN_INFO "%s: Running with NAPI disabled\n", dev->name); 368#endif 369 printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n", 370 dev->name, priv->rx_ring_size, priv->tx_ring_size); 371 372 return 0; 373 374register_fail: 375 iounmap(priv->regs); 376regs_fail: 377 free_netdev(dev); 378 return err; 379} 380 381static int gfar_remove(struct platform_device *pdev) 382{ 383 struct net_device *dev = platform_get_drvdata(pdev); 384 struct gfar_private *priv = netdev_priv(dev); 385 386 platform_set_drvdata(pdev, NULL); 387 388 iounmap(priv->regs); 389 free_netdev(dev); 390 391 return 0; 392} 393 394 395/* Initializes driver's PHY state, and attaches to the PHY. 396 * Returns 0 on success. 397 */ 398static int init_phy(struct net_device *dev) 399{ 400 struct gfar_private *priv = netdev_priv(dev); 401 uint gigabit_support = 402 priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ? 403 SUPPORTED_1000baseT_Full : 0; 404 struct phy_device *phydev; 405 char phy_id[BUS_ID_SIZE]; 406 407 priv->oldlink = 0; 408 priv->oldspeed = 0; 409 priv->oldduplex = -1; 410 411 snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, priv->einfo->bus_id, priv->einfo->phy_id); 412 413 phydev = phy_connect(dev, phy_id, &adjust_link, 0); 414 415 if (IS_ERR(phydev)) { 416 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); 417 return PTR_ERR(phydev); 418 } 419 420 /* Remove any features not supported by the controller */ 421 phydev->supported &= (GFAR_SUPPORTED | gigabit_support); 422 phydev->advertising = phydev->supported; 423 424 priv->phydev = phydev; 425 426 return 0; 427} 428 429static void init_registers(struct net_device *dev) 430{ 431 struct gfar_private *priv = netdev_priv(dev); 432 433 /* Clear IEVENT */ 434 gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR); 435 436 /* Initialize IMASK */ 437 gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR); 438 439 /* Init hash registers to zero */ 440 gfar_write(&priv->regs->igaddr0, 0); 441 gfar_write(&priv->regs->igaddr1, 0); 442 gfar_write(&priv->regs->igaddr2, 0); 443 gfar_write(&priv->regs->igaddr3, 0); 444 gfar_write(&priv->regs->igaddr4, 0); 445 gfar_write(&priv->regs->igaddr5, 0); 446 gfar_write(&priv->regs->igaddr6, 0); 447 gfar_write(&priv->regs->igaddr7, 0); 448 449 gfar_write(&priv->regs->gaddr0, 0); 450 gfar_write(&priv->regs->gaddr1, 0); 451 gfar_write(&priv->regs->gaddr2, 0); 452 gfar_write(&priv->regs->gaddr3, 0); 453 gfar_write(&priv->regs->gaddr4, 0); 454 gfar_write(&priv->regs->gaddr5, 0); 455 gfar_write(&priv->regs->gaddr6, 0); 456 gfar_write(&priv->regs->gaddr7, 0); 457 458 /* Zero out the rmon mib registers if it has them */ 459 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { 460 memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib)); 461 462 /* Mask off the CAM interrupts */ 463 gfar_write(&priv->regs->rmon.cam1, 0xffffffff); 464 gfar_write(&priv->regs->rmon.cam2, 0xffffffff); 465 } 466 467 /* Initialize the max receive buffer length */ 468 
	/* Initialize the max receive buffer length */
	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);

	/* Assign the TBI an address which won't conflict with the PHYs */
	gfar_write(&priv->regs->tbipa, TBIPA_VALUE);
}


/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	/* Mask all interrupts */
	gfar_write(&regs->imask, IMASK_INIT_CLEAR);

	/* Clear all interrupts */
	gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&priv->regs->dmactrl);
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
	    != (DMACTRL_GRS | DMACTRL_GTS)) {
		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
		gfar_write(&priv->regs->dmactrl, tempval);

		while (!(gfar_read(&priv->regs->ievent) &
			 (IEVENT_GRSC | IEVENT_GTSC)))
			cpu_relax();
	}

	/* Disable Rx and Tx */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}

void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	unsigned long flags;

	phy_stop(priv->phydev);

	/* Lock it down */
	spin_lock_irqsave(&priv->lock, flags);

	gfar_halt(dev);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Free the IRQs */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		free_irq(priv->interruptError, dev);
		free_irq(priv->interruptTransmit, dev);
		free_irq(priv->interruptReceive, dev);
	} else {
		free_irq(priv->interruptTransmit, dev);
	}

	free_skb_resources(priv);

	dma_free_coherent(NULL,
			sizeof(struct txbd8)*priv->tx_ring_size
			+ sizeof(struct rxbd8)*priv->rx_ring_size,
			priv->tx_bd_base,
			gfar_read(&regs->tbase0));
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff */
static void free_skb_resources(struct gfar_private *priv)
{
	struct rxbd8 *rxbdp;
	struct txbd8 *txbdp;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	txbdp = priv->tx_bd_base;

	for (i = 0; i < priv->tx_ring_size; i++) {

		if (priv->tx_skbuff[i]) {
			dma_unmap_single(NULL, txbdp->bufPtr,
					txbdp->length,
					DMA_TO_DEVICE);
			dev_kfree_skb_any(priv->tx_skbuff[i]);
			priv->tx_skbuff[i] = NULL;
		}

		/* Advance to the next BD so each unmap above matches its skb */
		txbdp++;
	}

	kfree(priv->tx_skbuff);

	rxbdp = priv->rx_bd_base;

	/* rx_skbuff is not guaranteed to be allocated, so only
	 * free it and its contents if it is allocated */
	if(priv->rx_skbuff != NULL) {
		for (i = 0; i < priv->rx_ring_size; i++) {
			if (priv->rx_skbuff[i]) {
				dma_unmap_single(NULL, rxbdp->bufPtr,
						priv->rx_buffer_size,
						DMA_FROM_DEVICE);

				dev_kfree_skb_any(priv->rx_skbuff[i]);
				priv->rx_skbuff[i] = NULL;
			}

			rxbdp->status = 0;
			rxbdp->length = 0;
			rxbdp->bufPtr = 0;

			rxbdp++;
		}

		kfree(priv->rx_skbuff);
	}
}

void gfar_start(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&priv->regs->dmactrl, tempval);

	/* Clear THLT, so that the DMA starts polling now */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&priv->regs->dmactrl, tempval);

	/* Unmask the interrupts we look for */
	gfar_write(&regs->imask, IMASK_DEFAULT);
}

/* Bring the controller up and running */
int startup_gfar(struct net_device *dev)
{
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	dma_addr_t addr;
	unsigned long vaddr;
	int i;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	int err = 0;
	u32 rctrl = 0;
	u32 attrs = 0;

	gfar_write(&regs->imask, IMASK_INIT_CLEAR);

	/* Allocate memory for the buffer descriptors */
	vaddr = (unsigned long) dma_alloc_coherent(NULL,
			sizeof (struct txbd8) * priv->tx_ring_size +
			sizeof (struct rxbd8) * priv->rx_ring_size,
			&addr, GFP_KERNEL);

	if (vaddr == 0) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
			       dev->name);
		return -ENOMEM;
	}

	priv->tx_bd_base = (struct txbd8 *) vaddr;

	/* enet DMA only understands physical addresses */
	gfar_write(&regs->tbase0, addr);

	/* Start the rx descriptor ring where the tx ring leaves off */
	addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
	vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size;
	priv->rx_bd_base = (struct rxbd8 *) vaddr;
	gfar_write(&regs->rbase0, addr);
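	/* Sketch of the single coherent allocation set up above:
	 *
	 *   vaddr/addr -> +-------------------------------------+ <- tbase0
	 *                 | tx_ring_size * sizeof(struct txbd8) |
	 *                 +-------------------------------------+ <- rbase0
	 *                 | rx_ring_size * sizeof(struct rxbd8) |
	 *                 +-------------------------------------+
	 *
	 * tbase0/rbase0 hold the physical (DMA) addresses, while tx_bd_base
	 * and rx_bd_base keep the matching kernel virtual addresses. */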

	/* Setup the skbuff rings */
	priv->tx_skbuff =
	    (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
					priv->tx_ring_size, GFP_KERNEL);

	if (NULL == priv->tx_skbuff) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
			       dev->name);
		err = -ENOMEM;
		goto tx_skb_fail;
	}

	for (i = 0; i < priv->tx_ring_size; i++)
		priv->tx_skbuff[i] = NULL;

	priv->rx_skbuff =
	    (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
					priv->rx_ring_size, GFP_KERNEL);

	if (NULL == priv->rx_skbuff) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
			       dev->name);
		err = -ENOMEM;
		goto rx_skb_fail;
	}

	for (i = 0; i < priv->rx_ring_size; i++)
		priv->rx_skbuff[i] = NULL;

	/* Initialize some variables in our dev structure */
	priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
	priv->cur_rx = priv->rx_bd_base;
	priv->skb_curtx = priv->skb_dirtytx = 0;
	priv->skb_currx = 0;

	/* Initialize Transmit Descriptor Ring */
	txbdp = priv->tx_bd_base;
	for (i = 0; i < priv->tx_ring_size; i++) {
		txbdp->status = 0;
		txbdp->length = 0;
		txbdp->bufPtr = 0;
		txbdp++;
	}

	/* Set the last descriptor in the ring to indicate wrap */
	txbdp--;
	txbdp->status |= TXBD_WRAP;

	rxbdp = priv->rx_bd_base;
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct sk_buff *skb = NULL;

		rxbdp->status = 0;

		skb = gfar_new_skb(dev, rxbdp);

		priv->rx_skbuff[i] = skb;

		rxbdp++;
	}

	/* Set the last descriptor in the ring to wrap */
	rxbdp--;
	rxbdp->status |= RXBD_WRAP;

	/* If the device has multiple interrupts, register for
	 * them.  Otherwise, only register for the one */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive */
		if (request_irq(priv->interruptError, gfar_error,
				0, "enet_error", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
				       dev->name, priv->interruptError);

			err = -1;
			goto err_irq_fail;
		}

		if (request_irq(priv->interruptTransmit, gfar_transmit,
				0, "enet_tx", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
				       dev->name, priv->interruptTransmit);

			err = -1;

			goto tx_irq_fail;
		}

		if (request_irq(priv->interruptReceive, gfar_receive,
				0, "enet_rx", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
				       dev->name, priv->interruptReceive);

			err = -1;
			goto rx_irq_fail;
		}
	} else {
		if (request_irq(priv->interruptTransmit, gfar_interrupt,
				0, "gfar_interrupt", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
				       dev->name, priv->interruptTransmit);

			err = -1;
			goto err_irq_fail;
		}
	}

	phy_start(priv->phydev);

	/* Configure the coalescing support */
	if (priv->txcoalescing)
		gfar_write(&regs->txic,
			   mk_ic_value(priv->txcount, priv->txtime));
	else
		gfar_write(&regs->txic, 0);

	if (priv->rxcoalescing)
		gfar_write(&regs->rxic,
			   mk_ic_value(priv->rxcount, priv->rxtime));
	else
		gfar_write(&regs->rxic, 0);
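	/* Coalescing, illustratively: mk_ic_value() (from gianfar.h) packs a
	 * frame-count threshold and a timer threshold into txic/rxic, so an
	 * interrupt fires once either priv->txcount frames have completed or
	 * priv->txtime has elapsed, rather than once per frame (this matches
	 * the theory-of-operation comment at the top of the file).  Writing 0
	 * disables coalescing entirely. */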

	if (priv->rx_csum_enable)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(dev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->vlan_enable)
		rctrl |= RCTRL_VLAN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Init rctrl based on our settings */
	gfar_write(&priv->regs->rctrl, rctrl);

	if (dev->features & NETIF_F_IP_CSUM)
		gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&priv->regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&priv->regs->attr, attrs);

	gfar_write(&priv->regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&priv->regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&priv->regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);

	/* Start the controller */
	gfar_start(dev);

	return 0;

rx_irq_fail:
	free_irq(priv->interruptTransmit, dev);
tx_irq_fail:
	free_irq(priv->interruptError, dev);
err_irq_fail:
rx_skb_fail:
	free_skb_resources(priv);
tx_skb_fail:
	dma_free_coherent(NULL,
			sizeof(struct txbd8)*priv->tx_ring_size
			+ sizeof(struct rxbd8)*priv->rx_ring_size,
			priv->tx_bd_base,
			gfar_read(&regs->tbase0));

	return err;
}

/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
	int err;

	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);

	if(err)
		return err;

	err = startup_gfar(dev);

	netif_start_queue(dev);

	return err;
}

static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb, struct txbd8 *bdp)
{
	struct txfcb *fcb = (struct txfcb *)skb_push (skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
{
	u8 flags = 0;

	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload.  We set it to checksum, using a pseudo-header
	 * we provide
	 */
	flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is */
	/* And provide the already calculated phcs */
	if (skb->nh.iph->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = skb->h.uh->check;
	} else
		fcb->phcs = skb->h.th->check;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr */
	fcb->l3os = (u16)(skb->nh.raw - skb->data - GMAC_FCB_LEN);
	fcb->l4os = (u16)(skb->h.raw - skb->nh.raw);

	fcb->flags = flags;
}
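/* Worked example (illustrative, typical header sizes assumed): for an
 * untagged IPv4/TCP frame, after gfar_add_fcb() has pushed the FCB,
 * skb->data points at the FCB and the 14-byte Ethernet header follows,
 * so l3os = (GMAC_FCB_LEN + 14) - GMAC_FCB_LEN = 14, and l4os is the IP
 * header length, usually 20. */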
void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = vlan_tx_tag_get(skb);
}

/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp;
	u16 status;

	/* Update transmit stats */
	priv->stats.tx_bytes += skb->len;

	/* Lock priv now */
	spin_lock_irq(&priv->lock);

	/* Point at the first free tx descriptor */
	txbdp = priv->cur_tx;

	/* Clear all but the WRAP status flags */
	status = txbdp->status & TXBD_WRAP;

	/* Set up checksumming */
	if (likely((dev->features & NETIF_F_IP_CSUM)
			&& (CHECKSUM_HW == skb->ip_summed))) {
		fcb = gfar_add_fcb(skb, txbdp);
		status |= TXBD_TOE;
		gfar_tx_checksum(skb, fcb);
	}

	if (priv->vlan_enable &&
	    unlikely(priv->vlgrp && vlan_tx_tag_present(skb))) {
		if (unlikely(NULL == fcb)) {
			fcb = gfar_add_fcb(skb, txbdp);
			status |= TXBD_TOE;
		}

		gfar_tx_vlan(skb, fcb);
	}

	/* Set buffer length and pointer */
	txbdp->length = skb->len;
	txbdp->bufPtr = dma_map_single(NULL, skb->data,
			skb->len, DMA_TO_DEVICE);

	/* Save the skb pointer so we can free it later */
	priv->tx_skbuff[priv->skb_curtx] = skb;

	/* Update the current skb pointer (wrapping if this was the last) */
	priv->skb_curtx =
	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);

	/* Flag the BD as interrupt-causing */
	status |= TXBD_INTERRUPT;

	/* Flag the BD as ready to go, last in frame, and */
	/* in need of CRC */
	status |= (TXBD_READY | TXBD_LAST | TXBD_CRC);

	dev->trans_start = jiffies;

	txbdp->status = status;

	/* If this was the last BD in the ring, the next one */
	/* is at the beginning of the ring */
	if (txbdp->status & TXBD_WRAP)
		txbdp = priv->tx_bd_base;
	else
		txbdp++;

	/* If the next BD still needs to be cleaned up, then the bds
	   are full.  We need to tell the kernel to stop sending us stuff. */
	if (txbdp == priv->dirty_tx) {
		netif_stop_queue(dev);

		priv->stats.tx_fifo_errors++;
	}

	/* Update the current txbd to the next one */
	priv->cur_tx = txbdp;

	/* Tell the DMA to go go go */
	gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);

	/* Unlock priv */
	spin_unlock_irq(&priv->lock);

	return 0;
}

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	netif_stop_queue(dev);

	return 0;
}

/* returns a net_device_stats structure pointer */
static struct net_device_stats * gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	return &(priv->stats);
}

/* Changes the mac address if the controller is not running. */
int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}


/* Enables and disables VLAN insertion/extraction */
static void gfar_vlan_rx_register(struct net_device *dev,
		struct vlan_group *grp)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	u32 tempval;

	spin_lock_irqsave(&priv->lock, flags);

	priv->vlgrp = grp;

	if (grp) {
		/* Enable VLAN tag insertion */
		tempval = gfar_read(&priv->regs->tctrl);
		tempval |= TCTRL_VLINS;

		gfar_write(&priv->regs->tctrl, tempval);

		/* Enable VLAN tag extraction */
		tempval = gfar_read(&priv->regs->rctrl);
		tempval |= RCTRL_VLEX;
		gfar_write(&priv->regs->rctrl, tempval);
	} else {
		/* Disable VLAN tag insertion */
		tempval = gfar_read(&priv->regs->tctrl);
		tempval &= ~TCTRL_VLINS;
		gfar_write(&priv->regs->tctrl, tempval);

		/* Disable VLAN tag extraction */
		tempval = gfar_read(&priv->regs->rctrl);
		tempval &= ~RCTRL_VLEX;
		gfar_write(&priv->regs->rctrl, tempval);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}


static void gfar_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	if (priv->vlgrp)
		priv->vlgrp->vlan_devices[vid] = NULL;

	spin_unlock_irqrestore(&priv->lock, flags);
}


static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	int tempsize, tempval;
	struct gfar_private *priv = netdev_priv(dev);
	int oldsize = priv->rx_buffer_size;
	int frame_size = new_mtu + ETH_HLEN;

	if (priv->vlan_enable)
		frame_size += VLAN_ETH_HLEN;

	if (gfar_uses_fcb(priv))
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: Invalid MTU setting\n",
			       dev->name);
		return -EINVAL;
	}

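	/* Rounding, illustratively: the next statement rounds up to a
	 * multiple of INCREMENTAL_BUFFER_SIZE (512 in gianfar.h is assumed
	 * here), e.g. frame_size 1514 -> 1536.  Note it always adds a full
	 * increment, so an exact multiple like 1536 would become 2048. */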
	tempsize =
	    (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
	    INCREMENTAL_BUFFER_SIZE;

	/* Only stop and start the controller if it isn't already
	 * stopped, and we changed something */
	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		stop_gfar(dev);

	priv->rx_buffer_size = tempsize;

	dev->mtu = new_mtu;

	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
	gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length */
	tempval = gfar_read(&priv->regs->maccfg2);

	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
	else
		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

	gfar_write(&priv->regs->maccfg2, tempval);

	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		startup_gfar(dev);

	return 0;
}

/* gfar_timeout gets called when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem. */
static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	priv->stats.tx_errors++;

	if (dev->flags & IFF_UP) {
		stop_gfar(dev);
		startup_gfar(dev);
	}

	netif_schedule(dev);
}

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct gfar_private *priv = netdev_priv(dev);
	struct txbd8 *bdp;

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);

	/* Lock priv */
	spin_lock(&priv->lock);
	bdp = priv->dirty_tx;
	while ((bdp->status & TXBD_READY) == 0) {
		/* If dirty_tx and cur_tx are the same, then either the */
		/* ring is empty or full now (it could only be full in the beginning, */
		/* obviously).  If it is empty, we are done. */
		if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
			break;

		priv->stats.tx_packets++;

		/* Deferred means some collisions occurred during transmit, */
		/* but we eventually sent the packet. */
		if (bdp->status & TXBD_DEF)
			priv->stats.collisions++;

		/* Free the sk buffer associated with this TxBD */
		dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);
		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
		priv->skb_dirtytx =
		    (priv->skb_dirtytx +
		     1) & TX_RING_MOD_MASK(priv->tx_ring_size);

		/* update bdp to point at next bd in the ring (wrapping if necessary) */
		if (bdp->status & TXBD_WRAP)
			bdp = priv->tx_bd_base;
		else
			bdp++;

		/* Move dirty_tx to be the next bd */
		priv->dirty_tx = bdp;

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	} /* while ((bdp->status & TXBD_READY) == 0) */

	/* If we are coalescing the interrupts, reset the timer */
	/* Otherwise, clear it */
	if (priv->txcoalescing)
		gfar_write(&priv->regs->txic,
			   mk_ic_value(priv->txcount, priv->txtime));
	else
		gfar_write(&priv->regs->txic, 0);

	spin_unlock(&priv->lock);

	return IRQ_HANDLED;
}
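
/* Alignment, illustratively: gfar_new_skb() below over-allocates by
 * RXBUF_ALIGNMENT and then skb_reserve()s just enough bytes to bring
 * skb->data to an RXBUF_ALIGNMENT boundary.  Assuming the 64-byte value
 * from gianfar.h, data at an offset ending in 0x30 gets
 * alignamount = 64 - 0x30 = 16; already-aligned data gets a full 64-byte
 * reserve, which is harmless given the over-allocation. */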
struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
{
	unsigned int alignamount;
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;
	unsigned int timeout = SKB_ALLOC_TIMEOUT;

	/* We have to allocate the skb, so keep trying till we succeed */
	while ((!skb) && timeout--)
		skb = dev_alloc_skb(priv->rx_buffer_size + RXBUF_ALIGNMENT);

	if (NULL == skb)
		return NULL;

	alignamount = RXBUF_ALIGNMENT -
		(((unsigned) skb->data) & (RXBUF_ALIGNMENT - 1));

	/* We need the data buffer to be aligned properly.  We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb, alignamount);

	skb->dev = dev;

	bdp->bufPtr = dma_map_single(NULL, skb->data,
			priv->rx_buffer_size, DMA_FROM_DEVICE);

	bdp->length = 0;

	/* Mark the buffer empty */
	bdp->status |= (RXBD_EMPTY | RXBD_INTERRUPT);

	return skb;
}

static inline void count_errors(unsigned short status, struct gfar_private *priv)
{
	struct net_device_stats *stats = &priv->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors
	 * matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

		estats->rx_trunc++;

		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			estats->rx_large++;
		else
			estats->rx_short++;
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		estats->rx_nonoctet++;
	}
	if (status & RXBD_CRCERR) {
		estats->rx_crcerr++;
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		estats->rx_overrun++;
		stats->rx_crc_errors++;
	}
}

irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct gfar_private *priv = netdev_priv(dev);

#ifdef CONFIG_GFAR_NAPI
	u32 tempval;
#endif

	/* Clear IEVENT, so rx interrupt isn't called again
	 * because of this interrupt */
	gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);

	/* support NAPI */
#ifdef CONFIG_GFAR_NAPI
	if (netif_rx_schedule_prep(dev)) {
		tempval = gfar_read(&priv->regs->imask);
		tempval &= IMASK_RX_DISABLED;
		gfar_write(&priv->regs->imask, tempval);

		__netif_rx_schedule(dev);
	} else {
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
			       dev->name, gfar_read(&priv->regs->ievent),
			       gfar_read(&priv->regs->imask));
	}
#else

	spin_lock(&priv->lock);
	gfar_clean_rx_ring(dev, priv->rx_ring_size);

	/* If we are coalescing interrupts, update the timer */
	/* Otherwise, clear it */
	if (priv->rxcoalescing)
		gfar_write(&priv->regs->rxic,
			   mk_ic_value(priv->rxcount, priv->rxtime));
	else
		gfar_write(&priv->regs->rxic, 0);

	spin_unlock(&priv->lock);
#endif

	return IRQ_HANDLED;
}

static inline int gfar_rx_vlan(struct sk_buff *skb,
		struct vlan_group *vlgrp, unsigned short vlctl)
{
#ifdef CONFIG_GFAR_NAPI
	return vlan_hwaccel_receive_skb(skb, vlgrp, vlctl);
#else
	return vlan_hwaccel_rx(skb, vlgrp, vlctl);
#endif
}
static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary.  Otherwise, it is */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
}


static inline struct rxfcb *gfar_get_fcb(struct sk_buff *skb)
{
	struct rxfcb *fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb */
	skb_pull(skb, GMAC_FCB_LEN);

	return fcb;
}

/* gfar_process_frame() -- handle one incoming packet if skb
 * isn't NULL. */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int length)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	if (NULL == skb) {
		if (netif_msg_rx_err(priv))
			printk(KERN_WARNING "%s: Missing skb!!.\n", dev->name);
		priv->stats.rx_dropped++;
		priv->extra_stats.rx_skbmissing++;
	} else {
		int ret;

		/* Prep the skb for the packet */
		skb_put(skb, length);

		/* Grab the FCB if there is one */
		if (gfar_uses_fcb(priv))
			fcb = gfar_get_fcb(skb);

		/* Remove the padded bytes, if there are any */
		if (priv->padding)
			skb_pull(skb, priv->padding);

		if (priv->rx_csum_enable)
			gfar_rx_checksum(skb, fcb);

		/* Tell the skb what kind of packet this is */
		skb->protocol = eth_type_trans(skb, dev);

		/* Send the packet up the stack */
		if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
			ret = gfar_rx_vlan(skb, priv->vlgrp, fcb->vlctl);
		else
			ret = RECEIVE(skb);

		if (NET_RX_DROP == ret)
			priv->extra_stats.kernel_dropped++;
	}

	return 0;
}

/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached.  Returns the number
 * of frames handled
 */
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
{
	struct rxbd8 *bdp;
	struct sk_buff *skb;
	u16 pkt_len;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = priv->cur_rx;

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		skb = priv->rx_skbuff[priv->skb_currx];

		if (!(bdp->status &
		      (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET
		       | RXBD_CRCERR | RXBD_OVERRUN | RXBD_TRUNCATED))) {
			/* Increment the number of packets */
			priv->stats.rx_packets++;
			howmany++;

			/* Remove the FCS from the packet length */
			pkt_len = bdp->length - 4;

			gfar_process_frame(dev, skb, pkt_len);

			priv->stats.rx_bytes += pkt_len;
		} else {
			count_errors(bdp->status, priv);

			if (skb)
				dev_kfree_skb_any(skb);

			priv->rx_skbuff[priv->skb_currx] = NULL;
		}

		dev->last_rx = jiffies;

		/* Clear the status flags for this buffer */
		bdp->status &= ~RXBD_STATS;

		/* Add another skb for the future */
		skb = gfar_new_skb(dev, bdp);
		priv->rx_skbuff[priv->skb_currx] = skb;

		/* Update to the next pointer */
		if (bdp->status & RXBD_WRAP)
			bdp = priv->rx_bd_base;
		else
			bdp++;

		/* update to point at the next skb */
		priv->skb_currx =
		    (priv->skb_currx +
		     1) & RX_RING_MOD_MASK(priv->rx_ring_size);

	}

	/* Update the current rxbd pointer to be the next one */
	priv->cur_rx = bdp;

	/* If no packets have arrived since the
	 * last one we processed, clear the IEVENT RX and
	 * BSY bits so that another interrupt won't be
	 * generated when we set IMASK */
	if (bdp->status & RXBD_EMPTY)
		gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);

	return howmany;
}
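
/* A note on the poll contract used below: gfar_poll() returns 0 when it
 * drained the ring within its quota (after calling netif_rx_complete()
 * and re-enabling receive interrupts), and 1 when work remains so the
 * kernel will poll again.  Both *budget and dev->quota are decremented
 * by the number of frames actually handled. */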
#ifdef CONFIG_GFAR_NAPI
static int gfar_poll(struct net_device *dev, int *budget)
{
	int howmany;
	struct gfar_private *priv = netdev_priv(dev);
	int rx_work_limit = *budget;

	if (rx_work_limit > dev->quota)
		rx_work_limit = dev->quota;

	howmany = gfar_clean_rx_ring(dev, rx_work_limit);

	dev->quota -= howmany;
	rx_work_limit -= howmany;
	*budget -= howmany;

	if (rx_work_limit >= 0) {
		netif_rx_complete(dev);

		/* Clear the halt bit in RSTAT */
		gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);

		gfar_write(&priv->regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer */
		/* Otherwise, clear it */
		if (priv->rxcoalescing)
			gfar_write(&priv->regs->rxic,
				   mk_ic_value(priv->rxcount, priv->rxtime));
		else
			gfar_write(&priv->regs->rxic, 0);
	}

	return (rx_work_limit < 0) ? 1 : 0;
}
#endif

/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Save ievent for future reference */
	u32 events = gfar_read(&priv->regs->ievent);

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, events);

	/* Check for reception */
	if ((events & IEVENT_RXF0) || (events & IEVENT_RXB0))
		gfar_receive(irq, dev_id, regs);

	/* Check for transmit completion */
	if ((events & IEVENT_TXF) || (events & IEVENT_TXB))
		gfar_transmit(irq, dev_id, regs);

	/* Update error statistics */
	if (events & IEVENT_TXE) {
		priv->stats.tx_errors++;

		if (events & IEVENT_LC)
			priv->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			priv->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			if (netif_msg_tx_err(priv))
				printk(KERN_WARNING "%s: tx underrun. dropped packet\n", dev->name);
			priv->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			/* Reactivate the Tx Queues */
			gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
		}
	}
	if (events & IEVENT_BSY) {
		priv->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, dev_id, regs);

#ifndef CONFIG_GFAR_NAPI
		/* Clear the halt bit in RSTAT */
		gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
#endif

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n",
			       dev->name,
			       gfar_read(&priv->regs->rstat));
	}
	if (events & IEVENT_BABR) {
		priv->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: babbling error\n", dev->name);
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: EBERR\n", dev->name);
	}
	if ((events & IEVENT_RXC) && (netif_msg_rx_err(priv)))
		printk(KERN_DEBUG "%s: control frame\n", dev->name);

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: babt error\n", dev->name);
	}

	return IRQ_HANDLED;
}

/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	unsigned long flags;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;

	spin_lock_irqsave(&priv->lock, flags);
	if (phydev->link) {
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				if (netif_msg_link(priv))
					printk(KERN_WARNING
					       "%s: Ack!  Speed (%d) is not 10/100/1000!\n",
					       dev->name, phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
			netif_schedule(dev);
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	spin_unlock_irqrestore(&priv->lock, flags);
}

/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed) */
static void gfar_set_multi(struct net_device *dev)
{
	struct dev_mc_list *mc_ptr;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	if(dev->flags & IFF_PROMISC) {
		if (netif_msg_drv(priv))
			printk(KERN_INFO "%s: Entering promiscuous mode.\n",
			       dev->name);
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if(dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if(dev->mc_count == 0)
			return;

		/* Parse the list, and set the appropriate bits */
		for(mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx,
						mc_ptr->dmi_addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
		}
	}

	return;
}


/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	u8 zero_arr[MAC_ADDR_LEN] = {0,0,0,0,0,0};

	for(idx = 1;idx < GFAR_EM_NUM + 1;idx++)
		gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255.  This means that the 3 most significant bits of the
 * hash index select which gaddr register to use, and the 5 other bits
 * indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry. */
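/* Worked example (CRC value assumed for illustration): with hash_width 8
 * and ether_crc() returning 0xd4000000, the hash index is the top 8 bits,
 * 0xd4 = 0b11010100.  The top 3 bits (0b110 = 6) select hash_regs[6], the
 * low 5 bits (0b10100 = 20) select the bit, and 1 << (31 - 20) sets
 * 0x00000800 in that register. */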
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(MAC_ADDR_LEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31-whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);

	return;
}


/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
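/* Illustrative example (address made up): for addr 00:04:9f:00:12:34,
 * tmpbuf becomes 34 12 00 9f 04 00, so on big-endian PowerPC the first
 * register is written with 0x3412009f and the second with 0x0400 in its
 * upper half; only that upper half of the second register appears to be
 * meaningful to the hardware. */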
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	int idx;
	char tmpbuf[MAC_ADDR_LEN];
	u32 tempval;
	u32 __iomem *macptr = &priv->regs->macstnaddr1;

	macptr += num*2;

	/* Now copy it into the mac registers backwards, cuz */
	/* little endian is silly */
	for (idx = 0; idx < MAC_ADDR_LEN; idx++)
		tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];

	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
}

/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Save ievent for future reference */
	u32 events = gfar_read(&priv->regs->ievent);

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, IEVENT_ERR_MASK);

	/* Hmm... */
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
		       dev->name, events, gfar_read(&priv->regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		priv->stats.tx_errors++;

		if (events & IEVENT_LC)
			priv->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			priv->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			if (netif_msg_tx_err(priv))
				printk(KERN_DEBUG "%s: underrun.  packet dropped.\n",
				       dev->name);
			priv->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			/* Reactivate the Tx Queues */
			gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
		}
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
	}
	if (events & IEVENT_BSY) {
		priv->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, dev_id, regs);

#ifndef CONFIG_GFAR_NAPI
		/* Clear the halt bit in RSTAT */
		gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
#endif

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n",
			       dev->name,
			       gfar_read(&priv->regs->rstat));
	}
	if (events & IEVENT_BABR) {
		priv->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: babbling error\n", dev->name);
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: EBERR\n", dev->name);
	}
	if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
		printk(KERN_DEBUG "%s: control frame\n", dev->name);

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: babt error\n", dev->name);
	}
	return IRQ_HANDLED;
}

/* Structure for a device driver */
static struct platform_driver gfar_driver = {
	.probe = gfar_probe,
	.remove = gfar_remove,
	.driver	= {
		.name = "fsl-gianfar",
	},
};

static int __init gfar_init(void)
{
	int err = gfar_mdio_init();

	if (err)
		return err;

	err = platform_driver_register(&gfar_driver);

	if (err)
		gfar_mdio_exit();

	return err;
}

static void __exit gfar_exit(void)
{
	platform_driver_unregister(&gfar_driver);
	gfar_mdio_exit();
}

module_init(gfar_init);
module_exit(gfar_exit);