/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 *
 * Copyright (c) 2002-2006 Freescale Semiconductor, Inc.
 * Copyright (c) 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through platform_device. Structures which
 * define the configuration needed by the board are defined in a
 * board structure in arch/ppc/platforms (though I do not
 * discount the possibility that other architectures could one
 * day be supported).
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. Without NAPI, the packet(s) will be handled
 * immediately. Both methods will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
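/*
 * Illustrative sketch (not part of the driver): a minimal walk of the
 * receive descriptor ring described above. It assumes the rxbd8 layout and
 * the RXBD_EMPTY/RXBD_WRAP/RXBD_STATS flags provided by gianfar.h and used
 * by gfar_clean_rx_ring() below; hand_to_stack() is a hypothetical
 * stand-in for the skb handling done there.
 *
 *	struct rxbd8 *bdp = priv->rx_bd_base;
 *
 *	while (!(bdp->status & RXBD_EMPTY)) {
 *		hand_to_stack(priv, bdp);        // pass skb up, attach a fresh one
 *		bdp->status &= ~RXBD_STATS;      // clear status bits, keep WRAP
 *		if (bdp->status & RXBD_WRAP)     // last BD in the ring?
 *			bdp = priv->rx_bd_base;  // wrap to the start of the ring
 *		else
 *			bdp++;
 *	}
 */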

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>

#include "gianfar.h"
#include "gianfar_mii.h"

#define TX_TIMEOUT (1*HZ)
#define SKB_ALLOC_TIMEOUT 1000000
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

#ifdef CONFIG_GFAR_NAPI
#define RECEIVE(x) netif_receive_skb(x)
#else
#define RECEIVE(x) netif_rx(x)
#endif

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp);
static struct net_device_stats *gfar_get_stats(struct net_device *dev);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *pdev);
static int gfar_remove(struct platform_device *pdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
#ifdef CONFIG_GFAR_NAPI
static int gfar_poll(struct net_device *dev, int *budget);
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
static void gfar_vlan_rx_register(struct net_device *netdev,
                struct vlan_group *grp);
static void gfar_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
void gfar_halt(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);

extern const struct ethtool_ops gfar_ethtool_ops;

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
        return (priv->vlan_enable || priv->rx_csum_enable);
}

/* Set up the ethernet device structure, private data,
 * and anything else we need before we
start */ 163static int gfar_probe(struct platform_device *pdev) 164{ 165 u32 tempval; 166 struct net_device *dev = NULL; 167 struct gfar_private *priv = NULL; 168 struct gianfar_platform_data *einfo; 169 struct resource *r; 170 int idx; 171 int err = 0; 172 173 einfo = (struct gianfar_platform_data *) pdev->dev.platform_data; 174 175 if (NULL == einfo) { 176 printk(KERN_ERR "gfar %d: Missing additional data!\n", 177 pdev->id); 178 179 return -ENODEV; 180 } 181 182 /* Create an ethernet device instance */ 183 dev = alloc_etherdev(sizeof (*priv)); 184 185 if (NULL == dev) 186 return -ENOMEM; 187 188 priv = netdev_priv(dev); 189 190 /* Set the info in the priv to the current info */ 191 priv->einfo = einfo; 192 193 /* fill out IRQ fields */ 194 if (einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 195 priv->interruptTransmit = platform_get_irq_byname(pdev, "tx"); 196 priv->interruptReceive = platform_get_irq_byname(pdev, "rx"); 197 priv->interruptError = platform_get_irq_byname(pdev, "error"); 198 if (priv->interruptTransmit < 0 || priv->interruptReceive < 0 || priv->interruptError < 0) 199 goto regs_fail; 200 } else { 201 priv->interruptTransmit = platform_get_irq(pdev, 0); 202 if (priv->interruptTransmit < 0) 203 goto regs_fail; 204 } 205 206 /* get a pointer to the register memory */ 207 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 208 priv->regs = ioremap(r->start, sizeof (struct gfar)); 209 210 if (NULL == priv->regs) { 211 err = -ENOMEM; 212 goto regs_fail; 213 } 214 215 spin_lock_init(&priv->txlock); 216 spin_lock_init(&priv->rxlock); 217 218 platform_set_drvdata(pdev, dev); 219 220 /* Stop the DMA engine now, in case it was running before */ 221 /* (The firmware could have used it, and left it running). */ 222 /* To do this, we write Graceful Receive Stop and Graceful */ 223 /* Transmit Stop, and then wait until the corresponding bits */ 224 /* in IEVENT indicate the stops have completed. */ 225 tempval = gfar_read(&priv->regs->dmactrl); 226 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); 227 gfar_write(&priv->regs->dmactrl, tempval); 228 229 tempval = gfar_read(&priv->regs->dmactrl); 230 tempval |= (DMACTRL_GRS | DMACTRL_GTS); 231 gfar_write(&priv->regs->dmactrl, tempval); 232 233 while (!(gfar_read(&priv->regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC))) 234 cpu_relax(); 235 236 /* Reset MAC layer */ 237 gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET); 238 239 tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); 240 gfar_write(&priv->regs->maccfg1, tempval); 241 242 /* Initialize MACCFG2. 
*/ 243 gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS); 244 245 /* Initialize ECNTRL */ 246 gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS); 247 248 /* Copy the station address into the dev structure, */ 249 memcpy(dev->dev_addr, einfo->mac_addr, MAC_ADDR_LEN); 250 251 /* Set the dev->base_addr to the gfar reg region */ 252 dev->base_addr = (unsigned long) (priv->regs); 253 254 SET_MODULE_OWNER(dev); 255 SET_NETDEV_DEV(dev, &pdev->dev); 256 257 /* Fill in the dev structure */ 258 dev->open = gfar_enet_open; 259 dev->hard_start_xmit = gfar_start_xmit; 260 dev->tx_timeout = gfar_timeout; 261 dev->watchdog_timeo = TX_TIMEOUT; 262#ifdef CONFIG_GFAR_NAPI 263 dev->poll = gfar_poll; 264 dev->weight = GFAR_DEV_WEIGHT; 265#endif 266#ifdef CONFIG_NET_POLL_CONTROLLER 267 dev->poll_controller = gfar_netpoll; 268#endif 269 dev->stop = gfar_close; 270 dev->get_stats = gfar_get_stats; 271 dev->change_mtu = gfar_change_mtu; 272 dev->mtu = 1500; 273 dev->set_multicast_list = gfar_set_multi; 274 275 dev->ethtool_ops = &gfar_ethtool_ops; 276 277 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { 278 priv->rx_csum_enable = 1; 279 dev->features |= NETIF_F_IP_CSUM; 280 } else 281 priv->rx_csum_enable = 0; 282 283 priv->vlgrp = NULL; 284 285 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { 286 dev->vlan_rx_register = gfar_vlan_rx_register; 287 dev->vlan_rx_kill_vid = gfar_vlan_rx_kill_vid; 288 289 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 290 291 priv->vlan_enable = 1; 292 } 293 294 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { 295 priv->extended_hash = 1; 296 priv->hash_width = 9; 297 298 priv->hash_regs[0] = &priv->regs->igaddr0; 299 priv->hash_regs[1] = &priv->regs->igaddr1; 300 priv->hash_regs[2] = &priv->regs->igaddr2; 301 priv->hash_regs[3] = &priv->regs->igaddr3; 302 priv->hash_regs[4] = &priv->regs->igaddr4; 303 priv->hash_regs[5] = &priv->regs->igaddr5; 304 priv->hash_regs[6] = &priv->regs->igaddr6; 305 priv->hash_regs[7] = &priv->regs->igaddr7; 306 priv->hash_regs[8] = &priv->regs->gaddr0; 307 priv->hash_regs[9] = &priv->regs->gaddr1; 308 priv->hash_regs[10] = &priv->regs->gaddr2; 309 priv->hash_regs[11] = &priv->regs->gaddr3; 310 priv->hash_regs[12] = &priv->regs->gaddr4; 311 priv->hash_regs[13] = &priv->regs->gaddr5; 312 priv->hash_regs[14] = &priv->regs->gaddr6; 313 priv->hash_regs[15] = &priv->regs->gaddr7; 314 315 } else { 316 priv->extended_hash = 0; 317 priv->hash_width = 8; 318 319 priv->hash_regs[0] = &priv->regs->gaddr0; 320 priv->hash_regs[1] = &priv->regs->gaddr1; 321 priv->hash_regs[2] = &priv->regs->gaddr2; 322 priv->hash_regs[3] = &priv->regs->gaddr3; 323 priv->hash_regs[4] = &priv->regs->gaddr4; 324 priv->hash_regs[5] = &priv->regs->gaddr5; 325 priv->hash_regs[6] = &priv->regs->gaddr6; 326 priv->hash_regs[7] = &priv->regs->gaddr7; 327 } 328 329 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_PADDING) 330 priv->padding = DEFAULT_PADDING; 331 else 332 priv->padding = 0; 333 334 if (dev->features & NETIF_F_IP_CSUM) 335 dev->hard_header_len += GMAC_FCB_LEN; 336 337 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; 338 priv->tx_ring_size = DEFAULT_TX_RING_SIZE; 339 priv->rx_ring_size = DEFAULT_RX_RING_SIZE; 340 341 priv->txcoalescing = DEFAULT_TX_COALESCE; 342 priv->txcount = DEFAULT_TXCOUNT; 343 priv->txtime = DEFAULT_TXTIME; 344 priv->rxcoalescing = DEFAULT_RX_COALESCE; 345 priv->rxcount = DEFAULT_RXCOUNT; 346 priv->rxtime = DEFAULT_RXTIME; 347 348 /* Enable most messages by default */ 349 priv->msg_enable = 
(NETIF_MSG_IFUP << 1 ) - 1; 350 351 err = register_netdev(dev); 352 353 if (err) { 354 printk(KERN_ERR "%s: Cannot register net device, aborting.\n", 355 dev->name); 356 goto register_fail; 357 } 358 359 /* Create all the sysfs files */ 360 gfar_init_sysfs(dev); 361 362 /* Print out the device info */ 363 printk(KERN_INFO DEVICE_NAME, dev->name); 364 for (idx = 0; idx < 6; idx++) 365 printk("%2.2x%c", dev->dev_addr[idx], idx == 5 ? ' ' : ':'); 366 printk("\n"); 367 368 /* Even more device info helps when determining which kernel */ 369 /* provided which set of benchmarks. */ 370#ifdef CONFIG_GFAR_NAPI 371 printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); 372#else 373 printk(KERN_INFO "%s: Running with NAPI disabled\n", dev->name); 374#endif 375 printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n", 376 dev->name, priv->rx_ring_size, priv->tx_ring_size); 377 378 return 0; 379 380register_fail: 381 iounmap(priv->regs); 382regs_fail: 383 free_netdev(dev); 384 return err; 385} 386 387static int gfar_remove(struct platform_device *pdev) 388{ 389 struct net_device *dev = platform_get_drvdata(pdev); 390 struct gfar_private *priv = netdev_priv(dev); 391 392 platform_set_drvdata(pdev, NULL); 393 394 iounmap(priv->regs); 395 free_netdev(dev); 396 397 return 0; 398} 399 400 401/* Reads the controller's registers to determine what interface 402 * connects it to the PHY. 403 */ 404static phy_interface_t gfar_get_interface(struct net_device *dev) 405{ 406 struct gfar_private *priv = netdev_priv(dev); 407 u32 ecntrl = gfar_read(&priv->regs->ecntrl); 408 409 if (ecntrl & ECNTRL_SGMII_MODE) 410 return PHY_INTERFACE_MODE_SGMII; 411 412 if (ecntrl & ECNTRL_TBI_MODE) { 413 if (ecntrl & ECNTRL_REDUCED_MODE) 414 return PHY_INTERFACE_MODE_RTBI; 415 else 416 return PHY_INTERFACE_MODE_TBI; 417 } 418 419 if (ecntrl & ECNTRL_REDUCED_MODE) { 420 if (ecntrl & ECNTRL_REDUCED_MII_MODE) 421 return PHY_INTERFACE_MODE_RMII; 422 else 423 return PHY_INTERFACE_MODE_RGMII; 424 } 425 426 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) 427 return PHY_INTERFACE_MODE_GMII; 428 429 return PHY_INTERFACE_MODE_MII; 430} 431 432 433/* Initializes driver's PHY state, and attaches to the PHY. 434 * Returns 0 on success. 435 */ 436static int init_phy(struct net_device *dev) 437{ 438 struct gfar_private *priv = netdev_priv(dev); 439 uint gigabit_support = 440 priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ? 
441 SUPPORTED_1000baseT_Full : 0; 442 struct phy_device *phydev; 443 char phy_id[BUS_ID_SIZE]; 444 phy_interface_t interface; 445 446 priv->oldlink = 0; 447 priv->oldspeed = 0; 448 priv->oldduplex = -1; 449 450 snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, priv->einfo->bus_id, priv->einfo->phy_id); 451 452 interface = gfar_get_interface(dev); 453 454 phydev = phy_connect(dev, phy_id, &adjust_link, 0, interface); 455 456 if (IS_ERR(phydev)) { 457 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); 458 return PTR_ERR(phydev); 459 } 460 461 /* Remove any features not supported by the controller */ 462 phydev->supported &= (GFAR_SUPPORTED | gigabit_support); 463 phydev->advertising = phydev->supported; 464 465 priv->phydev = phydev; 466 467 return 0; 468} 469 470static void init_registers(struct net_device *dev) 471{ 472 struct gfar_private *priv = netdev_priv(dev); 473 474 /* Clear IEVENT */ 475 gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR); 476 477 /* Initialize IMASK */ 478 gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR); 479 480 /* Init hash registers to zero */ 481 gfar_write(&priv->regs->igaddr0, 0); 482 gfar_write(&priv->regs->igaddr1, 0); 483 gfar_write(&priv->regs->igaddr2, 0); 484 gfar_write(&priv->regs->igaddr3, 0); 485 gfar_write(&priv->regs->igaddr4, 0); 486 gfar_write(&priv->regs->igaddr5, 0); 487 gfar_write(&priv->regs->igaddr6, 0); 488 gfar_write(&priv->regs->igaddr7, 0); 489 490 gfar_write(&priv->regs->gaddr0, 0); 491 gfar_write(&priv->regs->gaddr1, 0); 492 gfar_write(&priv->regs->gaddr2, 0); 493 gfar_write(&priv->regs->gaddr3, 0); 494 gfar_write(&priv->regs->gaddr4, 0); 495 gfar_write(&priv->regs->gaddr5, 0); 496 gfar_write(&priv->regs->gaddr6, 0); 497 gfar_write(&priv->regs->gaddr7, 0); 498 499 /* Zero out the rmon mib registers if it has them */ 500 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { 501 memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib)); 502 503 /* Mask off the CAM interrupts */ 504 gfar_write(&priv->regs->rmon.cam1, 0xffffffff); 505 gfar_write(&priv->regs->rmon.cam2, 0xffffffff); 506 } 507 508 /* Initialize the max receive buffer length */ 509 gfar_write(&priv->regs->mrblr, priv->rx_buffer_size); 510 511 /* Initialize the Minimum Frame Length Register */ 512 gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS); 513 514 /* Assign the TBI an address which won't conflict with the PHYs */ 515 gfar_write(&priv->regs->tbipa, TBIPA_VALUE); 516} 517 518 519/* Halt the receive and transmit queues */ 520void gfar_halt(struct net_device *dev) 521{ 522 struct gfar_private *priv = netdev_priv(dev); 523 struct gfar __iomem *regs = priv->regs; 524 u32 tempval; 525 526 /* Mask all interrupts */ 527 gfar_write(&regs->imask, IMASK_INIT_CLEAR); 528 529 /* Clear all interrupts */ 530 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR); 531 532 /* Stop the DMA, and wait for it to stop */ 533 tempval = gfar_read(&priv->regs->dmactrl); 534 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) 535 != (DMACTRL_GRS | DMACTRL_GTS)) { 536 tempval |= (DMACTRL_GRS | DMACTRL_GTS); 537 gfar_write(&priv->regs->dmactrl, tempval); 538 539 while (!(gfar_read(&priv->regs->ievent) & 540 (IEVENT_GRSC | IEVENT_GTSC))) 541 cpu_relax(); 542 } 543 544 /* Disable Rx and Tx */ 545 tempval = gfar_read(&regs->maccfg1); 546 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); 547 gfar_write(&regs->maccfg1, tempval); 548} 549 550void stop_gfar(struct net_device *dev) 551{ 552 struct gfar_private *priv = netdev_priv(dev); 553 struct gfar __iomem *regs = priv->regs; 554 unsigned long flags; 555 556 
phy_stop(priv->phydev); 557 558 /* Lock it down */ 559 spin_lock_irqsave(&priv->txlock, flags); 560 spin_lock(&priv->rxlock); 561 562 gfar_halt(dev); 563 564 spin_unlock(&priv->rxlock); 565 spin_unlock_irqrestore(&priv->txlock, flags); 566 567 /* Free the IRQs */ 568 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 569 free_irq(priv->interruptError, dev); 570 free_irq(priv->interruptTransmit, dev); 571 free_irq(priv->interruptReceive, dev); 572 } else { 573 free_irq(priv->interruptTransmit, dev); 574 } 575 576 free_skb_resources(priv); 577 578 dma_free_coherent(NULL, 579 sizeof(struct txbd8)*priv->tx_ring_size 580 + sizeof(struct rxbd8)*priv->rx_ring_size, 581 priv->tx_bd_base, 582 gfar_read(&regs->tbase0)); 583} 584 585/* If there are any tx skbs or rx skbs still around, free them. 586 * Then free tx_skbuff and rx_skbuff */ 587static void free_skb_resources(struct gfar_private *priv) 588{ 589 struct rxbd8 *rxbdp; 590 struct txbd8 *txbdp; 591 int i; 592 593 /* Go through all the buffer descriptors and free their data buffers */ 594 txbdp = priv->tx_bd_base; 595 596 for (i = 0; i < priv->tx_ring_size; i++) { 597 598 if (priv->tx_skbuff[i]) { 599 dma_unmap_single(NULL, txbdp->bufPtr, 600 txbdp->length, 601 DMA_TO_DEVICE); 602 dev_kfree_skb_any(priv->tx_skbuff[i]); 603 priv->tx_skbuff[i] = NULL; 604 } 605 } 606 607 kfree(priv->tx_skbuff); 608 609 rxbdp = priv->rx_bd_base; 610 611 /* rx_skbuff is not guaranteed to be allocated, so only 612 * free it and its contents if it is allocated */ 613 if(priv->rx_skbuff != NULL) { 614 for (i = 0; i < priv->rx_ring_size; i++) { 615 if (priv->rx_skbuff[i]) { 616 dma_unmap_single(NULL, rxbdp->bufPtr, 617 priv->rx_buffer_size, 618 DMA_FROM_DEVICE); 619 620 dev_kfree_skb_any(priv->rx_skbuff[i]); 621 priv->rx_skbuff[i] = NULL; 622 } 623 624 rxbdp->status = 0; 625 rxbdp->length = 0; 626 rxbdp->bufPtr = 0; 627 628 rxbdp++; 629 } 630 631 kfree(priv->rx_skbuff); 632 } 633} 634 635void gfar_start(struct net_device *dev) 636{ 637 struct gfar_private *priv = netdev_priv(dev); 638 struct gfar __iomem *regs = priv->regs; 639 u32 tempval; 640 641 /* Enable Rx and Tx in MACCFG1 */ 642 tempval = gfar_read(&regs->maccfg1); 643 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN); 644 gfar_write(&regs->maccfg1, tempval); 645 646 /* Initialize DMACTRL to have WWR and WOP */ 647 tempval = gfar_read(&priv->regs->dmactrl); 648 tempval |= DMACTRL_INIT_SETTINGS; 649 gfar_write(&priv->regs->dmactrl, tempval); 650 651 /* Make sure we aren't stopped */ 652 tempval = gfar_read(&priv->regs->dmactrl); 653 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); 654 gfar_write(&priv->regs->dmactrl, tempval); 655 656 /* Clear THLT/RHLT, so that the DMA starts polling now */ 657 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT); 658 gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT); 659 660 /* Unmask the interrupts we look for */ 661 gfar_write(&regs->imask, IMASK_DEFAULT); 662} 663 664/* Bring the controller up and running */ 665int startup_gfar(struct net_device *dev) 666{ 667 struct txbd8 *txbdp; 668 struct rxbd8 *rxbdp; 669 dma_addr_t addr; 670 unsigned long vaddr; 671 int i; 672 struct gfar_private *priv = netdev_priv(dev); 673 struct gfar __iomem *regs = priv->regs; 674 int err = 0; 675 u32 rctrl = 0; 676 u32 attrs = 0; 677 678 gfar_write(&regs->imask, IMASK_INIT_CLEAR); 679 680 /* Allocate memory for the buffer descriptors */ 681 vaddr = (unsigned long) dma_alloc_coherent(NULL, 682 sizeof (struct txbd8) * priv->tx_ring_size + 683 sizeof (struct rxbd8) * priv->rx_ring_size, 684 &addr, GFP_KERNEL); 685 
686 if (vaddr == 0) { 687 if (netif_msg_ifup(priv)) 688 printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n", 689 dev->name); 690 return -ENOMEM; 691 } 692 693 priv->tx_bd_base = (struct txbd8 *) vaddr; 694 695 /* enet DMA only understands physical addresses */ 696 gfar_write(&regs->tbase0, addr); 697 698 /* Start the rx descriptor ring where the tx ring leaves off */ 699 addr = addr + sizeof (struct txbd8) * priv->tx_ring_size; 700 vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size; 701 priv->rx_bd_base = (struct rxbd8 *) vaddr; 702 gfar_write(&regs->rbase0, addr); 703 704 /* Setup the skbuff rings */ 705 priv->tx_skbuff = 706 (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) * 707 priv->tx_ring_size, GFP_KERNEL); 708 709 if (NULL == priv->tx_skbuff) { 710 if (netif_msg_ifup(priv)) 711 printk(KERN_ERR "%s: Could not allocate tx_skbuff\n", 712 dev->name); 713 err = -ENOMEM; 714 goto tx_skb_fail; 715 } 716 717 for (i = 0; i < priv->tx_ring_size; i++) 718 priv->tx_skbuff[i] = NULL; 719 720 priv->rx_skbuff = 721 (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) * 722 priv->rx_ring_size, GFP_KERNEL); 723 724 if (NULL == priv->rx_skbuff) { 725 if (netif_msg_ifup(priv)) 726 printk(KERN_ERR "%s: Could not allocate rx_skbuff\n", 727 dev->name); 728 err = -ENOMEM; 729 goto rx_skb_fail; 730 } 731 732 for (i = 0; i < priv->rx_ring_size; i++) 733 priv->rx_skbuff[i] = NULL; 734 735 /* Initialize some variables in our dev structure */ 736 priv->dirty_tx = priv->cur_tx = priv->tx_bd_base; 737 priv->cur_rx = priv->rx_bd_base; 738 priv->skb_curtx = priv->skb_dirtytx = 0; 739 priv->skb_currx = 0; 740 741 /* Initialize Transmit Descriptor Ring */ 742 txbdp = priv->tx_bd_base; 743 for (i = 0; i < priv->tx_ring_size; i++) { 744 txbdp->status = 0; 745 txbdp->length = 0; 746 txbdp->bufPtr = 0; 747 txbdp++; 748 } 749 750 /* Set the last descriptor in the ring to indicate wrap */ 751 txbdp--; 752 txbdp->status |= TXBD_WRAP; 753 754 rxbdp = priv->rx_bd_base; 755 for (i = 0; i < priv->rx_ring_size; i++) { 756 struct sk_buff *skb = NULL; 757 758 rxbdp->status = 0; 759 760 skb = gfar_new_skb(dev, rxbdp); 761 762 priv->rx_skbuff[i] = skb; 763 764 rxbdp++; 765 } 766 767 /* Set the last descriptor in the ring to wrap */ 768 rxbdp--; 769 rxbdp->status |= RXBD_WRAP; 770 771 /* If the device has multiple interrupts, register for 772 * them. 
Otherwise, only register for the one */ 773 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 774 /* Install our interrupt handlers for Error, 775 * Transmit, and Receive */ 776 if (request_irq(priv->interruptError, gfar_error, 777 0, "enet_error", dev) < 0) { 778 if (netif_msg_intr(priv)) 779 printk(KERN_ERR "%s: Can't get IRQ %d\n", 780 dev->name, priv->interruptError); 781 782 err = -1; 783 goto err_irq_fail; 784 } 785 786 if (request_irq(priv->interruptTransmit, gfar_transmit, 787 0, "enet_tx", dev) < 0) { 788 if (netif_msg_intr(priv)) 789 printk(KERN_ERR "%s: Can't get IRQ %d\n", 790 dev->name, priv->interruptTransmit); 791 792 err = -1; 793 794 goto tx_irq_fail; 795 } 796 797 if (request_irq(priv->interruptReceive, gfar_receive, 798 0, "enet_rx", dev) < 0) { 799 if (netif_msg_intr(priv)) 800 printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n", 801 dev->name, priv->interruptReceive); 802 803 err = -1; 804 goto rx_irq_fail; 805 } 806 } else { 807 if (request_irq(priv->interruptTransmit, gfar_interrupt, 808 0, "gfar_interrupt", dev) < 0) { 809 if (netif_msg_intr(priv)) 810 printk(KERN_ERR "%s: Can't get IRQ %d\n", 811 dev->name, priv->interruptError); 812 813 err = -1; 814 goto err_irq_fail; 815 } 816 } 817 818 phy_start(priv->phydev); 819 820 /* Configure the coalescing support */ 821 if (priv->txcoalescing) 822 gfar_write(&regs->txic, 823 mk_ic_value(priv->txcount, priv->txtime)); 824 else 825 gfar_write(&regs->txic, 0); 826 827 if (priv->rxcoalescing) 828 gfar_write(&regs->rxic, 829 mk_ic_value(priv->rxcount, priv->rxtime)); 830 else 831 gfar_write(&regs->rxic, 0); 832 833 if (priv->rx_csum_enable) 834 rctrl |= RCTRL_CHECKSUMMING; 835 836 if (priv->extended_hash) { 837 rctrl |= RCTRL_EXTHASH; 838 839 gfar_clear_exact_match(dev); 840 rctrl |= RCTRL_EMEN; 841 } 842 843 if (priv->vlan_enable) 844 rctrl |= RCTRL_VLAN; 845 846 if (priv->padding) { 847 rctrl &= ~RCTRL_PAL_MASK; 848 rctrl |= RCTRL_PADDING(priv->padding); 849 } 850 851 /* Init rctrl based on our settings */ 852 gfar_write(&priv->regs->rctrl, rctrl); 853 854 if (dev->features & NETIF_F_IP_CSUM) 855 gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM); 856 857 /* Set the extraction length and index */ 858 attrs = ATTRELI_EL(priv->rx_stash_size) | 859 ATTRELI_EI(priv->rx_stash_index); 860 861 gfar_write(&priv->regs->attreli, attrs); 862 863 /* Start with defaults, and add stashing or locking 864 * depending on the approprate variables */ 865 attrs = ATTR_INIT_SETTINGS; 866 867 if (priv->bd_stash_en) 868 attrs |= ATTR_BDSTASH; 869 870 if (priv->rx_stash_size != 0) 871 attrs |= ATTR_BUFSTASH; 872 873 gfar_write(&priv->regs->attr, attrs); 874 875 gfar_write(&priv->regs->fifo_tx_thr, priv->fifo_threshold); 876 gfar_write(&priv->regs->fifo_tx_starve, priv->fifo_starve); 877 gfar_write(&priv->regs->fifo_tx_starve_shutoff, priv->fifo_starve_off); 878 879 /* Start the controller */ 880 gfar_start(dev); 881 882 return 0; 883 884rx_irq_fail: 885 free_irq(priv->interruptTransmit, dev); 886tx_irq_fail: 887 free_irq(priv->interruptError, dev); 888err_irq_fail: 889rx_skb_fail: 890 free_skb_resources(priv); 891tx_skb_fail: 892 dma_free_coherent(NULL, 893 sizeof(struct txbd8)*priv->tx_ring_size 894 + sizeof(struct rxbd8)*priv->rx_ring_size, 895 priv->tx_bd_base, 896 gfar_read(&regs->tbase0)); 897 898 return err; 899} 900 901/* Called when something needs to use the ethernet device */ 902/* Returns 0 for success. 
*/ 903static int gfar_enet_open(struct net_device *dev) 904{ 905 int err; 906 907 /* Initialize a bunch of registers */ 908 init_registers(dev); 909 910 gfar_set_mac_address(dev); 911 912 err = init_phy(dev); 913 914 if(err) 915 return err; 916 917 err = startup_gfar(dev); 918 919 netif_start_queue(dev); 920 921 return err; 922} 923 924static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb, struct txbd8 *bdp) 925{ 926 struct txfcb *fcb = (struct txfcb *)skb_push (skb, GMAC_FCB_LEN); 927 928 memset(fcb, 0, GMAC_FCB_LEN); 929 930 return fcb; 931} 932 933static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb) 934{ 935 u8 flags = 0; 936 937 /* If we're here, it's a IP packet with a TCP or UDP 938 * payload. We set it to checksum, using a pseudo-header 939 * we provide 940 */ 941 flags = TXFCB_DEFAULT; 942 943 /* Tell the controller what the protocol is */ 944 /* And provide the already calculated phcs */ 945 if (skb->nh.iph->protocol == IPPROTO_UDP) { 946 flags |= TXFCB_UDP; 947 fcb->phcs = skb->h.uh->check; 948 } else 949 fcb->phcs = skb->h.th->check; 950 951 /* l3os is the distance between the start of the 952 * frame (skb->data) and the start of the IP hdr. 953 * l4os is the distance between the start of the 954 * l3 hdr and the l4 hdr */ 955 fcb->l3os = (u16)(skb->nh.raw - skb->data - GMAC_FCB_LEN); 956 fcb->l4os = (u16)(skb->h.raw - skb->nh.raw); 957 958 fcb->flags = flags; 959} 960 961void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb) 962{ 963 fcb->flags |= TXFCB_VLN; 964 fcb->vlctl = vlan_tx_tag_get(skb); 965} 966 967/* This is called by the kernel when a frame is ready for transmission. */ 968/* It is pointed to by the dev->hard_start_xmit function pointer */ 969static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) 970{ 971 struct gfar_private *priv = netdev_priv(dev); 972 struct txfcb *fcb = NULL; 973 struct txbd8 *txbdp; 974 u16 status; 975 unsigned long flags; 976 977 /* Update transmit stats */ 978 priv->stats.tx_bytes += skb->len; 979 980 /* Lock priv now */ 981 spin_lock_irqsave(&priv->txlock, flags); 982 983 /* Point at the first free tx descriptor */ 984 txbdp = priv->cur_tx; 985 986 /* Clear all but the WRAP status flags */ 987 status = txbdp->status & TXBD_WRAP; 988 989 /* Set up checksumming */ 990 if (likely((dev->features & NETIF_F_IP_CSUM) 991 && (CHECKSUM_PARTIAL == skb->ip_summed))) { 992 fcb = gfar_add_fcb(skb, txbdp); 993 status |= TXBD_TOE; 994 gfar_tx_checksum(skb, fcb); 995 } 996 997 if (priv->vlan_enable && 998 unlikely(priv->vlgrp && vlan_tx_tag_present(skb))) { 999 if (unlikely(NULL == fcb)) { 1000 fcb = gfar_add_fcb(skb, txbdp); 1001 status |= TXBD_TOE; 1002 } 1003 1004 gfar_tx_vlan(skb, fcb); 1005 } 1006 1007 /* Set buffer length and pointer */ 1008 txbdp->length = skb->len; 1009 txbdp->bufPtr = dma_map_single(NULL, skb->data, 1010 skb->len, DMA_TO_DEVICE); 1011 1012 /* Save the skb pointer so we can free it later */ 1013 priv->tx_skbuff[priv->skb_curtx] = skb; 1014 1015 /* Update the current skb pointer (wrapping if this was the last) */ 1016 priv->skb_curtx = 1017 (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size); 1018 1019 /* Flag the BD as interrupt-causing */ 1020 status |= TXBD_INTERRUPT; 1021 1022 /* Flag the BD as ready to go, last in frame, and */ 1023 /* in need of CRC */ 1024 status |= (TXBD_READY | TXBD_LAST | TXBD_CRC); 1025 1026 dev->trans_start = jiffies; 1027 1028 txbdp->status = status; 1029 1030 /* If this was the last BD in the ring, the next one */ 1031 /* is at the 
beginning of the ring */ 1032 if (txbdp->status & TXBD_WRAP) 1033 txbdp = priv->tx_bd_base; 1034 else 1035 txbdp++; 1036 1037 /* If the next BD still needs to be cleaned up, then the bds 1038 are full. We need to tell the kernel to stop sending us stuff. */ 1039 if (txbdp == priv->dirty_tx) { 1040 netif_stop_queue(dev); 1041 1042 priv->stats.tx_fifo_errors++; 1043 } 1044 1045 /* Update the current txbd to the next one */ 1046 priv->cur_tx = txbdp; 1047 1048 /* Tell the DMA to go go go */ 1049 gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT); 1050 1051 /* Unlock priv */ 1052 spin_unlock_irqrestore(&priv->txlock, flags); 1053 1054 return 0; 1055} 1056 1057/* Stops the kernel queue, and halts the controller */ 1058static int gfar_close(struct net_device *dev) 1059{ 1060 struct gfar_private *priv = netdev_priv(dev); 1061 stop_gfar(dev); 1062 1063 /* Disconnect from the PHY */ 1064 phy_disconnect(priv->phydev); 1065 priv->phydev = NULL; 1066 1067 netif_stop_queue(dev); 1068 1069 return 0; 1070} 1071 1072/* returns a net_device_stats structure pointer */ 1073static struct net_device_stats * gfar_get_stats(struct net_device *dev) 1074{ 1075 struct gfar_private *priv = netdev_priv(dev); 1076 1077 return &(priv->stats); 1078} 1079 1080/* Changes the mac address if the controller is not running. */ 1081int gfar_set_mac_address(struct net_device *dev) 1082{ 1083 gfar_set_mac_for_addr(dev, 0, dev->dev_addr); 1084 1085 return 0; 1086} 1087 1088 1089/* Enables and disables VLAN insertion/extraction */ 1090static void gfar_vlan_rx_register(struct net_device *dev, 1091 struct vlan_group *grp) 1092{ 1093 struct gfar_private *priv = netdev_priv(dev); 1094 unsigned long flags; 1095 u32 tempval; 1096 1097 spin_lock_irqsave(&priv->rxlock, flags); 1098 1099 priv->vlgrp = grp; 1100 1101 if (grp) { 1102 /* Enable VLAN tag insertion */ 1103 tempval = gfar_read(&priv->regs->tctrl); 1104 tempval |= TCTRL_VLINS; 1105 1106 gfar_write(&priv->regs->tctrl, tempval); 1107 1108 /* Enable VLAN tag extraction */ 1109 tempval = gfar_read(&priv->regs->rctrl); 1110 tempval |= RCTRL_VLEX; 1111 gfar_write(&priv->regs->rctrl, tempval); 1112 } else { 1113 /* Disable VLAN tag insertion */ 1114 tempval = gfar_read(&priv->regs->tctrl); 1115 tempval &= ~TCTRL_VLINS; 1116 gfar_write(&priv->regs->tctrl, tempval); 1117 1118 /* Disable VLAN tag extraction */ 1119 tempval = gfar_read(&priv->regs->rctrl); 1120 tempval &= ~RCTRL_VLEX; 1121 gfar_write(&priv->regs->rctrl, tempval); 1122 } 1123 1124 spin_unlock_irqrestore(&priv->rxlock, flags); 1125} 1126 1127 1128static void gfar_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid) 1129{ 1130 struct gfar_private *priv = netdev_priv(dev); 1131 unsigned long flags; 1132 1133 spin_lock_irqsave(&priv->rxlock, flags); 1134 1135 vlan_group_set_device(priv->vlgrp, vid, NULL); 1136 1137 spin_unlock_irqrestore(&priv->rxlock, flags); 1138} 1139 1140 1141static int gfar_change_mtu(struct net_device *dev, int new_mtu) 1142{ 1143 int tempsize, tempval; 1144 struct gfar_private *priv = netdev_priv(dev); 1145 int oldsize = priv->rx_buffer_size; 1146 int frame_size = new_mtu + ETH_HLEN; 1147 1148 if (priv->vlan_enable) 1149 frame_size += VLAN_ETH_HLEN; 1150 1151 if (gfar_uses_fcb(priv)) 1152 frame_size += GMAC_FCB_LEN; 1153 1154 frame_size += priv->padding; 1155 1156 if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) { 1157 if (netif_msg_drv(priv)) 1158 printk(KERN_ERR "%s: Invalid MTU setting\n", 1159 dev->name); 1160 return -EINVAL; 1161 } 1162 1163 tempsize = 1164 (frame_size & 
~(INCREMENTAL_BUFFER_SIZE - 1)) + 1165 INCREMENTAL_BUFFER_SIZE; 1166 1167 /* Only stop and start the controller if it isn't already 1168 * stopped, and we changed something */ 1169 if ((oldsize != tempsize) && (dev->flags & IFF_UP)) 1170 stop_gfar(dev); 1171 1172 priv->rx_buffer_size = tempsize; 1173 1174 dev->mtu = new_mtu; 1175 1176 gfar_write(&priv->regs->mrblr, priv->rx_buffer_size); 1177 gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size); 1178 1179 /* If the mtu is larger than the max size for standard 1180 * ethernet frames (ie, a jumbo frame), then set maccfg2 1181 * to allow huge frames, and to check the length */ 1182 tempval = gfar_read(&priv->regs->maccfg2); 1183 1184 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE) 1185 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); 1186 else 1187 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); 1188 1189 gfar_write(&priv->regs->maccfg2, tempval); 1190 1191 if ((oldsize != tempsize) && (dev->flags & IFF_UP)) 1192 startup_gfar(dev); 1193 1194 return 0; 1195} 1196 1197/* gfar_timeout gets called when a packet has not been 1198 * transmitted after a set amount of time. 1199 * For now, assume that clearing out all the structures, and 1200 * starting over will fix the problem. */ 1201static void gfar_timeout(struct net_device *dev) 1202{ 1203 struct gfar_private *priv = netdev_priv(dev); 1204 1205 priv->stats.tx_errors++; 1206 1207 if (dev->flags & IFF_UP) { 1208 stop_gfar(dev); 1209 startup_gfar(dev); 1210 } 1211 1212 netif_schedule(dev); 1213} 1214 1215/* Interrupt Handler for Transmit complete */ 1216static irqreturn_t gfar_transmit(int irq, void *dev_id) 1217{ 1218 struct net_device *dev = (struct net_device *) dev_id; 1219 struct gfar_private *priv = netdev_priv(dev); 1220 struct txbd8 *bdp; 1221 1222 /* Clear IEVENT */ 1223 gfar_write(&priv->regs->ievent, IEVENT_TX_MASK); 1224 1225 /* Lock priv */ 1226 spin_lock(&priv->txlock); 1227 bdp = priv->dirty_tx; 1228 while ((bdp->status & TXBD_READY) == 0) { 1229 /* If dirty_tx and cur_tx are the same, then either the */ 1230 /* ring is empty or full now (it could only be full in the beginning, */ 1231 /* obviously). If it is empty, we are done. */ 1232 if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0)) 1233 break; 1234 1235 priv->stats.tx_packets++; 1236 1237 /* Deferred means some collisions occurred during transmit, */ 1238 /* but we eventually sent the packet. 
*/ 1239 if (bdp->status & TXBD_DEF) 1240 priv->stats.collisions++; 1241 1242 /* Free the sk buffer associated with this TxBD */ 1243 dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]); 1244 priv->tx_skbuff[priv->skb_dirtytx] = NULL; 1245 priv->skb_dirtytx = 1246 (priv->skb_dirtytx + 1247 1) & TX_RING_MOD_MASK(priv->tx_ring_size); 1248 1249 /* update bdp to point at next bd in the ring (wrapping if necessary) */ 1250 if (bdp->status & TXBD_WRAP) 1251 bdp = priv->tx_bd_base; 1252 else 1253 bdp++; 1254 1255 /* Move dirty_tx to be the next bd */ 1256 priv->dirty_tx = bdp; 1257 1258 /* We freed a buffer, so now we can restart transmission */ 1259 if (netif_queue_stopped(dev)) 1260 netif_wake_queue(dev); 1261 } /* while ((bdp->status & TXBD_READY) == 0) */ 1262 1263 /* If we are coalescing the interrupts, reset the timer */ 1264 /* Otherwise, clear it */ 1265 if (priv->txcoalescing) 1266 gfar_write(&priv->regs->txic, 1267 mk_ic_value(priv->txcount, priv->txtime)); 1268 else 1269 gfar_write(&priv->regs->txic, 0); 1270 1271 spin_unlock(&priv->txlock); 1272 1273 return IRQ_HANDLED; 1274} 1275 1276struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp) 1277{ 1278 unsigned int alignamount; 1279 struct gfar_private *priv = netdev_priv(dev); 1280 struct sk_buff *skb = NULL; 1281 unsigned int timeout = SKB_ALLOC_TIMEOUT; 1282 1283 /* We have to allocate the skb, so keep trying till we succeed */ 1284 while ((!skb) && timeout--) 1285 skb = dev_alloc_skb(priv->rx_buffer_size + RXBUF_ALIGNMENT); 1286 1287 if (NULL == skb) 1288 return NULL; 1289 1290 alignamount = RXBUF_ALIGNMENT - 1291 (((unsigned) skb->data) & (RXBUF_ALIGNMENT - 1)); 1292 1293 /* We need the data buffer to be aligned properly. We will reserve 1294 * as many bytes as needed to align the data properly 1295 */ 1296 skb_reserve(skb, alignamount); 1297 1298 skb->dev = dev; 1299 1300 bdp->bufPtr = dma_map_single(NULL, skb->data, 1301 priv->rx_buffer_size, DMA_FROM_DEVICE); 1302 1303 bdp->length = 0; 1304 1305 /* Mark the buffer empty */ 1306 bdp->status |= (RXBD_EMPTY | RXBD_INTERRUPT); 1307 1308 return skb; 1309} 1310 1311static inline void count_errors(unsigned short status, struct gfar_private *priv) 1312{ 1313 struct net_device_stats *stats = &priv->stats; 1314 struct gfar_extra_stats *estats = &priv->extra_stats; 1315 1316 /* If the packet was truncated, none of the other errors 1317 * matter */ 1318 if (status & RXBD_TRUNCATED) { 1319 stats->rx_length_errors++; 1320 1321 estats->rx_trunc++; 1322 1323 return; 1324 } 1325 /* Count the errors, if there were any */ 1326 if (status & (RXBD_LARGE | RXBD_SHORT)) { 1327 stats->rx_length_errors++; 1328 1329 if (status & RXBD_LARGE) 1330 estats->rx_large++; 1331 else 1332 estats->rx_short++; 1333 } 1334 if (status & RXBD_NONOCTET) { 1335 stats->rx_frame_errors++; 1336 estats->rx_nonoctet++; 1337 } 1338 if (status & RXBD_CRCERR) { 1339 estats->rx_crcerr++; 1340 stats->rx_crc_errors++; 1341 } 1342 if (status & RXBD_OVERRUN) { 1343 estats->rx_overrun++; 1344 stats->rx_crc_errors++; 1345 } 1346} 1347 1348irqreturn_t gfar_receive(int irq, void *dev_id) 1349{ 1350 struct net_device *dev = (struct net_device *) dev_id; 1351 struct gfar_private *priv = netdev_priv(dev); 1352#ifdef CONFIG_GFAR_NAPI 1353 u32 tempval; 1354#else 1355 unsigned long flags; 1356#endif 1357 1358 /* Clear IEVENT, so rx interrupt isn't called again 1359 * because of this interrupt */ 1360 gfar_write(&priv->regs->ievent, IEVENT_RX_MASK); 1361 1362 /* support NAPI */ 1363#ifdef CONFIG_GFAR_NAPI 1364 if 
(netif_rx_schedule_prep(dev)) { 1365 tempval = gfar_read(&priv->regs->imask); 1366 tempval &= IMASK_RX_DISABLED; 1367 gfar_write(&priv->regs->imask, tempval); 1368 1369 __netif_rx_schedule(dev); 1370 } else { 1371 if (netif_msg_rx_err(priv)) 1372 printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n", 1373 dev->name, gfar_read(&priv->regs->ievent), 1374 gfar_read(&priv->regs->imask)); 1375 } 1376#else 1377 1378 spin_lock_irqsave(&priv->rxlock, flags); 1379 gfar_clean_rx_ring(dev, priv->rx_ring_size); 1380 1381 /* If we are coalescing interrupts, update the timer */ 1382 /* Otherwise, clear it */ 1383 if (priv->rxcoalescing) 1384 gfar_write(&priv->regs->rxic, 1385 mk_ic_value(priv->rxcount, priv->rxtime)); 1386 else 1387 gfar_write(&priv->regs->rxic, 0); 1388 1389 spin_unlock_irqrestore(&priv->rxlock, flags); 1390#endif 1391 1392 return IRQ_HANDLED; 1393} 1394 1395static inline int gfar_rx_vlan(struct sk_buff *skb, 1396 struct vlan_group *vlgrp, unsigned short vlctl) 1397{ 1398#ifdef CONFIG_GFAR_NAPI 1399 return vlan_hwaccel_receive_skb(skb, vlgrp, vlctl); 1400#else 1401 return vlan_hwaccel_rx(skb, vlgrp, vlctl); 1402#endif 1403} 1404 1405static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) 1406{ 1407 /* If valid headers were found, and valid sums 1408 * were verified, then we tell the kernel that no 1409 * checksumming is necessary. Otherwise, it is */ 1410 if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU)) 1411 skb->ip_summed = CHECKSUM_UNNECESSARY; 1412 else 1413 skb->ip_summed = CHECKSUM_NONE; 1414} 1415 1416 1417static inline struct rxfcb *gfar_get_fcb(struct sk_buff *skb) 1418{ 1419 struct rxfcb *fcb = (struct rxfcb *)skb->data; 1420 1421 /* Remove the FCB from the skb */ 1422 skb_pull(skb, GMAC_FCB_LEN); 1423 1424 return fcb; 1425} 1426 1427/* gfar_process_frame() -- handle one incoming packet if skb 1428 * isn't NULL. */ 1429static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, 1430 int length) 1431{ 1432 struct gfar_private *priv = netdev_priv(dev); 1433 struct rxfcb *fcb = NULL; 1434 1435 if (NULL == skb) { 1436 if (netif_msg_rx_err(priv)) 1437 printk(KERN_WARNING "%s: Missing skb!!.\n", dev->name); 1438 priv->stats.rx_dropped++; 1439 priv->extra_stats.rx_skbmissing++; 1440 } else { 1441 int ret; 1442 1443 /* Prep the skb for the packet */ 1444 skb_put(skb, length); 1445 1446 /* Grab the FCB if there is one */ 1447 if (gfar_uses_fcb(priv)) 1448 fcb = gfar_get_fcb(skb); 1449 1450 /* Remove the padded bytes, if there are any */ 1451 if (priv->padding) 1452 skb_pull(skb, priv->padding); 1453 1454 if (priv->rx_csum_enable) 1455 gfar_rx_checksum(skb, fcb); 1456 1457 /* Tell the skb what kind of packet this is */ 1458 skb->protocol = eth_type_trans(skb, dev); 1459 1460 /* Send the packet up the stack */ 1461 if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN))) 1462 ret = gfar_rx_vlan(skb, priv->vlgrp, fcb->vlctl); 1463 else 1464 ret = RECEIVE(skb); 1465 1466 if (NET_RX_DROP == ret) 1467 priv->extra_stats.kernel_dropped++; 1468 } 1469 1470 return 0; 1471} 1472 1473/* gfar_clean_rx_ring() -- Processes each frame in the rx ring 1474 * until the budget/quota has been reached. 
Returns the number 1475 * of frames handled 1476 */ 1477int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit) 1478{ 1479 struct rxbd8 *bdp; 1480 struct sk_buff *skb; 1481 u16 pkt_len; 1482 int howmany = 0; 1483 struct gfar_private *priv = netdev_priv(dev); 1484 1485 /* Get the first full descriptor */ 1486 bdp = priv->cur_rx; 1487 1488 while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) { 1489 skb = priv->rx_skbuff[priv->skb_currx]; 1490 1491 if (!(bdp->status & 1492 (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET 1493 | RXBD_CRCERR | RXBD_OVERRUN | RXBD_TRUNCATED))) { 1494 /* Increment the number of packets */ 1495 priv->stats.rx_packets++; 1496 howmany++; 1497 1498 /* Remove the FCS from the packet length */ 1499 pkt_len = bdp->length - 4; 1500 1501 gfar_process_frame(dev, skb, pkt_len); 1502 1503 priv->stats.rx_bytes += pkt_len; 1504 } else { 1505 count_errors(bdp->status, priv); 1506 1507 if (skb) 1508 dev_kfree_skb_any(skb); 1509 1510 priv->rx_skbuff[priv->skb_currx] = NULL; 1511 } 1512 1513 dev->last_rx = jiffies; 1514 1515 /* Clear the status flags for this buffer */ 1516 bdp->status &= ~RXBD_STATS; 1517 1518 /* Add another skb for the future */ 1519 skb = gfar_new_skb(dev, bdp); 1520 priv->rx_skbuff[priv->skb_currx] = skb; 1521 1522 /* Update to the next pointer */ 1523 if (bdp->status & RXBD_WRAP) 1524 bdp = priv->rx_bd_base; 1525 else 1526 bdp++; 1527 1528 /* update to point at the next skb */ 1529 priv->skb_currx = 1530 (priv->skb_currx + 1531 1) & RX_RING_MOD_MASK(priv->rx_ring_size); 1532 1533 } 1534 1535 /* Update the current rxbd pointer to be the next one */ 1536 priv->cur_rx = bdp; 1537 1538 return howmany; 1539} 1540 1541#ifdef CONFIG_GFAR_NAPI 1542static int gfar_poll(struct net_device *dev, int *budget) 1543{ 1544 int howmany; 1545 struct gfar_private *priv = netdev_priv(dev); 1546 int rx_work_limit = *budget; 1547 1548 if (rx_work_limit > dev->quota) 1549 rx_work_limit = dev->quota; 1550 1551 howmany = gfar_clean_rx_ring(dev, rx_work_limit); 1552 1553 dev->quota -= howmany; 1554 rx_work_limit -= howmany; 1555 *budget -= howmany; 1556 1557 if (rx_work_limit > 0) { 1558 netif_rx_complete(dev); 1559 1560 /* Clear the halt bit in RSTAT */ 1561 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT); 1562 1563 gfar_write(&priv->regs->imask, IMASK_DEFAULT); 1564 1565 /* If we are coalescing interrupts, update the timer */ 1566 /* Otherwise, clear it */ 1567 if (priv->rxcoalescing) 1568 gfar_write(&priv->regs->rxic, 1569 mk_ic_value(priv->rxcount, priv->rxtime)); 1570 else 1571 gfar_write(&priv->regs->rxic, 0); 1572 } 1573 1574 /* Return 1 if there's more work to do */ 1575 return (rx_work_limit > 0) ? 0 : 1; 1576} 1577#endif 1578 1579#ifdef CONFIG_NET_POLL_CONTROLLER 1580/* 1581 * Polling 'interrupt' - used by things like netconsole to send skbs 1582 * without having to re-enable interrupts. It's not called while 1583 * the interrupt routine is executing. 
1584 */ 1585static void gfar_netpoll(struct net_device *dev) 1586{ 1587 struct gfar_private *priv = netdev_priv(dev); 1588 1589 /* If the device has multiple interrupts, run tx/rx */ 1590 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 1591 disable_irq(priv->interruptTransmit); 1592 disable_irq(priv->interruptReceive); 1593 disable_irq(priv->interruptError); 1594 gfar_interrupt(priv->interruptTransmit, dev); 1595 enable_irq(priv->interruptError); 1596 enable_irq(priv->interruptReceive); 1597 enable_irq(priv->interruptTransmit); 1598 } else { 1599 disable_irq(priv->interruptTransmit); 1600 gfar_interrupt(priv->interruptTransmit, dev); 1601 enable_irq(priv->interruptTransmit); 1602 } 1603} 1604#endif 1605 1606/* The interrupt handler for devices with one interrupt */ 1607static irqreturn_t gfar_interrupt(int irq, void *dev_id) 1608{ 1609 struct net_device *dev = dev_id; 1610 struct gfar_private *priv = netdev_priv(dev); 1611 1612 /* Save ievent for future reference */ 1613 u32 events = gfar_read(&priv->regs->ievent); 1614 1615 /* Check for reception */ 1616 if (events & IEVENT_RX_MASK) 1617 gfar_receive(irq, dev_id); 1618 1619 /* Check for transmit completion */ 1620 if (events & IEVENT_TX_MASK) 1621 gfar_transmit(irq, dev_id); 1622 1623 /* Check for errors */ 1624 if (events & IEVENT_ERR_MASK) 1625 gfar_error(irq, dev_id); 1626 1627 return IRQ_HANDLED; 1628} 1629 1630/* Called every time the controller might need to be made 1631 * aware of new link state. The PHY code conveys this 1632 * information through variables in the phydev structure, and this 1633 * function converts those variables into the appropriate 1634 * register values, and can bring down the device if needed. 1635 */ 1636static void adjust_link(struct net_device *dev) 1637{ 1638 struct gfar_private *priv = netdev_priv(dev); 1639 struct gfar __iomem *regs = priv->regs; 1640 unsigned long flags; 1641 struct phy_device *phydev = priv->phydev; 1642 int new_state = 0; 1643 1644 spin_lock_irqsave(&priv->txlock, flags); 1645 if (phydev->link) { 1646 u32 tempval = gfar_read(&regs->maccfg2); 1647 u32 ecntrl = gfar_read(&regs->ecntrl); 1648 1649 /* Now we make sure that we can be in full duplex mode. 1650 * If not, we operate in half-duplex mode. */ 1651 if (phydev->duplex != priv->oldduplex) { 1652 new_state = 1; 1653 if (!(phydev->duplex)) 1654 tempval &= ~(MACCFG2_FULL_DUPLEX); 1655 else 1656 tempval |= MACCFG2_FULL_DUPLEX; 1657 1658 priv->oldduplex = phydev->duplex; 1659 } 1660 1661 if (phydev->speed != priv->oldspeed) { 1662 new_state = 1; 1663 switch (phydev->speed) { 1664 case 1000: 1665 tempval = 1666 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); 1667 break; 1668 case 100: 1669 case 10: 1670 tempval = 1671 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); 1672 1673 /* Reduced mode distinguishes 1674 * between 10 and 100 */ 1675 if (phydev->speed == SPEED_100) 1676 ecntrl |= ECNTRL_R100; 1677 else 1678 ecntrl &= ~(ECNTRL_R100); 1679 break; 1680 default: 1681 if (netif_msg_link(priv)) 1682 printk(KERN_WARNING 1683 "%s: Ack! 
Speed (%d) is not 10/100/1000!\n", 1684 dev->name, phydev->speed); 1685 break; 1686 } 1687 1688 priv->oldspeed = phydev->speed; 1689 } 1690 1691 gfar_write(&regs->maccfg2, tempval); 1692 gfar_write(&regs->ecntrl, ecntrl); 1693 1694 if (!priv->oldlink) { 1695 new_state = 1; 1696 priv->oldlink = 1; 1697 netif_schedule(dev); 1698 } 1699 } else if (priv->oldlink) { 1700 new_state = 1; 1701 priv->oldlink = 0; 1702 priv->oldspeed = 0; 1703 priv->oldduplex = -1; 1704 } 1705 1706 if (new_state && netif_msg_link(priv)) 1707 phy_print_status(phydev); 1708 1709 spin_unlock_irqrestore(&priv->txlock, flags); 1710} 1711 1712/* Update the hash table based on the current list of multicast 1713 * addresses we subscribe to. Also, change the promiscuity of 1714 * the device based on the flags (this function is called 1715 * whenever dev->flags is changed */ 1716static void gfar_set_multi(struct net_device *dev) 1717{ 1718 struct dev_mc_list *mc_ptr; 1719 struct gfar_private *priv = netdev_priv(dev); 1720 struct gfar __iomem *regs = priv->regs; 1721 u32 tempval; 1722 1723 if(dev->flags & IFF_PROMISC) { 1724 /* Set RCTRL to PROM */ 1725 tempval = gfar_read(&regs->rctrl); 1726 tempval |= RCTRL_PROM; 1727 gfar_write(&regs->rctrl, tempval); 1728 } else { 1729 /* Set RCTRL to not PROM */ 1730 tempval = gfar_read(&regs->rctrl); 1731 tempval &= ~(RCTRL_PROM); 1732 gfar_write(&regs->rctrl, tempval); 1733 } 1734 1735 if(dev->flags & IFF_ALLMULTI) { 1736 /* Set the hash to rx all multicast frames */ 1737 gfar_write(&regs->igaddr0, 0xffffffff); 1738 gfar_write(&regs->igaddr1, 0xffffffff); 1739 gfar_write(&regs->igaddr2, 0xffffffff); 1740 gfar_write(&regs->igaddr3, 0xffffffff); 1741 gfar_write(&regs->igaddr4, 0xffffffff); 1742 gfar_write(&regs->igaddr5, 0xffffffff); 1743 gfar_write(&regs->igaddr6, 0xffffffff); 1744 gfar_write(&regs->igaddr7, 0xffffffff); 1745 gfar_write(&regs->gaddr0, 0xffffffff); 1746 gfar_write(&regs->gaddr1, 0xffffffff); 1747 gfar_write(&regs->gaddr2, 0xffffffff); 1748 gfar_write(&regs->gaddr3, 0xffffffff); 1749 gfar_write(&regs->gaddr4, 0xffffffff); 1750 gfar_write(&regs->gaddr5, 0xffffffff); 1751 gfar_write(&regs->gaddr6, 0xffffffff); 1752 gfar_write(&regs->gaddr7, 0xffffffff); 1753 } else { 1754 int em_num; 1755 int idx; 1756 1757 /* zero out the hash */ 1758 gfar_write(&regs->igaddr0, 0x0); 1759 gfar_write(&regs->igaddr1, 0x0); 1760 gfar_write(&regs->igaddr2, 0x0); 1761 gfar_write(&regs->igaddr3, 0x0); 1762 gfar_write(&regs->igaddr4, 0x0); 1763 gfar_write(&regs->igaddr5, 0x0); 1764 gfar_write(&regs->igaddr6, 0x0); 1765 gfar_write(&regs->igaddr7, 0x0); 1766 gfar_write(&regs->gaddr0, 0x0); 1767 gfar_write(&regs->gaddr1, 0x0); 1768 gfar_write(&regs->gaddr2, 0x0); 1769 gfar_write(&regs->gaddr3, 0x0); 1770 gfar_write(&regs->gaddr4, 0x0); 1771 gfar_write(&regs->gaddr5, 0x0); 1772 gfar_write(&regs->gaddr6, 0x0); 1773 gfar_write(&regs->gaddr7, 0x0); 1774 1775 /* If we have extended hash tables, we need to 1776 * clear the exact match registers to prepare for 1777 * setting them */ 1778 if (priv->extended_hash) { 1779 em_num = GFAR_EM_NUM + 1; 1780 gfar_clear_exact_match(dev); 1781 idx = 1; 1782 } else { 1783 idx = 0; 1784 em_num = 0; 1785 } 1786 1787 if(dev->mc_count == 0) 1788 return; 1789 1790 /* Parse the list, and set the appropriate bits */ 1791 for(mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) { 1792 if (idx < em_num) { 1793 gfar_set_mac_for_addr(dev, idx, 1794 mc_ptr->dmi_addr); 1795 idx++; 1796 } else 1797 gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr); 1798 } 1799 } 1800 1801 return; 1802} 
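/*
 * Worked example (illustrative only): gfar_set_multi() above falls back to
 * gfar_set_hash_for_addr() below for addresses that do not fit in the
 * exact-match registers. Assuming hash_width == 8 (no extended hash, as
 * configured in gfar_probe()) and an ether_crc() result of 0xd6000000:
 *
 *	whichreg = 0xd6000000 >> (32 - 8 + 5) = 0xd6000000 >> 29 = 6
 *	whichbit = (0xd6000000 >> (32 - 8)) & 0x1f = 0xd6 & 0x1f = 22
 *	value    = 1 << (31 - 22) = 0x200
 *
 * so bit 22 (IBM numbering) of hash_regs[6] -- gaddr6 in this mode -- is
 * set by OR-ing 0x200 into that register.
 */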
1803 1804 1805/* Clears each of the exact match registers to zero, so they 1806 * don't interfere with normal reception */ 1807static void gfar_clear_exact_match(struct net_device *dev) 1808{ 1809 int idx; 1810 u8 zero_arr[MAC_ADDR_LEN] = {0,0,0,0,0,0}; 1811 1812 for(idx = 1;idx < GFAR_EM_NUM + 1;idx++) 1813 gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr); 1814} 1815 1816/* Set the appropriate hash bit for the given addr */ 1817/* The algorithm works like so: 1818 * 1) Take the Destination Address (ie the multicast address), and 1819 * do a CRC on it (little endian), and reverse the bits of the 1820 * result. 1821 * 2) Use the 8 most significant bits as a hash into a 256-entry 1822 * table. The table is controlled through 8 32-bit registers: 1823 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is 1824 * gaddr7. This means that the 3 most significant bits in the 1825 * hash index which gaddr register to use, and the 5 other bits 1826 * indicate which bit (assuming an IBM numbering scheme, which 1827 * for PowerPC (tm) is usually the case) in the register holds 1828 * the entry. */ 1829static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr) 1830{ 1831 u32 tempval; 1832 struct gfar_private *priv = netdev_priv(dev); 1833 u32 result = ether_crc(MAC_ADDR_LEN, addr); 1834 int width = priv->hash_width; 1835 u8 whichbit = (result >> (32 - width)) & 0x1f; 1836 u8 whichreg = result >> (32 - width + 5); 1837 u32 value = (1 << (31-whichbit)); 1838 1839 tempval = gfar_read(priv->hash_regs[whichreg]); 1840 tempval |= value; 1841 gfar_write(priv->hash_regs[whichreg], tempval); 1842 1843 return; 1844} 1845 1846 1847/* There are multiple MAC Address register pairs on some controllers 1848 * This function sets the numth pair to a given address 1849 */ 1850static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr) 1851{ 1852 struct gfar_private *priv = netdev_priv(dev); 1853 int idx; 1854 char tmpbuf[MAC_ADDR_LEN]; 1855 u32 tempval; 1856 u32 __iomem *macptr = &priv->regs->macstnaddr1; 1857 1858 macptr += num*2; 1859 1860 /* Now copy it into the mac registers backwards, cuz */ 1861 /* little endian is silly */ 1862 for (idx = 0; idx < MAC_ADDR_LEN; idx++) 1863 tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx]; 1864 1865 gfar_write(macptr, *((u32 *) (tmpbuf))); 1866 1867 tempval = *((u32 *) (tmpbuf + 4)); 1868 1869 gfar_write(macptr+1, tempval); 1870} 1871 1872/* GFAR error interrupt handler */ 1873static irqreturn_t gfar_error(int irq, void *dev_id) 1874{ 1875 struct net_device *dev = dev_id; 1876 struct gfar_private *priv = netdev_priv(dev); 1877 1878 /* Save ievent for future reference */ 1879 u32 events = gfar_read(&priv->regs->ievent); 1880 1881 /* Clear IEVENT */ 1882 gfar_write(&priv->regs->ievent, IEVENT_ERR_MASK); 1883 1884 /* Hmm... 
*/ 1885 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv)) 1886 printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n", 1887 dev->name, events, gfar_read(&priv->regs->imask)); 1888 1889 /* Update the error counters */ 1890 if (events & IEVENT_TXE) { 1891 priv->stats.tx_errors++; 1892 1893 if (events & IEVENT_LC) 1894 priv->stats.tx_window_errors++; 1895 if (events & IEVENT_CRL) 1896 priv->stats.tx_aborted_errors++; 1897 if (events & IEVENT_XFUN) { 1898 if (netif_msg_tx_err(priv)) 1899 printk(KERN_DEBUG "%s: TX FIFO underrun, " 1900 "packet dropped.\n", dev->name); 1901 priv->stats.tx_dropped++; 1902 priv->extra_stats.tx_underrun++; 1903 1904 /* Reactivate the Tx Queues */ 1905 gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT); 1906 } 1907 if (netif_msg_tx_err(priv)) 1908 printk(KERN_DEBUG "%s: Transmit Error\n", dev->name); 1909 } 1910 if (events & IEVENT_BSY) { 1911 priv->stats.rx_errors++; 1912 priv->extra_stats.rx_bsy++; 1913 1914 gfar_receive(irq, dev_id); 1915 1916#ifndef CONFIG_GFAR_NAPI 1917 /* Clear the halt bit in RSTAT */ 1918 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT); 1919#endif 1920 1921 if (netif_msg_rx_err(priv)) 1922 printk(KERN_DEBUG "%s: busy error (rstat: %x)\n", 1923 dev->name, gfar_read(&priv->regs->rstat)); 1924 } 1925 if (events & IEVENT_BABR) { 1926 priv->stats.rx_errors++; 1927 priv->extra_stats.rx_babr++; 1928 1929 if (netif_msg_rx_err(priv)) 1930 printk(KERN_DEBUG "%s: babbling RX error\n", dev->name); 1931 } 1932 if (events & IEVENT_EBERR) { 1933 priv->extra_stats.eberr++; 1934 if (netif_msg_rx_err(priv)) 1935 printk(KERN_DEBUG "%s: bus error\n", dev->name); 1936 } 1937 if ((events & IEVENT_RXC) && netif_msg_rx_status(priv)) 1938 printk(KERN_DEBUG "%s: control frame\n", dev->name); 1939 1940 if (events & IEVENT_BABT) { 1941 priv->extra_stats.tx_babt++; 1942 if (netif_msg_tx_err(priv)) 1943 printk(KERN_DEBUG "%s: babbling TX error\n", dev->name); 1944 } 1945 return IRQ_HANDLED; 1946} 1947 1948/* Structure for a device driver */ 1949static struct platform_driver gfar_driver = { 1950 .probe = gfar_probe, 1951 .remove = gfar_remove, 1952 .driver = { 1953 .name = "fsl-gianfar", 1954 }, 1955}; 1956 1957static int __init gfar_init(void) 1958{ 1959 int err = gfar_mdio_init(); 1960 1961 if (err) 1962 return err; 1963 1964 err = platform_driver_register(&gfar_driver); 1965 1966 if (err) 1967 gfar_mdio_exit(); 1968 1969 return err; 1970} 1971 1972static void __exit gfar_exit(void) 1973{ 1974 platform_driver_unregister(&gfar_driver); 1975 gfar_mdio_exit(); 1976} 1977 1978module_init(gfar_init); 1979module_exit(gfar_exit); 1980
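/*
 * Illustrative sketch (not part of this file): roughly how platform code
 * hands this driver its configuration, per the "Theory of operation" note
 * at the top of the file. The field names follow the gianfar_platform_data
 * members this file reads (device_flags, bus_id, phy_id, mac_addr); the
 * exact structure layout lives in the platform headers, so treat the
 * details below as an assumption rather than a reference.
 *
 *	static struct gianfar_platform_data gfar0_pdata = {
 *		.device_flags	= FSL_GIANFAR_DEV_HAS_GIGABIT,
 *		.bus_id		= 0,
 *		.phy_id		= 1,
 *		// .mac_addr would be filled in from firmware/board data
 *	};
 *
 *	static struct platform_device gfar0_device = {
 *		.name	= "fsl-gianfar",	// matches gfar_driver.driver.name above
 *		.id	= 0,
 *		.dev	= { .platform_data = &gfar0_pdata },
 *	};
 *
 *	platform_device_register(&gfar0_device);
 */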