/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 *
 * Copyright (c) 2002-2006 Freescale Semiconductor, Inc.
 * Copyright (c) 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through platform_device. Structures which
 * define the configuration needed by the board are defined in a
 * board structure in arch/ppc/platforms (though I do not
 * discount the possibility that other architectures could one
 * day be supported).
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors.  The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed).  In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit.  Without NAPI, the packet(s) will be handled
 * immediately.  Both methods will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets).  The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in.  The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted.  Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
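/*
 * Illustrative sketch (not part of the original documentation) of one
 * descriptor ring as described above; the RX and TX rings both have
 * this shape:
 *
 *   {r,t}base0 --> +------------------------------------------+
 *                  | BD 0      status | length | bufPtr       |
 *                  | BD 1                                     |
 *                  | ...                                      |
 *                  | BD N-1    ({RX,TX}BD_WRAP set, hardware  |
 *                  |            wraps back to BD 0)           |
 *                  +------------------------------------------+
 *
 * An RX descriptor belongs to hardware while RXBD_EMPTY is set, and a
 * TX descriptor belongs to hardware while TXBD_READY is set.
 */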
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>

#include "gianfar.h"
#include "gianfar_mii.h"

#define TX_TIMEOUT      (1*HZ)
#define SKB_ALLOC_TIMEOUT 1000000
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

#ifdef CONFIG_GFAR_NAPI
#define RECEIVE(x) netif_receive_skb(x)
#else
#define RECEIVE(x) netif_rx(x)
#endif

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *pdev);
static int gfar_remove(struct platform_device *pdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
extern int gfar_local_mdio_write(struct gfar_mii *regs, int mii_id, int regnum, u16 value);
extern int gfar_local_mdio_read(struct gfar_mii *regs, int mii_id, int regnum);
#ifdef CONFIG_GFAR_NAPI
static int gfar_poll(struct napi_struct *napi, int budget);
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
static void gfar_vlan_rx_register(struct net_device *netdev,
				  struct vlan_group *grp);
void gfar_halt(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);

extern const struct ethtool_ops gfar_ethtool_ops;

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return (priv->vlan_enable || priv->rx_csum_enable);
}
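/*
 * Illustrative layout (summarized from gfar_process_frame() below) of a
 * received buffer when gfar_uses_fcb() returns 1:
 *
 *   skb->data --> | FCB (GMAC_FCB_LEN bytes) | pad (priv->padding) | frame |
 *
 * The FCB and the padding are pulled off before the frame is passed up
 * the stack.
 */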
/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct platform_device *pdev)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct gianfar_platform_data *einfo;
	struct resource *r;
	int err = 0;
	DECLARE_MAC_BUF(mac);

	einfo = (struct gianfar_platform_data *) pdev->dev.platform_data;

	if (NULL == einfo) {
		printk(KERN_ERR "gfar %d: Missing additional data!\n",
		       pdev->id);

		return -ENODEV;
	}

	/* Create an ethernet device instance */
	dev = alloc_etherdev(sizeof (*priv));

	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->dev = dev;

	/* Set the info in the priv to the current info */
	priv->einfo = einfo;

	/* fill out IRQ fields */
	if (einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		priv->interruptTransmit = platform_get_irq_byname(pdev, "tx");
		priv->interruptReceive = platform_get_irq_byname(pdev, "rx");
		priv->interruptError = platform_get_irq_byname(pdev, "error");
		if (priv->interruptTransmit < 0 || priv->interruptReceive < 0 || priv->interruptError < 0)
			goto regs_fail;
	} else {
		priv->interruptTransmit = platform_get_irq(pdev, 0);
		if (priv->interruptTransmit < 0)
			goto regs_fail;
	}

	/* get a pointer to the register memory */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->regs = ioremap(r->start, sizeof (struct gfar));

	if (NULL == priv->regs) {
		err = -ENOMEM;
		goto regs_fail;
	}

	spin_lock_init(&priv->txlock);
	spin_lock_init(&priv->rxlock);

	platform_set_drvdata(pdev, dev);

	/* Stop the DMA engine now, in case it was running before */
	/* (The firmware could have used it, and left it running). */
	/* To do this, we write Graceful Receive Stop and Graceful */
	/* Transmit Stop, and then wait until the corresponding bits */
	/* in IEVENT indicate the stops have completed. */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&priv->regs->dmactrl, tempval);

	tempval = gfar_read(&priv->regs->dmactrl);
	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&priv->regs->dmactrl, tempval);

	while (!(gfar_read(&priv->regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC)))
		cpu_relax();

	/* Reset MAC layer */
	gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);

	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
	gfar_write(&priv->regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS);

	/* Initialize ECNTRL */
	gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Copy the station address into the dev structure, */
	memcpy(dev->dev_addr, einfo->mac_addr, MAC_ADDR_LEN);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) (priv->regs);

	SET_NETDEV_DEV(dev, &pdev->dev);

	/* Fill in the dev structure */
	dev->open = gfar_enet_open;
	dev->hard_start_xmit = gfar_start_xmit;
	dev->tx_timeout = gfar_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_GFAR_NAPI
	netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT);
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = gfar_netpoll;
#endif
	dev->stop = gfar_close;
	dev->change_mtu = gfar_change_mtu;
	dev->mtu = 1500;
	dev->set_multicast_list = gfar_set_multi;

	dev->ethtool_ops = &gfar_ethtool_ops;

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		priv->rx_csum_enable = 1;
		dev->features |= NETIF_F_IP_CSUM;
	} else
		priv->rx_csum_enable = 0;

	priv->vlgrp = NULL;

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->vlan_rx_register = gfar_vlan_rx_register;

		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

		priv->vlan_enable = 1;
	}

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &priv->regs->igaddr0;
		priv->hash_regs[1] = &priv->regs->igaddr1;
		priv->hash_regs[2] = &priv->regs->igaddr2;
		priv->hash_regs[3] = &priv->regs->igaddr3;
		priv->hash_regs[4] = &priv->regs->igaddr4;
		priv->hash_regs[5] = &priv->regs->igaddr5;
		priv->hash_regs[6] = &priv->regs->igaddr6;
		priv->hash_regs[7] = &priv->regs->igaddr7;
		priv->hash_regs[8] = &priv->regs->gaddr0;
		priv->hash_regs[9] = &priv->regs->gaddr1;
		priv->hash_regs[10] = &priv->regs->gaddr2;
		priv->hash_regs[11] = &priv->regs->gaddr3;
		priv->hash_regs[12] = &priv->regs->gaddr4;
		priv->hash_regs[13] = &priv->regs->gaddr5;
		priv->hash_regs[14] = &priv->regs->gaddr6;
		priv->hash_regs[15] = &priv->regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &priv->regs->gaddr0;
		priv->hash_regs[1] = &priv->regs->gaddr1;
		priv->hash_regs[2] = &priv->regs->gaddr2;
		priv->hash_regs[3] = &priv->regs->gaddr3;
		priv->hash_regs[4] = &priv->regs->gaddr4;
		priv->hash_regs[5] = &priv->regs->gaddr5;
		priv->hash_regs[6] = &priv->regs->gaddr6;
		priv->hash_regs[7] = &priv->regs->gaddr7;
	}

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	if (dev->features & NETIF_F_IP_CSUM)
		dev->hard_header_len += GMAC_FCB_LEN;

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
	priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
	priv->rx_ring_size = DEFAULT_RX_RING_SIZE;

	priv->txcoalescing = DEFAULT_TX_COALESCE;
	priv->txcount = DEFAULT_TXCOUNT;
	priv->txtime = DEFAULT_TXTIME;
	priv->rxcoalescing = DEFAULT_RX_COALESCE;
	priv->rxcount = DEFAULT_RXCOUNT;
	priv->rxtime = DEFAULT_RXTIME;

	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;

	err = register_netdev(dev);

	if (err) {
		printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
		       dev->name);
		goto register_fail;
	}

	/* Create all the sysfs files */
	gfar_init_sysfs(dev);

	/* Print out the device info */
	printk(KERN_INFO DEVICE_NAME "%s\n",
	       dev->name, print_mac(mac, dev->dev_addr));

	/* Even more device info helps when determining which kernel */
	/* provided which set of benchmarks. */
#ifdef CONFIG_GFAR_NAPI
	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
#else
	printk(KERN_INFO "%s: Running with NAPI disabled\n", dev->name);
#endif
	printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
	       dev->name, priv->rx_ring_size, priv->tx_ring_size);

	return 0;

register_fail:
	iounmap(priv->regs);
regs_fail:
	free_netdev(dev);
	return err;
}

static int gfar_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct gfar_private *priv = netdev_priv(dev);

	platform_set_drvdata(pdev, NULL);

	iounmap(priv->regs);
	free_netdev(dev);

	return 0;
}


/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	u32 ecntrl = gfar_read(&priv->regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE)
			return PHY_INTERFACE_MODE_RMII;
		else {
			phy_interface_t interface = priv->einfo->interface;

			/*
			 * This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}


/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		SUPPORTED_1000baseT_Full : 0;
	struct phy_device *phydev;
	char phy_id[BUS_ID_SIZE];
	phy_interface_t interface;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, priv->einfo->bus_id, priv->einfo->phy_id);

	interface = gfar_get_interface(dev);

	phydev = phy_connect(dev, phy_id, &adjust_link, 0, interface);

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	/* Remove any features not supported by the controller */
	phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;

	return 0;
}

static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_mii __iomem *regs =
			(void __iomem *)&priv->regs->gfar_mii_regs;

	/* Initialise TBI i/f to communicate with serdes (lynx phy) */

	/* Single clk mode, mii mode off (for serdes communication) */
	gfar_local_mdio_write(regs, TBIPA_VALUE, MII_TBICON, TBICON_CLK_SELECT);

	/* Supported pause and full-duplex, no half-duplex */
	gfar_local_mdio_write(regs, TBIPA_VALUE, MII_ADVERTISE,
			ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
			ADVERTISE_1000XPSE_ASYM);

	/* ANEG enable, restart ANEG, full duplex mode, speed[1] set */
	gfar_local_mdio_write(regs, TBIPA_VALUE, MII_BMCR, BMCR_ANENABLE |
			BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
}

static void init_registers(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR);

	/* Initialize IMASK */
	gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);

	/* Init hash registers to zero */
	gfar_write(&priv->regs->igaddr0, 0);
	gfar_write(&priv->regs->igaddr1, 0);
	gfar_write(&priv->regs->igaddr2, 0);
	gfar_write(&priv->regs->igaddr3, 0);
	gfar_write(&priv->regs->igaddr4, 0);
	gfar_write(&priv->regs->igaddr5, 0);
	gfar_write(&priv->regs->igaddr6, 0);
	gfar_write(&priv->regs->igaddr7, 0);

	gfar_write(&priv->regs->gaddr0, 0);
	gfar_write(&priv->regs->gaddr1, 0);
	gfar_write(&priv->regs->gaddr2, 0);
	gfar_write(&priv->regs->gaddr3, 0);
	gfar_write(&priv->regs->gaddr4, 0);
	gfar_write(&priv->regs->gaddr5, 0);
	gfar_write(&priv->regs->gaddr6, 0);
	gfar_write(&priv->regs->gaddr7, 0);

	/* Zero out the rmon mib registers if it has them */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&priv->regs->rmon.cam1, 0xffffffff);
		gfar_write(&priv->regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize the max receive buffer length */
	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);

	/* Assign the TBI an address which won't conflict with the PHYs */
	gfar_write(&priv->regs->tbipa, TBIPA_VALUE);
}


/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	/* Mask all interrupts */
	gfar_write(&regs->imask, IMASK_INIT_CLEAR);

	/* Clear all interrupts */
	gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&priv->regs->dmactrl);
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
	    != (DMACTRL_GRS | DMACTRL_GTS)) {
		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
		gfar_write(&priv->regs->dmactrl, tempval);

		while (!(gfar_read(&priv->regs->ievent) &
			 (IEVENT_GRSC | IEVENT_GTSC)))
			cpu_relax();
	}

	/* Disable Rx and Tx */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}

void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	unsigned long flags;

	phy_stop(priv->phydev);

	/* Lock it down */
	spin_lock_irqsave(&priv->txlock, flags);
	spin_lock(&priv->rxlock);

	gfar_halt(dev);

	spin_unlock(&priv->rxlock);
	spin_unlock_irqrestore(&priv->txlock, flags);

	/* Free the IRQs */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		free_irq(priv->interruptError, dev);
		free_irq(priv->interruptTransmit, dev);
		free_irq(priv->interruptReceive, dev);
	} else {
		free_irq(priv->interruptTransmit, dev);
	}

	free_skb_resources(priv);

	dma_free_coherent(NULL,
			sizeof(struct txbd8)*priv->tx_ring_size
			+ sizeof(struct rxbd8)*priv->rx_ring_size,
			priv->tx_bd_base,
			gfar_read(&regs->tbase0));
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff */
static void free_skb_resources(struct gfar_private *priv)
{
	struct rxbd8 *rxbdp;
	struct txbd8 *txbdp;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	txbdp = priv->tx_bd_base;

	for (i = 0; i < priv->tx_ring_size; i++) {

		if (priv->tx_skbuff[i]) {
			dma_unmap_single(NULL, txbdp->bufPtr,
					txbdp->length,
					DMA_TO_DEVICE);
			dev_kfree_skb_any(priv->tx_skbuff[i]);
			priv->tx_skbuff[i] = NULL;
		}
	}

	kfree(priv->tx_skbuff);

	rxbdp = priv->rx_bd_base;

	/* rx_skbuff is not guaranteed to be allocated, so only
	 * free it and its contents if it is allocated */
	if(priv->rx_skbuff != NULL) {
		for (i = 0; i < priv->rx_ring_size; i++) {
			if (priv->rx_skbuff[i]) {
				dma_unmap_single(NULL, rxbdp->bufPtr,
						priv->rx_buffer_size,
						DMA_FROM_DEVICE);

				dev_kfree_skb_any(priv->rx_skbuff[i]);
				priv->rx_skbuff[i] = NULL;
			}

			rxbdp->status = 0;
			rxbdp->length = 0;
			rxbdp->bufPtr = 0;

			rxbdp++;
		}

		kfree(priv->rx_skbuff);
	}
}
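/* gfar_start() below is the inverse of gfar_halt() above: it re-enables
 * Rx and Tx in MACCFG1, clears the graceful-stop bits (GRS/GTS) in
 * DMACTRL, and clears the THLT/RHLT halt bits so the DMA engine resumes
 * polling the descriptor rings. */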
void gfar_start(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&priv->regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&priv->regs->dmactrl, tempval);

	/* Clear THLT/RHLT, so that the DMA starts polling now */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
	gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT);

	/* Unmask the interrupts we look for */
	gfar_write(&regs->imask, IMASK_DEFAULT);
}

/* Bring the controller up and running */
int startup_gfar(struct net_device *dev)
{
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	dma_addr_t addr;
	unsigned long vaddr;
	int i;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	int err = 0;
	u32 rctrl = 0;
	u32 attrs = 0;

	gfar_write(&regs->imask, IMASK_INIT_CLEAR);

	/* Allocate memory for the buffer descriptors */
	vaddr = (unsigned long) dma_alloc_coherent(NULL,
			sizeof (struct txbd8) * priv->tx_ring_size +
			sizeof (struct rxbd8) * priv->rx_ring_size,
			&addr, GFP_KERNEL);

	if (vaddr == 0) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
					dev->name);
		return -ENOMEM;
	}

	priv->tx_bd_base = (struct txbd8 *) vaddr;

	/* enet DMA only understands physical addresses */
	gfar_write(&regs->tbase0, addr);

	/* Start the rx descriptor ring where the tx ring leaves off */
	addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
	vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size;
	priv->rx_bd_base = (struct rxbd8 *) vaddr;
	gfar_write(&regs->rbase0, addr);

	/* Setup the skbuff rings */
	priv->tx_skbuff =
	    (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
					priv->tx_ring_size, GFP_KERNEL);

	if (NULL == priv->tx_skbuff) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
					dev->name);
		err = -ENOMEM;
		goto tx_skb_fail;
	}

	for (i = 0; i < priv->tx_ring_size; i++)
		priv->tx_skbuff[i] = NULL;

	priv->rx_skbuff =
	    (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
					priv->rx_ring_size, GFP_KERNEL);

	if (NULL == priv->rx_skbuff) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
					dev->name);
		err = -ENOMEM;
		goto rx_skb_fail;
	}

	for (i = 0; i < priv->rx_ring_size; i++)
		priv->rx_skbuff[i] = NULL;

	/* Initialize some variables in our dev structure */
	priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
	priv->cur_rx = priv->rx_bd_base;
	priv->skb_curtx = priv->skb_dirtytx = 0;
	priv->skb_currx = 0;

	/* Initialize Transmit Descriptor Ring */
	txbdp = priv->tx_bd_base;
	for (i = 0; i < priv->tx_ring_size; i++) {
		txbdp->status = 0;
		txbdp->length = 0;
		txbdp->bufPtr = 0;
		txbdp++;
	}

	/* Set the last descriptor in the ring to indicate wrap */
	txbdp--;
	txbdp->status |= TXBD_WRAP;

	rxbdp = priv->rx_bd_base;
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct sk_buff *skb = NULL;

		rxbdp->status = 0;

		skb = gfar_new_skb(dev, rxbdp);

		priv->rx_skbuff[i] = skb;

		rxbdp++;
	}

	/* Set the last descriptor in the ring to wrap */
	rxbdp--;
	rxbdp->status |= RXBD_WRAP;

	/* If the device has multiple interrupts, register for
	 * them.  Otherwise, only register for the one */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive */
		if (request_irq(priv->interruptError, gfar_error,
				0, "enet_error", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, priv->interruptError);

			err = -1;
			goto err_irq_fail;
		}

		if (request_irq(priv->interruptTransmit, gfar_transmit,
				0, "enet_tx", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, priv->interruptTransmit);

			err = -1;

			goto tx_irq_fail;
		}

		if (request_irq(priv->interruptReceive, gfar_receive,
				0, "enet_rx", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
						dev->name, priv->interruptReceive);

			err = -1;
			goto rx_irq_fail;
		}
	} else {
		if (request_irq(priv->interruptTransmit, gfar_interrupt,
				0, "gfar_interrupt", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, priv->interruptTransmit);

			err = -1;
			goto err_irq_fail;
		}
	}

	phy_start(priv->phydev);

	/* Configure the coalescing support */
	if (priv->txcoalescing)
		gfar_write(&regs->txic,
			   mk_ic_value(priv->txcount, priv->txtime));
	else
		gfar_write(&regs->txic, 0);

	if (priv->rxcoalescing)
		gfar_write(&regs->rxic,
			   mk_ic_value(priv->rxcount, priv->rxtime));
	else
		gfar_write(&regs->rxic, 0);

	if (priv->rx_csum_enable)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(dev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->vlan_enable)
		rctrl |= RCTRL_VLAN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Init rctrl based on our settings */
	gfar_write(&priv->regs->rctrl, rctrl);

	if (dev->features & NETIF_F_IP_CSUM)
		gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&priv->regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&priv->regs->attr, attrs);

	gfar_write(&priv->regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&priv->regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&priv->regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);

	/* Start the controller */
	gfar_start(dev);

	return 0;

rx_irq_fail:
	free_irq(priv->interruptTransmit, dev);
tx_irq_fail:
	free_irq(priv->interruptError, dev);
err_irq_fail:
rx_skb_fail:
	free_skb_resources(priv);
tx_skb_fail:
	dma_free_coherent(NULL,
			sizeof(struct txbd8)*priv->tx_ring_size
			+ sizeof(struct rxbd8)*priv->rx_ring_size,
			priv->tx_bd_base,
			gfar_read(&regs->tbase0));

	return err;
}
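/*
 * A note on the txic/rxic values programmed in startup_gfar() above: per
 * the theory of operation at the top of this file, mk_ic_value(count, time)
 * arms the coalescing logic so that the interrupt is deferred until either
 * `count` frames have arrived or the given time threshold has elapsed.
 */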
/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
#ifdef CONFIG_GFAR_NAPI
	struct gfar_private *priv = netdev_priv(dev);
#endif
	int err;

#ifdef CONFIG_GFAR_NAPI
	napi_enable(&priv->napi);
#endif

	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);

	if(err) {
#ifdef CONFIG_GFAR_NAPI
		napi_disable(&priv->napi);
#endif
		return err;
	}

	err = startup_gfar(dev);
	if (err) {
#ifdef CONFIG_GFAR_NAPI
		napi_disable(&priv->napi);
#endif
		return err;
	}

	netif_start_queue(dev);

	return err;
}

static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb, struct txbd8 *bdp)
{
	struct txfcb *fcb = (struct txfcb *)skb_push (skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
{
	u8 flags = 0;

	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload.  We set it to checksum, using a pseudo-header
	 * we provide
	 */
	flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is */
	/* And provide the already calculated phcs */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = udp_hdr(skb)->check;
	} else
		fcb->phcs = tcp_hdr(skb)->check;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr */
	fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}

void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = vlan_tx_tag_get(skb);
}
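/*
 * Worked example (illustrative) for the offsets computed in
 * gfar_tx_checksum() above, assuming an untagged Ethernet frame carrying
 * IPv4 with no options: the network header sits 14 bytes into the frame,
 * so once the 8-byte FCB has been pushed, skb_network_offset() is
 * 14 + GMAC_FCB_LEN, giving l3os = 14; l4os is the IP header length, 20.
 */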
/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp;
	u16 status;
	unsigned long flags;

	/* Update transmit stats */
	dev->stats.tx_bytes += skb->len;

	/* Lock priv now */
	spin_lock_irqsave(&priv->txlock, flags);

	/* Point at the first free tx descriptor */
	txbdp = priv->cur_tx;

	/* Clear all but the WRAP status flags */
	status = txbdp->status & TXBD_WRAP;

	/* Set up checksumming */
	if (likely((dev->features & NETIF_F_IP_CSUM)
			&& (CHECKSUM_PARTIAL == skb->ip_summed))) {
		fcb = gfar_add_fcb(skb, txbdp);
		status |= TXBD_TOE;
		gfar_tx_checksum(skb, fcb);
	}

	if (priv->vlan_enable &&
	    unlikely(priv->vlgrp && vlan_tx_tag_present(skb))) {
		if (unlikely(NULL == fcb)) {
			fcb = gfar_add_fcb(skb, txbdp);
			status |= TXBD_TOE;
		}

		gfar_tx_vlan(skb, fcb);
	}

	/* Set buffer length and pointer */
	txbdp->length = skb->len;
	txbdp->bufPtr = dma_map_single(NULL, skb->data,
			skb->len, DMA_TO_DEVICE);

	/* Save the skb pointer so we can free it later */
	priv->tx_skbuff[priv->skb_curtx] = skb;

	/* Update the current skb pointer (wrapping if this was the last) */
	priv->skb_curtx =
	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);

	/* Flag the BD as interrupt-causing */
	status |= TXBD_INTERRUPT;

	/* Flag the BD as ready to go, last in frame, and */
	/* in need of CRC */
	status |= (TXBD_READY | TXBD_LAST | TXBD_CRC);

	dev->trans_start = jiffies;

	/* The powerpc-specific eieio() is used, as wmb() has too strong
	 * semantics (it requires synchronization between cacheable and
	 * uncacheable mappings, which eieio doesn't provide and which we
	 * don't need), thus requiring a more expensive sync instruction.  At
	 * some point, the set of architecture-independent barrier functions
	 * should be expanded to include weaker barriers.
	 */

	eieio();
	txbdp->status = status;

	/* If this was the last BD in the ring, the next one */
	/* is at the beginning of the ring */
	if (txbdp->status & TXBD_WRAP)
		txbdp = priv->tx_bd_base;
	else
		txbdp++;

	/* If the next BD still needs to be cleaned up, then the bds
	   are full.  We need to tell the kernel to stop sending us stuff. */
	if (txbdp == priv->dirty_tx) {
		netif_stop_queue(dev);

		dev->stats.tx_fifo_errors++;
	}

	/* Update the current txbd to the next one */
	priv->cur_tx = txbdp;

	/* Tell the DMA to go go go */
	gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);

	/* Unlock priv */
	spin_unlock_irqrestore(&priv->txlock, flags);

	return 0;
}

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

#ifdef CONFIG_GFAR_NAPI
	napi_disable(&priv->napi);
#endif

	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	netif_stop_queue(dev);

	return 0;
}
/* Changes the mac address if the controller is not running. */
int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}


/* Enables and disables VLAN insertion/extraction */
static void gfar_vlan_rx_register(struct net_device *dev,
		struct vlan_group *grp)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	u32 tempval;

	spin_lock_irqsave(&priv->rxlock, flags);

	priv->vlgrp = grp;

	if (grp) {
		/* Enable VLAN tag insertion */
		tempval = gfar_read(&priv->regs->tctrl);
		tempval |= TCTRL_VLINS;

		gfar_write(&priv->regs->tctrl, tempval);

		/* Enable VLAN tag extraction */
		tempval = gfar_read(&priv->regs->rctrl);
		tempval |= RCTRL_VLEX;
		gfar_write(&priv->regs->rctrl, tempval);
	} else {
		/* Disable VLAN tag insertion */
		tempval = gfar_read(&priv->regs->tctrl);
		tempval &= ~TCTRL_VLINS;
		gfar_write(&priv->regs->tctrl, tempval);

		/* Disable VLAN tag extraction */
		tempval = gfar_read(&priv->regs->rctrl);
		tempval &= ~RCTRL_VLEX;
		gfar_write(&priv->regs->rctrl, tempval);
	}

	spin_unlock_irqrestore(&priv->rxlock, flags);
}

static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	int tempsize, tempval;
	struct gfar_private *priv = netdev_priv(dev);
	int oldsize = priv->rx_buffer_size;
	int frame_size = new_mtu + ETH_HLEN;

	if (priv->vlan_enable)
		frame_size += VLAN_ETH_HLEN;

	if (gfar_uses_fcb(priv))
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: Invalid MTU setting\n",
					dev->name);
		return -EINVAL;
	}

	tempsize =
	    (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
	    INCREMENTAL_BUFFER_SIZE;

	/* Only stop and start the controller if it isn't already
	 * stopped, and we changed something */
	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		stop_gfar(dev);

	priv->rx_buffer_size = tempsize;

	dev->mtu = new_mtu;

	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
	gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length */
	tempval = gfar_read(&priv->regs->maccfg2);

	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
	else
		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

	gfar_write(&priv->regs->maccfg2, tempval);

	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		startup_gfar(dev);

	return 0;
}
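/*
 * Worked example (illustrative) for the rounding in gfar_change_mtu()
 * above, assuming no VLAN/FCB/padding overhead and an
 * INCREMENTAL_BUFFER_SIZE of 512: new_mtu = 1500 gives frame_size = 1514,
 * so tempsize = (1514 & ~511) + 512 = 1536, i.e. the buffer size is
 * rounded up to the next multiple of 512.
 */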
/* gfar_timeout gets called when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem. */
static void gfar_timeout(struct net_device *dev)
{
	dev->stats.tx_errors++;

	if (dev->flags & IFF_UP) {
		stop_gfar(dev);
		startup_gfar(dev);
	}

	netif_schedule(dev);
}

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct gfar_private *priv = netdev_priv(dev);
	struct txbd8 *bdp;

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);

	/* Lock priv */
	spin_lock(&priv->txlock);
	bdp = priv->dirty_tx;
	while ((bdp->status & TXBD_READY) == 0) {
		/* If dirty_tx and cur_tx are the same, then either the */
		/* ring is empty or full now (it could only be full in the beginning, */
		/* obviously).  If it is empty, we are done. */
		if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
			break;

		dev->stats.tx_packets++;

		/* Deferred means some collisions occurred during transmit, */
		/* but we eventually sent the packet. */
		if (bdp->status & TXBD_DEF)
			dev->stats.collisions++;

		/* Free the sk buffer associated with this TxBD */
		dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);
		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
		priv->skb_dirtytx =
		    (priv->skb_dirtytx +
		     1) & TX_RING_MOD_MASK(priv->tx_ring_size);

		/* update bdp to point at next bd in the ring (wrapping if necessary) */
		if (bdp->status & TXBD_WRAP)
			bdp = priv->tx_bd_base;
		else
			bdp++;

		/* Move dirty_tx to be the next bd */
		priv->dirty_tx = bdp;

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	} /* while ((bdp->status & TXBD_READY) == 0) */

	/* If we are coalescing the interrupts, reset the timer */
	/* Otherwise, clear it */
	if (priv->txcoalescing)
		gfar_write(&priv->regs->txic,
			   mk_ic_value(priv->txcount, priv->txtime));
	else
		gfar_write(&priv->regs->txic, 0);

	spin_unlock(&priv->txlock);

	return IRQ_HANDLED;
}
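/*
 * gfar_new_skb() below aligns the receive buffer by over-allocating
 * RXBUF_ALIGNMENT extra bytes and then reserving just enough to round
 * skb->data up to the next boundary.  For example (illustrative, assuming
 * RXBUF_ALIGNMENT is 64): if skb->data ends in 0x3a, then
 * alignamount = 64 - (0x3a & 63) = 6.
 */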
struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
{
	unsigned int alignamount;
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;
	unsigned int timeout = SKB_ALLOC_TIMEOUT;

	/* We have to allocate the skb, so keep trying till we succeed */
	while ((!skb) && timeout--)
		skb = dev_alloc_skb(priv->rx_buffer_size + RXBUF_ALIGNMENT);

	if (NULL == skb)
		return NULL;

	alignamount = RXBUF_ALIGNMENT -
		(((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1));

	/* We need the data buffer to be aligned properly.  We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb, alignamount);

	bdp->bufPtr = dma_map_single(NULL, skb->data,
			priv->rx_buffer_size, DMA_FROM_DEVICE);

	bdp->length = 0;

	/* Mark the buffer empty */
	eieio();
	bdp->status |= (RXBD_EMPTY | RXBD_INTERRUPT);

	return skb;
}

static inline void count_errors(unsigned short status, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors
	 * matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

		estats->rx_trunc++;

		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			estats->rx_large++;
		else
			estats->rx_short++;
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		estats->rx_nonoctet++;
	}
	if (status & RXBD_CRCERR) {
		estats->rx_crcerr++;
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		estats->rx_overrun++;
		stats->rx_crc_errors++;
	}
}

irqreturn_t gfar_receive(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct gfar_private *priv = netdev_priv(dev);
#ifdef CONFIG_GFAR_NAPI
	u32 tempval;
#else
	unsigned long flags;
#endif

	/* Clear IEVENT, so rx interrupt isn't called again
	 * because of this interrupt */
	gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);

	/* support NAPI */
#ifdef CONFIG_GFAR_NAPI
	if (netif_rx_schedule_prep(dev, &priv->napi)) {
		tempval = gfar_read(&priv->regs->imask);
		tempval &= IMASK_RX_DISABLED;
		gfar_write(&priv->regs->imask, tempval);

		__netif_rx_schedule(dev, &priv->napi);
	} else {
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
				dev->name, gfar_read(&priv->regs->ievent),
				gfar_read(&priv->regs->imask));
	}
#else

	spin_lock_irqsave(&priv->rxlock, flags);
	gfar_clean_rx_ring(dev, priv->rx_ring_size);

	/* If we are coalescing interrupts, update the timer */
	/* Otherwise, clear it */
	if (priv->rxcoalescing)
		gfar_write(&priv->regs->rxic,
			   mk_ic_value(priv->rxcount, priv->rxtime));
	else
		gfar_write(&priv->regs->rxic, 0);

	spin_unlock_irqrestore(&priv->rxlock, flags);
#endif

	return IRQ_HANDLED;
}

static inline int gfar_rx_vlan(struct sk_buff *skb,
		struct vlan_group *vlgrp, unsigned short vlctl)
{
#ifdef CONFIG_GFAR_NAPI
	return vlan_hwaccel_receive_skb(skb, vlgrp, vlctl);
#else
	return vlan_hwaccel_rx(skb, vlgrp, vlctl);
#endif
}
static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary.  Otherwise, it is */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
}


static inline struct rxfcb *gfar_get_fcb(struct sk_buff *skb)
{
	struct rxfcb *fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb */
	skb_pull(skb, GMAC_FCB_LEN);

	return fcb;
}

/* gfar_process_frame() -- handle one incoming packet if skb
 * isn't NULL. */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int length)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	if (NULL == skb) {
		if (netif_msg_rx_err(priv))
			printk(KERN_WARNING "%s: Missing skb!!.\n", dev->name);
		dev->stats.rx_dropped++;
		priv->extra_stats.rx_skbmissing++;
	} else {
		int ret;

		/* Prep the skb for the packet */
		skb_put(skb, length);

		/* Grab the FCB if there is one */
		if (gfar_uses_fcb(priv))
			fcb = gfar_get_fcb(skb);

		/* Remove the padded bytes, if there are any */
		if (priv->padding)
			skb_pull(skb, priv->padding);

		if (priv->rx_csum_enable)
			gfar_rx_checksum(skb, fcb);

		/* Tell the skb what kind of packet this is */
		skb->protocol = eth_type_trans(skb, dev);

		/* Send the packet up the stack */
		if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
			ret = gfar_rx_vlan(skb, priv->vlgrp, fcb->vlctl);
		else
			ret = RECEIVE(skb);

		if (NET_RX_DROP == ret)
			priv->extra_stats.kernel_dropped++;
	}

	return 0;
}
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached.  Returns the number
 * of frames handled
 */
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
{
	struct rxbd8 *bdp;
	struct sk_buff *skb;
	u16 pkt_len;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = priv->cur_rx;

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		rmb();
		skb = priv->rx_skbuff[priv->skb_currx];

		if (!(bdp->status &
		      (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET
		       | RXBD_CRCERR | RXBD_OVERRUN | RXBD_TRUNCATED))) {
			/* Increment the number of packets */
			dev->stats.rx_packets++;
			howmany++;

			/* Remove the FCS from the packet length */
			pkt_len = bdp->length - 4;

			gfar_process_frame(dev, skb, pkt_len);

			dev->stats.rx_bytes += pkt_len;
		} else {
			count_errors(bdp->status, dev);

			if (skb)
				dev_kfree_skb_any(skb);

			priv->rx_skbuff[priv->skb_currx] = NULL;
		}

		dev->last_rx = jiffies;

		/* Clear the status flags for this buffer */
		bdp->status &= ~RXBD_STATS;

		/* Add another skb for the future */
		skb = gfar_new_skb(dev, bdp);
		priv->rx_skbuff[priv->skb_currx] = skb;

		/* Update to the next pointer */
		if (bdp->status & RXBD_WRAP)
			bdp = priv->rx_bd_base;
		else
			bdp++;

		/* update to point at the next skb */
		priv->skb_currx =
		    (priv->skb_currx +
		     1) & RX_RING_MOD_MASK(priv->rx_ring_size);

	}

	/* Update the current rxbd pointer to be the next one */
	priv->cur_rx = bdp;

	return howmany;
}

#ifdef CONFIG_GFAR_NAPI
static int gfar_poll(struct napi_struct *napi, int budget)
{
	struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
	struct net_device *dev = priv->dev;
	int howmany;

	howmany = gfar_clean_rx_ring(dev, budget);

	if (howmany < budget) {
		netif_rx_complete(dev, napi);

		/* Clear the halt bit in RSTAT */
		gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);

		gfar_write(&priv->regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer */
		/* Otherwise, clear it */
		if (priv->rxcoalescing)
			gfar_write(&priv->regs->rxic,
				   mk_ic_value(priv->rxcount, priv->rxtime));
		else
			gfar_write(&priv->regs->rxic, 0);
	}

	return howmany;
}
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		disable_irq(priv->interruptTransmit);
		disable_irq(priv->interruptReceive);
		disable_irq(priv->interruptError);
		gfar_interrupt(priv->interruptTransmit, dev);
		enable_irq(priv->interruptError);
		enable_irq(priv->interruptReceive);
		enable_irq(priv->interruptTransmit);
	} else {
		disable_irq(priv->interruptTransmit);
		gfar_interrupt(priv->interruptTransmit, dev);
		enable_irq(priv->interruptTransmit);
	}
}
#endif

/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Save ievent for future reference */
	u32 events = gfar_read(&priv->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, dev_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, dev_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, dev_id);

	return IRQ_HANDLED;
}

/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	unsigned long flags;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;

	spin_lock_irqsave(&priv->txlock, flags);
	if (phydev->link) {
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				if (netif_msg_link(priv))
					printk(KERN_WARNING
						"%s: Ack!  Speed (%d) is not 10/100/1000!\n",
						dev->name, phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
			netif_schedule(dev);
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	spin_unlock_irqrestore(&priv->txlock, flags);
}

/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed) */
static void gfar_set_multi(struct net_device *dev)
{
	struct dev_mc_list *mc_ptr;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	if(dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if(dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if(dev->mc_count == 0)
			return;

		/* Parse the list, and set the appropriate bits */
		for(mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx,
						mc_ptr->dmi_addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
		}
	}

	return;
}
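/*
 * Worked example (illustrative CRC value) for the mapping implemented by
 * gfar_set_hash_for_addr() below, with hash_width == 8: if the top byte
 * of `result` is 0xb4 (1011 0100b), then whichreg = 0xb4 >> 5 = 5 and
 * whichbit = 0xb4 & 0x1f = 20, so IBM bit 20 of the sixth hash register
 * (gaddr5 in non-extended mode) is set, i.e. tempval |= 1 << (31 - 20).
 */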

/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	u8 zero_arr[MAC_ADDR_LEN] = {0,0,0,0,0,0};

	for(idx = 1;idx < GFAR_EM_NUM + 1;idx++)
		gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 *  do a CRC on it (little endian), and reverse the bits of the
 *  result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 *  table.  The table is controlled through 8 32-bit registers:
 *  gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 *  entry 255.  This means that the 3 most significant bits of the
 *  hash select which gaddr register to use, and the 5 other bits
 *  indicate which bit (assuming an IBM numbering scheme, which
 *  for PowerPC (tm) is usually the case) in the register holds
 *  the entry. */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(MAC_ADDR_LEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31-whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);

	return;
}


/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	int idx;
	char tmpbuf[MAC_ADDR_LEN];
	u32 tempval;
	u32 __iomem *macptr = &priv->regs->macstnaddr1;

	macptr += num*2;

	/* Now copy it into the mac registers backwards, cuz */
	/* little endian is silly */
	for (idx = 0; idx < MAC_ADDR_LEN; idx++)
		tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];

	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
}

/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Save ievent for future reference */
	u32 events = gfar_read(&priv->regs->ievent);

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, IEVENT_ERR_MASK);

	/* Hmm... */
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
		       dev->name, events, gfar_read(&priv->regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			if (netif_msg_tx_err(priv))
				printk(KERN_DEBUG "%s: TX FIFO underrun, "
				       "packet dropped.\n", dev->name);
			dev->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			/* Reactivate the Tx Queues */
			gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
		}
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, dev_id);

#ifndef CONFIG_GFAR_NAPI
		/* Clear the halt bit in RSTAT */
		gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
#endif

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
			       dev->name, gfar_read(&priv->regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: bus error\n", dev->name);
	}
	if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
		printk(KERN_DEBUG "%s: control frame\n", dev->name);

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
	}
	return IRQ_HANDLED;
}

/* Structure for a device driver */
static struct platform_driver gfar_driver = {
	.probe = gfar_probe,
	.remove = gfar_remove,
	.driver	= {
		.name = "fsl-gianfar",
	},
};

static int __init gfar_init(void)
{
	int err = gfar_mdio_init();

	if (err)
		return err;

	err = platform_driver_register(&gfar_driver);

	if (err)
		gfar_mdio_exit();

	return err;
}

static void __exit gfar_exit(void)
{
	platform_driver_unregister(&gfar_driver);
	gfar_mdio_exit();
}

module_init(gfar_init);
module_exit(gfar_exit);