/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala (kumar.gala@freescale.com)
 *
 * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 * This driver is designed for the Triple-speed Ethernet
 * controllers on the Freescale 8540/8560 integrated processors,
 * as well as the Fast Ethernet Controller on the 8540.
 *
 * The driver is initialized through platform_device. Structures which
 * define the configuration needed by the board are defined in a
 * board structure in arch/ppc/platforms (though I do not
 * discount the possibility that other architectures could one
 * day be supported). One assumption the driver currently makes
 * is that the PHY is configured in such a way to advertise all
 * capabilities. This is a sensible default, and on certain
 * PHYs, changing this default encounters substantial errata
 * issues. Future versions may remove this requirement, but for
 * now, it is best for the firmware to ensure this is the case.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. Without NAPI, the packet(s) will be handled
 * immediately. Both methods will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
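 *
 * A note on descriptor ownership, as this driver uses it: the
 * controller owns a transmit descriptor while TXBD_READY is set and
 * a receive descriptor while RXBD_EMPTY is set; the driver only
 * reclaims a descriptor once the hardware has cleared those bits.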
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/device.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>

#include "gianfar.h"
#include "gianfar_phy.h"

#define TX_TIMEOUT (1*HZ)
#define SKB_ALLOC_TIMEOUT 1000000
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

#ifdef CONFIG_GFAR_NAPI
#define RECEIVE(x) netif_receive_skb(x)
#else
#define RECEIVE(x) netif_rx(x)
#endif

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.1";

int startup_gfar(struct net_device *dev);
static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp);
static struct net_device_stats *gfar_get_stats(struct net_device *dev);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs);
static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs);
irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs);
static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static void gfar_phy_change(void *data);
static void gfar_phy_timer(unsigned long data);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct device *device);
static int gfar_remove(struct device *device);
void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
#ifdef CONFIG_GFAR_NAPI
static int gfar_poll(struct net_device *dev, int *budget);
#endif
static int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
static void gfar_phy_startup_timer(unsigned long data);

extern struct ethtool_ops gfar_ethtool_ops;

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

static int gfar_probe(struct device *device)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct platform_device *pdev = to_platform_device(device);
	struct gianfar_platform_data *einfo;
	struct resource *r;
	int idx;
	int err = 0;
	int dev_ethtool_ops = 0;

	einfo = (struct gianfar_platform_data *) pdev->dev.platform_data;

	if (einfo == NULL) {
		printk(KERN_ERR "gfar %d: Missing additional data!\n",
		       pdev->id);

		return -ENODEV;
	}

	/* Create an ethernet device instance */
	dev = alloc_etherdev(sizeof (*priv));

	if (dev == NULL)
		return -ENOMEM;

	priv = netdev_priv(dev);

	/* Set the info in the priv to the current info */
	priv->einfo = einfo;

	/* fill out IRQ fields */
	if (einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		priv->interruptTransmit = platform_get_irq_byname(pdev, "tx");
		priv->interruptReceive = platform_get_irq_byname(pdev, "rx");
		priv->interruptError = platform_get_irq_byname(pdev, "error");
	} else {
		priv->interruptTransmit = platform_get_irq(pdev, 0);
	}

	/* get a pointer to the register memory */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->regs = (struct gfar *)
		ioremap(r->start, sizeof (struct gfar));

	if (priv->regs == NULL) {
		err = -ENOMEM;
		goto regs_fail;
	}

	/* Set the PHY base address */
	priv->phyregs = (struct gfar *)
		ioremap(einfo->phy_reg_addr, sizeof (struct gfar));

	if (priv->phyregs == NULL) {
		err = -ENOMEM;
		goto phy_regs_fail;
	}

	spin_lock_init(&priv->lock);

	dev_set_drvdata(device, dev);

	/* Stop the DMA engine now, in case it was running before
	 * (The firmware could have used it, and left it running).
	 * To do this, we write Graceful Receive Stop and Graceful
	 * Transmit Stop, and then wait until the corresponding bits
	 * in IEVENT indicate the stops have completed. */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&priv->regs->dmactrl, tempval);

	tempval = gfar_read(&priv->regs->dmactrl);
	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&priv->regs->dmactrl, tempval);

	while (!(gfar_read(&priv->regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC)))
		cpu_relax();

	/* Reset MAC layer */
	gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);

	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
	gfar_write(&priv->regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS);

	/* Initialize ECNTRL */
	gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Copy the station address into the dev structure */
	memcpy(dev->dev_addr, einfo->mac_addr, MAC_ADDR_LEN);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) (priv->regs);

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, device);

	/* Fill in the dev structure */
	dev->open = gfar_enet_open;
	dev->hard_start_xmit = gfar_start_xmit;
	dev->tx_timeout = gfar_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_GFAR_NAPI
	dev->poll = gfar_poll;
	dev->weight = GFAR_DEV_WEIGHT;
#endif
	dev->stop = gfar_close;
	dev->get_stats = gfar_get_stats;
	dev->change_mtu = gfar_change_mtu;
	dev->mtu = 1500;
	dev->set_multicast_list = gfar_set_multi;

	/* Index into the array of possible ethtool
	 * ops to catch all 4 possibilities */
	if ((priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) == 0)
		dev_ethtool_ops += 1;

	if ((priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE) == 0)
		dev_ethtool_ops += 2;

	dev->ethtool_ops = gfar_op_array[dev_ethtool_ops];

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
#ifdef CONFIG_GFAR_BUFSTASH
	priv->rx_stash_size = STASH_LENGTH;
#endif
	priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
	priv->rx_ring_size = DEFAULT_RX_RING_SIZE;

	priv->txcoalescing = DEFAULT_TX_COALESCE;
	priv->txcount = DEFAULT_TXCOUNT;
	priv->txtime = DEFAULT_TXTIME;
	priv->rxcoalescing = DEFAULT_RX_COALESCE;
	priv->rxcount = DEFAULT_RXCOUNT;
	priv->rxtime = DEFAULT_RXTIME;

	err = register_netdev(dev);

	if (err) {
		printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
		       dev->name);
		goto register_fail;
	}

	/* Print out the device info */
	printk(KERN_INFO DEVICE_NAME, dev->name);
	for (idx = 0; idx < 6; idx++)
		printk("%2.2x%c", dev->dev_addr[idx], idx == 5 ? ' ' : ':');
	printk("\n");

	/* Even more device info helps when determining which kernel
	 * provided which set of benchmarks. Since this is global for all
	 * devices, we only print it once. */
#ifdef CONFIG_GFAR_NAPI
	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
#else
	printk(KERN_INFO "%s: Running with NAPI disabled\n", dev->name);
#endif
	printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
	       dev->name, priv->rx_ring_size, priv->tx_ring_size);

	return 0;

register_fail:
	iounmap((void *) priv->phyregs);
phy_regs_fail:
	iounmap((void *) priv->regs);
regs_fail:
	free_netdev(dev);
	return err;
}

static int gfar_remove(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct gfar_private *priv = netdev_priv(dev);

	dev_set_drvdata(device, NULL);

	iounmap((void *) priv->regs);
	iounmap((void *) priv->phyregs);
	free_netdev(dev);

	return 0;
}

/* Configure the PHY for dev.
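 * This allocates the gfar_mii_info structure (freed again in
 * gfar_close(), or here if initialization fails), resets the MII
 * management interface, and waits for the bus to go idle before
 * probing for the PHY.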
 * Returns 0 on success, or -1 on failure. */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_info *curphy;
	unsigned int timeout = PHY_INIT_TIMEOUT;
	struct gfar *phyregs = priv->phyregs;
	struct gfar_mii_info *mii_info;
	int err;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	mii_info = kmalloc(sizeof(struct gfar_mii_info),
			   GFP_KERNEL);

	if (NULL == mii_info) {
		printk(KERN_ERR "%s: Could not allocate mii_info\n",
		       dev->name);
		return -ENOMEM;
	}

	mii_info->speed = SPEED_1000;
	mii_info->duplex = DUPLEX_FULL;
	mii_info->pause = 0;
	mii_info->link = 1;

	mii_info->advertising = (ADVERTISED_10baseT_Half |
				 ADVERTISED_10baseT_Full |
				 ADVERTISED_100baseT_Half |
				 ADVERTISED_100baseT_Full |
				 ADVERTISED_1000baseT_Full);
	mii_info->autoneg = 1;

	spin_lock_init(&mii_info->mdio_lock);

	mii_info->mii_id = priv->einfo->phyid;

	mii_info->dev = dev;

	mii_info->mdio_read = &read_phy_reg;
	mii_info->mdio_write = &write_phy_reg;

	priv->mii_info = mii_info;

	/* Reset the management interface */
	gfar_write(&phyregs->miimcfg, MIIMCFG_RESET);

	/* Setup the MII Mgmt clock speed */
	gfar_write(&phyregs->miimcfg, MIIMCFG_INIT_VALUE);

	/* Wait until the bus is free */
	while ((gfar_read(&phyregs->miimind) & MIIMIND_BUSY) &&
	       timeout--)
		cpu_relax();

	if (timeout <= 0) {
		printk(KERN_ERR "%s: The MII Bus is stuck!\n",
		       dev->name);
		err = -1;
		goto bus_fail;
	}

	/* get info for this PHY */
	curphy = get_phy_info(priv->mii_info);

	if (curphy == NULL) {
		printk(KERN_ERR "%s: No PHY found\n", dev->name);
		err = -1;
		goto no_phy;
	}

	mii_info->phyinfo = curphy;

	/* Run the commands which initialize the PHY */
	if (curphy->init) {
		err = curphy->init(priv->mii_info);

		if (err)
			goto phy_init_fail;
	}

	return 0;

phy_init_fail:
no_phy:
bus_fail:
	kfree(mii_info);

	return err;
}

static void init_registers(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR);

	/* Initialize IMASK */
	gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);

	/* Init hash registers to zero */
	gfar_write(&priv->regs->iaddr0, 0);
	gfar_write(&priv->regs->iaddr1, 0);
	gfar_write(&priv->regs->iaddr2, 0);
	gfar_write(&priv->regs->iaddr3, 0);
	gfar_write(&priv->regs->iaddr4, 0);
	gfar_write(&priv->regs->iaddr5, 0);
	gfar_write(&priv->regs->iaddr6, 0);
	gfar_write(&priv->regs->iaddr7, 0);

	gfar_write(&priv->regs->gaddr0, 0);
	gfar_write(&priv->regs->gaddr1, 0);
	gfar_write(&priv->regs->gaddr2, 0);
	gfar_write(&priv->regs->gaddr3, 0);
	gfar_write(&priv->regs->gaddr4, 0);
	gfar_write(&priv->regs->gaddr5, 0);
	gfar_write(&priv->regs->gaddr6, 0);
	gfar_write(&priv->regs->gaddr7, 0);

	/* Zero out rctrl */
	gfar_write(&priv->regs->rctrl, 0x00000000);

	/* Zero out the rmon mib registers if it has them */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset((void *) &(priv->regs->rmon), 0,
		       sizeof (struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&priv->regs->rmon.cam1, 0xffffffff);
		gfar_write(&priv->regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize the max receive buffer length */
	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);

#ifdef CONFIG_GFAR_BUFSTASH
	/* If we are stashing buffers, we need to set the
	 * extraction length to the size of the buffer */
	gfar_write(&priv->regs->attreli, priv->rx_stash_size << 16);
#endif

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);

	/* Setup Attributes so that snooping is on for rx */
	gfar_write(&priv->regs->attr, ATTR_INIT_SETTINGS);
	gfar_write(&priv->regs->attreli, ATTRELI_INIT_SETTINGS);

	/* Assign the TBI an address which won't conflict with the PHYs */
	gfar_write(&priv->regs->tbipa, TBIPA_VALUE);
}

void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar *regs = priv->regs;
	unsigned long flags;
	u32 tempval;

	/* Lock it down */
	spin_lock_irqsave(&priv->lock, flags);

	/* Tell the kernel the link is down */
	priv->mii_info->link = 0;
	adjust_link(dev);

	/* Mask all interrupts */
	gfar_write(&regs->imask, IMASK_INIT_CLEAR);

	/* Clear all interrupts */
	gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&priv->regs->dmactrl);
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
	    != (DMACTRL_GRS | DMACTRL_GTS)) {
		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
		gfar_write(&priv->regs->dmactrl, tempval);

		while (!(gfar_read(&priv->regs->ievent) &
			 (IEVENT_GRSC | IEVENT_GTSC)))
			cpu_relax();
	}

	/* Disable Rx and Tx */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR) {
		/* Clear any pending interrupts */
		mii_clear_phy_interrupt(priv->mii_info);

		/* Disable PHY Interrupts */
		mii_configure_phy_interrupt(priv->mii_info,
					    MII_INTERRUPT_DISABLED);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Free the IRQs */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		free_irq(priv->interruptError, dev);
		free_irq(priv->interruptTransmit, dev);
		free_irq(priv->interruptReceive, dev);
	} else {
		free_irq(priv->interruptTransmit, dev);
	}

	if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR) {
		free_irq(priv->einfo->interruptPHY, dev);
	} else {
		del_timer_sync(&priv->phy_info_timer);
	}

	free_skb_resources(priv);

	dma_free_coherent(NULL,
			  sizeof(struct txbd8)*priv->tx_ring_size
			  + sizeof(struct rxbd8)*priv->rx_ring_size,
			  priv->tx_bd_base,
			  gfar_read(&regs->tbase));
}

/* If there are any tx skbs or rx skbs still around, free them.
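 * (unmapping their DMA buffers in the process).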
 * Then free tx_skbuff and rx_skbuff */
void free_skb_resources(struct gfar_private *priv)
{
	struct rxbd8 *rxbdp;
	struct txbd8 *txbdp;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	txbdp = priv->tx_bd_base;

	for (i = 0; i < priv->tx_ring_size; i++) {

		if (priv->tx_skbuff[i]) {
			dma_unmap_single(NULL, txbdp->bufPtr,
					 txbdp->length,
					 DMA_TO_DEVICE);
			dev_kfree_skb_any(priv->tx_skbuff[i]);
			priv->tx_skbuff[i] = NULL;
		}

		/* Advance to the next descriptor, as the rx loop below
		 * does; otherwise every iteration unmaps the first BD */
		txbdp++;
	}

	kfree(priv->tx_skbuff);

	rxbdp = priv->rx_bd_base;

	/* rx_skbuff is not guaranteed to be allocated, so only
	 * free it and its contents if it is allocated */
	if (priv->rx_skbuff != NULL) {
		for (i = 0; i < priv->rx_ring_size; i++) {
			if (priv->rx_skbuff[i]) {
				dma_unmap_single(NULL, rxbdp->bufPtr,
						 priv->rx_buffer_size
						 + RXBUF_ALIGNMENT,
						 DMA_FROM_DEVICE);

				dev_kfree_skb_any(priv->rx_skbuff[i]);
				priv->rx_skbuff[i] = NULL;
			}

			rxbdp->status = 0;
			rxbdp->length = 0;
			rxbdp->bufPtr = 0;

			rxbdp++;
		}

		kfree(priv->rx_skbuff);
	}
}

/* Bring the controller up and running */
int startup_gfar(struct net_device *dev)
{
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	dma_addr_t addr;
	unsigned long vaddr;
	int i;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar *regs = priv->regs;
	u32 tempval;
	int err = 0;

	gfar_write(&regs->imask, IMASK_INIT_CLEAR);

	/* Allocate memory for the buffer descriptors */
	vaddr = (unsigned long) dma_alloc_coherent(NULL,
			sizeof (struct txbd8) * priv->tx_ring_size +
			sizeof (struct rxbd8) * priv->rx_ring_size,
			&addr, GFP_KERNEL);

	if (vaddr == 0) {
		printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
		       dev->name);
		return -ENOMEM;
	}

	priv->tx_bd_base = (struct txbd8 *) vaddr;

	/* enet DMA only understands physical addresses */
	gfar_write(&regs->tbase, addr);

	/* Start the rx descriptor ring where the tx ring leaves off */
	addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
	vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size;
	priv->rx_bd_base = (struct rxbd8 *) vaddr;
	gfar_write(&regs->rbase, addr);

	/* Setup the skbuff rings */
	priv->tx_skbuff =
	    (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
					priv->tx_ring_size, GFP_KERNEL);

	if (priv->tx_skbuff == NULL) {
		printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
		       dev->name);
		err = -ENOMEM;
		goto tx_skb_fail;
	}

	for (i = 0; i < priv->tx_ring_size; i++)
		priv->tx_skbuff[i] = NULL;

	priv->rx_skbuff =
	    (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
					priv->rx_ring_size, GFP_KERNEL);

	if (priv->rx_skbuff == NULL) {
		printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
		       dev->name);
		err = -ENOMEM;
		goto rx_skb_fail;
	}

	for (i = 0; i < priv->rx_ring_size; i++)
		priv->rx_skbuff[i] = NULL;

	/* Initialize some variables in our dev structure */
	priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
	priv->cur_rx = priv->rx_bd_base;
	priv->skb_curtx = priv->skb_dirtytx = 0;
	priv->skb_currx = 0;

	/* Initialize Transmit Descriptor Ring */
	txbdp = priv->tx_bd_base;
	for (i = 0; i < priv->tx_ring_size; i++) {
		txbdp->status = 0;
		txbdp->length = 0;
		txbdp->bufPtr = 0;
		txbdp++;
	}

	/* Set the last descriptor in the ring to indicate wrap */
	txbdp--;
	txbdp->status |= TXBD_WRAP;

	rxbdp = priv->rx_bd_base;
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct sk_buff *skb = NULL;

		rxbdp->status = 0;

		skb = gfar_new_skb(dev, rxbdp);

		priv->rx_skbuff[i] = skb;

		rxbdp++;
	}

	/* Set the last descriptor in the ring to wrap */
	rxbdp--;
	rxbdp->status |= RXBD_WRAP;

	/* If the device has multiple interrupts, register for
	 * them. Otherwise, only register for the one */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive */
		if (request_irq(priv->interruptError, gfar_error,
				0, "enet_error", dev) < 0) {
			printk(KERN_ERR "%s: Can't get IRQ %d\n",
			       dev->name, priv->interruptError);

			err = -1;
			goto err_irq_fail;
		}

		if (request_irq(priv->interruptTransmit, gfar_transmit,
				0, "enet_tx", dev) < 0) {
			printk(KERN_ERR "%s: Can't get IRQ %d\n",
			       dev->name, priv->interruptTransmit);

			err = -1;

			goto tx_irq_fail;
		}

		if (request_irq(priv->interruptReceive, gfar_receive,
				0, "enet_rx", dev) < 0) {
			printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
			       dev->name, priv->interruptReceive);

			err = -1;
			goto rx_irq_fail;
		}
	} else {
		if (request_irq(priv->interruptTransmit, gfar_interrupt,
				0, "gfar_interrupt", dev) < 0) {
			printk(KERN_ERR "%s: Can't get IRQ %d\n",
			       dev->name, priv->interruptError);

			err = -1;
			goto err_irq_fail;
		}
	}

	/* Set up the PHY change work queue */
	INIT_WORK(&priv->tq, gfar_phy_change, dev);

	init_timer(&priv->phy_info_timer);
	priv->phy_info_timer.function = &gfar_phy_startup_timer;
	priv->phy_info_timer.data = (unsigned long) priv->mii_info;
	mod_timer(&priv->phy_info_timer, jiffies + HZ);

	/* Configure the coalescing support */
	if (priv->txcoalescing)
		gfar_write(&regs->txic,
			   mk_ic_value(priv->txcount, priv->txtime));
	else
		gfar_write(&regs->txic, 0);

	if (priv->rxcoalescing)
		gfar_write(&regs->rxic,
			   mk_ic_value(priv->rxcount, priv->rxtime));
	else
		gfar_write(&regs->rxic, 0);

	init_waitqueue_head(&priv->rxcleanupq);

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&priv->regs->dmactrl, tempval);

	/* Clear THLT, so that the DMA starts polling now */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&priv->regs->dmactrl, tempval);

	/* Unmask the interrupts we look for */
	gfar_write(&regs->imask, IMASK_DEFAULT);

	return 0;

rx_irq_fail:
	free_irq(priv->interruptTransmit, dev);
tx_irq_fail:
	free_irq(priv->interruptError, dev);
err_irq_fail:
rx_skb_fail:
	free_skb_resources(priv);
tx_skb_fail:
	dma_free_coherent(NULL,
			  sizeof(struct txbd8)*priv->tx_ring_size
			  + sizeof(struct rxbd8)*priv->rx_ring_size,
			  priv->tx_bd_base,
			  gfar_read(&regs->tbase));

	if (priv->mii_info->phyinfo->close)
		priv->mii_info->phyinfo->close(priv->mii_info);
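
	/* mii_info was allocated by init_phy(); free it here on failure,
	 * since gfar_enet_open() will not clean it up */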
	kfree(priv->mii_info);

	return err;
}

/* Called when something needs to use the ethernet device.
 * Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
	int err;

	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);

	if (err)
		return err;

	err = startup_gfar(dev);

	netif_start_queue(dev);

	return err;
}

/* This is called by the kernel when a frame is ready for transmission.
 * It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct txbd8 *txbdp;

	/* Update transmit stats */
	priv->stats.tx_bytes += skb->len;

	/* Lock priv now */
	spin_lock_irq(&priv->lock);

	/* Point at the first free tx descriptor */
	txbdp = priv->cur_tx;

	/* Clear all but the WRAP status flags */
	txbdp->status &= TXBD_WRAP;

	/* Set buffer length and pointer */
	txbdp->length = skb->len;
	txbdp->bufPtr = dma_map_single(NULL, skb->data,
				       skb->len, DMA_TO_DEVICE);

	/* Save the skb pointer so we can free it later */
	priv->tx_skbuff[priv->skb_curtx] = skb;

	/* Update the current skb pointer (wrapping if this was the last) */
	priv->skb_curtx =
	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);

	/* Flag the BD as interrupt-causing */
	txbdp->status |= TXBD_INTERRUPT;

	/* Flag the BD as ready to go, last in frame, and
	 * in need of CRC */
	txbdp->status |= (TXBD_READY | TXBD_LAST | TXBD_CRC);

	dev->trans_start = jiffies;

	/* If this was the last BD in the ring, the next one
	 * is at the beginning of the ring */
	if (txbdp->status & TXBD_WRAP)
		txbdp = priv->tx_bd_base;
	else
		txbdp++;

	/* If the next BD still needs to be cleaned up, then the BDs
	 * are full. We need to tell the kernel to stop sending us stuff. */
	if (txbdp == priv->dirty_tx) {
		netif_stop_queue(dev);

		priv->stats.tx_fifo_errors++;
	}

	/* Update the current txbd to the next one */
	priv->cur_tx = txbdp;

	/* Tell the DMA to go go go */
	gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);

	/* Unlock priv */
	spin_unlock_irq(&priv->lock);

	return 0;
}

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	stop_gfar(dev);

	/* Shutdown the PHY */
	if (priv->mii_info->phyinfo->close)
		priv->mii_info->phyinfo->close(priv->mii_info);

	kfree(priv->mii_info);

	netif_stop_queue(dev);

	return 0;
}

/* returns a net_device_stats structure pointer */
static struct net_device_stats * gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	return &(priv->stats);
}

/* Changes the MAC address if the controller is not running.
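 *
 * A worked example of the register layout written below (assuming
 * big-endian 32-bit register writes, as on the PowerPC targets this
 * driver supports): for dev_addr 00:01:02:03:04:05, tmpbuf holds
 * 05 04 03 02 01 00, so MACSTNADDR1 is written 0x05040302 and the
 * upper half of MACSTNADDR2 holds 0x0100; presumably only the upper
 * half of MACSTNADDR2 is used by the hardware.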
 */
int gfar_set_mac_address(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i;
	char tmpbuf[MAC_ADDR_LEN];
	u32 tempval;

	/* Now copy it into the mac registers backwards, since
	 * the registers hold the address in reverse byte order */
	for (i = 0; i < MAC_ADDR_LEN; i++)
		tmpbuf[MAC_ADDR_LEN - 1 - i] = dev->dev_addr[i];

	gfar_write(&priv->regs->macstnaddr1, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(&priv->regs->macstnaddr2, tempval);

	return 0;
}

static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	int tempsize, tempval;
	struct gfar_private *priv = netdev_priv(dev);
	int oldsize = priv->rx_buffer_size;
	int frame_size = new_mtu + 18;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		printk(KERN_ERR "%s: Invalid MTU setting\n", dev->name);
		return -EINVAL;
	}

	tempsize =
	    (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
	    INCREMENTAL_BUFFER_SIZE;

	/* Only stop and start the controller if it isn't already
	 * stopped */
	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		stop_gfar(dev);

	priv->rx_buffer_size = tempsize;

	dev->mtu = new_mtu;

	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
	gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length */
	tempval = gfar_read(&priv->regs->maccfg2);

	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
	else
		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

	gfar_write(&priv->regs->maccfg2, tempval);

	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		startup_gfar(dev);

	return 0;
}

/* gfar_timeout gets called when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem. */
static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	priv->stats.tx_errors++;

	if (dev->flags & IFF_UP) {
		stop_gfar(dev);
		startup_gfar(dev);
	}

	netif_schedule(dev);
}

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct gfar_private *priv = netdev_priv(dev);
	struct txbd8 *bdp;

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);

	/* Lock priv */
	spin_lock(&priv->lock);
	bdp = priv->dirty_tx;
	while ((bdp->status & TXBD_READY) == 0) {
		/* If dirty_tx and cur_tx are the same, then either the
		 * ring is empty or full now (it could only be full in the
		 * beginning, obviously). If it is empty, we are done. */
		if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
			break;

		priv->stats.tx_packets++;

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet. */
		if (bdp->status & TXBD_DEF)
			priv->stats.collisions++;

		/* Free the sk buffer associated with this TxBD */
		dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);
		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
		priv->skb_dirtytx =
		    (priv->skb_dirtytx +
		     1) & TX_RING_MOD_MASK(priv->tx_ring_size);

		/* update bdp to point at next bd in the ring (wrapping if necessary) */
		if (bdp->status & TXBD_WRAP)
			bdp = priv->tx_bd_base;
		else
			bdp++;

		/* Move dirty_tx to be the next bd */
		priv->dirty_tx = bdp;

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	} /* while ((bdp->status & TXBD_READY) == 0) */

	/* If we are coalescing the interrupts, reset the timer
	 * Otherwise, clear it */
	if (priv->txcoalescing)
		gfar_write(&priv->regs->txic,
			   mk_ic_value(priv->txcount, priv->txtime));
	else
		gfar_write(&priv->regs->txic, 0);

	spin_unlock(&priv->lock);

	return IRQ_HANDLED;
}

struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;
	unsigned int timeout = SKB_ALLOC_TIMEOUT;

	/* We have to allocate the skb, so keep trying till we succeed */
	while ((!skb) && timeout--)
		skb = dev_alloc_skb(priv->rx_buffer_size + RXBUF_ALIGNMENT);

	if (skb == NULL)
		return NULL;

	/* We need the data buffer to be aligned properly. We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb,
		    RXBUF_ALIGNMENT -
		    (((unsigned) skb->data) & (RXBUF_ALIGNMENT - 1)));

	skb->dev = dev;

	bdp->bufPtr = dma_map_single(NULL, skb->data,
				     priv->rx_buffer_size + RXBUF_ALIGNMENT,
				     DMA_FROM_DEVICE);

	bdp->length = 0;

	/* Mark the buffer empty */
	bdp->status |= (RXBD_EMPTY | RXBD_INTERRUPT);

	return skb;
}

static inline void count_errors(unsigned short status, struct gfar_private *priv)
{
	struct net_device_stats *stats = &priv->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors
	 * matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

		estats->rx_trunc++;

		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			estats->rx_large++;
		else
			estats->rx_short++;
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		estats->rx_nonoctet++;
	}
	if (status & RXBD_CRCERR) {
		estats->rx_crcerr++;
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		estats->rx_overrun++;
		stats->rx_crc_errors++;
	}
}

irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct gfar_private *priv = netdev_priv(dev);

#ifdef CONFIG_GFAR_NAPI
	u32 tempval;
#endif

	/* Clear IEVENT, so rx interrupt isn't called again
	 * because of this interrupt */
	gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);

	/* support NAPI */
#ifdef CONFIG_GFAR_NAPI
	if (netif_rx_schedule_prep(dev)) {
		tempval = gfar_read(&priv->regs->imask);
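
		/* Mask RX interrupts while the NAPI poll runs; gfar_poll()
		 * restores IMASK_DEFAULT once the ring has been drained */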
		tempval &= IMASK_RX_DISABLED;
		gfar_write(&priv->regs->imask, tempval);

		__netif_rx_schedule(dev);
	} else {
#ifdef VERBOSE_GFAR_ERRORS
		printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
		       dev->name, gfar_read(&priv->regs->ievent),
		       gfar_read(&priv->regs->imask));
#endif
	}
#else

	spin_lock(&priv->lock);
	gfar_clean_rx_ring(dev, priv->rx_ring_size);

	/* If we are coalescing interrupts, update the timer
	 * Otherwise, clear it */
	if (priv->rxcoalescing)
		gfar_write(&priv->regs->rxic,
			   mk_ic_value(priv->rxcount, priv->rxtime));
	else
		gfar_write(&priv->regs->rxic, 0);

	/* Just in case we need to wake the ring param changer */
	priv->rxclean = 1;

	spin_unlock(&priv->lock);
#endif

	return IRQ_HANDLED;
}

/* gfar_process_frame() -- handle one incoming packet if skb
 * isn't NULL. */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			      int length)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (skb == NULL) {
#ifdef BRIEF_GFAR_ERRORS
		printk(KERN_WARNING "%s: Missing skb!!.\n",
		       dev->name);
#endif
		priv->stats.rx_dropped++;
		priv->extra_stats.rx_skbmissing++;
	} else {
		/* Prep the skb for the packet */
		skb_put(skb, length);

		/* Tell the skb what kind of packet this is */
		skb->protocol = eth_type_trans(skb, dev);

		/* Send the packet up the stack */
		if (RECEIVE(skb) == NET_RX_DROP)
			priv->extra_stats.kernel_dropped++;
	}

	return 0;
}

/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached. Returns the number
 * of frames handled */
static int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
{
	struct rxbd8 *bdp;
	struct sk_buff *skb;
	u16 pkt_len;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = priv->cur_rx;

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		skb = priv->rx_skbuff[priv->skb_currx];

		if (!(bdp->status &
		      (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET
		       | RXBD_CRCERR | RXBD_OVERRUN | RXBD_TRUNCATED))) {
			/* Increment the number of packets */
			priv->stats.rx_packets++;
			howmany++;

			/* Remove the FCS from the packet length */
			pkt_len = bdp->length - 4;

			gfar_process_frame(dev, skb, pkt_len);

			priv->stats.rx_bytes += pkt_len;
		} else {
			count_errors(bdp->status, priv);

			if (skb)
				dev_kfree_skb_any(skb);

			priv->rx_skbuff[priv->skb_currx] = NULL;
		}

		dev->last_rx = jiffies;

		/* Clear the status flags for this buffer */
		bdp->status &= ~RXBD_STATS;

		/* Add another skb for the future */
		skb = gfar_new_skb(dev, bdp);
		priv->rx_skbuff[priv->skb_currx] = skb;

		/* Update to the next pointer */
		if (bdp->status & RXBD_WRAP)
			bdp = priv->rx_bd_base;
		else
			bdp++;

		/* update to point at the next skb */
		priv->skb_currx =
		    (priv->skb_currx +
		     1) & RX_RING_MOD_MASK(priv->rx_ring_size);

	}

	/* Update the current rxbd pointer to be the next one */
	priv->cur_rx = bdp;

	/* If no packets have arrived since the
	 * last one we processed, clear the IEVENT RX and
	 * BSY bits so that another interrupt won't be
	 * generated when we set IMASK */
	if (bdp->status & RXBD_EMPTY)
		gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);

	return howmany;
}

#ifdef CONFIG_GFAR_NAPI
static int gfar_poll(struct net_device *dev, int *budget)
{
	int howmany;
	struct gfar_private *priv = netdev_priv(dev);
	int rx_work_limit = *budget;

	if (rx_work_limit > dev->quota)
		rx_work_limit = dev->quota;

	howmany = gfar_clean_rx_ring(dev, rx_work_limit);

	dev->quota -= howmany;
	rx_work_limit -= howmany;
	*budget -= howmany;

	if (rx_work_limit >= 0) {
		netif_rx_complete(dev);

		/* Clear the halt bit in RSTAT */
		gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);

		gfar_write(&priv->regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer
		 * Otherwise, clear it */
		if (priv->rxcoalescing)
			gfar_write(&priv->regs->rxic,
				   mk_ic_value(priv->rxcount, priv->rxtime));
		else
			gfar_write(&priv->regs->rxic, 0);

		/* Signal to the ring size changer that it's safe to go */
		priv->rxclean = 1;
	}

	return (rx_work_limit < 0) ? 1 : 0;
}
#endif

/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Save ievent for future reference */
	u32 events = gfar_read(&priv->regs->ievent);

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, events);

	/* Check for reception */
	if ((events & IEVENT_RXF0) || (events & IEVENT_RXB0))
		gfar_receive(irq, dev_id, regs);

	/* Check for transmit completion */
	if ((events & IEVENT_TXF) || (events & IEVENT_TXB))
		gfar_transmit(irq, dev_id, regs);

	/* Update error statistics */
	if (events & IEVENT_TXE) {
		priv->stats.tx_errors++;

		if (events & IEVENT_LC)
			priv->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			priv->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
#ifdef VERBOSE_GFAR_ERRORS
			printk(KERN_WARNING "%s: tx underrun. dropped packet\n",
			       dev->name);
#endif
			priv->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			/* Reactivate the Tx Queues */
			gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
		}
	}
	if (events & IEVENT_BSY) {
		priv->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, dev_id, regs);

#ifndef CONFIG_GFAR_NAPI
		/* Clear the halt bit in RSTAT */
		gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
#endif

#ifdef VERBOSE_GFAR_ERRORS
		printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n", dev->name,
		       gfar_read(&priv->regs->rstat));
#endif
	}
	if (events & IEVENT_BABR) {
		priv->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

#ifdef VERBOSE_GFAR_ERRORS
		printk(KERN_DEBUG "%s: babbling error\n", dev->name);
#endif
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
#ifdef VERBOSE_GFAR_ERRORS
		printk(KERN_DEBUG "%s: EBERR\n", dev->name);
#endif
	}
	if (events & IEVENT_RXC) {
#ifdef VERBOSE_GFAR_ERRORS
		printk(KERN_DEBUG "%s: control frame\n", dev->name);
#endif
	}

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
#ifdef VERBOSE_GFAR_ERRORS
		printk(KERN_DEBUG "%s: babt error\n", dev->name);
#endif
	}

	return IRQ_HANDLED;
}

static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Clear the interrupt */
	mii_clear_phy_interrupt(priv->mii_info);

	/* Disable PHY interrupts */
	mii_configure_phy_interrupt(priv->mii_info,
				    MII_INTERRUPT_DISABLED);

	/* Schedule the phy change */
	schedule_work(&priv->tq);

	return IRQ_HANDLED;
}

/* Scheduled by the phy_interrupt/timer to handle PHY changes */
static void gfar_phy_change(void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct gfar_private *priv = netdev_priv(dev);
	int result = 0;

	/* Delay to give the PHY a chance to change the
	 * register state */
	msleep(1);

	/* Update the link, speed, duplex */
	result = priv->mii_info->phyinfo->read_status(priv->mii_info);

	/* Adjust the known status as long as the link
	 * isn't still coming up */
	if ((0 == result) || (priv->mii_info->link == 0))
		adjust_link(dev);

	/* Reenable interrupts, if needed */
	if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR)
		mii_configure_phy_interrupt(priv->mii_info,
					    MII_INTERRUPT_ENABLED);
}

/* Called every so often on systems that don't interrupt
 * the core for PHY changes */
static void gfar_phy_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct gfar_private *priv = netdev_priv(dev);

	schedule_work(&priv->tq);

	mod_timer(&priv->phy_info_timer, jiffies +
		  GFAR_PHY_CHANGE_TIME * HZ);
}

/* Keep trying aneg for some time.
 * If, after GFAR_AN_TIMEOUT seconds, it has not
 * finished, we switch to forced.
 * Either way, once the process has completed, we either
 * request the interrupt, or switch the timer over to
 * using gfar_phy_timer to check status */
static void gfar_phy_startup_timer(unsigned long data)
{
	int result;
	static int secondary = GFAR_AN_TIMEOUT;
	struct gfar_mii_info *mii_info = (struct gfar_mii_info *)data;
	struct gfar_private *priv = netdev_priv(mii_info->dev);

	/* Configure the Auto-negotiation */
	result = mii_info->phyinfo->config_aneg(mii_info);

	/* If autonegotiation failed to start, and
	 * we haven't timed out, reset the timer, and return */
	if (result && secondary--) {
		mod_timer(&priv->phy_info_timer, jiffies + HZ);
		return;
	} else if (result) {
		/* Couldn't start autonegotiation.
		 * Try switching to forced */
		mii_info->autoneg = 0;
		result = mii_info->phyinfo->config_aneg(mii_info);

		/* Forcing failed! Give up */
		if (result) {
			printk(KERN_ERR "%s: Forcing failed!\n",
			       mii_info->dev->name);
			return;
		}
	}

	/* Kill the timer so it can be restarted */
	del_timer_sync(&priv->phy_info_timer);

	/* Grab the PHY interrupt, if necessary/possible */
	if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR) {
		if (request_irq(priv->einfo->interruptPHY,
				phy_interrupt,
				SA_SHIRQ,
				"phy_interrupt",
				mii_info->dev) < 0) {
			printk(KERN_ERR "%s: Can't get IRQ %d (PHY)\n",
			       mii_info->dev->name,
			       priv->einfo->interruptPHY);
		} else {
			mii_configure_phy_interrupt(priv->mii_info,
						    MII_INTERRUPT_ENABLED);
			return;
		}
	}

	/* Start the timer again, this time in order to
	 * handle a change in status */
	init_timer(&priv->phy_info_timer);
	priv->phy_info_timer.function = &gfar_phy_timer;
	priv->phy_info_timer.data = (unsigned long) mii_info->dev;
	mod_timer(&priv->phy_info_timer, jiffies +
		  GFAR_PHY_CHANGE_TIME * HZ);
}

/* Called every time the controller might need to be made
 * aware of new link state. The PHY code conveys this
 * information through variables in the priv structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar *regs = priv->regs;
	u32 tempval;
	struct gfar_mii_info *mii_info = priv->mii_info;

	if (mii_info->link) {
		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (mii_info->duplex != priv->oldduplex) {
			if (!(mii_info->duplex)) {
				tempval = gfar_read(&regs->maccfg2);
				tempval &= ~(MACCFG2_FULL_DUPLEX);
				gfar_write(&regs->maccfg2, tempval);

				printk(KERN_INFO "%s: Half Duplex\n",
				       dev->name);
			} else {
				tempval = gfar_read(&regs->maccfg2);
				tempval |= MACCFG2_FULL_DUPLEX;
				gfar_write(&regs->maccfg2, tempval);

				printk(KERN_INFO "%s: Full Duplex\n",
				       dev->name);
			}

			priv->oldduplex = mii_info->duplex;
		}

		if (mii_info->speed != priv->oldspeed) {
			switch (mii_info->speed) {
			case 1000:
				tempval = gfar_read(&regs->maccfg2);
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
				gfar_write(&regs->maccfg2, tempval);
				break;
			case 100:
			case 10:
				tempval = gfar_read(&regs->maccfg2);
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
				gfar_write(&regs->maccfg2, tempval);
				break;
			default:
				printk(KERN_WARNING
				       "%s: Ack! Speed (%d) is not 10/100/1000!\n",
				       dev->name, mii_info->speed);
				break;
			}

			printk(KERN_INFO "%s: Speed %dBT\n", dev->name,
			       mii_info->speed);

			priv->oldspeed = mii_info->speed;
		}

		if (!priv->oldlink) {
			printk(KERN_INFO "%s: Link is up\n", dev->name);
			priv->oldlink = 1;
			netif_carrier_on(dev);
			netif_schedule(dev);
		}
	} else {
		if (priv->oldlink) {
			printk(KERN_INFO "%s: Link is down\n", dev->name);
			priv->oldlink = 0;
			priv->oldspeed = 0;
			priv->oldduplex = -1;
			netif_carrier_off(dev);
		}
	}
}

/* Update the hash table based on the current list of multicast
 * addresses we subscribe to. Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed) */
static void gfar_set_multi(struct net_device *dev)
{
	struct dev_mc_list *mc_ptr;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar *regs = priv->regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		printk(KERN_INFO "%s: Entering promiscuous mode.\n",
		       dev->name);
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		/* zero out the hash */
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		if (dev->mc_count == 0)
			return;

		/* Parse the list, and set the appropriate bits */
		for (mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
			gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
		}
	}

	return;
}

/* Set the appropriate hash bit for the given addr.
 * The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table. The table is controlled through 8 32-bit registers:
 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255. This means that the 3 most significant bits of the
 * hash select which gaddr register to use, and the other 5 bits
 * indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry. */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar *regs = priv->regs;
	u32 *hash = &regs->gaddr0;
	u32 result = ether_crc(MAC_ADDR_LEN, addr);
	u8 whichreg = ((result >> 29) & 0x7);
	u8 whichbit = ((result >> 24) & 0x1f);
	u32 value = (1 << (31-whichbit));

	tempval = gfar_read(&hash[whichreg]);
	tempval |= value;
	gfar_write(&hash[whichreg], tempval);

	return;
}

/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Save ievent for future reference */
	u32 events = gfar_read(&priv->regs->ievent);

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, IEVENT_ERR_MASK);

	/* Hmm... */
#if defined (BRIEF_GFAR_ERRORS) || defined (VERBOSE_GFAR_ERRORS)
	printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
	       dev->name, events, gfar_read(&priv->regs->imask));
#endif

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		priv->stats.tx_errors++;

		if (events & IEVENT_LC)
			priv->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			priv->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
#ifdef VERBOSE_GFAR_ERRORS
			printk(KERN_DEBUG "%s: underrun. packet dropped.\n",
			       dev->name);
#endif
			priv->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			/* Reactivate the Tx Queues */
			gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
		}
#ifdef VERBOSE_GFAR_ERRORS
		printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
#endif
	}
	if (events & IEVENT_BSY) {
		priv->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, dev_id, regs);

#ifndef CONFIG_GFAR_NAPI
		/* Clear the halt bit in RSTAT */
		gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
#endif

#ifdef VERBOSE_GFAR_ERRORS
		printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n", dev->name,
		       gfar_read(&priv->regs->rstat));
#endif
	}
	if (events & IEVENT_BABR) {
		priv->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

#ifdef VERBOSE_GFAR_ERRORS
		printk(KERN_DEBUG "%s: babbling error\n", dev->name);
#endif
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
#ifdef VERBOSE_GFAR_ERRORS
		printk(KERN_DEBUG "%s: EBERR\n", dev->name);
#endif
	}
	/* Braces here keep the next if out of this one's body when
	 * VERBOSE_GFAR_ERRORS is not defined (as in gfar_interrupt()) */
	if (events & IEVENT_RXC) {
#ifdef VERBOSE_GFAR_ERRORS
		printk(KERN_DEBUG "%s: control frame\n", dev->name);
#endif
	}

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
#ifdef VERBOSE_GFAR_ERRORS
		printk(KERN_DEBUG "%s: babt error\n", dev->name);
#endif
	}
	return IRQ_HANDLED;
}

/* Structure for a device driver */
static struct device_driver gfar_driver = {
	.name = "fsl-gianfar",
	.bus = &platform_bus_type,
	.probe = gfar_probe,
	.remove = gfar_remove,
};

static int __init gfar_init(void)
{
	return driver_register(&gfar_driver);
}

static void __exit gfar_exit(void)
{
	driver_unregister(&gfar_driver);
}

module_init(gfar_init);
module_exit(gfar_exit);