/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala (kumar.gala@freescale.com)
 *
 * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors.
 *
 * The driver is initialized through platform_device. Structures which
 * define the configuration needed by the board are defined in a
 * board structure in arch/ppc/platforms (though I do not
 * discount the possibility that other architectures could one
 * day be supported). One assumption the driver currently makes
 * is that the PHY is configured to advertise all
 * capabilities. This is a sensible default, and on certain
 * PHYs, changing this default encounters substantial errata
 * issues. Future versions may remove this requirement, but for
 * now, it is best for the firmware to ensure this is the case.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning of the ring is indicated by a register
 * pointing to the physical address of its start.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. Without NAPI, the packet(s) will be handled
 * immediately. Both methods will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
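 *
 * A rough sketch of one ring (sizes are illustrative; the driver
 * actually uses DEFAULT_TX_RING_SIZE/DEFAULT_RX_RING_SIZE entries):
 *
 *   tbase0/rbase0 --> +--------+--------+--  --+--------+
 *                     |  bd 0  |  bd 1  | ...  |  bd N  |
 *                     +--------+--------+--  --+--------+
 *                                                  ^
 *                              WRAP bit set here --+ (the controller
 *                              returns to bd 0 after bd N)
 *
 * Each descriptor holds a status word, a length, and the physical
 * address of its data buffer.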
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>

#include "gianfar.h"
#include "gianfar_phy.h"

#define TX_TIMEOUT (1*HZ)
#define SKB_ALLOC_TIMEOUT 1000000
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

#ifdef CONFIG_GFAR_NAPI
#define RECEIVE(x) netif_receive_skb(x)
#else
#define RECEIVE(x) netif_rx(x)
#endif

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.1";

int startup_gfar(struct net_device *dev);
static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp);
static struct net_device_stats *gfar_get_stats(struct net_device *dev);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs);
static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs);
static irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs);
static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static void gfar_phy_change(void *data);
static void gfar_phy_timer(unsigned long data);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct device *device);
static int gfar_remove(struct device *device);
void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
#ifdef CONFIG_GFAR_NAPI
static int gfar_poll(struct net_device *dev, int *budget);
#endif
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
static void gfar_phy_startup_timer(unsigned long data);
static void gfar_vlan_rx_register(struct net_device *netdev,
				struct vlan_group *grp);
static void gfar_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);

extern struct ethtool_ops gfar_ethtool_ops;

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

int gfar_uses_fcb(struct gfar_private *priv)
{
	if (priv->vlan_enable || priv->rx_csum_enable)
		return 1;
	else
		return 0;
}
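
/* When either of these features is enabled, the controller expects a
 * frame control block (FCB, GMAC_FCB_LEN bytes) ahead of each frame:
 * on transmit the driver pushes one in front of the payload
 * (gfar_add_fcb()) to carry the checksum/VLAN-insertion commands, and
 * on receive the hardware prepends one carrying the parse results,
 * which gfar_get_fcb() strips off again. */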

static int gfar_probe(struct device *device)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct platform_device *pdev = to_platform_device(device);
	struct gianfar_platform_data *einfo;
	struct resource *r;
	int idx;
	int err = 0;

	einfo = (struct gianfar_platform_data *) pdev->dev.platform_data;

	if (einfo == NULL) {
		printk(KERN_ERR "gfar %d: Missing additional data!\n",
		       pdev->id);

		return -ENODEV;
	}

	/* Create an ethernet device instance */
	dev = alloc_etherdev(sizeof (*priv));

	if (dev == NULL)
		return -ENOMEM;

	priv = netdev_priv(dev);

	/* Set the info in the priv to the current info */
	priv->einfo = einfo;

	/* fill out IRQ fields */
	if (einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		priv->interruptTransmit = platform_get_irq_byname(pdev, "tx");
		priv->interruptReceive = platform_get_irq_byname(pdev, "rx");
		priv->interruptError = platform_get_irq_byname(pdev, "error");
	} else {
		priv->interruptTransmit = platform_get_irq(pdev, 0);
	}

	/* get a pointer to the register memory */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->regs = (struct gfar *)
		ioremap(r->start, sizeof (struct gfar));

	if (priv->regs == NULL) {
		err = -ENOMEM;
		goto regs_fail;
	}

	/* Set the PHY base address */
	priv->phyregs = (struct gfar *)
		ioremap(einfo->phy_reg_addr, sizeof (struct gfar));

	if (priv->phyregs == NULL) {
		err = -ENOMEM;
		goto phy_regs_fail;
	}

	spin_lock_init(&priv->lock);

	dev_set_drvdata(device, dev);

	/* Stop the DMA engine now, in case it was running before
	 * (The firmware could have used it, and left it running).
	 * To do this, we write Graceful Receive Stop and Graceful
	 * Transmit Stop, and then wait until the corresponding bits
	 * in IEVENT indicate the stops have completed. */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&priv->regs->dmactrl, tempval);

	tempval = gfar_read(&priv->regs->dmactrl);
	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&priv->regs->dmactrl, tempval);

	while (!(gfar_read(&priv->regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC)))
		cpu_relax();

	/* Reset MAC layer */
	gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);

	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
	gfar_write(&priv->regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS);

	/* Initialize ECNTRL */
	gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Copy the station address into the dev structure, */
	memcpy(dev->dev_addr, einfo->mac_addr, MAC_ADDR_LEN);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) (priv->regs);

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, device);

	/* Fill in the dev structure */
	dev->open = gfar_enet_open;
	dev->hard_start_xmit = gfar_start_xmit;
	dev->tx_timeout = gfar_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_GFAR_NAPI
	dev->poll = gfar_poll;
	dev->weight = GFAR_DEV_WEIGHT;
#endif
	dev->stop = gfar_close;
	dev->get_stats = gfar_get_stats;
	dev->change_mtu = gfar_change_mtu;
	dev->mtu = 1500;
	dev->set_multicast_list = gfar_set_multi;

	dev->ethtool_ops = &gfar_ethtool_ops;

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		priv->rx_csum_enable = 1;
		dev->features |= NETIF_F_IP_CSUM;
	} else
		priv->rx_csum_enable = 0;

	priv->vlgrp = NULL;

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->vlan_rx_register = gfar_vlan_rx_register;
		dev->vlan_rx_kill_vid = gfar_vlan_rx_kill_vid;

		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

		priv->vlan_enable = 1;
	}
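
	/* The multicast hash table is spread across several 32-bit
	 * "group address" registers. With the extended-hash feature
	 * there are 16 of them (igaddr0-7 followed by gaddr0-7), so a
	 * 9-bit hash (512 bins = 16 * 32 bits) is used; otherwise only
	 * gaddr0-7 exist, giving a 256-bin table and an 8-bit hash. */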
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &priv->regs->igaddr0;
		priv->hash_regs[1] = &priv->regs->igaddr1;
		priv->hash_regs[2] = &priv->regs->igaddr2;
		priv->hash_regs[3] = &priv->regs->igaddr3;
		priv->hash_regs[4] = &priv->regs->igaddr4;
		priv->hash_regs[5] = &priv->regs->igaddr5;
		priv->hash_regs[6] = &priv->regs->igaddr6;
		priv->hash_regs[7] = &priv->regs->igaddr7;
		priv->hash_regs[8] = &priv->regs->gaddr0;
		priv->hash_regs[9] = &priv->regs->gaddr1;
		priv->hash_regs[10] = &priv->regs->gaddr2;
		priv->hash_regs[11] = &priv->regs->gaddr3;
		priv->hash_regs[12] = &priv->regs->gaddr4;
		priv->hash_regs[13] = &priv->regs->gaddr5;
		priv->hash_regs[14] = &priv->regs->gaddr6;
		priv->hash_regs[15] = &priv->regs->gaddr7;
	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &priv->regs->gaddr0;
		priv->hash_regs[1] = &priv->regs->gaddr1;
		priv->hash_regs[2] = &priv->regs->gaddr2;
		priv->hash_regs[3] = &priv->regs->gaddr3;
		priv->hash_regs[4] = &priv->regs->gaddr4;
		priv->hash_regs[5] = &priv->regs->gaddr5;
		priv->hash_regs[6] = &priv->regs->gaddr6;
		priv->hash_regs[7] = &priv->regs->gaddr7;
	}

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	dev->hard_header_len += priv->padding;

	if (dev->features & NETIF_F_IP_CSUM)
		dev->hard_header_len += GMAC_FCB_LEN;

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
#ifdef CONFIG_GFAR_BUFSTASH
	priv->rx_stash_size = STASH_LENGTH;
#endif
	priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
	priv->rx_ring_size = DEFAULT_RX_RING_SIZE;

	priv->txcoalescing = DEFAULT_TX_COALESCE;
	priv->txcount = DEFAULT_TXCOUNT;
	priv->txtime = DEFAULT_TXTIME;
	priv->rxcoalescing = DEFAULT_RX_COALESCE;
	priv->rxcount = DEFAULT_RXCOUNT;
	priv->rxtime = DEFAULT_RXTIME;
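
	/* The NETIF_MSG_* flags are consecutive single bits, so
	 * (NETIF_MSG_IFUP << 1) - 1 below enables every message type
	 * up to and including interface-up reporting. */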
	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

	err = register_netdev(dev);

	if (err) {
		printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
		       dev->name);
		goto register_fail;
	}

	/* Print out the device info */
	printk(KERN_INFO DEVICE_NAME, dev->name);
	for (idx = 0; idx < 6; idx++)
		printk("%2.2x%c", dev->dev_addr[idx], idx == 5 ? ' ' : ':');
	printk("\n");

	/* Even more device info helps when determining which kernel
	 * provided which set of benchmarks. Since this is global for all
	 * devices, we only print it once */
#ifdef CONFIG_GFAR_NAPI
	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
#else
	printk(KERN_INFO "%s: Running with NAPI disabled\n", dev->name);
#endif
	printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
	       dev->name, priv->rx_ring_size, priv->tx_ring_size);

	return 0;

register_fail:
	iounmap((void *) priv->phyregs);
phy_regs_fail:
	iounmap((void *) priv->regs);
regs_fail:
	free_netdev(dev);
	return err;
}

static int gfar_remove(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct gfar_private *priv = netdev_priv(dev);

	dev_set_drvdata(device, NULL);

	iounmap((void *) priv->regs);
	iounmap((void *) priv->phyregs);
	free_netdev(dev);

	return 0;
}


/* Configure the PHY for dev.
 * Returns 0 on success, or a negative value on failure.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_info *curphy;
	unsigned int timeout = PHY_INIT_TIMEOUT;
	struct gfar *phyregs = priv->phyregs;
	struct gfar_mii_info *mii_info;
	int err;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	mii_info = kmalloc(sizeof(struct gfar_mii_info),
			GFP_KERNEL);

	if (NULL == mii_info) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate mii_info\n",
			       dev->name);
		return -ENOMEM;
	}

	mii_info->speed = SPEED_1000;
	mii_info->duplex = DUPLEX_FULL;
	mii_info->pause = 0;
	mii_info->link = 1;

	mii_info->advertising = (ADVERTISED_10baseT_Half |
			ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half |
			ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Full);
	mii_info->autoneg = 1;

	spin_lock_init(&mii_info->mdio_lock);

	mii_info->mii_id = priv->einfo->phyid;

	mii_info->dev = dev;

	mii_info->mdio_read = &read_phy_reg;
	mii_info->mdio_write = &write_phy_reg;

	priv->mii_info = mii_info;

	/* Reset the management interface */
	gfar_write(&phyregs->miimcfg, MIIMCFG_RESET);

	/* Setup the MII Mgmt clock speed */
	gfar_write(&phyregs->miimcfg, MIIMCFG_INIT_VALUE);
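
	/* Management transactions cannot start while MIIMIND_BUSY is
	 * set; bound the wait so a wedged bus fails initialization
	 * instead of hanging the kernel. */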
	/* Wait until the bus is free */
	while ((gfar_read(&phyregs->miimind) & MIIMIND_BUSY) && --timeout)
		cpu_relax();

	if (timeout == 0) {
		printk(KERN_ERR "%s: The MII Bus is stuck!\n",
		       dev->name);
		err = -1;
		goto bus_fail;
	}

	/* get info for this PHY */
	curphy = get_phy_info(priv->mii_info);

	if (curphy == NULL) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: No PHY found\n", dev->name);
		err = -1;
		goto no_phy;
	}

	mii_info->phyinfo = curphy;

	/* Run the commands which initialize the PHY */
	if (curphy->init) {
		err = curphy->init(priv->mii_info);

		if (err)
			goto phy_init_fail;
	}

	return 0;

phy_init_fail:
no_phy:
bus_fail:
	kfree(mii_info);

	return err;
}

static void init_registers(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR);

	/* Initialize IMASK */
	gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);

	/* Init hash registers to zero */
	gfar_write(&priv->regs->igaddr0, 0);
	gfar_write(&priv->regs->igaddr1, 0);
	gfar_write(&priv->regs->igaddr2, 0);
	gfar_write(&priv->regs->igaddr3, 0);
	gfar_write(&priv->regs->igaddr4, 0);
	gfar_write(&priv->regs->igaddr5, 0);
	gfar_write(&priv->regs->igaddr6, 0);
	gfar_write(&priv->regs->igaddr7, 0);

	gfar_write(&priv->regs->gaddr0, 0);
	gfar_write(&priv->regs->gaddr1, 0);
	gfar_write(&priv->regs->gaddr2, 0);
	gfar_write(&priv->regs->gaddr3, 0);
	gfar_write(&priv->regs->gaddr4, 0);
	gfar_write(&priv->regs->gaddr5, 0);
	gfar_write(&priv->regs->gaddr6, 0);
	gfar_write(&priv->regs->gaddr7, 0);

	/* Zero out the rmon mib registers if it has them */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset((void *) &(priv->regs->rmon), 0,
		       sizeof (struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&priv->regs->rmon.cam1, 0xffffffff);
		gfar_write(&priv->regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize the max receive buffer length */
	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);

#ifdef CONFIG_GFAR_BUFSTASH
	/* If we are stashing buffers, we need to set the
	 * extraction length to the size of the buffer */
	gfar_write(&priv->regs->attreli, priv->rx_stash_size << 16);
#endif

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);

	/* Setup Attributes so that snooping is on for rx */
	gfar_write(&priv->regs->attr, ATTR_INIT_SETTINGS);
	gfar_write(&priv->regs->attreli, ATTRELI_INIT_SETTINGS);

	/* Assign the TBI an address which won't conflict with the PHYs */
	gfar_write(&priv->regs->tbipa, TBIPA_VALUE);
}


/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar *regs = priv->regs;
	u32 tempval;

	/* Mask all interrupts */
	gfar_write(&regs->imask, IMASK_INIT_CLEAR);

	/* Clear all interrupts */
	gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&priv->regs->dmactrl);
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
	    != (DMACTRL_GRS | DMACTRL_GTS)) {
		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
		gfar_write(&priv->regs->dmactrl, tempval);

		while (!(gfar_read(&priv->regs->ievent) &
			 (IEVENT_GRSC | IEVENT_GTSC)))
			cpu_relax();
	}

	/* Disable Rx and Tx */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}
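
/* Teardown happens in dependency order: report link-down, halt the
 * DMA and MAC, release the IRQ handlers (or the polling timer), free
 * the in-flight skbs, and finally return the descriptor rings. The
 * rings' bus address is recovered from tbase0, which still holds the
 * value programmed in startup_gfar(). */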
void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar *regs = priv->regs;
	unsigned long flags;

	/* Lock it down */
	spin_lock_irqsave(&priv->lock, flags);

	/* Tell the kernel the link is down */
	priv->mii_info->link = 0;
	adjust_link(dev);

	gfar_halt(dev);

	if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR) {
		/* Clear any pending interrupts */
		mii_clear_phy_interrupt(priv->mii_info);

		/* Disable PHY Interrupts */
		mii_configure_phy_interrupt(priv->mii_info,
				MII_INTERRUPT_DISABLED);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Free the IRQs */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		free_irq(priv->interruptError, dev);
		free_irq(priv->interruptTransmit, dev);
		free_irq(priv->interruptReceive, dev);
	} else {
		free_irq(priv->interruptTransmit, dev);
	}

	if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR) {
		free_irq(priv->einfo->interruptPHY, dev);
	} else {
		del_timer_sync(&priv->phy_info_timer);
	}

	free_skb_resources(priv);

	dma_free_coherent(NULL,
			sizeof(struct txbd8)*priv->tx_ring_size
			+ sizeof(struct rxbd8)*priv->rx_ring_size,
			priv->tx_bd_base,
			gfar_read(&regs->tbase0));
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff */
void free_skb_resources(struct gfar_private *priv)
{
	struct rxbd8 *rxbdp;
	struct txbd8 *txbdp;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	txbdp = priv->tx_bd_base;

	for (i = 0; i < priv->tx_ring_size; i++) {
		if (priv->tx_skbuff[i]) {
			dma_unmap_single(NULL, txbdp->bufPtr,
					txbdp->length,
					DMA_TO_DEVICE);
			dev_kfree_skb_any(priv->tx_skbuff[i]);
			priv->tx_skbuff[i] = NULL;
		}

		txbdp++;
	}

	kfree(priv->tx_skbuff);

	rxbdp = priv->rx_bd_base;

	/* rx_skbuff is not guaranteed to be allocated, so only
	 * free it and its contents if it is allocated */
	if (priv->rx_skbuff != NULL) {
		for (i = 0; i < priv->rx_ring_size; i++) {
			if (priv->rx_skbuff[i]) {
				dma_unmap_single(NULL, rxbdp->bufPtr,
						priv->rx_buffer_size
						+ RXBUF_ALIGNMENT,
						DMA_FROM_DEVICE);

				dev_kfree_skb_any(priv->rx_skbuff[i]);
				priv->rx_skbuff[i] = NULL;
			}

			rxbdp->status = 0;
			rxbdp->length = 0;
			rxbdp->bufPtr = 0;

			rxbdp++;
		}

		kfree(priv->rx_skbuff);
	}
}

void gfar_start(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar *regs = priv->regs;
	u32 tempval;

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&priv->regs->dmactrl, tempval);

	/* Clear THLT, so that the DMA starts polling now */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&priv->regs->dmactrl, tempval);

	/* Unmask the interrupts we look for */
	gfar_write(&regs->imask, IMASK_DEFAULT);
}

/* Bring the controller up and running */
int startup_gfar(struct net_device *dev)
{
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	dma_addr_t addr;
	unsigned long vaddr;
	int i;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar *regs = priv->regs;
	int err = 0;
	u32 rctrl = 0;

	gfar_write(&regs->imask, IMASK_INIT_CLEAR);
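
	/* Both rings live in one coherent allocation, tx descriptors
	 * first and rx descriptors immediately after:
	 *
	 *   addr -> [ tx_ring_size * txbd8 | rx_ring_size * rxbd8 ]
	 *
	 * tbase0 gets the bus address of the tx half, and rbase0 the
	 * bus address of the rx half. */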
	/* Allocate memory for the buffer descriptors */
	vaddr = (unsigned long) dma_alloc_coherent(NULL,
			sizeof (struct txbd8) * priv->tx_ring_size +
			sizeof (struct rxbd8) * priv->rx_ring_size,
			&addr, GFP_KERNEL);

	if (vaddr == 0) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
			       dev->name);
		return -ENOMEM;
	}

	priv->tx_bd_base = (struct txbd8 *) vaddr;

	/* enet DMA only understands physical addresses */
	gfar_write(&regs->tbase0, addr);

	/* Start the rx descriptor ring where the tx ring leaves off */
	addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
	vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size;
	priv->rx_bd_base = (struct rxbd8 *) vaddr;
	gfar_write(&regs->rbase0, addr);

	/* Setup the skbuff rings */
	priv->tx_skbuff =
	    (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
					priv->tx_ring_size, GFP_KERNEL);

	if (priv->tx_skbuff == NULL) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
			       dev->name);
		err = -ENOMEM;
		goto tx_skb_fail;
	}

	for (i = 0; i < priv->tx_ring_size; i++)
		priv->tx_skbuff[i] = NULL;

	priv->rx_skbuff =
	    (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
					priv->rx_ring_size, GFP_KERNEL);

	if (priv->rx_skbuff == NULL) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
			       dev->name);
		err = -ENOMEM;
		goto rx_skb_fail;
	}

	for (i = 0; i < priv->rx_ring_size; i++)
		priv->rx_skbuff[i] = NULL;

	/* Initialize some variables in our dev structure */
	priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
	priv->cur_rx = priv->rx_bd_base;
	priv->skb_curtx = priv->skb_dirtytx = 0;
	priv->skb_currx = 0;

	/* Initialize Transmit Descriptor Ring */
	txbdp = priv->tx_bd_base;
	for (i = 0; i < priv->tx_ring_size; i++) {
		txbdp->status = 0;
		txbdp->length = 0;
		txbdp->bufPtr = 0;
		txbdp++;
	}

	/* Set the last descriptor in the ring to indicate wrap */
	txbdp--;
	txbdp->status |= TXBD_WRAP;

	rxbdp = priv->rx_bd_base;
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct sk_buff *skb = NULL;

		rxbdp->status = 0;

		skb = gfar_new_skb(dev, rxbdp);

		priv->rx_skbuff[i] = skb;

		rxbdp++;
	}

	/* Set the last descriptor in the ring to wrap */
	rxbdp--;
	rxbdp->status |= RXBD_WRAP;

	/* If the device has multiple interrupts, register for
	 * them. Otherwise, only register for the one */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive */
		if (request_irq(priv->interruptError, gfar_error,
				0, "enet_error", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
				       dev->name, priv->interruptError);

			err = -1;
			goto err_irq_fail;
		}

		if (request_irq(priv->interruptTransmit, gfar_transmit,
				0, "enet_tx", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
				       dev->name, priv->interruptTransmit);

			err = -1;

			goto tx_irq_fail;
		}

		if (request_irq(priv->interruptReceive, gfar_receive,
				0, "enet_rx", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
				       dev->name, priv->interruptReceive);

			err = -1;
			goto rx_irq_fail;
		}
	} else {
		if (request_irq(priv->interruptTransmit, gfar_interrupt,
				0, "gfar_interrupt", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
				       dev->name, priv->interruptTransmit);

			err = -1;
			goto err_irq_fail;
		}
	}

	/* Set up the PHY change work queue */
	INIT_WORK(&priv->tq, gfar_phy_change, dev);

	init_timer(&priv->phy_info_timer);
	priv->phy_info_timer.function = &gfar_phy_startup_timer;
	priv->phy_info_timer.data = (unsigned long) priv->mii_info;
	mod_timer(&priv->phy_info_timer, jiffies + HZ);
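
	/* mk_ic_value() packs a frame-count threshold and a timer value
	 * into one coalescing register: the interrupt fires once that
	 * many frames have arrived, or when the timer expires after a
	 * frame, whichever comes first (the time units are hardware
	 * defined; see the controller's reference manual). */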
	/* Configure the coalescing support */
	if (priv->txcoalescing)
		gfar_write(&regs->txic,
			   mk_ic_value(priv->txcount, priv->txtime));
	else
		gfar_write(&regs->txic, 0);

	if (priv->rxcoalescing)
		gfar_write(&regs->rxic,
			   mk_ic_value(priv->rxcount, priv->rxtime));
	else
		gfar_write(&regs->rxic, 0);

	if (priv->rx_csum_enable)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash)
		rctrl |= RCTRL_EXTHASH;

	if (priv->vlan_enable)
		rctrl |= RCTRL_VLAN;

	/* Init rctrl based on our settings */
	gfar_write(&priv->regs->rctrl, rctrl);

	if (dev->features & NETIF_F_IP_CSUM)
		gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM);

	gfar_start(dev);

	return 0;

rx_irq_fail:
	free_irq(priv->interruptTransmit, dev);
tx_irq_fail:
	free_irq(priv->interruptError, dev);
err_irq_fail:
rx_skb_fail:
	free_skb_resources(priv);
tx_skb_fail:
	dma_free_coherent(NULL,
			sizeof(struct txbd8)*priv->tx_ring_size
			+ sizeof(struct rxbd8)*priv->rx_ring_size,
			priv->tx_bd_base,
			gfar_read(&regs->tbase0));

	if (priv->mii_info->phyinfo->close)
		priv->mii_info->phyinfo->close(priv->mii_info);

	kfree(priv->mii_info);

	return err;
}

/* Called when something needs to use the ethernet device.
 * Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
	int err;

	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);

	if (err)
		return err;

	err = startup_gfar(dev);

	netif_start_queue(dev);

	return err;
}

static struct txfcb *gfar_add_fcb(struct sk_buff *skb, struct txbd8 *bdp)
{
	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	/* Flag the bd so the controller looks for the FCB */
	bdp->status |= TXBD_TOE;

	return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
{
	int len;

	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload. We set it to checksum, using a pseudo-header
	 * we provide
	 */
	fcb->ip = 1;
	fcb->tup = 1;
	fcb->ctu = 1;
	fcb->nph = 1;

	/* Notify the controller what the protocol is */
	if (skb->nh.iph->protocol == IPPROTO_UDP)
		fcb->udp = 1;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr */
	fcb->l3os = (u16)(skb->nh.raw - skb->data - GMAC_FCB_LEN);
	fcb->l4os = (u16)(skb->h.raw - skb->nh.raw);

	len = skb->nh.iph->tot_len - fcb->l4os;

	/* Provide the pseudoheader csum */
	fcb->phcs = ~csum_tcpudp_magic(skb->nh.iph->saddr,
				       skb->nh.iph->daddr, len,
				       skb->nh.iph->protocol, 0);
}

void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->vln = 1;
	fcb->vlctl = vlan_tx_tag_get(skb);
}
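
/* Note on the transmit path below: gfar_add_fcb() uses skb_push(), so
 * once an FCB is added, skb->data points at the FCB and skb->len
 * already includes GMAC_FCB_LEN. The descriptor's length and buffer
 * pointer therefore cover the FCB and the frame together. */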

/* This is called by the kernel when a frame is ready for transmission.
 * It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp;

	/* Update transmit stats */
	priv->stats.tx_bytes += skb->len;

	/* Lock priv now */
	spin_lock_irq(&priv->lock);

	/* Point at the first free tx descriptor */
	txbdp = priv->cur_tx;

	/* Clear all but the WRAP status flags */
	txbdp->status &= TXBD_WRAP;

	/* Set up checksumming */
	if ((dev->features & NETIF_F_IP_CSUM)
	    && (CHECKSUM_HW == skb->ip_summed)) {
		fcb = gfar_add_fcb(skb, txbdp);
		gfar_tx_checksum(skb, fcb);
	}

	if (priv->vlan_enable &&
	    unlikely(priv->vlgrp && vlan_tx_tag_present(skb))) {
		if (NULL == fcb)
			fcb = gfar_add_fcb(skb, txbdp);

		gfar_tx_vlan(skb, fcb);
	}

	/* Set buffer length and pointer */
	txbdp->length = skb->len;
	txbdp->bufPtr = dma_map_single(NULL, skb->data,
			skb->len, DMA_TO_DEVICE);

	/* Save the skb pointer so we can free it later */
	priv->tx_skbuff[priv->skb_curtx] = skb;

	/* Update the current skb pointer (wrapping if this was the last) */
	priv->skb_curtx =
	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);

	/* Flag the BD as interrupt-causing */
	txbdp->status |= TXBD_INTERRUPT;

	/* Flag the BD as ready to go, last in frame, and
	 * in need of CRC */
	txbdp->status |= (TXBD_READY | TXBD_LAST | TXBD_CRC);

	dev->trans_start = jiffies;

	/* If this was the last BD in the ring, the next one
	 * is at the beginning of the ring */
	if (txbdp->status & TXBD_WRAP)
		txbdp = priv->tx_bd_base;
	else
		txbdp++;

	/* If the next BD still needs to be cleaned up, then the bds
	 * are full. We need to tell the kernel to stop sending us stuff. */
	if (txbdp == priv->dirty_tx) {
		netif_stop_queue(dev);

		priv->stats.tx_fifo_errors++;
	}

	/* Update the current txbd to the next one */
	priv->cur_tx = txbdp;

	/* Tell the DMA to go go go */
	gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);

	/* Unlock priv */
	spin_unlock_irq(&priv->lock);

	return 0;
}

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	stop_gfar(dev);

	/* Shutdown the PHY */
	if (priv->mii_info->phyinfo->close)
		priv->mii_info->phyinfo->close(priv->mii_info);

	kfree(priv->mii_info);

	netif_stop_queue(dev);

	return 0;
}

/* returns a net_device_stats structure pointer */
static struct net_device_stats * gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	return &(priv->stats);
}

/* Changes the mac address if the controller is not running. */
int gfar_set_mac_address(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i;
	char tmpbuf[MAC_ADDR_LEN];
	u32 tempval;

	/* Now copy it into the mac registers backwards, cuz
	 * little endian is silly */
	for (i = 0; i < MAC_ADDR_LEN; i++)
		tmpbuf[MAC_ADDR_LEN - 1 - i] = dev->dev_addr[i];

	gfar_write(&priv->regs->macstnaddr1, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(&priv->regs->macstnaddr2, tempval);

	return 0;
}


/* Enables and disables VLAN insertion/extraction */
static void gfar_vlan_rx_register(struct net_device *dev,
		struct vlan_group *grp)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	u32 tempval;

	spin_lock_irqsave(&priv->lock, flags);

	priv->vlgrp = grp;

	if (grp) {
		/* Enable VLAN tag insertion */
		tempval = gfar_read(&priv->regs->tctrl);
		tempval |= TCTRL_VLINS;

		gfar_write(&priv->regs->tctrl, tempval);

		/* Enable VLAN tag extraction */
		tempval = gfar_read(&priv->regs->rctrl);
		tempval |= RCTRL_VLEX;
		gfar_write(&priv->regs->rctrl, tempval);
	} else {
		/* Disable VLAN tag insertion */
		tempval = gfar_read(&priv->regs->tctrl);
		tempval &= ~TCTRL_VLINS;
		gfar_write(&priv->regs->tctrl, tempval);

		/* Disable VLAN tag extraction */
		tempval = gfar_read(&priv->regs->rctrl);
		tempval &= ~RCTRL_VLEX;
		gfar_write(&priv->regs->rctrl, tempval);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}


static void gfar_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	if (priv->vlgrp)
		priv->vlgrp->vlan_devices[vid] = NULL;

	spin_unlock_irqrestore(&priv->lock, flags);
}


static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	int tempsize, tempval;
	struct gfar_private *priv = netdev_priv(dev);
	int oldsize = priv->rx_buffer_size;
	int frame_size = new_mtu + ETH_HLEN;

	if (priv->vlan_enable)
		frame_size += VLAN_ETH_HLEN;

	if (gfar_uses_fcb(priv))
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: Invalid MTU setting\n",
			       dev->name);
		return -EINVAL;
	}

	tempsize =
	    (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
	    INCREMENTAL_BUFFER_SIZE;

	/* Only stop and start the controller if it isn't already
	 * stopped */
	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		stop_gfar(dev);

	priv->rx_buffer_size = tempsize;

	dev->mtu = new_mtu;

	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
	gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length */
	tempval = gfar_read(&priv->regs->maccfg2);

	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
	else
		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

	gfar_write(&priv->regs->maccfg2, tempval);

	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		startup_gfar(dev);

	return 0;
}

/* gfar_timeout gets called when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem. */
static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	priv->stats.tx_errors++;

	if (dev->flags & IFF_UP) {
		stop_gfar(dev);
		startup_gfar(dev);
	}

	netif_schedule(dev);
}

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct gfar_private *priv = netdev_priv(dev);
	struct txbd8 *bdp;

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);

	/* Lock priv */
	spin_lock(&priv->lock);
	bdp = priv->dirty_tx;
	while ((bdp->status & TXBD_READY) == 0) {
		/* If dirty_tx and cur_tx are the same, then either the
		 * ring is empty or full now (it could only be full in the
		 * beginning, obviously). If it is empty, we are done. */
		if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
			break;

		priv->stats.tx_packets++;

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet. */
		if (bdp->status & TXBD_DEF)
			priv->stats.collisions++;

		/* Free the sk buffer associated with this TxBD */
		dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);
		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
		priv->skb_dirtytx =
		    (priv->skb_dirtytx +
		     1) & TX_RING_MOD_MASK(priv->tx_ring_size);

		/* update bdp to point at next bd in the ring (wrapping if necessary) */
		if (bdp->status & TXBD_WRAP)
			bdp = priv->tx_bd_base;
		else
			bdp++;

		/* Move dirty_tx to be the next bd */
		priv->dirty_tx = bdp;

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	} /* while ((bdp->status & TXBD_READY) == 0) */

	/* If we are coalescing the interrupts, reset the timer
	 * Otherwise, clear it */
	if (priv->txcoalescing)
		gfar_write(&priv->regs->txic,
			   mk_ic_value(priv->txcount, priv->txtime));
	else
		gfar_write(&priv->regs->txic, 0);

	spin_unlock(&priv->lock);

	return IRQ_HANDLED;
}
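
/* gfar_new_skb() below over-allocates by RXBUF_ALIGNMENT and then
 * reserves (RXBUF_ALIGNMENT - (data & (RXBUF_ALIGNMENT - 1))) bytes,
 * which rounds skb->data up to the next alignment boundary (a full
 * RXBUF_ALIGNMENT is reserved when the buffer was already aligned). */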
struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;
	unsigned int timeout = SKB_ALLOC_TIMEOUT;

	/* We have to allocate the skb, so keep trying till we succeed */
	while ((!skb) && timeout--)
		skb = dev_alloc_skb(priv->rx_buffer_size + RXBUF_ALIGNMENT);

	if (skb == NULL)
		return NULL;

	/* We need the data buffer to be aligned properly. We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb,
		    RXBUF_ALIGNMENT -
		    (((unsigned) skb->data) & (RXBUF_ALIGNMENT - 1)));

	skb->dev = dev;

	bdp->bufPtr = dma_map_single(NULL, skb->data,
			priv->rx_buffer_size + RXBUF_ALIGNMENT,
			DMA_FROM_DEVICE);

	bdp->length = 0;

	/* Mark the buffer empty */
	bdp->status |= (RXBD_EMPTY | RXBD_INTERRUPT);

	return skb;
}

static inline void count_errors(unsigned short status, struct gfar_private *priv)
{
	struct net_device_stats *stats = &priv->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors
	 * matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

		estats->rx_trunc++;

		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			estats->rx_large++;
		else
			estats->rx_short++;
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		estats->rx_nonoctet++;
	}
	if (status & RXBD_CRCERR) {
		estats->rx_crcerr++;
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		estats->rx_overrun++;
		stats->rx_crc_errors++;
	}
}

irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct gfar_private *priv = netdev_priv(dev);

#ifdef CONFIG_GFAR_NAPI
	u32 tempval;
#endif

	/* Clear IEVENT, so rx interrupt isn't called again
	 * because of this interrupt */
	gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);

	/* support NAPI */
#ifdef CONFIG_GFAR_NAPI
	if (netif_rx_schedule_prep(dev)) {
		tempval = gfar_read(&priv->regs->imask);
		tempval &= IMASK_RX_DISABLED;
		gfar_write(&priv->regs->imask, tempval);

		__netif_rx_schedule(dev);
	} else {
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
			       dev->name, gfar_read(&priv->regs->ievent),
			       gfar_read(&priv->regs->imask));
	}
#else

	spin_lock(&priv->lock);
	gfar_clean_rx_ring(dev, priv->rx_ring_size);

	/* If we are coalescing interrupts, update the timer
	 * Otherwise, clear it */
	if (priv->rxcoalescing)
		gfar_write(&priv->regs->rxic,
			   mk_ic_value(priv->rxcount, priv->rxtime));
	else
		gfar_write(&priv->regs->rxic, 0);

	spin_unlock(&priv->lock);
#endif

	return IRQ_HANDLED;
}

static inline int gfar_rx_vlan(struct sk_buff *skb,
		struct vlan_group *vlgrp, unsigned short vlctl)
{
#ifdef CONFIG_GFAR_NAPI
	return vlan_hwaccel_receive_skb(skb, vlgrp, vlctl);
#else
	return vlan_hwaccel_rx(skb, vlgrp, vlctl);
#endif
}

static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary. Otherwise, it is */
	if (fcb->cip && !fcb->eip && fcb->ctu && !fcb->etu)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
}


static inline struct rxfcb *gfar_get_fcb(struct sk_buff *skb)
{
	struct rxfcb *fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb */
	skb_pull(skb, GMAC_FCB_LEN);

	return fcb;
}

/* gfar_process_frame() -- handle one incoming packet if skb
 * isn't NULL. */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int length)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	if (skb == NULL) {
		if (netif_msg_rx_err(priv))
			printk(KERN_WARNING "%s: Missing skb!!.\n", dev->name);
		priv->stats.rx_dropped++;
		priv->extra_stats.rx_skbmissing++;
	} else {
		int ret;

		/* Prep the skb for the packet */
		skb_put(skb, length);

		/* Grab the FCB if there is one */
		if (gfar_uses_fcb(priv))
			fcb = gfar_get_fcb(skb);

		/* Remove the padded bytes, if there are any */
		if (priv->padding)
			skb_pull(skb, priv->padding);

		if (priv->rx_csum_enable)
			gfar_rx_checksum(skb, fcb);

		/* Tell the skb what kind of packet this is */
		skb->protocol = eth_type_trans(skb, dev);

		/* Send the packet up the stack */
		if (unlikely(priv->vlgrp && fcb->vln))
			ret = gfar_rx_vlan(skb, priv->vlgrp, fcb->vlctl);
		else
			ret = RECEIVE(skb);

		if (NET_RX_DROP == ret)
			priv->extra_stats.kernel_dropped++;
	}

	return 0;
}

/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached. Returns the number
 * of frames handled
 */
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
{
	struct rxbd8 *bdp;
	struct sk_buff *skb;
	u16 pkt_len;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = priv->cur_rx;

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		skb = priv->rx_skbuff[priv->skb_currx];

		if (!(bdp->status &
		      (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET
		       | RXBD_CRCERR | RXBD_OVERRUN | RXBD_TRUNCATED))) {
			/* Increment the number of packets */
			priv->stats.rx_packets++;
			howmany++;

			/* Remove the FCS from the packet length */
			pkt_len = bdp->length - 4;

			gfar_process_frame(dev, skb, pkt_len);

			priv->stats.rx_bytes += pkt_len;
		} else {
			count_errors(bdp->status, priv);

			if (skb)
				dev_kfree_skb_any(skb);

			priv->rx_skbuff[priv->skb_currx] = NULL;
		}

		dev->last_rx = jiffies;

		/* Clear the status flags for this buffer */
		bdp->status &= ~RXBD_STATS;

		/* Add another skb for the future */
		skb = gfar_new_skb(dev, bdp);
		priv->rx_skbuff[priv->skb_currx] = skb;

		/* Update to the next pointer */
		if (bdp->status & RXBD_WRAP)
			bdp = priv->rx_bd_base;
		else
			bdp++;

		/* update to point at the next skb */
		priv->skb_currx =
		    (priv->skb_currx +
		     1) & RX_RING_MOD_MASK(priv->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	priv->cur_rx = bdp;

	/* If no packets have arrived since the
	 * last one we processed, clear the IEVENT RX and
	 * BSY bits so that another interrupt won't be
	 * generated when we set IMASK */
	if (bdp->status & RXBD_EMPTY)
		gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);

	return howmany;
}
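
/* Under the 2.6.14-era NAPI API, ->poll() returns 0 (after calling
 * netif_rx_complete() and re-enabling rx interrupts) only when the
 * ring was drained within the allotted quota; returning 1 with rx
 * interrupts still masked tells the core to poll again later. */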
Returns the number 1533 * of frames handled 1534 */ 1535int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit) 1536{ 1537 struct rxbd8 *bdp; 1538 struct sk_buff *skb; 1539 u16 pkt_len; 1540 int howmany = 0; 1541 struct gfar_private *priv = netdev_priv(dev); 1542 1543 /* Get the first full descriptor */ 1544 bdp = priv->cur_rx; 1545 1546 while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) { 1547 skb = priv->rx_skbuff[priv->skb_currx]; 1548 1549 if (!(bdp->status & 1550 (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET 1551 | RXBD_CRCERR | RXBD_OVERRUN | RXBD_TRUNCATED))) { 1552 /* Increment the number of packets */ 1553 priv->stats.rx_packets++; 1554 howmany++; 1555 1556 /* Remove the FCS from the packet length */ 1557 pkt_len = bdp->length - 4; 1558 1559 gfar_process_frame(dev, skb, pkt_len); 1560 1561 priv->stats.rx_bytes += pkt_len; 1562 } else { 1563 count_errors(bdp->status, priv); 1564 1565 if (skb) 1566 dev_kfree_skb_any(skb); 1567 1568 priv->rx_skbuff[priv->skb_currx] = NULL; 1569 } 1570 1571 dev->last_rx = jiffies; 1572 1573 /* Clear the status flags for this buffer */ 1574 bdp->status &= ~RXBD_STATS; 1575 1576 /* Add another skb for the future */ 1577 skb = gfar_new_skb(dev, bdp); 1578 priv->rx_skbuff[priv->skb_currx] = skb; 1579 1580 /* Update to the next pointer */ 1581 if (bdp->status & RXBD_WRAP) 1582 bdp = priv->rx_bd_base; 1583 else 1584 bdp++; 1585 1586 /* update to point at the next skb */ 1587 priv->skb_currx = 1588 (priv->skb_currx + 1589 1) & RX_RING_MOD_MASK(priv->rx_ring_size); 1590 1591 } 1592 1593 /* Update the current rxbd pointer to be the next one */ 1594 priv->cur_rx = bdp; 1595 1596 /* If no packets have arrived since the 1597 * last one we processed, clear the IEVENT RX and 1598 * BSY bits so that another interrupt won't be 1599 * generated when we set IMASK */ 1600 if (bdp->status & RXBD_EMPTY) 1601 gfar_write(&priv->regs->ievent, IEVENT_RX_MASK); 1602 1603 return howmany; 1604} 1605 1606#ifdef CONFIG_GFAR_NAPI 1607static int gfar_poll(struct net_device *dev, int *budget) 1608{ 1609 int howmany; 1610 struct gfar_private *priv = netdev_priv(dev); 1611 int rx_work_limit = *budget; 1612 1613 if (rx_work_limit > dev->quota) 1614 rx_work_limit = dev->quota; 1615 1616 howmany = gfar_clean_rx_ring(dev, rx_work_limit); 1617 1618 dev->quota -= howmany; 1619 rx_work_limit -= howmany; 1620 *budget -= howmany; 1621 1622 if (rx_work_limit >= 0) { 1623 netif_rx_complete(dev); 1624 1625 /* Clear the halt bit in RSTAT */ 1626 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT); 1627 1628 gfar_write(&priv->regs->imask, IMASK_DEFAULT); 1629 1630 /* If we are coalescing interrupts, update the timer */ 1631 /* Otherwise, clear it */ 1632 if (priv->rxcoalescing) 1633 gfar_write(&priv->regs->rxic, 1634 mk_ic_value(priv->rxcount, priv->rxtime)); 1635 else 1636 gfar_write(&priv->regs->rxic, 0); 1637 } 1638 1639 return (rx_work_limit < 0) ? 

/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Save ievent for future reference */
	u32 events = gfar_read(&priv->regs->ievent);

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, events);

	/* Check for reception */
	if ((events & IEVENT_RXF0) || (events & IEVENT_RXB0))
		gfar_receive(irq, dev_id, regs);

	/* Check for transmit completion */
	if ((events & IEVENT_TXF) || (events & IEVENT_TXB))
		gfar_transmit(irq, dev_id, regs);

	/* Update error statistics */
	if (events & IEVENT_TXE) {
		priv->stats.tx_errors++;

		if (events & IEVENT_LC)
			priv->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			priv->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			if (netif_msg_tx_err(priv))
				printk(KERN_WARNING "%s: tx underrun. dropped packet\n", dev->name);
			priv->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			/* Reactivate the Tx Queues */
			gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
		}
	}
	if (events & IEVENT_BSY) {
		priv->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, dev_id, regs);

#ifndef CONFIG_GFAR_NAPI
		/* Clear the halt bit in RSTAT */
		gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
#endif

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n",
			       dev->name,
			       gfar_read(&priv->regs->rstat));
	}
	if (events & IEVENT_BABR) {
		priv->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: babbling error\n", dev->name);
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: EBERR\n", dev->name);
	}
	if ((events & IEVENT_RXC) && (netif_msg_rx_err(priv)))
		printk(KERN_DEBUG "%s: control frame\n", dev->name);

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: babt error\n", dev->name);
	}

	return IRQ_HANDLED;
}

static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Clear the interrupt */
	mii_clear_phy_interrupt(priv->mii_info);

	/* Disable PHY interrupts */
	mii_configure_phy_interrupt(priv->mii_info,
			MII_INTERRUPT_DISABLED);

	/* Schedule the phy change */
	schedule_work(&priv->tq);

	return IRQ_HANDLED;
}

/* Scheduled by the phy_interrupt/timer to handle PHY changes */
static void gfar_phy_change(void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct gfar_private *priv = netdev_priv(dev);
	int result = 0;

	/* Delay to give the PHY a chance to change the
	 * register state */
	msleep(1);

	/* Update the link, speed, duplex */
	result = priv->mii_info->phyinfo->read_status(priv->mii_info);

	/* Adjust the known status as long as the link
	 * isn't still coming up */
	if ((0 == result) || (priv->mii_info->link == 0))
		adjust_link(dev);

	/* Reenable interrupts, if needed */
	if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR)
		mii_configure_phy_interrupt(priv->mii_info,
				MII_INTERRUPT_ENABLED);
}

/* Called every so often on systems that don't interrupt
 * the core for PHY changes */
static void gfar_phy_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct gfar_private *priv = netdev_priv(dev);

	schedule_work(&priv->tq);

	mod_timer(&priv->phy_info_timer, jiffies +
			GFAR_PHY_CHANGE_TIME * HZ);
}
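
/* Note: `secondary` below is function-static, so the autonegotiation
 * retry budget is shared by every gianfar device in the system rather
 * than tracked per interface. */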
/* Keep trying aneg for some time
 * If, after GFAR_AN_TIMEOUT seconds, it has not
 * finished, we switch to forced.
 * Either way, once the process has completed, we either
 * request the interrupt, or switch the timer over to
 * using gfar_phy_timer to check status */
static void gfar_phy_startup_timer(unsigned long data)
{
	int result;
	static int secondary = GFAR_AN_TIMEOUT;
	struct gfar_mii_info *mii_info = (struct gfar_mii_info *)data;
	struct gfar_private *priv = netdev_priv(mii_info->dev);

	/* Configure the Auto-negotiation */
	result = mii_info->phyinfo->config_aneg(mii_info);

	/* If autonegotiation failed to start, and
	 * we haven't timed out, reset the timer, and return */
	if (result && secondary--) {
		mod_timer(&priv->phy_info_timer, jiffies + HZ);
		return;
	} else if (result) {
		/* Couldn't start autonegotiation.
		 * Try switching to forced */
		mii_info->autoneg = 0;
		result = mii_info->phyinfo->config_aneg(mii_info);

		/* Forcing failed! Give up */
		if (result) {
			if (netif_msg_link(priv))
				printk(KERN_ERR "%s: Forcing failed!\n",
				       mii_info->dev->name);
			return;
		}
	}

	/* Kill the timer so it can be restarted */
	del_timer_sync(&priv->phy_info_timer);

	/* Grab the PHY interrupt, if necessary/possible */
	if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR) {
		if (request_irq(priv->einfo->interruptPHY,
				phy_interrupt,
				SA_SHIRQ,
				"phy_interrupt",
				mii_info->dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d (PHY)\n",
				       mii_info->dev->name,
				       priv->einfo->interruptPHY);
		} else {
			mii_configure_phy_interrupt(priv->mii_info,
					MII_INTERRUPT_ENABLED);
			return;
		}
	}

	/* Start the timer again, this time in order to
	 * handle a change in status */
	init_timer(&priv->phy_info_timer);
	priv->phy_info_timer.function = &gfar_phy_timer;
	priv->phy_info_timer.data = (unsigned long) mii_info->dev;
	mod_timer(&priv->phy_info_timer, jiffies +
			GFAR_PHY_CHANGE_TIME * HZ);
}
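
/* adjust_link() below updates MACCFG2 when the PHY reports a change:
 * the duplex bit is toggled directly, while a speed change rewrites
 * the interface-select field (MACCFG2_IF): the GMII encoding for
 * gigabit, the MII encoding for 10/100. */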
/* Called every time the controller might need to be made
 * aware of new link state. The PHY code conveys this
 * information through variables in the priv structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar *regs = priv->regs;
	u32 tempval;
	struct gfar_mii_info *mii_info = priv->mii_info;

	if (mii_info->link) {
		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (mii_info->duplex != priv->oldduplex) {
			if (!(mii_info->duplex)) {
				tempval = gfar_read(&regs->maccfg2);
				tempval &= ~(MACCFG2_FULL_DUPLEX);
				gfar_write(&regs->maccfg2, tempval);

				if (netif_msg_link(priv))
					printk(KERN_INFO "%s: Half Duplex\n",
					       dev->name);
			} else {
				tempval = gfar_read(&regs->maccfg2);
				tempval |= MACCFG2_FULL_DUPLEX;
				gfar_write(&regs->maccfg2, tempval);

				if (netif_msg_link(priv))
					printk(KERN_INFO "%s: Full Duplex\n",
					       dev->name);
			}

			priv->oldduplex = mii_info->duplex;
		}

		if (mii_info->speed != priv->oldspeed) {
			switch (mii_info->speed) {
			case 1000:
				tempval = gfar_read(&regs->maccfg2);
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
				gfar_write(&regs->maccfg2, tempval);
				break;
			case 100:
			case 10:
				tempval = gfar_read(&regs->maccfg2);
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
				gfar_write(&regs->maccfg2, tempval);
				break;
			default:
				if (netif_msg_link(priv))
					printk(KERN_WARNING
					       "%s: Ack! Speed (%d) is not 10/100/1000!\n",
					       dev->name, mii_info->speed);
				break;
			}

			if (netif_msg_link(priv))
				printk(KERN_INFO "%s: Speed %dBT\n", dev->name,
				       mii_info->speed);

			priv->oldspeed = mii_info->speed;
		}

		if (!priv->oldlink) {
			if (netif_msg_link(priv))
				printk(KERN_INFO "%s: Link is up\n", dev->name);
			priv->oldlink = 1;
			netif_carrier_on(dev);
			netif_schedule(dev);
		}
	} else {
		if (priv->oldlink) {
			if (netif_msg_link(priv))
				printk(KERN_INFO "%s: Link is down\n",
				       dev->name);
			priv->oldlink = 0;
			priv->oldspeed = 0;
			priv->oldduplex = -1;
			netif_carrier_off(dev);
		}
	}
}
/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed) */
static void gfar_set_multi(struct net_device *dev)
{
	struct dev_mc_list *mc_ptr;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar *regs = priv->regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		if (netif_msg_drv(priv))
			printk(KERN_INFO "%s: Entering promiscuous mode.\n",
					dev->name);
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		if (dev->mc_count == 0)
			return;

		/* Parse the list, and set the appropriate bits */
		for (mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next)
			gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
	}
}
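
/* gfar_set_multi() above fills or clears the sixteen hash registers
 * one write at a time.  If igaddr0-7 and gaddr0-7 each occupy eight
 * consecutive words in struct gfar (which the register layout
 * suggests, but which this sketch merely assumes), two loops do the
 * same work.  Illustrative only; kept out of the build.  Call with
 * 0xffffffff for the all-multicast case, or 0 to clear the hash. */
#if 0
static void gfar_fill_hash_regs(struct gfar *regs, u32 val)
{
	int i;

	for (i = 0; i < 8; i++)
		gfar_write(&regs->igaddr0 + i, val);	/* individual hash */
	for (i = 0; i < 8; i++)
		gfar_write(&regs->gaddr0 + i, val);	/* group hash */
}
#endif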
/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (i.e. the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255.  This means that the 3 most significant bits of the
 * hash index select which gaddr register to use, and the 5 other
 * bits indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry. */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(MAC_ADDR_LEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31 - whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}

/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Save ievent for future reference */
	u32 events = gfar_read(&priv->regs->ievent);

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, IEVENT_ERR_MASK);

	/* Report the event if either Rx or Tx error messages are enabled */
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
				dev->name, events, gfar_read(&priv->regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		priv->stats.tx_errors++;

		if (events & IEVENT_LC)
			priv->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			priv->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			if (netif_msg_tx_err(priv))
				printk(KERN_DEBUG "%s: underrun. packet dropped.\n",
						dev->name);
			priv->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			/* Reactivate the Tx Queues */
			gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
		}
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
	}
	if (events & IEVENT_BSY) {
		priv->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, dev_id, regs);

#ifndef CONFIG_GFAR_NAPI
		/* Clear the halt bit in RSTAT */
		gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
#endif

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n",
					dev->name,
					gfar_read(&priv->regs->rstat));
	}
	if (events & IEVENT_BABR) {
		priv->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: babbling error\n", dev->name);
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: EBERR\n", dev->name);
	}
	if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
		printk(KERN_DEBUG "%s: control frame\n", dev->name);

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: babt error\n", dev->name);
	}
	return IRQ_HANDLED;
}

/* Structure for a device driver */
static struct device_driver gfar_driver = {
	.name = "fsl-gianfar",
	.bus = &platform_bus_type,
	.probe = gfar_probe,
	.remove = gfar_remove,
};

static int __init gfar_init(void)
{
	return driver_register(&gfar_driver);
}

static void __exit gfar_exit(void)
{
	driver_unregister(&gfar_driver);
}

module_init(gfar_init);
module_exit(gfar_exit);
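
/* Worked illustration of the index math in gfar_set_hash_for_addr()
 * for the default 256-entry table (hash_width == 8): the hash index
 * is the top eight bits of the CRC.  Bits 31-29 select one of the
 * eight hash registers and bits 28-24 select the bit within it,
 * counted from the MSB (IBM bit numbering).  The helper below is a
 * hypothetical, out-of-build restatement of the same computation,
 * equivalent to the driver's version when hash_width is 8. */
#if 0
static void gfar_hash_index_sketch(struct net_device *dev, u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	u32 crc = ether_crc(MAC_ADDR_LEN, addr);
	u8 idx = crc >> 24;			/* hash index, 0-255 */
	u8 whichreg = idx >> 5;			/* top 3 bits: register 0-7 */
	u8 whichbit = idx & 0x1f;		/* low 5 bits: bit within register */
	u32 value = 1 << (31 - whichbit);	/* IBM numbering: bit 0 is the MSB */

	gfar_write(priv->hash_regs[whichreg],
			gfar_read(priv->hash_regs[whichreg]) | value);
}
#endif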