Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.27-rc8 2135 lines 58 kB view raw
1/* 2 * drivers/net/gianfar.c 3 * 4 * Gianfar Ethernet Driver 5 * This driver is designed for the non-CPM ethernet controllers 6 * on the 85xx and 83xx family of integrated processors 7 * Based on 8260_io/fcc_enet.c 8 * 9 * Author: Andy Fleming 10 * Maintainer: Kumar Gala 11 * 12 * Copyright (c) 2002-2006 Freescale Semiconductor, Inc. 13 * Copyright (c) 2007 MontaVista Software, Inc. 14 * 15 * This program is free software; you can redistribute it and/or modify it 16 * under the terms of the GNU General Public License as published by the 17 * Free Software Foundation; either version 2 of the License, or (at your 18 * option) any later version. 19 * 20 * Gianfar: AKA Lambda Draconis, "Dragon" 21 * RA 11 31 24.2 22 * Dec +69 19 52 23 * V 3.84 24 * B-V +1.62 25 * 26 * Theory of operation 27 * 28 * The driver is initialized through platform_device. Structures which 29 * define the configuration needed by the board are defined in a 30 * board structure in arch/ppc/platforms (though I do not 31 * discount the possibility that other architectures could one 32 * day be supported. 33 * 34 * The Gianfar Ethernet Controller uses a ring of buffer 35 * descriptors. The beginning is indicated by a register 36 * pointing to the physical address of the start of the ring. 37 * The end is determined by a "wrap" bit being set in the 38 * last descriptor of the ring. 39 * 40 * When a packet is received, the RXF bit in the 41 * IEVENT register is set, triggering an interrupt when the 42 * corresponding bit in the IMASK register is also set (if 43 * interrupt coalescing is active, then the interrupt may not 44 * happen immediately, but will wait until either a set number 45 * of frames or amount of time have passed). In NAPI, the 46 * interrupt handler will signal there is work to be done, and 47 * exit. 
This method will start at the last known empty 48 * descriptor, and process every subsequent descriptor until there 49 * are none left with data (NAPI will stop after a set number of 50 * packets to give time to other tasks, but will eventually 51 * process all the packets). The data arrives inside a 52 * pre-allocated skb, and so after the skb is passed up to the 53 * stack, a new skb must be allocated, and the address field in 54 * the buffer descriptor must be updated to indicate this new 55 * skb. 56 * 57 * When the kernel requests that a packet be transmitted, the 58 * driver starts where it left off last time, and points the 59 * descriptor at the buffer which was passed in. The driver 60 * then informs the DMA engine that there are packets ready to 61 * be transmitted. Once the controller is finished transmitting 62 * the packet, an interrupt may be triggered (under the same 63 * conditions as for reception, but depending on the TXF bit). 64 * The driver then cleans up the buffer. 
65 */ 66 67#include <linux/kernel.h> 68#include <linux/string.h> 69#include <linux/errno.h> 70#include <linux/unistd.h> 71#include <linux/slab.h> 72#include <linux/interrupt.h> 73#include <linux/init.h> 74#include <linux/delay.h> 75#include <linux/netdevice.h> 76#include <linux/etherdevice.h> 77#include <linux/skbuff.h> 78#include <linux/if_vlan.h> 79#include <linux/spinlock.h> 80#include <linux/mm.h> 81#include <linux/platform_device.h> 82#include <linux/ip.h> 83#include <linux/tcp.h> 84#include <linux/udp.h> 85#include <linux/in.h> 86 87#include <asm/io.h> 88#include <asm/irq.h> 89#include <asm/uaccess.h> 90#include <linux/module.h> 91#include <linux/dma-mapping.h> 92#include <linux/crc32.h> 93#include <linux/mii.h> 94#include <linux/phy.h> 95 96#include "gianfar.h" 97#include "gianfar_mii.h" 98 99#define TX_TIMEOUT (1*HZ) 100#undef BRIEF_GFAR_ERRORS 101#undef VERBOSE_GFAR_ERRORS 102 103const char gfar_driver_name[] = "Gianfar Ethernet"; 104const char gfar_driver_version[] = "1.3"; 105 106static int gfar_enet_open(struct net_device *dev); 107static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); 108static void gfar_reset_task(struct work_struct *work); 109static void gfar_timeout(struct net_device *dev); 110static int gfar_close(struct net_device *dev); 111struct sk_buff *gfar_new_skb(struct net_device *dev); 112static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp, 113 struct sk_buff *skb); 114static int gfar_set_mac_address(struct net_device *dev); 115static int gfar_change_mtu(struct net_device *dev, int new_mtu); 116static irqreturn_t gfar_error(int irq, void *dev_id); 117static irqreturn_t gfar_transmit(int irq, void *dev_id); 118static irqreturn_t gfar_interrupt(int irq, void *dev_id); 119static void adjust_link(struct net_device *dev); 120static void init_registers(struct net_device *dev); 121static int init_phy(struct net_device *dev); 122static int gfar_probe(struct platform_device *pdev); 123static int gfar_remove(struct 
platform_device *pdev); 124static void free_skb_resources(struct gfar_private *priv); 125static void gfar_set_multi(struct net_device *dev); 126static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); 127static void gfar_configure_serdes(struct net_device *dev); 128static int gfar_poll(struct napi_struct *napi, int budget); 129#ifdef CONFIG_NET_POLL_CONTROLLER 130static void gfar_netpoll(struct net_device *dev); 131#endif 132int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit); 133static int gfar_clean_tx_ring(struct net_device *dev); 134static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length); 135static void gfar_vlan_rx_register(struct net_device *netdev, 136 struct vlan_group *grp); 137void gfar_halt(struct net_device *dev); 138static void gfar_halt_nodisable(struct net_device *dev); 139void gfar_start(struct net_device *dev); 140static void gfar_clear_exact_match(struct net_device *dev); 141static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr); 142 143extern const struct ethtool_ops gfar_ethtool_ops; 144 145MODULE_AUTHOR("Freescale Semiconductor, Inc"); 146MODULE_DESCRIPTION("Gianfar Ethernet Driver"); 147MODULE_LICENSE("GPL"); 148 149/* Returns 1 if incoming frames use an FCB */ 150static inline int gfar_uses_fcb(struct gfar_private *priv) 151{ 152 return (priv->vlan_enable || priv->rx_csum_enable); 153} 154 155/* Set up the ethernet device structure, private data, 156 * and anything else we need before we start */ 157static int gfar_probe(struct platform_device *pdev) 158{ 159 u32 tempval; 160 struct net_device *dev = NULL; 161 struct gfar_private *priv = NULL; 162 struct gianfar_platform_data *einfo; 163 struct resource *r; 164 int err = 0; 165 DECLARE_MAC_BUF(mac); 166 167 einfo = (struct gianfar_platform_data *) pdev->dev.platform_data; 168 169 if (NULL == einfo) { 170 printk(KERN_ERR "gfar %d: Missing additional data!\n", 171 pdev->id); 172 173 return -ENODEV; 174 } 175 176 /* 
Create an ethernet device instance */ 177 dev = alloc_etherdev(sizeof (*priv)); 178 179 if (NULL == dev) 180 return -ENOMEM; 181 182 priv = netdev_priv(dev); 183 priv->dev = dev; 184 185 /* Set the info in the priv to the current info */ 186 priv->einfo = einfo; 187 188 /* fill out IRQ fields */ 189 if (einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 190 priv->interruptTransmit = platform_get_irq_byname(pdev, "tx"); 191 priv->interruptReceive = platform_get_irq_byname(pdev, "rx"); 192 priv->interruptError = platform_get_irq_byname(pdev, "error"); 193 if (priv->interruptTransmit < 0 || priv->interruptReceive < 0 || priv->interruptError < 0) 194 goto regs_fail; 195 } else { 196 priv->interruptTransmit = platform_get_irq(pdev, 0); 197 if (priv->interruptTransmit < 0) 198 goto regs_fail; 199 } 200 201 /* get a pointer to the register memory */ 202 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 203 priv->regs = ioremap(r->start, sizeof (struct gfar)); 204 205 if (NULL == priv->regs) { 206 err = -ENOMEM; 207 goto regs_fail; 208 } 209 210 spin_lock_init(&priv->txlock); 211 spin_lock_init(&priv->rxlock); 212 spin_lock_init(&priv->bflock); 213 INIT_WORK(&priv->reset_task, gfar_reset_task); 214 215 platform_set_drvdata(pdev, dev); 216 217 /* Stop the DMA engine now, in case it was running before */ 218 /* (The firmware could have used it, and left it running). */ 219 /* To do this, we write Graceful Receive Stop and Graceful */ 220 /* Transmit Stop, and then wait until the corresponding bits */ 221 /* in IEVENT indicate the stops have completed. 
*/ 222 tempval = gfar_read(&priv->regs->dmactrl); 223 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); 224 gfar_write(&priv->regs->dmactrl, tempval); 225 226 tempval = gfar_read(&priv->regs->dmactrl); 227 tempval |= (DMACTRL_GRS | DMACTRL_GTS); 228 gfar_write(&priv->regs->dmactrl, tempval); 229 230 while (!(gfar_read(&priv->regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC))) 231 cpu_relax(); 232 233 /* Reset MAC layer */ 234 gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET); 235 236 tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); 237 gfar_write(&priv->regs->maccfg1, tempval); 238 239 /* Initialize MACCFG2. */ 240 gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS); 241 242 /* Initialize ECNTRL */ 243 gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS); 244 245 /* Copy the station address into the dev structure, */ 246 memcpy(dev->dev_addr, einfo->mac_addr, MAC_ADDR_LEN); 247 248 /* Set the dev->base_addr to the gfar reg region */ 249 dev->base_addr = (unsigned long) (priv->regs); 250 251 SET_NETDEV_DEV(dev, &pdev->dev); 252 253 /* Fill in the dev structure */ 254 dev->open = gfar_enet_open; 255 dev->hard_start_xmit = gfar_start_xmit; 256 dev->tx_timeout = gfar_timeout; 257 dev->watchdog_timeo = TX_TIMEOUT; 258 netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT); 259#ifdef CONFIG_NET_POLL_CONTROLLER 260 dev->poll_controller = gfar_netpoll; 261#endif 262 dev->stop = gfar_close; 263 dev->change_mtu = gfar_change_mtu; 264 dev->mtu = 1500; 265 dev->set_multicast_list = gfar_set_multi; 266 267 dev->ethtool_ops = &gfar_ethtool_ops; 268 269 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { 270 priv->rx_csum_enable = 1; 271 dev->features |= NETIF_F_IP_CSUM; 272 } else 273 priv->rx_csum_enable = 0; 274 275 priv->vlgrp = NULL; 276 277 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { 278 dev->vlan_rx_register = gfar_vlan_rx_register; 279 280 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 281 282 priv->vlan_enable = 1; 283 } 284 285 if 
(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { 286 priv->extended_hash = 1; 287 priv->hash_width = 9; 288 289 priv->hash_regs[0] = &priv->regs->igaddr0; 290 priv->hash_regs[1] = &priv->regs->igaddr1; 291 priv->hash_regs[2] = &priv->regs->igaddr2; 292 priv->hash_regs[3] = &priv->regs->igaddr3; 293 priv->hash_regs[4] = &priv->regs->igaddr4; 294 priv->hash_regs[5] = &priv->regs->igaddr5; 295 priv->hash_regs[6] = &priv->regs->igaddr6; 296 priv->hash_regs[7] = &priv->regs->igaddr7; 297 priv->hash_regs[8] = &priv->regs->gaddr0; 298 priv->hash_regs[9] = &priv->regs->gaddr1; 299 priv->hash_regs[10] = &priv->regs->gaddr2; 300 priv->hash_regs[11] = &priv->regs->gaddr3; 301 priv->hash_regs[12] = &priv->regs->gaddr4; 302 priv->hash_regs[13] = &priv->regs->gaddr5; 303 priv->hash_regs[14] = &priv->regs->gaddr6; 304 priv->hash_regs[15] = &priv->regs->gaddr7; 305 306 } else { 307 priv->extended_hash = 0; 308 priv->hash_width = 8; 309 310 priv->hash_regs[0] = &priv->regs->gaddr0; 311 priv->hash_regs[1] = &priv->regs->gaddr1; 312 priv->hash_regs[2] = &priv->regs->gaddr2; 313 priv->hash_regs[3] = &priv->regs->gaddr3; 314 priv->hash_regs[4] = &priv->regs->gaddr4; 315 priv->hash_regs[5] = &priv->regs->gaddr5; 316 priv->hash_regs[6] = &priv->regs->gaddr6; 317 priv->hash_regs[7] = &priv->regs->gaddr7; 318 } 319 320 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_PADDING) 321 priv->padding = DEFAULT_PADDING; 322 else 323 priv->padding = 0; 324 325 if (dev->features & NETIF_F_IP_CSUM) 326 dev->hard_header_len += GMAC_FCB_LEN; 327 328 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; 329 priv->tx_ring_size = DEFAULT_TX_RING_SIZE; 330 priv->rx_ring_size = DEFAULT_RX_RING_SIZE; 331 332 priv->txcoalescing = DEFAULT_TX_COALESCE; 333 priv->txcount = DEFAULT_TXCOUNT; 334 priv->txtime = DEFAULT_TXTIME; 335 priv->rxcoalescing = DEFAULT_RX_COALESCE; 336 priv->rxcount = DEFAULT_RXCOUNT; 337 priv->rxtime = DEFAULT_RXTIME; 338 339 /* Enable most messages by default */ 340 
priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; 341 342 err = register_netdev(dev); 343 344 if (err) { 345 printk(KERN_ERR "%s: Cannot register net device, aborting.\n", 346 dev->name); 347 goto register_fail; 348 } 349 350 /* Create all the sysfs files */ 351 gfar_init_sysfs(dev); 352 353 /* Print out the device info */ 354 printk(KERN_INFO DEVICE_NAME "%s\n", 355 dev->name, print_mac(mac, dev->dev_addr)); 356 357 /* Even more device info helps when determining which kernel */ 358 /* provided which set of benchmarks. */ 359 printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); 360 printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n", 361 dev->name, priv->rx_ring_size, priv->tx_ring_size); 362 363 return 0; 364 365register_fail: 366 iounmap(priv->regs); 367regs_fail: 368 free_netdev(dev); 369 return err; 370} 371 372static int gfar_remove(struct platform_device *pdev) 373{ 374 struct net_device *dev = platform_get_drvdata(pdev); 375 struct gfar_private *priv = netdev_priv(dev); 376 377 platform_set_drvdata(pdev, NULL); 378 379 iounmap(priv->regs); 380 free_netdev(dev); 381 382 return 0; 383} 384 385#ifdef CONFIG_PM 386static int gfar_suspend(struct platform_device *pdev, pm_message_t state) 387{ 388 struct net_device *dev = platform_get_drvdata(pdev); 389 struct gfar_private *priv = netdev_priv(dev); 390 unsigned long flags; 391 u32 tempval; 392 393 int magic_packet = priv->wol_en && 394 (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 395 396 netif_device_detach(dev); 397 398 if (netif_running(dev)) { 399 spin_lock_irqsave(&priv->txlock, flags); 400 spin_lock(&priv->rxlock); 401 402 gfar_halt_nodisable(dev); 403 404 /* Disable Tx, and Rx if wake-on-LAN is disabled. 
*/ 405 tempval = gfar_read(&priv->regs->maccfg1); 406 407 tempval &= ~MACCFG1_TX_EN; 408 409 if (!magic_packet) 410 tempval &= ~MACCFG1_RX_EN; 411 412 gfar_write(&priv->regs->maccfg1, tempval); 413 414 spin_unlock(&priv->rxlock); 415 spin_unlock_irqrestore(&priv->txlock, flags); 416 417 napi_disable(&priv->napi); 418 419 if (magic_packet) { 420 /* Enable interrupt on Magic Packet */ 421 gfar_write(&priv->regs->imask, IMASK_MAG); 422 423 /* Enable Magic Packet mode */ 424 tempval = gfar_read(&priv->regs->maccfg2); 425 tempval |= MACCFG2_MPEN; 426 gfar_write(&priv->regs->maccfg2, tempval); 427 } else { 428 phy_stop(priv->phydev); 429 } 430 } 431 432 return 0; 433} 434 435static int gfar_resume(struct platform_device *pdev) 436{ 437 struct net_device *dev = platform_get_drvdata(pdev); 438 struct gfar_private *priv = netdev_priv(dev); 439 unsigned long flags; 440 u32 tempval; 441 int magic_packet = priv->wol_en && 442 (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 443 444 if (!netif_running(dev)) { 445 netif_device_attach(dev); 446 return 0; 447 } 448 449 if (!magic_packet && priv->phydev) 450 phy_start(priv->phydev); 451 452 /* Disable Magic Packet mode, in case something 453 * else woke us up. 454 */ 455 456 spin_lock_irqsave(&priv->txlock, flags); 457 spin_lock(&priv->rxlock); 458 459 tempval = gfar_read(&priv->regs->maccfg2); 460 tempval &= ~MACCFG2_MPEN; 461 gfar_write(&priv->regs->maccfg2, tempval); 462 463 gfar_start(dev); 464 465 spin_unlock(&priv->rxlock); 466 spin_unlock_irqrestore(&priv->txlock, flags); 467 468 netif_device_attach(dev); 469 470 napi_enable(&priv->napi); 471 472 return 0; 473} 474#else 475#define gfar_suspend NULL 476#define gfar_resume NULL 477#endif 478 479/* Reads the controller's registers to determine what interface 480 * connects it to the PHY. 
481 */ 482static phy_interface_t gfar_get_interface(struct net_device *dev) 483{ 484 struct gfar_private *priv = netdev_priv(dev); 485 u32 ecntrl = gfar_read(&priv->regs->ecntrl); 486 487 if (ecntrl & ECNTRL_SGMII_MODE) 488 return PHY_INTERFACE_MODE_SGMII; 489 490 if (ecntrl & ECNTRL_TBI_MODE) { 491 if (ecntrl & ECNTRL_REDUCED_MODE) 492 return PHY_INTERFACE_MODE_RTBI; 493 else 494 return PHY_INTERFACE_MODE_TBI; 495 } 496 497 if (ecntrl & ECNTRL_REDUCED_MODE) { 498 if (ecntrl & ECNTRL_REDUCED_MII_MODE) 499 return PHY_INTERFACE_MODE_RMII; 500 else { 501 phy_interface_t interface = priv->einfo->interface; 502 503 /* 504 * This isn't autodetected right now, so it must 505 * be set by the device tree or platform code. 506 */ 507 if (interface == PHY_INTERFACE_MODE_RGMII_ID) 508 return PHY_INTERFACE_MODE_RGMII_ID; 509 510 return PHY_INTERFACE_MODE_RGMII; 511 } 512 } 513 514 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) 515 return PHY_INTERFACE_MODE_GMII; 516 517 return PHY_INTERFACE_MODE_MII; 518} 519 520 521/* Initializes driver's PHY state, and attaches to the PHY. 522 * Returns 0 on success. 523 */ 524static int init_phy(struct net_device *dev) 525{ 526 struct gfar_private *priv = netdev_priv(dev); 527 uint gigabit_support = 528 priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ? 
529 SUPPORTED_1000baseT_Full : 0; 530 struct phy_device *phydev; 531 char phy_id[BUS_ID_SIZE]; 532 phy_interface_t interface; 533 534 priv->oldlink = 0; 535 priv->oldspeed = 0; 536 priv->oldduplex = -1; 537 538 snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, priv->einfo->bus_id, priv->einfo->phy_id); 539 540 interface = gfar_get_interface(dev); 541 542 phydev = phy_connect(dev, phy_id, &adjust_link, 0, interface); 543 544 if (interface == PHY_INTERFACE_MODE_SGMII) 545 gfar_configure_serdes(dev); 546 547 if (IS_ERR(phydev)) { 548 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); 549 return PTR_ERR(phydev); 550 } 551 552 /* Remove any features not supported by the controller */ 553 phydev->supported &= (GFAR_SUPPORTED | gigabit_support); 554 phydev->advertising = phydev->supported; 555 556 priv->phydev = phydev; 557 558 return 0; 559} 560 561/* 562 * Initialize TBI PHY interface for communicating with the 563 * SERDES lynx PHY on the chip. We communicate with this PHY 564 * through the MDIO bus on each controller, treating it as a 565 * "normal" PHY at the address found in the TBIPA register. We assume 566 * that the TBIPA register is valid. Either the MDIO bus code will set 567 * it to a value that doesn't conflict with other PHYs on the bus, or the 568 * value doesn't matter, as there are no other PHYs on the bus. 
569 */ 570static void gfar_configure_serdes(struct net_device *dev) 571{ 572 struct gfar_private *priv = netdev_priv(dev); 573 struct gfar_mii __iomem *regs = 574 (void __iomem *)&priv->regs->gfar_mii_regs; 575 int tbipa = gfar_read(&priv->regs->tbipa); 576 577 /* Single clk mode, mii mode off(for serdes communication) */ 578 gfar_local_mdio_write(regs, tbipa, MII_TBICON, TBICON_CLK_SELECT); 579 580 gfar_local_mdio_write(regs, tbipa, MII_ADVERTISE, 581 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | 582 ADVERTISE_1000XPSE_ASYM); 583 584 gfar_local_mdio_write(regs, tbipa, MII_BMCR, BMCR_ANENABLE | 585 BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000); 586} 587 588static void init_registers(struct net_device *dev) 589{ 590 struct gfar_private *priv = netdev_priv(dev); 591 592 /* Clear IEVENT */ 593 gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR); 594 595 /* Initialize IMASK */ 596 gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR); 597 598 /* Init hash registers to zero */ 599 gfar_write(&priv->regs->igaddr0, 0); 600 gfar_write(&priv->regs->igaddr1, 0); 601 gfar_write(&priv->regs->igaddr2, 0); 602 gfar_write(&priv->regs->igaddr3, 0); 603 gfar_write(&priv->regs->igaddr4, 0); 604 gfar_write(&priv->regs->igaddr5, 0); 605 gfar_write(&priv->regs->igaddr6, 0); 606 gfar_write(&priv->regs->igaddr7, 0); 607 608 gfar_write(&priv->regs->gaddr0, 0); 609 gfar_write(&priv->regs->gaddr1, 0); 610 gfar_write(&priv->regs->gaddr2, 0); 611 gfar_write(&priv->regs->gaddr3, 0); 612 gfar_write(&priv->regs->gaddr4, 0); 613 gfar_write(&priv->regs->gaddr5, 0); 614 gfar_write(&priv->regs->gaddr6, 0); 615 gfar_write(&priv->regs->gaddr7, 0); 616 617 /* Zero out the rmon mib registers if it has them */ 618 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { 619 memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib)); 620 621 /* Mask off the CAM interrupts */ 622 gfar_write(&priv->regs->rmon.cam1, 0xffffffff); 623 gfar_write(&priv->regs->rmon.cam2, 0xffffffff); 624 } 625 626 /* Initialize 
the max receive buffer length */ 627 gfar_write(&priv->regs->mrblr, priv->rx_buffer_size); 628 629 /* Initialize the Minimum Frame Length Register */ 630 gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS); 631} 632 633 634/* Halt the receive and transmit queues */ 635static void gfar_halt_nodisable(struct net_device *dev) 636{ 637 struct gfar_private *priv = netdev_priv(dev); 638 struct gfar __iomem *regs = priv->regs; 639 u32 tempval; 640 641 /* Mask all interrupts */ 642 gfar_write(&regs->imask, IMASK_INIT_CLEAR); 643 644 /* Clear all interrupts */ 645 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR); 646 647 /* Stop the DMA, and wait for it to stop */ 648 tempval = gfar_read(&priv->regs->dmactrl); 649 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) 650 != (DMACTRL_GRS | DMACTRL_GTS)) { 651 tempval |= (DMACTRL_GRS | DMACTRL_GTS); 652 gfar_write(&priv->regs->dmactrl, tempval); 653 654 while (!(gfar_read(&priv->regs->ievent) & 655 (IEVENT_GRSC | IEVENT_GTSC))) 656 cpu_relax(); 657 } 658} 659 660/* Halt the receive and transmit queues */ 661void gfar_halt(struct net_device *dev) 662{ 663 struct gfar_private *priv = netdev_priv(dev); 664 struct gfar __iomem *regs = priv->regs; 665 u32 tempval; 666 667 gfar_halt_nodisable(dev); 668 669 /* Disable Rx and Tx */ 670 tempval = gfar_read(&regs->maccfg1); 671 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); 672 gfar_write(&regs->maccfg1, tempval); 673} 674 675void stop_gfar(struct net_device *dev) 676{ 677 struct gfar_private *priv = netdev_priv(dev); 678 struct gfar __iomem *regs = priv->regs; 679 unsigned long flags; 680 681 phy_stop(priv->phydev); 682 683 /* Lock it down */ 684 spin_lock_irqsave(&priv->txlock, flags); 685 spin_lock(&priv->rxlock); 686 687 gfar_halt(dev); 688 689 spin_unlock(&priv->rxlock); 690 spin_unlock_irqrestore(&priv->txlock, flags); 691 692 /* Free the IRQs */ 693 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 694 free_irq(priv->interruptError, dev); 695 free_irq(priv->interruptTransmit, 
dev); 696 free_irq(priv->interruptReceive, dev); 697 } else { 698 free_irq(priv->interruptTransmit, dev); 699 } 700 701 free_skb_resources(priv); 702 703 dma_free_coherent(&dev->dev, 704 sizeof(struct txbd8)*priv->tx_ring_size 705 + sizeof(struct rxbd8)*priv->rx_ring_size, 706 priv->tx_bd_base, 707 gfar_read(&regs->tbase0)); 708} 709 710/* If there are any tx skbs or rx skbs still around, free them. 711 * Then free tx_skbuff and rx_skbuff */ 712static void free_skb_resources(struct gfar_private *priv) 713{ 714 struct rxbd8 *rxbdp; 715 struct txbd8 *txbdp; 716 int i; 717 718 /* Go through all the buffer descriptors and free their data buffers */ 719 txbdp = priv->tx_bd_base; 720 721 for (i = 0; i < priv->tx_ring_size; i++) { 722 723 if (priv->tx_skbuff[i]) { 724 dma_unmap_single(&priv->dev->dev, txbdp->bufPtr, 725 txbdp->length, 726 DMA_TO_DEVICE); 727 dev_kfree_skb_any(priv->tx_skbuff[i]); 728 priv->tx_skbuff[i] = NULL; 729 } 730 731 txbdp++; 732 } 733 734 kfree(priv->tx_skbuff); 735 736 rxbdp = priv->rx_bd_base; 737 738 /* rx_skbuff is not guaranteed to be allocated, so only 739 * free it and its contents if it is allocated */ 740 if(priv->rx_skbuff != NULL) { 741 for (i = 0; i < priv->rx_ring_size; i++) { 742 if (priv->rx_skbuff[i]) { 743 dma_unmap_single(&priv->dev->dev, rxbdp->bufPtr, 744 priv->rx_buffer_size, 745 DMA_FROM_DEVICE); 746 747 dev_kfree_skb_any(priv->rx_skbuff[i]); 748 priv->rx_skbuff[i] = NULL; 749 } 750 751 rxbdp->status = 0; 752 rxbdp->length = 0; 753 rxbdp->bufPtr = 0; 754 755 rxbdp++; 756 } 757 758 kfree(priv->rx_skbuff); 759 } 760} 761 762void gfar_start(struct net_device *dev) 763{ 764 struct gfar_private *priv = netdev_priv(dev); 765 struct gfar __iomem *regs = priv->regs; 766 u32 tempval; 767 768 /* Enable Rx and Tx in MACCFG1 */ 769 tempval = gfar_read(&regs->maccfg1); 770 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN); 771 gfar_write(&regs->maccfg1, tempval); 772 773 /* Initialize DMACTRL to have WWR and WOP */ 774 tempval = 
gfar_read(&priv->regs->dmactrl); 775 tempval |= DMACTRL_INIT_SETTINGS; 776 gfar_write(&priv->regs->dmactrl, tempval); 777 778 /* Make sure we aren't stopped */ 779 tempval = gfar_read(&priv->regs->dmactrl); 780 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); 781 gfar_write(&priv->regs->dmactrl, tempval); 782 783 /* Clear THLT/RHLT, so that the DMA starts polling now */ 784 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT); 785 gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT); 786 787 /* Unmask the interrupts we look for */ 788 gfar_write(&regs->imask, IMASK_DEFAULT); 789} 790 791/* Bring the controller up and running */ 792int startup_gfar(struct net_device *dev) 793{ 794 struct txbd8 *txbdp; 795 struct rxbd8 *rxbdp; 796 dma_addr_t addr = 0; 797 unsigned long vaddr; 798 int i; 799 struct gfar_private *priv = netdev_priv(dev); 800 struct gfar __iomem *regs = priv->regs; 801 int err = 0; 802 u32 rctrl = 0; 803 u32 attrs = 0; 804 805 gfar_write(&regs->imask, IMASK_INIT_CLEAR); 806 807 /* Allocate memory for the buffer descriptors */ 808 vaddr = (unsigned long) dma_alloc_coherent(&dev->dev, 809 sizeof (struct txbd8) * priv->tx_ring_size + 810 sizeof (struct rxbd8) * priv->rx_ring_size, 811 &addr, GFP_KERNEL); 812 813 if (vaddr == 0) { 814 if (netif_msg_ifup(priv)) 815 printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n", 816 dev->name); 817 return -ENOMEM; 818 } 819 820 priv->tx_bd_base = (struct txbd8 *) vaddr; 821 822 /* enet DMA only understands physical addresses */ 823 gfar_write(&regs->tbase0, addr); 824 825 /* Start the rx descriptor ring where the tx ring leaves off */ 826 addr = addr + sizeof (struct txbd8) * priv->tx_ring_size; 827 vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size; 828 priv->rx_bd_base = (struct rxbd8 *) vaddr; 829 gfar_write(&regs->rbase0, addr); 830 831 /* Setup the skbuff rings */ 832 priv->tx_skbuff = 833 (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) * 834 priv->tx_ring_size, GFP_KERNEL); 835 836 if (NULL == priv->tx_skbuff) { 
837 if (netif_msg_ifup(priv)) 838 printk(KERN_ERR "%s: Could not allocate tx_skbuff\n", 839 dev->name); 840 err = -ENOMEM; 841 goto tx_skb_fail; 842 } 843 844 for (i = 0; i < priv->tx_ring_size; i++) 845 priv->tx_skbuff[i] = NULL; 846 847 priv->rx_skbuff = 848 (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) * 849 priv->rx_ring_size, GFP_KERNEL); 850 851 if (NULL == priv->rx_skbuff) { 852 if (netif_msg_ifup(priv)) 853 printk(KERN_ERR "%s: Could not allocate rx_skbuff\n", 854 dev->name); 855 err = -ENOMEM; 856 goto rx_skb_fail; 857 } 858 859 for (i = 0; i < priv->rx_ring_size; i++) 860 priv->rx_skbuff[i] = NULL; 861 862 /* Initialize some variables in our dev structure */ 863 priv->dirty_tx = priv->cur_tx = priv->tx_bd_base; 864 priv->cur_rx = priv->rx_bd_base; 865 priv->skb_curtx = priv->skb_dirtytx = 0; 866 priv->skb_currx = 0; 867 868 /* Initialize Transmit Descriptor Ring */ 869 txbdp = priv->tx_bd_base; 870 for (i = 0; i < priv->tx_ring_size; i++) { 871 txbdp->status = 0; 872 txbdp->length = 0; 873 txbdp->bufPtr = 0; 874 txbdp++; 875 } 876 877 /* Set the last descriptor in the ring to indicate wrap */ 878 txbdp--; 879 txbdp->status |= TXBD_WRAP; 880 881 rxbdp = priv->rx_bd_base; 882 for (i = 0; i < priv->rx_ring_size; i++) { 883 struct sk_buff *skb; 884 885 skb = gfar_new_skb(dev); 886 887 if (!skb) { 888 printk(KERN_ERR "%s: Can't allocate RX buffers\n", 889 dev->name); 890 891 goto err_rxalloc_fail; 892 } 893 894 priv->rx_skbuff[i] = skb; 895 896 gfar_new_rxbdp(dev, rxbdp, skb); 897 898 rxbdp++; 899 } 900 901 /* Set the last descriptor in the ring to wrap */ 902 rxbdp--; 903 rxbdp->status |= RXBD_WRAP; 904 905 /* If the device has multiple interrupts, register for 906 * them. 
Otherwise, only register for the one */ 907 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 908 /* Install our interrupt handlers for Error, 909 * Transmit, and Receive */ 910 if (request_irq(priv->interruptError, gfar_error, 911 0, "enet_error", dev) < 0) { 912 if (netif_msg_intr(priv)) 913 printk(KERN_ERR "%s: Can't get IRQ %d\n", 914 dev->name, priv->interruptError); 915 916 err = -1; 917 goto err_irq_fail; 918 } 919 920 if (request_irq(priv->interruptTransmit, gfar_transmit, 921 0, "enet_tx", dev) < 0) { 922 if (netif_msg_intr(priv)) 923 printk(KERN_ERR "%s: Can't get IRQ %d\n", 924 dev->name, priv->interruptTransmit); 925 926 err = -1; 927 928 goto tx_irq_fail; 929 } 930 931 if (request_irq(priv->interruptReceive, gfar_receive, 932 0, "enet_rx", dev) < 0) { 933 if (netif_msg_intr(priv)) 934 printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n", 935 dev->name, priv->interruptReceive); 936 937 err = -1; 938 goto rx_irq_fail; 939 } 940 } else { 941 if (request_irq(priv->interruptTransmit, gfar_interrupt, 942 0, "gfar_interrupt", dev) < 0) { 943 if (netif_msg_intr(priv)) 944 printk(KERN_ERR "%s: Can't get IRQ %d\n", 945 dev->name, priv->interruptError); 946 947 err = -1; 948 goto err_irq_fail; 949 } 950 } 951 952 phy_start(priv->phydev); 953 954 /* Configure the coalescing support */ 955 if (priv->txcoalescing) 956 gfar_write(&regs->txic, 957 mk_ic_value(priv->txcount, priv->txtime)); 958 else 959 gfar_write(&regs->txic, 0); 960 961 if (priv->rxcoalescing) 962 gfar_write(&regs->rxic, 963 mk_ic_value(priv->rxcount, priv->rxtime)); 964 else 965 gfar_write(&regs->rxic, 0); 966 967 if (priv->rx_csum_enable) 968 rctrl |= RCTRL_CHECKSUMMING; 969 970 if (priv->extended_hash) { 971 rctrl |= RCTRL_EXTHASH; 972 973 gfar_clear_exact_match(dev); 974 rctrl |= RCTRL_EMEN; 975 } 976 977 if (priv->vlan_enable) 978 rctrl |= RCTRL_VLAN; 979 980 if (priv->padding) { 981 rctrl &= ~RCTRL_PAL_MASK; 982 rctrl |= RCTRL_PADDING(priv->padding); 983 } 984 985 /* Init rctrl 
based on our settings */ 986 gfar_write(&priv->regs->rctrl, rctrl); 987 988 if (dev->features & NETIF_F_IP_CSUM) 989 gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM); 990 991 /* Set the extraction length and index */ 992 attrs = ATTRELI_EL(priv->rx_stash_size) | 993 ATTRELI_EI(priv->rx_stash_index); 994 995 gfar_write(&priv->regs->attreli, attrs); 996 997 /* Start with defaults, and add stashing or locking 998 * depending on the approprate variables */ 999 attrs = ATTR_INIT_SETTINGS; 1000 1001 if (priv->bd_stash_en) 1002 attrs |= ATTR_BDSTASH; 1003 1004 if (priv->rx_stash_size != 0) 1005 attrs |= ATTR_BUFSTASH; 1006 1007 gfar_write(&priv->regs->attr, attrs); 1008 1009 gfar_write(&priv->regs->fifo_tx_thr, priv->fifo_threshold); 1010 gfar_write(&priv->regs->fifo_tx_starve, priv->fifo_starve); 1011 gfar_write(&priv->regs->fifo_tx_starve_shutoff, priv->fifo_starve_off); 1012 1013 /* Start the controller */ 1014 gfar_start(dev); 1015 1016 return 0; 1017 1018rx_irq_fail: 1019 free_irq(priv->interruptTransmit, dev); 1020tx_irq_fail: 1021 free_irq(priv->interruptError, dev); 1022err_irq_fail: 1023err_rxalloc_fail: 1024rx_skb_fail: 1025 free_skb_resources(priv); 1026tx_skb_fail: 1027 dma_free_coherent(&dev->dev, 1028 sizeof(struct txbd8)*priv->tx_ring_size 1029 + sizeof(struct rxbd8)*priv->rx_ring_size, 1030 priv->tx_bd_base, 1031 gfar_read(&regs->tbase0)); 1032 1033 return err; 1034} 1035 1036/* Called when something needs to use the ethernet device */ 1037/* Returns 0 for success. 
*/
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	napi_enable(&priv->napi);

	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);

	/* NAPI was enabled above; it must be disabled again on every
	 * failure path, since gfar_close() will not run if open fails */
	if(err) {
		napi_disable(&priv->napi);
		return err;
	}

	err = startup_gfar(dev);
	if (err) {
		napi_disable(&priv->napi);
		return err;
	}

	netif_start_queue(dev);

	return err;
}

/* Prepend a zeroed Frame Control Block to the skb and return a
 * pointer to it.  The bdp argument is unused here. */
static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb, struct txbd8 *bdp)
{
	struct txfcb *fcb = (struct txfcb *)skb_push (skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}

/* Fill in the TX FCB fields the controller needs for checksum
 * offload.  The caller (gfar_start_xmit) only invokes this when
 * skb->ip_summed == CHECKSUM_PARTIAL. */
static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
{
	u8 flags = 0;

	/* If we're here, it's a IP packet with a TCP or UDP
	 * payload.  We set it to checksum, using a pseudo-header
	 * we provide
	 */
	flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is */
	/* And provide the already calculated phcs */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = udp_hdr(skb)->check;
	} else
		fcb->phcs = tcp_hdr(skb)->check;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr */
	fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}

/* Record the skb's VLAN tag in the FCB so the hardware inserts
 * it on transmit */
void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = vlan_tx_tag_get(skb);
}

/* This is called by the kernel when a frame is ready for transmission.
*/
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp;
	u16 status;
	unsigned long flags;

	/* Update transmit stats */
	dev->stats.tx_bytes += skb->len;

	/* Lock priv now */
	spin_lock_irqsave(&priv->txlock, flags);

	/* Point at the first free tx descriptor */
	txbdp = priv->cur_tx;

	/* Clear all but the WRAP status flags */
	status = txbdp->status & TXBD_WRAP;

	/* Set up checksumming */
	if (likely((dev->features & NETIF_F_IP_CSUM)
			&& (CHECKSUM_PARTIAL == skb->ip_summed))) {
		fcb = gfar_add_fcb(skb, txbdp);
		status |= TXBD_TOE;
		gfar_tx_checksum(skb, fcb);
	}

	/* VLAN tag insertion shares the FCB with checksum offload;
	 * only add a new FCB if checksumming didn't already */
	if (priv->vlan_enable &&
			unlikely(priv->vlgrp && vlan_tx_tag_present(skb))) {
		if (unlikely(NULL == fcb)) {
			fcb = gfar_add_fcb(skb, txbdp);
			status |= TXBD_TOE;
		}

		gfar_tx_vlan(skb, fcb);
	}

	/* Set buffer length and pointer */
	txbdp->length = skb->len;
	txbdp->bufPtr = dma_map_single(&dev->dev, skb->data,
			skb->len, DMA_TO_DEVICE);

	/* Save the skb pointer so we can free it later */
	priv->tx_skbuff[priv->skb_curtx] = skb;

	/* Update the current skb pointer (wrapping if this was the last) */
	priv->skb_curtx =
	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);

	/* Flag the BD as interrupt-causing */
	status |= TXBD_INTERRUPT;

	/* Flag the BD as ready to go, last in frame, and */
	/* in need of CRC */
	status |= (TXBD_READY | TXBD_LAST | TXBD_CRC);

	dev->trans_start = jiffies;

	/* The powerpc-specific eieio() is used, as wmb() has too strong
	 * semantics (it requires synchronization between cacheable and
	 * uncacheable mappings, which eieio doesn't provide and which we
	 * don't need), thus requiring a more expensive sync instruction.  At
	 * some point, the set of architecture-independent barrier functions
	 * should be expanded to include weaker barriers.
	 */

	/* The barrier must precede the status write: setting TXBD_READY
	 * hands the descriptor to hardware, so length/bufPtr must be
	 * visible first */
	eieio();
	txbdp->status = status;

	/* If this was the last BD in the ring, the next one */
	/* is at the beginning of the ring */
	if (txbdp->status & TXBD_WRAP)
		txbdp = priv->tx_bd_base;
	else
		txbdp++;

	/* If the next BD still needs to be cleaned up, then the bds
	   are full.  We need to tell the kernel to stop sending us stuff. */
	if (txbdp == priv->dirty_tx) {
		netif_stop_queue(dev);

		dev->stats.tx_fifo_errors++;
	}

	/* Update the current txbd to the next one */
	priv->cur_tx = txbdp;

	/* Tell the DMA to go go go */
	gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);

	/* Unlock priv */
	spin_unlock_irqrestore(&priv->txlock, flags);

	return 0;
}

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	napi_disable(&priv->napi);

	/* Make sure a queued gfar_reset_task cannot run concurrently
	 * with (or after) the teardown below */
	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	netif_stop_queue(dev);

	return 0;
}

/* Changes the mac address if the controller is not running.
*/
static int gfar_set_mac_address(struct net_device *dev)
{
	/* Program station address pair 0 with dev->dev_addr */
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}


/* Enables and disables VLAN insertion/extraction */
static void gfar_vlan_rx_register(struct net_device *dev,
		struct vlan_group *grp)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	u32 tempval;

	spin_lock_irqsave(&priv->rxlock, flags);

	priv->vlgrp = grp;

	if (grp) {
		/* Enable VLAN tag insertion */
		tempval = gfar_read(&priv->regs->tctrl);
		tempval |= TCTRL_VLINS;

		gfar_write(&priv->regs->tctrl, tempval);

		/* Enable VLAN tag extraction */
		tempval = gfar_read(&priv->regs->rctrl);
		tempval |= RCTRL_VLEX;
		gfar_write(&priv->regs->rctrl, tempval);
	} else {
		/* Disable VLAN tag insertion */
		tempval = gfar_read(&priv->regs->tctrl);
		tempval &= ~TCTRL_VLINS;
		gfar_write(&priv->regs->tctrl, tempval);

		/* Disable VLAN tag extraction */
		tempval = gfar_read(&priv->regs->rctrl);
		tempval &= ~RCTRL_VLEX;
		gfar_write(&priv->regs->rctrl, tempval);
	}

	spin_unlock_irqrestore(&priv->rxlock, flags);
}

/* Validate the requested MTU, derive the rx buffer size from it,
 * and reprogram the controller (restarting it if it was running
 * and the buffer size actually changed) */
static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	int tempsize, tempval;
	struct gfar_private *priv = netdev_priv(dev);
	int oldsize = priv->rx_buffer_size;
	int frame_size = new_mtu + ETH_HLEN;

	/* Account for the extra on-wire/in-buffer bytes the hardware
	 * adds: VLAN header, Frame Control Block, rx padding */
	if (priv->vlan_enable)
		frame_size += VLAN_HLEN;

	if (gfar_uses_fcb(priv))
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: Invalid MTU setting\n",
					dev->name);
		return -EINVAL;
	}

	/* Round the buffer size up to the next multiple of
	 * INCREMENTAL_BUFFER_SIZE */
	tempsize =
	    (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
	    INCREMENTAL_BUFFER_SIZE;

	/* Only stop and start the controller if it isn't already
	 * stopped, and we changed something */
	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		stop_gfar(dev);

	priv->rx_buffer_size = tempsize;

	dev->mtu = new_mtu;

	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
	gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length */
	tempval = gfar_read(&priv->regs->maccfg2);

	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
	else
		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

	gfar_write(&priv->regs->maccfg2, tempval);

	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		startup_gfar(dev);

	return 0;
}

/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
*/
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
			reset_task);
	struct net_device *dev = priv->dev;

	/* Only reset if the interface is actually up */
	if (dev->flags & IFF_UP) {
		stop_gfar(dev);
		startup_gfar(dev);
	}

	netif_tx_schedule_all(dev);
}

/* Watchdog timeout entry point: count the error and defer the
 * actual reset to process context (gfar_reset_task) */
static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}

/* Interrupt Handler for Transmit complete.  Walks the tx ring from
 * dirty_tx, freeing skbs for completed descriptors.  Callers hold
 * priv->txlock.  Returns the number of descriptors cleaned. */
static int gfar_clean_tx_ring(struct net_device *dev)
{
	struct txbd8 *bdp;
	struct gfar_private *priv = netdev_priv(dev);
	int howmany = 0;

	bdp = priv->dirty_tx;
	while ((bdp->status & TXBD_READY) == 0) {
		/* If dirty_tx and cur_tx are the same, then either the */
		/* ring is empty or full now (it could only be full in the beginning, */
		/* obviously).  If it is empty, we are done. */
		if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
			break;

		howmany++;

		/* Deferred means some collisions occurred during transmit, */
		/* but we eventually sent the packet. */
		if (bdp->status & TXBD_DEF)
			dev->stats.collisions++;

		/* Free the sk buffer associated with this TxBD */
		dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);

		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
		priv->skb_dirtytx =
		    (priv->skb_dirtytx +
		     1) & TX_RING_MOD_MASK(priv->tx_ring_size);

		/* Clean BD length for empty detection */
		bdp->length = 0;

		/* update bdp to point at next bd in the ring (wrapping if necessary) */
		if (bdp->status & TXBD_WRAP)
			bdp = priv->tx_bd_base;
		else
			bdp++;

		/* Move dirty_tx to be the next bd */
		priv->dirty_tx = bdp;

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	} /* while ((bdp->status & TXBD_READY) == 0) */

	dev->stats.tx_packets += howmany;

	return howmany;
}

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);

	/* Lock priv */
	spin_lock(&priv->txlock);

	gfar_clean_tx_ring(dev);

	/* If we are coalescing the interrupts, reset the timer */
	/* Otherwise, clear it */
	if (likely(priv->txcoalescing)) {
		gfar_write(&priv->regs->txic, 0);
		gfar_write(&priv->regs->txic,
				mk_ic_value(priv->txcount, priv->txtime));
	}

	spin_unlock(&priv->txlock);

	return IRQ_HANDLED;
}

/* Map a fresh rx skb into the given buffer descriptor and hand the
 * descriptor back to the hardware */
static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
		struct sk_buff *skb)
{
	struct gfar_private *priv = netdev_priv(dev);
	u32 * status_len = (u32 *)bdp;
	u16 flags;

	bdp->bufPtr = dma_map_single(&dev->dev, skb->data,
			priv->rx_buffer_size, DMA_FROM_DEVICE);

	flags = RXBD_EMPTY | RXBD_INTERRUPT;

	if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1)
		flags |= RXBD_WRAP;

	/* Barrier before marking the descriptor EMPTY: bufPtr must be
	 * visible before hardware can claim the descriptor.  Status and
	 * length are written together as one 32-bit store, with the
	 * length half zeroed. */
	eieio();

	*status_len = (u32)flags << 16;
}


/* Allocate an rx skb with enough extra headroom to align the data
 * buffer to RXBUF_ALIGNMENT.  Returns NULL on allocation failure. */
struct sk_buff * gfar_new_skb(struct net_device *dev)
{
	unsigned int alignamount;
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;

	/* We have to allocate the skb, so keep trying till we succeed */
	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);

	if (!skb)
		return NULL;

	alignamount = RXBUF_ALIGNMENT -
		(((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1));

	/* We need the data buffer to be aligned properly.  We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb, alignamount);

	return skb;
}

/* Translate the error bits of an rx descriptor status word into
 * the standard and driver-private statistics counters */
static inline void count_errors(unsigned short status, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors
	 * matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

		estats->rx_trunc++;

		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			estats->rx_large++;
		else
			estats->rx_short++;
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		estats->rx_nonoctet++;
	}
	if (status & RXBD_CRCERR) {
		estats->rx_crcerr++;
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		estats->rx_overrun++;
		stats->rx_crc_errors++;
	}
}

/* RX interrupt handler: mask rx/tx interrupts and schedule the NAPI
 * poll routine, which does the actual ring processing */
irqreturn_t gfar_receive(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct gfar_private *priv = netdev_priv(dev);
	u32 tempval;

	/* support NAPI */
	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived */
	gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);

	if (netif_rx_schedule_prep(dev, &priv->napi)) {
		/* Mask rx/tx interrupts until the poll routine
		 * re-enables them */
		tempval = gfar_read(&priv->regs->imask);
		tempval &= IMASK_RTX_DISABLED;
		gfar_write(&priv->regs->imask, tempval);

		__netif_rx_schedule(dev, &priv->napi);
	} else {
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
					dev->name, gfar_read(&priv->regs->ievent),
					gfar_read(&priv->regs->imask));
	}

	return IRQ_HANDLED;
}

/* Mark the skb's checksum state from the rx Frame Control Block */
static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary.  Otherwise, it is */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
}


/* Return the Frame Control Block at the head of the skb's data,
 * stripping it from the packet */
static inline struct rxfcb *gfar_get_fcb(struct sk_buff *skb)
{
	struct rxfcb *fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb */
	skb_pull(skb, GMAC_FCB_LEN);

	return fcb;
}

/* gfar_process_frame() -- handle one incoming packet if skb
 * isn't NULL.
*/
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
		int length)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	if (NULL == skb) {
		if (netif_msg_rx_err(priv))
			printk(KERN_WARNING "%s: Missing skb!!.\n", dev->name);
		dev->stats.rx_dropped++;
		priv->extra_stats.rx_skbmissing++;
	} else {
		int ret;

		/* Prep the skb for the packet */
		skb_put(skb, length);

		/* Grab the FCB if there is one */
		if (gfar_uses_fcb(priv))
			fcb = gfar_get_fcb(skb);

		/* Remove the padded bytes, if there are any */
		if (priv->padding)
			skb_pull(skb, priv->padding);

		if (priv->rx_csum_enable)
			gfar_rx_checksum(skb, fcb);

		/* Tell the skb what kind of packet this is */
		skb->protocol = eth_type_trans(skb, dev);

		/* Send the packet up the stack */
		/* NOTE(review): fcb is only non-NULL when gfar_uses_fcb();
		 * this branch presumably relies on vlgrp being set only
		 * when VLAN (and hence the FCB) is enabled -- confirm */
		if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN))) {
			ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp,
					fcb->vlctl);
		} else
			ret = netif_receive_skb(skb);

		if (NET_RX_DROP == ret)
			priv->extra_stats.kernel_dropped++;
	}

	return 0;
}

/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached.  Returns the number
 * of frames handled
 */
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
{
	struct rxbd8 *bdp;
	struct sk_buff *skb;
	u16 pkt_len;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = priv->cur_rx;

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		struct sk_buff *newskb;
		/* Read barrier: don't load descriptor fields until after
		 * the EMPTY bit has been observed clear */
		rmb();

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev);

		skb = priv->rx_skbuff[priv->skb_currx];

		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
				bdp->status & RXBD_ERR)) {
			count_errors(bdp->status, dev);

			/* Recycle the old skb as the "new" one so the
			 * ring always stays populated */
			if (unlikely(!newskb))
				newskb = skb;

			if (skb) {
				dma_unmap_single(&priv->dev->dev,
						bdp->bufPtr,
						priv->rx_buffer_size,
						DMA_FROM_DEVICE);

				dev_kfree_skb_any(skb);
			}
		} else {
			/* Increment the number of packets */
			dev->stats.rx_packets++;
			howmany++;

			/* Remove the FCS from the packet length */
			pkt_len = bdp->length - 4;

			gfar_process_frame(dev, skb, pkt_len);

			dev->stats.rx_bytes += pkt_len;
		}

		dev->last_rx = jiffies;

		priv->rx_skbuff[priv->skb_currx] = newskb;

		/* Setup the new bdp */
		gfar_new_rxbdp(dev, bdp, newskb);

		/* Update to the next pointer */
		if (bdp->status & RXBD_WRAP)
			bdp = priv->rx_bd_base;
		else
			bdp++;

		/* update to point at the next skb */
		priv->skb_currx =
		    (priv->skb_currx + 1) &
		    RX_RING_MOD_MASK(priv->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	priv->cur_rx = bdp;

	return howmany;
}

/* NAPI poll routine: opportunistically clean the tx ring, process up
 * to budget rx frames, and re-enable interrupts when done */
static int gfar_poll(struct napi_struct *napi, int budget)
{
	struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
	struct net_device *dev = priv->dev;
	int howmany;
	unsigned long flags;

	/* If we fail to get the lock, don't bother with the TX BDs */
	if (spin_trylock_irqsave(&priv->txlock, flags)) {
		gfar_clean_tx_ring(dev);
		spin_unlock_irqrestore(&priv->txlock, flags);
	}

	howmany = gfar_clean_rx_ring(dev, budget);

	/* Only complete NAPI if we used less than our full budget */
	if (howmany < budget) {
		netif_rx_complete(dev, napi);

		/* Clear the halt bit in RSTAT */
		gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);

		gfar_write(&priv->regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer */
		/* Otherwise, clear it */
		if (likely(priv->rxcoalescing)) {
			gfar_write(&priv->regs->rxic, 0);
			gfar_write(&priv->regs->rxic,
					mk_ic_value(priv->rxcount, priv->rxtime));
		}
	}

	return howmany;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
*/
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Disable all three IRQs, run the combined handler once,
		 * then re-enable in the reverse order */
		disable_irq(priv->interruptTransmit);
		disable_irq(priv->interruptReceive);
		disable_irq(priv->interruptError);
		gfar_interrupt(priv->interruptTransmit, dev);
		enable_irq(priv->interruptError);
		enable_irq(priv->interruptReceive);
		enable_irq(priv->interruptTransmit);
	} else {
		disable_irq(priv->interruptTransmit);
		gfar_interrupt(priv->interruptTransmit, dev);
		enable_irq(priv->interruptTransmit);
	}
}
#endif

/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Save ievent for future reference */
	u32 events = gfar_read(&priv->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, dev_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, dev_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, dev_id);

	return IRQ_HANDLED;
}

/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	unsigned long flags;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;

	spin_lock_irqsave(&priv->txlock, flags);
	if (phydev->link) {
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				if (netif_msg_link(priv))
					printk(KERN_WARNING
						"%s: Ack!  Speed (%d) is not 10/100/1000!\n",
						dev->name, phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		/* Link went down: remember that, and forget the old
		 * speed/duplex so the next link-up reprograms them */
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	spin_unlock_irqrestore(&priv->txlock, flags);
}

/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed */
static void gfar_set_multi(struct net_device *dev)
{
	struct dev_mc_list *mc_ptr;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	if(dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if(dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if(dev->mc_count == 0)
			return;

		/* Parse the list, and set the appropriate bits */
		/* The first em_num addresses use the exact-match
		 * registers; the rest fall back to the hash table */
		for(mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx,
						mc_ptr->dmi_addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
		}
	}

	return;
}


/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	u8 zero_arr[MAC_ADDR_LEN] = {0,0,0,0,0,0};

	/* Entry 0 is the station address; only clear entries 1..GFAR_EM_NUM */
	for(idx = 1;idx < GFAR_EM_NUM + 1;idx++)
		gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like
so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255.  This means that the 3 most significant bits in the
 * hash index which gaddr register to use, and the 5 other bits
 * indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry. */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(MAC_ADDR_LEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31-whichbit));

	/* Read-modify-write: set the hash bit without disturbing
	 * bits set for other addresses */
	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);

	return;
}


/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	int idx;
	char tmpbuf[MAC_ADDR_LEN];
	u32 tempval;
	u32 __iomem *macptr = &priv->regs->macstnaddr1;

	/* Each exact-match entry is a pair of 32-bit registers */
	macptr += num*2;

	/* Now copy it into the mac registers backwards, cuz */
	/* little endian is silly */
	for (idx = 0; idx < MAC_ADDR_LEN; idx++)
		tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];

	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
}

/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	/* Save ievent for future reference */
	u32 events = gfar_read(&priv->regs->ievent);

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	/* Hmm... */
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
				dev->name, events, gfar_read(&priv->regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			if (netif_msg_tx_err(priv))
				printk(KERN_DEBUG "%s: TX FIFO underrun, "
						"packet dropped.\n", dev->name);
			dev->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			/* Reactivate the Tx Queues */
			gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
		}
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		/* Descriptor shortage: run the rx path to free some up */
		gfar_receive(irq, dev_id);

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
					dev->name, gfar_read(&priv->regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: bus error\n", dev->name);
	}
	if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
		printk(KERN_DEBUG "%s: control frame\n", dev->name);

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
	}
	return IRQ_HANDLED;
}

/* work with hotplug and coldplug */
MODULE_ALIAS("platform:fsl-gianfar");

/* Structure for a device driver */
static struct platform_driver gfar_driver = {
	.probe = gfar_probe,
	.remove = gfar_remove,
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.driver	= {
		.name = "fsl-gianfar",
		.owner = THIS_MODULE,
	},
};

/* Module init: bring up the MDIO bus first, then register the
 * platform driver; unwind the MDIO setup if registration fails */
static int __init gfar_init(void)
{
	int err = gfar_mdio_init();

	if (err)
		return err;

	err = platform_driver_register(&gfar_driver);

	if (err)
		gfar_mdio_exit();

	return err;
}

/* Module exit: tear down in the reverse order of gfar_init() */
static void __exit gfar_exit(void)
{
	platform_driver_unregister(&gfar_driver);
	gfar_mdio_exit();
}

module_init(gfar_init);
module_exit(gfar_exit);