Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.36-rc2, 1428 lines, 36 kB
/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * Right now, I am very wasteful with the buffers.  I allocate memory
 * pages and then divide them into 2K frame buffers.  This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/fec.h>

#include <asm/cacheflush.h>

#ifndef CONFIG_ARCH_MXC
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#endif

#include "fec.h"

#ifdef CONFIG_ARCH_MXC
#include <mach/hardware.h>
#define FEC_ALIGNMENT	0xf
#else
#define FEC_ALIGNMENT	0x3
#endif

/*
 * Define the fixed address of the FEC hardware.
 */
#if defined(CONFIG_M5272)

static unsigned char fec_mac_default[] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/*
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero then assume it is the address to get the MAC from.
 */
#if defined(CONFIG_NETtel)
#define FEC_FLASHMAC	0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define FEC_FLASHMAC	0xf0006000
#elif defined(CONFIG_CANCam)
#define FEC_FLASHMAC	0xf0020000
#elif defined(CONFIG_M5272C3)
#define FEC_FLASHMAC	(0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define FEC_FLASHMAC	0xffc0406b
#else
#define FEC_FLASHMAC	0
#endif
#endif /* CONFIG_M5272 */

/* The number of Tx and Rx buffers.  These are allocated from the page
 * pool.  The code may assume these are a power of two, so it is best
 * to keep them that size.
 * We don't need to allocate pages for the transmitter.  We just use
 * the skbuffer directly.
 */
#define FEC_ENET_RX_PAGES	8
#define FEC_ENET_RX_FRSIZE	2048
#define FEC_ENET_RX_FRPPG	(PAGE_SIZE / FEC_ENET_RX_FRSIZE)
#define RX_RING_SIZE		(FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
#define FEC_ENET_TX_FRSIZE	2048
#define FEC_ENET_TX_FRPPG	(PAGE_SIZE / FEC_ENET_TX_FRSIZE)
#define TX_RING_SIZE		16	/* Must be power of two */
#define TX_RING_MOD_MASK	15	/*   for this to work */

#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
#error "FEC: descriptor ring size constants too large"
#endif

/* Interrupt events/masks. */
#define FEC_ENET_HBERR	((uint)0x80000000)	/* Heartbeat error */
#define FEC_ENET_BABR	((uint)0x40000000)	/* Babbling receiver */
#define FEC_ENET_BABT	((uint)0x20000000)	/* Babbling transmitter */
#define FEC_ENET_GRA	((uint)0x10000000)	/* Graceful stop complete */
#define FEC_ENET_TXF	((uint)0x08000000)	/* Full frame transmitted */
#define FEC_ENET_TXB	((uint)0x04000000)	/* A buffer was transmitted */
#define FEC_ENET_RXF	((uint)0x02000000)	/* Full frame received */
#define FEC_ENET_RXB	((uint)0x01000000)	/* A buffer was received */
#define FEC_ENET_MII	((uint)0x00800000)	/* MII interrupt */
#define FEC_ENET_EBERR	((uint)0x00400000)	/* SDMA bus error */

#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)

/* The FEC stores dest/src/type, data, and checksum for receive packets. */
#define PKT_MAXBUF_SIZE		1518
#define PKT_MINBUF_SIZE		64
#define PKT_MAXBLR_SIZE		1520


/*
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits.  Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
#define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
#else
#define	OPT_FRAME_SIZE	0
#endif

/* The FEC buffer descriptors track the ring buffers.  The rx_bd_base and
 * tx_bd_base always point to the base of the buffer descriptors.  The
 * cur_rx and cur_tx point to the currently available buffer.
 * The dirty_tx tracks the current buffer that is being sent by the
 * controller.  The cur_tx and dirty_tx are equal under both completely
 * empty and completely full conditions.  The empty/ready indicator in
 * the buffer descriptor determines the actual condition.
 */
struct fec_enet_private {
	/* Hardware registers of the FEC device */
	void __iomem *hwp;

	struct net_device *netdev;

	struct clk *clk;

	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	unsigned char *tx_bounce[TX_RING_SIZE];
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	ushort	skb_cur;
	ushort	skb_dirty;

	/* CPM dual port RAM relative addresses */
	dma_addr_t	bd_dma;
	/* Address of Rx and Tx buffers */
	struct bufdesc	*rx_bd_base;
	struct bufdesc	*tx_bd_base;
	/* The next free ring entry */
	struct bufdesc	*cur_rx, *cur_tx;
	/* The ring entries to be free()ed */
	struct bufdesc	*dirty_tx;

	uint	tx_full;
	/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
	spinlock_t hw_lock;

	struct platform_device *pdev;

	int	opened;

	/* Phylib and MDIO interface */
	struct mii_bus *mii_bus;
	struct phy_device *phy_dev;
	int	mii_timeout;
	uint	phy_speed;
	phy_interface_t	phy_interface;
	int	index;
	int	link;
	int	full_duplex;
	struct completion mdio_done;
};

static irqreturn_t fec_enet_interrupt(int irq, void *dev_id);
static void fec_enet_tx(struct net_device *dev);
static void fec_enet_rx(struct net_device *dev);
static int fec_enet_close(struct net_device *dev);
static void fec_restart(struct net_device *dev, int duplex);
static void fec_stop(struct net_device *dev);

/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST		(1 << 30)
#define FEC_MMFR_OP_READ	(2 << 28)
#define FEC_MMFR_OP_WRITE	(1 << 28)
#define FEC_MMFR_PA(v)		((v & 0x1f) << 23)
#define FEC_MMFR_RA(v)		((v & 0x1f) << 18)
#define FEC_MMFR_TA		(2 << 16)
#define FEC_MMFR_DATA(v)	(v & 0xffff)

#define FEC_MII_TIMEOUT		1000 /* us */

/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)

static netdev_tx_t
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct bufdesc *bdp;
	void *bufaddr;
	unsigned short status;
	unsigned long flags;

	if (!fep->link) {
		/* Link is down or autonegotiation is in progress. */
		return NETDEV_TX_BUSY;
	}

	spin_lock_irqsave(&fep->hw_lock, flags);
	/* Fill in a Tx ring entry */
	bdp = fep->cur_tx;

	status = bdp->cbd_sc;

	if (status & BD_ENET_TX_READY) {
		/* Ooops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since dev->tbusy should be set.
		 */
		printk("%s: tx queue full!.\n", dev->name);
		spin_unlock_irqrestore(&fep->hw_lock, flags);
		return NETDEV_TX_BUSY;
	}

	/* Clear all of the status flags */
	status &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer */
	bufaddr = skb->data;
	bdp->cbd_datlen = skb->len;

	/*
	 * On some FEC implementations data must be aligned on
	 * 4-byte boundaries.  Use bounce buffers to copy data
	 * and get it aligned.  Ugh.
	 */
	if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
		unsigned int index;
		index = bdp - fep->tx_bd_base;
		memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len);
		bufaddr = fep->tx_bounce[index];
	}

	/* Save skb pointer */
	fep->tx_skbuff[fep->skb_cur] = skb;

	dev->stats.tx_bytes += skb->len;
	fep->skb_cur = (fep->skb_cur + 1) & TX_RING_MOD_MASK;

	/* Push the data cache so the CPM does not get stale memory
	 * data.
	 */
	bdp->cbd_bufaddr = dma_map_single(&dev->dev, bufaddr,
			FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);

	/* Send it on its way.
	 * Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
			| BD_ENET_TX_LAST | BD_ENET_TX_TC);
	bdp->cbd_sc = status;

	/* Trigger transmission start */
	writel(0, fep->hwp + FEC_X_DES_ACTIVE);

	/* If this was the last BD in the ring, start at the beginning again. */
	if (status & BD_ENET_TX_WRAP)
		bdp = fep->tx_bd_base;
	else
		bdp++;

	if (bdp == fep->dirty_tx) {
		fep->tx_full = 1;
		netif_stop_queue(dev);
	}

	fep->cur_tx = bdp;

	spin_unlock_irqrestore(&fep->hw_lock, flags);

	return NETDEV_TX_OK;
}

static void
fec_timeout(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	dev->stats.tx_errors++;

	fec_restart(dev, fep->full_duplex);
	netif_wake_queue(dev);
}

static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct fec_enet_private *fep = netdev_priv(dev);
	uint int_events;
	irqreturn_t ret = IRQ_NONE;

	do {
		int_events = readl(fep->hwp + FEC_IEVENT);
		writel(int_events, fep->hwp + FEC_IEVENT);

		if (int_events & FEC_ENET_RXF) {
			ret = IRQ_HANDLED;
			fec_enet_rx(dev);
		}

		/* Transmit OK, or non-fatal error.  Update the buffer
		 * descriptors.  FEC handles all errors, we just discover
		 * them as part of the transmit process.
		 */
		if (int_events & FEC_ENET_TXF) {
			ret = IRQ_HANDLED;
			fec_enet_tx(dev);
		}

		if (int_events & FEC_ENET_MII) {
			ret = IRQ_HANDLED;
			complete(&fep->mdio_done);
		}
	} while (int_events);

	return ret;
}


static void
fec_enet_tx(struct net_device *dev)
{
	struct fec_enet_private *fep;
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb;

	fep = netdev_priv(dev);
	spin_lock(&fep->hw_lock);
	bdp = fep->dirty_tx;

	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
		if (bdp == fep->cur_tx && fep->tx_full == 0)
			break;

		dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
		bdp->cbd_bufaddr = 0;

		skb = fep->tx_skbuff[fep->skb_dirty];
		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				BD_ENET_TX_RL | BD_ENET_TX_UN |
				BD_ENET_TX_CSL)) {
			dev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				dev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				dev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				dev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				dev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				dev->stats.tx_carrier_errors++;
		} else {
			dev->stats.tx_packets++;
		}

		if (status & BD_ENET_TX_READY)
			printk("HEY! Enet xmit interrupt and TX_READY.\n");

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			dev->stats.collisions++;

		/* Free the sk buffer associated with this last transmit */
		dev_kfree_skb_any(skb);
		fep->tx_skbuff[fep->skb_dirty] = NULL;
		fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;

		/* Update pointer to next buffer descriptor to be transmitted */
		if (status & BD_ENET_TX_WRAP)
			bdp = fep->tx_bd_base;
		else
			bdp++;

		/* Since we have freed up a buffer, the ring is no longer full
		 */
		if (fep->tx_full) {
			fep->tx_full = 0;
			if (netif_queue_stopped(dev))
				netif_wake_queue(dev);
		}
	}
	fep->dirty_tx = bdp;
	spin_unlock(&fep->hw_lock);
}


/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static void
fec_enet_rx(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb;
	ushort	pkt_len;
	__u8 *data;

#ifdef CONFIG_M532x
	flush_cache_all();
#endif

	spin_lock(&fep->hw_lock);

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {

		/* Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((status & BD_ENET_RX_LAST) == 0)
			printk("FEC ENET: rcv is not +last\n");

		if (!fep->opened)
			goto rx_processing_done;

		/* Check for errors. */
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			dev->stats.rx_errors++;
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
				/* Frame too long or too short. */
				dev->stats.rx_length_errors++;
			}
			if (status & BD_ENET_RX_NO)	/* Frame alignment */
				dev->stats.rx_frame_errors++;
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				dev->stats.rx_crc_errors++;
			if (status & BD_ENET_RX_OV)	/* FIFO overrun */
				dev->stats.rx_fifo_errors++;
		}

		/* Report late collisions as a frame error.
		 * On this error, the BD is closed, but we don't know what we
		 * have in the buffer.  So, just drop this frame on the floor.
		 */
		if (status & BD_ENET_RX_CL) {
			dev->stats.rx_errors++;
			dev->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame. */
		dev->stats.rx_packets++;
		pkt_len = bdp->cbd_datlen;
		dev->stats.rx_bytes += pkt_len;
		data = (__u8 *)__va(bdp->cbd_bufaddr);

		dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen,
				DMA_FROM_DEVICE);

		/* This does 16 byte alignment, exactly what we need.
		 * The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		skb = dev_alloc_skb(pkt_len - 4 + NET_IP_ALIGN);

		if (unlikely(!skb)) {
			printk("%s: Memory squeeze, dropping packet.\n",
					dev->name);
			dev->stats.rx_dropped++;
		} else {
			skb_reserve(skb, NET_IP_ALIGN);
			skb_put(skb, pkt_len - 4);	/* Make room */
			skb_copy_to_linear_data(skb, data, pkt_len - 4);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
		}

		bdp->cbd_bufaddr = dma_map_single(NULL, data, bdp->cbd_datlen,
			DMA_FROM_DEVICE);
rx_processing_done:
		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;
		bdp->cbd_sc = status;

		/* Update BD pointer to next entry */
		if (status & BD_ENET_RX_WRAP)
			bdp = fep->rx_bd_base;
		else
			bdp++;
		/* Doing this here will keep the FEC running while we process
		 * incoming frames.  On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		writel(0, fep->hwp + FEC_R_DES_ACTIVE);
	}
	fep->cur_rx = bdp;

	spin_unlock(&fep->hw_lock);
}

/* ------------------------------------------------------------------------- */
#ifdef CONFIG_M5272
static void __inline__ fec_get_mac(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	unsigned char *iap, tmpaddr[ETH_ALEN];

	if (FEC_FLASHMAC) {
		/*
		 * Get MAC address from FLASH.
		 * If it is all 1's or 0's, use the default.
		 */
		iap = (unsigned char *)FEC_FLASHMAC;
		if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
		    (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
			iap = fec_mac_default;
		if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
		    (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
			iap = fec_mac_default;
	} else {
		*((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW);
		*((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
		iap = &tmpaddr[0];
	}

	memcpy(dev->dev_addr, iap, ETH_ALEN);

	/* Adjust MAC if using default MAC address */
	if (iap == fec_mac_default)
		dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
}
#endif

/* ------------------------------------------------------------------------- */

/*
 * Phy section
 */
static void fec_enet_adjust_link(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct phy_device *phy_dev = fep->phy_dev;
	unsigned long flags;

	int status_change = 0;

	spin_lock_irqsave(&fep->hw_lock, flags);

	/* Prevent a state halted on mii error */
	if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
		phy_dev->state = PHY_RESUMING;
		goto spin_unlock;
	}

	/* Duplex link change */
	if (phy_dev->link) {
		if (fep->full_duplex != phy_dev->duplex) {
			fec_restart(dev, phy_dev->duplex);
			status_change = 1;
		}
	}

	/* Link on or off change */
	if (phy_dev->link != fep->link) {
		fep->link = phy_dev->link;
		if (phy_dev->link)
			fec_restart(dev, phy_dev->duplex);
		else
			fec_stop(dev);
		status_change = 1;
	}

spin_unlock:
	spin_unlock_irqrestore(&fep->hw_lock, flags);

	if (status_change)
		phy_print_status(phy_dev);
}

static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct fec_enet_private *fep = bus->priv;
	unsigned long time_left;

	fep->mii_timeout = 0;
	init_completion(&fep->mdio_done);

	/* start a read op */
	writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
		FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	time_left = wait_for_completion_timeout(&fep->mdio_done,
			usecs_to_jiffies(FEC_MII_TIMEOUT));
	if (time_left == 0) {
		fep->mii_timeout = 1;
		printk(KERN_ERR "FEC: MDIO read timeout\n");
		return -ETIMEDOUT;
	}

	/* return value */
	return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
}

static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct fec_enet_private *fep = bus->priv;
	unsigned long time_left;

	fep->mii_timeout = 0;
	init_completion(&fep->mdio_done);

	/* start a write op */
	writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
		FEC_MMFR_TA | FEC_MMFR_DATA(value),
		fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	time_left = wait_for_completion_timeout(&fep->mdio_done,
			usecs_to_jiffies(FEC_MII_TIMEOUT));
	if (time_left == 0) {
		fep->mii_timeout = 1;
		printk(KERN_ERR "FEC: MDIO write timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int fec_enet_mdio_reset(struct mii_bus *bus)
{
	return 0;
}

static int fec_enet_mii_probe(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct phy_device *phy_dev = NULL;
	int ret;

	fep->phy_dev = NULL;

	/* find the first phy */
	phy_dev = phy_find_first(fep->mii_bus);
	if (!phy_dev) {
		printk(KERN_ERR "%s: no PHY found\n", dev->name);
		return -ENODEV;
	}

	/* attach the mac to the phy */
	ret = phy_connect_direct(dev, phy_dev,
			     &fec_enet_adjust_link, 0,
			     PHY_INTERFACE_MODE_MII);
	if (ret) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
		return ret;
	}

	/* mask with MAC supported features */
	phy_dev->supported &= PHY_BASIC_FEATURES;
	phy_dev->advertising = phy_dev->supported;

	fep->phy_dev = phy_dev;
	fep->link = 0;
	fep->full_duplex = 0;

	printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
		"(mii_bus:phy_addr=%s, irq=%d)\n", dev->name,
		fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
		fep->phy_dev->irq);

	return 0;
}

static int fec_enet_mii_init(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(dev);
	int err = -ENXIO, i;

	fep->mii_timeout = 0;

	/*
	 * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
	 */
	fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk), 5000000) << 1;
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

	fep->mii_bus = mdiobus_alloc();
	if (fep->mii_bus == NULL) {
		err = -ENOMEM;
		goto err_out;
	}

	fep->mii_bus->name = "fec_enet_mii_bus";
	fep->mii_bus->read = fec_enet_mdio_read;
	fep->mii_bus->write = fec_enet_mdio_write;
	fep->mii_bus->reset = fec_enet_mdio_reset;
	snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
	fep->mii_bus->priv = fep;
	fep->mii_bus->parent = &pdev->dev;

	fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!fep->mii_bus->irq) {
		err = -ENOMEM;
		goto err_out_free_mdiobus;
	}

	for (i = 0; i < PHY_MAX_ADDR; i++)
		fep->mii_bus->irq[i] = PHY_POLL;

	platform_set_drvdata(dev, fep->mii_bus);

	if (mdiobus_register(fep->mii_bus))
		goto err_out_free_mdio_irq;

	return 0;

err_out_free_mdio_irq:
	kfree(fep->mii_bus->irq);
err_out_free_mdiobus:
	mdiobus_free(fep->mii_bus);
err_out:
	return err;
}

static void fec_enet_mii_remove(struct fec_enet_private *fep)
{
	if (fep->phy_dev)
		phy_disconnect(fep->phy_dev);
	mdiobus_unregister(fep->mii_bus);
	kfree(fep->mii_bus->irq);
	mdiobus_free(fep->mii_bus);
}

static int fec_enet_get_settings(struct net_device *dev,
				  struct ethtool_cmd *cmd)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct phy_device *phydev = fep->phy_dev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_gset(phydev, cmd);
}

static int fec_enet_set_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct phy_device *phydev = fep->phy_dev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_sset(phydev, cmd);
}

static void fec_enet_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	strcpy(info->driver, fep->pdev->dev.driver->name);
	strcpy(info->version, "Revision: 1.0");
	strcpy(info->bus_info, dev_name(&dev->dev));
}

static struct ethtool_ops fec_enet_ethtool_ops = {
	.get_settings	= fec_enet_get_settings,
	.set_settings	= fec_enet_set_settings,
	.get_drvinfo	= fec_enet_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct phy_device *phydev = fep->phy_dev;

	if (!netif_running(dev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}

static void fec_enet_free_buffers(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int i;
	struct sk_buff *skb;
	struct bufdesc	*bdp;

	bdp = fep->rx_bd_base;
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = fep->rx_skbuff[i];

		if (bdp->cbd_bufaddr)
			dma_unmap_single(&dev->dev, bdp->cbd_bufaddr,
					FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
		if (skb)
			dev_kfree_skb(skb);
		bdp++;
	}

	bdp = fep->tx_bd_base;
	for (i = 0; i < TX_RING_SIZE; i++)
		kfree(fep->tx_bounce[i]);
}

static int fec_enet_alloc_buffers(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int i;
	struct sk_buff *skb;
	struct bufdesc	*bdp;

	bdp = fep->rx_bd_base;
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);
		if (!skb) {
			fec_enet_free_buffers(dev);
			return -ENOMEM;
		}
		fep->rx_skbuff[i] = skb;

		bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data,
				FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
		bdp->cbd_sc = BD_ENET_RX_EMPTY;
		bdp++;
	}

	/* Set the last buffer to wrap. */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	bdp = fep->tx_bd_base;
	for (i = 0; i < TX_RING_SIZE; i++) {
		fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);

		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}

	/* Set the last buffer to wrap. */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	return 0;
}

static int
fec_enet_open(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int ret;

	/* I should reset the ring buffers here, but I don't yet know
	 * a simple way to do that.
	 */

	ret = fec_enet_alloc_buffers(dev);
	if (ret)
		return ret;

	/* Probe and connect to PHY when open the interface */
	ret = fec_enet_mii_probe(dev);
	if (ret) {
		fec_enet_free_buffers(dev);
		return ret;
	}
	phy_start(fep->phy_dev);
	netif_start_queue(dev);
	fep->opened = 1;
	return 0;
}

static int
fec_enet_close(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/* Don't know what to do yet. */
	fep->opened = 0;
	netif_stop_queue(dev);
	fec_stop(dev);

	if (fep->phy_dev)
		phy_disconnect(fep->phy_dev);

	fec_enet_free_buffers(dev);

	return 0;
}

/* Set or clear the multicast filter for this adaptor.
 * Skeleton taken from sunlance driver.
 * The CPM Ethernet implementation allows Multicast as well as individual
 * MAC address filtering.  Some of the drivers check to make sure it is
 * a group multicast address, and discard those that are not.  I guess I
 * will do the same for now, but just remove the test if you want
 * individual filtering as well (do the upper net layers want or support
 * this kind of feature?).
 */

#define HASH_BITS	6		/* #bits in hash */
#define CRC32_POLY	0xEDB88320

static void set_multicast_list(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	unsigned int i, bit, data, crc, tmp;
	unsigned char hash;

	if (dev->flags & IFF_PROMISC) {
		tmp = readl(fep->hwp + FEC_R_CNTRL);
		tmp |= 0x8;
		writel(tmp, fep->hwp + FEC_R_CNTRL);
		return;
	}

	tmp = readl(fep->hwp + FEC_R_CNTRL);
	tmp &= ~0x8;
	writel(tmp, fep->hwp + FEC_R_CNTRL);

	if (dev->flags & IFF_ALLMULTI) {
		/* Catch all multicast addresses, so set the
		 * filter to all 1's
		 */
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

		return;
	}

	/* Clear filter and add the addresses in hash register
	 */
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

	netdev_for_each_mc_addr(ha, dev) {
		/* Only support group multicast for now */
		if (!(ha->addr[0] & 1))
			continue;

		/* calculate crc32 value of mac address */
		crc = 0xffffffff;

		for (i = 0; i < dev->addr_len; i++) {
			data = ha->addr[i];
			for (bit = 0; bit < 8; bit++, data >>= 1) {
				crc = (crc >> 1) ^
				(((crc ^ data) & 1) ? CRC32_POLY : 0);
			}
		}

		/* only upper 6 bits (HASH_BITS) are used
		 * which point to specific bit in the hash registers
		 */
		hash = (crc >> (32 - HASH_BITS)) & 0x3f;

		if (hash > 31) {
			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
			tmp |= 1 << (hash - 32);
			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		} else {
			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
			tmp |= 1 << hash;
			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
		}
	}
}

/* Set a MAC change in hardware. */
static int
fec_set_mac_address(struct net_device *dev, void *p)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	writel(dev->dev_addr[3] | (dev->dev_addr[2] << 8) |
		(dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24),
		fep->hwp + FEC_ADDR_LOW);
	writel((dev->dev_addr[5] << 16) | (dev->dev_addr[4] << 24),
		fep->hwp + FEC_ADDR_HIGH);
	return 0;
}

static const struct net_device_ops fec_netdev_ops = {
	.ndo_open		= fec_enet_open,
	.ndo_stop		= fec_enet_close,
	.ndo_start_xmit		= fec_enet_start_xmit,
	.ndo_set_multicast_list	= set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= fec_timeout,
	.ndo_set_mac_address	= fec_set_mac_address,
	.ndo_do_ioctl		= fec_enet_ioctl,
};

 /*
  * XXX:  We need to clean up on failure exits here.
  *
  * index is only used in legacy code
  */
static int fec_enet_init(struct net_device *dev, int index)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct bufdesc *cbd_base;
	struct bufdesc *bdp;
	int i;

	/* Allocate memory for buffer descriptors. */
	cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
			GFP_KERNEL);
	if (!cbd_base) {
		printk("FEC: allocate descriptor memory failed?\n");
		return -ENOMEM;
	}

	spin_lock_init(&fep->hw_lock);

	fep->index = index;
	fep->hwp = (void __iomem *)dev->base_addr;
	fep->netdev = dev;

	/* Set the Ethernet address */
#ifdef CONFIG_M5272
	fec_get_mac(dev);
#else
	{
		unsigned long l;
		l = readl(fep->hwp + FEC_ADDR_LOW);
		dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24);
		dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16);
		dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8);
		dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0);
		l = readl(fep->hwp + FEC_ADDR_HIGH);
		dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24);
		dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16);
	}
#endif

	/* Set receive and transmit descriptor base. */
	fep->rx_bd_base = cbd_base;
	fep->tx_bd_base = cbd_base + RX_RING_SIZE;

	/* The FEC Ethernet specific entries in the device structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->netdev_ops = &fec_netdev_ops;
	dev->ethtool_ops = &fec_enet_ethtool_ops;

	/* Initialize the receive buffer descriptors. */
	bdp = fep->rx_bd_base;
	for (i = 0; i < RX_RING_SIZE; i++) {

		/* Initialize the BD for every fragment in the page. */
		bdp->cbd_sc = 0;
		bdp++;
	}

	/* Set the last buffer to wrap */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* ...and the same for transmit */
	bdp = fep->tx_bd_base;
	for (i = 0; i < TX_RING_SIZE; i++) {

		/* Initialize the BD for every fragment in the page. */
		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}

	/* Set the last buffer to wrap */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	fec_restart(dev, 0);

	return 0;
}

/* This function is called to start or restart the FEC during a link
 * change.
 * This only happens when switching between half and full duplex.
 */
static void
fec_restart(struct net_device *dev, int duplex)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int i;

	/* Whack a reset.  We should wait for this. */
	writel(1, fep->hwp + FEC_ECNTRL);
	udelay(10);

	/* Clear any outstanding interrupt. */
	writel(0xffc00000, fep->hwp + FEC_IEVENT);

	/* Reset all multicast. */
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
#ifndef CONFIG_M5272
	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif

	/* Set maximum receive buffer size. */
	writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);

	/* Set receive and transmit descriptor base. */
	writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
	writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
			fep->hwp + FEC_X_DES_START);

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->cur_rx = fep->rx_bd_base;

	/* Reset SKB transmit buffers. */
	fep->skb_cur = fep->skb_dirty = 0;
	for (i = 0; i <= TX_RING_MOD_MASK; i++) {
		if (fep->tx_skbuff[i]) {
			dev_kfree_skb_any(fep->tx_skbuff[i]);
			fep->tx_skbuff[i] = NULL;
		}
	}

	/* Enable MII mode */
	if (duplex) {
		/* MII enable / FD enable */
		writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL);
		writel(0x04, fep->hwp + FEC_X_CNTRL);
	} else {
		/* MII enable / No Rcv on Xmit */
		writel(OPT_FRAME_SIZE | 0x06, fep->hwp + FEC_R_CNTRL);
		writel(0x0, fep->hwp + FEC_X_CNTRL);
	}
	fep->full_duplex = duplex;

	/* Set MII speed */
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

#ifdef FEC_MIIGSK_ENR
	if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
		/* disable the gasket and wait */
		writel(0, fep->hwp + FEC_MIIGSK_ENR);
		while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
			udelay(1);

		/* configure the gasket: RMII, 50 MHz, no loopback, no echo */
		writel(1, fep->hwp + FEC_MIIGSK_CFGR);

		/* re-enable the gasket */
		writel(2, fep->hwp + FEC_MIIGSK_ENR);
	}
#endif

	/* And last, enable the transmit and receive processing */
	writel(2, fep->hwp + FEC_ECNTRL);
	writel(0, fep->hwp + FEC_R_DES_ACTIVE);

	/* Enable interrupts we wish to service */
	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
}

static void
fec_stop(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/* We cannot expect a graceful transmit stop without link !!! */
	if (fep->link) {
		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
		udelay(10);
		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
			printk("fec_stop : Graceful transmit stop did not complete !\n");
	}

	/* Whack a reset.  We should wait for this. */
	writel(1, fep->hwp + FEC_ECNTRL);
	udelay(10);
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
}

static int __devinit
fec_probe(struct platform_device *pdev)
{
	struct fec_enet_private *fep;
	struct fec_platform_data *pdata;
	struct net_device *ndev;
	int i, irq, ret = 0;
	struct resource *r;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r)
		return -ENXIO;

	r = request_mem_region(r->start, resource_size(r), pdev->name);
	if (!r)
		return -EBUSY;

	/* Init network device */
	ndev = alloc_etherdev(sizeof(struct fec_enet_private));
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* setup board info structure */
	fep = netdev_priv(ndev);
	memset(fep, 0, sizeof(*fep));

	ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r));
	fep->pdev = pdev;

	if (!ndev->base_addr) {
		ret = -ENOMEM;
		goto failed_ioremap;
	}

	platform_set_drvdata(pdev, ndev);

	pdata = pdev->dev.platform_data;
	if (pdata)
		fep->phy_interface = pdata->phy;

	/* This device has up to three irqs on some platforms */
	for (i = 0; i < 3; i++) {
		irq = platform_get_irq(pdev, i);
		if (i && irq < 0)
			break;
		ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
		if (ret) {
			while (i >= 0) {
				irq = platform_get_irq(pdev, i);
				free_irq(irq, ndev);
				i--;
			}
			goto failed_irq;
		}
	}

	fep->clk = clk_get(&pdev->dev, "fec_clk");
	if (IS_ERR(fep->clk)) {
		ret = PTR_ERR(fep->clk);
		goto failed_clk;
	}
	clk_enable(fep->clk);

	ret = fec_enet_init(ndev, 0);
	if (ret)
		goto failed_init;

	ret = fec_enet_mii_init(pdev);
	if (ret)
		goto failed_mii_init;

	ret = register_netdev(ndev);
	if (ret)
		goto failed_register;

	return 0;

failed_register:
	fec_enet_mii_remove(fep);
failed_mii_init:
failed_init:
	clk_disable(fep->clk);
	clk_put(fep->clk);
failed_clk:
	for (i = 0; i < 3; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq > 0)
			free_irq(irq, ndev);
	}
failed_irq:
	iounmap((void __iomem *)ndev->base_addr);
failed_ioremap:
	free_netdev(ndev);

	return ret;
}

static int __devexit
fec_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	platform_set_drvdata(pdev, NULL);

	fec_stop(ndev);
	fec_enet_mii_remove(fep);
	clk_disable(fep->clk);
	clk_put(fep->clk);
	iounmap((void __iomem *)ndev->base_addr);
	unregister_netdev(ndev);
	free_netdev(ndev);
	return 0;
}

#ifdef CONFIG_PM
static int
fec_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep;

	if (ndev) {
		fep = netdev_priv(ndev);
		if (netif_running(ndev))
			fec_enet_close(ndev);
		clk_disable(fep->clk);
	}
	return 0;
}

static int
fec_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep;

	if (ndev) {
		fep = netdev_priv(ndev);
		clk_enable(fep->clk);
		if (netif_running(ndev))
			fec_enet_open(ndev);
	}
	return 0;
}

static const struct dev_pm_ops fec_pm_ops = {
	.suspend	= fec_suspend,
	.resume		= fec_resume,
	.freeze		= fec_suspend,
	.thaw		= fec_resume,
	.poweroff	= fec_suspend,
	.restore	= fec_resume,
};
#endif

static struct platform_driver fec_driver = {
	.driver	= {
		.name	= "fec",
		.owner	= THIS_MODULE,
#ifdef CONFIG_PM
		.pm	= &fec_pm_ops,
#endif
	},
	.probe	= fec_probe,
	.remove	= __devexit_p(fec_drv_remove),
};

static int __init
fec_enet_module_init(void)
{
	printk(KERN_INFO "FEC Ethernet Driver\n");

	return platform_driver_register(&fec_driver);
}

static void __exit
fec_enet_cleanup(void)
{
	platform_driver_unregister(&fec_driver);
}

module_exit(fec_enet_cleanup);
module_init(fec_enet_module_init);

MODULE_LICENSE("GPL");
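
For context, fec_probe() above expects the platform to describe the controller: a memory resource for the register block, up to three IRQ resources, a "fec_clk" clock, and optionally a struct fec_platform_data whose .phy field selects the PHY interface mode (the RMII case enables the MIIGSK gasket path in fec_restart()). Below is a minimal, hypothetical board-file sketch of that registration; only the "fec" driver name, the fec_platform_data .phy field, and the "fec_clk" clock name come from the code above, while the register base, IRQ number, and device id are illustrative assumptions.

/* Hypothetical board-file sketch: registering a platform device that the
 * "fec" driver above would bind to.  The base address and IRQ are example
 * values for an i.MX-style SoC, not taken from this file.
 */
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/fec.h>

static struct resource fec_resources[] = {
	{
		.start	= 0x83fec000,			/* FEC register base (example) */
		.end	= 0x83fec000 + 0x3fff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 87,				/* FEC interrupt (example) */
		.end	= 87,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct fec_platform_data fec_pdata = {
	.phy	= PHY_INTERFACE_MODE_RMII,		/* consumed as pdata->phy in fec_probe() */
};

static struct platform_device fec_device = {
	.name		= "fec",			/* matches fec_driver.driver.name */
	.id		= 0,
	.num_resources	= ARRAY_SIZE(fec_resources),
	.resource	= fec_resources,
	.dev		= {
		.platform_data	= &fec_pdata,
	},
};

/* Called from board init code, e.g. a machine_desc .init_machine hook. */
static int __init board_add_fec(void)
{
	return platform_device_register(&fec_device);
}

The board code would also have to make a clock named "fec_clk" resolvable for this device through its clock framework, since fec_probe() calls clk_get(&pdev->dev, "fec_clk") and uses its rate to derive the MDIO clock divider in fec_enet_mii_init().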