/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * Right now, I am very wasteful with the buffers. I allocate memory
 * pages and then divide them into 2K frame buffers. This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>

#include <asm/cacheflush.h>

#ifndef CONFIG_ARCH_MXC
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#endif

#include "fec.h"

#ifdef CONFIG_ARCH_MXC
#include <mach/hardware.h>
#define FEC_ALIGNMENT	0xf
#else
#define FEC_ALIGNMENT	0x3
#endif

/*
 * Define the fixed address of the FEC hardware.
 */
#if defined(CONFIG_M5272)
#define HAVE_mii_link_interrupt

static unsigned char fec_mac_default[] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/*
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero then assume it is the address to get MAC from.
 */
#if defined(CONFIG_NETtel)
#define FEC_FLASHMAC	0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define FEC_FLASHMAC	0xf0006000
#elif defined(CONFIG_CANCam)
#define FEC_FLASHMAC	0xf0020000
#elif defined(CONFIG_M5272C3)
#define FEC_FLASHMAC	(0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define FEC_FLASHMAC	0xffc0406b
#else
#define FEC_FLASHMAC	0
#endif
#endif /* CONFIG_M5272 */

/* Forward declarations of some structures to support different PHYs */

typedef struct {
	uint mii_data;
	void (*funct)(uint mii_reg, struct net_device *dev);
} phy_cmd_t;

typedef struct {
	uint id;
	char *name;

	const phy_cmd_t *config;
	const phy_cmd_t *startup;
	const phy_cmd_t *ack_int;
	const phy_cmd_t *shutdown;
} phy_info_t;

/* The number of Tx and Rx buffers. These are allocated from the page
 * pool. The code may assume these are power of two, so it is best
 * to keep them that size.
 * We don't need to allocate pages for the transmitter. We just use
 * the skbuffer directly.
 */
#define FEC_ENET_RX_PAGES	8
#define FEC_ENET_RX_FRSIZE	2048
#define FEC_ENET_RX_FRPPG	(PAGE_SIZE / FEC_ENET_RX_FRSIZE)
#define RX_RING_SIZE		(FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
#define FEC_ENET_TX_FRSIZE	2048
#define FEC_ENET_TX_FRPPG	(PAGE_SIZE / FEC_ENET_TX_FRSIZE)
#define TX_RING_SIZE		16	/* Must be power of two */
#define TX_RING_MOD_MASK	15	/*   for this to work */

#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
#error "FEC: descriptor ring size constants too large"
#endif
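/* Worked example of the sizing above (illustration only, assuming the
 * common 4 KiB PAGE_SIZE): FEC_ENET_RX_FRPPG = 4096 / 2048 = 2 frames
 * per page, so RX_RING_SIZE = 2 * 8 = 16 receive buffers. The guard
 * above then checks that all (16 + 16) descriptors, at 8 bytes each,
 * fit in the single page allocated in fec_enet_init().
 */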
/* Interrupt events/masks. */
#define FEC_ENET_HBERR	((uint)0x80000000)	/* Heartbeat error */
#define FEC_ENET_BABR	((uint)0x40000000)	/* Babbling receiver */
#define FEC_ENET_BABT	((uint)0x20000000)	/* Babbling transmitter */
#define FEC_ENET_GRA	((uint)0x10000000)	/* Graceful stop complete */
#define FEC_ENET_TXF	((uint)0x08000000)	/* Full frame transmitted */
#define FEC_ENET_TXB	((uint)0x04000000)	/* A buffer was transmitted */
#define FEC_ENET_RXF	((uint)0x02000000)	/* Full frame received */
#define FEC_ENET_RXB	((uint)0x01000000)	/* A buffer was received */
#define FEC_ENET_MII	((uint)0x00800000)	/* MII interrupt */
#define FEC_ENET_EBERR	((uint)0x00400000)	/* SDMA bus error */

/* The FEC stores dest/src/type, data, and checksum for receive packets.
 */
#define PKT_MAXBUF_SIZE		1518
#define PKT_MINBUF_SIZE		64
#define PKT_MAXBLR_SIZE		1520

/*
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
#define OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
#else
#define OPT_FRAME_SIZE	0
#endif

/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
 * tx_bd_base always point to the base of the buffer descriptors. The
 * cur_rx and cur_tx point to the currently available buffer.
 * The dirty_tx tracks the current buffer that is being sent by the
 * controller. The cur_tx and dirty_tx are equal under both completely
 * empty and completely full conditions. The empty/ready indicator in
 * the buffer descriptor determines the actual condition.
 */
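/* A concrete walk-through of the invariant above (illustration): after
 * fec_restart() both cur_tx and dirty_tx point at tx_bd_base and
 * tx_full is 0, so the ring is empty. If TX_RING_SIZE frames are
 * queued before any TXF interrupt is serviced, cur_tx wraps around and
 * meets dirty_tx again; fec_enet_start_xmit() then sets tx_full and
 * stops the queue until fec_enet_tx() reclaims a descriptor.
 */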
struct fec_enet_private {
	/* Hardware registers of the FEC device */
	void __iomem *hwp;

	struct net_device *netdev;

	struct clk *clk;

	/* The saved address of a sent-in-place packet/buffer, for freeing later */
	unsigned char *tx_bounce[TX_RING_SIZE];
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	ushort	skb_cur;
	ushort	skb_dirty;

	/* CPM dual port RAM relative addresses */
	dma_addr_t	bd_dma;
	/* Address of Rx and Tx buffers */
	struct bufdesc	*rx_bd_base;
	struct bufdesc	*tx_bd_base;
	/* The next free ring entry */
	struct bufdesc	*cur_rx, *cur_tx;
	/* The ring entries to be free()ed */
	struct bufdesc	*dirty_tx;

	uint	tx_full;
	/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
	spinlock_t hw_lock;
	/* hold while accessing the mii_list_t() elements */
	spinlock_t mii_lock;

	uint	phy_id;
	uint	phy_id_done;
	uint	phy_status;
	uint	phy_speed;
	phy_info_t const	*phy;
	struct work_struct phy_task;

	uint	sequence_done;
	uint	mii_phy_task_queued;

	uint	phy_addr;

	int	index;
	int	opened;
	int	link;
	int	old_link;
	int	full_duplex;
};

static void fec_enet_mii(struct net_device *dev);
static irqreturn_t fec_enet_interrupt(int irq, void *dev_id);
static void fec_enet_tx(struct net_device *dev);
static void fec_enet_rx(struct net_device *dev);
static int fec_enet_close(struct net_device *dev);
static void fec_restart(struct net_device *dev, int duplex);
static void fec_stop(struct net_device *dev);

/* MII processing. We keep this as simple as possible. Requests are
 * placed on the list (if there is room). When the request is finished
 * by the MII, an optional function may be called.
 */
typedef struct mii_list {
	uint	mii_regval;
	void	(*mii_func)(uint val, struct net_device *dev);
	struct	mii_list *mii_next;
} mii_list_t;

#define	NMII	20
static mii_list_t	mii_cmds[NMII];
static mii_list_t	*mii_free;
static mii_list_t	*mii_head;
static mii_list_t	*mii_tail;

static int mii_queue(struct net_device *dev, int request,
		void (*func)(uint, struct net_device *));

/* Make MII read/write commands for the FEC */
#define mk_mii_read(REG)	(0x60020000 | ((REG & 0x1f) << 18))
#define mk_mii_write(REG, VAL)	(0x50020000 | ((REG & 0x1f) << 18) | \
						(VAL & 0xffff))
#define mk_mii_end	0
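/* Bit layout of the management frames built above (illustration):
 * bits 31:30 start-of-frame (01), bits 29:28 opcode (10 = read,
 * 01 = write), bits 27:23 PHY address (OR-ed in later by mii_queue()),
 * bits 22:18 register address, bits 17:16 turnaround (the 0x00020000
 * constant), bits 15:0 data. For example, mk_mii_read(1) for the PHY
 * at address 0 yields 0x60020000 | (1 << 18) = 0x60060000.
 */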
/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)

/* Register definitions for the PHY */

#define MII_REG_CR	0	/* Control Register */
#define MII_REG_SR	1	/* Status Register */
#define MII_REG_PHYIR1	2	/* PHY Identification Register 1 */
#define MII_REG_PHYIR2	3	/* PHY Identification Register 2 */
#define MII_REG_ANAR	4	/* A-N Advertisement Register */
#define MII_REG_ANLPAR	5	/* A-N Link Partner Ability Register */
#define MII_REG_ANER	6	/* A-N Expansion Register */
#define MII_REG_ANNPTR	7	/* A-N Next Page Transmit Register */
#define MII_REG_ANLPRNPR 8	/* A-N Link Partner Received Next Page Reg. */

/* values for phy_status */

#define PHY_CONF_ANE	0x0001	/* 1 auto-negotiation enabled */
#define PHY_CONF_LOOP	0x0002	/* 1 loopback mode enabled */
#define PHY_CONF_SPMASK	0x00f0	/* mask for speed */
#define PHY_CONF_10HDX	0x0010	/* 10 Mbit half duplex supported */
#define PHY_CONF_10FDX	0x0020	/* 10 Mbit full duplex supported */
#define PHY_CONF_100HDX	0x0040	/* 100 Mbit half duplex supported */
#define PHY_CONF_100FDX	0x0080	/* 100 Mbit full duplex supported */

#define PHY_STAT_LINK	0x0100	/* 1 up - 0 down */
#define PHY_STAT_FAULT	0x0200	/* 1 remote fault */
#define PHY_STAT_ANC	0x0400	/* 1 auto-negotiation complete */
#define PHY_STAT_SPMASK	0xf000	/* mask for speed */
#define PHY_STAT_10HDX	0x1000	/* 10 Mbit half duplex selected */
#define PHY_STAT_10FDX	0x2000	/* 10 Mbit full duplex selected */
#define PHY_STAT_100HDX	0x4000	/* 100 Mbit half duplex selected */
#define PHY_STAT_100FDX	0x8000	/* 100 Mbit full duplex selected */

static int
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct bufdesc *bdp;
	void *bufaddr;
	unsigned short status;
	unsigned long flags;

	if (!fep->link) {
		/* Link is down or autonegotiation is in progress. */
		return NETDEV_TX_BUSY;
	}

	spin_lock_irqsave(&fep->hw_lock, flags);
	/* Fill in a Tx ring entry */
	bdp = fep->cur_tx;

	status = bdp->cbd_sc;

	if (status & BD_ENET_TX_READY) {
		/* Ooops. All transmit buffers are full. Bail out.
		 * This should not happen, since the queue should be stopped.
		 */
		printk("%s: tx queue full!\n", dev->name);
		spin_unlock_irqrestore(&fep->hw_lock, flags);
		return NETDEV_TX_BUSY;
	}

	/* Clear all of the status flags */
	status &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer */
	bufaddr = skb->data;
	bdp->cbd_datlen = skb->len;

	/*
	 * On some FEC implementations data must be aligned on
	 * 4-byte boundaries. Use bounce buffers to copy data
	 * and get it aligned. Ugh.
	 */
	if ((unsigned long)bufaddr & FEC_ALIGNMENT) {
		unsigned int index;
		index = bdp - fep->tx_bd_base;
		memcpy(fep->tx_bounce[index], skb->data, skb->len);
		bufaddr = fep->tx_bounce[index];
	}

	/* Save skb pointer */
	fep->tx_skbuff[fep->skb_cur] = skb;

	dev->stats.tx_bytes += skb->len;
	fep->skb_cur = (fep->skb_cur + 1) & TX_RING_MOD_MASK;

	/* Push the data cache so the CPM does not get stale memory
	 * data.
	 */
	bdp->cbd_bufaddr = dma_map_single(&dev->dev, bufaddr,
			FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);

	/* Send it on its way. Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
			| BD_ENET_TX_LAST | BD_ENET_TX_TC);
	bdp->cbd_sc = status;

	dev->trans_start = jiffies;

	/* Trigger transmission start */
	writel(0, fep->hwp + FEC_X_DES_ACTIVE);

	/* If this was the last BD in the ring, start at the beginning again. */
	if (status & BD_ENET_TX_WRAP)
		bdp = fep->tx_bd_base;
	else
		bdp++;

	if (bdp == fep->dirty_tx) {
		fep->tx_full = 1;
		netif_stop_queue(dev);
	}

	fep->cur_tx = bdp;

	spin_unlock_irqrestore(&fep->hw_lock, flags);

	return NETDEV_TX_OK;
}
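/* Note on the bounce buffers used above (illustration): FEC_ALIGNMENT
 * is 0xf on CONFIG_ARCH_MXC (16-byte alignment) and 0x3 elsewhere
 * (4-byte alignment). An skb whose data pointer fails the mask test is
 * copied into the per-descriptor tx_bounce[] buffer, and that copy is
 * what gets DMA-mapped instead of the original skb data.
 */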
static void
fec_timeout(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	dev->stats.tx_errors++;

	fec_restart(dev, fep->full_duplex);
	netif_wake_queue(dev);
}

static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct fec_enet_private *fep = netdev_priv(dev);
	uint int_events;
	irqreturn_t ret = IRQ_NONE;

	do {
		/* FEC_IEVENT is write-one-to-clear: writing the pending
		 * events back acknowledges them.
		 */
		int_events = readl(fep->hwp + FEC_IEVENT);
		writel(int_events, fep->hwp + FEC_IEVENT);

		if (int_events & FEC_ENET_RXF) {
			ret = IRQ_HANDLED;
			fec_enet_rx(dev);
		}

		/* Transmit OK, or non-fatal error. Update the buffer
		 * descriptors. FEC handles all errors, we just discover
		 * them as part of the transmit process.
		 */
		if (int_events & FEC_ENET_TXF) {
			ret = IRQ_HANDLED;
			fec_enet_tx(dev);
		}

		if (int_events & FEC_ENET_MII) {
			ret = IRQ_HANDLED;
			fec_enet_mii(dev);
		}
	} while (int_events);

	return ret;
}

static void
fec_enet_tx(struct net_device *dev)
{
	struct fec_enet_private *fep;
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb;

	fep = netdev_priv(dev);
	spin_lock_irq(&fep->hw_lock);
	bdp = fep->dirty_tx;

	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
		if (bdp == fep->cur_tx && fep->tx_full == 0)
			break;

		dma_unmap_single(&dev->dev, bdp->cbd_bufaddr,
				FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
		bdp->cbd_bufaddr = 0;

		skb = fep->tx_skbuff[fep->skb_dirty];
		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				   BD_ENET_TX_RL | BD_ENET_TX_UN |
				   BD_ENET_TX_CSL)) {
			dev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				dev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				dev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				dev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				dev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				dev->stats.tx_carrier_errors++;
		} else {
			dev->stats.tx_packets++;
		}

		if (status & BD_ENET_TX_READY)
			printk("HEY! Enet xmit interrupt and TX_READY.\n");

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			dev->stats.collisions++;

		/* Free the sk buffer associated with this last transmit */
		dev_kfree_skb_any(skb);
		fep->tx_skbuff[fep->skb_dirty] = NULL;
		fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;

		/* Update pointer to next buffer descriptor to be transmitted */
		if (status & BD_ENET_TX_WRAP)
			bdp = fep->tx_bd_base;
		else
			bdp++;

		/* Since we have freed up a buffer, the ring is no longer full
		 */
		if (fep->tx_full) {
			fep->tx_full = 0;
			if (netif_queue_stopped(dev))
				netif_wake_queue(dev);
		}
	}
	fep->dirty_tx = bdp;
	spin_unlock_irq(&fep->hw_lock);
}
/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static void
fec_enet_rx(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb;
	ushort	pkt_len;
	__u8 *data;

#ifdef CONFIG_M532x
	flush_cache_all();
#endif

	spin_lock_irq(&fep->hw_lock);

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {

		/* Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((status & BD_ENET_RX_LAST) == 0)
			printk("FEC ENET: rcv is not +last\n");

		if (!fep->opened)
			goto rx_processing_done;

		/* Check for errors. */
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			dev->stats.rx_errors++;
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
				/* Frame too long or too short. */
				dev->stats.rx_length_errors++;
			}
			if (status & BD_ENET_RX_NO)	/* Frame alignment */
				dev->stats.rx_frame_errors++;
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				dev->stats.rx_crc_errors++;
			if (status & BD_ENET_RX_OV)	/* FIFO overrun */
				dev->stats.rx_fifo_errors++;
		}

		/* Report late collisions as a frame error.
		 * On this error, the BD is closed, but we don't know what we
		 * have in the buffer. So, just drop this frame on the floor.
		 */
		if (status & BD_ENET_RX_CL) {
			dev->stats.rx_errors++;
			dev->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame. */
		dev->stats.rx_packets++;
		pkt_len = bdp->cbd_datlen;
		dev->stats.rx_bytes += pkt_len;
		data = (__u8 *)__va(bdp->cbd_bufaddr);

		dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen,
				DMA_FROM_DEVICE);

		/* This does 16 byte alignment, exactly what we need.
		 * The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		skb = dev_alloc_skb(pkt_len - 4 + NET_IP_ALIGN);

		if (unlikely(!skb)) {
			printk("%s: Memory squeeze, dropping packet.\n",
					dev->name);
			dev->stats.rx_dropped++;
		} else {
			skb_reserve(skb, NET_IP_ALIGN);
			skb_put(skb, pkt_len - 4);	/* Make room */
			skb_copy_to_linear_data(skb, data, pkt_len - 4);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
		}

		bdp->cbd_bufaddr = dma_map_single(NULL, data, bdp->cbd_datlen,
			DMA_FROM_DEVICE);
rx_processing_done:
		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;
		bdp->cbd_sc = status;

		/* Update BD pointer to next entry */
		if (status & BD_ENET_RX_WRAP)
			bdp = fep->rx_bd_base;
		else
			bdp++;

		/* Doing this here will keep the FEC running while we process
		 * incoming frames. On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		writel(0, fep->hwp + FEC_R_DES_ACTIVE);
	}
	fep->cur_rx = bdp;

	spin_unlock_irq(&fep->hw_lock);
}
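/* Note on the receive copy above (illustration): cbd_datlen includes
 * the 4-byte FCS, which is dropped before the frame is passed upstream,
 * and the NET_IP_ALIGN reserve (2 bytes on most platforms) offsets the
 * 14-byte Ethernet header so the IP header lands on a 4-byte boundary.
 */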
/* called from interrupt context */
static void
fec_enet_mii(struct net_device *dev)
{
	struct	fec_enet_private *fep;
	mii_list_t	*mip;

	fep = netdev_priv(dev);
	spin_lock_irq(&fep->mii_lock);

	if ((mip = mii_head) == NULL) {
		printk("MII and no head!\n");
		goto unlock;
	}

	if (mip->mii_func != NULL)
		(*(mip->mii_func))(readl(fep->hwp + FEC_MII_DATA), dev);

	mii_head = mip->mii_next;
	mip->mii_next = mii_free;
	mii_free = mip;

	if ((mip = mii_head) != NULL)
		writel(mip->mii_regval, fep->hwp + FEC_MII_DATA);

unlock:
	spin_unlock_irq(&fep->mii_lock);
}

static int
mii_queue(struct net_device *dev, int regval,
		void (*func)(uint, struct net_device *))
{
	struct fec_enet_private *fep;
	unsigned long flags;
	mii_list_t *mip;
	int retval;

	/* Add PHY address to register command */
	fep = netdev_priv(dev);
	spin_lock_irqsave(&fep->mii_lock, flags);

	regval |= fep->phy_addr << 23;
	retval = 0;

	if ((mip = mii_free) != NULL) {
		mii_free = mip->mii_next;
		mip->mii_regval = regval;
		mip->mii_func = func;
		mip->mii_next = NULL;
		if (mii_head) {
			mii_tail->mii_next = mip;
			mii_tail = mip;
		} else {
			mii_head = mii_tail = mip;
			writel(regval, fep->hwp + FEC_MII_DATA);
		}
	} else {
		retval = 1;
	}

	spin_unlock_irqrestore(&fep->mii_lock, flags);
	return retval;
}

static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c)
{
	if (!c)
		return;

	for (; c->mii_data != mk_mii_end; c++)
		mii_queue(dev, c->mii_data, c->funct);
}

static void mii_parse_sr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC);

	if (mii_reg & 0x0004)
		status |= PHY_STAT_LINK;
	if (mii_reg & 0x0010)
		status |= PHY_STAT_FAULT;
	if (mii_reg & 0x0020)
		status |= PHY_STAT_ANC;
	*s = status;
}

static void mii_parse_cr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_CONF_ANE | PHY_CONF_LOOP);

	if (mii_reg & 0x1000)
		status |= PHY_CONF_ANE;
	if (mii_reg & 0x4000)
		status |= PHY_CONF_LOOP;
	*s = status;
}

static void mii_parse_anar(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_CONF_SPMASK);

	if (mii_reg & 0x0020)
		status |= PHY_CONF_10HDX;
	if (mii_reg & 0x0040)
		status |= PHY_CONF_10FDX;
	if (mii_reg & 0x0080)
		status |= PHY_CONF_100HDX;
	if (mii_reg & 0x0100)
		status |= PHY_CONF_100FDX;
	*s = status;
}
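/* How the per-PHY tables below are consumed (illustration): each PHY
 * provides phy_cmd_t arrays terminated by mk_mii_end. mii_do_cmd()
 * walks one array and queues every entry with mii_queue(); when the
 * FEC completes a transaction it raises FEC_ENET_MII, and
 * fec_enet_mii() invokes the entry's callback (e.g. mii_parse_sr) with
 * the data read back, then starts the next queued command.
 */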
/* ------------------------------------------------------------------------- */
/* The Level One LXT970 is used by many boards */

#define MII_LXT970_MIRROR	16	/* Mirror register */
#define MII_LXT970_IER		17	/* Interrupt Enable Register */
#define MII_LXT970_ISR		18	/* Interrupt Status Register */
#define MII_LXT970_CONFIG	19	/* Configuration Register */
#define MII_LXT970_CSR		20	/* Chip Status Register */

static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_SPMASK);
	if (mii_reg & 0x0800) {
		if (mii_reg & 0x1000)
			status |= PHY_STAT_100FDX;
		else
			status |= PHY_STAT_100HDX;
	} else {
		if (mii_reg & 0x1000)
			status |= PHY_STAT_10FDX;
		else
			status |= PHY_STAT_10HDX;
	}
	*s = status;
}

static phy_cmd_t const phy_cmd_lxt970_config[] = {
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt970_startup[] = { /* enable interrupts */
		{ mk_mii_write(MII_LXT970_IER, 0x0002), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt970_ack_int[] = {
		/* read SR and ISR to acknowledge */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_LXT970_ISR), NULL },

		/* find out the current status */
		{ mk_mii_read(MII_LXT970_CSR), mii_parse_lxt970_csr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt970_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_LXT970_IER, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_lxt970 = {
	.id = 0x07810000,
	.name = "LXT970",
	.config = phy_cmd_lxt970_config,
	.startup = phy_cmd_lxt970_startup,
	.ack_int = phy_cmd_lxt970_ack_int,
	.shutdown = phy_cmd_lxt970_shutdown
};

/* ------------------------------------------------------------------------- */
/* The Level One LXT971 is used on some of my custom boards */

/* register definitions for the 971 */

#define MII_LXT971_PCR		16	/* Port Control Register */
#define MII_LXT971_SR2		17	/* Status Register 2 */
#define MII_LXT971_IER		18	/* Interrupt Enable Register */
#define MII_LXT971_ISR		19	/* Interrupt Status Register */
#define MII_LXT971_LCR		20	/* LED Control Register */
#define MII_LXT971_TCR		30	/* Transmit Control Register */

/*
 * I had some nice ideas of running the MDIO faster...
 * The 971 should support 8MHz and I tried it, but things acted really
 * weird, so 2.5 MHz ought to be enough for anyone...
 */

static void mii_parse_lxt971_sr2(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC);

	if (mii_reg & 0x0400) {
		fep->link = 1;
		status |= PHY_STAT_LINK;
	} else {
		fep->link = 0;
	}
	if (mii_reg & 0x0080)
		status |= PHY_STAT_ANC;
	if (mii_reg & 0x4000) {
		if (mii_reg & 0x0200)
			status |= PHY_STAT_100FDX;
		else
			status |= PHY_STAT_100HDX;
	} else {
		if (mii_reg & 0x0200)
			status |= PHY_STAT_10FDX;
		else
			status |= PHY_STAT_10HDX;
	}
	if (mii_reg & 0x0008)
		status |= PHY_STAT_FAULT;

	*s = status;
}

static phy_cmd_t const phy_cmd_lxt971_config[] = {
		/* limit to 10MBit because my prototype board
		 * doesn't work with 100. */
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt971_startup[] = { /* enable interrupts */
		{ mk_mii_write(MII_LXT971_IER, 0x00f2), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_write(MII_LXT971_LCR, 0xd422), NULL }, /* LED config */
		/* Somehow the 971 tells me that the link is down on the
		 * first read after power-up.
		 * Read here to get a valid value in ack_int. */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt971_ack_int[] = {
		/* acknowledge the int before reading status! */
		{ mk_mii_read(MII_LXT971_ISR), NULL },
		/* find out the current status */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt971_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_LXT971_IER, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_lxt971 = {
	.id = 0x0001378e,
	.name = "LXT971",
	.config = phy_cmd_lxt971_config,
	.startup = phy_cmd_lxt971_startup,
	.ack_int = phy_cmd_lxt971_ack_int,
	.shutdown = phy_cmd_lxt971_shutdown
};

/* ------------------------------------------------------------------------- */
/* The Quality Semiconductor QS6612 is used on the RPX CLLF */

/* register definitions */

#define MII_QS6612_MCR		17	/* Mode Control Register */
#define MII_QS6612_FTR		27	/* Factory Test Register */
#define MII_QS6612_MCO		28	/* Misc. Control Register */
#define MII_QS6612_ISR		29	/* Interrupt Source Register */
#define MII_QS6612_IMR		30	/* Interrupt Mask Register */
#define MII_QS6612_PCR		31	/* 100BaseTx PHY Control Reg. */

static void mii_parse_qs6612_pcr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_SPMASK);

	switch ((mii_reg >> 2) & 7) {
	case 1: status |= PHY_STAT_10HDX; break;
	case 2: status |= PHY_STAT_100HDX; break;
	case 5: status |= PHY_STAT_10FDX; break;
	case 6: status |= PHY_STAT_100FDX; break;
	}

	*s = status;
}

static phy_cmd_t const phy_cmd_qs6612_config[] = {
		/* The PHY powers up isolated on the RPX,
		 * so send a command to allow operation.
		 */
		{ mk_mii_write(MII_QS6612_PCR, 0x0dc0), NULL },

		/* parse cr and anar to get some info */
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_qs6612_startup[] = { /* enable interrupts */
		{ mk_mii_write(MII_QS6612_IMR, 0x003a), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_qs6612_ack_int[] = {
		/* we need to read ISR, SR and ANER to acknowledge */
		{ mk_mii_read(MII_QS6612_ISR), NULL },
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_REG_ANER), NULL },

		/* read pcr to get info */
		{ mk_mii_read(MII_QS6612_PCR), mii_parse_qs6612_pcr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_qs6612_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_QS6612_IMR, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_qs6612 = {
	.id = 0x00181440,
	.name = "QS6612",
	.config = phy_cmd_qs6612_config,
	.startup = phy_cmd_qs6612_startup,
	.ack_int = phy_cmd_qs6612_ack_int,
	.shutdown = phy_cmd_qs6612_shutdown
};

/* ------------------------------------------------------------------------- */
/* AMD AM79C874 phy */

/* register definitions for the 874 */

#define MII_AM79C874_MFR	16	/* Miscellaneous Feature Register */
#define MII_AM79C874_ICSR	17	/* Interrupt/Status Register */
#define MII_AM79C874_DR		18	/* Diagnostic Register */
#define MII_AM79C874_PMLR	19	/* Power and Loopback Register */
#define MII_AM79C874_MCR	21	/* ModeControl Register */
#define MII_AM79C874_DC		23	/* Disconnect Counter */
#define MII_AM79C874_REC	24	/* Receive Error Counter */

static void mii_parse_am79c874_dr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_ANC);

	if (mii_reg & 0x0080)
		status |= PHY_STAT_ANC;
	if (mii_reg & 0x0400)
		status |= ((mii_reg & 0x0800) ? PHY_STAT_100FDX : PHY_STAT_100HDX);
	else
		status |= ((mii_reg & 0x0800) ? PHY_STAT_10FDX : PHY_STAT_10HDX);
	*s = status;
}

static phy_cmd_t const phy_cmd_am79c874_config[] = {
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_am79c874_startup[] = { /* enable interrupts */
		{ mk_mii_write(MII_AM79C874_ICSR, 0xff00), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_am79c874_ack_int[] = {
		/* find out the current status */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
		/* we only need to read ISR to acknowledge */
		{ mk_mii_read(MII_AM79C874_ICSR), NULL },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_am79c874_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_AM79C874_ICSR, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_am79c874 = {
	.id = 0x00022561,
	.name = "AM79C874",
	.config = phy_cmd_am79c874_config,
	.startup = phy_cmd_am79c874_startup,
	.ack_int = phy_cmd_am79c874_ack_int,
	.shutdown = phy_cmd_am79c874_shutdown
};

/* ------------------------------------------------------------------------- */
/* Kendin KS8721BL phy */

/* register definitions for the 8721 */

#define MII_KS8721BL_RXERCR	21
#define MII_KS8721BL_ICSR	27
#define MII_KS8721BL_PHYCR	31

static phy_cmd_t const phy_cmd_ks8721bl_config[] = {
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_ks8721bl_startup[] = { /* enable interrupts */
		{ mk_mii_write(MII_KS8721BL_ICSR, 0xff00), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_ks8721bl_ack_int[] = {
		/* find out the current status */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		/* we only need to read ISR to acknowledge */
		{ mk_mii_read(MII_KS8721BL_ICSR), NULL },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_ks8721bl_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_KS8721BL_ICSR, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_ks8721bl = {
	.id = 0x00022161,
	.name = "KS8721BL",
	.config = phy_cmd_ks8721bl_config,
	.startup = phy_cmd_ks8721bl_startup,
	.ack_int = phy_cmd_ks8721bl_ack_int,
	.shutdown = phy_cmd_ks8721bl_shutdown
};

/* ------------------------------------------------------------------------- */
/* register definitions for the DP83848 */

#define MII_DP8384X_PHYSTST	16	/* PHY Status Register */

static void mii_parse_dp8384x_sr2(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);

	*s &= ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC);

	/* Link up */
	if (mii_reg & 0x0001) {
		fep->link = 1;
		*s |= PHY_STAT_LINK;
	} else
		fep->link = 0;
	/* Status of link */
	if (mii_reg & 0x0010)		/* Autonegotiation complete */
		*s |= PHY_STAT_ANC;
	if (mii_reg & 0x0002) {		/* 10 Mbps? */
		if (mii_reg & 0x0004)	/* Full Duplex? */
			*s |= PHY_STAT_10FDX;
		else
			*s |= PHY_STAT_10HDX;
	} else {			/* 100 Mbps? */
		if (mii_reg & 0x0004)	/* Full Duplex? */
			*s |= PHY_STAT_100FDX;
		else
			*s |= PHY_STAT_100HDX;
	}
	if (mii_reg & 0x0008)
		*s |= PHY_STAT_FAULT;
}

static phy_info_t phy_info_dp83848 = {
	0x020005c9,
	"DP83848",

	(const phy_cmd_t []) {  /* config */
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_read(MII_DP8384X_PHYSTST), mii_parse_dp8384x_sr2 },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* startup - enable interrupts */
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) { /* ack_int - never happens, no interrupt */
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* shutdown */
		{ mk_mii_end, }
	},
};

/* ------------------------------------------------------------------------- */

static phy_info_t const * const phy_info[] = {
	&phy_info_lxt970,
	&phy_info_lxt971,
	&phy_info_qs6612,
	&phy_info_am79c874,
	&phy_info_ks8721bl,
	&phy_info_dp83848,
	NULL
};
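/* How this table is matched (illustration): mii_discover_phy() reads
 * PHYIR1/PHYIR2 into fep->phy_id as (PHYIR1 << 16) | PHYIR2, and
 * mii_discover_phy3() compares each entry's id against phy_id >> 4,
 * i.e. with the 4-bit revision nibble stripped. A PHY reporting
 * 0x001378eN for any revision N therefore matches the LXT971 entry
 * 0x0001378e above.
 */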
/* ------------------------------------------------------------------------- */
#ifdef HAVE_mii_link_interrupt
static irqreturn_t
mii_link_interrupt(int irq, void *dev_id);

/*
 * This is specific to the MII interrupt setup of the M5272EVB.
 */
static void __inline__ fec_request_mii_intr(struct net_device *dev)
{
	if (request_irq(66, mii_link_interrupt, IRQF_DISABLED, "fec(MII)", dev) != 0)
		printk("FEC: Could not allocate fec(MII) IRQ(66)!\n");
}

static void __inline__ fec_disable_phy_intr(void)
{
	volatile unsigned long *icrp;
	icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1);
	*icrp = 0x08000000;
}

static void __inline__ fec_phy_ack_intr(void)
{
	volatile unsigned long *icrp;
	/* Acknowledge the interrupt */
	icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1);
	*icrp = 0x0d000000;
}
#endif

#ifdef CONFIG_M5272
static void __inline__ fec_get_mac(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	unsigned char *iap, tmpaddr[ETH_ALEN];

	if (FEC_FLASHMAC) {
		/*
		 * Get MAC address from FLASH.
		 * If it is all 1's or 0's, use the default.
		 */
		iap = (unsigned char *)FEC_FLASHMAC;
		if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
		    (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
			iap = fec_mac_default;
		if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
		    (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
			iap = fec_mac_default;
	} else {
		*((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW);
		*((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
		iap = &tmpaddr[0];
	}

	memcpy(dev->dev_addr, iap, ETH_ALEN);

	/* Adjust MAC if using default MAC address */
	if (iap == fec_mac_default)
		dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
}
#endif

/* ------------------------------------------------------------------------- */

static void mii_display_status(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);

	if (!fep->link && !fep->old_link) {
		/* Link is still down - don't print anything */
		return;
	}

	printk("%s: status: ", dev->name);

	if (!fep->link) {
		printk("link down");
	} else {
		printk("link up");

		switch (*s & PHY_STAT_SPMASK) {
		case PHY_STAT_100FDX: printk(", 100MBit Full Duplex"); break;
		case PHY_STAT_100HDX: printk(", 100MBit Half Duplex"); break;
		case PHY_STAT_10FDX: printk(", 10MBit Full Duplex"); break;
		case PHY_STAT_10HDX: printk(", 10MBit Half Duplex"); break;
		default:
			printk(", Unknown speed/duplex");
		}

		if (*s & PHY_STAT_ANC)
			printk(", auto-negotiation complete");
	}

	if (*s & PHY_STAT_FAULT)
		printk(", remote fault");

	printk(".\n");
}

static void mii_display_config(struct work_struct *work)
{
	struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task);
	struct net_device *dev = fep->netdev;
	uint status = fep->phy_status;

	/*
	 * When we get here, phy_task is already removed from
	 * the workqueue. It is thus safe to reuse it.
	 */
	fep->mii_phy_task_queued = 0;
	printk("%s: config: auto-negotiation ", dev->name);

	if (status & PHY_CONF_ANE)
		printk("on");
	else
		printk("off");

	if (status & PHY_CONF_100FDX)
		printk(", 100FDX");
	if (status & PHY_CONF_100HDX)
		printk(", 100HDX");
	if (status & PHY_CONF_10FDX)
		printk(", 10FDX");
	if (status & PHY_CONF_10HDX)
		printk(", 10HDX");
	if (!(status & PHY_CONF_SPMASK))
		printk(", No speed/duplex selected?");

	if (status & PHY_CONF_LOOP)
		printk(", loopback enabled");

	printk(".\n");

	fep->sequence_done = 1;
}

static void mii_relink(struct work_struct *work)
{
	struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task);
	struct net_device *dev = fep->netdev;
	int duplex;

	/*
	 * When we get here, phy_task is already removed from
	 * the workqueue. It is thus safe to reuse it.
	 */
	fep->mii_phy_task_queued = 0;
	fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
	mii_display_status(dev);
	fep->old_link = fep->link;

	if (fep->link) {
		duplex = 0;
		if (fep->phy_status
		    & (PHY_STAT_100FDX | PHY_STAT_10FDX))
			duplex = 1;
		fec_restart(dev, duplex);
	} else
		fec_stop(dev);
}

/* mii_queue_relink is called in interrupt context from mii_link_interrupt */
static void mii_queue_relink(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/*
	 * We cannot queue phy_task twice in the workqueue. It
	 * would cause an endless loop in the workqueue.
	 * Fortunately, if the last mii_relink entry has not yet been
	 * executed now, it will do the job for the current interrupt,
	 * which is just what we want.
	 */
	if (fep->mii_phy_task_queued)
		return;

	fep->mii_phy_task_queued = 1;
	INIT_WORK(&fep->phy_task, mii_relink);
	schedule_work(&fep->phy_task);
}

/* mii_queue_config is called in interrupt context from fec_enet_mii */
static void mii_queue_config(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	if (fep->mii_phy_task_queued)
		return;

	fep->mii_phy_task_queued = 1;
	INIT_WORK(&fep->phy_task, mii_display_config);
	schedule_work(&fep->phy_task);
}

phy_cmd_t const phy_cmd_relink[] = {
	{ mk_mii_read(MII_REG_CR), mii_queue_relink },
	{ mk_mii_end, }
	};
phy_cmd_t const phy_cmd_config[] = {
	{ mk_mii_read(MII_REG_CR), mii_queue_config },
	{ mk_mii_end, }
	};

/* Read remainder of PHY ID. */
static void
mii_discover_phy3(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep;
	int i;

	fep = netdev_priv(dev);
	fep->phy_id |= (mii_reg & 0xffff);
	printk("fec: PHY @ 0x%x, ID 0x%08x", fep->phy_addr, fep->phy_id);

	for (i = 0; phy_info[i]; i++) {
		if (phy_info[i]->id == (fep->phy_id >> 4))
			break;
	}

	if (phy_info[i])
		printk(" -- %s\n", phy_info[i]->name);
	else
		printk(" -- unknown PHY!\n");

	fep->phy = phy_info[i];
	fep->phy_id_done = 1;
}
/* Scan all of the MII PHY addresses looking for someone to respond
 * with a valid ID. This usually happens quickly.
 */
static void
mii_discover_phy(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep;
	uint phytype;

	fep = netdev_priv(dev);

	if (fep->phy_addr < 32) {
		if ((phytype = (mii_reg & 0xffff)) != 0xffff && phytype != 0) {

			/* Got first part of ID, now get remainder */
			fep->phy_id = phytype << 16;
			mii_queue(dev, mk_mii_read(MII_REG_PHYIR2),
							mii_discover_phy3);
		} else {
			fep->phy_addr++;
			mii_queue(dev, mk_mii_read(MII_REG_PHYIR1),
							mii_discover_phy);
		}
	} else {
		printk("FEC: No PHY device found.\n");
		/* Disable external MII interface */
		writel(0, fep->hwp + FEC_MII_SPEED);
		fep->phy_speed = 0;
#ifdef HAVE_mii_link_interrupt
		fec_disable_phy_intr();
#endif
	}
}

/* This interrupt occurs when the PHY detects a link change */
#ifdef HAVE_mii_link_interrupt
static irqreturn_t
mii_link_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct fec_enet_private *fep = netdev_priv(dev);

	fec_phy_ack_intr();

	mii_do_cmd(dev, fep->phy->ack_int);
	mii_do_cmd(dev, phy_cmd_relink);  /* restart and display status */

	return IRQ_HANDLED;
}
#endif

static void fec_enet_free_buffers(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int i;
	struct sk_buff *skb;
	struct bufdesc	*bdp;

	bdp = fep->rx_bd_base;
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = fep->rx_skbuff[i];

		if (bdp->cbd_bufaddr)
			dma_unmap_single(&dev->dev, bdp->cbd_bufaddr,
					FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
		if (skb)
			dev_kfree_skb(skb);
		bdp++;
	}

	bdp = fep->tx_bd_base;
	for (i = 0; i < TX_RING_SIZE; i++)
		kfree(fep->tx_bounce[i]);
}

static int fec_enet_alloc_buffers(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int i;
	struct sk_buff *skb;
	struct bufdesc	*bdp;

	bdp = fep->rx_bd_base;
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);
		if (!skb) {
			fec_enet_free_buffers(dev);
			return -ENOMEM;
		}
		fep->rx_skbuff[i] = skb;

		bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data,
				FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
		bdp->cbd_sc = BD_ENET_RX_EMPTY;
		bdp++;
	}

	/* Set the last buffer to wrap. */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	bdp = fep->tx_bd_base;
	for (i = 0; i < TX_RING_SIZE; i++) {
		fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);

		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}

	/* Set the last buffer to wrap. */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	return 0;
}

static int
fec_enet_open(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int ret;

	/* I should reset the ring buffers here, but I don't yet know
	 * a simple way to do that.
	 */

	ret = fec_enet_alloc_buffers(dev);
	if (ret)
		return ret;

	fep->sequence_done = 0;
	fep->link = 0;

	fec_restart(dev, 1);

	if (fep->phy) {
		mii_do_cmd(dev, fep->phy->ack_int);
		mii_do_cmd(dev, fep->phy->config);
		mii_do_cmd(dev, phy_cmd_config);  /* display configuration */

		/* Poll until the PHY tells us its configuration
		 * (not link state).
		 * Request is initiated by mii_do_cmd above, but answer
		 * comes by interrupt.
		 * This should take about 25 usec per register at 2.5 MHz,
		 * and we read approximately 5 registers.
		 */
		while (!fep->sequence_done)
			schedule();

		mii_do_cmd(dev, fep->phy->startup);
	}

	/* Set the initial link state to true. A lot of hardware
	 * based on this device does not implement a PHY interrupt,
	 * so we are never notified of link change.
	 */
	fep->link = 1;

	netif_start_queue(dev);
	fep->opened = 1;
	return 0;
}

static int
fec_enet_close(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/* Don't know what to do yet. */
	fep->opened = 0;
	netif_stop_queue(dev);
	fec_stop(dev);

	fec_enet_free_buffers(dev);

	return 0;
}

/* Set or clear the multicast filter for this adaptor.
 * Skeleton taken from sunlance driver.
 * The CPM Ethernet implementation allows Multicast as well as individual
 * MAC address filtering. Some of the drivers check to make sure it is
 * a group multicast address, and discard those that are not. I guess I
 * will do the same for now, but just remove the test if you want
 * individual filtering as well (do the upper net layers want or support
 * this kind of feature?).
 */

#define HASH_BITS	6		/* #bits in hash */
#define CRC32_POLY	0xEDB88320

static void set_multicast_list(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct dev_mc_list *dmi;
	unsigned int i, j, bit, data, crc, tmp;
	unsigned char hash;

	if (dev->flags & IFF_PROMISC) {
		tmp = readl(fep->hwp + FEC_R_CNTRL);
		tmp |= 0x8;
		writel(tmp, fep->hwp + FEC_R_CNTRL);
		return;
	}

	tmp = readl(fep->hwp + FEC_R_CNTRL);
	tmp &= ~0x8;
	writel(tmp, fep->hwp + FEC_R_CNTRL);

	if (dev->flags & IFF_ALLMULTI) {
		/* Catch all multicast addresses, so set the
		 * filter to all 1's
		 */
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

		return;
	}

	/* Clear filter and add the addresses in hash register
	 */
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

	dmi = dev->mc_list;

	for (j = 0; j < dev->mc_count; j++, dmi = dmi->next) {
		/* Only support group multicast for now */
		if (!(dmi->dmi_addr[0] & 1))
			continue;

		/* calculate crc32 value of mac address */
		crc = 0xffffffff;

		for (i = 0; i < dmi->dmi_addrlen; i++) {
			data = dmi->dmi_addr[i];
			for (bit = 0; bit < 8; bit++, data >>= 1) {
				crc = (crc >> 1) ^
				(((crc ^ data) & 1) ? CRC32_POLY : 0);
			}
		}

		/* only upper 6 bits (HASH_BITS) are used
		 * which point to specific bit in the hash registers
		 */
		hash = (crc >> (32 - HASH_BITS)) & 0x3f;

		if (hash > 31) {
			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
			tmp |= 1 << (hash - 32);
			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		} else {
			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
			tmp |= 1 << hash;
			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
		}
	}
}
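/* Worked example for the hash above (illustration): HASH_BITS is 6, so
 * the top six CRC bits select one of 64 filter bits. If those bits
 * were, say, 0b100101 (37), the driver sets bit 37 - 32 = 5 in
 * FEC_GRP_HASH_TABLE_HIGH; a value of 31 or less selects a bit in
 * FEC_GRP_HASH_TABLE_LOW instead.
 */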
/* Set a MAC address change in the hardware. */
static int
fec_set_mac_address(struct net_device *dev, void *p)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	writel(dev->dev_addr[3] | (dev->dev_addr[2] << 8) |
		(dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24),
		fep->hwp + FEC_ADDR_LOW);
	writel((dev->dev_addr[5] << 16) | (dev->dev_addr[4] << 24),
		fep->hwp + FEC_ADDR_HIGH);
	return 0;
}

static const struct net_device_ops fec_netdev_ops = {
	.ndo_open		= fec_enet_open,
	.ndo_stop		= fec_enet_close,
	.ndo_start_xmit		= fec_enet_start_xmit,
	.ndo_set_multicast_list	= set_multicast_list,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= fec_timeout,
	.ndo_set_mac_address	= fec_set_mac_address,
};

 /*
  * XXX:  We need to clean up on failure exits here.
  *
  * index is only used in legacy code
  */
int __init fec_enet_init(struct net_device *dev, int index)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct bufdesc *cbd_base;
	int i;

	/* Allocate memory for buffer descriptors. */
	cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
			GFP_KERNEL);
	if (!cbd_base) {
		printk("FEC: allocate descriptor memory failed?\n");
		return -ENOMEM;
	}

	spin_lock_init(&fep->hw_lock);
	spin_lock_init(&fep->mii_lock);

	fep->index = index;
	fep->hwp = (void __iomem *)dev->base_addr;
	fep->netdev = dev;

	/* Set the Ethernet address */
#ifdef CONFIG_M5272
	fec_get_mac(dev);
#else
	{
		unsigned long l;
		l = readl(fep->hwp + FEC_ADDR_LOW);
		dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24);
		dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16);
		dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8);
		dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0);
		l = readl(fep->hwp + FEC_ADDR_HIGH);
		dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24);
		dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16);
	}
#endif

	/* Set receive and transmit descriptor base. */
	fep->rx_bd_base = cbd_base;
	fep->tx_bd_base = cbd_base + RX_RING_SIZE;

#ifdef HAVE_mii_link_interrupt
	fec_request_mii_intr(dev);
#endif
	/* The FEC Ethernet specific entries in the device structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->netdev_ops = &fec_netdev_ops;

	for (i = 0; i < NMII - 1; i++)
		mii_cmds[i].mii_next = &mii_cmds[i + 1];
	mii_free = mii_cmds;

	/* Set MII speed to 2.5 MHz */
	fep->phy_speed = ((((clk_get_rate(fep->clk) / 2 + 4999999)
					/ 2500000) / 2) & 0x3F) << 1;
	fec_restart(dev, 0);

	/* Queue up command to detect the PHY and initialize the
	 * remainder of the interface.
	 */
	fep->phy_id_done = 0;
	fep->phy_addr = 0;
	mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy);

	return 0;
}
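/* Worked example of the MII speed math above (illustration, for a
 * hypothetical 50 MHz fec_clk): (25000000 + 4999999) / 2500000 = 11,
 * then 11 / 2 = 5, and 5 << 1 = 0x0A is the value written to
 * FEC_MII_SPEED, aiming to keep MDC at or below the 2.5 MHz that the
 * MII specification allows.
 */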
/* This function is called to start or restart the FEC during a link
 * change. This only happens when switching between half and full
 * duplex.
 */
static void
fec_restart(struct net_device *dev, int duplex)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct bufdesc *bdp;
	int i;

	/* Whack a reset. We should wait for this. */
	writel(1, fep->hwp + FEC_ECNTRL);
	udelay(10);

	/* Clear any outstanding interrupt. */
	writel(0xffc00000, fep->hwp + FEC_IEVENT);

	/* Reset all multicast. */
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
#ifndef CONFIG_M5272
	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif

	/* Set maximum receive buffer size. */
	writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);

	/* Set receive and transmit descriptor base. */
	writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
	writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
			fep->hwp + FEC_X_DES_START);

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->cur_rx = fep->rx_bd_base;

	/* Reset SKB transmit buffers. */
	fep->skb_cur = fep->skb_dirty = 0;
	for (i = 0; i <= TX_RING_MOD_MASK; i++) {
		if (fep->tx_skbuff[i]) {
			dev_kfree_skb_any(fep->tx_skbuff[i]);
			fep->tx_skbuff[i] = NULL;
		}
	}

	/* Initialize the receive buffer descriptors. */
	bdp = fep->rx_bd_base;
	for (i = 0; i < RX_RING_SIZE; i++) {

		/* Initialize the BD for every fragment in the page. */
		bdp->cbd_sc = BD_ENET_RX_EMPTY;
		bdp++;
	}

	/* Set the last buffer to wrap */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* ...and the same for transmit */
	bdp = fep->tx_bd_base;
	for (i = 0; i < TX_RING_SIZE; i++) {

		/* Initialize the BD for every fragment in the page. */
		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}

	/* Set the last buffer to wrap */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* Enable MII mode */
	if (duplex) {
		/* MII enable / FD enable */
		writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL);
		writel(0x04, fep->hwp + FEC_X_CNTRL);
	} else {
		/* MII enable / No Rcv on Xmit */
		writel(OPT_FRAME_SIZE | 0x06, fep->hwp + FEC_R_CNTRL);
		writel(0x0, fep->hwp + FEC_X_CNTRL);
	}
	fep->full_duplex = duplex;

	/* Set MII speed */
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

	/* And last, enable the transmit and receive processing */
	writel(2, fep->hwp + FEC_ECNTRL);
	writel(0, fep->hwp + FEC_R_DES_ACTIVE);

	/* Enable interrupts we wish to service */
	writel(FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII,
			fep->hwp + FEC_IMASK);
}
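/* Notes on the control bits above (illustration, from the FEC manuals
 * as far as I can tell): in FEC_R_CNTRL bit 2 (0x04) is MII_MODE and
 * bit 1 (0x02) is DRT, "disable receive on transmit", which only makes
 * sense in half duplex; in FEC_X_CNTRL bit 2 (0x04) is FDEN, full
 * duplex enable. Hence the 0x04/0x04 pair for full duplex and
 * 0x06/0x00 for half duplex above.
 */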
static void
fec_stop(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/* We cannot expect a graceful transmit stop without link !!! */
	if (fep->link) {
		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
		udelay(10);
		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
			printk("fec_stop : Graceful transmit stop did not complete !\n");
	}

	/* Whack a reset. We should wait for this. */
	writel(1, fep->hwp + FEC_ECNTRL);
	udelay(10);

	/* Clear outstanding MII command interrupts. */
	writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);

	writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
}

static int __devinit
fec_probe(struct platform_device *pdev)
{
	struct fec_enet_private *fep;
	struct net_device *ndev;
	int i, irq, ret = 0;
	struct resource *r;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r)
		return -ENXIO;

	r = request_mem_region(r->start, resource_size(r), pdev->name);
	if (!r)
		return -EBUSY;

	/* Init network device */
	ndev = alloc_etherdev(sizeof(struct fec_enet_private));
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* setup board info structure */
	fep = netdev_priv(ndev);
	memset(fep, 0, sizeof(*fep));

	ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r));

	if (!ndev->base_addr) {
		ret = -ENOMEM;
		goto failed_ioremap;
	}

	platform_set_drvdata(pdev, ndev);

	/* This device has up to three irqs on some platforms */
	for (i = 0; i < 3; i++) {
		irq = platform_get_irq(pdev, i);
		if (i && irq < 0)
			break;
		ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
		if (ret) {
			/* Release only the IRQs that were actually requested */
			while (--i >= 0) {
				irq = platform_get_irq(pdev, i);
				free_irq(irq, ndev);
			}
			goto failed_irq;
		}
	}

	fep->clk = clk_get(&pdev->dev, "fec_clk");
	if (IS_ERR(fep->clk)) {
		ret = PTR_ERR(fep->clk);
		goto failed_clk;
	}
	clk_enable(fep->clk);

	ret = fec_enet_init(ndev, 0);
	if (ret)
		goto failed_init;

	ret = register_netdev(ndev);
	if (ret)
		goto failed_register;

	return 0;

failed_register:
failed_init:
	clk_disable(fep->clk);
	clk_put(fep->clk);
failed_clk:
	for (i = 0; i < 3; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq > 0)
			free_irq(irq, ndev);
	}
failed_irq:
	iounmap((void __iomem *)ndev->base_addr);
failed_ioremap:
	free_netdev(ndev);

	return ret;
}

static int __devexit
fec_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	platform_set_drvdata(pdev, NULL);

	fec_stop(ndev);
	clk_disable(fep->clk);
	clk_put(fep->clk);
	unregister_netdev(ndev);
	iounmap((void __iomem *)ndev->base_addr);
	free_netdev(ndev);
	return 0;
}

static int
fec_suspend(struct platform_device *dev, pm_message_t state)
{
	struct net_device *ndev = platform_get_drvdata(dev);
	struct fec_enet_private *fep;

	if (ndev) {
		fep = netdev_priv(ndev);
		if (netif_running(ndev)) {
			netif_device_detach(ndev);
			fec_stop(ndev);
		}
	}
	return 0;
}

static int
fec_resume(struct platform_device *dev)
{
	struct net_device *ndev = platform_get_drvdata(dev);

	if (ndev) {
		if (netif_running(ndev)) {
			fec_enet_init(ndev, 0);
			netif_device_attach(ndev);
		}
	}
	return 0;
}

static struct platform_driver fec_driver = {
	.driver	= {
		.name	= "fec",
		.owner	= THIS_MODULE,
	},
	.probe	= fec_probe,
	.remove	= __devexit_p(fec_drv_remove),
	.suspend	= fec_suspend,
	.resume	= fec_resume,
};
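/*
 * For reference, a minimal sketch of the board-side registration this
 * driver binds against (hypothetical address and IRQ; real values are
 * platform specific). fec_probe() above expects one memory resource,
 * up to three IRQs, and a clock named "fec_clk":
 *
 *	static struct resource fec_resources[] = {
 *		{
 *			.start	= 0x83fec000,
 *			.end	= 0x83fecfff,
 *			.flags	= IORESOURCE_MEM,
 *		}, {
 *			.start	= 87,
 *			.end	= 87,
 *			.flags	= IORESOURCE_IRQ,
 *		},
 *	};
 *
 *	static struct platform_device fec_device = {
 *		.name		= "fec",
 *		.id		= 0,
 *		.resource	= fec_resources,
 *		.num_resources	= ARRAY_SIZE(fec_resources),
 *	};
 *
 *	platform_device_register(&fec_device);
 */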
static int __init
fec_enet_module_init(void)
{
	printk(KERN_INFO "FEC Ethernet Driver\n");

	return platform_driver_register(&fec_driver);
}

static void __exit
fec_enet_cleanup(void)
{
	platform_driver_unregister(&fec_driver);
}

module_exit(fec_enet_cleanup);
module_init(fec_enet_module_init);

MODULE_LICENSE("GPL");