/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * Right now, I am very wasteful with the buffers.  I allocate memory
 * pages and then divide them into 2K frame buffers.  This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>

#include <asm/cacheflush.h>

#ifndef CONFIG_ARCH_MXC
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#endif

#include "fec.h"

#ifdef CONFIG_ARCH_MXC
#include <mach/hardware.h>
#define FEC_ALIGNMENT	0xf
#else
#define FEC_ALIGNMENT	0x3
#endif

/*
 * Define the fixed address of the FEC hardware.
 */
#if defined(CONFIG_M5272)
#define HAVE_mii_link_interrupt

static unsigned char	fec_mac_default[] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/*
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero then assume it is the address to get the MAC from.
 */
#if defined(CONFIG_NETtel)
#define	FEC_FLASHMAC	0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define	FEC_FLASHMAC	0xf0006000
#elif defined(CONFIG_CANCam)
#define	FEC_FLASHMAC	0xf0020000
#elif defined(CONFIG_M5272C3)
#define	FEC_FLASHMAC	(0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define	FEC_FLASHMAC	0xffc0406b
#else
#define	FEC_FLASHMAC	0
#endif
#endif /* CONFIG_M5272 */

/* Forward declarations of some structures to support different PHYs */

typedef struct {
	uint mii_data;
	void (*funct)(uint mii_reg, struct net_device *dev);
} phy_cmd_t;

typedef struct {
	uint id;
	char *name;

	const phy_cmd_t *config;
	const phy_cmd_t *startup;
	const phy_cmd_t *ack_int;
	const phy_cmd_t *shutdown;
} phy_info_t;

/* The number of Tx and Rx buffers.  These are allocated from the page
 * pool.  The code may assume these are power of two, so it is best
 * to keep them that size.
 * We don't need to allocate pages for the transmitter.  We just use
 * the skbuffer directly.
 */
#define FEC_ENET_RX_PAGES	8
#define FEC_ENET_RX_FRSIZE	2048
#define FEC_ENET_RX_FRPPG	(PAGE_SIZE / FEC_ENET_RX_FRSIZE)
#define RX_RING_SIZE		(FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
#define FEC_ENET_TX_FRSIZE	2048
#define FEC_ENET_TX_FRPPG	(PAGE_SIZE / FEC_ENET_TX_FRSIZE)
#define TX_RING_SIZE		16	/* Must be power of two */
#define TX_RING_MOD_MASK	15	/*   for this to work */

#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
#error "FEC: descriptor ring size constants too large"
#endif
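
/*
 * Worked example, assuming 4 kB pages: FEC_ENET_RX_FRPPG is
 * 4096 / 2048 = 2 frames per page, so RX_RING_SIZE is 2 * 8 = 16
 * descriptors, and the sanity check above amounts to
 * (16 + 16) * 8 = 256 bytes of descriptor space, comfortably
 * inside one page.
 */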

/* Interrupt events/masks. */
#define FEC_ENET_HBERR	((uint)0x80000000)	/* Heartbeat error */
#define FEC_ENET_BABR	((uint)0x40000000)	/* Babbling receiver */
#define FEC_ENET_BABT	((uint)0x20000000)	/* Babbling transmitter */
#define FEC_ENET_GRA	((uint)0x10000000)	/* Graceful stop complete */
#define FEC_ENET_TXF	((uint)0x08000000)	/* Full frame transmitted */
#define FEC_ENET_TXB	((uint)0x04000000)	/* A buffer was transmitted */
#define FEC_ENET_RXF	((uint)0x02000000)	/* Full frame received */
#define FEC_ENET_RXB	((uint)0x01000000)	/* A buffer was received */
#define FEC_ENET_MII	((uint)0x00800000)	/* MII interrupt */
#define FEC_ENET_EBERR	((uint)0x00400000)	/* SDMA bus error */

/* The FEC stores dest/src/type, data, and checksum for receive packets.
 */
#define PKT_MAXBUF_SIZE		1518
#define PKT_MINBUF_SIZE		64
#define PKT_MAXBLR_SIZE		1520

/*
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
#define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
#else
#define	OPT_FRAME_SIZE	0
#endif

/* The FEC buffer descriptors track the ring buffers.  The rx_bd_base and
 * tx_bd_base always point to the base of the buffer descriptors.  The
 * cur_rx and cur_tx point to the currently available buffer.
 * The dirty_tx tracks the current buffer that is being sent by the
 * controller.  The cur_tx and dirty_tx are equal under both completely
 * empty and completely full conditions.  The empty/ready indicator in
 * the buffer descriptor determines the actual condition.
 */
struct fec_enet_private {
	/* Hardware registers of the FEC device */
	void __iomem *hwp;

	struct net_device *netdev;

	struct clk *clk;

	/* The saved address of a sent-in-place packet/buffer, for kfree_skb(). */
	unsigned char *tx_bounce[TX_RING_SIZE];
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	ushort	skb_cur;
	ushort	skb_dirty;

	/* CPM dual port RAM relative addresses */
	dma_addr_t	bd_dma;
	/* Address of Rx and Tx buffers */
	struct bufdesc	*rx_bd_base;
	struct bufdesc	*tx_bd_base;
	/* The next free ring entry */
	struct bufdesc	*cur_rx, *cur_tx;
	/* The ring entries to be free()ed */
	struct bufdesc	*dirty_tx;

	uint	tx_full;
	/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
	spinlock_t hw_lock;
	/* hold while accessing the mii_list_t elements */
	spinlock_t mii_lock;

	uint	phy_id;
	uint	phy_id_done;
	uint	phy_status;
	uint	phy_speed;
	phy_info_t const	*phy;
	struct work_struct phy_task;

	uint	sequence_done;
	uint	mii_phy_task_queued;

	uint	phy_addr;

	int	index;
	int	opened;
	int	link;
	int	old_link;
	int	full_duplex;
};
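
/*
 * Illustration of the empty/full ambiguity described above: when
 * cur_tx == dirty_tx, the ready bit of the descriptor at dirty_tx
 * decides the case.  If BD_ENET_TX_READY is still set, every entry is
 * owned by the controller and the ring is full (tx_full is set by
 * fec_enet_start_xmit()); if it is clear, every entry has been
 * reclaimed and the ring is empty.
 */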

static void fec_enet_mii(struct net_device *dev);
static irqreturn_t fec_enet_interrupt(int irq, void *dev_id);
static void fec_enet_tx(struct net_device *dev);
static void fec_enet_rx(struct net_device *dev);
static int fec_enet_close(struct net_device *dev);
static void fec_restart(struct net_device *dev, int duplex);
static void fec_stop(struct net_device *dev);


/* MII processing.  We keep this as simple as possible.  Requests are
 * placed on the list (if there is room).  When the request is finished
 * by the MII, an optional function may be called.
 */
typedef struct mii_list {
	uint	mii_regval;
	void	(*mii_func)(uint val, struct net_device *dev);
	struct	mii_list *mii_next;
} mii_list_t;

#define		NMII	20
static mii_list_t	mii_cmds[NMII];
static mii_list_t	*mii_free;
static mii_list_t	*mii_head;
static mii_list_t	*mii_tail;

static int	mii_queue(struct net_device *dev, int request,
		void (*func)(uint, struct net_device *));

/* Make MII read/write commands for the FEC */
#define mk_mii_read(REG)	(0x60020000 | ((REG & 0x1f) << 18))
#define mk_mii_write(REG, VAL)	(0x50020000 | ((REG & 0x1f) << 18) | \
						(VAL & 0xffff))
#define mk_mii_end	0
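
/*
 * The words built above follow the standard MII management frame
 * layout; the PHY address field is OR'd in later (fep->phy_addr << 23
 * in mii_queue_unlocked()).  Bits 31-30 are the start delimiter (01),
 * 29-28 the opcode (10 = read, 01 = write), 27-23 the PHY address,
 * 22-18 the register address, 17-16 the turnaround (10), and 15-0 the
 * data.  For example, mk_mii_read(1) evaluates to 0x60060000: a read
 * of register 1 (MII_REG_SR) at PHY address 0.
 */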

/* Transmitter timeout */
#define TX_TIMEOUT	(2 * HZ)

/* Register definitions for the PHY */

#define MII_REG_CR	0	/* Control Register */
#define MII_REG_SR	1	/* Status Register */
#define MII_REG_PHYIR1	2	/* PHY Identification Register 1 */
#define MII_REG_PHYIR2	3	/* PHY Identification Register 2 */
#define MII_REG_ANAR	4	/* A-N Advertisement Register */
#define MII_REG_ANLPAR	5	/* A-N Link Partner Ability Register */
#define MII_REG_ANER	6	/* A-N Expansion Register */
#define MII_REG_ANNPTR	7	/* A-N Next Page Transmit Register */
#define MII_REG_ANLPRNPR	8	/* A-N Link Partner Received Next Page Reg. */

/* values for phy_status */

#define PHY_CONF_ANE	0x0001	/* 1 auto-negotiation enabled */
#define PHY_CONF_LOOP	0x0002	/* 1 loopback mode enabled */
#define PHY_CONF_SPMASK	0x00f0	/* mask for speed */
#define PHY_CONF_10HDX	0x0010	/* 10 Mbit half duplex supported */
#define PHY_CONF_10FDX	0x0020	/* 10 Mbit full duplex supported */
#define PHY_CONF_100HDX	0x0040	/* 100 Mbit half duplex supported */
#define PHY_CONF_100FDX	0x0080	/* 100 Mbit full duplex supported */

#define PHY_STAT_LINK	0x0100	/* 1 up - 0 down */
#define PHY_STAT_FAULT	0x0200	/* 1 remote fault */
#define PHY_STAT_ANC	0x0400	/* 1 auto-negotiation complete */
#define PHY_STAT_SPMASK	0xf000	/* mask for speed */
#define PHY_STAT_10HDX	0x1000	/* 10 Mbit half duplex selected */
#define PHY_STAT_10FDX	0x2000	/* 10 Mbit full duplex selected */
#define PHY_STAT_100HDX	0x4000	/* 100 Mbit half duplex selected */
#define PHY_STAT_100FDX	0x8000	/* 100 Mbit full duplex selected */


static int
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct bufdesc *bdp;
	void *bufaddr;
	unsigned short	status;
	unsigned long flags;

	if (!fep->link) {
		/* Link is down or autonegotiation is in progress. */
		return NETDEV_TX_BUSY;
	}

	spin_lock_irqsave(&fep->hw_lock, flags);
	/* Fill in a Tx ring entry */
	bdp = fep->cur_tx;

	status = bdp->cbd_sc;

	if (status & BD_ENET_TX_READY) {
		/* Oops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since the queue should be stopped.
		 */
		printk("%s: tx queue full!\n", dev->name);
		spin_unlock_irqrestore(&fep->hw_lock, flags);
		return NETDEV_TX_BUSY;
	}

	/* Clear all of the status flags */
	status &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer */
	bufaddr = skb->data;
	bdp->cbd_datlen = skb->len;

	/*
	 * On some FEC implementations data must be aligned on
	 * 4-byte boundaries. Use bounce buffers to copy data
	 * and get it aligned. Ugh.
	 */
	if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
		unsigned int index;
		index = bdp - fep->tx_bd_base;
		memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len);
		bufaddr = fep->tx_bounce[index];
	}

	/* Save skb pointer */
	fep->tx_skbuff[fep->skb_cur] = skb;

	dev->stats.tx_bytes += skb->len;
	fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;

	/* Push the data cache so the CPM does not get stale memory
	 * data.
	 */
	bdp->cbd_bufaddr = dma_map_single(&dev->dev, bufaddr,
			FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);

	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
			| BD_ENET_TX_LAST | BD_ENET_TX_TC);
	bdp->cbd_sc = status;

	dev->trans_start = jiffies;

	/* Trigger transmission start */
	writel(0, fep->hwp + FEC_X_DES_ACTIVE);

	/* If this was the last BD in the ring, start at the beginning again. */
	if (status & BD_ENET_TX_WRAP)
		bdp = fep->tx_bd_base;
	else
		bdp++;

	if (bdp == fep->dirty_tx) {
		fep->tx_full = 1;
		netif_stop_queue(dev);
	}

	fep->cur_tx = bdp;

	spin_unlock_irqrestore(&fep->hw_lock, flags);

	return NETDEV_TX_OK;
}
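
/*
 * A note on the bounce buffers used above: FEC_ALIGNMENT is 0xf on
 * ARCH_MXC parts and 0x3 elsewhere (see the top of this file), so the
 * memcpy() only happens when skb->data is not 16- or 4-byte aligned,
 * respectively.  The pre-allocated tx_bounce[] buffer for the ring
 * slot is used instead, at the cost of one copy per misaligned frame.
 */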

static void
fec_timeout(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	dev->stats.tx_errors++;

	fec_restart(dev, fep->full_duplex);
	netif_wake_queue(dev);
}

static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct fec_enet_private *fep = netdev_priv(dev);
	uint	int_events;
	irqreturn_t ret = IRQ_NONE;

	do {
		int_events = readl(fep->hwp + FEC_IEVENT);
		writel(int_events, fep->hwp + FEC_IEVENT);

		if (int_events & FEC_ENET_RXF) {
			ret = IRQ_HANDLED;
			fec_enet_rx(dev);
		}

		/* Transmit OK, or non-fatal error. Update the buffer
		 * descriptors. FEC handles all errors, we just discover
		 * them as part of the transmit process.
		 */
		if (int_events & FEC_ENET_TXF) {
			ret = IRQ_HANDLED;
			fec_enet_tx(dev);
		}

		if (int_events & FEC_ENET_MII) {
			ret = IRQ_HANDLED;
			fec_enet_mii(dev);
		}

	} while (int_events);

	return ret;
}


static void
fec_enet_tx(struct net_device *dev)
{
	struct fec_enet_private *fep;
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb;

	fep = netdev_priv(dev);
	spin_lock(&fep->hw_lock);
	bdp = fep->dirty_tx;

	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
		if (bdp == fep->cur_tx && fep->tx_full == 0)
			break;

		dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
		bdp->cbd_bufaddr = 0;

		skb = fep->tx_skbuff[fep->skb_dirty];
		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				   BD_ENET_TX_RL | BD_ENET_TX_UN |
				   BD_ENET_TX_CSL)) {
			dev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				dev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				dev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				dev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				dev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				dev->stats.tx_carrier_errors++;
		} else {
			dev->stats.tx_packets++;
		}

		if (status & BD_ENET_TX_READY)
			printk("HEY! Enet xmit interrupt and TX_READY.\n");

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			dev->stats.collisions++;

		/* Free the sk buffer associated with this last transmit */
		dev_kfree_skb_any(skb);
		fep->tx_skbuff[fep->skb_dirty] = NULL;
		fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;

		/* Update pointer to next buffer descriptor to be transmitted */
		if (status & BD_ENET_TX_WRAP)
			bdp = fep->tx_bd_base;
		else
			bdp++;

		/* Since we have freed up a buffer, the ring is no longer full
		 */
		if (fep->tx_full) {
			fep->tx_full = 0;
			if (netif_queue_stopped(dev))
				netif_wake_queue(dev);
		}
	}
	fep->dirty_tx = bdp;
	spin_unlock(&fep->hw_lock);
}
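
/*
 * A note on the FEC_X_DES_ACTIVE/FEC_R_DES_ACTIVE writes in this
 * driver: the value written is ignored by the hardware; any write
 * simply tells the controller that descriptors in the corresponding
 * ring have been made ready (or empty) again, so it resumes scanning
 * that ring.
 */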

/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static void
fec_enet_rx(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb;
	ushort	pkt_len;
	__u8 *data;

#ifdef CONFIG_M532x
	flush_cache_all();
#endif

	spin_lock(&fep->hw_lock);

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {

		/* Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((status & BD_ENET_RX_LAST) == 0)
			printk("FEC ENET: rcv is not +last\n");

		if (!fep->opened)
			goto rx_processing_done;

		/* Check for errors. */
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			dev->stats.rx_errors++;
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
				/* Frame too long or too short. */
				dev->stats.rx_length_errors++;
			}
			if (status & BD_ENET_RX_NO)	/* Frame alignment */
				dev->stats.rx_frame_errors++;
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				dev->stats.rx_crc_errors++;
			if (status & BD_ENET_RX_OV)	/* FIFO overrun */
				dev->stats.rx_fifo_errors++;
		}

		/* Report late collisions as a frame error.
		 * On this error, the BD is closed, but we don't know what we
		 * have in the buffer.  So, just drop this frame on the floor.
		 */
		if (status & BD_ENET_RX_CL) {
			dev->stats.rx_errors++;
			dev->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame. */
		dev->stats.rx_packets++;
		pkt_len = bdp->cbd_datlen;
		dev->stats.rx_bytes += pkt_len;
		data = (__u8 *)__va(bdp->cbd_bufaddr);

		dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen,
				DMA_FROM_DEVICE);

		/* This does 16 byte alignment, exactly what we need.
		 * The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		skb = dev_alloc_skb(pkt_len - 4 + NET_IP_ALIGN);

		if (unlikely(!skb)) {
			printk("%s: Memory squeeze, dropping packet.\n",
					dev->name);
			dev->stats.rx_dropped++;
		} else {
			skb_reserve(skb, NET_IP_ALIGN);
			skb_put(skb, pkt_len - 4);	/* Make room */
			skb_copy_to_linear_data(skb, data, pkt_len - 4);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
		}

		bdp->cbd_bufaddr = dma_map_single(NULL, data, bdp->cbd_datlen,
			DMA_FROM_DEVICE);
rx_processing_done:
		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;
		bdp->cbd_sc = status;

		/* Update BD pointer to next entry */
		if (status & BD_ENET_RX_WRAP)
			bdp = fep->rx_bd_base;
		else
			bdp++;

		/* Doing this here will keep the FEC running while we process
		 * incoming frames.  On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
602 */ 603 writel(0, fep->hwp + FEC_R_DES_ACTIVE); 604 } 605 fep->cur_rx = bdp; 606 607 spin_unlock(&fep->hw_lock); 608} 609 610/* called from interrupt context */ 611static void 612fec_enet_mii(struct net_device *dev) 613{ 614 struct fec_enet_private *fep; 615 mii_list_t *mip; 616 617 fep = netdev_priv(dev); 618 spin_lock(&fep->mii_lock); 619 620 if ((mip = mii_head) == NULL) { 621 printk("MII and no head!\n"); 622 goto unlock; 623 } 624 625 if (mip->mii_func != NULL) 626 (*(mip->mii_func))(readl(fep->hwp + FEC_MII_DATA), dev); 627 628 mii_head = mip->mii_next; 629 mip->mii_next = mii_free; 630 mii_free = mip; 631 632 if ((mip = mii_head) != NULL) 633 writel(mip->mii_regval, fep->hwp + FEC_MII_DATA); 634 635unlock: 636 spin_unlock(&fep->mii_lock); 637} 638 639static int 640mii_queue_unlocked(struct net_device *dev, int regval, 641 void (*func)(uint, struct net_device *)) 642{ 643 struct fec_enet_private *fep; 644 mii_list_t *mip; 645 int retval; 646 647 /* Add PHY address to register command */ 648 fep = netdev_priv(dev); 649 650 regval |= fep->phy_addr << 23; 651 retval = 0; 652 653 if ((mip = mii_free) != NULL) { 654 mii_free = mip->mii_next; 655 mip->mii_regval = regval; 656 mip->mii_func = func; 657 mip->mii_next = NULL; 658 if (mii_head) { 659 mii_tail->mii_next = mip; 660 mii_tail = mip; 661 } else { 662 mii_head = mii_tail = mip; 663 writel(regval, fep->hwp + FEC_MII_DATA); 664 } 665 } else { 666 retval = 1; 667 } 668 669 return retval; 670} 671 672static int 673mii_queue(struct net_device *dev, int regval, 674 void (*func)(uint, struct net_device *)) 675{ 676 struct fec_enet_private *fep; 677 unsigned long flags; 678 int retval; 679 fep = netdev_priv(dev); 680 spin_lock_irqsave(&fep->mii_lock, flags); 681 retval = mii_queue_unlocked(dev, regval, func); 682 spin_unlock_irqrestore(&fep->mii_lock, flags); 683 return retval; 684} 685 686static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c) 687{ 688 if(!c) 689 return; 690 691 for (; c->mii_data != mk_mii_end; c++) 692 mii_queue(dev, c->mii_data, c->funct); 693} 694 695static void mii_parse_sr(uint mii_reg, struct net_device *dev) 696{ 697 struct fec_enet_private *fep = netdev_priv(dev); 698 volatile uint *s = &(fep->phy_status); 699 uint status; 700 701 status = *s & ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC); 702 703 if (mii_reg & 0x0004) 704 status |= PHY_STAT_LINK; 705 if (mii_reg & 0x0010) 706 status |= PHY_STAT_FAULT; 707 if (mii_reg & 0x0020) 708 status |= PHY_STAT_ANC; 709 *s = status; 710} 711 712static void mii_parse_cr(uint mii_reg, struct net_device *dev) 713{ 714 struct fec_enet_private *fep = netdev_priv(dev); 715 volatile uint *s = &(fep->phy_status); 716 uint status; 717 718 status = *s & ~(PHY_CONF_ANE | PHY_CONF_LOOP); 719 720 if (mii_reg & 0x1000) 721 status |= PHY_CONF_ANE; 722 if (mii_reg & 0x4000) 723 status |= PHY_CONF_LOOP; 724 *s = status; 725} 726 727static void mii_parse_anar(uint mii_reg, struct net_device *dev) 728{ 729 struct fec_enet_private *fep = netdev_priv(dev); 730 volatile uint *s = &(fep->phy_status); 731 uint status; 732 733 status = *s & ~(PHY_CONF_SPMASK); 734 735 if (mii_reg & 0x0020) 736 status |= PHY_CONF_10HDX; 737 if (mii_reg & 0x0040) 738 status |= PHY_CONF_10FDX; 739 if (mii_reg & 0x0080) 740 status |= PHY_CONF_100HDX; 741 if (mii_reg & 0x00100) 742 status |= PHY_CONF_100FDX; 743 *s = status; 744} 745 746/* ------------------------------------------------------------------------- */ 747/* The Level one LXT970 is used by many boards */ 748 749#define MII_LXT970_MIRROR 16 
#define MII_LXT970_IER		17  /* Interrupt Enable Register */
#define MII_LXT970_ISR		18  /* Interrupt Status Register */
#define MII_LXT970_CONFIG	19  /* Configuration Register    */
#define MII_LXT970_CSR		20  /* Chip Status Register      */

static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_SPMASK);
	if (mii_reg & 0x0800) {
		if (mii_reg & 0x1000)
			status |= PHY_STAT_100FDX;
		else
			status |= PHY_STAT_100HDX;
	} else {
		if (mii_reg & 0x1000)
			status |= PHY_STAT_10FDX;
		else
			status |= PHY_STAT_10HDX;
	}
	*s = status;
}

static phy_cmd_t const phy_cmd_lxt970_config[] = {
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt970_startup[] = { /* enable interrupts */
		{ mk_mii_write(MII_LXT970_IER, 0x0002), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt970_ack_int[] = {
		/* read SR and ISR to acknowledge */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_LXT970_ISR), NULL },

		/* find out the current status */
		{ mk_mii_read(MII_LXT970_CSR), mii_parse_lxt970_csr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt970_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_LXT970_IER, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_lxt970 = {
	.id = 0x07810000,
	.name = "LXT970",
	.config = phy_cmd_lxt970_config,
	.startup = phy_cmd_lxt970_startup,
	.ack_int = phy_cmd_lxt970_ack_int,
	.shutdown = phy_cmd_lxt970_shutdown
};
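
/*
 * The four mk_mii_end-terminated tables above are the pattern every
 * PHY in this file follows: mii_do_cmd() queues each entry in turn,
 * and the per-entry callback (mii_parse_cr and friends) folds the
 * result into fep->phy_status when the MII interrupt delivers it, so
 * a whole table executes asynchronously, one transfer per interrupt.
 */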

/* ------------------------------------------------------------------------- */
/* The Level One LXT971 is used on some of my custom boards		     */

/* register definitions for the 971 */

#define MII_LXT971_PCR		16  /* Port Control Register     */
#define MII_LXT971_SR2		17  /* Status Register 2         */
#define MII_LXT971_IER		18  /* Interrupt Enable Register */
#define MII_LXT971_ISR		19  /* Interrupt Status Register */
#define MII_LXT971_LCR		20  /* LED Control Register      */
#define MII_LXT971_TCR		30  /* Transmit Control Register */

/*
 * I had some nice ideas of running the MDIO faster...
 * The 971 should support 8MHz and I tried it, but things acted really
 * weird, so 2.5 MHz ought to be enough for anyone...
 */

static void mii_parse_lxt971_sr2(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC);

	if (mii_reg & 0x0400) {
		fep->link = 1;
		status |= PHY_STAT_LINK;
	} else {
		fep->link = 0;
	}
	if (mii_reg & 0x0080)
		status |= PHY_STAT_ANC;
	if (mii_reg & 0x4000) {
		if (mii_reg & 0x0200)
			status |= PHY_STAT_100FDX;
		else
			status |= PHY_STAT_100HDX;
	} else {
		if (mii_reg & 0x0200)
			status |= PHY_STAT_10FDX;
		else
			status |= PHY_STAT_10HDX;
	}
	if (mii_reg & 0x0008)
		status |= PHY_STAT_FAULT;

	*s = status;
}

static phy_cmd_t const phy_cmd_lxt971_config[] = {
		/* limit to 10MBit because my prototype board
		 * doesn't work with 100. */
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt971_startup[] = {  /* enable interrupts */
		{ mk_mii_write(MII_LXT971_IER, 0x00f2), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_write(MII_LXT971_LCR, 0xd422), NULL }, /* LED config */
		/* Somehow the 971 reports that the link is down on the
		 * first read after power-up.
		 * Read here to get a valid value in ack_int. */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt971_ack_int[] = {
		/* acknowledge the int before reading status! */
		{ mk_mii_read(MII_LXT971_ISR), NULL },
		/* find out the current status */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt971_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_LXT971_IER, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_lxt971 = {
	.id = 0x0001378e,
	.name = "LXT971",
	.config = phy_cmd_lxt971_config,
	.startup = phy_cmd_lxt971_startup,
	.ack_int = phy_cmd_lxt971_ack_int,
	.shutdown = phy_cmd_lxt971_shutdown
};

/* ------------------------------------------------------------------------- */
/* The Quality Semiconductor QS6612 is used on the RPX CLLF                  */

/* register definitions */

#define MII_QS6612_MCR		17  /* Mode Control Register      */
#define MII_QS6612_FTR		27  /* Factory Test Register      */
#define MII_QS6612_MCO		28  /* Misc. Control Register     */
#define MII_QS6612_ISR		29  /* Interrupt Source Register  */
#define MII_QS6612_IMR		30  /* Interrupt Mask Register    */
#define MII_QS6612_PCR		31  /* 100BaseTx PHY Control Reg. */

static void mii_parse_qs6612_pcr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_SPMASK);

	switch ((mii_reg >> 2) & 7) {
	case 1: status |= PHY_STAT_10HDX; break;
	case 2: status |= PHY_STAT_100HDX; break;
	case 5: status |= PHY_STAT_10FDX; break;
	case 6: status |= PHY_STAT_100FDX; break;
	}

	*s = status;
}

static phy_cmd_t const phy_cmd_qs6612_config[] = {
		/* The PHY powers up isolated on the RPX,
		 * so send a command to allow operation.
931 */ 932 { mk_mii_write(MII_QS6612_PCR, 0x0dc0), NULL }, 933 934 /* parse cr and anar to get some info */ 935 { mk_mii_read(MII_REG_CR), mii_parse_cr }, 936 { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, 937 { mk_mii_end, } 938 }; 939static phy_cmd_t const phy_cmd_qs6612_startup[] = { /* enable interrupts */ 940 { mk_mii_write(MII_QS6612_IMR, 0x003a), NULL }, 941 { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ 942 { mk_mii_end, } 943 }; 944static phy_cmd_t const phy_cmd_qs6612_ack_int[] = { 945 /* we need to read ISR, SR and ANER to acknowledge */ 946 { mk_mii_read(MII_QS6612_ISR), NULL }, 947 { mk_mii_read(MII_REG_SR), mii_parse_sr }, 948 { mk_mii_read(MII_REG_ANER), NULL }, 949 950 /* read pcr to get info */ 951 { mk_mii_read(MII_QS6612_PCR), mii_parse_qs6612_pcr }, 952 { mk_mii_end, } 953 }; 954static phy_cmd_t const phy_cmd_qs6612_shutdown[] = { /* disable interrupts */ 955 { mk_mii_write(MII_QS6612_IMR, 0x0000), NULL }, 956 { mk_mii_end, } 957 }; 958static phy_info_t const phy_info_qs6612 = { 959 .id = 0x00181440, 960 .name = "QS6612", 961 .config = phy_cmd_qs6612_config, 962 .startup = phy_cmd_qs6612_startup, 963 .ack_int = phy_cmd_qs6612_ack_int, 964 .shutdown = phy_cmd_qs6612_shutdown 965}; 966 967/* ------------------------------------------------------------------------- */ 968/* AMD AM79C874 phy */ 969 970/* register definitions for the 874 */ 971 972#define MII_AM79C874_MFR 16 /* Miscellaneous Feature Register */ 973#define MII_AM79C874_ICSR 17 /* Interrupt/Status Register */ 974#define MII_AM79C874_DR 18 /* Diagnostic Register */ 975#define MII_AM79C874_PMLR 19 /* Power and Loopback Register */ 976#define MII_AM79C874_MCR 21 /* ModeControl Register */ 977#define MII_AM79C874_DC 23 /* Disconnect Counter */ 978#define MII_AM79C874_REC 24 /* Recieve Error Counter */ 979 980static void mii_parse_am79c874_dr(uint mii_reg, struct net_device *dev) 981{ 982 struct fec_enet_private *fep = netdev_priv(dev); 983 volatile uint *s = &(fep->phy_status); 984 uint status; 985 986 status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_ANC); 987 988 if (mii_reg & 0x0080) 989 status |= PHY_STAT_ANC; 990 if (mii_reg & 0x0400) 991 status |= ((mii_reg & 0x0800) ? PHY_STAT_100FDX : PHY_STAT_100HDX); 992 else 993 status |= ((mii_reg & 0x0800) ? 
				PHY_STAT_10FDX : PHY_STAT_10HDX);

	*s = status;
}

static phy_cmd_t const phy_cmd_am79c874_config[] = {
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_am79c874_startup[] = {  /* enable interrupts */
		{ mk_mii_write(MII_AM79C874_ICSR, 0xff00), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_am79c874_ack_int[] = {
		/* find out the current status */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
		/* we only need to read ISR to acknowledge */
		{ mk_mii_read(MII_AM79C874_ICSR), NULL },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_am79c874_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_AM79C874_ICSR, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_am79c874 = {
	.id = 0x00022561,
	.name = "AM79C874",
	.config = phy_cmd_am79c874_config,
	.startup = phy_cmd_am79c874_startup,
	.ack_int = phy_cmd_am79c874_ack_int,
	.shutdown = phy_cmd_am79c874_shutdown
};


/* ------------------------------------------------------------------------- */
/* Kendin KS8721BL phy                                                       */

/* register definitions for the 8721 */

#define MII_KS8721BL_RXERCR	21
#define MII_KS8721BL_ICSR	27
#define MII_KS8721BL_PHYCR	31

static phy_cmd_t const phy_cmd_ks8721bl_config[] = {
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_ks8721bl_startup[] = {  /* enable interrupts */
		{ mk_mii_write(MII_KS8721BL_ICSR, 0xff00), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_ks8721bl_ack_int[] = {
		/* find out the current status */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		/* we only need to read ISR to acknowledge */
		{ mk_mii_read(MII_KS8721BL_ICSR), NULL },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_ks8721bl_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_KS8721BL_ICSR, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_ks8721bl = {
	.id = 0x00022161,
	.name = "KS8721BL",
	.config = phy_cmd_ks8721bl_config,
	.startup = phy_cmd_ks8721bl_startup,
	.ack_int = phy_cmd_ks8721bl_ack_int,
	.shutdown = phy_cmd_ks8721bl_shutdown
};

/* ------------------------------------------------------------------------- */
/* register definitions for the DP83848                                      */

#define MII_DP8384X_PHYSTST	16  /* PHY Status Register */

static void mii_parse_dp8384x_sr2(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);

	*s &= ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC);

	/* Link up */
	if (mii_reg & 0x0001) {
		fep->link = 1;
		*s |= PHY_STAT_LINK;
	} else
		fep->link = 0;
	/* Status of link */
	if (mii_reg & 0x0010)	/* Autonegotiation complete */
		*s |= PHY_STAT_ANC;
	if (mii_reg & 0x0002) {		/* 10 Mbps? */
		if (mii_reg & 0x0004)	/* Full Duplex? */
			*s |= PHY_STAT_10FDX;
		else
			*s |= PHY_STAT_10HDX;
	} else {			/* 100 Mbps? */
		if (mii_reg & 0x0004)	/* Full Duplex? */
			*s |= PHY_STAT_100FDX;
		else
			*s |= PHY_STAT_100HDX;
	}
	if (mii_reg & 0x0008)
		*s |= PHY_STAT_FAULT;
}

static phy_info_t phy_info_dp83848 = {
	0x020005c9,
	"DP83848",

	(const phy_cmd_t []) {  /* config */
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_read(MII_DP8384X_PHYSTST), mii_parse_dp8384x_sr2 },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* startup - enable interrupts */
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* ack_int - never happens, no interrupt */
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* shutdown */
		{ mk_mii_end, }
	},
};

/* ------------------------------------------------------------------------- */

static phy_info_t const * const phy_info[] = {
	&phy_info_lxt970,
	&phy_info_lxt971,
	&phy_info_qs6612,
	&phy_info_am79c874,
	&phy_info_ks8721bl,
	&phy_info_dp83848,
	NULL
};
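
/*
 * How the .id values above are matched: mii_discover_phy() places
 * PHYIR1 in the upper 16 bits of fep->phy_id and mii_discover_phy3()
 * ORs in PHYIR2, then searches this table with (fep->phy_id >> 4).
 * The shift drops the 4-bit revision field, so each .id is the
 * OUI/model part of the 32-bit PHY identifier.
 */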

/* ------------------------------------------------------------------------- */
#ifdef HAVE_mii_link_interrupt
static irqreturn_t
mii_link_interrupt(int irq, void *dev_id);

/*
 *	This is specific to the MII interrupt setup of the M5272EVB.
 */
static void __inline__ fec_request_mii_intr(struct net_device *dev)
{
	if (request_irq(66, mii_link_interrupt, IRQF_DISABLED, "fec(MII)", dev) != 0)
		printk("FEC: Could not allocate fec(MII) IRQ(66)!\n");
}

static void __inline__ fec_disable_phy_intr(struct net_device *dev)
{
	free_irq(66, dev);
}
#endif

#ifdef CONFIG_M5272
static void __inline__ fec_get_mac(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	unsigned char *iap, tmpaddr[ETH_ALEN];

	if (FEC_FLASHMAC) {
		/*
		 * Get MAC address from FLASH.
		 * If it is all 1's or 0's, use the default.
1173 */ 1174 iap = (unsigned char *)FEC_FLASHMAC; 1175 if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) && 1176 (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0)) 1177 iap = fec_mac_default; 1178 if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) && 1179 (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff)) 1180 iap = fec_mac_default; 1181 } else { 1182 *((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW); 1183 *((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16); 1184 iap = &tmpaddr[0]; 1185 } 1186 1187 memcpy(dev->dev_addr, iap, ETH_ALEN); 1188 1189 /* Adjust MAC if using default MAC address */ 1190 if (iap == fec_mac_default) 1191 dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index; 1192} 1193#endif 1194 1195/* ------------------------------------------------------------------------- */ 1196 1197static void mii_display_status(struct net_device *dev) 1198{ 1199 struct fec_enet_private *fep = netdev_priv(dev); 1200 volatile uint *s = &(fep->phy_status); 1201 1202 if (!fep->link && !fep->old_link) { 1203 /* Link is still down - don't print anything */ 1204 return; 1205 } 1206 1207 printk("%s: status: ", dev->name); 1208 1209 if (!fep->link) { 1210 printk("link down"); 1211 } else { 1212 printk("link up"); 1213 1214 switch(*s & PHY_STAT_SPMASK) { 1215 case PHY_STAT_100FDX: printk(", 100MBit Full Duplex"); break; 1216 case PHY_STAT_100HDX: printk(", 100MBit Half Duplex"); break; 1217 case PHY_STAT_10FDX: printk(", 10MBit Full Duplex"); break; 1218 case PHY_STAT_10HDX: printk(", 10MBit Half Duplex"); break; 1219 default: 1220 printk(", Unknown speed/duplex"); 1221 } 1222 1223 if (*s & PHY_STAT_ANC) 1224 printk(", auto-negotiation complete"); 1225 } 1226 1227 if (*s & PHY_STAT_FAULT) 1228 printk(", remote fault"); 1229 1230 printk(".\n"); 1231} 1232 1233static void mii_display_config(struct work_struct *work) 1234{ 1235 struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task); 1236 struct net_device *dev = fep->netdev; 1237 uint status = fep->phy_status; 1238 1239 /* 1240 ** When we get here, phy_task is already removed from 1241 ** the workqueue. It is thus safe to allow to reuse it. 1242 */ 1243 fep->mii_phy_task_queued = 0; 1244 printk("%s: config: auto-negotiation ", dev->name); 1245 1246 if (status & PHY_CONF_ANE) 1247 printk("on"); 1248 else 1249 printk("off"); 1250 1251 if (status & PHY_CONF_100FDX) 1252 printk(", 100FDX"); 1253 if (status & PHY_CONF_100HDX) 1254 printk(", 100HDX"); 1255 if (status & PHY_CONF_10FDX) 1256 printk(", 10FDX"); 1257 if (status & PHY_CONF_10HDX) 1258 printk(", 10HDX"); 1259 if (!(status & PHY_CONF_SPMASK)) 1260 printk(", No speed/duplex selected?"); 1261 1262 if (status & PHY_CONF_LOOP) 1263 printk(", loopback enabled"); 1264 1265 printk(".\n"); 1266 1267 fep->sequence_done = 1; 1268} 1269 1270static void mii_relink(struct work_struct *work) 1271{ 1272 struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task); 1273 struct net_device *dev = fep->netdev; 1274 int duplex; 1275 1276 /* 1277 ** When we get here, phy_task is already removed from 1278 ** the workqueue. It is thus safe to allow to reuse it. 1279 */ 1280 fep->mii_phy_task_queued = 0; 1281 fep->link = (fep->phy_status & PHY_STAT_LINK) ? 
			1 : 0;
	mii_display_status(dev);
	fep->old_link = fep->link;

	if (fep->link) {
		duplex = 0;
		if (fep->phy_status
		    & (PHY_STAT_100FDX | PHY_STAT_10FDX))
			duplex = 1;
		fec_restart(dev, duplex);
	} else
		fec_stop(dev);
}

/* mii_queue_relink is called in interrupt context from mii_link_interrupt */
static void mii_queue_relink(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/*
	 * We cannot queue phy_task twice in the workqueue.  It
	 * would cause an endless loop in the workqueue.
	 * Fortunately, if the last mii_relink entry has not yet been
	 * executed now, it will do the job for the current interrupt,
	 * which is just what we want.
	 */
	if (fep->mii_phy_task_queued)
		return;

	fep->mii_phy_task_queued = 1;
	INIT_WORK(&fep->phy_task, mii_relink);
	schedule_work(&fep->phy_task);
}

/* mii_queue_config is called in interrupt context from fec_enet_mii */
static void mii_queue_config(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	if (fep->mii_phy_task_queued)
		return;

	fep->mii_phy_task_queued = 1;
	INIT_WORK(&fep->phy_task, mii_display_config);
	schedule_work(&fep->phy_task);
}

phy_cmd_t const phy_cmd_relink[] = {
	{ mk_mii_read(MII_REG_CR), mii_queue_relink },
	{ mk_mii_end, }
	};
phy_cmd_t const phy_cmd_config[] = {
	{ mk_mii_read(MII_REG_CR), mii_queue_config },
	{ mk_mii_end, }
	};

/* Read remainder of PHY ID. */
static void
mii_discover_phy3(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep;
	int i;

	fep = netdev_priv(dev);
	fep->phy_id |= (mii_reg & 0xffff);
	printk("fec: PHY @ 0x%x, ID 0x%08x", fep->phy_addr, fep->phy_id);

	for (i = 0; phy_info[i]; i++) {
		if (phy_info[i]->id == (fep->phy_id >> 4))
			break;
	}

	if (phy_info[i])
		printk(" -- %s\n", phy_info[i]->name);
	else
		printk(" -- unknown PHY!\n");

	fep->phy = phy_info[i];
	fep->phy_id_done = 1;
}

/* Scan all of the MII PHY addresses looking for someone to respond
 * with a valid ID.  This usually happens quickly.
1364 */ 1365static void 1366mii_discover_phy(uint mii_reg, struct net_device *dev) 1367{ 1368 struct fec_enet_private *fep; 1369 uint phytype; 1370 1371 fep = netdev_priv(dev); 1372 1373 if (fep->phy_addr < 32) { 1374 if ((phytype = (mii_reg & 0xffff)) != 0xffff && phytype != 0) { 1375 1376 /* Got first part of ID, now get remainder */ 1377 fep->phy_id = phytype << 16; 1378 mii_queue_unlocked(dev, mk_mii_read(MII_REG_PHYIR2), 1379 mii_discover_phy3); 1380 } else { 1381 fep->phy_addr++; 1382 mii_queue_unlocked(dev, mk_mii_read(MII_REG_PHYIR1), 1383 mii_discover_phy); 1384 } 1385 } else { 1386 printk("FEC: No PHY device found.\n"); 1387 /* Disable external MII interface */ 1388 writel(0, fep->hwp + FEC_MII_SPEED); 1389 fep->phy_speed = 0; 1390#ifdef HAVE_mii_link_interrupt 1391 fec_disable_phy_intr(dev); 1392#endif 1393 } 1394} 1395 1396/* This interrupt occurs when the PHY detects a link change */ 1397#ifdef HAVE_mii_link_interrupt 1398static irqreturn_t 1399mii_link_interrupt(int irq, void * dev_id) 1400{ 1401 struct net_device *dev = dev_id; 1402 struct fec_enet_private *fep = netdev_priv(dev); 1403 1404 mii_do_cmd(dev, fep->phy->ack_int); 1405 mii_do_cmd(dev, phy_cmd_relink); /* restart and display status */ 1406 1407 return IRQ_HANDLED; 1408} 1409#endif 1410 1411static void fec_enet_free_buffers(struct net_device *dev) 1412{ 1413 struct fec_enet_private *fep = netdev_priv(dev); 1414 int i; 1415 struct sk_buff *skb; 1416 struct bufdesc *bdp; 1417 1418 bdp = fep->rx_bd_base; 1419 for (i = 0; i < RX_RING_SIZE; i++) { 1420 skb = fep->rx_skbuff[i]; 1421 1422 if (bdp->cbd_bufaddr) 1423 dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, 1424 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); 1425 if (skb) 1426 dev_kfree_skb(skb); 1427 bdp++; 1428 } 1429 1430 bdp = fep->tx_bd_base; 1431 for (i = 0; i < TX_RING_SIZE; i++) 1432 kfree(fep->tx_bounce[i]); 1433} 1434 1435static int fec_enet_alloc_buffers(struct net_device *dev) 1436{ 1437 struct fec_enet_private *fep = netdev_priv(dev); 1438 int i; 1439 struct sk_buff *skb; 1440 struct bufdesc *bdp; 1441 1442 bdp = fep->rx_bd_base; 1443 for (i = 0; i < RX_RING_SIZE; i++) { 1444 skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE); 1445 if (!skb) { 1446 fec_enet_free_buffers(dev); 1447 return -ENOMEM; 1448 } 1449 fep->rx_skbuff[i] = skb; 1450 1451 bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data, 1452 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); 1453 bdp->cbd_sc = BD_ENET_RX_EMPTY; 1454 bdp++; 1455 } 1456 1457 /* Set the last buffer to wrap. */ 1458 bdp--; 1459 bdp->cbd_sc |= BD_SC_WRAP; 1460 1461 bdp = fep->tx_bd_base; 1462 for (i = 0; i < TX_RING_SIZE; i++) { 1463 fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); 1464 1465 bdp->cbd_sc = 0; 1466 bdp->cbd_bufaddr = 0; 1467 bdp++; 1468 } 1469 1470 /* Set the last buffer to wrap. */ 1471 bdp--; 1472 bdp->cbd_sc |= BD_SC_WRAP; 1473 1474 return 0; 1475} 1476 1477static int 1478fec_enet_open(struct net_device *dev) 1479{ 1480 struct fec_enet_private *fep = netdev_priv(dev); 1481 int ret; 1482 1483 /* I should reset the ring buffers here, but I don't yet know 1484 * a simple way to do that. 1485 */ 1486 1487 ret = fec_enet_alloc_buffers(dev); 1488 if (ret) 1489 return ret; 1490 1491 fep->sequence_done = 0; 1492 fep->link = 0; 1493 1494 fec_restart(dev, 1); 1495 1496 if (fep->phy) { 1497 mii_do_cmd(dev, fep->phy->ack_int); 1498 mii_do_cmd(dev, fep->phy->config); 1499 mii_do_cmd(dev, phy_cmd_config); /* display configuration */ 1500 1501 /* Poll until the PHY tells us its configuration 1502 * (not link state). 
		 * Request is initiated by mii_do_cmd above, but answer
		 * comes by interrupt.
		 * This should take about 25 usec per register at 2.5 MHz,
		 * and we read approximately 5 registers.
		 */
		while (!fep->sequence_done)
			schedule();

		mii_do_cmd(dev, fep->phy->startup);
	}

	/* Set the initial link state to true.  A lot of hardware
	 * based on this device does not implement a PHY interrupt,
	 * so we are never notified of link change.
	 */
	fep->link = 1;

	netif_start_queue(dev);
	fep->opened = 1;
	return 0;
}

static int
fec_enet_close(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/* Don't know what to do yet. */
	fep->opened = 0;
	netif_stop_queue(dev);
	fec_stop(dev);

	fec_enet_free_buffers(dev);

	return 0;
}

/* Set or clear the multicast filter for this adaptor.
 * Skeleton taken from sunlance driver.
 * The CPM Ethernet implementation allows Multicast as well as individual
 * MAC address filtering.  Some of the drivers check to make sure it is
 * a group multicast address, and discard those that are not.  I guess I
 * will do the same for now, but just remove the test if you want
 * individual filtering as well (do the upper net layers want or support
 * this kind of feature?).
 */

#define HASH_BITS	6		/* #bits in hash */
#define CRC32_POLY	0xEDB88320

static void set_multicast_list(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct dev_mc_list *dmi;
	unsigned int i, j, bit, data, crc, tmp;
	unsigned char hash;

	if (dev->flags & IFF_PROMISC) {
		tmp = readl(fep->hwp + FEC_R_CNTRL);
		tmp |= 0x8;
		writel(tmp, fep->hwp + FEC_R_CNTRL);
		return;
	}

	tmp = readl(fep->hwp + FEC_R_CNTRL);
	tmp &= ~0x8;
	writel(tmp, fep->hwp + FEC_R_CNTRL);

	if (dev->flags & IFF_ALLMULTI) {
		/* Catch all multicast addresses, so set the
		 * filter to all 1's
		 */
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

		return;
	}

	/* Clear filter and add the addresses in hash register
	 */
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

	dmi = dev->mc_list;

	for (j = 0; j < dev->mc_count; j++, dmi = dmi->next) {
		/* Only support group multicast for now */
		if (!(dmi->dmi_addr[0] & 1))
			continue;

		/* calculate crc32 value of mac address */
		crc = 0xffffffff;

		for (i = 0; i < dmi->dmi_addrlen; i++) {
			data = dmi->dmi_addr[i];
			for (bit = 0; bit < 8; bit++, data >>= 1) {
				crc = (crc >> 1) ^
				(((crc ^ data) & 1) ? CRC32_POLY : 0);
			}
		}

		/* only upper 6 bits (HASH_BITS) are used
		 * which point to a specific bit in the hash registers
		 */
		hash = (crc >> (32 - HASH_BITS)) & 0x3f;

		if (hash > 31) {
			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
			tmp |= 1 << (hash - 32);
			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		} else {
			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
			tmp |= 1 << hash;
			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
		}
	}
}

/* Set a MAC change in hardware.
 */
static int
fec_set_mac_address(struct net_device *dev, void *p)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	writel(dev->dev_addr[3] | (dev->dev_addr[2] << 8) |
		(dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24),
		fep->hwp + FEC_ADDR_LOW);
	writel((dev->dev_addr[5] << 16) | (dev->dev_addr[4] << 24),
		fep->hwp + FEC_ADDR_HIGH);
	return 0;
}

static const struct net_device_ops fec_netdev_ops = {
	.ndo_open		= fec_enet_open,
	.ndo_stop		= fec_enet_close,
	.ndo_start_xmit		= fec_enet_start_xmit,
	.ndo_set_multicast_list	= set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= fec_timeout,
	.ndo_set_mac_address	= fec_set_mac_address,
};

 /*
  * XXX:  We need to clean up on failure exits here.
  *
  * index is only used in legacy code
  */
static int fec_enet_init(struct net_device *dev, int index)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct bufdesc *cbd_base;
	int i;

	/* Allocate memory for buffer descriptors. */
	cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
			GFP_KERNEL);
	if (!cbd_base) {
		printk("FEC: allocate descriptor memory failed?\n");
		return -ENOMEM;
	}

	spin_lock_init(&fep->hw_lock);
	spin_lock_init(&fep->mii_lock);

	fep->index = index;
	fep->hwp = (void __iomem *)dev->base_addr;
	fep->netdev = dev;

	/* Set the Ethernet address */
#ifdef CONFIG_M5272
	fec_get_mac(dev);
#else
	{
		unsigned long l;
		l = readl(fep->hwp + FEC_ADDR_LOW);
		dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24);
		dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16);
		dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8);
		dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0);
		l = readl(fep->hwp + FEC_ADDR_HIGH);
		dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24);
		dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16);
	}
#endif

	/* Set receive and transmit descriptor base. */
	fep->rx_bd_base = cbd_base;
	fep->tx_bd_base = cbd_base + RX_RING_SIZE;

#ifdef HAVE_mii_link_interrupt
	fec_request_mii_intr(dev);
#endif
	/* The FEC Ethernet specific entries in the device structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->netdev_ops = &fec_netdev_ops;

	for (i = 0; i < NMII - 1; i++)
		mii_cmds[i].mii_next = &mii_cmds[i+1];
	mii_free = mii_cmds;

	/* Set MII speed to 2.5 MHz */
	fep->phy_speed = ((((clk_get_rate(fep->clk) / 2 + 4999999)
					/ 2500000) / 2) & 0x3F) << 1;
	fec_restart(dev, 0);

	/* Queue up command to detect the PHY and initialize the
	 * remainder of the interface.
	 */
	fep->phy_id_done = 0;
	fep->phy_addr = 0;
	mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy);

	return 0;
}

/* This function is called to start or restart the FEC during a link
 * change.  This only happens when switching between half and full
 * duplex.
 */
static void
fec_restart(struct net_device *dev, int duplex)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct bufdesc *bdp;
	int i;

	/* Whack a reset.  We should wait for this. */
	writel(1, fep->hwp + FEC_ECNTRL);
	udelay(10);

	/* Clear any outstanding interrupt. */
	writel(0xffc00000, fep->hwp + FEC_IEVENT);

	/* Reset all multicast. */
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
#ifndef CONFIG_M5272
	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif

	/* Set maximum receive buffer size. */
	writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);

	/* Set receive and transmit descriptor base. */
	writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
	writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
			fep->hwp + FEC_X_DES_START);

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->cur_rx = fep->rx_bd_base;

	/* Reset SKB transmit buffers. */
	fep->skb_cur = fep->skb_dirty = 0;
	for (i = 0; i <= TX_RING_MOD_MASK; i++) {
		if (fep->tx_skbuff[i]) {
			dev_kfree_skb_any(fep->tx_skbuff[i]);
			fep->tx_skbuff[i] = NULL;
		}
	}

	/* Initialize the receive buffer descriptors. */
	bdp = fep->rx_bd_base;
	for (i = 0; i < RX_RING_SIZE; i++) {

		/* Initialize the BD for every fragment in the page. */
		bdp->cbd_sc = BD_ENET_RX_EMPTY;
		bdp++;
	}

	/* Set the last buffer to wrap */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* ...and the same for transmit */
	bdp = fep->tx_bd_base;
	for (i = 0; i < TX_RING_SIZE; i++) {

		/* Initialize the BD for every fragment in the page. */
		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}

	/* Set the last buffer to wrap */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* Enable MII mode */
	if (duplex) {
		/* MII enable / FD enable */
		writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL);
		writel(0x04, fep->hwp + FEC_X_CNTRL);
	} else {
		/* MII enable / No Rcv on Xmit */
		writel(OPT_FRAME_SIZE | 0x06, fep->hwp + FEC_R_CNTRL);
		writel(0x0, fep->hwp + FEC_X_CNTRL);
	}
	fep->full_duplex = duplex;

	/* Set MII speed */
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

	/* And last, enable the transmit and receive processing */
	writel(2, fep->hwp + FEC_ECNTRL);
	writel(0, fep->hwp + FEC_R_DES_ACTIVE);

	/* Enable interrupts we wish to service */
	writel(FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII,
			fep->hwp + FEC_IMASK);
}

static void
fec_stop(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/* We cannot expect a graceful transmit stop without link! */
	if (fep->link) {
		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
		udelay(10);
		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
			printk("fec_stop: Graceful transmit stop did not complete!\n");
	}

	/* Whack a reset.  We should wait for this. */
	writel(1, fep->hwp + FEC_ECNTRL);
	udelay(10);

	/* Clear outstanding MII command interrupts.
	 */
	writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);

	writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
}

static int __devinit
fec_probe(struct platform_device *pdev)
{
	struct fec_enet_private *fep;
	struct net_device *ndev;
	int i, irq, ret = 0;
	struct resource *r;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r)
		return -ENXIO;

	r = request_mem_region(r->start, resource_size(r), pdev->name);
	if (!r)
		return -EBUSY;

	/* Init network device */
	ndev = alloc_etherdev(sizeof(struct fec_enet_private));
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* setup board info structure */
	fep = netdev_priv(ndev);
	memset(fep, 0, sizeof(*fep));

	ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r));

	if (!ndev->base_addr) {
		ret = -ENOMEM;
		goto failed_ioremap;
	}

	platform_set_drvdata(pdev, ndev);

	/* This device has up to three irqs on some platforms */
	for (i = 0; i < 3; i++) {
		irq = platform_get_irq(pdev, i);
		if (i && irq < 0)
			break;
		ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
		if (ret) {
			while (--i >= 0) {
				irq = platform_get_irq(pdev, i);
				free_irq(irq, ndev);
			}
			goto failed_irq;
		}
	}

	fep->clk = clk_get(&pdev->dev, "fec_clk");
	if (IS_ERR(fep->clk)) {
		ret = PTR_ERR(fep->clk);
		goto failed_clk;
	}
	clk_enable(fep->clk);

	ret = fec_enet_init(ndev, 0);
	if (ret)
		goto failed_init;

	ret = register_netdev(ndev);
	if (ret)
		goto failed_register;

	return 0;

failed_register:
failed_init:
	clk_disable(fep->clk);
	clk_put(fep->clk);
failed_clk:
	for (i = 0; i < 3; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq > 0)
			free_irq(irq, ndev);
	}
failed_irq:
	iounmap((void __iomem *)ndev->base_addr);
failed_ioremap:
	free_netdev(ndev);

	return ret;
}

static int __devexit
fec_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	platform_set_drvdata(pdev, NULL);

	fec_stop(ndev);
	clk_disable(fep->clk);
	clk_put(fep->clk);
	unregister_netdev(ndev);
	iounmap((void __iomem *)ndev->base_addr);
	free_netdev(ndev);
	return 0;
}

static int
fec_suspend(struct platform_device *dev, pm_message_t state)
{
	struct net_device *ndev = platform_get_drvdata(dev);
	struct fec_enet_private *fep;

	if (ndev) {
		fep = netdev_priv(ndev);
		if (netif_running(ndev)) {
			netif_device_detach(ndev);
			fec_stop(ndev);
		}
	}
	return 0;
}

static int
fec_resume(struct platform_device *dev)
{
	struct net_device *ndev = platform_get_drvdata(dev);

	if (ndev) {
		if (netif_running(ndev)) {
			fec_enet_init(ndev, 0);
			netif_device_attach(ndev);
		}
	}
	return 0;
}

static struct platform_driver fec_driver = {
	.driver	= {
		.name    = "fec",
		.owner	 = THIS_MODULE,
	},
	.probe   = fec_probe,
	.remove  = __devexit_p(fec_drv_remove),
	.suspend = fec_suspend,
	.resume  = fec_resume,
};
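
/*
 * Binding note: registering fec_driver matches platform devices named
 * "fec" that the board or architecture setup code registers, which is
 * how fec_probe() receives its memory and interrupt resources.
 */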
static int __init
fec_enet_module_init(void)
{
	printk(KERN_INFO "FEC Ethernet Driver\n");

	return platform_driver_register(&fec_driver);
}

static void __exit
fec_enet_cleanup(void)
{
	platform_driver_unregister(&fec_driver);
}

module_exit(fec_enet_cleanup);
module_init(fec_enet_module_init);

MODULE_LICENSE("GPL");