Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.30-rc4
/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * Right now, I am very wasteful with the buffers.  I allocate memory
 * pages and then divide them into 2K frame buffers.  This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>

#include <asm/cacheflush.h>

#ifndef CONFIG_ARCH_MXC
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#endif

#include "fec.h"

#ifdef CONFIG_ARCH_MXC
#include <mach/hardware.h>
#define FEC_ALIGNMENT	0xf
#else
#define FEC_ALIGNMENT	0x3
#endif

/*
 * Define the fixed address of the FEC hardware.
 */
#if defined(CONFIG_M5272)
#define HAVE_mii_link_interrupt

static unsigned char	fec_mac_default[] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/*
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero then assume it is the address to get MAC from.
 */
#if defined(CONFIG_NETtel)
#define	FEC_FLASHMAC	0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define	FEC_FLASHMAC	0xf0006000
#elif defined(CONFIG_CANCam)
#define	FEC_FLASHMAC	0xf0020000
#elif defined (CONFIG_M5272C3)
#define	FEC_FLASHMAC	(0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define	FEC_FLASHMAC	0xffc0406b
#else
#define	FEC_FLASHMAC	0
#endif
#endif /* CONFIG_M5272 */

/* Forward declarations of some structures to support different PHYs */

typedef struct {
	uint mii_data;
	void (*funct)(uint mii_reg, struct net_device *dev);
} phy_cmd_t;

typedef struct {
	uint id;
	char *name;

	const phy_cmd_t *config;
	const phy_cmd_t *startup;
	const phy_cmd_t *ack_int;
	const phy_cmd_t *shutdown;
} phy_info_t;

/* The number of Tx and Rx buffers.  These are allocated from the page
 * pool.  The code may assume these are powers of two, so it is best
 * to keep them that size.
 * We don't need to allocate pages for the transmitter.  We just use
 * the skbuffer directly.
 */
#define FEC_ENET_RX_PAGES	8
#define FEC_ENET_RX_FRSIZE	2048
#define FEC_ENET_RX_FRPPG	(PAGE_SIZE / FEC_ENET_RX_FRSIZE)
#define RX_RING_SIZE		(FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
#define FEC_ENET_TX_FRSIZE	2048
#define FEC_ENET_TX_FRPPG	(PAGE_SIZE / FEC_ENET_TX_FRSIZE)
#define TX_RING_SIZE		16	/* Must be power of two */
#define TX_RING_MOD_MASK	15	/*   for this to work */

#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
#error "FEC: descriptor ring size constants too large"
#endif
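/* Worked example of the ring sizing above (illustrative, assuming a 4 KiB
 * PAGE_SIZE): FEC_ENET_RX_FRPPG = 4096 / 2048 = 2 frames per page, so
 * RX_RING_SIZE = 2 * 8 = 16 receive descriptors.  The sanity check just
 * above then evaluates (16 + 16) * 8 = 256 bytes of descriptor space,
 * comfortably below one page, so the #error does not trigger; with a larger
 * PAGE_SIZE the RX ring simply grows, since FRPPG scales with the page size.
 */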
/* Interrupt events/masks. */
#define FEC_ENET_HBERR	((uint)0x80000000)	/* Heartbeat error */
#define FEC_ENET_BABR	((uint)0x40000000)	/* Babbling receiver */
#define FEC_ENET_BABT	((uint)0x20000000)	/* Babbling transmitter */
#define FEC_ENET_GRA	((uint)0x10000000)	/* Graceful stop complete */
#define FEC_ENET_TXF	((uint)0x08000000)	/* Full frame transmitted */
#define FEC_ENET_TXB	((uint)0x04000000)	/* A buffer was transmitted */
#define FEC_ENET_RXF	((uint)0x02000000)	/* Full frame received */
#define FEC_ENET_RXB	((uint)0x01000000)	/* A buffer was received */
#define FEC_ENET_MII	((uint)0x00800000)	/* MII interrupt */
#define FEC_ENET_EBERR	((uint)0x00400000)	/* SDMA bus error */

/* The FEC stores dest/src/type, data, and checksum for receive packets.
 */
#define PKT_MAXBUF_SIZE		1518
#define PKT_MINBUF_SIZE		64
#define PKT_MAXBLR_SIZE		1520


/*
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
#define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
#else
#define	OPT_FRAME_SIZE	0
#endif

/* The FEC buffer descriptors track the ring buffers.  The rx_bd_base and
 * tx_bd_base always point to the base of the buffer descriptors.  The
 * cur_rx and cur_tx point to the currently available buffer.
 * The dirty_tx tracks the current buffer that is being sent by the
 * controller.  The cur_tx and dirty_tx are equal under both completely
 * empty and completely full conditions.  The empty/ready indicator in
 * the buffer descriptor determines the actual condition.
 */
struct fec_enet_private {
	/* Hardware registers of the FEC device */
	volatile fec_t	*hwp;

	struct net_device *netdev;

	struct clk *clk;

	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	unsigned char *tx_bounce[TX_RING_SIZE];
	struct	sk_buff	*tx_skbuff[TX_RING_SIZE];
	ushort	skb_cur;
	ushort	skb_dirty;

	/* CPM dual port RAM relative addresses. */
	dma_addr_t	bd_dma;
	cbd_t	*rx_bd_base;		/* Address of Rx and Tx buffers. */
	cbd_t	*tx_bd_base;
	cbd_t	*cur_rx, *cur_tx;	/* The next free ring entry */
	cbd_t	*dirty_tx;		/* The ring entries to be free()ed. */
	uint	tx_full;
	/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
	spinlock_t hw_lock;
	/* hold while accessing the mii_list_t() elements */
	spinlock_t mii_lock;

	uint	phy_id;
	uint	phy_id_done;
	uint	phy_status;
	uint	phy_speed;
	phy_info_t const	*phy;
	struct work_struct phy_task;

	uint	sequence_done;
	uint	mii_phy_task_queued;

	uint	phy_addr;

	int	index;
	int	opened;
	int	link;
	int	old_link;
	int	full_duplex;
};

static int fec_enet_open(struct net_device *dev);
static int fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void fec_enet_mii(struct net_device *dev);
static irqreturn_t fec_enet_interrupt(int irq, void * dev_id);
static void fec_enet_tx(struct net_device *dev);
static void fec_enet_rx(struct net_device *dev);
static int fec_enet_close(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
static void fec_restart(struct net_device *dev, int duplex);
static void fec_stop(struct net_device *dev);
static void fec_set_mac_address(struct net_device *dev);


/* MII processing.  We keep this as simple as possible.  Requests are
 * placed on the list (if there is room).  When the request is finished
 * by the MII, an optional function may be called.
 */
typedef struct mii_list {
	uint	mii_regval;
	void	(*mii_func)(uint val, struct net_device *dev);
	struct	mii_list *mii_next;
} mii_list_t;

#define		NMII	20
static mii_list_t	mii_cmds[NMII];
static mii_list_t	*mii_free;
static mii_list_t	*mii_head;
static mii_list_t	*mii_tail;

static int	mii_queue(struct net_device *dev, int request,
				void (*func)(uint, struct net_device *));

/* Make MII read/write commands for the FEC. */
#define mk_mii_read(REG)	(0x60020000 | ((REG & 0x1f) << 18))
#define mk_mii_write(REG, VAL)	(0x50020000 | ((REG & 0x1f) << 18) | \
						(VAL & 0xffff))
#define mk_mii_end	0
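/* Illustrative expansion of the macros above (derived purely from the macro
 * definitions): for the PHY status register, defined below as MII_REG_SR = 1,
 *
 *	mk_mii_read(MII_REG_SR) = 0x60020000 | ((1 & 0x1f) << 18) = 0x60060000
 *
 * i.e. the start-of-frame and opcode bits occupy the top nibble, the register
 * number sits in bits 22:18 and the turnaround bits in 17:16.  mii_queue()
 * later ORs in the PHY address as (fep->phy_addr << 23), and mk_mii_write()
 * additionally places the 16-bit data value in the low half-word.
 */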
/* Transmitter timeout. */
#define TX_TIMEOUT	(2*HZ)

/* Register definitions for the PHY. */

#define MII_REG_CR	0	/* Control Register */
#define MII_REG_SR	1	/* Status Register */
#define MII_REG_PHYIR1	2	/* PHY Identification Register 1 */
#define MII_REG_PHYIR2	3	/* PHY Identification Register 2 */
#define MII_REG_ANAR	4	/* A-N Advertisement Register */
#define MII_REG_ANLPAR	5	/* A-N Link Partner Ability Register */
#define MII_REG_ANER	6	/* A-N Expansion Register */
#define MII_REG_ANNPTR	7	/* A-N Next Page Transmit Register */
#define MII_REG_ANLPRNPR	8	/* A-N Link Partner Received Next Page Reg. */

/* values for phy_status */

#define PHY_CONF_ANE	0x0001		/* 1 auto-negotiation enabled */
#define PHY_CONF_LOOP	0x0002		/* 1 loopback mode enabled */
#define PHY_CONF_SPMASK	0x00f0		/* mask for speed */
#define PHY_CONF_10HDX	0x0010		/* 10 Mbit half duplex supported */
#define PHY_CONF_10FDX	0x0020		/* 10 Mbit full duplex supported */
#define PHY_CONF_100HDX	0x0040		/* 100 Mbit half duplex supported */
#define PHY_CONF_100FDX	0x0080		/* 100 Mbit full duplex supported */

#define PHY_STAT_LINK	0x0100		/* 1 up - 0 down */
#define PHY_STAT_FAULT	0x0200		/* 1 remote fault */
#define PHY_STAT_ANC	0x0400		/* 1 auto-negotiation complete */
#define PHY_STAT_SPMASK	0xf000		/* mask for speed */
#define PHY_STAT_10HDX	0x1000		/* 10 Mbit half duplex selected */
#define PHY_STAT_10FDX	0x2000		/* 10 Mbit full duplex selected */
#define PHY_STAT_100HDX	0x4000		/* 100 Mbit half duplex selected */
#define PHY_STAT_100FDX	0x8000		/* 100 Mbit full duplex selected */


static int
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fec_enet_private *fep;
	volatile fec_t	*fecp;
	volatile cbd_t	*bdp;
	unsigned short	status;
	unsigned long flags;

	fep = netdev_priv(dev);
	fecp = (volatile fec_t*)dev->base_addr;

	if (!fep->link) {
		/* Link is down or autonegotiation is in progress. */
		return 1;
	}

	spin_lock_irqsave(&fep->hw_lock, flags);
	/* Fill in a Tx ring entry */
	bdp = fep->cur_tx;

	status = bdp->cbd_sc;
#ifndef final_version
	if (status & BD_ENET_TX_READY) {
		/* Ooops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since dev->tbusy should be set.
		 */
		printk("%s: tx queue full!.\n", dev->name);
		spin_unlock_irqrestore(&fep->hw_lock, flags);
		return 1;
	}
#endif

	/* Clear all of the status flags. */
	status &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer. */
	bdp->cbd_bufaddr = __pa(skb->data);
	bdp->cbd_datlen = skb->len;

	/*
	 * On some FEC implementations data must be aligned on
	 * 4-byte boundaries. Use bounce buffers to copy data
	 * and get it aligned. Ugh.
	 */
	if (bdp->cbd_bufaddr & FEC_ALIGNMENT) {
		unsigned int index;
		index = bdp - fep->tx_bd_base;
		memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len);
		bdp->cbd_bufaddr = __pa(fep->tx_bounce[index]);
	}

	/* Save skb pointer. */
	fep->tx_skbuff[fep->skb_cur] = skb;

	dev->stats.tx_bytes += skb->len;
	fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;

	/* Push the data cache so the CPM does not get stale memory
	 * data.
	 */
	dma_sync_single(NULL, bdp->cbd_bufaddr,
			bdp->cbd_datlen, DMA_TO_DEVICE);

	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */

	status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
			| BD_ENET_TX_LAST | BD_ENET_TX_TC);
	bdp->cbd_sc = status;

	dev->trans_start = jiffies;

	/* Trigger transmission start */
	fecp->fec_x_des_active = 0;

	/* If this was the last BD in the ring, start at the beginning again. */
	if (status & BD_ENET_TX_WRAP) {
		bdp = fep->tx_bd_base;
	} else {
		bdp++;
	}

	if (bdp == fep->dirty_tx) {
		fep->tx_full = 1;
		netif_stop_queue(dev);
	}

	fep->cur_tx = (cbd_t *)bdp;

	spin_unlock_irqrestore(&fep->hw_lock, flags);

	return 0;
}

static void
fec_timeout(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	printk("%s: transmit timed out.\n", dev->name);
	dev->stats.tx_errors++;
#ifndef final_version
	{
	int	i;
	cbd_t	*bdp;

	printk("Ring data dump: cur_tx %lx%s, dirty_tx %lx cur_rx: %lx\n",
	       (unsigned long)fep->cur_tx, fep->tx_full ? " (full)" : "",
	       (unsigned long)fep->dirty_tx,
	       (unsigned long)fep->cur_rx);

	bdp = fep->tx_bd_base;
	printk(" tx: %u buffers\n",  TX_RING_SIZE);
	for (i = 0 ; i < TX_RING_SIZE; i++) {
		printk("  %08x: %04x %04x %08x\n",
		       (uint) bdp,
		       bdp->cbd_sc,
		       bdp->cbd_datlen,
		       (int) bdp->cbd_bufaddr);
		bdp++;
	}

	bdp = fep->rx_bd_base;
	printk(" rx: %lu buffers\n",  (unsigned long) RX_RING_SIZE);
	for (i = 0 ; i < RX_RING_SIZE; i++) {
		printk("  %08x: %04x %04x %08x\n",
		       (uint) bdp,
		       bdp->cbd_sc,
		       bdp->cbd_datlen,
		       (int) bdp->cbd_bufaddr);
		bdp++;
	}
	}
#endif
	fec_restart(dev, fep->full_duplex);
	netif_wake_queue(dev);
}

/* The interrupt handler.
 * This is called from the MPC core interrupt.
 */
static irqreturn_t
fec_enet_interrupt(int irq, void * dev_id)
{
	struct	net_device *dev = dev_id;
	volatile fec_t	*fecp;
	uint	int_events;
	irqreturn_t ret = IRQ_NONE;

	fecp = (volatile fec_t*)dev->base_addr;

	/* Get the interrupt events that caused us to be here. */
	do {
		int_events = fecp->fec_ievent;
		fecp->fec_ievent = int_events;

		/* Handle receive event in its own function. */
		if (int_events & FEC_ENET_RXF) {
			ret = IRQ_HANDLED;
			fec_enet_rx(dev);
		}

		/* Transmit OK, or non-fatal error. Update the buffer
		 * descriptors. FEC handles all errors, we just discover
		 * them as part of the transmit process.
		 */
		if (int_events & FEC_ENET_TXF) {
			ret = IRQ_HANDLED;
			fec_enet_tx(dev);
		}

		if (int_events & FEC_ENET_MII) {
			ret = IRQ_HANDLED;
			fec_enet_mii(dev);
		}

	} while (int_events);

	return ret;
}


static void
fec_enet_tx(struct net_device *dev)
{
	struct	fec_enet_private *fep;
	volatile cbd_t	*bdp;
	unsigned short status;
	struct	sk_buff	*skb;

	fep = netdev_priv(dev);
	spin_lock_irq(&fep->hw_lock);
	bdp = fep->dirty_tx;

	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
		if (bdp == fep->cur_tx && fep->tx_full == 0) break;

		skb = fep->tx_skbuff[fep->skb_dirty];
		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				   BD_ENET_TX_RL | BD_ENET_TX_UN |
				   BD_ENET_TX_CSL)) {
			dev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				dev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				dev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				dev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				dev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				dev->stats.tx_carrier_errors++;
		} else {
			dev->stats.tx_packets++;
		}

#ifndef final_version
		if (status & BD_ENET_TX_READY)
			printk("HEY! Enet xmit interrupt and TX_READY.\n");
#endif
		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			dev->stats.collisions++;

		/* Free the sk buffer associated with this last transmit. */
		dev_kfree_skb_any(skb);
		fep->tx_skbuff[fep->skb_dirty] = NULL;
		fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;

		/* Update pointer to next buffer descriptor to be transmitted. */
		if (status & BD_ENET_TX_WRAP)
			bdp = fep->tx_bd_base;
		else
			bdp++;

		/* Since we have freed up a buffer, the ring is no longer
		 * full.
		 */
		if (fep->tx_full) {
			fep->tx_full = 0;
			if (netif_queue_stopped(dev))
				netif_wake_queue(dev);
		}
	}
	fep->dirty_tx = (cbd_t *)bdp;
	spin_unlock_irq(&fep->hw_lock);
}


/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static void
fec_enet_rx(struct net_device *dev)
{
	struct	fec_enet_private *fep;
	volatile fec_t	*fecp;
	volatile cbd_t *bdp;
	unsigned short status;
	struct	sk_buff	*skb;
	ushort	pkt_len;
	__u8 *data;

#ifdef CONFIG_M532x
	flush_cache_all();
#endif

	fep = netdev_priv(dev);
	fecp = (volatile fec_t*)dev->base_addr;

	spin_lock_irq(&fep->hw_lock);

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {

#ifndef final_version
		/* Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((status & BD_ENET_RX_LAST) == 0)
			printk("FEC ENET: rcv is not +last\n");
#endif

		if (!fep->opened)
			goto rx_processing_done;

		/* Check for errors. */
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			dev->stats.rx_errors++;
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
				/* Frame too long or too short. */
				dev->stats.rx_length_errors++;
			}
			if (status & BD_ENET_RX_NO)	/* Frame alignment */
				dev->stats.rx_frame_errors++;
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				dev->stats.rx_crc_errors++;
			if (status & BD_ENET_RX_OV)	/* FIFO overrun */
				dev->stats.rx_fifo_errors++;
		}

		/* Report late collisions as a frame error.
		 * On this error, the BD is closed, but we don't know what we
		 * have in the buffer.  So, just drop this frame on the floor.
		 */
		if (status & BD_ENET_RX_CL) {
			dev->stats.rx_errors++;
			dev->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame. */
		dev->stats.rx_packets++;
		pkt_len = bdp->cbd_datlen;
		dev->stats.rx_bytes += pkt_len;
		data = (__u8*)__va(bdp->cbd_bufaddr);

		dma_sync_single(NULL, (unsigned long)__pa(data),
				pkt_len - 4, DMA_FROM_DEVICE);

		/* This does 16 byte alignment, exactly what we need.
		 * The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		skb = dev_alloc_skb(pkt_len-4);

		if (skb == NULL) {
			printk("%s: Memory squeeze, dropping packet.\n", dev->name);
			dev->stats.rx_dropped++;
		} else {
			skb_put(skb,pkt_len-4);	/* Make room */
			skb_copy_to_linear_data(skb, data, pkt_len-4);
			skb->protocol=eth_type_trans(skb,dev);
			netif_rx(skb);
		}
rx_processing_done:

		/* Clear the status flags for this buffer. */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty. */
		status |= BD_ENET_RX_EMPTY;
		bdp->cbd_sc = status;

		/* Update BD pointer to next entry. */
		if (status & BD_ENET_RX_WRAP)
			bdp = fep->rx_bd_base;
		else
			bdp++;

#if 1
		/* Doing this here will keep the FEC running while we process
		 * incoming frames.  On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		fecp->fec_r_des_active = 0;
#endif
	} /* while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) */
	fep->cur_rx = (cbd_t *)bdp;

#if 0
	/* Doing this here will allow us to process all frames in the
	 * ring before the FEC is allowed to put more there.  On a heavily
	 * loaded network, some frames may be lost.  Unfortunately, this
	 * increases the interrupt overhead since we can potentially work
	 * our way back to the interrupt return only to come right back
	 * here.
	 */
	fecp->fec_r_des_active = 0;
#endif

	spin_unlock_irq(&fep->hw_lock);
}


/* called from interrupt context */
static void
fec_enet_mii(struct net_device *dev)
{
	struct	fec_enet_private *fep;
	volatile fec_t	*ep;
	mii_list_t	*mip;
	uint		mii_reg;

	fep = netdev_priv(dev);
	spin_lock_irq(&fep->mii_lock);

	ep = fep->hwp;
	mii_reg = ep->fec_mii_data;

	if ((mip = mii_head) == NULL) {
		printk("MII and no head!\n");
		goto unlock;
	}

	if (mip->mii_func != NULL)
		(*(mip->mii_func))(mii_reg, dev);

	mii_head = mip->mii_next;
	mip->mii_next = mii_free;
	mii_free = mip;

	if ((mip = mii_head) != NULL)
		ep->fec_mii_data = mip->mii_regval;

unlock:
	spin_unlock_irq(&fep->mii_lock);
}

static int
mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_device *))
{
	struct fec_enet_private *fep;
	unsigned long	flags;
	mii_list_t	*mip;
	int		retval;

	/* Add PHY address to register command. */
	fep = netdev_priv(dev);
	spin_lock_irqsave(&fep->mii_lock, flags);

	regval |= fep->phy_addr << 23;
	retval = 0;

	if ((mip = mii_free) != NULL) {
		mii_free = mip->mii_next;
		mip->mii_regval = regval;
		mip->mii_func = func;
		mip->mii_next = NULL;
		if (mii_head) {
			mii_tail->mii_next = mip;
			mii_tail = mip;
		} else {
			mii_head = mii_tail = mip;
			fep->hwp->fec_mii_data = regval;
		}
	} else {
		retval = 1;
	}

	spin_unlock_irqrestore(&fep->mii_lock, flags);
	return retval;
}

static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c)
{
	if(!c)
		return;

	for (; c->mii_data != mk_mii_end; c++)
		mii_queue(dev, c->mii_data, c->funct);
}

static void mii_parse_sr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC);

	if (mii_reg & 0x0004)
		status |= PHY_STAT_LINK;
	if (mii_reg & 0x0010)
		status |= PHY_STAT_FAULT;
	if (mii_reg & 0x0020)
		status |= PHY_STAT_ANC;
	*s = status;
}

static void mii_parse_cr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_CONF_ANE | PHY_CONF_LOOP);

	if (mii_reg & 0x1000)
		status |= PHY_CONF_ANE;
	if (mii_reg & 0x4000)
		status |= PHY_CONF_LOOP;
	*s = status;
}

static void mii_parse_anar(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_CONF_SPMASK);

	if (mii_reg & 0x0020)
		status |= PHY_CONF_10HDX;
	if (mii_reg & 0x0040)
		status |= PHY_CONF_10FDX;
	if (mii_reg & 0x0080)
		status |= PHY_CONF_100HDX;
	if (mii_reg & 0x00100)
		status |= PHY_CONF_100FDX;
	*s = status;
}

/* ------------------------------------------------------------------------- */
/* The Level one LXT970 is used by many boards                               */

#define MII_LXT970_MIRROR    16  /* Mirror register           */
#define MII_LXT970_IER       17  /* Interrupt Enable Register */
#define MII_LXT970_ISR       18  /* Interrupt Status Register */
#define MII_LXT970_CONFIG    19  /* Configuration Register    */
#define MII_LXT970_CSR       20  /* Chip Status Register      */

static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_SPMASK);
	if (mii_reg & 0x0800) {
		if (mii_reg & 0x1000)
			status |= PHY_STAT_100FDX;
		else
			status |= PHY_STAT_100HDX;
	} else {
		if (mii_reg & 0x1000)
			status |= PHY_STAT_10FDX;
		else
			status |= PHY_STAT_10HDX;
	}
	*s = status;
}

static phy_cmd_t const phy_cmd_lxt970_config[] = {
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt970_startup[] = { /* enable interrupts */
		{ mk_mii_write(MII_LXT970_IER, 0x0002), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt970_ack_int[] = {
		/* read SR and ISR to acknowledge */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_LXT970_ISR), NULL },

		/* find out the current status */
		{ mk_mii_read(MII_LXT970_CSR), mii_parse_lxt970_csr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt970_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_LXT970_IER, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_lxt970 = {
	.id = 0x07810000,
	.name = "LXT970",
	.config = phy_cmd_lxt970_config,
	.startup = phy_cmd_lxt970_startup,
	.ack_int = phy_cmd_lxt970_ack_int,
	.shutdown = phy_cmd_lxt970_shutdown
};

/* ------------------------------------------------------------------------- */
/* The Level one LXT971 is used on some of my custom boards                  */

/* register definitions for the 971 */

#define MII_LXT971_PCR       16  /* Port Control Register     */
#define MII_LXT971_SR2       17  /* Status Register 2         */
#define MII_LXT971_IER       18  /* Interrupt Enable Register */
#define MII_LXT971_ISR       19  /* Interrupt Status Register */
#define MII_LXT971_LCR       20  /* LED Control Register      */
#define MII_LXT971_TCR       30  /* Transmit Control Register */

/*
 * I had some nice ideas of running the MDIO faster...
 * The 971 should support 8MHz and I tried it, but things acted really
 * weird, so 2.5 MHz ought to be enough for anyone...
 */

static void mii_parse_lxt971_sr2(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC);

	if (mii_reg & 0x0400) {
		fep->link = 1;
		status |= PHY_STAT_LINK;
	} else {
		fep->link = 0;
	}
	if (mii_reg & 0x0080)
		status |= PHY_STAT_ANC;
	if (mii_reg & 0x4000) {
		if (mii_reg & 0x0200)
			status |= PHY_STAT_100FDX;
		else
			status |= PHY_STAT_100HDX;
	} else {
		if (mii_reg & 0x0200)
			status |= PHY_STAT_10FDX;
		else
			status |= PHY_STAT_10HDX;
	}
	if (mii_reg & 0x0008)
		status |= PHY_STAT_FAULT;

	*s = status;
}

static phy_cmd_t const phy_cmd_lxt971_config[] = {
		/* limit to 10MBit because my prototype board
		 * doesn't work with 100. */
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt971_startup[] = {  /* enable interrupts */
		{ mk_mii_write(MII_LXT971_IER, 0x00f2), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_write(MII_LXT971_LCR, 0xd422), NULL }, /* LED config */
		/* Somehow the 971 tells me that the link is down on the
		 * first read after power-up.
		 * Read here to get a valid value in ack_int. */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt971_ack_int[] = {
		/* acknowledge the int before reading status ! */
		{ mk_mii_read(MII_LXT971_ISR), NULL },
		/* find out the current status */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt971_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_LXT971_IER, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_lxt971 = {
	.id = 0x0001378e,
	.name = "LXT971",
	.config = phy_cmd_lxt971_config,
	.startup = phy_cmd_lxt971_startup,
	.ack_int = phy_cmd_lxt971_ack_int,
	.shutdown = phy_cmd_lxt971_shutdown
};

/* ------------------------------------------------------------------------- */
/* The Quality Semiconductor QS6612 is used on the RPX CLLF                  */

/* register definitions */

#define MII_QS6612_MCR       17  /* Mode Control Register      */
#define MII_QS6612_FTR       27  /* Factory Test Register      */
#define MII_QS6612_MCO       28  /* Misc. Control Register     */
#define MII_QS6612_ISR       29  /* Interrupt Source Register  */
#define MII_QS6612_IMR       30  /* Interrupt Mask Register    */
#define MII_QS6612_PCR       31  /* 100BaseTx PHY Control Reg. */

static void mii_parse_qs6612_pcr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_SPMASK);

	switch((mii_reg >> 2) & 7) {
	case 1: status |= PHY_STAT_10HDX; break;
	case 2: status |= PHY_STAT_100HDX; break;
	case 5: status |= PHY_STAT_10FDX; break;
	case 6: status |= PHY_STAT_100FDX; break;
	}

	*s = status;
}

static phy_cmd_t const phy_cmd_qs6612_config[] = {
		/* The PHY powers up isolated on the RPX,
		 * so send a command to allow operation.
		 */
		{ mk_mii_write(MII_QS6612_PCR, 0x0dc0), NULL },

		/* parse cr and anar to get some info */
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_qs6612_startup[] = {  /* enable interrupts */
		{ mk_mii_write(MII_QS6612_IMR, 0x003a), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_qs6612_ack_int[] = {
		/* we need to read ISR, SR and ANER to acknowledge */
		{ mk_mii_read(MII_QS6612_ISR), NULL },
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_REG_ANER), NULL },

		/* read pcr to get info */
		{ mk_mii_read(MII_QS6612_PCR), mii_parse_qs6612_pcr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_qs6612_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_QS6612_IMR, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_qs6612 = {
	.id = 0x00181440,
	.name = "QS6612",
	.config = phy_cmd_qs6612_config,
	.startup = phy_cmd_qs6612_startup,
	.ack_int = phy_cmd_qs6612_ack_int,
	.shutdown = phy_cmd_qs6612_shutdown
};

/* ------------------------------------------------------------------------- */
/* AMD AM79C874 phy                                                          */

/* register definitions for the 874 */

#define MII_AM79C874_MFR       16  /* Miscellaneous Feature Register */
#define MII_AM79C874_ICSR      17  /* Interrupt/Status Register      */
#define MII_AM79C874_DR        18  /* Diagnostic Register            */
#define MII_AM79C874_PMLR      19  /* Power and Loopback Register    */
#define MII_AM79C874_MCR       21  /* Mode Control Register          */
#define MII_AM79C874_DC        23  /* Disconnect Counter             */
#define MII_AM79C874_REC       24  /* Receive Error Counter          */

static void mii_parse_am79c874_dr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_ANC);

	if (mii_reg & 0x0080)
		status |= PHY_STAT_ANC;
	if (mii_reg & 0x0400)
		status |= ((mii_reg & 0x0800) ? PHY_STAT_100FDX : PHY_STAT_100HDX);
	else
		status |= ((mii_reg & 0x0800) ? PHY_STAT_10FDX : PHY_STAT_10HDX);

	*s = status;
}

static phy_cmd_t const phy_cmd_am79c874_config[] = {
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_am79c874_startup[] = {  /* enable interrupts */
		{ mk_mii_write(MII_AM79C874_ICSR, 0xff00), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_am79c874_ack_int[] = {
		/* find out the current status */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
		/* we only need to read ISR to acknowledge */
		{ mk_mii_read(MII_AM79C874_ICSR), NULL },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_am79c874_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_AM79C874_ICSR, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_am79c874 = {
	.id = 0x00022561,
	.name = "AM79C874",
	.config = phy_cmd_am79c874_config,
	.startup = phy_cmd_am79c874_startup,
	.ack_int = phy_cmd_am79c874_ack_int,
	.shutdown = phy_cmd_am79c874_shutdown
};


/* ------------------------------------------------------------------------- */
/* Kendin KS8721BL phy                                                       */

/* register definitions for the 8721 */

#define MII_KS8721BL_RXERCR	21
#define MII_KS8721BL_ICSR	27
#define MII_KS8721BL_PHYCR	31

static phy_cmd_t const phy_cmd_ks8721bl_config[] = {
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_ks8721bl_startup[] = {  /* enable interrupts */
		{ mk_mii_write(MII_KS8721BL_ICSR, 0xff00), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_ks8721bl_ack_int[] = {
		/* find out the current status */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		/* we only need to read ISR to acknowledge */
		{ mk_mii_read(MII_KS8721BL_ICSR), NULL },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_ks8721bl_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_KS8721BL_ICSR, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_ks8721bl = {
	.id = 0x00022161,
	.name = "KS8721BL",
	.config = phy_cmd_ks8721bl_config,
	.startup = phy_cmd_ks8721bl_startup,
	.ack_int = phy_cmd_ks8721bl_ack_int,
	.shutdown = phy_cmd_ks8721bl_shutdown
};

/* ------------------------------------------------------------------------- */
/* register definitions for the DP83848 */

#define MII_DP8384X_PHYSTST    16  /* PHY Status Register */

static void mii_parse_dp8384x_sr2(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);

	*s &= ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC);

	/* Link up */
	if (mii_reg & 0x0001) {
		fep->link = 1;
		*s |= PHY_STAT_LINK;
	} else
		fep->link = 0;
	/* Status of link */
	if (mii_reg & 0x0010)   /* Autonegotiation complete */
		*s |= PHY_STAT_ANC;
	if (mii_reg & 0x0002) {   /* 10MBps? */
		if (mii_reg & 0x0004)   /* Full Duplex? */
			*s |= PHY_STAT_10FDX;
		else
			*s |= PHY_STAT_10HDX;
	} else {                  /* 100 Mbps? */
		if (mii_reg & 0x0004)   /* Full Duplex? */
			*s |= PHY_STAT_100FDX;
		else
			*s |= PHY_STAT_100HDX;
	}
	if (mii_reg & 0x0008)
		*s |= PHY_STAT_FAULT;
}

static phy_info_t phy_info_dp83848= {
	0x020005c9,
	"DP83848",

	(const phy_cmd_t []) {  /* config */
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_read(MII_DP8384X_PHYSTST), mii_parse_dp8384x_sr2 },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* startup - enable interrupts */
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) { /* ack_int - never happens, no interrupt */
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* shutdown */
		{ mk_mii_end, }
	},
};

/* ------------------------------------------------------------------------- */

static phy_info_t const * const phy_info[] = {
	&phy_info_lxt970,
	&phy_info_lxt971,
	&phy_info_qs6612,
	&phy_info_am79c874,
	&phy_info_ks8721bl,
	&phy_info_dp83848,
	NULL
};

/* ------------------------------------------------------------------------- */
#ifdef HAVE_mii_link_interrupt
static irqreturn_t
mii_link_interrupt(int irq, void * dev_id);

/*
 *	This is specific to the MII interrupt setup of the M5272EVB.
 */
static void __inline__ fec_request_mii_intr(struct net_device *dev)
{
	if (request_irq(66, mii_link_interrupt, IRQF_DISABLED, "fec(MII)", dev) != 0)
		printk("FEC: Could not allocate fec(MII) IRQ(66)!\n");
}

static void __inline__ fec_disable_phy_intr(void)
{
	volatile unsigned long *icrp;
	icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1);
	*icrp = 0x08000000;
}

static void __inline__ fec_phy_ack_intr(void)
{
	volatile unsigned long *icrp;
	/* Acknowledge the interrupt */
	icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1);
	*icrp = 0x0d000000;
}
#endif

#ifdef CONFIG_M5272
static void __inline__ fec_get_mac(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile fec_t *fecp;
	unsigned char *iap, tmpaddr[ETH_ALEN];

	fecp = fep->hwp;

	if (FEC_FLASHMAC) {
		/*
		 * Get MAC address from FLASH.
		 * If it is all 1's or 0's, use the default.
		 */
		iap = (unsigned char *)FEC_FLASHMAC;
		if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
		    (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
			iap = fec_mac_default;
		if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
		    (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
			iap = fec_mac_default;
	} else {
		*((unsigned long *) &tmpaddr[0]) = fecp->fec_addr_low;
		*((unsigned short *) &tmpaddr[4]) = (fecp->fec_addr_high >> 16);
		iap = &tmpaddr[0];
	}

	memcpy(dev->dev_addr, iap, ETH_ALEN);

	/* Adjust MAC if using default MAC address */
	if (iap == fec_mac_default)
		dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
}
#endif

/* ------------------------------------------------------------------------- */

static void mii_display_status(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);

	if (!fep->link && !fep->old_link) {
		/* Link is still down - don't print anything */
		return;
	}

	printk("%s: status: ", dev->name);

	if (!fep->link) {
		printk("link down");
	} else {
		printk("link up");

		switch(*s & PHY_STAT_SPMASK) {
		case PHY_STAT_100FDX: printk(", 100MBit Full Duplex"); break;
		case PHY_STAT_100HDX: printk(", 100MBit Half Duplex"); break;
		case PHY_STAT_10FDX: printk(", 10MBit Full Duplex"); break;
		case PHY_STAT_10HDX: printk(", 10MBit Half Duplex"); break;
		default:
			printk(", Unknown speed/duplex");
		}

		if (*s & PHY_STAT_ANC)
			printk(", auto-negotiation complete");
	}

	if (*s & PHY_STAT_FAULT)
		printk(", remote fault");

	printk(".\n");
}

static void mii_display_config(struct work_struct *work)
{
	struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task);
	struct net_device *dev = fep->netdev;
	uint status = fep->phy_status;

	/*
	** When we get here, phy_task is already removed from
	** the workqueue.  It is thus safe to reuse it.
	*/
	fep->mii_phy_task_queued = 0;
	printk("%s: config: auto-negotiation ", dev->name);

	if (status & PHY_CONF_ANE)
		printk("on");
	else
		printk("off");

	if (status & PHY_CONF_100FDX)
		printk(", 100FDX");
	if (status & PHY_CONF_100HDX)
		printk(", 100HDX");
	if (status & PHY_CONF_10FDX)
		printk(", 10FDX");
	if (status & PHY_CONF_10HDX)
		printk(", 10HDX");
	if (!(status & PHY_CONF_SPMASK))
		printk(", No speed/duplex selected?");

	if (status & PHY_CONF_LOOP)
		printk(", loopback enabled");

	printk(".\n");

	fep->sequence_done = 1;
}

static void mii_relink(struct work_struct *work)
{
	struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task);
	struct net_device *dev = fep->netdev;
	int duplex;

	/*
	** When we get here, phy_task is already removed from
	** the workqueue.  It is thus safe to reuse it.
	*/
	fep->mii_phy_task_queued = 0;
	fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
	mii_display_status(dev);
	fep->old_link = fep->link;

	if (fep->link) {
		duplex = 0;
		if (fep->phy_status
		    & (PHY_STAT_100FDX | PHY_STAT_10FDX))
			duplex = 1;
		fec_restart(dev, duplex);
	} else
		fec_stop(dev);

#if 0
	enable_irq(fep->mii_irq);
#endif

}

/* mii_queue_relink is called in interrupt context from mii_link_interrupt */
static void mii_queue_relink(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/*
	** We cannot queue phy_task twice in the workqueue.  It
	** would cause an endless loop in the workqueue.
	** Fortunately, if the last mii_relink entry has not yet been
	** executed now, it will do the job for the current interrupt,
	** which is just what we want.
	*/
	if (fep->mii_phy_task_queued)
		return;

	fep->mii_phy_task_queued = 1;
	INIT_WORK(&fep->phy_task, mii_relink);
	schedule_work(&fep->phy_task);
}

/* mii_queue_config is called in interrupt context from fec_enet_mii */
static void mii_queue_config(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	if (fep->mii_phy_task_queued)
		return;

	fep->mii_phy_task_queued = 1;
	INIT_WORK(&fep->phy_task, mii_display_config);
	schedule_work(&fep->phy_task);
}

phy_cmd_t const phy_cmd_relink[] = {
	{ mk_mii_read(MII_REG_CR), mii_queue_relink },
	{ mk_mii_end, }
	};
phy_cmd_t const phy_cmd_config[] = {
	{ mk_mii_read(MII_REG_CR), mii_queue_config },
	{ mk_mii_end, }
	};

/* Read remainder of PHY ID. */
static void
mii_discover_phy3(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep;
	int i;

	fep = netdev_priv(dev);
	fep->phy_id |= (mii_reg & 0xffff);
	printk("fec: PHY @ 0x%x, ID 0x%08x", fep->phy_addr, fep->phy_id);

	for(i = 0; phy_info[i]; i++) {
		if(phy_info[i]->id == (fep->phy_id >> 4))
			break;
	}

	if (phy_info[i])
		printk(" -- %s\n", phy_info[i]->name);
	else
		printk(" -- unknown PHY!\n");

	fep->phy = phy_info[i];
	fep->phy_id_done = 1;
}

/* Scan all of the MII PHY addresses looking for someone to respond
 * with a valid ID.  This usually happens quickly.
 */
static void
mii_discover_phy(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep;
	volatile fec_t *fecp;
	uint phytype;

	fep = netdev_priv(dev);
	fecp = fep->hwp;

	if (fep->phy_addr < 32) {
		if ((phytype = (mii_reg & 0xffff)) != 0xffff && phytype != 0) {

			/* Got first part of ID, now get remainder. */
			fep->phy_id = phytype << 16;
			mii_queue(dev, mk_mii_read(MII_REG_PHYIR2),
							mii_discover_phy3);
		} else {
			fep->phy_addr++;
			mii_queue(dev, mk_mii_read(MII_REG_PHYIR1),
							mii_discover_phy);
		}
	} else {
		printk("FEC: No PHY device found.\n");
		/* Disable external MII interface */
		fecp->fec_mii_speed = fep->phy_speed = 0;
#ifdef HAVE_mii_link_interrupt
		fec_disable_phy_intr();
#endif
	}
}

/* This interrupt occurs when the PHY detects a link change. */
#ifdef HAVE_mii_link_interrupt
static irqreturn_t
mii_link_interrupt(int irq, void * dev_id)
{
	struct	net_device *dev = dev_id;
	struct fec_enet_private *fep = netdev_priv(dev);

	fec_phy_ack_intr();

#if 0
	disable_irq(fep->mii_irq);  /* disable now, enable later */
#endif

	mii_do_cmd(dev, fep->phy->ack_int);
	mii_do_cmd(dev, phy_cmd_relink);  /* restart and display status */

	return IRQ_HANDLED;
}
#endif

static int
fec_enet_open(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/* I should reset the ring buffers here, but I don't yet know
	 * a simple way to do that.
	 */
	fec_set_mac_address(dev);

	fep->sequence_done = 0;
	fep->link = 0;

	if (fep->phy) {
		mii_do_cmd(dev, fep->phy->ack_int);
		mii_do_cmd(dev, fep->phy->config);
		mii_do_cmd(dev, phy_cmd_config);  /* display configuration */

		/* Poll until the PHY tells us its configuration
		 * (not link state).
		 * Request is initiated by mii_do_cmd above, but answer
		 * comes by interrupt.
		 * This should take about 25 usec per register at 2.5 MHz,
		 * and we read approximately 5 registers.
		 */
		while(!fep->sequence_done)
			schedule();

		mii_do_cmd(dev, fep->phy->startup);

		/* Set the initial link state to true.  A lot of hardware
		 * based on this device does not implement a PHY interrupt,
		 * so we are never notified of link change.
		 */
		fep->link = 1;
	} else {
		fep->link = 1; /* let's just try it and see */
		/* no phy, go full duplex, it's most likely a hub chip */
		fec_restart(dev, 1);
	}

	netif_start_queue(dev);
	fep->opened = 1;
	return 0;		/* Success */
}

static int
fec_enet_close(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/* Don't know what to do yet. */
	fep->opened = 0;
	netif_stop_queue(dev);
	fec_stop(dev);

	return 0;
}

/* Set or clear the multicast filter for this adaptor.
 * Skeleton taken from sunlance driver.
 * The CPM Ethernet implementation allows Multicast as well as individual
 * MAC address filtering.  Some of the drivers check to make sure it is
 * a group multicast address, and discard those that are not.  I guess I
 * will do the same for now, but just remove the test if you want
 * individual filtering as well (do the upper net layers want or support
 * this kind of feature?).
 */

#define HASH_BITS	6		/* #bits in hash */
#define CRC32_POLY	0xEDB88320
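/* Illustrative note on the hash filter logic used by set_multicast_list()
 * below: the CRC-32 of each destination MAC is computed bit-serially with
 * CRC32_POLY, and only the top HASH_BITS (6) bits of the result select one
 * of the 64 hash-table bits.  For example, a (hypothetical) hash value of 37
 * would set bit 5 of fec_grp_hash_table_high (37 - 32 = 5), while a hash
 * value of 12 would set bit 12 of fec_grp_hash_table_low.
 */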
static void set_multicast_list(struct net_device *dev)
{
	struct fec_enet_private *fep;
	volatile fec_t *ep;
	struct dev_mc_list *dmi;
	unsigned int i, j, bit, data, crc;
	unsigned char hash;

	fep = netdev_priv(dev);
	ep = fep->hwp;

	if (dev->flags&IFF_PROMISC) {
		ep->fec_r_cntrl |= 0x0008;
	} else {

		ep->fec_r_cntrl &= ~0x0008;

		if (dev->flags & IFF_ALLMULTI) {
			/* Catch all multicast addresses, so set the
			 * filter to all 1's.
			 */
			ep->fec_grp_hash_table_high = 0xffffffff;
			ep->fec_grp_hash_table_low = 0xffffffff;
		} else {
			/* Clear filter and add the addresses in hash register. */
			ep->fec_grp_hash_table_high = 0;
			ep->fec_grp_hash_table_low = 0;

			dmi = dev->mc_list;

			for (j = 0; j < dev->mc_count; j++, dmi = dmi->next)
			{
				/* Only support group multicast for now. */
				if (!(dmi->dmi_addr[0] & 1))
					continue;

				/* calculate crc32 value of mac address */
				crc = 0xffffffff;

				for (i = 0; i < dmi->dmi_addrlen; i++)
				{
					data = dmi->dmi_addr[i];
					for (bit = 0; bit < 8; bit++, data >>= 1)
					{
						crc = (crc >> 1) ^
						(((crc ^ data) & 1) ? CRC32_POLY : 0);
					}
				}

				/* only upper 6 bits (HASH_BITS) are used
				 * which point to a specific bit in the hash registers
				 */
				hash = (crc >> (32 - HASH_BITS)) & 0x3f;

				if (hash > 31)
					ep->fec_grp_hash_table_high |= 1 << (hash - 32);
				else
					ep->fec_grp_hash_table_low |= 1 << hash;
			}
		}
	}
}

/* Set a MAC change in hardware. */
static void
fec_set_mac_address(struct net_device *dev)
{
	volatile fec_t *fecp;

	fecp = ((struct fec_enet_private *)netdev_priv(dev))->hwp;

	/* Set station address. */
	fecp->fec_addr_low = dev->dev_addr[3] | (dev->dev_addr[2] << 8) |
		(dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24);
	fecp->fec_addr_high = (dev->dev_addr[5] << 16) |
		(dev->dev_addr[4] << 24);

}

 /*
  * XXX:  We need to clean up on failure exits here.
  *
  * index is only used in legacy code
  */
int __init fec_enet_init(struct net_device *dev, int index)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	unsigned long	mem_addr;
	volatile cbd_t	*bdp;
	cbd_t		*cbd_base;
	volatile fec_t	*fecp;
	int		i, j;

	/* Allocate memory for buffer descriptors. */
	mem_addr = (unsigned long)dma_alloc_coherent(NULL, PAGE_SIZE,
			&fep->bd_dma, GFP_KERNEL);
	if (mem_addr == 0) {
		printk("FEC: allocate descriptor memory failed?\n");
		return -ENOMEM;
	}

	spin_lock_init(&fep->hw_lock);
	spin_lock_init(&fep->mii_lock);

	/* Create an Ethernet device instance. */
	fecp = (volatile fec_t *)dev->base_addr;

	fep->index = index;
	fep->hwp = fecp;
	fep->netdev = dev;

	/* Whack a reset.  We should wait for this. */
	fecp->fec_ecntrl = 1;
	udelay(10);

	/* Set the Ethernet address */
#ifdef CONFIG_M5272
	fec_get_mac(dev);
#else
	{
		unsigned long l;
		l = fecp->fec_addr_low;
		dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24);
		dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16);
		dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8);
		dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0);
		l = fecp->fec_addr_high;
		dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24);
		dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16);
	}
#endif

	cbd_base = (cbd_t *)mem_addr;

	/* Set receive and transmit descriptor base. */
	fep->rx_bd_base = cbd_base;
	fep->tx_bd_base = cbd_base + RX_RING_SIZE;

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->cur_rx = fep->rx_bd_base;

	fep->skb_cur = fep->skb_dirty = 0;

	/* Initialize the receive buffer descriptors. */
	bdp = fep->rx_bd_base;
	for (i=0; i<FEC_ENET_RX_PAGES; i++) {

		/* Allocate a page. */
		mem_addr = __get_free_page(GFP_KERNEL);
		/* XXX: missing check for allocation failure */

		/* Initialize the BD for every fragment in the page. */
		for (j=0; j<FEC_ENET_RX_FRPPG; j++) {
			bdp->cbd_sc = BD_ENET_RX_EMPTY;
			bdp->cbd_bufaddr = __pa(mem_addr);
			mem_addr += FEC_ENET_RX_FRSIZE;
			bdp++;
		}
	}

	/* Set the last buffer to wrap. */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* ...and the same for transmit. */
	bdp = fep->tx_bd_base;
	for (i=0, j=FEC_ENET_TX_FRPPG; i<TX_RING_SIZE; i++) {
		if (j >= FEC_ENET_TX_FRPPG) {
			mem_addr = __get_free_page(GFP_KERNEL);
			j = 1;
		} else {
			mem_addr += FEC_ENET_TX_FRSIZE;
			j++;
		}
		fep->tx_bounce[i] = (unsigned char *) mem_addr;

		/* Initialize the BD for every fragment in the page. */
		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}

	/* Set the last buffer to wrap. */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* Set receive and transmit descriptor base. */
	fecp->fec_r_des_start = fep->bd_dma;
	fecp->fec_x_des_start = (unsigned long)fep->bd_dma + sizeof(cbd_t)
				* RX_RING_SIZE;

#ifdef HAVE_mii_link_interrupt
	fec_request_mii_intr(dev);
#endif

	fecp->fec_grp_hash_table_high = 0;
	fecp->fec_grp_hash_table_low = 0;
	fecp->fec_r_buff_size = PKT_MAXBLR_SIZE;
	fecp->fec_ecntrl = 2;
	fecp->fec_r_des_active = 0;
#ifndef CONFIG_M5272
	fecp->fec_hash_table_high = 0;
	fecp->fec_hash_table_low = 0;
#endif

	/* The FEC Ethernet specific entries in the device structure. */
	dev->open = fec_enet_open;
	dev->hard_start_xmit = fec_enet_start_xmit;
	dev->tx_timeout = fec_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->stop = fec_enet_close;
	dev->set_multicast_list = set_multicast_list;

	for (i=0; i<NMII-1; i++)
		mii_cmds[i].mii_next = &mii_cmds[i+1];
	mii_free = mii_cmds;

	/* setup MII interface */
	fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04;
	fecp->fec_x_cntrl = 0x00;

	/*
	 * Set MII speed to 2.5 MHz
	 */
	fep->phy_speed = ((((clk_get_rate(fep->clk) / 2 + 4999999)
					/ 2500000) / 2) & 0x3F) << 1;
	fecp->fec_mii_speed = fep->phy_speed;
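	/* Worked example of the divider above (illustrative; the 66 MHz figure
	 * is an assumed clock rate, not taken from any particular board): with
	 * clk_get_rate() returning 66000000, the expression evaluates as
	 * (66000000/2 + 4999999) / 2500000 = 15, / 2 = 7, & 0x3F = 7, << 1 = 14,
	 * so fec_mii_speed is written with 0x0E, dividing the module clock down
	 * to roughly the 2.5 MHz MDIO rate the comment asks for.
	 */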
	fec_restart(dev, 0);

	/* Clear and enable interrupts */
	fecp->fec_ievent = 0xffc00000;
	fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII);

	/* Queue up command to detect the PHY and initialize the
	 * remainder of the interface.
	 */
	fep->phy_id_done = 0;
	fep->phy_addr = 0;
	mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy);

	return 0;
}

/* This function is called to start or restart the FEC during a link
 * change.  This only happens when switching between half and full
 * duplex.
 */
static void
fec_restart(struct net_device *dev, int duplex)
{
	struct fec_enet_private *fep;
	volatile cbd_t *bdp;
	volatile fec_t *fecp;
	int i;

	fep = netdev_priv(dev);
	fecp = fep->hwp;

	/* Whack a reset.  We should wait for this. */
	fecp->fec_ecntrl = 1;
	udelay(10);

	/* Clear any outstanding interrupt. */
	fecp->fec_ievent = 0xffc00000;

	/* Set station address. */
	fec_set_mac_address(dev);

	/* Reset all multicast. */
	fecp->fec_grp_hash_table_high = 0;
	fecp->fec_grp_hash_table_low = 0;

	/* Set maximum receive buffer size. */
	fecp->fec_r_buff_size = PKT_MAXBLR_SIZE;

	/* Set receive and transmit descriptor base. */
	fecp->fec_r_des_start = fep->bd_dma;
	fecp->fec_x_des_start = (unsigned long)fep->bd_dma + sizeof(cbd_t)
				* RX_RING_SIZE;

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->cur_rx = fep->rx_bd_base;

	/* Reset SKB transmit buffers. */
	fep->skb_cur = fep->skb_dirty = 0;
	for (i=0; i<=TX_RING_MOD_MASK; i++) {
		if (fep->tx_skbuff[i] != NULL) {
			dev_kfree_skb_any(fep->tx_skbuff[i]);
			fep->tx_skbuff[i] = NULL;
		}
	}

	/* Initialize the receive buffer descriptors. */
	bdp = fep->rx_bd_base;
	for (i=0; i<RX_RING_SIZE; i++) {

		/* Initialize the BD for every fragment in the page. */
		bdp->cbd_sc = BD_ENET_RX_EMPTY;
		bdp++;
	}

	/* Set the last buffer to wrap. */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* ...and the same for transmit. */
	bdp = fep->tx_bd_base;
	for (i=0; i<TX_RING_SIZE; i++) {

		/* Initialize the BD for every fragment in the page. */
		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}

	/* Set the last buffer to wrap. */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* Enable MII mode. */
	if (duplex) {
		fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04;	/* MII enable */
		fecp->fec_x_cntrl = 0x04;			/* FD enable */
	} else {
		/* MII enable|No Rcv on Xmit */
		fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x06;
		fecp->fec_x_cntrl = 0x00;
	}
	fep->full_duplex = duplex;

	/* Set MII speed. */
	fecp->fec_mii_speed = fep->phy_speed;

	/* And last, enable the transmit and receive processing. */
	fecp->fec_ecntrl = 2;
	fecp->fec_r_des_active = 0;

	/* Enable interrupts we wish to service. */
	fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII);
}

static void
fec_stop(struct net_device *dev)
{
	volatile fec_t *fecp;
	struct fec_enet_private *fep;

	fep = netdev_priv(dev);
	fecp = fep->hwp;

	/*
	** We cannot expect a graceful transmit stop without link !!!
	*/
	if (fep->link)
	{
		fecp->fec_x_cntrl = 0x01;	/* Graceful transmit stop */
		udelay(10);
		if (!(fecp->fec_ievent & FEC_ENET_GRA))
			printk("fec_stop : Graceful transmit stop did not complete !\n");
	}

	/* Whack a reset.  We should wait for this. */
	fecp->fec_ecntrl = 1;
	udelay(10);

	/* Clear outstanding MII command interrupts. */
	fecp->fec_ievent = FEC_ENET_MII;

	fecp->fec_imask = FEC_ENET_MII;
	fecp->fec_mii_speed = fep->phy_speed;
}

static int __devinit
fec_probe(struct platform_device *pdev)
{
	struct fec_enet_private *fep;
	struct net_device *ndev;
	int i, irq, ret = 0;
	struct resource *r;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r)
		return -ENXIO;

	r = request_mem_region(r->start, resource_size(r), pdev->name);
	if (!r)
		return -EBUSY;

	/* Init network device */
	ndev = alloc_etherdev(sizeof(struct fec_enet_private));
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* setup board info structure */
	fep = netdev_priv(ndev);
	memset(fep, 0, sizeof(*fep));

	ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r));

	if (!ndev->base_addr) {
		ret = -ENOMEM;
		goto failed_ioremap;
	}

	platform_set_drvdata(pdev, ndev);

	/* This device has up to three irqs on some platforms */
	for (i = 0; i < 3; i++) {
		irq = platform_get_irq(pdev, i);
		if (i && irq < 0)
			break;
		ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
		if (ret) {
			while (i >= 0) {
				irq = platform_get_irq(pdev, i);
				free_irq(irq, ndev);
				i--;
			}
			goto failed_irq;
		}
	}

	fep->clk = clk_get(&pdev->dev, "fec_clk");
	if (IS_ERR(fep->clk)) {
		ret = PTR_ERR(fep->clk);
		goto failed_clk;
	}
	clk_enable(fep->clk);

	ret = fec_enet_init(ndev, 0);
	if (ret)
		goto failed_init;

	ret = register_netdev(ndev);
	if (ret)
		goto failed_register;

	return 0;

failed_register:
failed_init:
	clk_disable(fep->clk);
	clk_put(fep->clk);
failed_clk:
	for (i = 0; i < 3; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq > 0)
			free_irq(irq, ndev);
	}
failed_irq:
	iounmap((void __iomem *)ndev->base_addr);
failed_ioremap:
	free_netdev(ndev);

	return ret;
}

static int __devexit
fec_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	platform_set_drvdata(pdev, NULL);

	fec_stop(ndev);
	clk_disable(fep->clk);
	clk_put(fep->clk);
	iounmap((void __iomem *)ndev->base_addr);
	unregister_netdev(ndev);
	free_netdev(ndev);
	return 0;
}

static int
fec_suspend(struct platform_device *dev, pm_message_t state)
{
	struct net_device *ndev = platform_get_drvdata(dev);
	struct fec_enet_private *fep;

	if (ndev) {
		fep = netdev_priv(ndev);
		if (netif_running(ndev)) {
			netif_device_detach(ndev);
			fec_stop(ndev);
		}
	}
	return 0;
}

static int
fec_resume(struct platform_device *dev)
{
	struct net_device *ndev = platform_get_drvdata(dev);

	if (ndev) {
		if (netif_running(ndev)) {
			fec_enet_init(ndev, 0);
			netif_device_attach(ndev);
		}
	}
	return 0;
}

static struct platform_driver fec_driver = {
	.driver	= {
		.name    = "fec",
		.owner	 = THIS_MODULE,
	},
	.probe   = fec_probe,
	.remove  = __devexit_p(fec_drv_remove),
	.suspend = fec_suspend,
	.resume  = fec_resume,
};

static int __init
fec_enet_module_init(void)
{
	printk(KERN_INFO "FEC Ethernet Driver\n");

	return platform_driver_register(&fec_driver);
}

static void __exit
fec_enet_cleanup(void)
{
	platform_driver_unregister(&fec_driver);
}

module_exit(fec_enet_cleanup);
module_init(fec_enet_module_init);

MODULE_LICENSE("GPL");
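/*
 * Illustrative usage sketch: fec_probe() above binds against a platform
 * device named "fec", so a board file would normally register something
 * like the following.  The register base, size and IRQ number here are
 * made-up placeholders, not values for any real SoC, and the platform must
 * also provide the "fec_clk" clock that the probe routine looks up.
 */
#if 0
static struct resource example_fec_resources[] = {
	{
		.start	= 0x10000000,	/* hypothetical FEC register base */
		.end	= 0x10000000 + 0x3ff,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 21,		/* hypothetical FEC interrupt */
		.end	= 21,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device example_fec_device = {
	.name		= "fec",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(example_fec_resources),
	.resource	= example_fec_resources,
};

/* board init code would then call:
 *	platform_device_register(&example_fec_device);
 */
#endif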