/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * This version of the driver is specific to the FADS implementation,
 * since the board contains control registers external to the processor
 * for the control of the LevelOne LXT970 transceiver.  The MPC860T manual
 * describes connections using the internal parallel port I/O, which
 * is basically all of Port D.
 *
 * Right now, I am very wasteful with the buffers.  I allocate memory
 * pages and then divide them into 2K frame buffers.  This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>

#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || \
    defined(CONFIG_M5272) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x)
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include "fec.h"
#else
#include <asm/8xx_immap.h>
#include <asm/mpc8xx.h>
#include "commproc.h"
#endif

#if defined(CONFIG_FEC2)
#define	FEC_MAX_PORTS	2
#else
#define	FEC_MAX_PORTS	1
#endif

#if defined(CONFIG_FADS) || defined(CONFIG_RPXCLASSIC) || defined(CONFIG_M5272)
#define HAVE_mii_link_interrupt
#endif

/*
 * Define the fixed address of the FEC hardware.
 */
static unsigned int fec_hw[] = {
#if defined(CONFIG_M5272)
	(MCF_MBAR + 0x840),
#elif defined(CONFIG_M527x)
	(MCF_MBAR + 0x1000),
	(MCF_MBAR + 0x1800),
#elif defined(CONFIG_M523x) || defined(CONFIG_M528x)
	(MCF_MBAR + 0x1000),
#elif defined(CONFIG_M520x)
	(MCF_MBAR + 0x30000),
#elif defined(CONFIG_M532x)
	(MCF_MBAR + 0xfc030000),
#else
	&(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec),
#endif
};

static unsigned char fec_mac_default[] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
/*
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero then assume it is the address to get the MAC from.
 */
#if defined(CONFIG_NETtel)
#define	FEC_FLASHMAC	0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define	FEC_FLASHMAC	0xf0006000
#elif defined(CONFIG_CANCam)
#define	FEC_FLASHMAC	0xf0020000
#elif defined(CONFIG_M5272C3)
#define	FEC_FLASHMAC	(0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define	FEC_FLASHMAC	0xffc0406b
#else
#define	FEC_FLASHMAC	0
#endif

/* Forward declarations of some structures to support different PHYs */

typedef struct {
	uint mii_data;
	void (*funct)(uint mii_reg, struct net_device *dev);
} phy_cmd_t;

typedef struct {
	uint id;
	char *name;

	const phy_cmd_t *config;
	const phy_cmd_t *startup;
	const phy_cmd_t *ack_int;
	const phy_cmd_t *shutdown;
} phy_info_t;

/* The number of Tx and Rx buffers.  These are allocated from the page
 * pool.  The code may assume these are a power of two, so it is best
 * to keep them that size.
 * We don't need to allocate pages for the transmitter.  We just use
 * the skbuffer directly.
 */
#define FEC_ENET_RX_PAGES	8
#define FEC_ENET_RX_FRSIZE	2048
#define FEC_ENET_RX_FRPPG	(PAGE_SIZE / FEC_ENET_RX_FRSIZE)
#define RX_RING_SIZE		(FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
#define FEC_ENET_TX_FRSIZE	2048
#define FEC_ENET_TX_FRPPG	(PAGE_SIZE / FEC_ENET_TX_FRSIZE)
#define TX_RING_SIZE		16	/* Must be power of two */
#define TX_RING_MOD_MASK	15	/*   for this to work */

#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
#error "FEC: descriptor ring size constants too large"
#endif

/* Interrupt events/masks. */
#define FEC_ENET_HBERR	((uint)0x80000000)	/* Heartbeat error */
#define FEC_ENET_BABR	((uint)0x40000000)	/* Babbling receiver */
#define FEC_ENET_BABT	((uint)0x20000000)	/* Babbling transmitter */
#define FEC_ENET_GRA	((uint)0x10000000)	/* Graceful stop complete */
#define FEC_ENET_TXF	((uint)0x08000000)	/* Full frame transmitted */
#define FEC_ENET_TXB	((uint)0x04000000)	/* A buffer was transmitted */
#define FEC_ENET_RXF	((uint)0x02000000)	/* Full frame received */
#define FEC_ENET_RXB	((uint)0x01000000)	/* A buffer was received */
#define FEC_ENET_MII	((uint)0x00800000)	/* MII interrupt */
#define FEC_ENET_EBERR	((uint)0x00400000)	/* SDMA bus error */

/* The FEC stores dest/src/type, data, and checksum for receive packets. */
#define PKT_MAXBUF_SIZE		1518
#define PKT_MINBUF_SIZE		64
#define PKT_MAXBLR_SIZE		1520

/*
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits.  Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x)
#define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
#else
#define	OPT_FRAME_SIZE	0
#endif
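
/*
 * A quick sanity check of the ring-size math above (a sketch, assuming a
 * 4 kB PAGE_SIZE; other page sizes scale accordingly):
 *
 *   FEC_ENET_RX_FRPPG = 4096 / 2048 = 2 frames per page
 *   RX_RING_SIZE      = 2 * 8       = 16 receive buffer descriptors
 *
 * Each buffer descriptor (cbd_t) occupies 8 bytes, so the #error test
 * above checks (16 + 16) * 8 = 256 bytes against PAGE_SIZE, i.e. both
 * rings must fit in the single page set aside for descriptors.
 */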
/* The FEC buffer descriptors track the ring buffers.  The rx_bd_base and
 * tx_bd_base always point to the base of the buffer descriptors.  The
 * cur_rx and cur_tx point to the currently available buffer.
 * The dirty_tx tracks the current buffer that is being sent by the
 * controller.  The cur_tx and dirty_tx are equal under both completely
 * empty and completely full conditions.  The empty/ready indicator in
 * the buffer descriptor determines the actual condition.
 */
struct fec_enet_private {
	/* Hardware registers of the FEC device */
	volatile fec_t *hwp;

	struct net_device *netdev;

	/* The saved address of a sent-in-place packet/buffer, for kfree_skb(). */
	unsigned char *tx_bounce[TX_RING_SIZE];
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	ushort	skb_cur;
	ushort	skb_dirty;

	/* CPM dual port RAM relative addresses. */
	cbd_t	*rx_bd_base;		/* Address of Rx and Tx buffers. */
	cbd_t	*tx_bd_base;
	cbd_t	*cur_rx, *cur_tx;	/* The next free ring entry */
	cbd_t	*dirty_tx;		/* The ring entries to be free()ed. */
	uint	tx_full;
	/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
	spinlock_t hw_lock;
	/* hold while accessing the mii_list_t() elements */
	spinlock_t mii_lock;

	uint	phy_id;
	uint	phy_id_done;
	uint	phy_status;
	uint	phy_speed;
	phy_info_t const *phy;
	struct work_struct phy_task;

	uint	sequence_done;
	uint	mii_phy_task_queued;

	uint	phy_addr;

	int	index;
	int	opened;
	int	link;
	int	old_link;
	int	full_duplex;
};

static int fec_enet_open(struct net_device *dev);
static int fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void fec_enet_mii(struct net_device *dev);
static irqreturn_t fec_enet_interrupt(int irq, void * dev_id);
static void fec_enet_tx(struct net_device *dev);
static void fec_enet_rx(struct net_device *dev);
static int fec_enet_close(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
static void fec_restart(struct net_device *dev, int duplex);
static void fec_stop(struct net_device *dev);
static void fec_set_mac_address(struct net_device *dev);


/* MII processing.  We keep this as simple as possible.  Requests are
 * placed on the list (if there is room).  When the request is finished
 * by the MII, an optional function may be called.
 */
typedef struct mii_list {
	uint	mii_regval;
	void	(*mii_func)(uint val, struct net_device *dev);
	struct	mii_list *mii_next;
} mii_list_t;

#define	NMII	20
static mii_list_t	mii_cmds[NMII];
static mii_list_t	*mii_free;
static mii_list_t	*mii_head;
static mii_list_t	*mii_tail;

static int mii_queue(struct net_device *dev, int request,
				void (*func)(uint, struct net_device *));

/* Make MII read/write commands for the FEC. */
#define mk_mii_read(REG)	(0x60020000 | ((REG & 0x1f) << 18))
#define mk_mii_write(REG, VAL)	(0x50020000 | ((REG & 0x1f) << 18) | \
						(VAL & 0xffff))
#define mk_mii_end	0

/* Transmitter timeout. */
#define TX_TIMEOUT	(2*HZ)

/* Register definitions for the PHY. */

#define MII_REG_CR	0  /* Control Register */
#define MII_REG_SR	1  /* Status Register */
#define MII_REG_PHYIR1	2  /* PHY Identification Register 1 */
#define MII_REG_PHYIR2	3  /* PHY Identification Register 2 */
#define MII_REG_ANAR	4  /* A-N Advertisement Register */
#define MII_REG_ANLPAR	5  /* A-N Link Partner Ability Register */
#define MII_REG_ANER	6  /* A-N Expansion Register */
#define MII_REG_ANNPTR	7  /* A-N Next Page Transmit Register */
#define MII_REG_ANLPRNPR 8 /* A-N Link Partner Received Next Page Reg. */
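
/*
 * The MII command words above follow the IEEE 802.3 clause 22 management
 * frame layout: start/opcode in the top bits, PHY address in bits 27-23
 * (filled in later by mii_queue()), register address in bits 22-18,
 * turnaround in bits 17-16 and data in the low 16 bits.  A worked
 * example (a sketch, not an exhaustive decode):
 *
 *   mk_mii_read(MII_REG_SR)        = 0x60020000 | (1 << 18) = 0x60060000
 *   mk_mii_write(MII_REG_CR, 0x1200)
 *                                  = 0x50020000 | (0 << 18) | 0x1200
 *                                  = 0x50021200
 *
 * mii_queue() then ORs in (phy_addr << 23) before the word is written
 * to the MII data register.
 */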
/* values for phy_status */

#define PHY_CONF_ANE	0x0001	/* 1 auto-negotiation enabled */
#define PHY_CONF_LOOP	0x0002	/* 1 loopback mode enabled */
#define PHY_CONF_SPMASK	0x00f0	/* mask for speed */
#define PHY_CONF_10HDX	0x0010	/* 10 Mbit half duplex supported */
#define PHY_CONF_10FDX	0x0020	/* 10 Mbit full duplex supported */
#define PHY_CONF_100HDX	0x0040	/* 100 Mbit half duplex supported */
#define PHY_CONF_100FDX	0x0080	/* 100 Mbit full duplex supported */

#define PHY_STAT_LINK	0x0100	/* 1 up - 0 down */
#define PHY_STAT_FAULT	0x0200	/* 1 remote fault */
#define PHY_STAT_ANC	0x0400	/* 1 auto-negotiation complete */
#define PHY_STAT_SPMASK	0xf000	/* mask for speed */
#define PHY_STAT_10HDX	0x1000	/* 10 Mbit half duplex selected */
#define PHY_STAT_10FDX	0x2000	/* 10 Mbit full duplex selected */
#define PHY_STAT_100HDX	0x4000	/* 100 Mbit half duplex selected */
#define PHY_STAT_100FDX	0x8000	/* 100 Mbit full duplex selected */


static int
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fec_enet_private *fep;
	volatile fec_t	*fecp;
	volatile cbd_t	*bdp;
	unsigned short	status;
	unsigned long	flags;

	fep = netdev_priv(dev);
	fecp = (volatile fec_t *)dev->base_addr;

	if (!fep->link) {
		/* Link is down or autonegotiation is in progress. */
		return 1;
	}

	spin_lock_irqsave(&fep->hw_lock, flags);
	/* Fill in a Tx ring entry */
	bdp = fep->cur_tx;

	status = bdp->cbd_sc;
#ifndef final_version
	if (status & BD_ENET_TX_READY) {
		/* Ooops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since dev->tbusy should be set.
		 */
		printk("%s: tx queue full!\n", dev->name);
		spin_unlock_irqrestore(&fep->hw_lock, flags);
		return 1;
	}
#endif

	/* Clear all of the status flags. */
	status &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer. */
	bdp->cbd_bufaddr = __pa(skb->data);
	bdp->cbd_datlen = skb->len;

	/*
	 * On some FEC implementations data must be aligned on
	 * 4-byte boundaries.  Use bounce buffers to copy data
	 * and get it aligned.  Ugh.
	 */
	if (bdp->cbd_bufaddr & 0x3) {
		unsigned int index;
		index = bdp - fep->tx_bd_base;
		memcpy(fep->tx_bounce[index], (void *)bdp->cbd_bufaddr, bdp->cbd_datlen);
		bdp->cbd_bufaddr = __pa(fep->tx_bounce[index]);
	}

	/* Save skb pointer. */
	fep->tx_skbuff[fep->skb_cur] = skb;

	dev->stats.tx_bytes += skb->len;
	fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;

	/* Push the data cache so the CPM does not get stale memory data. */
	flush_dcache_range((unsigned long)skb->data,
			   (unsigned long)skb->data + skb->len);

	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
			| BD_ENET_TX_LAST | BD_ENET_TX_TC);
	bdp->cbd_sc = status;

	dev->trans_start = jiffies;

	/* Trigger transmission start */
	fecp->fec_x_des_active = 0;

	/* If this was the last BD in the ring, start at the beginning again. */
	if (status & BD_ENET_TX_WRAP) {
		bdp = fep->tx_bd_base;
	} else {
		bdp++;
	}

	if (bdp == fep->dirty_tx) {
		fep->tx_full = 1;
		netif_stop_queue(dev);
	}

	fep->cur_tx = (cbd_t *)bdp;

	spin_unlock_irqrestore(&fep->hw_lock, flags);

	return 0;
}
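
/*
 * Note the ring-full convention used above: cur_tx is advanced after a
 * descriptor is handed to the hardware, and the ring is declared full as
 * soon as cur_tx catches up with dirty_tx (the oldest un-reclaimed
 * entry).  A sketch with TX_RING_SIZE = 16: after 16 transmits with no
 * completions, cur_tx wraps around to equal dirty_tx, tx_full is set and
 * the queue is stopped until fec_enet_tx() reclaims a descriptor.
 */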
static void
fec_timeout(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	printk("%s: transmit timed out.\n", dev->name);
	dev->stats.tx_errors++;
#ifndef final_version
	{
	int	i;
	cbd_t	*bdp;

	printk("Ring data dump: cur_tx %lx%s, dirty_tx %lx cur_rx: %lx\n",
	       (unsigned long)fep->cur_tx, fep->tx_full ? " (full)" : "",
	       (unsigned long)fep->dirty_tx,
	       (unsigned long)fep->cur_rx);

	bdp = fep->tx_bd_base;
	printk(" tx: %u buffers\n",  TX_RING_SIZE);
	for (i = 0 ; i < TX_RING_SIZE; i++) {
		printk("  %08x: %04x %04x %08x\n",
		       (uint) bdp,
		       bdp->cbd_sc,
		       bdp->cbd_datlen,
		       (int) bdp->cbd_bufaddr);
		bdp++;
	}

	bdp = fep->rx_bd_base;
	printk(" rx: %lu buffers\n",  (unsigned long) RX_RING_SIZE);
	for (i = 0 ; i < RX_RING_SIZE; i++) {
		printk("  %08x: %04x %04x %08x\n",
		       (uint) bdp,
		       bdp->cbd_sc,
		       bdp->cbd_datlen,
		       (int) bdp->cbd_bufaddr);
		bdp++;
	}
	}
#endif
	fec_restart(dev, fep->full_duplex);
	netif_wake_queue(dev);
}

/* The interrupt handler.
 * This is called from the MPC core interrupt.
 */
static irqreturn_t
fec_enet_interrupt(int irq, void * dev_id)
{
	struct	net_device *dev = dev_id;
	volatile fec_t	*fecp;
	uint	int_events;
	irqreturn_t ret = IRQ_NONE;

	fecp = (volatile fec_t *)dev->base_addr;

	/* Get the interrupt events that caused us to be here. */
	do {
		int_events = fecp->fec_ievent;
		fecp->fec_ievent = int_events;

		/* Handle receive event in its own function. */
		if (int_events & FEC_ENET_RXF) {
			ret = IRQ_HANDLED;
			fec_enet_rx(dev);
		}

		/* Transmit OK, or non-fatal error.  Update the buffer
		 * descriptors.  FEC handles all errors, we just discover
		 * them as part of the transmit process.
		 */
		if (int_events & FEC_ENET_TXF) {
			ret = IRQ_HANDLED;
			fec_enet_tx(dev);
		}

		if (int_events & FEC_ENET_MII) {
			ret = IRQ_HANDLED;
			fec_enet_mii(dev);
		}

	} while (int_events);

	return ret;
}
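
/*
 * The read-then-write-back sequence in the handler above is the usual
 * write-one-to-clear idiom for the FEC event register: reading
 * fec_ievent latches the pending events, and writing the same value back
 * acknowledges exactly those events, so nothing that arrives in between
 * is lost.  The do/while loop then re-reads the register until no new
 * events are pending, which is why a single hardware interrupt can
 * service several RXF/TXF/MII events in one pass.
 */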
static void
fec_enet_tx(struct net_device *dev)
{
	struct	fec_enet_private *fep;
	volatile cbd_t	*bdp;
	unsigned short	status;
	struct	sk_buff	*skb;

	fep = netdev_priv(dev);
	spin_lock_irq(&fep->hw_lock);
	bdp = fep->dirty_tx;

	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
		if (bdp == fep->cur_tx && fep->tx_full == 0)
			break;

		skb = fep->tx_skbuff[fep->skb_dirty];
		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				   BD_ENET_TX_RL | BD_ENET_TX_UN |
				   BD_ENET_TX_CSL)) {
			dev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)	/* No heartbeat */
				dev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)	/* Late collision */
				dev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)	/* Retrans limit */
				dev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)	/* Underrun */
				dev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL)	/* Carrier lost */
				dev->stats.tx_carrier_errors++;
		} else {
			dev->stats.tx_packets++;
		}

#ifndef final_version
		if (status & BD_ENET_TX_READY)
			printk("HEY! Enet xmit interrupt and TX_READY.\n");
#endif
		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			dev->stats.collisions++;

		/* Free the sk buffer associated with this last transmit. */
		dev_kfree_skb_any(skb);
		fep->tx_skbuff[fep->skb_dirty] = NULL;
		fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;

		/* Update pointer to next buffer descriptor to be transmitted. */
		if (status & BD_ENET_TX_WRAP)
			bdp = fep->tx_bd_base;
		else
			bdp++;

		/* Since we have freed up a buffer, the ring is no longer full. */
		if (fep->tx_full) {
			fep->tx_full = 0;
			if (netif_queue_stopped(dev))
				netif_wake_queue(dev);
		}
	}
	fep->dirty_tx = (cbd_t *)bdp;
	spin_unlock_irq(&fep->hw_lock);
}
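
/*
 * Locking note: fec_enet_tx() and fec_enet_rx() run from the interrupt
 * handler, so the plain spin_lock_irq() used here pairs with the
 * spin_lock_irqsave() taken in fec_enet_start_xmit().  The same hw_lock
 * orders all descriptor-ring access between the transmit path and the
 * completion paths.
 */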
/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static void
fec_enet_rx(struct net_device *dev)
{
	struct	fec_enet_private *fep;
	volatile fec_t	*fecp;
	volatile cbd_t	*bdp;
	unsigned short	status;
	struct	sk_buff	*skb;
	ushort	pkt_len;
	__u8	*data;

#ifdef CONFIG_M532x
	flush_cache_all();
#endif

	fep = netdev_priv(dev);
	fecp = (volatile fec_t *)dev->base_addr;

	spin_lock_irq(&fep->hw_lock);

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {

#ifndef final_version
		/* Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((status & BD_ENET_RX_LAST) == 0)
			printk("FEC ENET: rcv is not +last\n");
#endif

		if (!fep->opened)
			goto rx_processing_done;

		/* Check for errors. */
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			dev->stats.rx_errors++;
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
				/* Frame too long or too short. */
				dev->stats.rx_length_errors++;
			}
			if (status & BD_ENET_RX_NO)	/* Frame alignment */
				dev->stats.rx_frame_errors++;
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				dev->stats.rx_crc_errors++;
			if (status & BD_ENET_RX_OV)	/* FIFO overrun */
				dev->stats.rx_fifo_errors++;
		}

		/* Report late collisions as a frame error.
		 * On this error, the BD is closed, but we don't know what we
		 * have in the buffer.  So, just drop this frame on the floor.
		 */
		if (status & BD_ENET_RX_CL) {
			dev->stats.rx_errors++;
			dev->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame. */
		dev->stats.rx_packets++;
		pkt_len = bdp->cbd_datlen;
		dev->stats.rx_bytes += pkt_len;
		data = (__u8 *)__va(bdp->cbd_bufaddr);

		/* This does 16 byte alignment, exactly what we need.
		 * The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		skb = dev_alloc_skb(pkt_len-4);

		if (skb == NULL) {
			printk("%s: Memory squeeze, dropping packet.\n", dev->name);
			dev->stats.rx_dropped++;
		} else {
			skb_put(skb, pkt_len-4);	/* Make room */
			skb_copy_to_linear_data(skb, data, pkt_len-4);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
		}
rx_processing_done:

		/* Clear the status flags for this buffer. */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty. */
		status |= BD_ENET_RX_EMPTY;
		bdp->cbd_sc = status;

		/* Update BD pointer to next entry. */
		if (status & BD_ENET_RX_WRAP)
			bdp = fep->rx_bd_base;
		else
			bdp++;

#if 1
		/* Doing this here will keep the FEC running while we process
		 * incoming frames.  On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		fecp->fec_r_des_active = 0;
#endif
	} /* while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) */
	fep->cur_rx = (cbd_t *)bdp;

#if 0
	/* Doing this here will allow us to process all frames in the
	 * ring before the FEC is allowed to put more there.  On a heavily
	 * loaded network, some frames may be lost.  Unfortunately, this
	 * increases the interrupt overhead since we can potentially work
	 * our way back to the interrupt return only to come right back
	 * here.
	 */
	fecp->fec_r_des_active = 0;
#endif

	spin_unlock_irq(&fep->hw_lock);
}


/* called from interrupt context */
static void
fec_enet_mii(struct net_device *dev)
{
	struct	fec_enet_private *fep;
	volatile fec_t	*ep;
	mii_list_t	*mip;
	uint		mii_reg;

	fep = netdev_priv(dev);
	spin_lock_irq(&fep->mii_lock);

	ep = fep->hwp;
	mii_reg = ep->fec_mii_data;

	if ((mip = mii_head) == NULL) {
		printk("MII and no head!\n");
		goto unlock;
	}

	if (mip->mii_func != NULL)
		(*(mip->mii_func))(mii_reg, dev);

	mii_head = mip->mii_next;
	mip->mii_next = mii_free;
	mii_free = mip;

	if ((mip = mii_head) != NULL)
		ep->fec_mii_data = mip->mii_regval;

unlock:
	spin_unlock_irq(&fep->mii_lock);
}
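
/*
 * The MII machinery above and below works as a simple one-deep pipeline:
 * mii_queue() appends a request to the list and, if the list was empty,
 * writes the command word to fec_mii_data to start the transfer.  When
 * the FEC finishes the management frame it raises FEC_ENET_MII, the
 * interrupt handler calls fec_enet_mii(), the completed request's
 * callback (if any) runs with the returned register value, and the next
 * queued command word is written out.  So at most one MII frame is ever
 * in flight; everything else waits its turn on the list.
 */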
static int
mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_device *))
{
	struct fec_enet_private *fep;
	unsigned long	flags;
	mii_list_t	*mip;
	int		retval;

	/* Add PHY address to register command. */
	fep = netdev_priv(dev);
	spin_lock_irqsave(&fep->mii_lock, flags);

	regval |= fep->phy_addr << 23;
	retval = 0;

	if ((mip = mii_free) != NULL) {
		mii_free = mip->mii_next;
		mip->mii_regval = regval;
		mip->mii_func = func;
		mip->mii_next = NULL;
		if (mii_head) {
			mii_tail->mii_next = mip;
			mii_tail = mip;
		} else {
			mii_head = mii_tail = mip;
			fep->hwp->fec_mii_data = regval;
		}
	} else {
		retval = 1;
	}

	spin_unlock_irqrestore(&fep->mii_lock, flags);
	return retval;
}

static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c)
{
	if (!c)
		return;

	for (; c->mii_data != mk_mii_end; c++)
		mii_queue(dev, c->mii_data, c->funct);
}

static void mii_parse_sr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC);

	if (mii_reg & 0x0004)
		status |= PHY_STAT_LINK;
	if (mii_reg & 0x0010)
		status |= PHY_STAT_FAULT;
	if (mii_reg & 0x0020)
		status |= PHY_STAT_ANC;
	*s = status;
}

static void mii_parse_cr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_CONF_ANE | PHY_CONF_LOOP);

	if (mii_reg & 0x1000)
		status |= PHY_CONF_ANE;
	if (mii_reg & 0x4000)
		status |= PHY_CONF_LOOP;
	*s = status;
}

static void mii_parse_anar(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_CONF_SPMASK);

	if (mii_reg & 0x0020)
		status |= PHY_CONF_10HDX;
	if (mii_reg & 0x0040)
		status |= PHY_CONF_10FDX;
	if (mii_reg & 0x0080)
		status |= PHY_CONF_100HDX;
	if (mii_reg & 0x0100)
		status |= PHY_CONF_100FDX;
	*s = status;
}

/* ------------------------------------------------------------------------- */
/* The Level one LXT970 is used by many boards				     */

#define MII_LXT970_MIRROR	16  /* Mirror register           */
#define MII_LXT970_IER		17  /* Interrupt Enable Register */
#define MII_LXT970_ISR		18  /* Interrupt Status Register */
#define MII_LXT970_CONFIG	19  /* Configuration Register    */
#define MII_LXT970_CSR		20  /* Chip Status Register      */

static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_SPMASK);
	if (mii_reg & 0x0800) {
		if (mii_reg & 0x1000)
			status |= PHY_STAT_100FDX;
		else
			status |= PHY_STAT_100HDX;
	} else {
		if (mii_reg & 0x1000)
			status |= PHY_STAT_10FDX;
		else
			status |= PHY_STAT_10HDX;
	}
	*s = status;
}
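
/*
 * Each PHY driver below is just a set of phy_cmd_t tables: config,
 * startup, ack_int and shutdown.  mii_do_cmd() walks a table entry by
 * entry, queueing each command word until it hits the mk_mii_end
 * sentinel, with the optional callback parsing the result into
 * fep->phy_status.  A minimal hypothetical table looks like:
 *
 *   { mk_mii_read(MII_REG_SR), mii_parse_sr },    read SR, parse it
 *   { mk_mii_end, }                               terminator
 */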
static phy_cmd_t const phy_cmd_lxt970_config[] = {
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt970_startup[] = { /* enable interrupts */
		{ mk_mii_write(MII_LXT970_IER, 0x0002), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt970_ack_int[] = {
		/* read SR and ISR to acknowledge */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_LXT970_ISR), NULL },

		/* find out the current status */
		{ mk_mii_read(MII_LXT970_CSR), mii_parse_lxt970_csr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt970_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_LXT970_IER, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_lxt970 = {
	.id = 0x07810000,
	.name = "LXT970",
	.config = phy_cmd_lxt970_config,
	.startup = phy_cmd_lxt970_startup,
	.ack_int = phy_cmd_lxt970_ack_int,
	.shutdown = phy_cmd_lxt970_shutdown
};

/* ------------------------------------------------------------------------- */
/* The Level one LXT971 is used on some of my custom boards		     */

/* register definitions for the 971 */

#define MII_LXT971_PCR	16  /* Port Control Register     */
#define MII_LXT971_SR2	17  /* Status Register 2         */
#define MII_LXT971_IER	18  /* Interrupt Enable Register */
#define MII_LXT971_ISR	19  /* Interrupt Status Register */
#define MII_LXT971_LCR	20  /* LED Control Register      */
#define MII_LXT971_TCR	30  /* Transmit Control Register */

/*
 * I had some nice ideas of running the MDIO faster...
 * The 971 should support 8MHz and I tried it, but things acted really
 * weird, so 2.5 MHz ought to be enough for anyone...
 */

static void mii_parse_lxt971_sr2(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC);

	if (mii_reg & 0x0400) {
		fep->link = 1;
		status |= PHY_STAT_LINK;
	} else {
		fep->link = 0;
	}
	if (mii_reg & 0x0080)
		status |= PHY_STAT_ANC;
	if (mii_reg & 0x4000) {
		if (mii_reg & 0x0200)
			status |= PHY_STAT_100FDX;
		else
			status |= PHY_STAT_100HDX;
	} else {
		if (mii_reg & 0x0200)
			status |= PHY_STAT_10FDX;
		else
			status |= PHY_STAT_10HDX;
	}
	if (mii_reg & 0x0008)
		status |= PHY_STAT_FAULT;

	*s = status;
}

static phy_cmd_t const phy_cmd_lxt971_config[] = {
		/* limit to 10MBit because my prototype board
		 * doesn't work with 100. */
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt971_startup[] = {  /* enable interrupts */
		{ mk_mii_write(MII_LXT971_IER, 0x00f2), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_write(MII_LXT971_LCR, 0xd422), NULL }, /* LED config */
		/* The 971 reports link down on the first read after
		 * power-up; read SR here so ack_int sees a valid value. */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt971_ack_int[] = {
		/* acknowledge the int before reading status! */
		{ mk_mii_read(MII_LXT971_ISR), NULL },
		/* find out the current status */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt971_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_LXT971_IER, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_lxt971 = {
	.id = 0x0001378e,
	.name = "LXT971",
	.config = phy_cmd_lxt971_config,
	.startup = phy_cmd_lxt971_startup,
	.ack_int = phy_cmd_lxt971_ack_int,
	.shutdown = phy_cmd_lxt971_shutdown
};
/* ------------------------------------------------------------------------- */
/* The Quality Semiconductor QS6612 is used on the RPX CLLF		     */

/* register definitions */

#define MII_QS6612_MCR	17  /* Mode Control Register      */
#define MII_QS6612_FTR	27  /* Factory Test Register      */
#define MII_QS6612_MCO	28  /* Misc. Control Register     */
#define MII_QS6612_ISR	29  /* Interrupt Source Register  */
#define MII_QS6612_IMR	30  /* Interrupt Mask Register    */
#define MII_QS6612_PCR	31  /* 100BaseTx PHY Control Reg. */

static void mii_parse_qs6612_pcr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_SPMASK);

	switch ((mii_reg >> 2) & 7) {
	case 1: status |= PHY_STAT_10HDX; break;
	case 2: status |= PHY_STAT_100HDX; break;
	case 5: status |= PHY_STAT_10FDX; break;
	case 6: status |= PHY_STAT_100FDX; break;
	}

	*s = status;
}

static phy_cmd_t const phy_cmd_qs6612_config[] = {
		/* The PHY powers up isolated on the RPX,
		 * so send a command to allow operation.
		 */
		{ mk_mii_write(MII_QS6612_PCR, 0x0dc0), NULL },

		/* parse cr and anar to get some info */
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_qs6612_startup[] = {  /* enable interrupts */
		{ mk_mii_write(MII_QS6612_IMR, 0x003a), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_qs6612_ack_int[] = {
		/* we need to read ISR, SR and ANER to acknowledge */
		{ mk_mii_read(MII_QS6612_ISR), NULL },
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_REG_ANER), NULL },

		/* read pcr to get info */
		{ mk_mii_read(MII_QS6612_PCR), mii_parse_qs6612_pcr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_qs6612_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_QS6612_IMR, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_qs6612 = {
	.id = 0x00181440,
	.name = "QS6612",
	.config = phy_cmd_qs6612_config,
	.startup = phy_cmd_qs6612_startup,
	.ack_int = phy_cmd_qs6612_ack_int,
	.shutdown = phy_cmd_qs6612_shutdown
};
/* ------------------------------------------------------------------------- */
/* AMD AM79C874 phy							     */

/* register definitions for the 874 */

#define MII_AM79C874_MFR	16  /* Miscellaneous Feature Register */
#define MII_AM79C874_ICSR	17  /* Interrupt/Status Register      */
#define MII_AM79C874_DR		18  /* Diagnostic Register            */
#define MII_AM79C874_PMLR	19  /* Power and Loopback Register    */
#define MII_AM79C874_MCR	21  /* Mode Control Register          */
#define MII_AM79C874_DC		23  /* Disconnect Counter             */
#define MII_AM79C874_REC	24  /* Receive Error Counter          */

static void mii_parse_am79c874_dr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_ANC);

	if (mii_reg & 0x0080)
		status |= PHY_STAT_ANC;
	if (mii_reg & 0x0400)
		status |= ((mii_reg & 0x0800) ? PHY_STAT_100FDX : PHY_STAT_100HDX);
	else
		status |= ((mii_reg & 0x0800) ? PHY_STAT_10FDX : PHY_STAT_10HDX);

	*s = status;
}

static phy_cmd_t const phy_cmd_am79c874_config[] = {
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_am79c874_startup[] = {  /* enable interrupts */
		{ mk_mii_write(MII_AM79C874_ICSR, 0xff00), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_am79c874_ack_int[] = {
		/* find out the current status */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
		/* we only need to read ISR to acknowledge */
		{ mk_mii_read(MII_AM79C874_ICSR), NULL },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_am79c874_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_AM79C874_ICSR, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_am79c874 = {
	.id = 0x00022561,
	.name = "AM79C874",
	.config = phy_cmd_am79c874_config,
	.startup = phy_cmd_am79c874_startup,
	.ack_int = phy_cmd_am79c874_ack_int,
	.shutdown = phy_cmd_am79c874_shutdown
};
/* ------------------------------------------------------------------------- */
/* Kendin KS8721BL phy							     */

/* register definitions for the 8721 */

#define MII_KS8721BL_RXERCR	21
#define MII_KS8721BL_ICSR	22
#define MII_KS8721BL_PHYCR	31

static phy_cmd_t const phy_cmd_ks8721bl_config[] = {
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_ks8721bl_startup[] = {  /* enable interrupts */
		{ mk_mii_write(MII_KS8721BL_ICSR, 0xff00), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_ks8721bl_ack_int[] = {
		/* find out the current status */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		/* we only need to read ISR to acknowledge */
		{ mk_mii_read(MII_KS8721BL_ICSR), NULL },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_ks8721bl_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_KS8721BL_ICSR, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_ks8721bl = {
	.id = 0x00022161,
	.name = "KS8721BL",
	.config = phy_cmd_ks8721bl_config,
	.startup = phy_cmd_ks8721bl_startup,
	.ack_int = phy_cmd_ks8721bl_ack_int,
	.shutdown = phy_cmd_ks8721bl_shutdown
};

/* ------------------------------------------------------------------------- */
/* register definitions for the DP83848 */

#define MII_DP8384X_PHYSTST	16  /* PHY Status Register */

static void mii_parse_dp8384x_sr2(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);

	*s &= ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC);

	/* Link up */
	if (mii_reg & 0x0001) {
		fep->link = 1;
		*s |= PHY_STAT_LINK;
	} else
		fep->link = 0;
	/* Status of link */
	if (mii_reg & 0x0010)		/* Autonegotiation complete */
		*s |= PHY_STAT_ANC;
	if (mii_reg & 0x0002) {		/* 10 Mbps? */
		if (mii_reg & 0x0004)	/* Full Duplex? */
			*s |= PHY_STAT_10FDX;
		else
			*s |= PHY_STAT_10HDX;
	} else {			/* 100 Mbps */
		if (mii_reg & 0x0004)	/* Full Duplex? */
			*s |= PHY_STAT_100FDX;
		else
			*s |= PHY_STAT_100HDX;
	}
	if (mii_reg & 0x0008)
		*s |= PHY_STAT_FAULT;
}
static phy_info_t phy_info_dp83848 = {
	0x020005c9,
	"DP83848",

	(const phy_cmd_t []) {  /* config */
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_read(MII_DP8384X_PHYSTST), mii_parse_dp8384x_sr2 },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* startup - enable interrupts */
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* ack_int - never happens, no interrupt */
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* shutdown */
		{ mk_mii_end, }
	},
};

/* ------------------------------------------------------------------------- */

static phy_info_t const * const phy_info[] = {
	&phy_info_lxt970,
	&phy_info_lxt971,
	&phy_info_qs6612,
	&phy_info_am79c874,
	&phy_info_ks8721bl,
	&phy_info_dp83848,
	NULL
};

/* ------------------------------------------------------------------------- */
#ifdef HAVE_mii_link_interrupt
#ifdef CONFIG_RPXCLASSIC
static void
mii_link_interrupt(void *dev_id);
#else
static irqreturn_t
mii_link_interrupt(int irq, void * dev_id);
#endif
#endif

#if defined(CONFIG_M5272)
/*
 * Code specific to Coldfire 5272 setup.
 */
static void __inline__ fec_request_intrs(struct net_device *dev)
{
	volatile unsigned long *icrp;
	static const struct idesc {
		char *name;
		unsigned short irq;
		irq_handler_t handler;
	} *idp, id[] = {
		{ "fec(RX)", 86, fec_enet_interrupt },
		{ "fec(TX)", 87, fec_enet_interrupt },
		{ "fec(OTHER)", 88, fec_enet_interrupt },
		{ "fec(MII)", 66, mii_link_interrupt },
		{ NULL },
	};

	/* Setup interrupt handlers. */
	for (idp = id; idp->name; idp++) {
		if (request_irq(idp->irq, idp->handler, IRQF_DISABLED, idp->name, dev) != 0)
			printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, idp->irq);
	}

	/* Unmask interrupt at ColdFire 5272 SIM */
	icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR3);
	*icrp = 0x00000ddd;
	icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1);
	*icrp = 0x0d000000;
}

static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep)
{
	volatile fec_t *fecp;

	fecp = fep->hwp;
	fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04;
	fecp->fec_x_cntrl = 0x00;

	/*
	 * Set MII speed to 2.5 MHz
	 * See 5272 manual section 11.5.8: MSCR
	 */
	fep->phy_speed = ((((MCF_CLK / 4) / (2500000 / 10)) + 5) / 10) * 2;
	fecp->fec_mii_speed = fep->phy_speed;

	fec_restart(dev, 0);
}

static void __inline__ fec_get_mac(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile fec_t *fecp;
	unsigned char *iap, tmpaddr[ETH_ALEN];

	fecp = fep->hwp;

	if (FEC_FLASHMAC) {
		/*
		 * Get MAC address from FLASH.
		 * If it is all 1's or 0's, use the default.
		 */
		iap = (unsigned char *)FEC_FLASHMAC;
		if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
		    (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
			iap = fec_mac_default;
		if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
		    (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
			iap = fec_mac_default;
	} else {
		*((unsigned long *) &tmpaddr[0]) = fecp->fec_addr_low;
		*((unsigned short *) &tmpaddr[4]) = (fecp->fec_addr_high >> 16);
		iap = &tmpaddr[0];
	}

	memcpy(dev->dev_addr, iap, ETH_ALEN);

	/* Adjust MAC if using default MAC address */
	if (iap == fec_mac_default)
		dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
}
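
/*
 * A note on the register fallback above (a sketch for the big-endian
 * ColdFire layout): fec_addr_low holds the first four MAC bytes and the
 * top half of fec_addr_high the remaining two, so a station address of
 * 00:11:22:33:44:55 would read back as fec_addr_low = 0x00112233 and
 * fec_addr_high = 0x44550000.  The same unpacking is repeated in the
 * other per-CPU fec_get_mac() variants below.
 */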
static void __inline__ fec_enable_phy_intr(void)
{
}

static void __inline__ fec_disable_phy_intr(void)
{
	volatile unsigned long *icrp;
	icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1);
	*icrp = 0x08000000;
}

static void __inline__ fec_phy_ack_intr(void)
{
	volatile unsigned long *icrp;
	/* Acknowledge the interrupt */
	icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1);
	*icrp = 0x0d000000;
}

static void __inline__ fec_localhw_setup(void)
{
}

/*
 * Do not need to make region uncached on 5272.
 */
static void __inline__ fec_uncache(unsigned long addr)
{
}

/* ------------------------------------------------------------------------- */

#elif defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x)

/*
 * Code specific to Coldfire 5230/5231/5232/5234/5235,
 * the 5270/5271/5274/5275 and 5280/5282 setups.
 */
static void __inline__ fec_request_intrs(struct net_device *dev)
{
	struct fec_enet_private *fep;
	int b;
	static const struct idesc {
		char *name;
		unsigned short irq;
	} *idp, id[] = {
		{ "fec(TXF)", 23 },
		{ "fec(RXF)", 27 },
		{ "fec(MII)", 29 },
		{ NULL },
	};

	fep = netdev_priv(dev);
	b = (fep->index) ? 128 : 64;

	/* Setup interrupt handlers. */
	for (idp = id; idp->name; idp++) {
		if (request_irq(b+idp->irq, fec_enet_interrupt, IRQF_DISABLED, idp->name, dev) != 0)
			printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, b+idp->irq);
	}

	/* Unmask interrupts at ColdFire 5280/5282 interrupt controller */
	{
		volatile unsigned char	*icrp;
		volatile unsigned long	*imrp;
		int i, ilip;

		b = (fep->index) ? MCFICM_INTC1 : MCFICM_INTC0;
		icrp = (volatile unsigned char *) (MCF_IPSBAR + b +
			MCFINTC_ICR0);
		for (i = 23, ilip = 0x28; (i < 36); i++)
			icrp[i] = ilip--;

		imrp = (volatile unsigned long *) (MCF_IPSBAR + b +
			MCFINTC_IMRH);
		*imrp &= ~0x0000000f;
		imrp = (volatile unsigned long *) (MCF_IPSBAR + b +
			MCFINTC_IMRL);
		*imrp &= ~0xff800001;
	}

#if defined(CONFIG_M528x)
	/* Set up gpio outputs for MII lines */
	{
		volatile u16 *gpio_paspar;
		volatile u8 *gpio_pehlpar;

		gpio_paspar = (volatile u16 *) (MCF_IPSBAR + 0x100056);
		gpio_pehlpar = (volatile u8 *) (MCF_IPSBAR + 0x100058);
		*gpio_paspar |= 0x0f00;
		*gpio_pehlpar = 0xc0;
	}
#endif

#if defined(CONFIG_M527x)
	/* Set up gpio outputs for MII lines */
	{
		volatile u8 *gpio_par_fec;
		volatile u16 *gpio_par_feci2c;

		gpio_par_feci2c = (volatile u16 *)(MCF_IPSBAR + 0x100082);
		/* Set up gpio outputs for FEC0 MII lines */
		gpio_par_fec = (volatile u8 *)(MCF_IPSBAR + 0x100078);

		*gpio_par_feci2c |= 0x0f00;
		*gpio_par_fec |= 0xc0;

#if defined(CONFIG_FEC2)
		/* Set up gpio outputs for FEC1 MII lines */
		gpio_par_fec = (volatile u8 *)(MCF_IPSBAR + 0x100079);

		*gpio_par_feci2c |= 0x00a0;
		*gpio_par_fec |= 0xc0;
#endif /* CONFIG_FEC2 */
	}
#endif /* CONFIG_M527x */
}
static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep)
{
	volatile fec_t *fecp;

	fecp = fep->hwp;
	fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04;
	fecp->fec_x_cntrl = 0x00;

	/*
	 * Set MII speed to 2.5 MHz
	 * See 5282 manual section 17.5.4.7: MSCR
	 */
	fep->phy_speed = ((((MCF_CLK / 2) / (2500000 / 10)) + 5) / 10) * 2;
	fecp->fec_mii_speed = fep->phy_speed;

	fec_restart(dev, 0);
}

static void __inline__ fec_get_mac(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile fec_t *fecp;
	unsigned char *iap, tmpaddr[ETH_ALEN];

	fecp = fep->hwp;

	if (FEC_FLASHMAC) {
		/*
		 * Get MAC address from FLASH.
		 * If it is all 1's or 0's, use the default.
		 */
		iap = (unsigned char *)FEC_FLASHMAC;
		if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
		    (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
			iap = fec_mac_default;
		if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
		    (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
			iap = fec_mac_default;
	} else {
		*((unsigned long *) &tmpaddr[0]) = fecp->fec_addr_low;
		*((unsigned short *) &tmpaddr[4]) = (fecp->fec_addr_high >> 16);
		iap = &tmpaddr[0];
	}

	memcpy(dev->dev_addr, iap, ETH_ALEN);

	/* Adjust MAC if using default MAC address */
	if (iap == fec_mac_default)
		dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
}

static void __inline__ fec_enable_phy_intr(void)
{
}

static void __inline__ fec_disable_phy_intr(void)
{
}

static void __inline__ fec_phy_ack_intr(void)
{
}

static void __inline__ fec_localhw_setup(void)
{
}

/*
 * Do not need to make region uncached on 523x/527x/528x.
 */
static void __inline__ fec_uncache(unsigned long addr)
{
}

/* ------------------------------------------------------------------------- */
#elif defined(CONFIG_M520x)

/*
 * Code specific to Coldfire 520x
 */
static void __inline__ fec_request_intrs(struct net_device *dev)
{
	struct fec_enet_private *fep;
	int b;
	static const struct idesc {
		char *name;
		unsigned short irq;
	} *idp, id[] = {
		{ "fec(TXF)", 23 },
		{ "fec(RXF)", 27 },
		{ "fec(MII)", 29 },
		{ NULL },
	};

	fep = netdev_priv(dev);
	b = 64 + 13;

	/* Setup interrupt handlers. */
	for (idp = id; idp->name; idp++) {
		if (request_irq(b+idp->irq, fec_enet_interrupt, IRQF_DISABLED, idp->name, dev) != 0)
			printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, b+idp->irq);
	}

	/* Unmask interrupts at ColdFire interrupt controller */
	{
		volatile unsigned char	*icrp;
		volatile unsigned long	*imrp;

		icrp = (volatile unsigned char *) (MCF_IPSBAR + MCFICM_INTC0 +
			MCFINTC_ICR0);
		for (b = 36; (b < 49); b++)
			icrp[b] = 0x04;
		imrp = (volatile unsigned long *) (MCF_IPSBAR + MCFICM_INTC0 +
			MCFINTC_IMRH);
		*imrp &= ~0x0001FFF0;
	}
	*(volatile unsigned char *)(MCF_IPSBAR + MCF_GPIO_PAR_FEC) |= 0xf0;
	*(volatile unsigned char *)(MCF_IPSBAR + MCF_GPIO_PAR_FECI2C) |= 0x0f;
}

static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep)
{
	volatile fec_t *fecp;

	fecp = fep->hwp;
	fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04;
	fecp->fec_x_cntrl = 0x00;

	/*
	 * Set MII speed to 2.5 MHz
	 * See 5282 manual section 17.5.4.7: MSCR
	 */
	fep->phy_speed = ((((MCF_CLK / 2) / (2500000 / 10)) + 5) / 10) * 2;
	fecp->fec_mii_speed = fep->phy_speed;

	fec_restart(dev, 0);
}

static void __inline__ fec_get_mac(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile fec_t *fecp;
	unsigned char *iap, tmpaddr[ETH_ALEN];

	fecp = fep->hwp;

	if (FEC_FLASHMAC) {
		/*
		 * Get MAC address from FLASH.
		 * If it is all 1's or 0's, use the default.
		 */
		iap = (unsigned char *)FEC_FLASHMAC;
		if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
		    (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
			iap = fec_mac_default;
		if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
		    (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
			iap = fec_mac_default;
	} else {
		*((unsigned long *) &tmpaddr[0]) = fecp->fec_addr_low;
		*((unsigned short *) &tmpaddr[4]) = (fecp->fec_addr_high >> 16);
		iap = &tmpaddr[0];
	}

	memcpy(dev->dev_addr, iap, ETH_ALEN);

	/* Adjust MAC if using default MAC address */
	if (iap == fec_mac_default)
		dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
}
static void __inline__ fec_enable_phy_intr(void)
{
}

static void __inline__ fec_disable_phy_intr(void)
{
}

static void __inline__ fec_phy_ack_intr(void)
{
}

static void __inline__ fec_localhw_setup(void)
{
}

static void __inline__ fec_uncache(unsigned long addr)
{
}

/* ------------------------------------------------------------------------- */

#elif defined(CONFIG_M532x)
/*
 * Code specific for M532x
 */
static void __inline__ fec_request_intrs(struct net_device *dev)
{
	struct fec_enet_private *fep;
	int b;
	static const struct idesc {
		char *name;
		unsigned short irq;
	} *idp, id[] = {
		{ "fec(TXF)", 36 },
		{ "fec(RXF)", 40 },
		{ "fec(MII)", 42 },
		{ NULL },
	};

	fep = netdev_priv(dev);
	b = (fep->index) ? 128 : 64;

	/* Setup interrupt handlers. */
	for (idp = id; idp->name; idp++) {
		if (request_irq(b+idp->irq, fec_enet_interrupt, IRQF_DISABLED, idp->name, dev) != 0)
			printk("FEC: Could not allocate %s IRQ(%d)!\n",
				idp->name, b+idp->irq);
	}

	/* Unmask interrupts */
	MCF_INTC0_ICR36 = 0x2;
	MCF_INTC0_ICR37 = 0x2;
	MCF_INTC0_ICR38 = 0x2;
	MCF_INTC0_ICR39 = 0x2;
	MCF_INTC0_ICR40 = 0x2;
	MCF_INTC0_ICR41 = 0x2;
	MCF_INTC0_ICR42 = 0x2;
	MCF_INTC0_ICR43 = 0x2;
	MCF_INTC0_ICR44 = 0x2;
	MCF_INTC0_ICR45 = 0x2;
	MCF_INTC0_ICR46 = 0x2;
	MCF_INTC0_ICR47 = 0x2;
	MCF_INTC0_ICR48 = 0x2;

	MCF_INTC0_IMRH &= ~(
		MCF_INTC_IMRH_INT_MASK36 |
		MCF_INTC_IMRH_INT_MASK37 |
		MCF_INTC_IMRH_INT_MASK38 |
		MCF_INTC_IMRH_INT_MASK39 |
		MCF_INTC_IMRH_INT_MASK40 |
		MCF_INTC_IMRH_INT_MASK41 |
		MCF_INTC_IMRH_INT_MASK42 |
		MCF_INTC_IMRH_INT_MASK43 |
		MCF_INTC_IMRH_INT_MASK44 |
		MCF_INTC_IMRH_INT_MASK45 |
		MCF_INTC_IMRH_INT_MASK46 |
		MCF_INTC_IMRH_INT_MASK47 |
		MCF_INTC_IMRH_INT_MASK48 );

	/* Set up gpio outputs for MII lines */
	MCF_GPIO_PAR_FECI2C |= (0 |
		MCF_GPIO_PAR_FECI2C_PAR_MDC_EMDC |
		MCF_GPIO_PAR_FECI2C_PAR_MDIO_EMDIO);
	MCF_GPIO_PAR_FEC = (0 |
		MCF_GPIO_PAR_FEC_PAR_FEC_7W_FEC |
		MCF_GPIO_PAR_FEC_PAR_FEC_MII_FEC);
}

static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep)
{
	volatile fec_t *fecp;

	fecp = fep->hwp;
	fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04;
	fecp->fec_x_cntrl = 0x00;

	/*
	 * Set MII speed to 2.5 MHz
	 */
	fep->phy_speed = ((((MCF_CLK / 2) / (2500000 / 10)) + 5) / 10) * 2;
	fecp->fec_mii_speed = fep->phy_speed;

	fec_restart(dev, 0);
}

static void __inline__ fec_get_mac(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile fec_t *fecp;
	unsigned char *iap, tmpaddr[ETH_ALEN];

	fecp = fep->hwp;

	if (FEC_FLASHMAC) {
		/*
		 * Get MAC address from FLASH.
		 * If it is all 1's or 0's, use the default.
		 */
		iap = (unsigned char *)FEC_FLASHMAC;
		if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
		    (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
			iap = fec_mac_default;
		if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
		    (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
			iap = fec_mac_default;
	} else {
		*((unsigned long *) &tmpaddr[0]) = fecp->fec_addr_low;
		*((unsigned short *) &tmpaddr[4]) = (fecp->fec_addr_high >> 16);
		iap = &tmpaddr[0];
	}

	memcpy(dev->dev_addr, iap, ETH_ALEN);

	/* Adjust MAC if using default MAC address */
	if (iap == fec_mac_default)
		dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
}
static void __inline__ fec_enable_phy_intr(void)
{
}

static void __inline__ fec_disable_phy_intr(void)
{
}

static void __inline__ fec_phy_ack_intr(void)
{
}

static void __inline__ fec_localhw_setup(void)
{
}

/*
 * Do not need to make region uncached on 532x.
 */
static void __inline__ fec_uncache(unsigned long addr)
{
}

/* ------------------------------------------------------------------------- */


#else

/*
 * Code specific to the MPC860T setup.
 */
static void __inline__ fec_request_intrs(struct net_device *dev)
{
	volatile immap_t *immap;

	immap = (immap_t *)IMAP_ADDR;	/* pointer to internal registers */

	if (request_8xxirq(FEC_INTERRUPT, fec_enet_interrupt, 0, "fec", dev) != 0)
		panic("Could not allocate FEC IRQ!");

#ifdef CONFIG_RPXCLASSIC
	/* Make Port C, bit 15 an input that causes interrupts. */
	immap->im_ioport.iop_pcpar &= ~0x0001;
	immap->im_ioport.iop_pcdir &= ~0x0001;
	immap->im_ioport.iop_pcso &= ~0x0001;
	immap->im_ioport.iop_pcint |= 0x0001;
	cpm_install_handler(CPMVEC_PIO_PC15, mii_link_interrupt, dev);

	/* Make LEDS reflect Link status. */
	*((uint *) RPX_CSR_ADDR) &= ~BCSR2_FETHLEDMODE;
#endif
#ifdef CONFIG_FADS
	if (request_8xxirq(SIU_IRQ2, mii_link_interrupt, 0, "mii", dev) != 0)
		panic("Could not allocate MII IRQ!");
#endif
}

static void __inline__ fec_get_mac(struct net_device *dev)
{
	bd_t *bd;

	bd = (bd_t *)__res;
	memcpy(dev->dev_addr, bd->bi_enetaddr, ETH_ALEN);

#ifdef CONFIG_RPXCLASSIC
	/* The Embedded Planet boards have only one MAC address in
	 * the EEPROM, but can have two Ethernet ports.  For the
	 * FEC port, we create another address by setting one of
	 * the address bits above something that would have (up to
	 * now) been allocated.
	 */
	dev->dev_addr[3] |= 0x80;
#endif
}

static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep)
{
	extern uint _get_IMMR(void);
	volatile immap_t *immap;
	volatile fec_t *fecp;
	bd_t *bd = (bd_t *)__res;

	fecp = fep->hwp;
	immap = (immap_t *)IMAP_ADDR;	/* pointer to internal registers */

	/* Configure all of port D for MII. */
	immap->im_ioport.iop_pdpar = 0x1fff;

	/* Bits moved from Rev. D onward. */
	if ((_get_IMMR() & 0xffff) < 0x0501)
		immap->im_ioport.iop_pddir = 0x1c58;	/* Pre rev. D */
	else
		immap->im_ioport.iop_pddir = 0x1fff;	/* Rev. D and later */

	/* Set MII speed to 2.5 MHz */
	fecp->fec_mii_speed = fep->phy_speed =
		((bd->bi_busfreq * 1000000) / 2500000) & 0x7e;
}
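
/*
 * The MII_SPEED value above divides the bus clock down to the 2.5 MHz
 * MDC ceiling, keeping only the even divider bits (& 0x7e).  A worked
 * example, assuming a board info block reporting bi_busfreq = 50 (MHz):
 *
 *   (50 * 1000000) / 2500000 = 20 = 0x14, and 0x14 & 0x7e = 0x14
 *
 * so the FEC divides the 50 MHz bus clock by 20 to run MDC at the
 * target 2.5 MHz.
 */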
static void __inline__ fec_enable_phy_intr(void)
{
	volatile fec_t *fecp;

	fecp = &(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec);

	/* Enable MII command finished interrupt */
	fecp->fec_ivec = (FEC_INTERRUPT/2) << 29;
}

static void __inline__ fec_disable_phy_intr(void)
{
}

static void __inline__ fec_phy_ack_intr(void)
{
}

static void __inline__ fec_localhw_setup(void)
{
	volatile fec_t *fecp;

	fecp = &(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec);
	fecp->fec_r_hash = PKT_MAXBUF_SIZE;
	/* Enable big endian and don't care about SDMA FC. */
	fecp->fec_fun_code = 0x78000000;
}

static void __inline__ fec_uncache(unsigned long addr)
{
	pte_t *pte;
	pte = va_to_pte(addr);
	pte_val(*pte) |= _PAGE_NO_CACHE;
	flush_tlb_page(init_mm.mmap, addr);
}

#endif

/* ------------------------------------------------------------------------- */

static void mii_display_status(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);

	if (!fep->link && !fep->old_link) {
		/* Link is still down - don't print anything */
		return;
	}

	printk("%s: status: ", dev->name);

	if (!fep->link) {
		printk("link down");
	} else {
		printk("link up");

		switch (*s & PHY_STAT_SPMASK) {
		case PHY_STAT_100FDX: printk(", 100MBit Full Duplex"); break;
		case PHY_STAT_100HDX: printk(", 100MBit Half Duplex"); break;
		case PHY_STAT_10FDX: printk(", 10MBit Full Duplex"); break;
		case PHY_STAT_10HDX: printk(", 10MBit Half Duplex"); break;
		default:
			printk(", Unknown speed/duplex");
		}

		if (*s & PHY_STAT_ANC)
			printk(", auto-negotiation complete");
	}

	if (*s & PHY_STAT_FAULT)
		printk(", remote fault");

	printk(".\n");
}

static void mii_display_config(struct work_struct *work)
{
	struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task);
	struct net_device *dev = fep->netdev;
	uint status = fep->phy_status;

	/*
	 * When we get here, phy_task is already removed from
	 * the workqueue.  It is thus safe to reuse it.
	 */
	fep->mii_phy_task_queued = 0;
	printk("%s: config: auto-negotiation ", dev->name);

	if (status & PHY_CONF_ANE)
		printk("on");
	else
		printk("off");

	if (status & PHY_CONF_100FDX)
		printk(", 100FDX");
	if (status & PHY_CONF_100HDX)
		printk(", 100HDX");
	if (status & PHY_CONF_10FDX)
		printk(", 10FDX");
	if (status & PHY_CONF_10HDX)
		printk(", 10HDX");
	if (!(status & PHY_CONF_SPMASK))
		printk(", No speed/duplex selected?");

	if (status & PHY_CONF_LOOP)
		printk(", loopback enabled");

	printk(".\n");

	fep->sequence_done = 1;
}
static void mii_display_config(struct work_struct *work)
{
	struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task);
	struct net_device *dev = fep->netdev;
	uint status = fep->phy_status;

	/*
	** When we get here, phy_task is already removed from
	** the workqueue.  It is thus safe to reuse it.
	*/
	fep->mii_phy_task_queued = 0;
	printk("%s: config: auto-negotiation ", dev->name);

	if (status & PHY_CONF_ANE)
		printk("on");
	else
		printk("off");

	if (status & PHY_CONF_100FDX)
		printk(", 100FDX");
	if (status & PHY_CONF_100HDX)
		printk(", 100HDX");
	if (status & PHY_CONF_10FDX)
		printk(", 10FDX");
	if (status & PHY_CONF_10HDX)
		printk(", 10HDX");
	if (!(status & PHY_CONF_SPMASK))
		printk(", No speed/duplex selected?");

	if (status & PHY_CONF_LOOP)
		printk(", loopback enabled");

	printk(".\n");

	fep->sequence_done = 1;
}

static void mii_relink(struct work_struct *work)
{
	struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task);
	struct net_device *dev = fep->netdev;
	int duplex;

	/*
	** When we get here, phy_task is already removed from
	** the workqueue.  It is thus safe to reuse it.
	*/
	fep->mii_phy_task_queued = 0;
	fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
	mii_display_status(dev);
	fep->old_link = fep->link;

	if (fep->link) {
		duplex = 0;
		if (fep->phy_status
		    & (PHY_STAT_100FDX | PHY_STAT_10FDX))
			duplex = 1;
		fec_restart(dev, duplex);
	} else
		fec_stop(dev);

#if 0
	enable_irq(fep->mii_irq);
#endif
}

/* mii_queue_relink is called in interrupt context from mii_link_interrupt */
static void mii_queue_relink(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/*
	** We cannot queue phy_task twice in the workqueue.  It
	** would cause an endless loop in the workqueue.
	** Fortunately, if the last mii_relink entry has not yet been
	** executed now, it will do the job for the current interrupt,
	** which is just what we want.
	*/
	if (fep->mii_phy_task_queued)
		return;

	fep->mii_phy_task_queued = 1;
	INIT_WORK(&fep->phy_task, mii_relink);
	schedule_work(&fep->phy_task);
}

/* mii_queue_config is called in interrupt context from fec_enet_mii */
static void mii_queue_config(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	if (fep->mii_phy_task_queued)
		return;

	fep->mii_phy_task_queued = 1;
	INIT_WORK(&fep->phy_task, mii_display_config);
	schedule_work(&fep->phy_task);
}

phy_cmd_t const phy_cmd_relink[] = {
	{ mk_mii_read(MII_REG_CR), mii_queue_relink },
	{ mk_mii_end, }
	};
phy_cmd_t const phy_cmd_config[] = {
	{ mk_mii_read(MII_REG_CR), mii_queue_config },
	{ mk_mii_end, }
	};

/* Read remainder of PHY ID.
*/
static void
mii_discover_phy3(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep;
	int i;

	fep = netdev_priv(dev);
	fep->phy_id |= (mii_reg & 0xffff);
	printk("fec: PHY @ 0x%x, ID 0x%08x", fep->phy_addr, fep->phy_id);

	for(i = 0; phy_info[i]; i++) {
		if(phy_info[i]->id == (fep->phy_id >> 4))
			break;
	}

	if (phy_info[i])
		printk(" -- %s\n", phy_info[i]->name);
	else
		printk(" -- unknown PHY!\n");

	fep->phy = phy_info[i];
	fep->phy_id_done = 1;
}
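/*
 * The 32-bit PHY ID is assembled from two 16-bit reads: mii_discover_phy()
 * below shifts the PHYIR1 value into the upper half, and mii_discover_phy3()
 * above ORs the PHYIR2 value into the lower half.  The low four bits of the
 * ID are the silicon revision, which is why the phy_info lookup compares
 * against (fep->phy_id >> 4).  As a purely illustrative example, PHYIR1 =
 * 0x0181 and PHYIR2 = 0xb800 would yield phy_id = 0x0181b800 and a table
 * match on 0x0181b80.
 */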
/* Scan all of the MII PHY addresses looking for someone to respond
 * with a valid ID.  This usually happens quickly.
 */
static void
mii_discover_phy(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep;
	volatile fec_t *fecp;
	uint phytype;

	fep = netdev_priv(dev);
	fecp = fep->hwp;

	if (fep->phy_addr < 32) {
		if ((phytype = (mii_reg & 0xffff)) != 0xffff && phytype != 0) {

			/* Got first part of ID, now get remainder.
			 */
			fep->phy_id = phytype << 16;
			mii_queue(dev, mk_mii_read(MII_REG_PHYIR2),
							mii_discover_phy3);
		} else {
			fep->phy_addr++;
			mii_queue(dev, mk_mii_read(MII_REG_PHYIR1),
							mii_discover_phy);
		}
	} else {
		printk("FEC: No PHY device found.\n");
		/* Disable external MII interface */
		fecp->fec_mii_speed = fep->phy_speed = 0;
		fec_disable_phy_intr();
	}
}

/* This interrupt occurs when the PHY detects a link change.
*/
#ifdef HAVE_mii_link_interrupt
#ifdef CONFIG_RPXCLASSIC
static void
mii_link_interrupt(void *dev_id)
#else
static irqreturn_t
mii_link_interrupt(int irq, void * dev_id)
#endif
{
	struct net_device *dev = dev_id;
	struct fec_enet_private *fep = netdev_priv(dev);

	fec_phy_ack_intr();

#if 0
	disable_irq(fep->mii_irq);  /* disable now, enable later */
#endif

	mii_do_cmd(dev, fep->phy->ack_int);
	mii_do_cmd(dev, phy_cmd_relink);  /* restart and display status */

#ifndef CONFIG_RPXCLASSIC
	/* The CPM-installed handler variant above returns void. */
	return IRQ_HANDLED;
#endif
}
#endif

static int
fec_enet_open(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/* I should reset the ring buffers here, but I don't yet know
	 * a simple way to do that.
	 */
	fec_set_mac_address(dev);

	fep->sequence_done = 0;
	fep->link = 0;

	if (fep->phy) {
		mii_do_cmd(dev, fep->phy->ack_int);
		mii_do_cmd(dev, fep->phy->config);
		mii_do_cmd(dev, phy_cmd_config);  /* display configuration */

		/* Poll until the PHY tells us its configuration
		 * (not link state).
		 * Request is initiated by mii_do_cmd above, but answer
		 * comes by interrupt.
		 * This should take about 25 usec per register at 2.5 MHz,
		 * and we read approximately 5 registers.
		 */
		while(!fep->sequence_done)
			schedule();

		mii_do_cmd(dev, fep->phy->startup);

		/* Set the initial link state to true.  A lot of hardware
		 * based on this device does not implement a PHY interrupt,
		 * so we are never notified of link change.
		 */
		fep->link = 1;
	} else {
		fep->link = 1;	/* let's just try it and see */
		/* no phy, go full duplex, it's most likely a hub chip */
		fec_restart(dev, 1);
	}

	netif_start_queue(dev);
	fep->opened = 1;
	return 0;	/* Success */
}

static int
fec_enet_close(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/* Don't know what to do yet.
	 */
	fep->opened = 0;
	netif_stop_queue(dev);
	fec_stop(dev);

	return 0;
}
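/*
 * The multicast filter below hashes each group destination address with
 * CRC-32 and uses the top HASH_BITS (6) bits of the result to select one
 * bit of a 64-bit filter spread across two 32-bit registers.  A worked
 * example (CRC value purely illustrative): a CRC of 0xb6e12fc3 gives
 * hash = (0xb6e12fc3 >> 26) & 0x3f = 45; since 45 > 31, the filter sets
 * bit (45 - 32) = 13 of fec_grp_hash_table_high.
 */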
/* Set or clear the multicast filter for this adaptor.
 * Skeleton taken from sunlance driver.
 * The CPM Ethernet implementation allows Multicast as well as individual
 * MAC address filtering.  Some of the drivers check to make sure it is
 * a group multicast address, and discard those that are not.  I guess I
 * will do the same for now, but just remove the test if you want
 * individual filtering as well (do the upper net layers want or support
 * this kind of feature?).
 */

#define HASH_BITS	6		/* #bits in hash */
#define CRC32_POLY	0xEDB88320

static void set_multicast_list(struct net_device *dev)
{
	struct fec_enet_private *fep;
	volatile fec_t *ep;
	struct dev_mc_list *dmi;
	unsigned int i, j, bit, data, crc;
	unsigned char hash;

	fep = netdev_priv(dev);
	ep = fep->hwp;

	if (dev->flags&IFF_PROMISC) {
		ep->fec_r_cntrl |= 0x0008;
	} else {

		ep->fec_r_cntrl &= ~0x0008;

		if (dev->flags & IFF_ALLMULTI) {
			/* Catch all multicast addresses, so set the
			 * filter to all 1's.
			 */
			ep->fec_grp_hash_table_high = 0xffffffff;
			ep->fec_grp_hash_table_low = 0xffffffff;
		} else {
			/* Clear filter and add the addresses in hash register.
			 */
			ep->fec_grp_hash_table_high = 0;
			ep->fec_grp_hash_table_low = 0;

			dmi = dev->mc_list;

			for (j = 0; j < dev->mc_count; j++, dmi = dmi->next)
			{
				/* Only support group multicast for now.
				 */
				if (!(dmi->dmi_addr[0] & 1))
					continue;

				/* calculate crc32 value of mac address
				 */
				crc = 0xffffffff;

				for (i = 0; i < dmi->dmi_addrlen; i++)
				{
					data = dmi->dmi_addr[i];
					for (bit = 0; bit < 8; bit++, data >>= 1)
					{
						crc = (crc >> 1) ^
						(((crc ^ data) & 1) ? CRC32_POLY : 0);
					}
				}

				/* only the upper 6 bits (HASH_BITS) are used,
				 * and they select a specific bit in the hash
				 * registers
				 */
				hash = (crc >> (32 - HASH_BITS)) & 0x3f;

				if (hash > 31)
					ep->fec_grp_hash_table_high |= 1 << (hash - 32);
				else
					ep->fec_grp_hash_table_low |= 1 << hash;
			}
		}
	}
}

/* Set a MAC change in hardware.
 */
static void
fec_set_mac_address(struct net_device *dev)
{
	volatile fec_t *fecp;

	fecp = ((struct fec_enet_private *)netdev_priv(dev))->hwp;

	/* Set station address. */
	fecp->fec_addr_low = dev->dev_addr[3] | (dev->dev_addr[2] << 8) |
		(dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24);
	fecp->fec_addr_high = (dev->dev_addr[5] << 16) |
		(dev->dev_addr[4] << 24);
}

/* Initialize the FEC Ethernet on 860T (or ColdFire 5272).
 */
 /*
  * XXX:  We need to clean up on failure exits here.
  */
int __init fec_enet_init(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	unsigned long mem_addr;
	volatile cbd_t *bdp;
	cbd_t *cbd_base;
	volatile fec_t *fecp;
	int i, j;
	static int index = 0;

	/* Only allow us to be probed once. */
	if (index >= FEC_MAX_PORTS)
		return -ENXIO;

	/* Allocate memory for buffer descriptors.
	 */
	mem_addr = __get_free_page(GFP_KERNEL);
	if (mem_addr == 0) {
		printk("FEC: allocate descriptor memory failed?\n");
		return -ENOMEM;
	}
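	/*
	 * Sizing sketch, assuming 4 KiB pages: each descriptor is 8 bytes
	 * (see the ring-size build check near the top of the file),
	 * FEC_ENET_RX_FRPPG is 4096 / 2048 = 2, so RX_RING_SIZE is
	 * 2 * 8 = 16, and TX_RING_SIZE is 16; both rings together
	 * (32 descriptors, 256 bytes) fit comfortably in the single
	 * page allocated above.
	 */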
	spin_lock_init(&fep->hw_lock);
	spin_lock_init(&fep->mii_lock);

	/* Create an Ethernet device instance.
	 */
	fecp = (volatile fec_t *) fec_hw[index];

	fep->index = index;
	fep->hwp = fecp;
	fep->netdev = dev;

	/* Whack a reset.  We should wait for this.
	 */
	fecp->fec_ecntrl = 1;
	udelay(10);

	/* Set the Ethernet address.  If using multiple Enets on the 8xx,
	 * this needs some work to get unique addresses.
	 *
	 * This is our default MAC address unless the user changes
	 * it via eth_mac_addr (our dev->set_mac_addr handler).
	 */
	fec_get_mac(dev);

	cbd_base = (cbd_t *)mem_addr;

	fec_uncache(mem_addr);

	/* Set receive and transmit descriptor base.
	 */
	fep->rx_bd_base = cbd_base;
	fep->tx_bd_base = cbd_base + RX_RING_SIZE;

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->cur_rx = fep->rx_bd_base;

	fep->skb_cur = fep->skb_dirty = 0;

	/* Initialize the receive buffer descriptors.
	 */
	bdp = fep->rx_bd_base;
	for (i=0; i<FEC_ENET_RX_PAGES; i++) {

		/* Allocate a page.
		 */
		mem_addr = __get_free_page(GFP_KERNEL);
		/* XXX: missing check for allocation failure */

		fec_uncache(mem_addr);

		/* Initialize the BD for every fragment in the page.
		 */
		for (j=0; j<FEC_ENET_RX_FRPPG; j++) {
			bdp->cbd_sc = BD_ENET_RX_EMPTY;
			bdp->cbd_bufaddr = __pa(mem_addr);
			mem_addr += FEC_ENET_RX_FRSIZE;
			bdp++;
		}
	}

	/* Set the last buffer to wrap.
	 */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* ...and the same for transmit.
	 */
	bdp = fep->tx_bd_base;
	for (i=0, j=FEC_ENET_TX_FRPPG; i<TX_RING_SIZE; i++) {
		if (j >= FEC_ENET_TX_FRPPG) {
			mem_addr = __get_free_page(GFP_KERNEL);
			/* XXX: missing check for allocation failure */
			j = 1;
		} else {
			mem_addr += FEC_ENET_TX_FRSIZE;
			j++;
		}
		fep->tx_bounce[i] = (unsigned char *) mem_addr;

		/* Initialize the BD for every fragment in the page.
		 */
		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}

	/* Set the last buffer to wrap.
	 */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* Set receive and transmit descriptor base.
	 */
	fecp->fec_r_des_start = __pa((uint)(fep->rx_bd_base));
	fecp->fec_x_des_start = __pa((uint)(fep->tx_bd_base));

	/* Install our interrupt handlers.  This varies depending on
	 * the architecture.
	 */
	fec_request_intrs(dev);

	fecp->fec_grp_hash_table_high = 0;
	fecp->fec_grp_hash_table_low = 0;
	fecp->fec_r_buff_size = PKT_MAXBLR_SIZE;
	fecp->fec_ecntrl = 2;
	fecp->fec_r_des_active = 0;
#ifndef CONFIG_M5272
	fecp->fec_hash_table_high = 0;
	fecp->fec_hash_table_low = 0;
#endif

	dev->base_addr = (unsigned long)fecp;

	/* The FEC Ethernet specific entries in the device structure. */
	dev->open = fec_enet_open;
	dev->hard_start_xmit = fec_enet_start_xmit;
	dev->tx_timeout = fec_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->stop = fec_enet_close;
	dev->set_multicast_list = set_multicast_list;

	for (i=0; i<NMII-1; i++)
		mii_cmds[i].mii_next = &mii_cmds[i+1];
	mii_free = mii_cmds;

	/* setup MII interface */
	fec_set_mii(dev, fep);

	/* Clear and enable interrupts */
	fecp->fec_ievent = 0xffc00000;
	fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII);

	/* Queue up command to detect the PHY and initialize the
	 * remainder of the interface.
	 */
	fep->phy_id_done = 0;
	fep->phy_addr = 0;
	mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy);

	index++;
	return 0;
}
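/*
 * As the XXX notes above point out, the per-ring page allocations inside
 * fec_enet_init() are unchecked and nothing is unwound on a failure exit.
 * A minimal sketch of what a checked allocation could look like
 * (illustrative only; releasing the pages and IRQs already claimed is
 * still left out):
 */
#if 0
		mem_addr = __get_free_page(GFP_KERNEL);
		if (mem_addr == 0) {
			printk("FEC: allocate rx buffer page failed\n");
			return -ENOMEM;	/* XXX: leaks earlier allocations */
		}
#endif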
/* This function is called to start or restart the FEC during a link
 * change.  This only happens when switching between half and full
 * duplex.
 */
static void
fec_restart(struct net_device *dev, int duplex)
{
	struct fec_enet_private *fep;
	volatile cbd_t *bdp;
	volatile fec_t *fecp;
	int i;

	fep = netdev_priv(dev);
	fecp = fep->hwp;

	/* Whack a reset.  We should wait for this.
	 */
	fecp->fec_ecntrl = 1;
	udelay(10);

	/* Clear any outstanding interrupt.
	 */
	fecp->fec_ievent = 0xffc00000;
	fec_enable_phy_intr();

	/* Set station address.
	 */
	fec_set_mac_address(dev);

	/* Reset all multicast.
	 */
	fecp->fec_grp_hash_table_high = 0;
	fecp->fec_grp_hash_table_low = 0;

	/* Set maximum receive buffer size.
	 */
	fecp->fec_r_buff_size = PKT_MAXBLR_SIZE;

	fec_localhw_setup();

	/* Set receive and transmit descriptor base.
	 */
	fecp->fec_r_des_start = __pa((uint)(fep->rx_bd_base));
	fecp->fec_x_des_start = __pa((uint)(fep->tx_bd_base));

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->cur_rx = fep->rx_bd_base;

	/* Reset SKB transmit buffers.
	 */
	fep->skb_cur = fep->skb_dirty = 0;
	for (i=0; i<=TX_RING_MOD_MASK; i++) {
		if (fep->tx_skbuff[i] != NULL) {
			dev_kfree_skb_any(fep->tx_skbuff[i]);
			fep->tx_skbuff[i] = NULL;
		}
	}

	/* Initialize the receive buffer descriptors.
	 */
	bdp = fep->rx_bd_base;
	for (i=0; i<RX_RING_SIZE; i++) {

		/* Initialize the BD for every fragment in the page.
		 */
		bdp->cbd_sc = BD_ENET_RX_EMPTY;
		bdp++;
	}

	/* Set the last buffer to wrap.
	 */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* ...and the same for transmit.
	 */
	bdp = fep->tx_bd_base;
	for (i=0; i<TX_RING_SIZE; i++) {

		/* Initialize the BD for every fragment in the page.
		 */
		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}

	/* Set the last buffer to wrap.
	 */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* Enable MII mode.
	 */
	if (duplex) {
		fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04;	/* MII enable */
		fecp->fec_x_cntrl = 0x04;			/* FD enable */
	} else {
		/* MII enable|No Rcv on Xmit */
		fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x06;
		fecp->fec_x_cntrl = 0x00;
	}
	fep->full_duplex = duplex;

	/* Set MII speed.
	 */
	fecp->fec_mii_speed = fep->phy_speed;

	/* And last, enable the transmit and receive processing.
	 */
	fecp->fec_ecntrl = 2;
	fecp->fec_r_des_active = 0;

	/* Enable interrupts we wish to service.
	 */
	fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII);
}
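/*
 * A note on the duplex programming in fec_restart() above, per its inline
 * comments: fec_r_cntrl bit 0x04 enables MII mode and bit 0x02 disables
 * receive-while-transmitting, while fec_x_cntrl bit 0x04 enables full
 * duplex.  So a half-duplex link ends up with fec_r_cntrl =
 * OPT_FRAME_SIZE | 0x06 and fec_x_cntrl = 0x00, and a full-duplex link
 * with fec_r_cntrl = OPT_FRAME_SIZE | 0x04 and fec_x_cntrl = 0x04.
 */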
static void
fec_stop(struct net_device *dev)
{
	volatile fec_t *fecp;
	struct fec_enet_private *fep;

	fep = netdev_priv(dev);
	fecp = fep->hwp;

	/*
	** We cannot expect a graceful transmit stop without link !!!
	*/
	if (fep->link) {
		fecp->fec_x_cntrl = 0x01;	/* Graceful transmit stop */
		udelay(10);
		if (!(fecp->fec_ievent & FEC_ENET_GRA))
			printk("fec_stop: graceful transmit stop did not complete!\n");
	}

	/* Whack a reset.  We should wait for this.
	 */
	fecp->fec_ecntrl = 1;
	udelay(10);

	/* Clear outstanding MII command interrupts.
	 */
	fecp->fec_ievent = FEC_ENET_MII;
	fec_enable_phy_intr();

	fecp->fec_imask = FEC_ENET_MII;
	fecp->fec_mii_speed = fep->phy_speed;
}

static int __init fec_enet_module_init(void)
{
	struct net_device *dev;
	int i, err;
	DECLARE_MAC_BUF(mac);

	printk("FEC ENET Version 0.2\n");

	for (i = 0; (i < FEC_MAX_PORTS); i++) {
		dev = alloc_etherdev(sizeof(struct fec_enet_private));
		if (!dev)
			return -ENOMEM;
		err = fec_enet_init(dev);
		if (err) {
			free_netdev(dev);
			continue;
		}
		if (register_netdev(dev) != 0) {
			/* XXX: missing cleanup here */
			free_netdev(dev);
			return -EIO;
		}

		printk("%s: ethernet %s\n",
		       dev->name, print_mac(mac, dev->dev_addr));
	}
	return 0;
}

module_init(fec_enet_module_init);

MODULE_LICENSE("GPL");
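/*
 * Note that no module_exit() counterpart is provided: once registered,
 * the devices (and the pages allocated in fec_enet_init()) are never
 * released.  A minimal sketch of what an unload path could look like,
 * assuming the net_device pointers had been saved in a hypothetical
 * fec_dev[FEC_MAX_PORTS] array, which this driver does not currently keep:
 */
#if 0
static void __exit fec_enet_module_exit(void)
{
	int i;

	for (i = 0; i < FEC_MAX_PORTS; i++) {
		if (fec_dev[i]) {	/* hypothetical bookkeeping */
			unregister_netdev(fec_dev[i]);
			free_netdev(fec_dev[i]);
		}
	}
}
module_exit(fec_enet_module_exit);
#endif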