at v2.6.24-rc2 2655 lines 69 kB view raw
1/* 2 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx. 3 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net) 4 * 5 * This version of the driver is specific to the FADS implementation, 6 * since the board contains control registers external to the processor 7 * for the control of the LevelOne LXT970 transceiver. The MPC860T manual 8 * describes connections using the internal parallel port I/O, which 9 * is basically all of Port D. 10 * 11 * Right now, I am very wasteful with the buffers. I allocate memory 12 * pages and then divide them into 2K frame buffers. This way I know I 13 * have buffers large enough to hold one frame within one buffer descriptor. 14 * Once I get this working, I will use 64 or 128 byte CPM buffers, which 15 * will be much more memory efficient and will easily handle lots of 16 * small packets. 17 * 18 * Much better multiple PHY support by Magnus Damm. 19 * Copyright (c) 2000 Ericsson Radio Systems AB. 20 * 21 * Support for FEC controller of ColdFire processors. 22 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com) 23 * 24 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be) 25 * Copyright (c) 2004-2006 Macq Electronique SA. 26 */ 27 28#include <linux/module.h> 29#include <linux/kernel.h> 30#include <linux/string.h> 31#include <linux/ptrace.h> 32#include <linux/errno.h> 33#include <linux/ioport.h> 34#include <linux/slab.h> 35#include <linux/interrupt.h> 36#include <linux/pci.h> 37#include <linux/init.h> 38#include <linux/delay.h> 39#include <linux/netdevice.h> 40#include <linux/etherdevice.h> 41#include <linux/skbuff.h> 42#include <linux/spinlock.h> 43#include <linux/workqueue.h> 44#include <linux/bitops.h> 45 46#include <asm/irq.h> 47#include <asm/uaccess.h> 48#include <asm/io.h> 49#include <asm/pgtable.h> 50#include <asm/cacheflush.h> 51 52#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || \ 53 defined(CONFIG_M5272) || defined(CONFIG_M528x) || \ 54 defined(CONFIG_M520x) || defined(CONFIG_M532x) 55#include <asm/coldfire.h> 56#include <asm/mcfsim.h> 57#include "fec.h" 58#else 59#include <asm/8xx_immap.h> 60#include <asm/mpc8xx.h> 61#include "commproc.h" 62#endif 63 64#if defined(CONFIG_FEC2) 65#define FEC_MAX_PORTS 2 66#else 67#define FEC_MAX_PORTS 1 68#endif 69 70/* 71 * Define the fixed address of the FEC hardware. 72 */ 73static unsigned int fec_hw[] = { 74#if defined(CONFIG_M5272) 75 (MCF_MBAR + 0x840), 76#elif defined(CONFIG_M527x) 77 (MCF_MBAR + 0x1000), 78 (MCF_MBAR + 0x1800), 79#elif defined(CONFIG_M523x) || defined(CONFIG_M528x) 80 (MCF_MBAR + 0x1000), 81#elif defined(CONFIG_M520x) 82 (MCF_MBAR+0x30000), 83#elif defined(CONFIG_M532x) 84 (MCF_MBAR+0xfc030000), 85#else 86 &(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec), 87#endif 88}; 89 90static unsigned char fec_mac_default[] = { 91 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 92}; 93 94/* 95 * Some hardware gets it MAC address out of local flash memory. 96 * if this is non-zero then assume it is the address to get MAC from. 97 */ 98#if defined(CONFIG_NETtel) 99#define FEC_FLASHMAC 0xf0006006 100#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES) 101#define FEC_FLASHMAC 0xf0006000 102#elif defined(CONFIG_CANCam) 103#define FEC_FLASHMAC 0xf0020000 104#elif defined (CONFIG_M5272C3) 105#define FEC_FLASHMAC (0xffe04000 + 4) 106#elif defined(CONFIG_MOD5272) 107#define FEC_FLASHMAC 0xffc0406b 108#else 109#define FEC_FLASHMAC 0 110#endif 111 112/* Forward declarations of some structures to support different PHYs 113*/ 114 115typedef struct { 116 uint mii_data; 117 void (*funct)(uint mii_reg, struct net_device *dev); 118} phy_cmd_t; 119 120typedef struct { 121 uint id; 122 char *name; 123 124 const phy_cmd_t *config; 125 const phy_cmd_t *startup; 126 const phy_cmd_t *ack_int; 127 const phy_cmd_t *shutdown; 128} phy_info_t; 129 130/* The number of Tx and Rx buffers. These are allocated from the page 131 * pool. The code may assume these are power of two, so it it best 132 * to keep them that size. 133 * We don't need to allocate pages for the transmitter. We just use 134 * the skbuffer directly. 135 */ 136#define FEC_ENET_RX_PAGES 8 137#define FEC_ENET_RX_FRSIZE 2048 138#define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE) 139#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES) 140#define FEC_ENET_TX_FRSIZE 2048 141#define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE) 142#define TX_RING_SIZE 16 /* Must be power of two */ 143#define TX_RING_MOD_MASK 15 /* for this to work */ 144 145#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE) 146#error "FEC: descriptor ring size constants too large" 147#endif 148 149/* Interrupt events/masks. 150*/ 151#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */ 152#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */ 153#define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */ 154#define FEC_ENET_GRA ((uint)0x10000000) /* Graceful stop complete */ 155#define FEC_ENET_TXF ((uint)0x08000000) /* Full frame transmitted */ 156#define FEC_ENET_TXB ((uint)0x04000000) /* A buffer was transmitted */ 157#define FEC_ENET_RXF ((uint)0x02000000) /* Full frame received */ 158#define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */ 159#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */ 160#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */ 161 162/* The FEC stores dest/src/type, data, and checksum for receive packets. 163 */ 164#define PKT_MAXBUF_SIZE 1518 165#define PKT_MINBUF_SIZE 64 166#define PKT_MAXBLR_SIZE 1520 167 168 169/* 170 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame 171 * size bits. Other FEC hardware does not, so we need to take that into 172 * account when setting it. 173 */ 174#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 175 defined(CONFIG_M520x) || defined(CONFIG_M532x) 176#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) 177#else 178#define OPT_FRAME_SIZE 0 179#endif 180 181/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and 182 * tx_bd_base always point to the base of the buffer descriptors. The 183 * cur_rx and cur_tx point to the currently available buffer. 184 * The dirty_tx tracks the current buffer that is being sent by the 185 * controller. The cur_tx and dirty_tx are equal under both completely 186 * empty and completely full conditions. The empty/ready indicator in 187 * the buffer descriptor determines the actual condition. 188 */ 189struct fec_enet_private { 190 /* Hardware registers of the FEC device */ 191 volatile fec_t *hwp; 192 193 struct net_device *netdev; 194 195 /* The saved address of a sent-in-place packet/buffer, for skfree(). */ 196 unsigned char *tx_bounce[TX_RING_SIZE]; 197 struct sk_buff* tx_skbuff[TX_RING_SIZE]; 198 ushort skb_cur; 199 ushort skb_dirty; 200 201 /* CPM dual port RAM relative addresses. 202 */ 203 cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */ 204 cbd_t *tx_bd_base; 205 cbd_t *cur_rx, *cur_tx; /* The next free ring entry */ 206 cbd_t *dirty_tx; /* The ring entries to be free()ed. */ 207 uint tx_full; 208 spinlock_t lock; 209 210 uint phy_id; 211 uint phy_id_done; 212 uint phy_status; 213 uint phy_speed; 214 phy_info_t const *phy; 215 struct work_struct phy_task; 216 217 uint sequence_done; 218 uint mii_phy_task_queued; 219 220 uint phy_addr; 221 222 int index; 223 int opened; 224 int link; 225 int old_link; 226 int full_duplex; 227}; 228 229static int fec_enet_open(struct net_device *dev); 230static int fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev); 231static void fec_enet_mii(struct net_device *dev); 232static irqreturn_t fec_enet_interrupt(int irq, void * dev_id); 233static void fec_enet_tx(struct net_device *dev); 234static void fec_enet_rx(struct net_device *dev); 235static int fec_enet_close(struct net_device *dev); 236static void set_multicast_list(struct net_device *dev); 237static void fec_restart(struct net_device *dev, int duplex); 238static void fec_stop(struct net_device *dev); 239static void fec_set_mac_address(struct net_device *dev); 240 241 242/* MII processing. We keep this as simple as possible. Requests are 243 * placed on the list (if there is room). When the request is finished 244 * by the MII, an optional function may be called. 245 */ 246typedef struct mii_list { 247 uint mii_regval; 248 void (*mii_func)(uint val, struct net_device *dev); 249 struct mii_list *mii_next; 250} mii_list_t; 251 252#define NMII 20 253static mii_list_t mii_cmds[NMII]; 254static mii_list_t *mii_free; 255static mii_list_t *mii_head; 256static mii_list_t *mii_tail; 257 258static int mii_queue(struct net_device *dev, int request, 259 void (*func)(uint, struct net_device *)); 260 261/* Make MII read/write commands for the FEC. 262*/ 263#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18)) 264#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | \ 265 (VAL & 0xffff)) 266#define mk_mii_end 0 267 268/* Transmitter timeout. 269*/ 270#define TX_TIMEOUT (2*HZ) 271 272/* Register definitions for the PHY. 273*/ 274 275#define MII_REG_CR 0 /* Control Register */ 276#define MII_REG_SR 1 /* Status Register */ 277#define MII_REG_PHYIR1 2 /* PHY Identification Register 1 */ 278#define MII_REG_PHYIR2 3 /* PHY Identification Register 2 */ 279#define MII_REG_ANAR 4 /* A-N Advertisement Register */ 280#define MII_REG_ANLPAR 5 /* A-N Link Partner Ability Register */ 281#define MII_REG_ANER 6 /* A-N Expansion Register */ 282#define MII_REG_ANNPTR 7 /* A-N Next Page Transmit Register */ 283#define MII_REG_ANLPRNPR 8 /* A-N Link Partner Received Next Page Reg. */ 284 285/* values for phy_status */ 286 287#define PHY_CONF_ANE 0x0001 /* 1 auto-negotiation enabled */ 288#define PHY_CONF_LOOP 0x0002 /* 1 loopback mode enabled */ 289#define PHY_CONF_SPMASK 0x00f0 /* mask for speed */ 290#define PHY_CONF_10HDX 0x0010 /* 10 Mbit half duplex supported */ 291#define PHY_CONF_10FDX 0x0020 /* 10 Mbit full duplex supported */ 292#define PHY_CONF_100HDX 0x0040 /* 100 Mbit half duplex supported */ 293#define PHY_CONF_100FDX 0x0080 /* 100 Mbit full duplex supported */ 294 295#define PHY_STAT_LINK 0x0100 /* 1 up - 0 down */ 296#define PHY_STAT_FAULT 0x0200 /* 1 remote fault */ 297#define PHY_STAT_ANC 0x0400 /* 1 auto-negotiation complete */ 298#define PHY_STAT_SPMASK 0xf000 /* mask for speed */ 299#define PHY_STAT_10HDX 0x1000 /* 10 Mbit half duplex selected */ 300#define PHY_STAT_10FDX 0x2000 /* 10 Mbit full duplex selected */ 301#define PHY_STAT_100HDX 0x4000 /* 100 Mbit half duplex selected */ 302#define PHY_STAT_100FDX 0x8000 /* 100 Mbit full duplex selected */ 303 304 305static int 306fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) 307{ 308 struct fec_enet_private *fep; 309 volatile fec_t *fecp; 310 volatile cbd_t *bdp; 311 unsigned short status; 312 313 fep = netdev_priv(dev); 314 fecp = (volatile fec_t*)dev->base_addr; 315 316 if (!fep->link) { 317 /* Link is down or autonegotiation is in progress. */ 318 return 1; 319 } 320 321 /* Fill in a Tx ring entry */ 322 bdp = fep->cur_tx; 323 324 status = bdp->cbd_sc; 325#ifndef final_version 326 if (status & BD_ENET_TX_READY) { 327 /* Ooops. All transmit buffers are full. Bail out. 328 * This should not happen, since dev->tbusy should be set. 329 */ 330 printk("%s: tx queue full!.\n", dev->name); 331 return 1; 332 } 333#endif 334 335 /* Clear all of the status flags. 336 */ 337 status &= ~BD_ENET_TX_STATS; 338 339 /* Set buffer length and buffer pointer. 340 */ 341 bdp->cbd_bufaddr = __pa(skb->data); 342 bdp->cbd_datlen = skb->len; 343 344 /* 345 * On some FEC implementations data must be aligned on 346 * 4-byte boundaries. Use bounce buffers to copy data 347 * and get it aligned. Ugh. 348 */ 349 if (bdp->cbd_bufaddr & 0x3) { 350 unsigned int index; 351 index = bdp - fep->tx_bd_base; 352 memcpy(fep->tx_bounce[index], (void *) bdp->cbd_bufaddr, bdp->cbd_datlen); 353 bdp->cbd_bufaddr = __pa(fep->tx_bounce[index]); 354 } 355 356 /* Save skb pointer. 357 */ 358 fep->tx_skbuff[fep->skb_cur] = skb; 359 360 dev->stats.tx_bytes += skb->len; 361 fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK; 362 363 /* Push the data cache so the CPM does not get stale memory 364 * data. 365 */ 366 flush_dcache_range((unsigned long)skb->data, 367 (unsigned long)skb->data + skb->len); 368 369 spin_lock_irq(&fep->lock); 370 371 /* Send it on its way. Tell FEC it's ready, interrupt when done, 372 * it's the last BD of the frame, and to put the CRC on the end. 373 */ 374 375 status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR 376 | BD_ENET_TX_LAST | BD_ENET_TX_TC); 377 bdp->cbd_sc = status; 378 379 dev->trans_start = jiffies; 380 381 /* Trigger transmission start */ 382 fecp->fec_x_des_active = 0; 383 384 /* If this was the last BD in the ring, start at the beginning again. 385 */ 386 if (status & BD_ENET_TX_WRAP) { 387 bdp = fep->tx_bd_base; 388 } else { 389 bdp++; 390 } 391 392 if (bdp == fep->dirty_tx) { 393 fep->tx_full = 1; 394 netif_stop_queue(dev); 395 } 396 397 fep->cur_tx = (cbd_t *)bdp; 398 399 spin_unlock_irq(&fep->lock); 400 401 return 0; 402} 403 404static void 405fec_timeout(struct net_device *dev) 406{ 407 struct fec_enet_private *fep = netdev_priv(dev); 408 409 printk("%s: transmit timed out.\n", dev->name); 410 dev->stats.tx_errors++; 411#ifndef final_version 412 { 413 int i; 414 cbd_t *bdp; 415 416 printk("Ring data dump: cur_tx %lx%s, dirty_tx %lx cur_rx: %lx\n", 417 (unsigned long)fep->cur_tx, fep->tx_full ? " (full)" : "", 418 (unsigned long)fep->dirty_tx, 419 (unsigned long)fep->cur_rx); 420 421 bdp = fep->tx_bd_base; 422 printk(" tx: %u buffers\n", TX_RING_SIZE); 423 for (i = 0 ; i < TX_RING_SIZE; i++) { 424 printk(" %08x: %04x %04x %08x\n", 425 (uint) bdp, 426 bdp->cbd_sc, 427 bdp->cbd_datlen, 428 (int) bdp->cbd_bufaddr); 429 bdp++; 430 } 431 432 bdp = fep->rx_bd_base; 433 printk(" rx: %lu buffers\n", (unsigned long) RX_RING_SIZE); 434 for (i = 0 ; i < RX_RING_SIZE; i++) { 435 printk(" %08x: %04x %04x %08x\n", 436 (uint) bdp, 437 bdp->cbd_sc, 438 bdp->cbd_datlen, 439 (int) bdp->cbd_bufaddr); 440 bdp++; 441 } 442 } 443#endif 444 fec_restart(dev, fep->full_duplex); 445 netif_wake_queue(dev); 446} 447 448/* The interrupt handler. 449 * This is called from the MPC core interrupt. 450 */ 451static irqreturn_t 452fec_enet_interrupt(int irq, void * dev_id) 453{ 454 struct net_device *dev = dev_id; 455 volatile fec_t *fecp; 456 uint int_events; 457 int handled = 0; 458 459 fecp = (volatile fec_t*)dev->base_addr; 460 461 /* Get the interrupt events that caused us to be here. 462 */ 463 while ((int_events = fecp->fec_ievent) != 0) { 464 fecp->fec_ievent = int_events; 465 466 /* Handle receive event in its own function. 467 */ 468 if (int_events & FEC_ENET_RXF) { 469 handled = 1; 470 fec_enet_rx(dev); 471 } 472 473 /* Transmit OK, or non-fatal error. Update the buffer 474 descriptors. FEC handles all errors, we just discover 475 them as part of the transmit process. 476 */ 477 if (int_events & FEC_ENET_TXF) { 478 handled = 1; 479 fec_enet_tx(dev); 480 } 481 482 if (int_events & FEC_ENET_MII) { 483 handled = 1; 484 fec_enet_mii(dev); 485 } 486 487 } 488 return IRQ_RETVAL(handled); 489} 490 491 492static void 493fec_enet_tx(struct net_device *dev) 494{ 495 struct fec_enet_private *fep; 496 volatile cbd_t *bdp; 497 unsigned short status; 498 struct sk_buff *skb; 499 500 fep = netdev_priv(dev); 501 spin_lock(&fep->lock); 502 bdp = fep->dirty_tx; 503 504 while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) { 505 if (bdp == fep->cur_tx && fep->tx_full == 0) break; 506 507 skb = fep->tx_skbuff[fep->skb_dirty]; 508 /* Check for errors. */ 509 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | 510 BD_ENET_TX_RL | BD_ENET_TX_UN | 511 BD_ENET_TX_CSL)) { 512 dev->stats.tx_errors++; 513 if (status & BD_ENET_TX_HB) /* No heartbeat */ 514 dev->stats.tx_heartbeat_errors++; 515 if (status & BD_ENET_TX_LC) /* Late collision */ 516 dev->stats.tx_window_errors++; 517 if (status & BD_ENET_TX_RL) /* Retrans limit */ 518 dev->stats.tx_aborted_errors++; 519 if (status & BD_ENET_TX_UN) /* Underrun */ 520 dev->stats.tx_fifo_errors++; 521 if (status & BD_ENET_TX_CSL) /* Carrier lost */ 522 dev->stats.tx_carrier_errors++; 523 } else { 524 dev->stats.tx_packets++; 525 } 526 527#ifndef final_version 528 if (status & BD_ENET_TX_READY) 529 printk("HEY! Enet xmit interrupt and TX_READY.\n"); 530#endif 531 /* Deferred means some collisions occurred during transmit, 532 * but we eventually sent the packet OK. 533 */ 534 if (status & BD_ENET_TX_DEF) 535 dev->stats.collisions++; 536 537 /* Free the sk buffer associated with this last transmit. 538 */ 539 dev_kfree_skb_any(skb); 540 fep->tx_skbuff[fep->skb_dirty] = NULL; 541 fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK; 542 543 /* Update pointer to next buffer descriptor to be transmitted. 544 */ 545 if (status & BD_ENET_TX_WRAP) 546 bdp = fep->tx_bd_base; 547 else 548 bdp++; 549 550 /* Since we have freed up a buffer, the ring is no longer 551 * full. 552 */ 553 if (fep->tx_full) { 554 fep->tx_full = 0; 555 if (netif_queue_stopped(dev)) 556 netif_wake_queue(dev); 557 } 558 } 559 fep->dirty_tx = (cbd_t *)bdp; 560 spin_unlock(&fep->lock); 561} 562 563 564/* During a receive, the cur_rx points to the current incoming buffer. 565 * When we update through the ring, if the next incoming buffer has 566 * not been given to the system, we just set the empty indicator, 567 * effectively tossing the packet. 568 */ 569static void 570fec_enet_rx(struct net_device *dev) 571{ 572 struct fec_enet_private *fep; 573 volatile fec_t *fecp; 574 volatile cbd_t *bdp; 575 unsigned short status; 576 struct sk_buff *skb; 577 ushort pkt_len; 578 __u8 *data; 579 580#ifdef CONFIG_M532x 581 flush_cache_all(); 582#endif 583 584 fep = netdev_priv(dev); 585 fecp = (volatile fec_t*)dev->base_addr; 586 587 /* First, grab all of the stats for the incoming packet. 588 * These get messed up if we get called due to a busy condition. 589 */ 590 bdp = fep->cur_rx; 591 592while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { 593 594#ifndef final_version 595 /* Since we have allocated space to hold a complete frame, 596 * the last indicator should be set. 597 */ 598 if ((status & BD_ENET_RX_LAST) == 0) 599 printk("FEC ENET: rcv is not +last\n"); 600#endif 601 602 if (!fep->opened) 603 goto rx_processing_done; 604 605 /* Check for errors. */ 606 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | 607 BD_ENET_RX_CR | BD_ENET_RX_OV)) { 608 dev->stats.rx_errors++; 609 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) { 610 /* Frame too long or too short. */ 611 dev->stats.rx_length_errors++; 612 } 613 if (status & BD_ENET_RX_NO) /* Frame alignment */ 614 dev->stats.rx_frame_errors++; 615 if (status & BD_ENET_RX_CR) /* CRC Error */ 616 dev->stats.rx_crc_errors++; 617 if (status & BD_ENET_RX_OV) /* FIFO overrun */ 618 dev->stats.rx_fifo_errors++; 619 } 620 621 /* Report late collisions as a frame error. 622 * On this error, the BD is closed, but we don't know what we 623 * have in the buffer. So, just drop this frame on the floor. 624 */ 625 if (status & BD_ENET_RX_CL) { 626 dev->stats.rx_errors++; 627 dev->stats.rx_frame_errors++; 628 goto rx_processing_done; 629 } 630 631 /* Process the incoming frame. 632 */ 633 dev->stats.rx_packets++; 634 pkt_len = bdp->cbd_datlen; 635 dev->stats.rx_bytes += pkt_len; 636 data = (__u8*)__va(bdp->cbd_bufaddr); 637 638 /* This does 16 byte alignment, exactly what we need. 639 * The packet length includes FCS, but we don't want to 640 * include that when passing upstream as it messes up 641 * bridging applications. 642 */ 643 skb = dev_alloc_skb(pkt_len-4); 644 645 if (skb == NULL) { 646 printk("%s: Memory squeeze, dropping packet.\n", dev->name); 647 dev->stats.rx_dropped++; 648 } else { 649 skb_put(skb,pkt_len-4); /* Make room */ 650 skb_copy_to_linear_data(skb, data, pkt_len-4); 651 skb->protocol=eth_type_trans(skb,dev); 652 netif_rx(skb); 653 } 654 rx_processing_done: 655 656 /* Clear the status flags for this buffer. 657 */ 658 status &= ~BD_ENET_RX_STATS; 659 660 /* Mark the buffer empty. 661 */ 662 status |= BD_ENET_RX_EMPTY; 663 bdp->cbd_sc = status; 664 665 /* Update BD pointer to next entry. 666 */ 667 if (status & BD_ENET_RX_WRAP) 668 bdp = fep->rx_bd_base; 669 else 670 bdp++; 671 672#if 1 673 /* Doing this here will keep the FEC running while we process 674 * incoming frames. On a heavily loaded network, we should be 675 * able to keep up at the expense of system resources. 676 */ 677 fecp->fec_r_des_active = 0; 678#endif 679 } /* while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) */ 680 fep->cur_rx = (cbd_t *)bdp; 681 682#if 0 683 /* Doing this here will allow us to process all frames in the 684 * ring before the FEC is allowed to put more there. On a heavily 685 * loaded network, some frames may be lost. Unfortunately, this 686 * increases the interrupt overhead since we can potentially work 687 * our way back to the interrupt return only to come right back 688 * here. 689 */ 690 fecp->fec_r_des_active = 0; 691#endif 692} 693 694 695/* called from interrupt context */ 696static void 697fec_enet_mii(struct net_device *dev) 698{ 699 struct fec_enet_private *fep; 700 volatile fec_t *ep; 701 mii_list_t *mip; 702 uint mii_reg; 703 704 fep = netdev_priv(dev); 705 ep = fep->hwp; 706 mii_reg = ep->fec_mii_data; 707 708 spin_lock(&fep->lock); 709 710 if ((mip = mii_head) == NULL) { 711 printk("MII and no head!\n"); 712 goto unlock; 713 } 714 715 if (mip->mii_func != NULL) 716 (*(mip->mii_func))(mii_reg, dev); 717 718 mii_head = mip->mii_next; 719 mip->mii_next = mii_free; 720 mii_free = mip; 721 722 if ((mip = mii_head) != NULL) 723 ep->fec_mii_data = mip->mii_regval; 724 725unlock: 726 spin_unlock(&fep->lock); 727} 728 729static int 730mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_device *)) 731{ 732 struct fec_enet_private *fep; 733 unsigned long flags; 734 mii_list_t *mip; 735 int retval; 736 737 /* Add PHY address to register command. 738 */ 739 fep = netdev_priv(dev); 740 regval |= fep->phy_addr << 23; 741 742 retval = 0; 743 744 spin_lock_irqsave(&fep->lock,flags); 745 746 if ((mip = mii_free) != NULL) { 747 mii_free = mip->mii_next; 748 mip->mii_regval = regval; 749 mip->mii_func = func; 750 mip->mii_next = NULL; 751 if (mii_head) { 752 mii_tail->mii_next = mip; 753 mii_tail = mip; 754 } else { 755 mii_head = mii_tail = mip; 756 fep->hwp->fec_mii_data = regval; 757 } 758 } else { 759 retval = 1; 760 } 761 762 spin_unlock_irqrestore(&fep->lock,flags); 763 764 return(retval); 765} 766 767static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c) 768{ 769 if(!c) 770 return; 771 772 for (; c->mii_data != mk_mii_end; c++) 773 mii_queue(dev, c->mii_data, c->funct); 774} 775 776static void mii_parse_sr(uint mii_reg, struct net_device *dev) 777{ 778 struct fec_enet_private *fep = netdev_priv(dev); 779 volatile uint *s = &(fep->phy_status); 780 uint status; 781 782 status = *s & ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC); 783 784 if (mii_reg & 0x0004) 785 status |= PHY_STAT_LINK; 786 if (mii_reg & 0x0010) 787 status |= PHY_STAT_FAULT; 788 if (mii_reg & 0x0020) 789 status |= PHY_STAT_ANC; 790 *s = status; 791} 792 793static void mii_parse_cr(uint mii_reg, struct net_device *dev) 794{ 795 struct fec_enet_private *fep = netdev_priv(dev); 796 volatile uint *s = &(fep->phy_status); 797 uint status; 798 799 status = *s & ~(PHY_CONF_ANE | PHY_CONF_LOOP); 800 801 if (mii_reg & 0x1000) 802 status |= PHY_CONF_ANE; 803 if (mii_reg & 0x4000) 804 status |= PHY_CONF_LOOP; 805 *s = status; 806} 807 808static void mii_parse_anar(uint mii_reg, struct net_device *dev) 809{ 810 struct fec_enet_private *fep = netdev_priv(dev); 811 volatile uint *s = &(fep->phy_status); 812 uint status; 813 814 status = *s & ~(PHY_CONF_SPMASK); 815 816 if (mii_reg & 0x0020) 817 status |= PHY_CONF_10HDX; 818 if (mii_reg & 0x0040) 819 status |= PHY_CONF_10FDX; 820 if (mii_reg & 0x0080) 821 status |= PHY_CONF_100HDX; 822 if (mii_reg & 0x00100) 823 status |= PHY_CONF_100FDX; 824 *s = status; 825} 826 827/* ------------------------------------------------------------------------- */ 828/* The Level one LXT970 is used by many boards */ 829 830#define MII_LXT970_MIRROR 16 /* Mirror register */ 831#define MII_LXT970_IER 17 /* Interrupt Enable Register */ 832#define MII_LXT970_ISR 18 /* Interrupt Status Register */ 833#define MII_LXT970_CONFIG 19 /* Configuration Register */ 834#define MII_LXT970_CSR 20 /* Chip Status Register */ 835 836static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev) 837{ 838 struct fec_enet_private *fep = netdev_priv(dev); 839 volatile uint *s = &(fep->phy_status); 840 uint status; 841 842 status = *s & ~(PHY_STAT_SPMASK); 843 if (mii_reg & 0x0800) { 844 if (mii_reg & 0x1000) 845 status |= PHY_STAT_100FDX; 846 else 847 status |= PHY_STAT_100HDX; 848 } else { 849 if (mii_reg & 0x1000) 850 status |= PHY_STAT_10FDX; 851 else 852 status |= PHY_STAT_10HDX; 853 } 854 *s = status; 855} 856 857static phy_cmd_t const phy_cmd_lxt970_config[] = { 858 { mk_mii_read(MII_REG_CR), mii_parse_cr }, 859 { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, 860 { mk_mii_end, } 861 }; 862static phy_cmd_t const phy_cmd_lxt970_startup[] = { /* enable interrupts */ 863 { mk_mii_write(MII_LXT970_IER, 0x0002), NULL }, 864 { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ 865 { mk_mii_end, } 866 }; 867static phy_cmd_t const phy_cmd_lxt970_ack_int[] = { 868 /* read SR and ISR to acknowledge */ 869 { mk_mii_read(MII_REG_SR), mii_parse_sr }, 870 { mk_mii_read(MII_LXT970_ISR), NULL }, 871 872 /* find out the current status */ 873 { mk_mii_read(MII_LXT970_CSR), mii_parse_lxt970_csr }, 874 { mk_mii_end, } 875 }; 876static phy_cmd_t const phy_cmd_lxt970_shutdown[] = { /* disable interrupts */ 877 { mk_mii_write(MII_LXT970_IER, 0x0000), NULL }, 878 { mk_mii_end, } 879 }; 880static phy_info_t const phy_info_lxt970 = { 881 .id = 0x07810000, 882 .name = "LXT970", 883 .config = phy_cmd_lxt970_config, 884 .startup = phy_cmd_lxt970_startup, 885 .ack_int = phy_cmd_lxt970_ack_int, 886 .shutdown = phy_cmd_lxt970_shutdown 887}; 888 889/* ------------------------------------------------------------------------- */ 890/* The Level one LXT971 is used on some of my custom boards */ 891 892/* register definitions for the 971 */ 893 894#define MII_LXT971_PCR 16 /* Port Control Register */ 895#define MII_LXT971_SR2 17 /* Status Register 2 */ 896#define MII_LXT971_IER 18 /* Interrupt Enable Register */ 897#define MII_LXT971_ISR 19 /* Interrupt Status Register */ 898#define MII_LXT971_LCR 20 /* LED Control Register */ 899#define MII_LXT971_TCR 30 /* Transmit Control Register */ 900 901/* 902 * I had some nice ideas of running the MDIO faster... 903 * The 971 should support 8MHz and I tried it, but things acted really 904 * weird, so 2.5 MHz ought to be enough for anyone... 905 */ 906 907static void mii_parse_lxt971_sr2(uint mii_reg, struct net_device *dev) 908{ 909 struct fec_enet_private *fep = netdev_priv(dev); 910 volatile uint *s = &(fep->phy_status); 911 uint status; 912 913 status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC); 914 915 if (mii_reg & 0x0400) { 916 fep->link = 1; 917 status |= PHY_STAT_LINK; 918 } else { 919 fep->link = 0; 920 } 921 if (mii_reg & 0x0080) 922 status |= PHY_STAT_ANC; 923 if (mii_reg & 0x4000) { 924 if (mii_reg & 0x0200) 925 status |= PHY_STAT_100FDX; 926 else 927 status |= PHY_STAT_100HDX; 928 } else { 929 if (mii_reg & 0x0200) 930 status |= PHY_STAT_10FDX; 931 else 932 status |= PHY_STAT_10HDX; 933 } 934 if (mii_reg & 0x0008) 935 status |= PHY_STAT_FAULT; 936 937 *s = status; 938} 939 940static phy_cmd_t const phy_cmd_lxt971_config[] = { 941 /* limit to 10MBit because my prototype board 942 * doesn't work with 100. */ 943 { mk_mii_read(MII_REG_CR), mii_parse_cr }, 944 { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, 945 { mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 }, 946 { mk_mii_end, } 947 }; 948static phy_cmd_t const phy_cmd_lxt971_startup[] = { /* enable interrupts */ 949 { mk_mii_write(MII_LXT971_IER, 0x00f2), NULL }, 950 { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ 951 { mk_mii_write(MII_LXT971_LCR, 0xd422), NULL }, /* LED config */ 952 /* Somehow does the 971 tell me that the link is down 953 * the first read after power-up. 954 * read here to get a valid value in ack_int */ 955 { mk_mii_read(MII_REG_SR), mii_parse_sr }, 956 { mk_mii_end, } 957 }; 958static phy_cmd_t const phy_cmd_lxt971_ack_int[] = { 959 /* acknowledge the int before reading status ! */ 960 { mk_mii_read(MII_LXT971_ISR), NULL }, 961 /* find out the current status */ 962 { mk_mii_read(MII_REG_SR), mii_parse_sr }, 963 { mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 }, 964 { mk_mii_end, } 965 }; 966static phy_cmd_t const phy_cmd_lxt971_shutdown[] = { /* disable interrupts */ 967 { mk_mii_write(MII_LXT971_IER, 0x0000), NULL }, 968 { mk_mii_end, } 969 }; 970static phy_info_t const phy_info_lxt971 = { 971 .id = 0x0001378e, 972 .name = "LXT971", 973 .config = phy_cmd_lxt971_config, 974 .startup = phy_cmd_lxt971_startup, 975 .ack_int = phy_cmd_lxt971_ack_int, 976 .shutdown = phy_cmd_lxt971_shutdown 977}; 978 979/* ------------------------------------------------------------------------- */ 980/* The Quality Semiconductor QS6612 is used on the RPX CLLF */ 981 982/* register definitions */ 983 984#define MII_QS6612_MCR 17 /* Mode Control Register */ 985#define MII_QS6612_FTR 27 /* Factory Test Register */ 986#define MII_QS6612_MCO 28 /* Misc. Control Register */ 987#define MII_QS6612_ISR 29 /* Interrupt Source Register */ 988#define MII_QS6612_IMR 30 /* Interrupt Mask Register */ 989#define MII_QS6612_PCR 31 /* 100BaseTx PHY Control Reg. */ 990 991static void mii_parse_qs6612_pcr(uint mii_reg, struct net_device *dev) 992{ 993 struct fec_enet_private *fep = netdev_priv(dev); 994 volatile uint *s = &(fep->phy_status); 995 uint status; 996 997 status = *s & ~(PHY_STAT_SPMASK); 998 999 switch((mii_reg >> 2) & 7) { 1000 case 1: status |= PHY_STAT_10HDX; break; 1001 case 2: status |= PHY_STAT_100HDX; break; 1002 case 5: status |= PHY_STAT_10FDX; break; 1003 case 6: status |= PHY_STAT_100FDX; break; 1004} 1005 1006 *s = status; 1007} 1008 1009static phy_cmd_t const phy_cmd_qs6612_config[] = { 1010 /* The PHY powers up isolated on the RPX, 1011 * so send a command to allow operation. 1012 */ 1013 { mk_mii_write(MII_QS6612_PCR, 0x0dc0), NULL }, 1014 1015 /* parse cr and anar to get some info */ 1016 { mk_mii_read(MII_REG_CR), mii_parse_cr }, 1017 { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, 1018 { mk_mii_end, } 1019 }; 1020static phy_cmd_t const phy_cmd_qs6612_startup[] = { /* enable interrupts */ 1021 { mk_mii_write(MII_QS6612_IMR, 0x003a), NULL }, 1022 { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ 1023 { mk_mii_end, } 1024 }; 1025static phy_cmd_t const phy_cmd_qs6612_ack_int[] = { 1026 /* we need to read ISR, SR and ANER to acknowledge */ 1027 { mk_mii_read(MII_QS6612_ISR), NULL }, 1028 { mk_mii_read(MII_REG_SR), mii_parse_sr }, 1029 { mk_mii_read(MII_REG_ANER), NULL }, 1030 1031 /* read pcr to get info */ 1032 { mk_mii_read(MII_QS6612_PCR), mii_parse_qs6612_pcr }, 1033 { mk_mii_end, } 1034 }; 1035static phy_cmd_t const phy_cmd_qs6612_shutdown[] = { /* disable interrupts */ 1036 { mk_mii_write(MII_QS6612_IMR, 0x0000), NULL }, 1037 { mk_mii_end, } 1038 }; 1039static phy_info_t const phy_info_qs6612 = { 1040 .id = 0x00181440, 1041 .name = "QS6612", 1042 .config = phy_cmd_qs6612_config, 1043 .startup = phy_cmd_qs6612_startup, 1044 .ack_int = phy_cmd_qs6612_ack_int, 1045 .shutdown = phy_cmd_qs6612_shutdown 1046}; 1047 1048/* ------------------------------------------------------------------------- */ 1049/* AMD AM79C874 phy */ 1050 1051/* register definitions for the 874 */ 1052 1053#define MII_AM79C874_MFR 16 /* Miscellaneous Feature Register */ 1054#define MII_AM79C874_ICSR 17 /* Interrupt/Status Register */ 1055#define MII_AM79C874_DR 18 /* Diagnostic Register */ 1056#define MII_AM79C874_PMLR 19 /* Power and Loopback Register */ 1057#define MII_AM79C874_MCR 21 /* ModeControl Register */ 1058#define MII_AM79C874_DC 23 /* Disconnect Counter */ 1059#define MII_AM79C874_REC 24 /* Recieve Error Counter */ 1060 1061static void mii_parse_am79c874_dr(uint mii_reg, struct net_device *dev) 1062{ 1063 struct fec_enet_private *fep = netdev_priv(dev); 1064 volatile uint *s = &(fep->phy_status); 1065 uint status; 1066 1067 status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_ANC); 1068 1069 if (mii_reg & 0x0080) 1070 status |= PHY_STAT_ANC; 1071 if (mii_reg & 0x0400) 1072 status |= ((mii_reg & 0x0800) ? PHY_STAT_100FDX : PHY_STAT_100HDX); 1073 else 1074 status |= ((mii_reg & 0x0800) ? PHY_STAT_10FDX : PHY_STAT_10HDX); 1075 1076 *s = status; 1077} 1078 1079static phy_cmd_t const phy_cmd_am79c874_config[] = { 1080 { mk_mii_read(MII_REG_CR), mii_parse_cr }, 1081 { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, 1082 { mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr }, 1083 { mk_mii_end, } 1084 }; 1085static phy_cmd_t const phy_cmd_am79c874_startup[] = { /* enable interrupts */ 1086 { mk_mii_write(MII_AM79C874_ICSR, 0xff00), NULL }, 1087 { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ 1088 { mk_mii_read(MII_REG_SR), mii_parse_sr }, 1089 { mk_mii_end, } 1090 }; 1091static phy_cmd_t const phy_cmd_am79c874_ack_int[] = { 1092 /* find out the current status */ 1093 { mk_mii_read(MII_REG_SR), mii_parse_sr }, 1094 { mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr }, 1095 /* we only need to read ISR to acknowledge */ 1096 { mk_mii_read(MII_AM79C874_ICSR), NULL }, 1097 { mk_mii_end, } 1098 }; 1099static phy_cmd_t const phy_cmd_am79c874_shutdown[] = { /* disable interrupts */ 1100 { mk_mii_write(MII_AM79C874_ICSR, 0x0000), NULL }, 1101 { mk_mii_end, } 1102 }; 1103static phy_info_t const phy_info_am79c874 = { 1104 .id = 0x00022561, 1105 .name = "AM79C874", 1106 .config = phy_cmd_am79c874_config, 1107 .startup = phy_cmd_am79c874_startup, 1108 .ack_int = phy_cmd_am79c874_ack_int, 1109 .shutdown = phy_cmd_am79c874_shutdown 1110}; 1111 1112 1113/* ------------------------------------------------------------------------- */ 1114/* Kendin KS8721BL phy */ 1115 1116/* register definitions for the 8721 */ 1117 1118#define MII_KS8721BL_RXERCR 21 1119#define MII_KS8721BL_ICSR 22 1120#define MII_KS8721BL_PHYCR 31 1121 1122static phy_cmd_t const phy_cmd_ks8721bl_config[] = { 1123 { mk_mii_read(MII_REG_CR), mii_parse_cr }, 1124 { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, 1125 { mk_mii_end, } 1126 }; 1127static phy_cmd_t const phy_cmd_ks8721bl_startup[] = { /* enable interrupts */ 1128 { mk_mii_write(MII_KS8721BL_ICSR, 0xff00), NULL }, 1129 { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ 1130 { mk_mii_read(MII_REG_SR), mii_parse_sr }, 1131 { mk_mii_end, } 1132 }; 1133static phy_cmd_t const phy_cmd_ks8721bl_ack_int[] = { 1134 /* find out the current status */ 1135 { mk_mii_read(MII_REG_SR), mii_parse_sr }, 1136 /* we only need to read ISR to acknowledge */ 1137 { mk_mii_read(MII_KS8721BL_ICSR), NULL }, 1138 { mk_mii_end, } 1139 }; 1140static phy_cmd_t const phy_cmd_ks8721bl_shutdown[] = { /* disable interrupts */ 1141 { mk_mii_write(MII_KS8721BL_ICSR, 0x0000), NULL }, 1142 { mk_mii_end, } 1143 }; 1144static phy_info_t const phy_info_ks8721bl = { 1145 .id = 0x00022161, 1146 .name = "KS8721BL", 1147 .config = phy_cmd_ks8721bl_config, 1148 .startup = phy_cmd_ks8721bl_startup, 1149 .ack_int = phy_cmd_ks8721bl_ack_int, 1150 .shutdown = phy_cmd_ks8721bl_shutdown 1151}; 1152 1153/* ------------------------------------------------------------------------- */ 1154/* register definitions for the DP83848 */ 1155 1156#define MII_DP8384X_PHYSTST 16 /* PHY Status Register */ 1157 1158static void mii_parse_dp8384x_sr2(uint mii_reg, struct net_device *dev) 1159{ 1160 struct fec_enet_private *fep = dev->priv; 1161 volatile uint *s = &(fep->phy_status); 1162 1163 *s &= ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC); 1164 1165 /* Link up */ 1166 if (mii_reg & 0x0001) { 1167 fep->link = 1; 1168 *s |= PHY_STAT_LINK; 1169 } else 1170 fep->link = 0; 1171 /* Status of link */ 1172 if (mii_reg & 0x0010) /* Autonegotioation complete */ 1173 *s |= PHY_STAT_ANC; 1174 if (mii_reg & 0x0002) { /* 10MBps? */ 1175 if (mii_reg & 0x0004) /* Full Duplex? */ 1176 *s |= PHY_STAT_10FDX; 1177 else 1178 *s |= PHY_STAT_10HDX; 1179 } else { /* 100 Mbps? */ 1180 if (mii_reg & 0x0004) /* Full Duplex? */ 1181 *s |= PHY_STAT_100FDX; 1182 else 1183 *s |= PHY_STAT_100HDX; 1184 } 1185 if (mii_reg & 0x0008) 1186 *s |= PHY_STAT_FAULT; 1187} 1188 1189static phy_info_t phy_info_dp83848= { 1190 0x020005c9, 1191 "DP83848", 1192 1193 (const phy_cmd_t []) { /* config */ 1194 { mk_mii_read(MII_REG_CR), mii_parse_cr }, 1195 { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, 1196 { mk_mii_read(MII_DP8384X_PHYSTST), mii_parse_dp8384x_sr2 }, 1197 { mk_mii_end, } 1198 }, 1199 (const phy_cmd_t []) { /* startup - enable interrupts */ 1200 { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ 1201 { mk_mii_read(MII_REG_SR), mii_parse_sr }, 1202 { mk_mii_end, } 1203 }, 1204 (const phy_cmd_t []) { /* ack_int - never happens, no interrupt */ 1205 { mk_mii_end, } 1206 }, 1207 (const phy_cmd_t []) { /* shutdown */ 1208 { mk_mii_end, } 1209 }, 1210}; 1211 1212/* ------------------------------------------------------------------------- */ 1213 1214static phy_info_t const * const phy_info[] = { 1215 &phy_info_lxt970, 1216 &phy_info_lxt971, 1217 &phy_info_qs6612, 1218 &phy_info_am79c874, 1219 &phy_info_ks8721bl, 1220 &phy_info_dp83848, 1221 NULL 1222}; 1223 1224/* ------------------------------------------------------------------------- */ 1225#if !defined(CONFIG_M532x) 1226#ifdef CONFIG_RPXCLASSIC 1227static void 1228mii_link_interrupt(void *dev_id); 1229#else 1230static irqreturn_t 1231mii_link_interrupt(int irq, void * dev_id); 1232#endif 1233#endif 1234 1235#if defined(CONFIG_M5272) 1236/* 1237 * Code specific to Coldfire 5272 setup. 1238 */ 1239static void __inline__ fec_request_intrs(struct net_device *dev) 1240{ 1241 volatile unsigned long *icrp; 1242 static const struct idesc { 1243 char *name; 1244 unsigned short irq; 1245 irq_handler_t handler; 1246 } *idp, id[] = { 1247 { "fec(RX)", 86, fec_enet_interrupt }, 1248 { "fec(TX)", 87, fec_enet_interrupt }, 1249 { "fec(OTHER)", 88, fec_enet_interrupt }, 1250 { "fec(MII)", 66, mii_link_interrupt }, 1251 { NULL }, 1252 }; 1253 1254 /* Setup interrupt handlers. */ 1255 for (idp = id; idp->name; idp++) { 1256 if (request_irq(idp->irq, idp->handler, 0, idp->name, dev) != 0) 1257 printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, idp->irq); 1258 } 1259 1260 /* Unmask interrupt at ColdFire 5272 SIM */ 1261 icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR3); 1262 *icrp = 0x00000ddd; 1263 icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1); 1264 *icrp = 0x0d000000; 1265} 1266 1267static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep) 1268{ 1269 volatile fec_t *fecp; 1270 1271 fecp = fep->hwp; 1272 fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04; 1273 fecp->fec_x_cntrl = 0x00; 1274 1275 /* 1276 * Set MII speed to 2.5 MHz 1277 * See 5272 manual section 11.5.8: MSCR 1278 */ 1279 fep->phy_speed = ((((MCF_CLK / 4) / (2500000 / 10)) + 5) / 10) * 2; 1280 fecp->fec_mii_speed = fep->phy_speed; 1281 1282 fec_restart(dev, 0); 1283} 1284 1285static void __inline__ fec_get_mac(struct net_device *dev) 1286{ 1287 struct fec_enet_private *fep = netdev_priv(dev); 1288 volatile fec_t *fecp; 1289 unsigned char *iap, tmpaddr[ETH_ALEN]; 1290 1291 fecp = fep->hwp; 1292 1293 if (FEC_FLASHMAC) { 1294 /* 1295 * Get MAC address from FLASH. 1296 * If it is all 1's or 0's, use the default. 1297 */ 1298 iap = (unsigned char *)FEC_FLASHMAC; 1299 if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) && 1300 (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0)) 1301 iap = fec_mac_default; 1302 if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) && 1303 (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff)) 1304 iap = fec_mac_default; 1305 } else { 1306 *((unsigned long *) &tmpaddr[0]) = fecp->fec_addr_low; 1307 *((unsigned short *) &tmpaddr[4]) = (fecp->fec_addr_high >> 16); 1308 iap = &tmpaddr[0]; 1309 } 1310 1311 memcpy(dev->dev_addr, iap, ETH_ALEN); 1312 1313 /* Adjust MAC if using default MAC address */ 1314 if (iap == fec_mac_default) 1315 dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index; 1316} 1317 1318static void __inline__ fec_enable_phy_intr(void) 1319{ 1320} 1321 1322static void __inline__ fec_disable_phy_intr(void) 1323{ 1324 volatile unsigned long *icrp; 1325 icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1); 1326 *icrp = 0x08000000; 1327} 1328 1329static void __inline__ fec_phy_ack_intr(void) 1330{ 1331 volatile unsigned long *icrp; 1332 /* Acknowledge the interrupt */ 1333 icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1); 1334 *icrp = 0x0d000000; 1335} 1336 1337static void __inline__ fec_localhw_setup(void) 1338{ 1339} 1340 1341/* 1342 * Do not need to make region uncached on 5272. 1343 */ 1344static void __inline__ fec_uncache(unsigned long addr) 1345{ 1346} 1347 1348/* ------------------------------------------------------------------------- */ 1349 1350#elif defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) 1351 1352/* 1353 * Code specific to Coldfire 5230/5231/5232/5234/5235, 1354 * the 5270/5271/5274/5275 and 5280/5282 setups. 1355 */ 1356static void __inline__ fec_request_intrs(struct net_device *dev) 1357{ 1358 struct fec_enet_private *fep; 1359 int b; 1360 static const struct idesc { 1361 char *name; 1362 unsigned short irq; 1363 } *idp, id[] = { 1364 { "fec(TXF)", 23 }, 1365 { "fec(TXB)", 24 }, 1366 { "fec(TXFIFO)", 25 }, 1367 { "fec(TXCR)", 26 }, 1368 { "fec(RXF)", 27 }, 1369 { "fec(RXB)", 28 }, 1370 { "fec(MII)", 29 }, 1371 { "fec(LC)", 30 }, 1372 { "fec(HBERR)", 31 }, 1373 { "fec(GRA)", 32 }, 1374 { "fec(EBERR)", 33 }, 1375 { "fec(BABT)", 34 }, 1376 { "fec(BABR)", 35 }, 1377 { NULL }, 1378 }; 1379 1380 fep = netdev_priv(dev); 1381 b = (fep->index) ? 128 : 64; 1382 1383 /* Setup interrupt handlers. */ 1384 for (idp = id; idp->name; idp++) { 1385 if (request_irq(b+idp->irq, fec_enet_interrupt, 0, idp->name, dev) != 0) 1386 printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, b+idp->irq); 1387 } 1388 1389 /* Unmask interrupts at ColdFire 5280/5282 interrupt controller */ 1390 { 1391 volatile unsigned char *icrp; 1392 volatile unsigned long *imrp; 1393 int i, ilip; 1394 1395 b = (fep->index) ? MCFICM_INTC1 : MCFICM_INTC0; 1396 icrp = (volatile unsigned char *) (MCF_IPSBAR + b + 1397 MCFINTC_ICR0); 1398 for (i = 23, ilip = 0x28; (i < 36); i++) 1399 icrp[i] = ilip--; 1400 1401 imrp = (volatile unsigned long *) (MCF_IPSBAR + b + 1402 MCFINTC_IMRH); 1403 *imrp &= ~0x0000000f; 1404 imrp = (volatile unsigned long *) (MCF_IPSBAR + b + 1405 MCFINTC_IMRL); 1406 *imrp &= ~0xff800001; 1407 } 1408 1409#if defined(CONFIG_M528x) 1410 /* Set up gpio outputs for MII lines */ 1411 { 1412 volatile u16 *gpio_paspar; 1413 volatile u8 *gpio_pehlpar; 1414 1415 gpio_paspar = (volatile u16 *) (MCF_IPSBAR + 0x100056); 1416 gpio_pehlpar = (volatile u16 *) (MCF_IPSBAR + 0x100058); 1417 *gpio_paspar |= 0x0f00; 1418 *gpio_pehlpar = 0xc0; 1419 } 1420#endif 1421 1422#if defined(CONFIG_M527x) 1423 /* Set up gpio outputs for MII lines */ 1424 { 1425 volatile u8 *gpio_par_fec; 1426 volatile u16 *gpio_par_feci2c; 1427 1428 gpio_par_feci2c = (volatile u16 *)(MCF_IPSBAR + 0x100082); 1429 /* Set up gpio outputs for FEC0 MII lines */ 1430 gpio_par_fec = (volatile u8 *)(MCF_IPSBAR + 0x100078); 1431 1432 *gpio_par_feci2c |= 0x0f00; 1433 *gpio_par_fec |= 0xc0; 1434 1435#if defined(CONFIG_FEC2) 1436 /* Set up gpio outputs for FEC1 MII lines */ 1437 gpio_par_fec = (volatile u8 *)(MCF_IPSBAR + 0x100079); 1438 1439 *gpio_par_feci2c |= 0x00a0; 1440 *gpio_par_fec |= 0xc0; 1441#endif /* CONFIG_FEC2 */ 1442 } 1443#endif /* CONFIG_M527x */ 1444} 1445 1446static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep) 1447{ 1448 volatile fec_t *fecp; 1449 1450 fecp = fep->hwp; 1451 fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04; 1452 fecp->fec_x_cntrl = 0x00; 1453 1454 /* 1455 * Set MII speed to 2.5 MHz 1456 * See 5282 manual section 17.5.4.7: MSCR 1457 */ 1458 fep->phy_speed = ((((MCF_CLK / 2) / (2500000 / 10)) + 5) / 10) * 2; 1459 fecp->fec_mii_speed = fep->phy_speed; 1460 1461 fec_restart(dev, 0); 1462} 1463 1464static void __inline__ fec_get_mac(struct net_device *dev) 1465{ 1466 struct fec_enet_private *fep = netdev_priv(dev); 1467 volatile fec_t *fecp; 1468 unsigned char *iap, tmpaddr[ETH_ALEN]; 1469 1470 fecp = fep->hwp; 1471 1472 if (FEC_FLASHMAC) { 1473 /* 1474 * Get MAC address from FLASH. 1475 * If it is all 1's or 0's, use the default. 1476 */ 1477 iap = FEC_FLASHMAC; 1478 if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) && 1479 (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0)) 1480 iap = fec_mac_default; 1481 if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) && 1482 (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff)) 1483 iap = fec_mac_default; 1484 } else { 1485 *((unsigned long *) &tmpaddr[0]) = fecp->fec_addr_low; 1486 *((unsigned short *) &tmpaddr[4]) = (fecp->fec_addr_high >> 16); 1487 iap = &tmpaddr[0]; 1488 } 1489 1490 memcpy(dev->dev_addr, iap, ETH_ALEN); 1491 1492 /* Adjust MAC if using default MAC address */ 1493 if (iap == fec_mac_default) 1494 dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index; 1495} 1496 1497static void __inline__ fec_enable_phy_intr(void) 1498{ 1499} 1500 1501static void __inline__ fec_disable_phy_intr(void) 1502{ 1503} 1504 1505static void __inline__ fec_phy_ack_intr(void) 1506{ 1507} 1508 1509static void __inline__ fec_localhw_setup(void) 1510{ 1511} 1512 1513/* 1514 * Do not need to make region uncached on 5272. 1515 */ 1516static void __inline__ fec_uncache(unsigned long addr) 1517{ 1518} 1519 1520/* ------------------------------------------------------------------------- */ 1521 1522#elif defined(CONFIG_M520x) 1523 1524/* 1525 * Code specific to Coldfire 520x 1526 */ 1527static void __inline__ fec_request_intrs(struct net_device *dev) 1528{ 1529 struct fec_enet_private *fep; 1530 int b; 1531 static const struct idesc { 1532 char *name; 1533 unsigned short irq; 1534 } *idp, id[] = { 1535 { "fec(TXF)", 23 }, 1536 { "fec(TXB)", 24 }, 1537 { "fec(TXFIFO)", 25 }, 1538 { "fec(TXCR)", 26 }, 1539 { "fec(RXF)", 27 }, 1540 { "fec(RXB)", 28 }, 1541 { "fec(MII)", 29 }, 1542 { "fec(LC)", 30 }, 1543 { "fec(HBERR)", 31 }, 1544 { "fec(GRA)", 32 }, 1545 { "fec(EBERR)", 33 }, 1546 { "fec(BABT)", 34 }, 1547 { "fec(BABR)", 35 }, 1548 { NULL }, 1549 }; 1550 1551 fep = netdev_priv(dev); 1552 b = 64 + 13; 1553 1554 /* Setup interrupt handlers. */ 1555 for (idp = id; idp->name; idp++) { 1556 if (request_irq(b+idp->irq,fec_enet_interrupt,0,idp->name,dev)!=0) 1557 printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, b+idp->irq); 1558 } 1559 1560 /* Unmask interrupts at ColdFire interrupt controller */ 1561 { 1562 volatile unsigned char *icrp; 1563 volatile unsigned long *imrp; 1564 1565 icrp = (volatile unsigned char *) (MCF_IPSBAR + MCFICM_INTC0 + 1566 MCFINTC_ICR0); 1567 for (b = 36; (b < 49); b++) 1568 icrp[b] = 0x04; 1569 imrp = (volatile unsigned long *) (MCF_IPSBAR + MCFICM_INTC0 + 1570 MCFINTC_IMRH); 1571 *imrp &= ~0x0001FFF0; 1572 } 1573 *(volatile unsigned char *)(MCF_IPSBAR + MCF_GPIO_PAR_FEC) |= 0xf0; 1574 *(volatile unsigned char *)(MCF_IPSBAR + MCF_GPIO_PAR_FECI2C) |= 0x0f; 1575} 1576 1577static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep) 1578{ 1579 volatile fec_t *fecp; 1580 1581 fecp = fep->hwp; 1582 fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04; 1583 fecp->fec_x_cntrl = 0x00; 1584 1585 /* 1586 * Set MII speed to 2.5 MHz 1587 * See 5282 manual section 17.5.4.7: MSCR 1588 */ 1589 fep->phy_speed = ((((MCF_CLK / 2) / (2500000 / 10)) + 5) / 10) * 2; 1590 fecp->fec_mii_speed = fep->phy_speed; 1591 1592 fec_restart(dev, 0); 1593} 1594 1595static void __inline__ fec_get_mac(struct net_device *dev) 1596{ 1597 struct fec_enet_private *fep = netdev_priv(dev); 1598 volatile fec_t *fecp; 1599 unsigned char *iap, tmpaddr[ETH_ALEN]; 1600 1601 fecp = fep->hwp; 1602 1603 if (FEC_FLASHMAC) { 1604 /* 1605 * Get MAC address from FLASH. 1606 * If it is all 1's or 0's, use the default. 1607 */ 1608 iap = FEC_FLASHMAC; 1609 if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) && 1610 (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0)) 1611 iap = fec_mac_default; 1612 if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) && 1613 (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff)) 1614 iap = fec_mac_default; 1615 } else { 1616 *((unsigned long *) &tmpaddr[0]) = fecp->fec_addr_low; 1617 *((unsigned short *) &tmpaddr[4]) = (fecp->fec_addr_high >> 16); 1618 iap = &tmpaddr[0]; 1619 } 1620 1621 memcpy(dev->dev_addr, iap, ETH_ALEN); 1622 1623 /* Adjust MAC if using default MAC address */ 1624 if (iap == fec_mac_default) 1625 dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index; 1626} 1627 1628static void __inline__ fec_enable_phy_intr(void) 1629{ 1630} 1631 1632static void __inline__ fec_disable_phy_intr(void) 1633{ 1634} 1635 1636static void __inline__ fec_phy_ack_intr(void) 1637{ 1638} 1639 1640static void __inline__ fec_localhw_setup(void) 1641{ 1642} 1643 1644static void __inline__ fec_uncache(unsigned long addr) 1645{ 1646} 1647 1648/* ------------------------------------------------------------------------- */ 1649 1650#elif defined(CONFIG_M532x) 1651/* 1652 * Code specific for M532x 1653 */ 1654static void __inline__ fec_request_intrs(struct net_device *dev) 1655{ 1656 struct fec_enet_private *fep; 1657 int b; 1658 static const struct idesc { 1659 char *name; 1660 unsigned short irq; 1661 } *idp, id[] = { 1662 { "fec(TXF)", 36 }, 1663 { "fec(TXB)", 37 }, 1664 { "fec(TXFIFO)", 38 }, 1665 { "fec(TXCR)", 39 }, 1666 { "fec(RXF)", 40 }, 1667 { "fec(RXB)", 41 }, 1668 { "fec(MII)", 42 }, 1669 { "fec(LC)", 43 }, 1670 { "fec(HBERR)", 44 }, 1671 { "fec(GRA)", 45 }, 1672 { "fec(EBERR)", 46 }, 1673 { "fec(BABT)", 47 }, 1674 { "fec(BABR)", 48 }, 1675 { NULL }, 1676 }; 1677 1678 fep = netdev_priv(dev); 1679 b = (fep->index) ? 128 : 64; 1680 1681 /* Setup interrupt handlers. */ 1682 for (idp = id; idp->name; idp++) { 1683 if (request_irq(b+idp->irq,fec_enet_interrupt,0,idp->name,dev)!=0) 1684 printk("FEC: Could not allocate %s IRQ(%d)!\n", 1685 idp->name, b+idp->irq); 1686 } 1687 1688 /* Unmask interrupts */ 1689 MCF_INTC0_ICR36 = 0x2; 1690 MCF_INTC0_ICR37 = 0x2; 1691 MCF_INTC0_ICR38 = 0x2; 1692 MCF_INTC0_ICR39 = 0x2; 1693 MCF_INTC0_ICR40 = 0x2; 1694 MCF_INTC0_ICR41 = 0x2; 1695 MCF_INTC0_ICR42 = 0x2; 1696 MCF_INTC0_ICR43 = 0x2; 1697 MCF_INTC0_ICR44 = 0x2; 1698 MCF_INTC0_ICR45 = 0x2; 1699 MCF_INTC0_ICR46 = 0x2; 1700 MCF_INTC0_ICR47 = 0x2; 1701 MCF_INTC0_ICR48 = 0x2; 1702 1703 MCF_INTC0_IMRH &= ~( 1704 MCF_INTC_IMRH_INT_MASK36 | 1705 MCF_INTC_IMRH_INT_MASK37 | 1706 MCF_INTC_IMRH_INT_MASK38 | 1707 MCF_INTC_IMRH_INT_MASK39 | 1708 MCF_INTC_IMRH_INT_MASK40 | 1709 MCF_INTC_IMRH_INT_MASK41 | 1710 MCF_INTC_IMRH_INT_MASK42 | 1711 MCF_INTC_IMRH_INT_MASK43 | 1712 MCF_INTC_IMRH_INT_MASK44 | 1713 MCF_INTC_IMRH_INT_MASK45 | 1714 MCF_INTC_IMRH_INT_MASK46 | 1715 MCF_INTC_IMRH_INT_MASK47 | 1716 MCF_INTC_IMRH_INT_MASK48 ); 1717 1718 /* Set up gpio outputs for MII lines */ 1719 MCF_GPIO_PAR_FECI2C |= (0 | 1720 MCF_GPIO_PAR_FECI2C_PAR_MDC_EMDC | 1721 MCF_GPIO_PAR_FECI2C_PAR_MDIO_EMDIO); 1722 MCF_GPIO_PAR_FEC = (0 | 1723 MCF_GPIO_PAR_FEC_PAR_FEC_7W_FEC | 1724 MCF_GPIO_PAR_FEC_PAR_FEC_MII_FEC); 1725} 1726 1727static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep) 1728{ 1729 volatile fec_t *fecp; 1730 1731 fecp = fep->hwp; 1732 fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04; 1733 fecp->fec_x_cntrl = 0x00; 1734 1735 /* 1736 * Set MII speed to 2.5 MHz 1737 */ 1738 fep->phy_speed = ((((MCF_CLK / 2) / (2500000 / 10)) + 5) / 10) * 2; 1739 fecp->fec_mii_speed = fep->phy_speed; 1740 1741 fec_restart(dev, 0); 1742} 1743 1744static void __inline__ fec_get_mac(struct net_device *dev) 1745{ 1746 struct fec_enet_private *fep = netdev_priv(dev); 1747 volatile fec_t *fecp; 1748 unsigned char *iap, tmpaddr[ETH_ALEN]; 1749 1750 fecp = fep->hwp; 1751 1752 if (FEC_FLASHMAC) { 1753 /* 1754 * Get MAC address from FLASH. 1755 * If it is all 1's or 0's, use the default. 1756 */ 1757 iap = FEC_FLASHMAC; 1758 if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) && 1759 (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0)) 1760 iap = fec_mac_default; 1761 if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) && 1762 (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff)) 1763 iap = fec_mac_default; 1764 } else { 1765 *((unsigned long *) &tmpaddr[0]) = fecp->fec_addr_low; 1766 *((unsigned short *) &tmpaddr[4]) = (fecp->fec_addr_high >> 16); 1767 iap = &tmpaddr[0]; 1768 } 1769 1770 memcpy(dev->dev_addr, iap, ETH_ALEN); 1771 1772 /* Adjust MAC if using default MAC address */ 1773 if (iap == fec_mac_default) 1774 dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index; 1775} 1776 1777static void __inline__ fec_enable_phy_intr(void) 1778{ 1779} 1780 1781static void __inline__ fec_disable_phy_intr(void) 1782{ 1783} 1784 1785static void __inline__ fec_phy_ack_intr(void) 1786{ 1787} 1788 1789static void __inline__ fec_localhw_setup(void) 1790{ 1791} 1792 1793/* 1794 * Do not need to make region uncached on 532x. 1795 */ 1796static void __inline__ fec_uncache(unsigned long addr) 1797{ 1798} 1799 1800/* ------------------------------------------------------------------------- */ 1801 1802 1803#else 1804 1805/* 1806 * Code specific to the MPC860T setup. 1807 */ 1808static void __inline__ fec_request_intrs(struct net_device *dev) 1809{ 1810 volatile immap_t *immap; 1811 1812 immap = (immap_t *)IMAP_ADDR; /* pointer to internal registers */ 1813 1814 if (request_8xxirq(FEC_INTERRUPT, fec_enet_interrupt, 0, "fec", dev) != 0) 1815 panic("Could not allocate FEC IRQ!"); 1816 1817#ifdef CONFIG_RPXCLASSIC 1818 /* Make Port C, bit 15 an input that causes interrupts. 1819 */ 1820 immap->im_ioport.iop_pcpar &= ~0x0001; 1821 immap->im_ioport.iop_pcdir &= ~0x0001; 1822 immap->im_ioport.iop_pcso &= ~0x0001; 1823 immap->im_ioport.iop_pcint |= 0x0001; 1824 cpm_install_handler(CPMVEC_PIO_PC15, mii_link_interrupt, dev); 1825 1826 /* Make LEDS reflect Link status. 1827 */ 1828 *((uint *) RPX_CSR_ADDR) &= ~BCSR2_FETHLEDMODE; 1829#endif 1830#ifdef CONFIG_FADS 1831 if (request_8xxirq(SIU_IRQ2, mii_link_interrupt, 0, "mii", dev) != 0) 1832 panic("Could not allocate MII IRQ!"); 1833#endif 1834} 1835 1836static void __inline__ fec_get_mac(struct net_device *dev) 1837{ 1838 bd_t *bd; 1839 1840 bd = (bd_t *)__res; 1841 memcpy(dev->dev_addr, bd->bi_enetaddr, ETH_ALEN); 1842 1843#ifdef CONFIG_RPXCLASSIC 1844 /* The Embedded Planet boards have only one MAC address in 1845 * the EEPROM, but can have two Ethernet ports. For the 1846 * FEC port, we create another address by setting one of 1847 * the address bits above something that would have (up to 1848 * now) been allocated. 1849 */ 1850 dev->dev_adrd[3] |= 0x80; 1851#endif 1852} 1853 1854static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep) 1855{ 1856 extern uint _get_IMMR(void); 1857 volatile immap_t *immap; 1858 volatile fec_t *fecp; 1859 1860 fecp = fep->hwp; 1861 immap = (immap_t *)IMAP_ADDR; /* pointer to internal registers */ 1862 1863 /* Configure all of port D for MII. 1864 */ 1865 immap->im_ioport.iop_pdpar = 0x1fff; 1866 1867 /* Bits moved from Rev. D onward. 1868 */ 1869 if ((_get_IMMR() & 0xffff) < 0x0501) 1870 immap->im_ioport.iop_pddir = 0x1c58; /* Pre rev. D */ 1871 else 1872 immap->im_ioport.iop_pddir = 0x1fff; /* Rev. D and later */ 1873 1874 /* Set MII speed to 2.5 MHz 1875 */ 1876 fecp->fec_mii_speed = fep->phy_speed = 1877 ((bd->bi_busfreq * 1000000) / 2500000) & 0x7e; 1878} 1879 1880static void __inline__ fec_enable_phy_intr(void) 1881{ 1882 volatile fec_t *fecp; 1883 1884 fecp = fep->hwp; 1885 1886 /* Enable MII command finished interrupt 1887 */ 1888 fecp->fec_ivec = (FEC_INTERRUPT/2) << 29; 1889} 1890 1891static void __inline__ fec_disable_phy_intr(void) 1892{ 1893} 1894 1895static void __inline__ fec_phy_ack_intr(void) 1896{ 1897} 1898 1899static void __inline__ fec_localhw_setup(void) 1900{ 1901 volatile fec_t *fecp; 1902 1903 fecp = fep->hwp; 1904 fecp->fec_r_hash = PKT_MAXBUF_SIZE; 1905 /* Enable big endian and don't care about SDMA FC. 1906 */ 1907 fecp->fec_fun_code = 0x78000000; 1908} 1909 1910static void __inline__ fec_uncache(unsigned long addr) 1911{ 1912 pte_t *pte; 1913 pte = va_to_pte(mem_addr); 1914 pte_val(*pte) |= _PAGE_NO_CACHE; 1915 flush_tlb_page(init_mm.mmap, mem_addr); 1916} 1917 1918#endif 1919 1920/* ------------------------------------------------------------------------- */ 1921 1922static void mii_display_status(struct net_device *dev) 1923{ 1924 struct fec_enet_private *fep = netdev_priv(dev); 1925 volatile uint *s = &(fep->phy_status); 1926 1927 if (!fep->link && !fep->old_link) { 1928 /* Link is still down - don't print anything */ 1929 return; 1930 } 1931 1932 printk("%s: status: ", dev->name); 1933 1934 if (!fep->link) { 1935 printk("link down"); 1936 } else { 1937 printk("link up"); 1938 1939 switch(*s & PHY_STAT_SPMASK) { 1940 case PHY_STAT_100FDX: printk(", 100MBit Full Duplex"); break; 1941 case PHY_STAT_100HDX: printk(", 100MBit Half Duplex"); break; 1942 case PHY_STAT_10FDX: printk(", 10MBit Full Duplex"); break; 1943 case PHY_STAT_10HDX: printk(", 10MBit Half Duplex"); break; 1944 default: 1945 printk(", Unknown speed/duplex"); 1946 } 1947 1948 if (*s & PHY_STAT_ANC) 1949 printk(", auto-negotiation complete"); 1950 } 1951 1952 if (*s & PHY_STAT_FAULT) 1953 printk(", remote fault"); 1954 1955 printk(".\n"); 1956} 1957 1958static void mii_display_config(struct work_struct *work) 1959{ 1960 struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task); 1961 struct net_device *dev = fep->netdev; 1962 uint status = fep->phy_status; 1963 1964 /* 1965 ** When we get here, phy_task is already removed from 1966 ** the workqueue. It is thus safe to allow to reuse it. 1967 */ 1968 fep->mii_phy_task_queued = 0; 1969 printk("%s: config: auto-negotiation ", dev->name); 1970 1971 if (status & PHY_CONF_ANE) 1972 printk("on"); 1973 else 1974 printk("off"); 1975 1976 if (status & PHY_CONF_100FDX) 1977 printk(", 100FDX"); 1978 if (status & PHY_CONF_100HDX) 1979 printk(", 100HDX"); 1980 if (status & PHY_CONF_10FDX) 1981 printk(", 10FDX"); 1982 if (status & PHY_CONF_10HDX) 1983 printk(", 10HDX"); 1984 if (!(status & PHY_CONF_SPMASK)) 1985 printk(", No speed/duplex selected?"); 1986 1987 if (status & PHY_CONF_LOOP) 1988 printk(", loopback enabled"); 1989 1990 printk(".\n"); 1991 1992 fep->sequence_done = 1; 1993} 1994 1995static void mii_relink(struct work_struct *work) 1996{ 1997 struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task); 1998 struct net_device *dev = fep->netdev; 1999 int duplex; 2000 2001 /* 2002 ** When we get here, phy_task is already removed from 2003 ** the workqueue. It is thus safe to allow to reuse it. 2004 */ 2005 fep->mii_phy_task_queued = 0; 2006 fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0; 2007 mii_display_status(dev); 2008 fep->old_link = fep->link; 2009 2010 if (fep->link) { 2011 duplex = 0; 2012 if (fep->phy_status 2013 & (PHY_STAT_100FDX | PHY_STAT_10FDX)) 2014 duplex = 1; 2015 fec_restart(dev, duplex); 2016 } else 2017 fec_stop(dev); 2018 2019#if 0 2020 enable_irq(fep->mii_irq); 2021#endif 2022 2023} 2024 2025/* mii_queue_relink is called in interrupt context from mii_link_interrupt */ 2026static void mii_queue_relink(uint mii_reg, struct net_device *dev) 2027{ 2028 struct fec_enet_private *fep = netdev_priv(dev); 2029 2030 /* 2031 ** We cannot queue phy_task twice in the workqueue. It 2032 ** would cause an endless loop in the workqueue. 2033 ** Fortunately, if the last mii_relink entry has not yet been 2034 ** executed now, it will do the job for the current interrupt, 2035 ** which is just what we want. 2036 */ 2037 if (fep->mii_phy_task_queued) 2038 return; 2039 2040 fep->mii_phy_task_queued = 1; 2041 INIT_WORK(&fep->phy_task, mii_relink); 2042 schedule_work(&fep->phy_task); 2043} 2044 2045/* mii_queue_config is called in interrupt context from fec_enet_mii */ 2046static void mii_queue_config(uint mii_reg, struct net_device *dev) 2047{ 2048 struct fec_enet_private *fep = netdev_priv(dev); 2049 2050 if (fep->mii_phy_task_queued) 2051 return; 2052 2053 fep->mii_phy_task_queued = 1; 2054 INIT_WORK(&fep->phy_task, mii_display_config); 2055 schedule_work(&fep->phy_task); 2056} 2057 2058phy_cmd_t const phy_cmd_relink[] = { 2059 { mk_mii_read(MII_REG_CR), mii_queue_relink }, 2060 { mk_mii_end, } 2061 }; 2062phy_cmd_t const phy_cmd_config[] = { 2063 { mk_mii_read(MII_REG_CR), mii_queue_config }, 2064 { mk_mii_end, } 2065 }; 2066 2067/* Read remainder of PHY ID. 2068*/ 2069static void 2070mii_discover_phy3(uint mii_reg, struct net_device *dev) 2071{ 2072 struct fec_enet_private *fep; 2073 int i; 2074 2075 fep = netdev_priv(dev); 2076 fep->phy_id |= (mii_reg & 0xffff); 2077 printk("fec: PHY @ 0x%x, ID 0x%08x", fep->phy_addr, fep->phy_id); 2078 2079 for(i = 0; phy_info[i]; i++) { 2080 if(phy_info[i]->id == (fep->phy_id >> 4)) 2081 break; 2082 } 2083 2084 if (phy_info[i]) 2085 printk(" -- %s\n", phy_info[i]->name); 2086 else 2087 printk(" -- unknown PHY!\n"); 2088 2089 fep->phy = phy_info[i]; 2090 fep->phy_id_done = 1; 2091} 2092 2093/* Scan all of the MII PHY addresses looking for someone to respond 2094 * with a valid ID. This usually happens quickly. 2095 */ 2096static void 2097mii_discover_phy(uint mii_reg, struct net_device *dev) 2098{ 2099 struct fec_enet_private *fep; 2100 volatile fec_t *fecp; 2101 uint phytype; 2102 2103 fep = netdev_priv(dev); 2104 fecp = fep->hwp; 2105 2106 if (fep->phy_addr < 32) { 2107 if ((phytype = (mii_reg & 0xffff)) != 0xffff && phytype != 0) { 2108 2109 /* Got first part of ID, now get remainder. 2110 */ 2111 fep->phy_id = phytype << 16; 2112 mii_queue(dev, mk_mii_read(MII_REG_PHYIR2), 2113 mii_discover_phy3); 2114 } else { 2115 fep->phy_addr++; 2116 mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), 2117 mii_discover_phy); 2118 } 2119 } else { 2120 printk("FEC: No PHY device found.\n"); 2121 /* Disable external MII interface */ 2122 fecp->fec_mii_speed = fep->phy_speed = 0; 2123 fec_disable_phy_intr(); 2124 } 2125} 2126 2127/* This interrupt occurs when the PHY detects a link change. 2128*/ 2129#ifdef CONFIG_RPXCLASSIC 2130static void 2131mii_link_interrupt(void *dev_id) 2132#else 2133static irqreturn_t 2134mii_link_interrupt(int irq, void * dev_id) 2135#endif 2136{ 2137 struct net_device *dev = dev_id; 2138 struct fec_enet_private *fep = netdev_priv(dev); 2139 2140 fec_phy_ack_intr(); 2141 2142#if 0 2143 disable_irq(fep->mii_irq); /* disable now, enable later */ 2144#endif 2145 2146 mii_do_cmd(dev, fep->phy->ack_int); 2147 mii_do_cmd(dev, phy_cmd_relink); /* restart and display status */ 2148 2149 return IRQ_HANDLED; 2150} 2151 2152static int 2153fec_enet_open(struct net_device *dev) 2154{ 2155 struct fec_enet_private *fep = netdev_priv(dev); 2156 2157 /* I should reset the ring buffers here, but I don't yet know 2158 * a simple way to do that. 2159 */ 2160 fec_set_mac_address(dev); 2161 2162 fep->sequence_done = 0; 2163 fep->link = 0; 2164 2165 if (fep->phy) { 2166 mii_do_cmd(dev, fep->phy->ack_int); 2167 mii_do_cmd(dev, fep->phy->config); 2168 mii_do_cmd(dev, phy_cmd_config); /* display configuration */ 2169 2170 /* Poll until the PHY tells us its configuration 2171 * (not link state). 2172 * Request is initiated by mii_do_cmd above, but answer 2173 * comes by interrupt. 2174 * This should take about 25 usec per register at 2.5 MHz, 2175 * and we read approximately 5 registers. 2176 */ 2177 while(!fep->sequence_done) 2178 schedule(); 2179 2180 mii_do_cmd(dev, fep->phy->startup); 2181 2182 /* Set the initial link state to true. A lot of hardware 2183 * based on this device does not implement a PHY interrupt, 2184 * so we are never notified of link change. 2185 */ 2186 fep->link = 1; 2187 } else { 2188 fep->link = 1; /* lets just try it and see */ 2189 /* no phy, go full duplex, it's most likely a hub chip */ 2190 fec_restart(dev, 1); 2191 } 2192 2193 netif_start_queue(dev); 2194 fep->opened = 1; 2195 return 0; /* Success */ 2196} 2197 2198static int 2199fec_enet_close(struct net_device *dev) 2200{ 2201 struct fec_enet_private *fep = netdev_priv(dev); 2202 2203 /* Don't know what to do yet. 2204 */ 2205 fep->opened = 0; 2206 netif_stop_queue(dev); 2207 fec_stop(dev); 2208 2209 return 0; 2210} 2211 2212/* Set or clear the multicast filter for this adaptor. 2213 * Skeleton taken from sunlance driver. 2214 * The CPM Ethernet implementation allows Multicast as well as individual 2215 * MAC address filtering. Some of the drivers check to make sure it is 2216 * a group multicast address, and discard those that are not. I guess I 2217 * will do the same for now, but just remove the test if you want 2218 * individual filtering as well (do the upper net layers want or support 2219 * this kind of feature?). 2220 */ 2221 2222#define HASH_BITS 6 /* #bits in hash */ 2223#define CRC32_POLY 0xEDB88320 2224 2225static void set_multicast_list(struct net_device *dev) 2226{ 2227 struct fec_enet_private *fep; 2228 volatile fec_t *ep; 2229 struct dev_mc_list *dmi; 2230 unsigned int i, j, bit, data, crc; 2231 unsigned char hash; 2232 2233 fep = netdev_priv(dev); 2234 ep = fep->hwp; 2235 2236 if (dev->flags&IFF_PROMISC) { 2237 ep->fec_r_cntrl |= 0x0008; 2238 } else { 2239 2240 ep->fec_r_cntrl &= ~0x0008; 2241 2242 if (dev->flags & IFF_ALLMULTI) { 2243 /* Catch all multicast addresses, so set the 2244 * filter to all 1's. 2245 */ 2246 ep->fec_hash_table_high = 0xffffffff; 2247 ep->fec_hash_table_low = 0xffffffff; 2248 } else { 2249 /* Clear filter and add the addresses in hash register. 2250 */ 2251 ep->fec_hash_table_high = 0; 2252 ep->fec_hash_table_low = 0; 2253 2254 dmi = dev->mc_list; 2255 2256 for (j = 0; j < dev->mc_count; j++, dmi = dmi->next) 2257 { 2258 /* Only support group multicast for now. 2259 */ 2260 if (!(dmi->dmi_addr[0] & 1)) 2261 continue; 2262 2263 /* calculate crc32 value of mac address 2264 */ 2265 crc = 0xffffffff; 2266 2267 for (i = 0; i < dmi->dmi_addrlen; i++) 2268 { 2269 data = dmi->dmi_addr[i]; 2270 for (bit = 0; bit < 8; bit++, data >>= 1) 2271 { 2272 crc = (crc >> 1) ^ 2273 (((crc ^ data) & 1) ? CRC32_POLY : 0); 2274 } 2275 } 2276 2277 /* only upper 6 bits (HASH_BITS) are used 2278 which point to specific bit in he hash registers 2279 */ 2280 hash = (crc >> (32 - HASH_BITS)) & 0x3f; 2281 2282 if (hash > 31) 2283 ep->fec_hash_table_high |= 1 << (hash - 32); 2284 else 2285 ep->fec_hash_table_low |= 1 << hash; 2286 } 2287 } 2288 } 2289} 2290 2291/* Set a MAC change in hardware. 2292 */ 2293static void 2294fec_set_mac_address(struct net_device *dev) 2295{ 2296 volatile fec_t *fecp; 2297 2298 fecp = ((struct fec_enet_private *)netdev_priv(dev))->hwp; 2299 2300 /* Set station address. */ 2301 fecp->fec_addr_low = dev->dev_addr[3] | (dev->dev_addr[2] << 8) | 2302 (dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24); 2303 fecp->fec_addr_high = (dev->dev_addr[5] << 16) | 2304 (dev->dev_addr[4] << 24); 2305 2306} 2307 2308/* Initialize the FEC Ethernet on 860T (or ColdFire 5272). 2309 */ 2310 /* 2311 * XXX: We need to clean up on failure exits here. 2312 */ 2313int __init fec_enet_init(struct net_device *dev) 2314{ 2315 struct fec_enet_private *fep = netdev_priv(dev); 2316 unsigned long mem_addr; 2317 volatile cbd_t *bdp; 2318 cbd_t *cbd_base; 2319 volatile fec_t *fecp; 2320 int i, j; 2321 static int index = 0; 2322 2323 /* Only allow us to be probed once. */ 2324 if (index >= FEC_MAX_PORTS) 2325 return -ENXIO; 2326 2327 /* Allocate memory for buffer descriptors. 2328 */ 2329 mem_addr = __get_free_page(GFP_KERNEL); 2330 if (mem_addr == 0) { 2331 printk("FEC: allocate descriptor memory failed?\n"); 2332 return -ENOMEM; 2333 } 2334 2335 /* Create an Ethernet device instance. 2336 */ 2337 fecp = (volatile fec_t *) fec_hw[index]; 2338 2339 fep->index = index; 2340 fep->hwp = fecp; 2341 fep->netdev = dev; 2342 2343 /* Whack a reset. We should wait for this. 2344 */ 2345 fecp->fec_ecntrl = 1; 2346 udelay(10); 2347 2348 /* Set the Ethernet address. If using multiple Enets on the 8xx, 2349 * this needs some work to get unique addresses. 2350 * 2351 * This is our default MAC address unless the user changes 2352 * it via eth_mac_addr (our dev->set_mac_addr handler). 2353 */ 2354 fec_get_mac(dev); 2355 2356 cbd_base = (cbd_t *)mem_addr; 2357 /* XXX: missing check for allocation failure */ 2358 2359 fec_uncache(mem_addr); 2360 2361 /* Set receive and transmit descriptor base. 2362 */ 2363 fep->rx_bd_base = cbd_base; 2364 fep->tx_bd_base = cbd_base + RX_RING_SIZE; 2365 2366 fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; 2367 fep->cur_rx = fep->rx_bd_base; 2368 2369 fep->skb_cur = fep->skb_dirty = 0; 2370 2371 /* Initialize the receive buffer descriptors. 2372 */ 2373 bdp = fep->rx_bd_base; 2374 for (i=0; i<FEC_ENET_RX_PAGES; i++) { 2375 2376 /* Allocate a page. 2377 */ 2378 mem_addr = __get_free_page(GFP_KERNEL); 2379 /* XXX: missing check for allocation failure */ 2380 2381 fec_uncache(mem_addr); 2382 2383 /* Initialize the BD for every fragment in the page. 2384 */ 2385 for (j=0; j<FEC_ENET_RX_FRPPG; j++) { 2386 bdp->cbd_sc = BD_ENET_RX_EMPTY; 2387 bdp->cbd_bufaddr = __pa(mem_addr); 2388 mem_addr += FEC_ENET_RX_FRSIZE; 2389 bdp++; 2390 } 2391 } 2392 2393 /* Set the last buffer to wrap. 2394 */ 2395 bdp--; 2396 bdp->cbd_sc |= BD_SC_WRAP; 2397 2398 /* ...and the same for transmmit. 2399 */ 2400 bdp = fep->tx_bd_base; 2401 for (i=0, j=FEC_ENET_TX_FRPPG; i<TX_RING_SIZE; i++) { 2402 if (j >= FEC_ENET_TX_FRPPG) { 2403 mem_addr = __get_free_page(GFP_KERNEL); 2404 j = 1; 2405 } else { 2406 mem_addr += FEC_ENET_TX_FRSIZE; 2407 j++; 2408 } 2409 fep->tx_bounce[i] = (unsigned char *) mem_addr; 2410 2411 /* Initialize the BD for every fragment in the page. 2412 */ 2413 bdp->cbd_sc = 0; 2414 bdp->cbd_bufaddr = 0; 2415 bdp++; 2416 } 2417 2418 /* Set the last buffer to wrap. 2419 */ 2420 bdp--; 2421 bdp->cbd_sc |= BD_SC_WRAP; 2422 2423 /* Set receive and transmit descriptor base. 2424 */ 2425 fecp->fec_r_des_start = __pa((uint)(fep->rx_bd_base)); 2426 fecp->fec_x_des_start = __pa((uint)(fep->tx_bd_base)); 2427 2428 /* Install our interrupt handlers. This varies depending on 2429 * the architecture. 2430 */ 2431 fec_request_intrs(dev); 2432 2433 fecp->fec_hash_table_high = 0; 2434 fecp->fec_hash_table_low = 0; 2435 fecp->fec_r_buff_size = PKT_MAXBLR_SIZE; 2436 fecp->fec_ecntrl = 2; 2437 fecp->fec_r_des_active = 0; 2438 2439 dev->base_addr = (unsigned long)fecp; 2440 2441 /* The FEC Ethernet specific entries in the device structure. */ 2442 dev->open = fec_enet_open; 2443 dev->hard_start_xmit = fec_enet_start_xmit; 2444 dev->tx_timeout = fec_timeout; 2445 dev->watchdog_timeo = TX_TIMEOUT; 2446 dev->stop = fec_enet_close; 2447 dev->set_multicast_list = set_multicast_list; 2448 2449 for (i=0; i<NMII-1; i++) 2450 mii_cmds[i].mii_next = &mii_cmds[i+1]; 2451 mii_free = mii_cmds; 2452 2453 /* setup MII interface */ 2454 fec_set_mii(dev, fep); 2455 2456 /* Clear and enable interrupts */ 2457 fecp->fec_ievent = 0xffc00000; 2458 fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_TXB | 2459 FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII); 2460 2461 /* Queue up command to detect the PHY and initialize the 2462 * remainder of the interface. 2463 */ 2464 fep->phy_id_done = 0; 2465 fep->phy_addr = 0; 2466 mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy); 2467 2468 index++; 2469 return 0; 2470} 2471 2472/* This function is called to start or restart the FEC during a link 2473 * change. This only happens when switching between half and full 2474 * duplex. 2475 */ 2476static void 2477fec_restart(struct net_device *dev, int duplex) 2478{ 2479 struct fec_enet_private *fep; 2480 volatile cbd_t *bdp; 2481 volatile fec_t *fecp; 2482 int i; 2483 2484 fep = netdev_priv(dev); 2485 fecp = fep->hwp; 2486 2487 /* Whack a reset. We should wait for this. 2488 */ 2489 fecp->fec_ecntrl = 1; 2490 udelay(10); 2491 2492 /* Clear any outstanding interrupt. 2493 */ 2494 fecp->fec_ievent = 0xffc00000; 2495 fec_enable_phy_intr(); 2496 2497 /* Set station address. 2498 */ 2499 fec_set_mac_address(dev); 2500 2501 /* Reset all multicast. 2502 */ 2503 fecp->fec_hash_table_high = 0; 2504 fecp->fec_hash_table_low = 0; 2505 2506 /* Set maximum receive buffer size. 2507 */ 2508 fecp->fec_r_buff_size = PKT_MAXBLR_SIZE; 2509 2510 fec_localhw_setup(); 2511 2512 /* Set receive and transmit descriptor base. 2513 */ 2514 fecp->fec_r_des_start = __pa((uint)(fep->rx_bd_base)); 2515 fecp->fec_x_des_start = __pa((uint)(fep->tx_bd_base)); 2516 2517 fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; 2518 fep->cur_rx = fep->rx_bd_base; 2519 2520 /* Reset SKB transmit buffers. 2521 */ 2522 fep->skb_cur = fep->skb_dirty = 0; 2523 for (i=0; i<=TX_RING_MOD_MASK; i++) { 2524 if (fep->tx_skbuff[i] != NULL) { 2525 dev_kfree_skb_any(fep->tx_skbuff[i]); 2526 fep->tx_skbuff[i] = NULL; 2527 } 2528 } 2529 2530 /* Initialize the receive buffer descriptors. 2531 */ 2532 bdp = fep->rx_bd_base; 2533 for (i=0; i<RX_RING_SIZE; i++) { 2534 2535 /* Initialize the BD for every fragment in the page. 2536 */ 2537 bdp->cbd_sc = BD_ENET_RX_EMPTY; 2538 bdp++; 2539 } 2540 2541 /* Set the last buffer to wrap. 2542 */ 2543 bdp--; 2544 bdp->cbd_sc |= BD_SC_WRAP; 2545 2546 /* ...and the same for transmmit. 2547 */ 2548 bdp = fep->tx_bd_base; 2549 for (i=0; i<TX_RING_SIZE; i++) { 2550 2551 /* Initialize the BD for every fragment in the page. 2552 */ 2553 bdp->cbd_sc = 0; 2554 bdp->cbd_bufaddr = 0; 2555 bdp++; 2556 } 2557 2558 /* Set the last buffer to wrap. 2559 */ 2560 bdp--; 2561 bdp->cbd_sc |= BD_SC_WRAP; 2562 2563 /* Enable MII mode. 2564 */ 2565 if (duplex) { 2566 fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04;/* MII enable */ 2567 fecp->fec_x_cntrl = 0x04; /* FD enable */ 2568 } else { 2569 /* MII enable|No Rcv on Xmit */ 2570 fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x06; 2571 fecp->fec_x_cntrl = 0x00; 2572 } 2573 fep->full_duplex = duplex; 2574 2575 /* Set MII speed. 2576 */ 2577 fecp->fec_mii_speed = fep->phy_speed; 2578 2579 /* And last, enable the transmit and receive processing. 2580 */ 2581 fecp->fec_ecntrl = 2; 2582 fecp->fec_r_des_active = 0; 2583 2584 /* Enable interrupts we wish to service. 2585 */ 2586 fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_TXB | 2587 FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII); 2588} 2589 2590static void 2591fec_stop(struct net_device *dev) 2592{ 2593 volatile fec_t *fecp; 2594 struct fec_enet_private *fep; 2595 2596 fep = netdev_priv(dev); 2597 fecp = fep->hwp; 2598 2599 /* 2600 ** We cannot expect a graceful transmit stop without link !!! 2601 */ 2602 if (fep->link) 2603 { 2604 fecp->fec_x_cntrl = 0x01; /* Graceful transmit stop */ 2605 udelay(10); 2606 if (!(fecp->fec_ievent & FEC_ENET_GRA)) 2607 printk("fec_stop : Graceful transmit stop did not complete !\n"); 2608 } 2609 2610 /* Whack a reset. We should wait for this. 2611 */ 2612 fecp->fec_ecntrl = 1; 2613 udelay(10); 2614 2615 /* Clear outstanding MII command interrupts. 2616 */ 2617 fecp->fec_ievent = FEC_ENET_MII; 2618 fec_enable_phy_intr(); 2619 2620 fecp->fec_imask = FEC_ENET_MII; 2621 fecp->fec_mii_speed = fep->phy_speed; 2622} 2623 2624static int __init fec_enet_module_init(void) 2625{ 2626 struct net_device *dev; 2627 int i, j, err; 2628 DECLARE_MAC_BUF(mac); 2629 2630 printk("FEC ENET Version 0.2\n"); 2631 2632 for (i = 0; (i < FEC_MAX_PORTS); i++) { 2633 dev = alloc_etherdev(sizeof(struct fec_enet_private)); 2634 if (!dev) 2635 return -ENOMEM; 2636 err = fec_enet_init(dev); 2637 if (err) { 2638 free_netdev(dev); 2639 continue; 2640 } 2641 if (register_netdev(dev) != 0) { 2642 /* XXX: missing cleanup here */ 2643 free_netdev(dev); 2644 return -EIO; 2645 } 2646 2647 printk("%s: ethernet %s\n", 2648 dev->name, print_mac(mac, dev->dev_addr)); 2649 } 2650 return 0; 2651} 2652 2653module_init(fec_enet_module_init); 2654 2655MODULE_LICENSE("GPL");