/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * This version of the driver is specific to the FADS implementation,
 * since the board contains control registers external to the processor
 * for the control of the LevelOne LXT970 transceiver.  The MPC860T manual
 * describes connections using the internal parallel port I/O, which
 * is basically all of Port D.
 *
 * Right now, I am very wasteful with the buffers.  I allocate memory
 * pages and then divide them into 2K frame buffers.  This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire/5270/5271/5272/5274/5275/5280/5282.
 * Copyright (c) 2001-2004 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2005 Macq Electronique SA.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/pgtable.h>

#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || \
    defined(CONFIG_M5272) || defined(CONFIG_M528x)
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include "fec.h"
#else
#include <asm/8xx_immap.h>
#include <asm/mpc8xx.h>
#include "commproc.h"
#endif

#if defined(CONFIG_FEC2)
#define	FEC_MAX_PORTS	2
#else
#define	FEC_MAX_PORTS	1
#endif

/*
 * Define the fixed address of the FEC hardware.
 */
static unsigned int fec_hw[] = {
#if defined(CONFIG_M5272)
	(MCF_MBAR + 0x840),
#elif defined(CONFIG_M527x)
	(MCF_MBAR + 0x1000),
	(MCF_MBAR + 0x1800),
#elif defined(CONFIG_M523x) || defined(CONFIG_M528x)
	(MCF_MBAR + 0x1000),
#else
	&(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec),
#endif
};

static unsigned char	fec_mac_default[] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/*
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero then assume it is the address to get the MAC from.
 */
#if defined(CONFIG_NETtel)
#define	FEC_FLASHMAC	0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define	FEC_FLASHMAC	0xf0006000
#elif defined(CONFIG_MTD_KeyTechnology)
#define	FEC_FLASHMAC	0xffe04000
#elif defined(CONFIG_CANCam)
#define	FEC_FLASHMAC	0xf0020000
#elif defined(CONFIG_M5272C3)
#define	FEC_FLASHMAC	(0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define	FEC_FLASHMAC	0xffc0406b
#else
#define	FEC_FLASHMAC	0
#endif

/* Forward declarations of some structures to support different PHYs
*/

typedef struct {
	uint mii_data;
	void (*funct)(uint mii_reg, struct net_device *dev);
} phy_cmd_t;

typedef struct {
	uint id;
	char *name;

	const phy_cmd_t *config;
	const phy_cmd_t *startup;
	const phy_cmd_t *ack_int;
	const phy_cmd_t *shutdown;
} phy_info_t;

/* The number of Tx and Rx buffers.  These are allocated from the page
 * pool.  The code may assume these are powers of two, so it is best
 * to keep them that size.
 * We don't need to allocate pages for the transmitter.  We just use
 * the skbuffer directly.
 */
#define FEC_ENET_RX_PAGES	8
#define FEC_ENET_RX_FRSIZE	2048
#define FEC_ENET_RX_FRPPG	(PAGE_SIZE / FEC_ENET_RX_FRSIZE)
#define RX_RING_SIZE		(FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
#define FEC_ENET_TX_FRSIZE	2048
#define FEC_ENET_TX_FRPPG	(PAGE_SIZE / FEC_ENET_TX_FRSIZE)
#define TX_RING_SIZE		16	/* Must be power of two */
#define TX_RING_MOD_MASK	15	/*   for this to work */
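
/* Editorial sizing note, not part of the original driver (assumes the
 * common 4 kB PAGE_SIZE; other architectures differ): FEC_ENET_RX_FRPPG
 * is 4096 / 2048 = 2 frames per page, so RX_RING_SIZE is 2 * 8 = 16
 * receive buffers.  TX_RING_MOD_MASK must stay TX_RING_SIZE - 1, since
 * indices below are wrapped with "(i + 1) & TX_RING_MOD_MASK", which
 * only works when TX_RING_SIZE is a power of two.
 */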
/* Interrupt events/masks.
*/
#define FEC_ENET_HBERR	((uint)0x80000000)	/* Heartbeat error */
#define FEC_ENET_BABR	((uint)0x40000000)	/* Babbling receiver */
#define FEC_ENET_BABT	((uint)0x20000000)	/* Babbling transmitter */
#define FEC_ENET_GRA	((uint)0x10000000)	/* Graceful stop complete */
#define FEC_ENET_TXF	((uint)0x08000000)	/* Full frame transmitted */
#define FEC_ENET_TXB	((uint)0x04000000)	/* A buffer was transmitted */
#define FEC_ENET_RXF	((uint)0x02000000)	/* Full frame received */
#define FEC_ENET_RXB	((uint)0x01000000)	/* A buffer was received */
#define FEC_ENET_MII	((uint)0x00800000)	/* MII interrupt */
#define FEC_ENET_EBERR	((uint)0x00400000)	/* SDMA bus error */

/* The FEC stores dest/src/type, data, and checksum for receive packets.
 */
#define PKT_MAXBUF_SIZE		1518
#define PKT_MINBUF_SIZE		64
#define PKT_MAXBLR_SIZE		1520


/*
 * The 5270/5271/5280/5282 RX control register also contains maximum frame
 * size bits.  Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x)
#define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
#else
#define	OPT_FRAME_SIZE	0
#endif

/* The FEC buffer descriptors track the ring buffers.  The rx_bd_base and
 * tx_bd_base always point to the base of the buffer descriptors.  The
 * cur_rx and cur_tx point to the currently available buffer.
 * The dirty_tx tracks the current buffer that is being sent by the
 * controller.  The cur_tx and dirty_tx are equal under both completely
 * empty and completely full conditions.  The empty/ready indicator in
 * the buffer descriptor determines the actual condition.
 */
struct fec_enet_private {
	/* Hardware registers of the FEC device */
	volatile fec_t	*hwp;

	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	unsigned char *tx_bounce[TX_RING_SIZE];
	struct	sk_buff *tx_skbuff[TX_RING_SIZE];
	ushort	skb_cur;
	ushort	skb_dirty;

	/* CPM dual port RAM relative addresses.
	*/
	cbd_t	*rx_bd_base;		/* Address of Rx and Tx buffers. */
	cbd_t	*tx_bd_base;
	cbd_t	*cur_rx, *cur_tx;	/* The next free ring entry */
	cbd_t	*dirty_tx;		/* The ring entries to be free()ed. */
	struct	net_device_stats stats;
	uint	tx_full;
	spinlock_t lock;

	uint	phy_id;
	uint	phy_id_done;
	uint	phy_status;
	uint	phy_speed;
	phy_info_t const	*phy;
	struct work_struct phy_task;

	uint	sequence_done;
	uint	mii_phy_task_queued;

	uint	phy_addr;

	int	index;
	int	opened;
	int	link;
	int	old_link;
	int	full_duplex;
};

static int fec_enet_open(struct net_device *dev);
static int fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void fec_enet_mii(struct net_device *dev);
static irqreturn_t fec_enet_interrupt(int irq, void * dev_id, struct pt_regs * regs);
static void fec_enet_tx(struct net_device *dev);
static void fec_enet_rx(struct net_device *dev);
static int fec_enet_close(struct net_device *dev);
static struct net_device_stats *fec_enet_get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
static void fec_restart(struct net_device *dev, int duplex);
static void fec_stop(struct net_device *dev);
static void fec_set_mac_address(struct net_device *dev);


/* MII processing.  We keep this as simple as possible.  Requests are
 * placed on the list (if there is room).  When the request is finished
 * by the MII, an optional function may be called.
 */
typedef struct mii_list {
	uint	mii_regval;
	void	(*mii_func)(uint val, struct net_device *dev);
	struct	mii_list *mii_next;
} mii_list_t;

#define		NMII	20
static mii_list_t	mii_cmds[NMII];
static mii_list_t	*mii_free;
static mii_list_t	*mii_head;
static mii_list_t	*mii_tail;

static int	mii_queue(struct net_device *dev, int request,
				void (*func)(uint, struct net_device *));

/* Make MII read/write commands for the FEC.
*/
#define mk_mii_read(REG)	(0x60020000 | ((REG & 0x1f) << 18))
#define mk_mii_write(REG, VAL)	(0x50020000 | ((REG & 0x1f) << 18) | \
						(VAL & 0xffff))
#define mk_mii_end	0
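
/* Editorial note on the command words above, not from the original source:
 * they follow the IEEE 802.3 clause-22 MII management frame layout expected
 * by the FEC's MII data register:
 *
 *	bits 31-30  ST   start of frame (always 01)
 *	bits 29-28  OP   opcode (10 = read, 01 = write)
 *	bits 27-23  PHY address (OR'ed in later by mii_queue())
 *	bits 22-18  register address
 *	bits 17-16  TA   turnaround (10)
 *	bits 15-0   data (write value, or read result)
 *
 * So, for example, mk_mii_read(MII_REG_SR) expands to
 * 0x60020000 | (1 << 18) == 0x60060000.
 */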
/* Transmitter timeout.
*/
#define TX_TIMEOUT (2*HZ)

/* Register definitions for the PHY.
*/

#define MII_REG_CR	    0	/* Control Register			    */
#define MII_REG_SR	    1	/* Status Register			    */
#define MII_REG_PHYIR1	    2	/* PHY Identification Register 1	    */
#define MII_REG_PHYIR2	    3	/* PHY Identification Register 2	    */
#define MII_REG_ANAR	    4	/* A-N Advertisement Register		    */
#define MII_REG_ANLPAR	    5	/* A-N Link Partner Ability Register	    */
#define MII_REG_ANER	    6	/* A-N Expansion Register		    */
#define MII_REG_ANNPTR	    7	/* A-N Next Page Transmit Register	    */
#define MII_REG_ANLPRNPR    8	/* A-N Link Partner Received Next Page Reg. */

/* values for phy_status */

#define PHY_CONF_ANE	0x0001	/* 1 auto-negotiation enabled */
#define PHY_CONF_LOOP	0x0002	/* 1 loopback mode enabled */
#define PHY_CONF_SPMASK	0x00f0	/* mask for speed */
#define PHY_CONF_10HDX	0x0010	/* 10 Mbit half duplex supported */
#define PHY_CONF_10FDX	0x0020	/* 10 Mbit full duplex supported */
#define PHY_CONF_100HDX	0x0040	/* 100 Mbit half duplex supported */
#define PHY_CONF_100FDX	0x0080	/* 100 Mbit full duplex supported */

#define PHY_STAT_LINK	0x0100	/* 1 up - 0 down */
#define PHY_STAT_FAULT	0x0200	/* 1 remote fault */
#define PHY_STAT_ANC	0x0400	/* 1 auto-negotiation complete */
#define PHY_STAT_SPMASK	0xf000	/* mask for speed */
#define PHY_STAT_10HDX	0x1000	/* 10 Mbit half duplex selected */
#define PHY_STAT_10FDX	0x2000	/* 10 Mbit full duplex selected */
#define PHY_STAT_100HDX	0x4000	/* 100 Mbit half duplex selected */
#define PHY_STAT_100FDX	0x8000	/* 100 Mbit full duplex selected */


static int
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fec_enet_private *fep;
	volatile fec_t	*fecp;
	volatile cbd_t	*bdp;

	fep = netdev_priv(dev);
	fecp = (volatile fec_t*)dev->base_addr;

	if (!fep->link) {
		/* Link is down or autonegotiation is in progress. */
		return 1;
	}

	/* Fill in a Tx ring entry */
	bdp = fep->cur_tx;

#ifndef final_version
	if (bdp->cbd_sc & BD_ENET_TX_READY) {
		/* Oops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since dev->tbusy should be set.
		 */
		printk("%s: tx queue full!\n", dev->name);
		return 1;
	}
#endif

	/* Clear all of the status flags.
	 */
	bdp->cbd_sc &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer.
	*/
	bdp->cbd_bufaddr = __pa(skb->data);
	bdp->cbd_datlen = skb->len;

	/*
	 * On some FEC implementations data must be aligned on
	 * 4-byte boundaries.  Use bounce buffers to copy data
	 * and get it aligned.  Ugh.
	 */
	if (bdp->cbd_bufaddr & 0x3) {
		unsigned int index;
		index = bdp - fep->tx_bd_base;
		memcpy(fep->tx_bounce[index], (void *) bdp->cbd_bufaddr, bdp->cbd_datlen);
		bdp->cbd_bufaddr = __pa(fep->tx_bounce[index]);
	}

	/* Save skb pointer.
	*/
	fep->tx_skbuff[fep->skb_cur] = skb;

	fep->stats.tx_bytes += skb->len;
	fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;

	/* Push the data cache so the CPM does not get stale memory
	 * data.
	 */
	flush_dcache_range((unsigned long)skb->data,
			   (unsigned long)skb->data + skb->len);

	spin_lock_irq(&fep->lock);

	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */

	bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
			| BD_ENET_TX_LAST | BD_ENET_TX_TC);

	dev->trans_start = jiffies;

	/* Trigger transmission start */
	fecp->fec_x_des_active = 0x01000000;

	/* If this was the last BD in the ring, start at the beginning again.
	 */
	if (bdp->cbd_sc & BD_ENET_TX_WRAP) {
		bdp = fep->tx_bd_base;
	} else {
		bdp++;
	}

	if (bdp == fep->dirty_tx) {
		fep->tx_full = 1;
		netif_stop_queue(dev);
	}

	fep->cur_tx = (cbd_t *)bdp;

	spin_unlock_irq(&fep->lock);

	return 0;
}
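
/* Editorial example, not from the original source: how cur_tx and dirty_tx
 * distinguish "empty" from "full".  With a hypothetical 4-entry ring, after
 * queueing three frames and reclaiming none:
 *
 *	dirty_tx -> [READY] [READY] [READY] [free] <- cur_tx
 *
 * Queueing one more frame advances cur_tx onto dirty_tx, so the test above
 * sets tx_full and stops the queue; when the pointers meet with tx_full == 0
 * the ring is instead completely empty.  The READY bit in the descriptor
 * itself is the final arbiter, as the comment above struct fec_enet_private
 * notes.
 */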
static void
fec_timeout(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	printk("%s: transmit timed out.\n", dev->name);
	fep->stats.tx_errors++;
#ifndef final_version
	{
	int	i;
	cbd_t	*bdp;

	printk("Ring data dump: cur_tx %lx%s, dirty_tx %lx cur_rx: %lx\n",
	       (unsigned long)fep->cur_tx, fep->tx_full ? " (full)" : "",
	       (unsigned long)fep->dirty_tx,
	       (unsigned long)fep->cur_rx);

	bdp = fep->tx_bd_base;
	printk(" tx: %u buffers\n",  TX_RING_SIZE);
	for (i = 0 ; i < TX_RING_SIZE; i++) {
		printk("  %08x: %04x %04x %08x\n",
		       (uint) bdp,
		       bdp->cbd_sc,
		       bdp->cbd_datlen,
		       (int) bdp->cbd_bufaddr);
		bdp++;
	}

	bdp = fep->rx_bd_base;
	printk(" rx: %lu buffers\n",  (unsigned long) RX_RING_SIZE);
	for (i = 0 ; i < RX_RING_SIZE; i++) {
		printk("  %08x: %04x %04x %08x\n",
		       (uint) bdp,
		       bdp->cbd_sc,
		       bdp->cbd_datlen,
		       (int) bdp->cbd_bufaddr);
		bdp++;
	}
	}
#endif
	fec_restart(dev, fep->full_duplex);
	netif_wake_queue(dev);
}

/* The interrupt handler.
 * This is called from the MPC core interrupt.
 */
static irqreturn_t
fec_enet_interrupt(int irq, void * dev_id, struct pt_regs * regs)
{
	struct	net_device *dev = dev_id;
	volatile fec_t	*fecp;
	uint	int_events;
	int	handled = 0;

	fecp = (volatile fec_t*)dev->base_addr;

	/* Get the interrupt events that caused us to be here.
	 */
	while ((int_events = fecp->fec_ievent) != 0) {
		fecp->fec_ievent = int_events;

		/* Handle receive event in its own function.
		 */
		if (int_events & FEC_ENET_RXF) {
			handled = 1;
			fec_enet_rx(dev);
		}

		/* Transmit OK, or non-fatal error.  Update the buffer
		   descriptors.  FEC handles all errors; we just discover
		   them as part of the transmit process.
		 */
		if (int_events & FEC_ENET_TXF) {
			handled = 1;
			fec_enet_tx(dev);
		}

		if (int_events & FEC_ENET_MII) {
			handled = 1;
			fec_enet_mii(dev);
		}

	}
	return IRQ_RETVAL(handled);
}


static void
fec_enet_tx(struct net_device *dev)
{
	struct	fec_enet_private *fep;
	volatile cbd_t	*bdp;
	struct	sk_buff	*skb;

	fep = netdev_priv(dev);
	spin_lock(&fep->lock);
	bdp = fep->dirty_tx;

	while ((bdp->cbd_sc & BD_ENET_TX_READY) == 0) {
		if (bdp == fep->cur_tx && fep->tx_full == 0) break;

		skb = fep->tx_skbuff[fep->skb_dirty];
		/* Check for errors. */
		if (bdp->cbd_sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				   BD_ENET_TX_RL | BD_ENET_TX_UN |
				   BD_ENET_TX_CSL)) {
			fep->stats.tx_errors++;
			if (bdp->cbd_sc & BD_ENET_TX_HB)  /* No heartbeat */
				fep->stats.tx_heartbeat_errors++;
			if (bdp->cbd_sc & BD_ENET_TX_LC)  /* Late collision */
				fep->stats.tx_window_errors++;
			if (bdp->cbd_sc & BD_ENET_TX_RL)  /* Retrans limit */
				fep->stats.tx_aborted_errors++;
			if (bdp->cbd_sc & BD_ENET_TX_UN)  /* Underrun */
				fep->stats.tx_fifo_errors++;
			if (bdp->cbd_sc & BD_ENET_TX_CSL) /* Carrier lost */
				fep->stats.tx_carrier_errors++;
		} else {
			fep->stats.tx_packets++;
		}

#ifndef final_version
		if (bdp->cbd_sc & BD_ENET_TX_READY)
			printk("HEY! Enet xmit interrupt and TX_READY.\n");
#endif
		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (bdp->cbd_sc & BD_ENET_TX_DEF)
			fep->stats.collisions++;

		/* Free the sk buffer associated with this last transmit.
		 */
		dev_kfree_skb_any(skb);
		fep->tx_skbuff[fep->skb_dirty] = NULL;
		fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;

		/* Update pointer to next buffer descriptor to be transmitted.
		 */
		if (bdp->cbd_sc & BD_ENET_TX_WRAP)
			bdp = fep->tx_bd_base;
		else
			bdp++;

		/* Since we have freed up a buffer, the ring is no longer
		 * full.
		 */
		if (fep->tx_full) {
			fep->tx_full = 0;
			if (netif_queue_stopped(dev))
				netif_wake_queue(dev);
		}
	}
	fep->dirty_tx = (cbd_t *)bdp;
	spin_unlock(&fep->lock);
}


/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static void
fec_enet_rx(struct net_device *dev)
{
	struct	fec_enet_private *fep;
	volatile fec_t	*fecp;
	volatile cbd_t	*bdp;
	struct	sk_buff	*skb;
	ushort	pkt_len;
	__u8 *data;

	fep = netdev_priv(dev);
	fecp = (volatile fec_t*)dev->base_addr;

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {

#ifndef final_version
		/* Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((bdp->cbd_sc & BD_ENET_RX_LAST) == 0)
			printk("FEC ENET: rcv is not +last\n");
#endif

		if (!fep->opened)
			goto rx_processing_done;

		/* Check for errors. */
		if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
				   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
				/* Frame too long or too short. */
				fep->stats.rx_length_errors++;
			}
			if (bdp->cbd_sc & BD_ENET_RX_NO)	/* Frame alignment */
				fep->stats.rx_frame_errors++;
			if (bdp->cbd_sc & BD_ENET_RX_CR)	/* CRC Error */
				fep->stats.rx_crc_errors++;
			if (bdp->cbd_sc & BD_ENET_RX_OV)	/* FIFO overrun */
				fep->stats.rx_crc_errors++;
		}

		/* Report late collisions as a frame error.
		 * On this error, the BD is closed, but we don't know what we
		 * have in the buffer.  So, just drop this frame on the floor.
		 */
		if (bdp->cbd_sc & BD_ENET_RX_CL) {
			fep->stats.rx_errors++;
			fep->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame.
		 */
		fep->stats.rx_packets++;
		pkt_len = bdp->cbd_datlen;
		fep->stats.rx_bytes += pkt_len;
		data = (__u8*)__va(bdp->cbd_bufaddr);

		/* This does 16 byte alignment, exactly what we need.
		 * The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		skb = dev_alloc_skb(pkt_len-4);

		if (skb == NULL) {
			printk("%s: Memory squeeze, dropping packet.\n", dev->name);
			fep->stats.rx_dropped++;
		} else {
			skb->dev = dev;
			skb_put(skb,pkt_len-4);	/* Make room */
			eth_copy_and_sum(skb,
					 (unsigned char *)__va(bdp->cbd_bufaddr),
					 pkt_len-4, 0);
			skb->protocol=eth_type_trans(skb,dev);
			netif_rx(skb);
		}
  rx_processing_done:

		/* Clear the status flags for this buffer.
		*/
		bdp->cbd_sc &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty.
		*/
		bdp->cbd_sc |= BD_ENET_RX_EMPTY;

		/* Update BD pointer to next entry.
		*/
		if (bdp->cbd_sc & BD_ENET_RX_WRAP)
			bdp = fep->rx_bd_base;
		else
			bdp++;

#if 1
		/* Doing this here will keep the FEC running while we process
		 * incoming frames.  On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		fecp->fec_r_des_active = 0x01000000;
#endif
	} /* while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) */
	fep->cur_rx = (cbd_t *)bdp;

#if 0
	/* Doing this here will allow us to process all frames in the
	 * ring before the FEC is allowed to put more there.  On a heavily
	 * loaded network, some frames may be lost.  Unfortunately, this
	 * increases the interrupt overhead since we can potentially work
	 * our way back to the interrupt return only to come right back
	 * here.
	 */
	fecp->fec_r_des_active = 0x01000000;
#endif
}


static void
fec_enet_mii(struct net_device *dev)
{
	struct	fec_enet_private *fep;
	volatile fec_t	*ep;
	mii_list_t	*mip;
	uint		mii_reg;

	fep = netdev_priv(dev);
	ep = fep->hwp;
	mii_reg = ep->fec_mii_data;

	if ((mip = mii_head) == NULL) {
		printk("MII and no head!\n");
		return;
	}

	if (mip->mii_func != NULL)
		(*(mip->mii_func))(mii_reg, dev);

	mii_head = mip->mii_next;
	mip->mii_next = mii_free;
	mii_free = mip;

	if ((mip = mii_head) != NULL)
		ep->fec_mii_data = mip->mii_regval;
}
static int
mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_device *))
{
	struct fec_enet_private *fep;
	unsigned long	flags;
	mii_list_t	*mip;
	int		retval;

	/* Add PHY address to register command.
	*/
	fep = netdev_priv(dev);
	regval |= fep->phy_addr << 23;

	retval = 0;

	save_flags(flags);
	cli();

	if ((mip = mii_free) != NULL) {
		mii_free = mip->mii_next;
		mip->mii_regval = regval;
		mip->mii_func = func;
		mip->mii_next = NULL;
		if (mii_head) {
			mii_tail->mii_next = mip;
			mii_tail = mip;
		} else {
			mii_head = mii_tail = mip;
			fep->hwp->fec_mii_data = regval;
		}
	} else {
		retval = 1;
	}

	restore_flags(flags);

	return retval;
}
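
/* Editorial usage sketch, mirroring existing callers such as
 * mii_discover_phy() below: queue an MII status read and let the parser run
 * from interrupt context once the FEC raises FEC_ENET_MII.  A non-zero
 * return means the free list was exhausted and the request was dropped.
 */
#if 0
	if (mii_queue(dev, mk_mii_read(MII_REG_SR), mii_parse_sr))
		printk("FEC: MII request list full, read dropped.\n");
#endif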
static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c)
{
	int k;

	if(!c)
		return;

	for(k = 0; (c+k)->mii_data != mk_mii_end; k++) {
		mii_queue(dev, (c+k)->mii_data, (c+k)->funct);
	}
}

static void mii_parse_sr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC);

	if (mii_reg & 0x0004)
		status |= PHY_STAT_LINK;
	if (mii_reg & 0x0010)
		status |= PHY_STAT_FAULT;
	if (mii_reg & 0x0020)
		status |= PHY_STAT_ANC;

	*s = status;
}

static void mii_parse_cr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_CONF_ANE | PHY_CONF_LOOP);

	if (mii_reg & 0x1000)
		status |= PHY_CONF_ANE;
	if (mii_reg & 0x4000)
		status |= PHY_CONF_LOOP;
	*s = status;
}

static void mii_parse_anar(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_CONF_SPMASK);

	if (mii_reg & 0x0020)
		status |= PHY_CONF_10HDX;
	if (mii_reg & 0x0040)
		status |= PHY_CONF_10FDX;
	if (mii_reg & 0x0080)
		status |= PHY_CONF_100HDX;
	if (mii_reg & 0x0100)
		status |= PHY_CONF_100FDX;
	*s = status;
}

/* ------------------------------------------------------------------------- */
/* The Level One LXT970 is used by many boards				     */

#define MII_LXT970_MIRROR    16  /* Mirror register           */
#define MII_LXT970_IER       17  /* Interrupt Enable Register */
#define MII_LXT970_ISR       18  /* Interrupt Status Register */
#define MII_LXT970_CONFIG    19  /* Configuration Register    */
#define MII_LXT970_CSR       20  /* Chip Status Register      */

static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_SPMASK);
	if (mii_reg & 0x0800) {
		if (mii_reg & 0x1000)
			status |= PHY_STAT_100FDX;
		else
			status |= PHY_STAT_100HDX;
	} else {
		if (mii_reg & 0x1000)
			status |= PHY_STAT_10FDX;
		else
			status |= PHY_STAT_10HDX;
	}
	*s = status;
}

static phy_cmd_t const phy_cmd_lxt970_config[] = {
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt970_startup[] = { /* enable interrupts */
		{ mk_mii_write(MII_LXT970_IER, 0x0002), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt970_ack_int[] = {
		/* read SR and ISR to acknowledge */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_LXT970_ISR), NULL },

		/* find out the current status */
		{ mk_mii_read(MII_LXT970_CSR), mii_parse_lxt970_csr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt970_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_LXT970_IER, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_lxt970 = {
	.id = 0x07810000,
	.name = "LXT970",
	.config = phy_cmd_lxt970_config,
	.startup = phy_cmd_lxt970_startup,
	.ack_int = phy_cmd_lxt970_ack_int,
	.shutdown = phy_cmd_lxt970_shutdown
};

/* ------------------------------------------------------------------------- */
/* The Level One LXT971 is used on some of my custom boards		     */

/* register definitions for the 971 */

#define MII_LXT971_PCR       16  /* Port Control Register     */
#define MII_LXT971_SR2       17  /* Status Register 2         */
#define MII_LXT971_IER       18  /* Interrupt Enable Register */
#define MII_LXT971_ISR       19  /* Interrupt Status Register */
#define MII_LXT971_LCR       20  /* LED Control Register      */
#define MII_LXT971_TCR       30  /* Transmit Control Register */

/*
 * I had some nice ideas of running the MDIO faster...
 * The 971 should support 8 MHz and I tried it, but things acted really
 * weird, so 2.5 MHz ought to be enough for anyone...
 */

static void mii_parse_lxt971_sr2(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC);

	if (mii_reg & 0x0400) {
		fep->link = 1;
		status |= PHY_STAT_LINK;
	} else {
		fep->link = 0;
	}
	if (mii_reg & 0x0080)
		status |= PHY_STAT_ANC;
	if (mii_reg & 0x4000) {
		if (mii_reg & 0x0200)
			status |= PHY_STAT_100FDX;
		else
			status |= PHY_STAT_100HDX;
	} else {
		if (mii_reg & 0x0200)
			status |= PHY_STAT_10FDX;
		else
			status |= PHY_STAT_10HDX;
	}
	if (mii_reg & 0x0008)
		status |= PHY_STAT_FAULT;

	*s = status;
}

static phy_cmd_t const phy_cmd_lxt971_config[] = {
		/* limit to 10MBit because my prototype board
		 * doesn't work with 100. */
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt971_startup[] = {  /* enable interrupts */
		{ mk_mii_write(MII_LXT971_IER, 0x00f2), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_write(MII_LXT971_LCR, 0xd422), NULL }, /* LED config */
		/* Somehow the 971 reports that the link is down on the
		 * first read after power-up.
		 * Read here to get a valid value in ack_int. */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt971_ack_int[] = {
		/* acknowledge the interrupt before reading status! */
		{ mk_mii_read(MII_LXT971_ISR), NULL },
		/* find out the current status */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt971_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_LXT971_IER, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_lxt971 = {
	.id = 0x0001378e,
	.name = "LXT971",
	.config = phy_cmd_lxt971_config,
	.startup = phy_cmd_lxt971_startup,
	.ack_int = phy_cmd_lxt971_ack_int,
	.shutdown = phy_cmd_lxt971_shutdown
};

/* ------------------------------------------------------------------------- */
/* The Quality Semiconductor QS6612 is used on the RPX CLLF		     */

/* register definitions */

#define MII_QS6612_MCR       17  /* Mode Control Register      */
#define MII_QS6612_FTR       27  /* Factory Test Register      */
#define MII_QS6612_MCO       28  /* Misc. Control Register     */
#define MII_QS6612_ISR       29  /* Interrupt Source Register  */
#define MII_QS6612_IMR       30  /* Interrupt Mask Register    */
#define MII_QS6612_PCR       31  /* 100BaseTx PHY Control Reg. */

static void mii_parse_qs6612_pcr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_SPMASK);

	switch((mii_reg >> 2) & 7) {
	case 1: status |= PHY_STAT_10HDX; break;
	case 2: status |= PHY_STAT_100HDX; break;
	case 5: status |= PHY_STAT_10FDX; break;
	case 6: status |= PHY_STAT_100FDX; break;
	}

	*s = status;
}

static phy_cmd_t const phy_cmd_qs6612_config[] = {
		/* The PHY powers up isolated on the RPX,
		 * so send a command to allow operation.
		 */
		{ mk_mii_write(MII_QS6612_PCR, 0x0dc0), NULL },

		/* parse cr and anar to get some info */
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_qs6612_startup[] = {  /* enable interrupts */
		{ mk_mii_write(MII_QS6612_IMR, 0x003a), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_qs6612_ack_int[] = {
		/* we need to read ISR, SR and ANER to acknowledge */
		{ mk_mii_read(MII_QS6612_ISR), NULL },
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_REG_ANER), NULL },

		/* read pcr to get info */
		{ mk_mii_read(MII_QS6612_PCR), mii_parse_qs6612_pcr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_qs6612_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_QS6612_IMR, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_qs6612 = {
	.id = 0x00181440,
	.name = "QS6612",
	.config = phy_cmd_qs6612_config,
	.startup = phy_cmd_qs6612_startup,
	.ack_int = phy_cmd_qs6612_ack_int,
	.shutdown = phy_cmd_qs6612_shutdown
};

/* ------------------------------------------------------------------------- */
/* AMD AM79C874 phy							     */

/* register definitions for the 874 */

#define MII_AM79C874_MFR       16  /* Miscellaneous Feature Register */
#define MII_AM79C874_ICSR      17  /* Interrupt/Status Register      */
#define MII_AM79C874_DR        18  /* Diagnostic Register            */
#define MII_AM79C874_PMLR      19  /* Power and Loopback Register    */
#define MII_AM79C874_MCR       21  /* Mode Control Register          */
#define MII_AM79C874_DC        23  /* Disconnect Counter             */
#define MII_AM79C874_REC       24  /* Receive Error Counter          */

static void mii_parse_am79c874_dr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_ANC);

	if (mii_reg & 0x0080)
		status |= PHY_STAT_ANC;
	if (mii_reg & 0x0400)
		status |= ((mii_reg & 0x0800) ? PHY_STAT_100FDX : PHY_STAT_100HDX);
	else
		status |= ((mii_reg & 0x0800) ? PHY_STAT_10FDX : PHY_STAT_10HDX);

	*s = status;
}

static phy_cmd_t const phy_cmd_am79c874_config[] = {
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_am79c874_startup[] = {  /* enable interrupts */
		{ mk_mii_write(MII_AM79C874_ICSR, 0xff00), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_am79c874_ack_int[] = {
		/* find out the current status */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
		/* we only need to read ISR to acknowledge */
		{ mk_mii_read(MII_AM79C874_ICSR), NULL },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_am79c874_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_AM79C874_ICSR, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_am79c874 = {
	.id = 0x00022561,
	.name = "AM79C874",
	.config = phy_cmd_am79c874_config,
	.startup = phy_cmd_am79c874_startup,
	.ack_int = phy_cmd_am79c874_ack_int,
	.shutdown = phy_cmd_am79c874_shutdown
};


/* ------------------------------------------------------------------------- */
/* Kendin KS8721BL phy							     */

/* register definitions for the 8721 */

#define MII_KS8721BL_RXERCR	21
#define MII_KS8721BL_ICSR	22
#define MII_KS8721BL_PHYCR	31

static phy_cmd_t const phy_cmd_ks8721bl_config[] = {
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_ks8721bl_startup[] = {  /* enable interrupts */
		{ mk_mii_write(MII_KS8721BL_ICSR, 0xff00), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_ks8721bl_ack_int[] = {
		/* find out the current status */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		/* we only need to read ISR to acknowledge */
		{ mk_mii_read(MII_KS8721BL_ICSR), NULL },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_ks8721bl_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_KS8721BL_ICSR, 0x0000), NULL },
		{ mk_mii_end, }
	};
static phy_info_t const phy_info_ks8721bl = {
	.id = 0x00022161,
	.name = "KS8721BL",
	.config = phy_cmd_ks8721bl_config,
	.startup = phy_cmd_ks8721bl_startup,
	.ack_int = phy_cmd_ks8721bl_ack_int,
	.shutdown = phy_cmd_ks8721bl_shutdown
};

/* ------------------------------------------------------------------------- */

static phy_info_t const * const phy_info[] = {
	&phy_info_lxt970,
	&phy_info_lxt971,
	&phy_info_qs6612,
	&phy_info_am79c874,
	&phy_info_ks8721bl,
	NULL
};

/* ------------------------------------------------------------------------- */

#ifdef CONFIG_RPXCLASSIC
static void
mii_link_interrupt(void *dev_id);
#else
static irqreturn_t
mii_link_interrupt(int irq, void * dev_id, struct pt_regs * regs);
#endif

#if defined(CONFIG_M5272)

/*
 * Code specific to Coldfire 5272 setup.
 */
static void __inline__ fec_request_intrs(struct net_device *dev)
{
	volatile unsigned long *icrp;
	static const struct idesc {
		char *name;
		unsigned short irq;
		irqreturn_t (*handler)(int, void *, struct pt_regs *);
	} *idp, id[] = {
		{ "fec(RX)", 86, fec_enet_interrupt },
		{ "fec(TX)", 87, fec_enet_interrupt },
		{ "fec(OTHER)", 88, fec_enet_interrupt },
		{ "fec(MII)", 66, mii_link_interrupt },
		{ NULL },
	};

	/* Setup interrupt handlers. */
	for (idp = id; idp->name; idp++) {
		if (request_irq(idp->irq, idp->handler, 0, idp->name, dev) != 0)
			printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, idp->irq);
	}

	/* Unmask interrupt at ColdFire 5272 SIM */
	icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR3);
	*icrp = 0x00000ddd;
	icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1);
	*icrp = (*icrp & 0x70777777) | 0x0d000000;
}

static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep)
{
	volatile fec_t *fecp;

	fecp = fep->hwp;
	fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04;
	fecp->fec_x_cntrl = 0x00;

	/*
	 * Set MII speed to 2.5 MHz
	 * See 5272 manual section 11.5.8: MSCR
	 */
	fep->phy_speed = ((((MCF_CLK / 4) / (2500000 / 10)) + 5) / 10) * 2;
	fecp->fec_mii_speed = fep->phy_speed;

	fec_restart(dev, 0);
}
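
/* Editorial worked example, assuming MCF_CLK = 66000000 (a common 5272
 * system clock; boards vary): (66000000/4) / (2500000/10) = 16500000 /
 * 250000 = 66, then (66 + 5) / 10 = 7 in integer arithmetic, and * 2 gives
 * an MSCR value of 14.  The "+ 5) / 10" step rounds to the nearest divider,
 * and the final "* 2" always yields an even register value, matching the
 * "& 0x7e" masking used in the MPC8xx path below, so MDC stays at or below
 * the 2.5 MHz MII management clock limit noted above.
 */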
static void __inline__ fec_get_mac(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile fec_t *fecp;
	unsigned char *iap, tmpaddr[ETH_ALEN];

	fecp = fep->hwp;

	if (FEC_FLASHMAC) {
		/*
		 * Get MAC address from FLASH.
		 * If it is all 1's or 0's, use the default.
		 */
		iap = (unsigned char *)FEC_FLASHMAC;
		if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
		    (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
			iap = fec_mac_default;
		if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
		    (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
			iap = fec_mac_default;
	} else {
		*((unsigned long *) &tmpaddr[0]) = fecp->fec_addr_low;
		*((unsigned short *) &tmpaddr[4]) = (fecp->fec_addr_high >> 16);
		iap = &tmpaddr[0];
	}

	memcpy(dev->dev_addr, iap, ETH_ALEN);

	/* Adjust MAC if using default MAC address */
	if (iap == fec_mac_default)
		dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
}

static void __inline__ fec_enable_phy_intr(void)
{
}

static void __inline__ fec_disable_phy_intr(void)
{
	volatile unsigned long *icrp;
	icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1);
	*icrp = (*icrp & 0x70777777) | 0x08000000;
}

static void __inline__ fec_phy_ack_intr(void)
{
	volatile unsigned long *icrp;
	/* Acknowledge the interrupt */
	icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1);
	*icrp = (*icrp & 0x77777777) | 0x08000000;
}

static void __inline__ fec_localhw_setup(void)
{
}

/*
 * Do not need to make region uncached on 5272.
 */
static void __inline__ fec_uncache(unsigned long addr)
{
}

/* ------------------------------------------------------------------------- */

#elif defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x)

/*
 * Code specific to Coldfire 5230/5231/5232/5234/5235,
 * the 5270/5271/5274/5275 and 5280/5282 setups.
 */
static void __inline__ fec_request_intrs(struct net_device *dev)
{
	struct fec_enet_private *fep;
	int b;
	static const struct idesc {
		char *name;
		unsigned short irq;
	} *idp, id[] = {
		{ "fec(TXF)", 23 },
		{ "fec(TXB)", 24 },
		{ "fec(TXFIFO)", 25 },
		{ "fec(TXCR)", 26 },
		{ "fec(RXF)", 27 },
		{ "fec(RXB)", 28 },
		{ "fec(MII)", 29 },
		{ "fec(LC)", 30 },
		{ "fec(HBERR)", 31 },
		{ "fec(GRA)", 32 },
		{ "fec(EBERR)", 33 },
		{ "fec(BABT)", 34 },
		{ "fec(BABR)", 35 },
		{ NULL },
	};

	fep = netdev_priv(dev);
	b = (fep->index) ? 128 : 64;

	/* Setup interrupt handlers. */
	for (idp = id; idp->name; idp++) {
		if (request_irq(b+idp->irq, fec_enet_interrupt, 0, idp->name, dev) != 0)
			printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, b+idp->irq);
	}

	/* Unmask interrupts at ColdFire 5280/5282 interrupt controller */
	{
		volatile unsigned char  *icrp;
		volatile unsigned long  *imrp;
		int i;

		b = (fep->index) ? MCFICM_INTC1 : MCFICM_INTC0;
		icrp = (volatile unsigned char *) (MCF_IPSBAR + b +
			MCFINTC_ICR0);
		for (i = 23; (i < 36); i++)
			icrp[i] = 0x23;

		imrp = (volatile unsigned long *) (MCF_IPSBAR + b +
			MCFINTC_IMRH);
		*imrp &= ~0x0000000f;
		imrp = (volatile unsigned long *) (MCF_IPSBAR + b +
			MCFINTC_IMRL);
		*imrp &= ~0xff800001;
	}

#if defined(CONFIG_M528x)
	/* Set up gpio outputs for MII lines */
	{
		volatile u16 *gpio_paspar;
		volatile u8 *gpio_pehlpar;

		gpio_paspar = (volatile u16 *) (MCF_IPSBAR + 0x100056);
		gpio_pehlpar = (volatile u8 *) (MCF_IPSBAR + 0x100058);
		*gpio_paspar |= 0x0f00;
		*gpio_pehlpar = 0xc0;
	}
#endif
}

static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep)
{
	volatile fec_t *fecp;

	fecp = fep->hwp;
	fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04;
	fecp->fec_x_cntrl = 0x00;

	/*
	 * Set MII speed to 2.5 MHz
	 * See 5282 manual section 17.5.4.7: MSCR
	 */
	fep->phy_speed = ((((MCF_CLK / 2) / (2500000 / 10)) + 5) / 10) * 2;
	fecp->fec_mii_speed = fep->phy_speed;

	fec_restart(dev, 0);
}

static void __inline__ fec_get_mac(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile fec_t *fecp;
	unsigned char *iap, tmpaddr[ETH_ALEN];

	fecp = fep->hwp;

	if (FEC_FLASHMAC) {
		/*
		 * Get MAC address from FLASH.
		 * If it is all 1's or 0's, use the default.
		 */
		iap = (unsigned char *)FEC_FLASHMAC;
		if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
		    (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
			iap = fec_mac_default;
		if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
		    (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
			iap = fec_mac_default;
	} else {
		*((unsigned long *) &tmpaddr[0]) = fecp->fec_addr_low;
		*((unsigned short *) &tmpaddr[4]) = (fecp->fec_addr_high >> 16);
		iap = &tmpaddr[0];
	}

	memcpy(dev->dev_addr, iap, ETH_ALEN);

	/* Adjust MAC if using default MAC address */
	if (iap == fec_mac_default)
		dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
}

static void __inline__ fec_enable_phy_intr(void)
{
}

static void __inline__ fec_disable_phy_intr(void)
{
}

static void __inline__ fec_phy_ack_intr(void)
{
}

static void __inline__ fec_localhw_setup(void)
{
}

/*
 * As on the 5272, we do not need to make the region uncached on these
 * ColdFire parts.
 */
static void __inline__ fec_uncache(unsigned long addr)
{
}

/* ------------------------------------------------------------------------- */

#else

/*
 * Code specific to the MPC860T setup.
 */
static void __inline__ fec_request_intrs(struct net_device *dev)
{
	volatile immap_t *immap;

	immap = (immap_t *)IMAP_ADDR;	/* pointer to internal registers */

	if (request_8xxirq(FEC_INTERRUPT, fec_enet_interrupt, 0, "fec", dev) != 0)
		panic("Could not allocate FEC IRQ!");

#ifdef CONFIG_RPXCLASSIC
	/* Make Port C, bit 15 an input that causes interrupts.
	*/
	immap->im_ioport.iop_pcpar &= ~0x0001;
	immap->im_ioport.iop_pcdir &= ~0x0001;
	immap->im_ioport.iop_pcso  &= ~0x0001;
	immap->im_ioport.iop_pcint |=  0x0001;
	cpm_install_handler(CPMVEC_PIO_PC15, mii_link_interrupt, dev);

	/* Make LEDS reflect Link status.
	*/
	*((uint *) RPX_CSR_ADDR) &= ~BCSR2_FETHLEDMODE;
#endif
#ifdef CONFIG_FADS
	if (request_8xxirq(SIU_IRQ2, mii_link_interrupt, 0, "mii", dev) != 0)
		panic("Could not allocate MII IRQ!");
#endif
}

static void __inline__ fec_get_mac(struct net_device *dev)
{
	bd_t *bd;

	bd = (bd_t *)__res;
	memcpy(dev->dev_addr, bd->bi_enetaddr, ETH_ALEN);

#ifdef CONFIG_RPXCLASSIC
	/* The Embedded Planet boards have only one MAC address in
	 * the EEPROM, but can have two Ethernet ports.  For the
	 * FEC port, we create another address by setting one of
	 * the address bits above something that would have (up to
	 * now) been allocated.
	 */
	dev->dev_addr[3] |= 0x80;
#endif
}

static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep)
{
	extern uint _get_IMMR(void);
	volatile immap_t *immap;
	volatile fec_t *fecp;
	bd_t *bd = (bd_t *)__res;	/* board info, as in fec_get_mac() */

	fecp = fep->hwp;
	immap = (immap_t *)IMAP_ADDR;	/* pointer to internal registers */

	/* Configure all of port D for MII.
	*/
	immap->im_ioport.iop_pdpar = 0x1fff;

	/* Bits moved from Rev. D onward.
	*/
	if ((_get_IMMR() & 0xffff) < 0x0501)
		immap->im_ioport.iop_pddir = 0x1c58;	/* Pre rev. D */
	else
		immap->im_ioport.iop_pddir = 0x1fff;	/* Rev. D and later */

	/* Set MII speed to 2.5 MHz
	*/
	fecp->fec_mii_speed = fep->phy_speed =
		((bd->bi_busfreq * 1000000) / 2500000) & 0x7e;
}

static void __inline__ fec_enable_phy_intr(void)
{
	volatile fec_t *fecp;

	/* There is only one FEC on the MPC8xx, at a fixed address
	 * (see fec_hw[] above), so we do not need the private state here.
	 */
	fecp = &(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec);

	/* Enable MII command finished interrupt
	*/
	fecp->fec_ivec = (FEC_INTERRUPT/2) << 29;
}

static void __inline__ fec_disable_phy_intr(void)
{
}

static void __inline__ fec_phy_ack_intr(void)
{
}

static void __inline__ fec_localhw_setup(void)
{
	volatile fec_t *fecp;

	fecp = &(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec);
	fecp->fec_r_hash = PKT_MAXBUF_SIZE;
	/* Enable big endian and don't care about SDMA FC.
	*/
	fecp->fec_fun_code = 0x78000000;
}

static void __inline__ fec_uncache(unsigned long addr)
{
	pte_t *pte;
	pte = va_to_pte(addr);
	pte_val(*pte) |= _PAGE_NO_CACHE;
	flush_tlb_page(init_mm.mmap, addr);
}

#endif

/* ------------------------------------------------------------------------- */

static void mii_display_status(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);

	if (!fep->link && !fep->old_link) {
		/* Link is still down - don't print anything */
		return;
	}

	printk("%s: status: ", dev->name);

	if (!fep->link) {
		printk("link down");
	} else {
		printk("link up");

		switch(*s & PHY_STAT_SPMASK) {
		case PHY_STAT_100FDX: printk(", 100MBit Full Duplex"); break;
		case PHY_STAT_100HDX: printk(", 100MBit Half Duplex"); break;
		case PHY_STAT_10FDX: printk(", 10MBit Full Duplex"); break;
		case PHY_STAT_10HDX: printk(", 10MBit Half Duplex"); break;
		default:
			printk(", Unknown speed/duplex");
		}

		if (*s & PHY_STAT_ANC)
			printk(", auto-negotiation complete");
	}

	if (*s & PHY_STAT_FAULT)
		printk(", remote fault");

	printk(".\n");
}

static void mii_display_config(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	uint status = fep->phy_status;

	/*
	** When we get here, phy_task is already removed from
	** the workqueue.  It is thus safe to reuse it.
	*/
	fep->mii_phy_task_queued = 0;
	printk("%s: config: auto-negotiation ", dev->name);

	if (status & PHY_CONF_ANE)
		printk("on");
	else
		printk("off");

	if (status & PHY_CONF_100FDX)
		printk(", 100FDX");
	if (status & PHY_CONF_100HDX)
		printk(", 100HDX");
	if (status & PHY_CONF_10FDX)
		printk(", 10FDX");
	if (status & PHY_CONF_10HDX)
		printk(", 10HDX");
	if (!(status & PHY_CONF_SPMASK))
		printk(", No speed/duplex selected?");

	if (status & PHY_CONF_LOOP)
		printk(", loopback enabled");

	printk(".\n");

	fep->sequence_done = 1;
}

static void mii_relink(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int duplex;

	/*
	** When we get here, phy_task is already removed from
	** the workqueue.  It is thus safe to reuse it.
	*/
	fep->mii_phy_task_queued = 0;
	fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
	mii_display_status(dev);
	fep->old_link = fep->link;

	if (fep->link) {
		duplex = 0;
		if (fep->phy_status
		    & (PHY_STAT_100FDX | PHY_STAT_10FDX))
			duplex = 1;
		fec_restart(dev, duplex);
	} else
		fec_stop(dev);

#if 0
	enable_irq(fep->mii_irq);
#endif

}

/* mii_queue_relink is called in interrupt context from mii_link_interrupt */
static void mii_queue_relink(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/*
	** We cannot queue phy_task twice in the workqueue.  It
	** would cause an endless loop in the workqueue.
	** Fortunately, if the last mii_relink entry has not yet run,
	** it will do the job for the current interrupt, which is
	** just what we want.
	*/
	if (fep->mii_phy_task_queued)
		return;

	fep->mii_phy_task_queued = 1;
	INIT_WORK(&fep->phy_task, (void*)mii_relink, dev);
	schedule_work(&fep->phy_task);
}

/* mii_queue_config is called in interrupt context from fec_enet_mii */
static void mii_queue_config(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	if (fep->mii_phy_task_queued)
		return;

	fep->mii_phy_task_queued = 1;
	INIT_WORK(&fep->phy_task, (void*)mii_display_config, dev);
	schedule_work(&fep->phy_task);
}

phy_cmd_t const phy_cmd_relink[] = {
	{ mk_mii_read(MII_REG_CR), mii_queue_relink },
	{ mk_mii_end, }
	};
phy_cmd_t const phy_cmd_config[] = {
	{ mk_mii_read(MII_REG_CR), mii_queue_config },
	{ mk_mii_end, }
	};

/* Read remainder of PHY ID.
*/
static void
mii_discover_phy3(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep;
	int i;

	fep = netdev_priv(dev);
	fep->phy_id |= (mii_reg & 0xffff);
	printk("fec: PHY @ 0x%x, ID 0x%08x", fep->phy_addr, fep->phy_id);

	for(i = 0; phy_info[i]; i++) {
		if(phy_info[i]->id == (fep->phy_id >> 4))
			break;
	}

	if (phy_info[i])
		printk(" -- %s\n", phy_info[i]->name);
	else
		printk(" -- unknown PHY!\n");

	fep->phy = phy_info[i];
	fep->phy_id_done = 1;
}

/* Scan all of the MII PHY addresses looking for someone to respond
 * with a valid ID.  This usually happens quickly.
 */
static void
mii_discover_phy(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep;
	volatile fec_t *fecp;
	uint phytype;

	fep = netdev_priv(dev);
	fecp = fep->hwp;

	if (fep->phy_addr < 32) {
		if ((phytype = (mii_reg & 0xffff)) != 0xffff && phytype != 0) {

			/* Got first part of ID, now get remainder.
			*/
			fep->phy_id = phytype << 16;
			mii_queue(dev, mk_mii_read(MII_REG_PHYIR2),
							mii_discover_phy3);
		} else {
			fep->phy_addr++;
			mii_queue(dev, mk_mii_read(MII_REG_PHYIR1),
							mii_discover_phy);
		}
	} else {
		printk("FEC: No PHY device found.\n");
		/* Disable external MII interface */
		fecp->fec_mii_speed = fep->phy_speed = 0;
		fec_disable_phy_intr();
	}
}

/* This interrupt occurs when the PHY detects a link change.
*/
#ifdef CONFIG_RPXCLASSIC
static void
mii_link_interrupt(void *dev_id)
#else
static irqreturn_t
mii_link_interrupt(int irq, void * dev_id, struct pt_regs * regs)
#endif
{
	struct	net_device *dev = dev_id;
	struct fec_enet_private *fep = netdev_priv(dev);

	fec_phy_ack_intr();

#if 0
	disable_irq(fep->mii_irq);  /* disable now, enable later */
#endif

	mii_do_cmd(dev, fep->phy->ack_int);
	mii_do_cmd(dev, phy_cmd_relink);  /* restart and display status */

	return IRQ_HANDLED;
}

static int
fec_enet_open(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/* I should reset the ring buffers here, but I don't yet know
	 * a simple way to do that.
	 */
	fec_set_mac_address(dev);

	fep->sequence_done = 0;
	fep->link = 0;

	if (fep->phy) {
		mii_do_cmd(dev, fep->phy->ack_int);
		mii_do_cmd(dev, fep->phy->config);
		mii_do_cmd(dev, phy_cmd_config);  /* display configuration */

		/* FIXME: use netif_carrier_{on,off} ; this polls
		 * until link is up which is wrong...  could be
		 * 30 seconds or more we are trapped in here. -jgarzik
		 */
		while(!fep->sequence_done)
			schedule();

		mii_do_cmd(dev, fep->phy->startup);

		/* Set the initial link state to true.  A lot of hardware
		 * based on this device does not implement a PHY interrupt,
		 * so we are never notified of link change.
		 */
		fep->link = 1;
	} else {
		fep->link = 1;	/* let's just try it and see */
		/* no phy, go full duplex, it's most likely a hub chip */
		fec_restart(dev, 1);
	}

	netif_start_queue(dev);
	fep->opened = 1;
	return 0;		/* Success */
}

static int
fec_enet_close(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/* Don't know what to do yet.
	*/
	fep->opened = 0;
	netif_stop_queue(dev);
	fec_stop(dev);

	return 0;
}

static struct net_device_stats *fec_enet_get_stats(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	return &fep->stats;
}

/* Set or clear the multicast filter for this adaptor.
 * Skeleton taken from sunlance driver.
 * The CPM Ethernet implementation allows Multicast as well as individual
 * MAC address filtering.  Some of the drivers check to make sure it is
 * a group multicast address, and discard those that are not.  I guess I
 * will do the same for now, but just remove the test if you want
 * individual filtering as well (do the upper net layers want or support
 * this kind of feature?).
 */

#define HASH_BITS	6		/* #bits in hash */
#define CRC32_POLY	0xEDB88320

static void set_multicast_list(struct net_device *dev)
{
	struct fec_enet_private *fep;
	volatile fec_t *ep;
	struct dev_mc_list *dmi;
	unsigned int i, j, bit, data, crc;
	unsigned char hash;

	fep = netdev_priv(dev);
	ep = fep->hwp;

	if (dev->flags&IFF_PROMISC) {
		/* Log any net taps. */
		printk("%s: Promiscuous mode enabled.\n", dev->name);
		ep->fec_r_cntrl |= 0x0008;
	} else {

		ep->fec_r_cntrl &= ~0x0008;

		if (dev->flags & IFF_ALLMULTI) {
			/* Catch all multicast addresses, so set the
			 * filter to all 1's.
			 */
1873 */ 1874 ep->fec_hash_table_high = 0xffffffff; 1875 ep->fec_hash_table_low = 0xffffffff; 1876 } else { 1877 /* Clear filter and add the addresses in hash register. 1878 */ 1879 ep->fec_hash_table_high = 0; 1880 ep->fec_hash_table_low = 0; 1881 1882 dmi = dev->mc_list; 1883 1884 for (j = 0; j < dev->mc_count; j++, dmi = dmi->next) 1885 { 1886 /* Only support group multicast for now. 1887 */ 1888 if (!(dmi->dmi_addr[0] & 1)) 1889 continue; 1890 1891 /* calculate crc32 value of mac address 1892 */ 1893 crc = 0xffffffff; 1894 1895 for (i = 0; i < dmi->dmi_addrlen; i++) 1896 { 1897 data = dmi->dmi_addr[i]; 1898 for (bit = 0; bit < 8; bit++, data >>= 1) 1899 { 1900 crc = (crc >> 1) ^ 1901 (((crc ^ data) & 1) ? CRC32_POLY : 0); 1902 } 1903 } 1904 1905 /* only upper 6 bits (HASH_BITS) are used 1906 which point to specific bit in he hash registers 1907 */ 1908 hash = (crc >> (32 - HASH_BITS)) & 0x3f; 1909 1910 if (hash > 31) 1911 ep->fec_hash_table_high |= 1 << (hash - 32); 1912 else 1913 ep->fec_hash_table_low |= 1 << hash; 1914 } 1915 } 1916 } 1917} 1918 1919/* Set a MAC change in hardware. 1920 */ 1921static void 1922fec_set_mac_address(struct net_device *dev) 1923{ 1924 volatile fec_t *fecp; 1925 1926 fecp = ((struct fec_enet_private *)netdev_priv(dev))->hwp; 1927 1928 /* Set station address. */ 1929 fecp->fec_addr_low = dev->dev_addr[3] | (dev->dev_addr[2] << 8) | 1930 (dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24); 1931 fecp->fec_addr_high = (dev->dev_addr[5] << 16) | 1932 (dev->dev_addr[4] << 24); 1933 1934} 1935 1936/* Initialize the FEC Ethernet on 860T (or ColdFire 5272). 1937 */ 1938 /* 1939 * XXX: We need to clean up on failure exits here. 1940 */ 1941int __init fec_enet_init(struct net_device *dev) 1942{ 1943 struct fec_enet_private *fep = netdev_priv(dev); 1944 unsigned long mem_addr; 1945 volatile cbd_t *bdp; 1946 cbd_t *cbd_base; 1947 volatile fec_t *fecp; 1948 int i, j; 1949 static int index = 0; 1950 1951 /* Only allow us to be probed once. */ 1952 if (index >= FEC_MAX_PORTS) 1953 return -ENXIO; 1954 1955 /* Create an Ethernet device instance. 1956 */ 1957 fecp = (volatile fec_t *) fec_hw[index]; 1958 1959 fep->index = index; 1960 fep->hwp = fecp; 1961 1962 /* Whack a reset. We should wait for this. 1963 */ 1964 fecp->fec_ecntrl = 1; 1965 udelay(10); 1966 1967 /* Clear and enable interrupts */ 1968 fecp->fec_ievent = 0xffc00000; 1969 fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_TXB | 1970 FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII); 1971 fecp->fec_hash_table_high = 0; 1972 fecp->fec_hash_table_low = 0; 1973 fecp->fec_r_buff_size = PKT_MAXBLR_SIZE; 1974 fecp->fec_ecntrl = 2; 1975 fecp->fec_r_des_active = 0x01000000; 1976 1977 /* Set the Ethernet address. If using multiple Enets on the 8xx, 1978 * this needs some work to get unique addresses. 1979 * 1980 * This is our default MAC address unless the user changes 1981 * it via eth_mac_addr (our dev->set_mac_addr handler). 1982 */ 1983 fec_get_mac(dev); 1984 1985 /* Allocate memory for buffer descriptors. 1986 */ 1987 if (((RX_RING_SIZE + TX_RING_SIZE) * sizeof(cbd_t)) > PAGE_SIZE) { 1988 printk("FEC init error. Need more space.\n"); 1989 printk("FEC initialization failed.\n"); 1990 return 1; 1991 } 1992 mem_addr = __get_free_page(GFP_KERNEL); 1993 cbd_base = (cbd_t *)mem_addr; 1994 /* XXX: missing check for allocation failure */ 1995 1996 fec_uncache(mem_addr); 1997 1998 /* Set receive and transmit descriptor base. 
/* Initialize the FEC Ethernet on 860T (or ColdFire 5272).
 */
/*
 * XXX: We need to clean up on failure exits here.
 */
int __init fec_enet_init(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	unsigned long mem_addr;
	volatile cbd_t *bdp;
	cbd_t *cbd_base;
	volatile fec_t *fecp;
	int i, j;
	static int index = 0;

	/* Only allow us to be probed once. */
	if (index >= FEC_MAX_PORTS)
		return -ENXIO;

	/* Create an Ethernet device instance.
	 */
	fecp = (volatile fec_t *) fec_hw[index];

	fep->index = index;
	fep->hwp = fecp;

	/* Whack a reset. We should wait for this.
	 */
	fecp->fec_ecntrl = 1;
	udelay(10);

	/* Clear and enable interrupts */
	fecp->fec_ievent = 0xffc00000;
	fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_TXB |
		FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII);
	fecp->fec_hash_table_high = 0;
	fecp->fec_hash_table_low = 0;
	fecp->fec_r_buff_size = PKT_MAXBLR_SIZE;
	fecp->fec_ecntrl = 2;
	fecp->fec_r_des_active = 0x01000000;

	/* Set the Ethernet address. If using multiple Enets on the 8xx,
	 * this needs some work to get unique addresses.
	 *
	 * This is our default MAC address unless the user changes
	 * it via eth_mac_addr (our dev->set_mac_addr handler).
	 */
	fec_get_mac(dev);

	/* Allocate memory for buffer descriptors.
	 */
	if (((RX_RING_SIZE + TX_RING_SIZE) * sizeof(cbd_t)) > PAGE_SIZE) {
		printk("FEC init error: the RX and TX descriptor rings "
			"must fit within one page.\n");
		return -EINVAL;
	}
	mem_addr = __get_free_page(GFP_KERNEL);
	if (mem_addr == 0)
		return -ENOMEM;
	cbd_base = (cbd_t *)mem_addr;

	fec_uncache(mem_addr);

	/* Set receive and transmit descriptor base.
	 */
	fep->rx_bd_base = cbd_base;
	fep->tx_bd_base = cbd_base + RX_RING_SIZE;

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->cur_rx = fep->rx_bd_base;

	fep->skb_cur = fep->skb_dirty = 0;

	/* Initialize the receive buffer descriptors.
	 */
	bdp = fep->rx_bd_base;
	for (i = 0; i < FEC_ENET_RX_PAGES; i++) {

		/* Allocate a page.
		 */
		mem_addr = __get_free_page(GFP_KERNEL);
		/* XXX: earlier allocations are leaked on this exit */
		if (mem_addr == 0)
			return -ENOMEM;

		fec_uncache(mem_addr);

		/* Initialize the BD for every fragment in the page.
		 */
		for (j = 0; j < FEC_ENET_RX_FRPPG; j++) {
			bdp->cbd_sc = BD_ENET_RX_EMPTY;
			bdp->cbd_bufaddr = __pa(mem_addr);
			mem_addr += FEC_ENET_RX_FRSIZE;
			bdp++;
		}
	}

	/* Set the last buffer to wrap.
	 */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* ...and the same for transmit.
	 */
	bdp = fep->tx_bd_base;
	for (i = 0, j = FEC_ENET_TX_FRPPG; i < TX_RING_SIZE; i++) {
		if (j >= FEC_ENET_TX_FRPPG) {
			/* Start a new page of bounce buffers. */
			mem_addr = __get_free_page(GFP_KERNEL);
			/* XXX: earlier allocations are leaked on this exit */
			if (mem_addr == 0)
				return -ENOMEM;
			j = 1;
		} else {
			mem_addr += FEC_ENET_TX_FRSIZE;
			j++;
		}
		fep->tx_bounce[i] = (unsigned char *) mem_addr;

		/* Initialize the BD for every fragment in the page.
		 */
		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}

	/* Set the last buffer to wrap.
	 */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* Set receive and transmit descriptor base.
	 */
	fecp->fec_r_des_start = __pa((uint)(fep->rx_bd_base));
	fecp->fec_x_des_start = __pa((uint)(fep->tx_bd_base));

	/* Install our interrupt handlers. This varies depending on
	 * the architecture.
	 */
	fec_request_intrs(dev);

	dev->base_addr = (unsigned long)fecp;

	/* The FEC Ethernet specific entries in the device structure. */
	dev->open = fec_enet_open;
	dev->hard_start_xmit = fec_enet_start_xmit;
	dev->tx_timeout = fec_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->stop = fec_enet_close;
	dev->get_stats = fec_enet_get_stats;
	dev->set_multicast_list = set_multicast_list;

	/* Link up the free list of MII command buffers. */
	for (i = 0; i < NMII - 1; i++)
		mii_cmds[i].mii_next = &mii_cmds[i + 1];
	mii_free = mii_cmds;

	/* setup MII interface */
	fec_set_mii(dev, fep);

	/* Queue up a command to detect the PHY and initialize the
	 * remainder of the interface.
	 */
	fep->phy_id_done = 0;
	fep->phy_addr = 0;
	mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy);

	index++;
	return 0;
}
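/*
 * Descriptor sizing sketch (illustrative): with 4 kB pages,
 *
 *	FEC_ENET_RX_FRPPG = 4096 / 2048 = 2 frames per page
 *	RX_RING_SIZE      = 2 * FEC_ENET_RX_PAGES = 2 * 8 = 16
 *	TX_RING_SIZE      = 16
 *
 * so, assuming an 8-byte cbd_t, the two rings need (16 + 16) * 8 =
 * 256 bytes and comfortably fit in the single page that
 * fec_enet_init() above checks for at run time. A compile-time form
 * of the same check could look like this (the array name is
 * hypothetical; a negative size fails the build):
 */
#if 0
static char fec_bd_rings_fit_one_page[
	((RX_RING_SIZE + TX_RING_SIZE) * sizeof(cbd_t) <= PAGE_SIZE)
		? 1 : -1];
#endif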
/* This function is called to start or restart the FEC during a link
 * change. This only happens when switching between half and full
 * duplex.
 */
static void
fec_restart(struct net_device *dev, int duplex)
{
	struct fec_enet_private *fep;
	volatile cbd_t *bdp;
	volatile fec_t *fecp;
	int i;

	fep = netdev_priv(dev);
	fecp = fep->hwp;

	/* Whack a reset. We should wait for this.
	 */
	fecp->fec_ecntrl = 1;
	udelay(10);

	/* Enable interrupts we wish to service.
	 */
	fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_TXB |
		FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII);

	/* Clear any outstanding interrupt.
	 */
	fecp->fec_ievent = 0xffc00000;
	fec_enable_phy_intr();

	/* Set station address.
	 */
	fec_set_mac_address(dev);

	/* Reset all multicast.
	 */
	fecp->fec_hash_table_high = 0;
	fecp->fec_hash_table_low = 0;

	/* Set maximum receive buffer size.
	 */
	fecp->fec_r_buff_size = PKT_MAXBLR_SIZE;

	fec_localhw_setup();

	/* Set receive and transmit descriptor base.
	 */
	fecp->fec_r_des_start = __pa((uint)(fep->rx_bd_base));
	fecp->fec_x_des_start = __pa((uint)(fep->tx_bd_base));

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->cur_rx = fep->rx_bd_base;

	/* Reset SKB transmit buffers.
	 */
	fep->skb_cur = fep->skb_dirty = 0;
	for (i = 0; i <= TX_RING_MOD_MASK; i++) {
		if (fep->tx_skbuff[i] != NULL) {
			dev_kfree_skb_any(fep->tx_skbuff[i]);
			fep->tx_skbuff[i] = NULL;
		}
	}

	/* Initialize the receive buffer descriptors.
	 */
	bdp = fep->rx_bd_base;
	for (i = 0; i < RX_RING_SIZE; i++) {

		/* Initialize the BD for every fragment in the page.
		 */
		bdp->cbd_sc = BD_ENET_RX_EMPTY;
		bdp++;
	}

	/* Set the last buffer to wrap.
	 */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* ...and the same for transmit.
	 */
	bdp = fep->tx_bd_base;
	for (i = 0; i < TX_RING_SIZE; i++) {

		/* Initialize the BD for every fragment in the page.
		 */
		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}

	/* Set the last buffer to wrap.
	 */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* Enable MII mode.
	 */
	if (duplex) {
		fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04;	/* MII enable */
		fecp->fec_x_cntrl = 0x04;			/* FD enable */
	} else {
		/* MII enable | No Rcv on Xmit */
		fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x06;
		fecp->fec_x_cntrl = 0x00;
	}
	fep->full_duplex = duplex;

	/* Set MII speed.
	 */
	fecp->fec_mii_speed = fep->phy_speed;

	/* And last, enable the transmit and receive processing.
	 */
	fecp->fec_ecntrl = 2;
	fecp->fec_r_des_active = 0x01000000;
}

static void
fec_stop(struct net_device *dev)
{
	volatile fec_t *fecp;
	struct fec_enet_private *fep;

	fep = netdev_priv(dev);
	fecp = fep->hwp;

	fecp->fec_x_cntrl = 0x01;	/* Graceful transmit stop */

	/* Note: this spins without bound; if the graceful-stop event
	 * never arrives, we never return.
	 */
	while (!(fecp->fec_ievent & FEC_ENET_GRA));

	/* Whack a reset. We should wait for this.
	 */
	fecp->fec_ecntrl = 1;
	udelay(10);

	/* Clear outstanding MII command interrupts.
	 */
	fecp->fec_ievent = FEC_ENET_MII;
	fec_enable_phy_intr();

	fecp->fec_imask = FEC_ENET_MII;
	fecp->fec_mii_speed = fep->phy_speed;
}

static int __init fec_enet_module_init(void)
{
	struct net_device *dev;
	int i, j, err;

	printk("FEC ENET Version 0.2\n");

	for (i = 0; i < FEC_MAX_PORTS; i++) {
		dev = alloc_etherdev(sizeof(struct fec_enet_private));
		if (!dev)
			return -ENOMEM;
		err = fec_enet_init(dev);
		if (err) {
			free_netdev(dev);
			continue;
		}
		if (register_netdev(dev) != 0) {
			/* XXX: missing cleanup here */
			free_netdev(dev);
			return -EIO;
		}

		printk("%s: ethernet ", dev->name);
		for (j = 0; j < 5; j++)
			printk("%02x:", dev->dev_addr[j]);
		printk("%02x\n", dev->dev_addr[5]);
	}
	return 0;
}

module_init(fec_enet_module_init);

MODULE_LICENSE("GPL");
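
/*
 * Illustrative only: fec_stop() above spins without bound waiting for
 * the graceful-stop (GRA) event. A minimal bounded variant might look
 * like the sketch below; the function name, retry count, and poll
 * interval are assumptions for illustration, not values taken from
 * the FEC documentation.
 */
#if 0
static void fec_stop_bounded(volatile fec_t *fecp)
{
	int timeout = 1000;		/* arbitrary bound, ~10 ms total */

	fecp->fec_x_cntrl = 0x01;	/* Graceful transmit stop */
	while (!(fecp->fec_ievent & FEC_ENET_GRA) && --timeout)
		udelay(10);
	if (timeout == 0)
		printk("FEC: graceful transmit stop did not complete\n");
}
#endif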