/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * This version of the driver is specific to the FADS implementation,
 * since the board contains control registers external to the processor
 * for the control of the LevelOne LXT970 transceiver.  The MPC860T manual
 * describes connections using the internal parallel port I/O, which
 * is basically all of Port D.
 *
 * Right now, I am very wasteful with the buffers.  I allocate memory
 * pages and then divide them into 2K frame buffers.  This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire/5270/5271/5272/5274/5275/5280/5282.
 * Copyright (c) 2001-2004 Greg Ungerer (gerg@snapgear.com)
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/pgtable.h>

#if defined(CONFIG_M527x) || defined(CONFIG_M5272) || defined(CONFIG_M528x)
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include "fec.h"
#else
#include <asm/8xx_immap.h>
#include <asm/mpc8xx.h>
#include "commproc.h"
#endif

#if defined(CONFIG_FEC2)
#define	FEC_MAX_PORTS	2
#else
#define	FEC_MAX_PORTS	1
#endif

/*
 * Define the fixed address of the FEC hardware.
 */
static unsigned int fec_hw[] = {
#if defined(CONFIG_M5272)
	(MCF_MBAR + 0x840),
#elif defined(CONFIG_M527x)
	(MCF_MBAR + 0x1000),
	(MCF_MBAR + 0x1800),
#elif defined(CONFIG_M528x)
	(MCF_MBAR + 0x1000),
#else
	&(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec),
#endif
};

static unsigned char fec_mac_default[] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/*
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero then assume it is the address to get the MAC from.
 */
#if defined(CONFIG_NETtel)
#define	FEC_FLASHMAC	0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define	FEC_FLASHMAC	0xf0006000
#elif defined(CONFIG_MTD_KeyTechnology)
#define	FEC_FLASHMAC	0xffe04000
#elif defined(CONFIG_CANCam)
#define	FEC_FLASHMAC	0xf0020000
#else
#define	FEC_FLASHMAC	0
#endif

unsigned char *fec_flashmac = (unsigned char *) FEC_FLASHMAC;

/* Forward declarations of some structures to support different PHYs
*/

typedef struct {
	uint mii_data;
	void (*funct)(uint mii_reg, struct net_device *dev);
} phy_cmd_t;

typedef struct {
	uint id;
	char *name;

	const phy_cmd_t *config;
	const phy_cmd_t *startup;
	const phy_cmd_t *ack_int;
	const phy_cmd_t *shutdown;
} phy_info_t;
/* The number of Tx and Rx buffers.  These are allocated from the page
 * pool.  The code may assume these are a power of two, so it is best
 * to keep them that size.
 * We don't need to allocate pages for the transmitter.  We just use
 * the skbuffer directly.
 */
#define FEC_ENET_RX_PAGES	8
#define FEC_ENET_RX_FRSIZE	2048
#define FEC_ENET_RX_FRPPG	(PAGE_SIZE / FEC_ENET_RX_FRSIZE)
#define RX_RING_SIZE		(FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
#define FEC_ENET_TX_FRSIZE	2048
#define FEC_ENET_TX_FRPPG	(PAGE_SIZE / FEC_ENET_TX_FRSIZE)
#define TX_RING_SIZE		16	/* Must be power of two */
#define TX_RING_MOD_MASK	15	/*   for this to work */
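/*
 * Worked example of the sizing above (illustrative, assuming the common
 * PAGE_SIZE of 4096): FEC_ENET_RX_FRPPG = 4096 / 2048 = 2 frames per page,
 * so RX_RING_SIZE = 2 * 8 = 16 receive descriptors backed by 8 pages.
 * Because TX_RING_SIZE is a power of two,
 * "index = (index + 1) & TX_RING_MOD_MASK" wraps 15 -> 0 without a divide.
 */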
/* Interrupt events/masks.
*/
#define FEC_ENET_HBERR	((uint)0x80000000)	/* Heartbeat error */
#define FEC_ENET_BABR	((uint)0x40000000)	/* Babbling receiver */
#define FEC_ENET_BABT	((uint)0x20000000)	/* Babbling transmitter */
#define FEC_ENET_GRA	((uint)0x10000000)	/* Graceful stop complete */
#define FEC_ENET_TXF	((uint)0x08000000)	/* Full frame transmitted */
#define FEC_ENET_TXB	((uint)0x04000000)	/* A buffer was transmitted */
#define FEC_ENET_RXF	((uint)0x02000000)	/* Full frame received */
#define FEC_ENET_RXB	((uint)0x01000000)	/* A buffer was received */
#define FEC_ENET_MII	((uint)0x00800000)	/* MII interrupt */
#define FEC_ENET_EBERR	((uint)0x00400000)	/* SDMA bus error */

/* The FEC stores dest/src/type, data, and checksum for receive packets.
 */
#define PKT_MAXBUF_SIZE		1518
#define PKT_MINBUF_SIZE		64
#define PKT_MAXBLR_SIZE		1520

/*
 * The 5270/5271/5280/5282 RX control register also contains maximum frame
 * size bits.  Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M527x) || defined(CONFIG_M528x)
#define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
#else
#define	OPT_FRAME_SIZE	0
#endif

/* The FEC buffer descriptors track the ring buffers.  The rx_bd_base and
 * tx_bd_base always point to the base of the buffer descriptors.  The
 * cur_rx and cur_tx point to the currently available buffer.
 * The dirty_tx tracks the current buffer that is being sent by the
 * controller.  The cur_tx and dirty_tx are equal under both completely
 * empty and completely full conditions.  The empty/ready indicator in
 * the buffer descriptor determines the actual condition.
 */
struct fec_enet_private {
	/* Hardware registers of the FEC device */
	volatile fec_t	*hwp;

	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	unsigned char	*tx_bounce[TX_RING_SIZE];
	struct sk_buff	*tx_skbuff[TX_RING_SIZE];
	ushort	skb_cur;
	ushort	skb_dirty;

	/* CPM dual port RAM relative addresses.
	*/
	cbd_t	*rx_bd_base;		/* Address of Rx and Tx buffers. */
	cbd_t	*tx_bd_base;
	cbd_t	*cur_rx, *cur_tx;	/* The next free ring entry */
	cbd_t	*dirty_tx;		/* The ring entries to be free()ed. */
	struct	net_device_stats stats;
	uint	tx_full;
	spinlock_t lock;

	uint	phy_id;
	uint	phy_id_done;
	uint	phy_status;
	uint	phy_speed;
	phy_info_t	*phy;
	struct work_struct phy_task;

	uint	sequence_done;
	uint	mii_phy_task_queued;

	uint	phy_addr;

	int	index;
	int	opened;
	int	link;
	int	old_link;
	int	full_duplex;
	unsigned char	mac_addr[ETH_ALEN];
};

static int fec_enet_open(struct net_device *dev);
static int fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void fec_enet_mii(struct net_device *dev);
static irqreturn_t fec_enet_interrupt(int irq, void * dev_id, struct pt_regs * regs);
static void fec_enet_tx(struct net_device *dev);
static void fec_enet_rx(struct net_device *dev);
static int fec_enet_close(struct net_device *dev);
static struct net_device_stats *fec_enet_get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
static void fec_restart(struct net_device *dev, int duplex);
static void fec_stop(struct net_device *dev);
static void fec_set_mac_address(struct net_device *dev);


/* MII processing.  We keep this as simple as possible.  Requests are
 * placed on the list (if there is room).  When the request is finished
 * by the MII, an optional function may be called.
 */
typedef struct mii_list {
	uint	mii_regval;
	void	(*mii_func)(uint val, struct net_device *dev);
	struct	mii_list *mii_next;
} mii_list_t;

#define		NMII	20
mii_list_t	mii_cmds[NMII];
mii_list_t	*mii_free;
mii_list_t	*mii_head;
mii_list_t	*mii_tail;

static int	mii_queue(struct net_device *dev, int request,
				void (*func)(uint, struct net_device *));

/* Make MII read/write commands for the FEC.
*/
#define mk_mii_read(REG)	(0x60020000 | ((REG & 0x1f) << 18))
#define mk_mii_write(REG, VAL)	(0x50020000 | ((REG & 0x1f) << 18) | \
						(VAL & 0xffff))
#define mk_mii_end	0
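/*
 * Worked example of the command encoding (illustrative): the value written
 * to fec_mii_data is a management frame laid out as ST(2) OP(2) PA(5) RA(5)
 * TA(2) DATA(16).  Thus mk_mii_read(MII_REG_SR) is
 * 0x60020000 | (1 << 18) == 0x60060000, and mk_mii_write(MII_REG_CR, 0x1200)
 * is 0x50021200.  The 5-bit PHY address is OR-ed in later by mii_queue()
 * as "regval |= fep->phy_addr << 23".
 */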
/* Transmitter timeout.
*/
#define TX_TIMEOUT (2*HZ)

/* Register definitions for the PHY.
*/

#define MII_REG_CR	 0	/* Control Register */
#define MII_REG_SR	 1	/* Status Register */
#define MII_REG_PHYIR1	 2	/* PHY Identification Register 1 */
#define MII_REG_PHYIR2	 3	/* PHY Identification Register 2 */
#define MII_REG_ANAR	 4	/* A-N Advertisement Register */
#define MII_REG_ANLPAR	 5	/* A-N Link Partner Ability Register */
#define MII_REG_ANER	 6	/* A-N Expansion Register */
#define MII_REG_ANNPTR	 7	/* A-N Next Page Transmit Register */
#define MII_REG_ANLPRNPR 8	/* A-N Link Partner Received Next Page Reg. */

/* values for phy_status */

#define PHY_CONF_ANE	0x0001	/* 1 auto-negotiation enabled */
#define PHY_CONF_LOOP	0x0002	/* 1 loopback mode enabled */
#define PHY_CONF_SPMASK	0x00f0	/* mask for speed */
#define PHY_CONF_10HDX	0x0010	/* 10 Mbit half duplex supported */
#define PHY_CONF_10FDX	0x0020	/* 10 Mbit full duplex supported */
#define PHY_CONF_100HDX	0x0040	/* 100 Mbit half duplex supported */
#define PHY_CONF_100FDX	0x0080	/* 100 Mbit full duplex supported */

#define PHY_STAT_LINK	0x0100	/* 1 up - 0 down */
#define PHY_STAT_FAULT	0x0200	/* 1 remote fault */
#define PHY_STAT_ANC	0x0400	/* 1 auto-negotiation complete */
#define PHY_STAT_SPMASK	0xf000	/* mask for speed */
#define PHY_STAT_10HDX	0x1000	/* 10 Mbit half duplex selected */
#define PHY_STAT_10FDX	0x2000	/* 10 Mbit full duplex selected */
#define PHY_STAT_100HDX	0x4000	/* 100 Mbit half duplex selected */
#define PHY_STAT_100FDX	0x8000	/* 100 Mbit full duplex selected */


static int
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fec_enet_private *fep;
	volatile fec_t	*fecp;
	volatile cbd_t	*bdp;

	fep = netdev_priv(dev);
	fecp = (volatile fec_t*)dev->base_addr;

	if (!fep->link) {
		/* Link is down or autonegotiation is in progress. */
		return 1;
	}

	/* Fill in a Tx ring entry */
	bdp = fep->cur_tx;

#ifndef final_version
	if (bdp->cbd_sc & BD_ENET_TX_READY) {
		/* Ooops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since dev->tbusy should be set.
		 */
		printk("%s: tx queue full!\n", dev->name);
		return 1;
	}
#endif

	/* Clear all of the status flags.
	 */
	bdp->cbd_sc &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer.
	*/
	bdp->cbd_bufaddr = __pa(skb->data);
	bdp->cbd_datlen = skb->len;

	/*
	 * On some FEC implementations data must be aligned on
	 * 4-byte boundaries.  Use bounce buffers to copy data
	 * and get it aligned.  Ugh.
	 */
	if (bdp->cbd_bufaddr & 0x3) {
		unsigned int index;
		index = bdp - fep->tx_bd_base;
		memcpy(fep->tx_bounce[index], (void *) bdp->cbd_bufaddr, bdp->cbd_datlen);
		bdp->cbd_bufaddr = __pa(fep->tx_bounce[index]);
	}

	/* Save skb pointer.
	*/
	fep->tx_skbuff[fep->skb_cur] = skb;

	fep->stats.tx_bytes += skb->len;
	fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;

	/* Push the data cache so the CPM does not get stale memory
	 * data.
	 */
	flush_dcache_range((unsigned long)skb->data,
			   (unsigned long)skb->data + skb->len);

	spin_lock_irq(&fep->lock);

	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */

	bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
			| BD_ENET_TX_LAST | BD_ENET_TX_TC);

	dev->trans_start = jiffies;

	/* Trigger transmission start */
	fecp->fec_x_des_active = 0x01000000;

	/* If this was the last BD in the ring, start at the beginning again.
	*/
	if (bdp->cbd_sc & BD_ENET_TX_WRAP) {
		bdp = fep->tx_bd_base;
	} else {
		bdp++;
	}

	if (bdp == fep->dirty_tx) {
		fep->tx_full = 1;
		netif_stop_queue(dev);
	}

	fep->cur_tx = (cbd_t *)bdp;

	spin_unlock_irq(&fep->lock);

	return 0;
}
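/*
 * Ring state, illustrated (see the comment above struct fec_enet_private):
 * after start_xmit advances cur_tx, cur_tx == dirty_tx means either a
 * completely empty or a completely full ring, and the two cases are told
 * apart by the READY bit in the descriptors.  E.g. with a 4-entry ring:
 *
 *	empty:	dirty_tx -> [.] [.] [.] [.] <- cur_tx	(no READY bits set)
 *	full:	dirty_tx -> [R] [R] [R] [R] <- cur_tx	(all READY bits set)
 *
 * which is why the full case above also stops the queue.
 */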
static void
fec_timeout(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	printk("%s: transmit timed out.\n", dev->name);
	fep->stats.tx_errors++;
#ifndef final_version
	{
	int	i;
	cbd_t	*bdp;

	printk("Ring data dump: cur_tx %lx%s, dirty_tx %lx cur_rx: %lx\n",
	       (unsigned long)fep->cur_tx, fep->tx_full ? " (full)" : "",
	       (unsigned long)fep->dirty_tx,
	       (unsigned long)fep->cur_rx);

	bdp = fep->tx_bd_base;
	printk(" tx: %u buffers\n",  TX_RING_SIZE);
	for (i = 0 ; i < TX_RING_SIZE; i++) {
		printk("  %08x: %04x %04x %08x\n",
		       (uint) bdp,
		       bdp->cbd_sc,
		       bdp->cbd_datlen,
		       (int) bdp->cbd_bufaddr);
		bdp++;
	}

	bdp = fep->rx_bd_base;
	printk(" rx: %lu buffers\n",  (unsigned long) RX_RING_SIZE);
	for (i = 0 ; i < RX_RING_SIZE; i++) {
		printk("  %08x: %04x %04x %08x\n",
		       (uint) bdp,
		       bdp->cbd_sc,
		       bdp->cbd_datlen,
		       (int) bdp->cbd_bufaddr);
		bdp++;
	}
	}
#endif
	fec_restart(dev, 0);
	netif_wake_queue(dev);
}

/* The interrupt handler.
 * This is called from the MPC core interrupt.
 */
static irqreturn_t
fec_enet_interrupt(int irq, void * dev_id, struct pt_regs * regs)
{
	struct	net_device *dev = dev_id;
	volatile fec_t	*fecp;
	uint	int_events;
	int	handled = 0;

	fecp = (volatile fec_t*)dev->base_addr;

	/* Get the interrupt events that caused us to be here.
	*/
	while ((int_events = fecp->fec_ievent) != 0) {
		fecp->fec_ievent = int_events;

		/* Handle receive event in its own function.
		 */
		if (int_events & FEC_ENET_RXF) {
			handled = 1;
			fec_enet_rx(dev);
		}

		/* Transmit OK, or non-fatal error.  Update the buffer
		   descriptors.  FEC handles all errors, we just discover
		   them as part of the transmit process.
		*/
		if (int_events & FEC_ENET_TXF) {
			handled = 1;
			fec_enet_tx(dev);
		}

		if (int_events & FEC_ENET_MII) {
			handled = 1;
			fec_enet_mii(dev);
		}

	}
	return IRQ_RETVAL(handled);
}
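/*
 * Example (illustrative): a received frame typically latches both the
 * buffer and frame events, so fec_ievent may read back as
 * FEC_ENET_RXF | FEC_ENET_RXB == 0x03000000.  Writing the value back, as
 * done above, acknowledges exactly the events we saw; the while loop then
 * re-reads the register in case new events arrived while we were busy.
 */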
static void
fec_enet_tx(struct net_device *dev)
{
	struct	fec_enet_private *fep;
	volatile cbd_t	*bdp;
	struct	sk_buff	*skb;

	fep = netdev_priv(dev);
	spin_lock(&fep->lock);
	bdp = fep->dirty_tx;

	while ((bdp->cbd_sc&BD_ENET_TX_READY) == 0) {
		if (bdp == fep->cur_tx && fep->tx_full == 0) break;

		skb = fep->tx_skbuff[fep->skb_dirty];
		/* Check for errors. */
		if (bdp->cbd_sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				   BD_ENET_TX_RL | BD_ENET_TX_UN |
				   BD_ENET_TX_CSL)) {
			fep->stats.tx_errors++;
			if (bdp->cbd_sc & BD_ENET_TX_HB)  /* No heartbeat */
				fep->stats.tx_heartbeat_errors++;
			if (bdp->cbd_sc & BD_ENET_TX_LC)  /* Late collision */
				fep->stats.tx_window_errors++;
			if (bdp->cbd_sc & BD_ENET_TX_RL)  /* Retrans limit */
				fep->stats.tx_aborted_errors++;
			if (bdp->cbd_sc & BD_ENET_TX_UN)  /* Underrun */
				fep->stats.tx_fifo_errors++;
			if (bdp->cbd_sc & BD_ENET_TX_CSL) /* Carrier lost */
				fep->stats.tx_carrier_errors++;
		} else {
			fep->stats.tx_packets++;
		}

#ifndef final_version
		if (bdp->cbd_sc & BD_ENET_TX_READY)
			printk("HEY! Enet xmit interrupt and TX_READY.\n");
#endif
		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (bdp->cbd_sc & BD_ENET_TX_DEF)
			fep->stats.collisions++;

		/* Free the sk buffer associated with this last transmit.
		 */
		dev_kfree_skb_any(skb);
		fep->tx_skbuff[fep->skb_dirty] = NULL;
		fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;

		/* Update pointer to next buffer descriptor to be transmitted.
		 */
		if (bdp->cbd_sc & BD_ENET_TX_WRAP)
			bdp = fep->tx_bd_base;
		else
			bdp++;

		/* Since we have freed up a buffer, the ring is no longer
		 * full.
		 */
		if (fep->tx_full) {
			fep->tx_full = 0;
			if (netif_queue_stopped(dev))
				netif_wake_queue(dev);
		}
	}
	fep->dirty_tx = (cbd_t *)bdp;
	spin_unlock(&fep->lock);
}


/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static void
fec_enet_rx(struct net_device *dev)
{
	struct	fec_enet_private *fep;
	volatile fec_t	*fecp;
	volatile cbd_t	*bdp;
	struct	sk_buff	*skb;
	ushort	pkt_len;
	__u8	*data;

	fep = netdev_priv(dev);
	fecp = (volatile fec_t*)dev->base_addr;

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {

#ifndef final_version
		/* Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((bdp->cbd_sc & BD_ENET_RX_LAST) == 0)
			printk("FEC ENET: rcv is not +last\n");
#endif

		if (!fep->opened)
			goto rx_processing_done;

		/* Check for errors. */
		if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
				   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
				/* Frame too long or too short. */
				fep->stats.rx_length_errors++;
			}
			if (bdp->cbd_sc & BD_ENET_RX_NO)	/* Frame alignment */
				fep->stats.rx_frame_errors++;
			if (bdp->cbd_sc & BD_ENET_RX_CR)	/* CRC Error */
				fep->stats.rx_crc_errors++;
			if (bdp->cbd_sc & BD_ENET_RX_OV)	/* FIFO overrun */
				fep->stats.rx_fifo_errors++;
		}

		/* Report late collisions as a frame error.
		 * On this error, the BD is closed, but we don't know what we
		 * have in the buffer.  So, just drop this frame on the floor.
		 */
		if (bdp->cbd_sc & BD_ENET_RX_CL) {
			fep->stats.rx_errors++;
			fep->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame.
		 */
		fep->stats.rx_packets++;
		pkt_len = bdp->cbd_datlen;
		fep->stats.rx_bytes += pkt_len;
		data = (__u8*)__va(bdp->cbd_bufaddr);

		/* This does 16 byte alignment, exactly what we need.
		 * The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		skb = dev_alloc_skb(pkt_len-4);

		if (skb == NULL) {
			printk("%s: Memory squeeze, dropping packet.\n", dev->name);
			fep->stats.rx_dropped++;
		} else {
			skb->dev = dev;
			skb_put(skb,pkt_len-4);	/* Make room */
			eth_copy_and_sum(skb,
					 (unsigned char *)__va(bdp->cbd_bufaddr),
					 pkt_len-4, 0);
			skb->protocol=eth_type_trans(skb,dev);
			netif_rx(skb);
		}
  rx_processing_done:

		/* Clear the status flags for this buffer.
		*/
		bdp->cbd_sc &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty.
		*/
		bdp->cbd_sc |= BD_ENET_RX_EMPTY;

		/* Update BD pointer to next entry.
		*/
		if (bdp->cbd_sc & BD_ENET_RX_WRAP)
			bdp = fep->rx_bd_base;
		else
			bdp++;

#if 1
		/* Doing this here will keep the FEC running while we process
		 * incoming frames.  On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		fecp->fec_r_des_active = 0x01000000;
#endif
	} /* while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) */
	fep->cur_rx = (cbd_t *)bdp;

#if 0
	/* Doing this here will allow us to process all frames in the
	 * ring before the FEC is allowed to put more there.  On a heavily
	 * loaded network, some frames may be lost.  Unfortunately, this
	 * increases the interrupt overhead since we can potentially work
	 * our way back to the interrupt return only to come right back
	 * here.
	 */
	fecp->fec_r_des_active = 0x01000000;
#endif
}


static void
fec_enet_mii(struct net_device *dev)
{
	struct	fec_enet_private *fep;
	volatile fec_t	*ep;
	mii_list_t	*mip;
	uint		mii_reg;

	fep = netdev_priv(dev);
	ep = fep->hwp;
	mii_reg = ep->fec_mii_data;

	if ((mip = mii_head) == NULL) {
		printk("MII and no head!\n");
		return;
	}

	if (mip->mii_func != NULL)
		(*(mip->mii_func))(mii_reg, dev);

	mii_head = mip->mii_next;
	mip->mii_next = mii_free;
	mii_free = mip;

	if ((mip = mii_head) != NULL)
		ep->fec_mii_data = mip->mii_regval;
}
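/*
 * Usage sketch (the pattern used throughout this file): a caller queues a
 * command together with an optional completion callback, e.g.
 *
 *	mii_queue(dev, mk_mii_read(MII_REG_SR), mii_parse_sr);
 *
 * The first command on an empty list is written to fec_mii_data
 * immediately; each MII-complete interrupt then lands in fec_enet_mii()
 * above, which runs the callback with the read-back value and starts the
 * next queued command, if any.
 */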
static int
mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_device *))
{
	struct fec_enet_private *fep;
	unsigned long	flags;
	mii_list_t	*mip;
	int		retval;

	/* Add PHY address to register command.
	*/
	fep = netdev_priv(dev);
	regval |= fep->phy_addr << 23;

	retval = 0;

	save_flags(flags);
	cli();

	if ((mip = mii_free) != NULL) {
		mii_free = mip->mii_next;
		mip->mii_regval = regval;
		mip->mii_func = func;
		mip->mii_next = NULL;
		if (mii_head) {
			mii_tail->mii_next = mip;
			mii_tail = mip;
		} else {
			mii_head = mii_tail = mip;
			fep->hwp->fec_mii_data = regval;
		}
	} else {
		retval = 1;
	}

	restore_flags(flags);

	return(retval);
}

static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c)
{
	int k;

	if(!c)
		return;

	for(k = 0; (c+k)->mii_data != mk_mii_end; k++) {
		mii_queue(dev, (c+k)->mii_data, (c+k)->funct);
	}
}

static void mii_parse_sr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);

	*s &= ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC);

	if (mii_reg & 0x0004)
		*s |= PHY_STAT_LINK;
	if (mii_reg & 0x0010)
		*s |= PHY_STAT_FAULT;
	if (mii_reg & 0x0020)
		*s |= PHY_STAT_ANC;
}

static void mii_parse_cr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);

	*s &= ~(PHY_CONF_ANE | PHY_CONF_LOOP);

	if (mii_reg & 0x1000)
		*s |= PHY_CONF_ANE;
	if (mii_reg & 0x4000)
		*s |= PHY_CONF_LOOP;
}

static void mii_parse_anar(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);

	*s &= ~(PHY_CONF_SPMASK);

	if (mii_reg & 0x0020)
		*s |= PHY_CONF_10HDX;
	if (mii_reg & 0x0040)
		*s |= PHY_CONF_10FDX;
	if (mii_reg & 0x0080)
		*s |= PHY_CONF_100HDX;
	if (mii_reg & 0x0100)
		*s |= PHY_CONF_100FDX;
}
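/*
 * Worked example (illustrative): a PHY advertising all four 10/100 modes
 * typically reports ANAR = 0x01e1.  Bits 0x0020/0x0040/0x0080/0x0100 are
 * set, so mii_parse_anar() ORs in PHY_CONF_10HDX | PHY_CONF_10FDX |
 * PHY_CONF_100HDX | PHY_CONF_100FDX, i.e. phy_status & PHY_CONF_SPMASK
 * becomes 0x00f0.
 */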
/* ------------------------------------------------------------------------- */
/* The Level one LXT970 is used by many boards				     */

#define MII_LXT970_MIRROR    16  /* Mirror register           */
#define MII_LXT970_IER       17  /* Interrupt Enable Register */
#define MII_LXT970_ISR       18  /* Interrupt Status Register */
#define MII_LXT970_CONFIG    19  /* Configuration Register    */
#define MII_LXT970_CSR       20  /* Chip Status Register      */

static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);

	*s &= ~(PHY_STAT_SPMASK);

	if (mii_reg & 0x0800) {
		if (mii_reg & 0x1000)
			*s |= PHY_STAT_100FDX;
		else
			*s |= PHY_STAT_100HDX;
	} else {
		if (mii_reg & 0x1000)
			*s |= PHY_STAT_10FDX;
		else
			*s |= PHY_STAT_10HDX;
	}
}

static phy_info_t phy_info_lxt970 = {
	0x07810000,
	"LXT970",

	(const phy_cmd_t []) {  /* config */
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* startup - enable interrupts */
		{ mk_mii_write(MII_LXT970_IER, 0x0002), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) { /* ack_int */
		/* read SR and ISR to acknowledge */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_LXT970_ISR), NULL },

		/* find out the current status */
		{ mk_mii_read(MII_LXT970_CSR), mii_parse_lxt970_csr },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
		{ mk_mii_write(MII_LXT970_IER, 0x0000), NULL },
		{ mk_mii_end, }
	},
};

/* ------------------------------------------------------------------------- */
/* The Level one LXT971 is used on some of my custom boards		     */

/* register definitions for the 971 */

#define MII_LXT971_PCR       16  /* Port Control Register     */
#define MII_LXT971_SR2       17  /* Status Register 2         */
#define MII_LXT971_IER       18  /* Interrupt Enable Register */
#define MII_LXT971_ISR       19  /* Interrupt Status Register */
#define MII_LXT971_LCR       20  /* LED Control Register      */
#define MII_LXT971_TCR       30  /* Transmit Control Register */

/*
 * I had some nice ideas of running the MDIO faster...
 * The 971 should support 8MHz and I tried it, but things acted really
 * weird, so 2.5 MHz ought to be enough for anyone...
 */

static void mii_parse_lxt971_sr2(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);

	*s &= ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC);

	if (mii_reg & 0x0400) {
		fep->link = 1;
		*s |= PHY_STAT_LINK;
	} else {
		fep->link = 0;
	}
	if (mii_reg & 0x0080)
		*s |= PHY_STAT_ANC;
	if (mii_reg & 0x4000) {
		if (mii_reg & 0x0200)
			*s |= PHY_STAT_100FDX;
		else
			*s |= PHY_STAT_100HDX;
	} else {
		if (mii_reg & 0x0200)
			*s |= PHY_STAT_10FDX;
		else
			*s |= PHY_STAT_10HDX;
	}
	if (mii_reg & 0x0008)
		*s |= PHY_STAT_FAULT;
}

static phy_info_t phy_info_lxt971 = {
	0x0001378e,
	"LXT971",

	(const phy_cmd_t []) {  /* config */
		/* limit to 10MBit because my prototype board
		 * doesn't work with 100. */
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* startup - enable interrupts */
		{ mk_mii_write(MII_LXT971_IER, 0x00f2), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_write(MII_LXT971_LCR, 0xd422), NULL }, /* LED config */
		/* Somehow the 971 reports the link as down on the first
		 * read after power-up.
		 * Read here to get a valid value in ack_int. */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) { /* ack_int */
		/* find out the current status */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
		/* we only need to read ISR to acknowledge */
		{ mk_mii_read(MII_LXT971_ISR), NULL },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
		{ mk_mii_write(MII_LXT971_IER, 0x0000), NULL },
		{ mk_mii_end, }
	},
};
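/*
 * Worked example (illustrative): suppose the LXT971 reports SR2 = 0x4600.
 * Bit 0x0400 (link up) sets PHY_STAT_LINK, bit 0x4000 selects the 100 Mbit
 * branch, and bit 0x0200 selects full duplex, so mii_parse_lxt971_sr2()
 * leaves phy_status holding PHY_STAT_LINK | PHY_STAT_100FDX.
 */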
/* ------------------------------------------------------------------------- */
/* The Quality Semiconductor QS6612 is used on the RPX CLLF		     */

/* register definitions */

#define MII_QS6612_MCR       17  /* Mode Control Register      */
#define MII_QS6612_FTR       27  /* Factory Test Register      */
#define MII_QS6612_MCO       28  /* Misc. Control Register     */
#define MII_QS6612_ISR       29  /* Interrupt Source Register  */
#define MII_QS6612_IMR       30  /* Interrupt Mask Register    */
#define MII_QS6612_PCR       31  /* 100BaseTx PHY Control Reg. */

static void mii_parse_qs6612_pcr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);

	*s &= ~(PHY_STAT_SPMASK);

	switch((mii_reg >> 2) & 7) {
	case 1: *s |= PHY_STAT_10HDX; break;
	case 2: *s |= PHY_STAT_100HDX; break;
	case 5: *s |= PHY_STAT_10FDX; break;
	case 6: *s |= PHY_STAT_100FDX; break;
	}
}

static phy_info_t phy_info_qs6612 = {
	0x00181440,
	"QS6612",

	(const phy_cmd_t []) {  /* config */
		/* The PHY powers up isolated on the RPX,
		 * so send a command to allow operation.
		 */
		{ mk_mii_write(MII_QS6612_PCR, 0x0dc0), NULL },

		/* parse cr and anar to get some info */
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* startup - enable interrupts */
		{ mk_mii_write(MII_QS6612_IMR, 0x003a), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) { /* ack_int */
		/* we need to read ISR, SR and ANER to acknowledge */
		{ mk_mii_read(MII_QS6612_ISR), NULL },
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_REG_ANER), NULL },

		/* read pcr to get info */
		{ mk_mii_read(MII_QS6612_PCR), mii_parse_qs6612_pcr },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
		{ mk_mii_write(MII_QS6612_IMR, 0x0000), NULL },
		{ mk_mii_end, }
	},
};

/* ------------------------------------------------------------------------- */
/* AMD AM79C874 phy							     */

/* register definitions for the 874 */

#define MII_AM79C874_MFR       16  /* Miscellaneous Feature Register */
#define MII_AM79C874_ICSR      17  /* Interrupt/Status Register      */
#define MII_AM79C874_DR        18  /* Diagnostic Register            */
#define MII_AM79C874_PMLR      19  /* Power and Loopback Register    */
#define MII_AM79C874_MCR       21  /* ModeControl Register           */
#define MII_AM79C874_DC        23  /* Disconnect Counter             */
#define MII_AM79C874_REC       24  /* Receive Error Counter          */

static void mii_parse_am79c874_dr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);

	*s &= ~(PHY_STAT_SPMASK | PHY_STAT_ANC);

	if (mii_reg & 0x0080)
		*s |= PHY_STAT_ANC;
	if (mii_reg & 0x0400)
		*s |= ((mii_reg & 0x0800) ? PHY_STAT_100FDX : PHY_STAT_100HDX);
	else
		*s |= ((mii_reg & 0x0800) ? PHY_STAT_10FDX : PHY_STAT_10HDX);
}

static phy_info_t phy_info_am79c874 = {
	0x00022561,
	"AM79C874",

	(const phy_cmd_t []) {  /* config */
		/* limit to 10MBit because my prototype board
		 * doesn't work with 100. */
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* startup - enable interrupts */
		{ mk_mii_write(MII_AM79C874_ICSR, 0xff00), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) { /* ack_int */
		/* find out the current status */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
		/* we only need to read ICSR to acknowledge */
		{ mk_mii_read(MII_AM79C874_ICSR), NULL },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
		{ mk_mii_write(MII_AM79C874_ICSR, 0x0000), NULL },
		{ mk_mii_end, }
	},
};

/* ------------------------------------------------------------------------- */
/* Kendin KS8721BL phy							     */

/* register definitions for the 8721 */

#define MII_KS8721BL_RXERCR	21
#define MII_KS8721BL_ICSR	22
#define MII_KS8721BL_PHYCR	31

static phy_info_t phy_info_ks8721bl = {
	0x00022161,
	"KS8721BL",

	(const phy_cmd_t []) {  /* config */
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* startup */
		{ mk_mii_write(MII_KS8721BL_ICSR, 0xff00), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) { /* ack_int */
		/* find out the current status */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		/* we only need to read ICSR to acknowledge */
		{ mk_mii_read(MII_KS8721BL_ICSR), NULL },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* shutdown */
		{ mk_mii_write(MII_KS8721BL_ICSR, 0x0000), NULL },
		{ mk_mii_end, }
	},
};

/* ------------------------------------------------------------------------- */

static phy_info_t *phy_info[] = {
	&phy_info_lxt970,
	&phy_info_lxt971,
	&phy_info_qs6612,
	&phy_info_am79c874,
	&phy_info_ks8721bl,
	NULL
};

/* ------------------------------------------------------------------------- */
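/*
 * Note on matching (see mii_discover_phy3 below): each entry's "id" is the
 * 32-bit PHYIR1/PHYIR2 value shifted right by four, i.e. the OUI and model
 * with the 4-bit revision stripped.  For example, any LXT971 revision with
 * a full ID of 0x001378eN matches the 0x0001378e entry above.
 */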
#ifdef CONFIG_RPXCLASSIC
static void
mii_link_interrupt(void *dev_id);
#else
static irqreturn_t
mii_link_interrupt(int irq, void * dev_id, struct pt_regs * regs);
#endif

#if defined(CONFIG_M5272)

/*
 * Code specific to Coldfire 5272 setup.
 */
static void __inline__ fec_request_intrs(struct net_device *dev)
{
	volatile unsigned long *icrp;

	/* Setup interrupt handlers.
	*/
	if (request_irq(86, fec_enet_interrupt, 0, "fec(RX)", dev) != 0)
		printk("FEC: Could not allocate FEC(RX) IRQ(86)!\n");
	if (request_irq(87, fec_enet_interrupt, 0, "fec(TX)", dev) != 0)
		printk("FEC: Could not allocate FEC(TX) IRQ(87)!\n");
	if (request_irq(88, fec_enet_interrupt, 0, "fec(OTHER)", dev) != 0)
		printk("FEC: Could not allocate FEC(OTHER) IRQ(88)!\n");
	if (request_irq(66, mii_link_interrupt, 0, "fec(MII)", dev) != 0)
		printk("FEC: Could not allocate MII IRQ(66)!\n");

	/* Unmask interrupt at ColdFire 5272 SIM */
	icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR3);
	*icrp = 0x00000ddd;
	icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1);
	*icrp = (*icrp & 0x70777777) | 0x0d000000;
}

static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep)
{
	volatile fec_t *fecp;

	fecp = fep->hwp;
	fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04;
	fecp->fec_x_cntrl = 0x00;

	/*
	 * Set MII speed to 2.5 MHz
	 * See 5272 manual section 11.5.8: MSCR
	 */
	fep->phy_speed = ((((MCF_CLK / 4) / (2500000 / 10)) + 5) / 10) * 2;
	fecp->fec_mii_speed = fep->phy_speed;

	fec_restart(dev, 0);
}
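/*
 * Example of the MSCR arithmetic above (illustrative, assuming a 66 MHz
 * MCF_CLK): (66000000/4) / (2500000/10) = 66, then ((66+5)/10)*2 = 14.
 * The "+5)/10" step rounds to the nearest multiple of ten before scaling
 * down, and the final "*2" keeps the programmed divider even.
 */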
static void __inline__ fec_get_mac(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile fec_t *fecp;
	unsigned char *iap, tmpaddr[6];
	int i;

	fecp = fep->hwp;

	if (fec_flashmac) {
		/*
		 * Get MAC address from FLASH.
		 * If it is all 1's or 0's, use the default.
		 */
		iap = fec_flashmac;
		if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
		    (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
			iap = fec_mac_default;
		if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
		    (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
			iap = fec_mac_default;
	} else {
		*((unsigned long *) &tmpaddr[0]) = fecp->fec_addr_low;
		*((unsigned short *) &tmpaddr[4]) = (fecp->fec_addr_high >> 16);
		iap = &tmpaddr[0];
	}

	/* Index rather than advance iap, so the default-address check
	 * below still compares the original pointer.
	 */
	for (i=0; i<ETH_ALEN; i++)
		dev->dev_addr[i] = fep->mac_addr[i] = iap[i];

	/* Adjust MAC if using default MAC address */
	if (iap == fec_mac_default) {
		dev->dev_addr[ETH_ALEN-1] = fep->mac_addr[ETH_ALEN-1] =
			iap[ETH_ALEN-1] + fep->index;
	}
}

static void __inline__ fec_enable_phy_intr(struct fec_enet_private *fep)
{
}

static void __inline__ fec_disable_phy_intr(void)
{
	volatile unsigned long *icrp;
	icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1);
	*icrp = (*icrp & 0x70777777) | 0x08000000;
}

static void __inline__ fec_phy_ack_intr(void)
{
	volatile unsigned long *icrp;
	/* Acknowledge the interrupt */
	icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1);
	*icrp = (*icrp & 0x77777777) | 0x08000000;
}

static void __inline__ fec_localhw_setup(struct fec_enet_private *fep)
{
}

/*
 * Do not need to make region uncached on 5272.
 */
static void __inline__ fec_uncache(unsigned long addr)
{
}

/* ------------------------------------------------------------------------- */

#elif defined(CONFIG_M527x) || defined(CONFIG_M528x)

/*
 * Code specific to Coldfire 5270/5271/5274/5275 and 5280/5282 setups.
 */
static void __inline__ fec_request_intrs(struct net_device *dev)
{
	struct fec_enet_private *fep;
	int b;

	fep = netdev_priv(dev);
	b = (fep->index) ? 128 : 64;

	/* Setup interrupt handlers. */
	if (request_irq(b+23, fec_enet_interrupt, 0, "fec(TXF)", dev) != 0)
		printk("FEC: Could not allocate FEC(TXF) IRQ(%d+23)!\n", b);
	if (request_irq(b+24, fec_enet_interrupt, 0, "fec(TXB)", dev) != 0)
		printk("FEC: Could not allocate FEC(TXB) IRQ(%d+24)!\n", b);
	if (request_irq(b+25, fec_enet_interrupt, 0, "fec(TXFIFO)", dev) != 0)
		printk("FEC: Could not allocate FEC(TXFIFO) IRQ(%d+25)!\n", b);
	if (request_irq(b+26, fec_enet_interrupt, 0, "fec(TXCR)", dev) != 0)
		printk("FEC: Could not allocate FEC(TXCR) IRQ(%d+26)!\n", b);

	if (request_irq(b+27, fec_enet_interrupt, 0, "fec(RXF)", dev) != 0)
		printk("FEC: Could not allocate FEC(RXF) IRQ(%d+27)!\n", b);
	if (request_irq(b+28, fec_enet_interrupt, 0, "fec(RXB)", dev) != 0)
		printk("FEC: Could not allocate FEC(RXB) IRQ(%d+28)!\n", b);

	if (request_irq(b+29, fec_enet_interrupt, 0, "fec(MII)", dev) != 0)
		printk("FEC: Could not allocate FEC(MII) IRQ(%d+29)!\n", b);
	if (request_irq(b+30, fec_enet_interrupt, 0, "fec(LC)", dev) != 0)
		printk("FEC: Could not allocate FEC(LC) IRQ(%d+30)!\n", b);
	if (request_irq(b+31, fec_enet_interrupt, 0, "fec(HBERR)", dev) != 0)
		printk("FEC: Could not allocate FEC(HBERR) IRQ(%d+31)!\n", b);
	if (request_irq(b+32, fec_enet_interrupt, 0, "fec(GRA)", dev) != 0)
		printk("FEC: Could not allocate FEC(GRA) IRQ(%d+32)!\n", b);
	if (request_irq(b+33, fec_enet_interrupt, 0, "fec(EBERR)", dev) != 0)
		printk("FEC: Could not allocate FEC(EBERR) IRQ(%d+33)!\n", b);
	if (request_irq(b+34, fec_enet_interrupt, 0, "fec(BABT)", dev) != 0)
		printk("FEC: Could not allocate FEC(BABT) IRQ(%d+34)!\n", b);
	if (request_irq(b+35, fec_enet_interrupt, 0, "fec(BABR)", dev) != 0)
		printk("FEC: Could not allocate FEC(BABR) IRQ(%d+35)!\n", b);

	/* Unmask interrupts at ColdFire 5280/5282 interrupt controller */
	{
		volatile unsigned char	*icrp;
		volatile unsigned long	*imrp;
		int i;

		b = (fep->index) ? MCFICM_INTC1 : MCFICM_INTC0;
		icrp = (volatile unsigned char *) (MCF_IPSBAR + b +
			MCFINTC_ICR0);
		for (i = 23; (i < 36); i++)
			icrp[i] = 0x23;

		imrp = (volatile unsigned long *) (MCF_IPSBAR + b +
			MCFINTC_IMRH);
		*imrp &= ~0x0000000f;
		imrp = (volatile unsigned long *) (MCF_IPSBAR + b +
			MCFINTC_IMRL);
		*imrp &= ~0xff800001;
	}

#if defined(CONFIG_M528x)
	/* Set up gpio outputs for MII lines */
	{
		volatile unsigned short *gpio_paspar;

		gpio_paspar = (volatile unsigned short *) (MCF_IPSBAR +
			0x100056);
		*gpio_paspar = 0x0f00;
	}
#endif
}

static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep)
{
	volatile fec_t *fecp;

	fecp = fep->hwp;
	fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04;
	fecp->fec_x_cntrl = 0x00;

	/*
	 * Set MII speed to 2.5 MHz
	 * See 5282 manual section 17.5.4.7: MSCR
	 */
	fep->phy_speed = ((((MCF_CLK / 2) / (2500000 / 10)) + 5) / 10) * 2;
	fecp->fec_mii_speed = fep->phy_speed;

	fec_restart(dev, 0);
}

static void __inline__ fec_get_mac(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile fec_t *fecp;
	unsigned char *iap, tmpaddr[6];
	int i;

	fecp = fep->hwp;

	if (fec_flashmac) {
		/*
		 * Get MAC address from FLASH.
		 * If it is all 1's or 0's, use the default.
		 */
		iap = fec_flashmac;
		if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
		    (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
			iap = fec_mac_default;
		if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
		    (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
			iap = fec_mac_default;
	} else {
		*((unsigned long *) &tmpaddr[0]) = fecp->fec_addr_low;
		*((unsigned short *) &tmpaddr[4]) = (fecp->fec_addr_high >> 16);
		iap = &tmpaddr[0];
	}

	/* Index rather than advance iap, so the default-address check
	 * below still compares the original pointer.
	 */
	for (i=0; i<ETH_ALEN; i++)
		dev->dev_addr[i] = fep->mac_addr[i] = iap[i];

	/* Adjust MAC if using default MAC address */
	if (iap == fec_mac_default) {
		dev->dev_addr[ETH_ALEN-1] = fep->mac_addr[ETH_ALEN-1] =
			iap[ETH_ALEN-1] + fep->index;
	}
}

static void __inline__ fec_enable_phy_intr(struct fec_enet_private *fep)
{
}

static void __inline__ fec_disable_phy_intr(void)
{
}

static void __inline__ fec_phy_ack_intr(void)
{
}

static void __inline__ fec_localhw_setup(struct fec_enet_private *fep)
{
}

/*
 * Do not need to make region uncached on 527x/528x.
 */
static void __inline__ fec_uncache(unsigned long addr)
{
}

/* ------------------------------------------------------------------------- */

#else

/*
 * Code specific to the MPC860T setup.
 */
static void __inline__ fec_request_intrs(struct net_device *dev)
{
	volatile immap_t *immap;

	immap = (immap_t *)IMAP_ADDR;	/* pointer to internal registers */

	if (request_8xxirq(FEC_INTERRUPT, fec_enet_interrupt, 0, "fec", dev) != 0)
		panic("Could not allocate FEC IRQ!");

#ifdef CONFIG_RPXCLASSIC
	/* Make Port C, bit 15 an input that causes interrupts.
	 */
	immap->im_ioport.iop_pcpar &= ~0x0001;
	immap->im_ioport.iop_pcdir &= ~0x0001;
	immap->im_ioport.iop_pcso  &= ~0x0001;
	immap->im_ioport.iop_pcint |=  0x0001;
	cpm_install_handler(CPMVEC_PIO_PC15, mii_link_interrupt, dev);

	/* Make LEDS reflect Link status.
	*/
	*((uint *) RPX_CSR_ADDR) &= ~BCSR2_FETHLEDMODE;
#endif
#ifdef CONFIG_FADS
	if (request_8xxirq(SIU_IRQ2, mii_link_interrupt, 0, "mii", dev) != 0)
		panic("Could not allocate MII IRQ!");
#endif
}

static void __inline__ fec_get_mac(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	unsigned char *iap, tmpaddr[6];
	bd_t *bd;
	int i;

	/* Initialize bd before dereferencing it for the address. */
	bd = (bd_t *)__res;
	iap = bd->bi_enetaddr;

#ifdef CONFIG_RPXCLASSIC
	/* The Embedded Planet boards have only one MAC address in
	 * the EEPROM, but can have two Ethernet ports.  For the
	 * FEC port, we create another address by setting one of
	 * the address bits above something that would have (up to
	 * now) been allocated.
	 */
	for (i=0; i<6; i++)
		tmpaddr[i] = *iap++;
	tmpaddr[3] |= 0x80;
	iap = tmpaddr;
#endif

	for (i=0; i<6; i++)
		dev->dev_addr[i] = fep->mac_addr[i] = *iap++;
}

static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep)
{
	extern uint _get_IMMR(void);
	volatile immap_t *immap;
	volatile fec_t *fecp;
	bd_t *bd;

	bd = (bd_t *)__res;
	fecp = fep->hwp;
	immap = (immap_t *)IMAP_ADDR;	/* pointer to internal registers */

	/* Configure all of port D for MII.
	*/
	immap->im_ioport.iop_pdpar = 0x1fff;

	/* Bits moved from Rev. D onward.
	*/
	if ((_get_IMMR() & 0xffff) < 0x0501)
		immap->im_ioport.iop_pddir = 0x1c58;	/* Pre rev. D */
	else
		immap->im_ioport.iop_pddir = 0x1fff;	/* Rev. D and later */

	/* Set MII speed to 2.5 MHz
	*/
	fecp->fec_mii_speed = fep->phy_speed =
		((bd->bi_busfreq * 1000000) / 2500000) & 0x7e;
}

static void __inline__ fec_enable_phy_intr(struct fec_enet_private *fep)
{
	volatile fec_t *fecp;

	fecp = fep->hwp;

	/* Enable MII command finished interrupt
	*/
	fecp->fec_ivec = (FEC_INTERRUPT/2) << 29;
}

static void __inline__ fec_disable_phy_intr(void)
{
}

static void __inline__ fec_phy_ack_intr(void)
{
}

static void __inline__ fec_localhw_setup(struct fec_enet_private *fep)
{
	volatile fec_t *fecp;

	fecp = fep->hwp;
	fecp->fec_r_hash = PKT_MAXBUF_SIZE;
	/* Enable big endian and don't care about SDMA FC.
	*/
	fecp->fec_fun_code = 0x78000000;
}

static void __inline__ fec_uncache(unsigned long addr)
{
	pte_t *pte;
	pte = va_to_pte(addr);
	pte_val(*pte) |= _PAGE_NO_CACHE;
	flush_tlb_page(init_mm.mmap, addr);
}

#endif

/* ------------------------------------------------------------------------- */

static void mii_display_status(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);

	if (!fep->link && !fep->old_link) {
		/* Link is still down - don't print anything */
		return;
	}

	printk("%s: status: ", dev->name);

	if (!fep->link) {
		printk("link down");
	} else {
		printk("link up");

		switch(*s & PHY_STAT_SPMASK) {
		case PHY_STAT_100FDX: printk(", 100MBit Full Duplex"); break;
		case PHY_STAT_100HDX: printk(", 100MBit Half Duplex"); break;
		case PHY_STAT_10FDX: printk(", 10MBit Full Duplex"); break;
		case PHY_STAT_10HDX: printk(", 10MBit Half Duplex"); break;
		default:
			printk(", Unknown speed/duplex");
		}

		if (*s & PHY_STAT_ANC)
			printk(", auto-negotiation complete");
	}

	if (*s & PHY_STAT_FAULT)
		printk(", remote fault");

	printk(".\n");
}

static void mii_display_config(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);

	/*
	** When we get here, phy_task is already removed from
	** the workqueue.  It is thus safe to reuse it.
	*/
	fep->mii_phy_task_queued = 0;
	printk("%s: config: auto-negotiation ", dev->name);

	if (*s & PHY_CONF_ANE)
		printk("on");
	else
		printk("off");

	if (*s & PHY_CONF_100FDX)
		printk(", 100FDX");
	if (*s & PHY_CONF_100HDX)
		printk(", 100HDX");
	if (*s & PHY_CONF_10FDX)
		printk(", 10FDX");
	if (*s & PHY_CONF_10HDX)
		printk(", 10HDX");
	if (!(*s & PHY_CONF_SPMASK))
		printk(", No speed/duplex selected?");

	if (*s & PHY_CONF_LOOP)
		printk(", loopback enabled");

	printk(".\n");

	fep->sequence_done = 1;
}

static void mii_relink(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int duplex;

	/*
	** When we get here, phy_task is already removed from
	** the workqueue.  It is thus safe to reuse it.
	*/
	fep->mii_phy_task_queued = 0;
	fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
	mii_display_status(dev);
	fep->old_link = fep->link;

	if (fep->link) {
		duplex = 0;
		if (fep->phy_status
		    & (PHY_STAT_100FDX | PHY_STAT_10FDX))
			duplex = 1;
		fec_restart(dev, duplex);
	} else
		fec_stop(dev);

#if 0
	enable_irq(fep->mii_irq);
#endif

}

/* mii_queue_relink is called in interrupt context from mii_link_interrupt */
static void mii_queue_relink(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/*
	** We cannot queue phy_task twice in the workqueue.  It
	** would cause an endless loop in the workqueue.
	** Fortunately, if the last mii_relink entry has not yet been
	** executed now, it will do the job for the current interrupt,
	** which is just what we want.
	*/
	if (fep->mii_phy_task_queued)
		return;

	fep->mii_phy_task_queued = 1;
	INIT_WORK(&fep->phy_task, (void*)mii_relink, dev);
	schedule_work(&fep->phy_task);
}

/* mii_queue_config is called in user context from fec_enet_open */
static void mii_queue_config(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	if (fep->mii_phy_task_queued)
		return;

	fep->mii_phy_task_queued = 1;
	INIT_WORK(&fep->phy_task, (void*)mii_display_config, dev);
	schedule_work(&fep->phy_task);
}



phy_cmd_t phy_cmd_relink[] = { { mk_mii_read(MII_REG_CR), mii_queue_relink },
			       { mk_mii_end, } };
phy_cmd_t phy_cmd_config[] = { { mk_mii_read(MII_REG_CR), mii_queue_config },
			       { mk_mii_end, } };



/* Read remainder of PHY ID.
*/
static void
mii_discover_phy3(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep;
	int i;

	fep = netdev_priv(dev);
	fep->phy_id |= (mii_reg & 0xffff);
	printk("fec: PHY @ 0x%x, ID 0x%08x", fep->phy_addr, fep->phy_id);

	for(i = 0; phy_info[i]; i++) {
		if(phy_info[i]->id == (fep->phy_id >> 4))
			break;
	}

	if (phy_info[i])
		printk(" -- %s\n", phy_info[i]->name);
	else
		printk(" -- unknown PHY!\n");

	fep->phy = phy_info[i];
	fep->phy_id_done = 1;
}

/* Scan all of the MII PHY addresses looking for someone to respond
 * with a valid ID.  This usually happens quickly.
 */
static void
mii_discover_phy(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep;
	volatile fec_t *fecp;
	uint phytype;

	fep = netdev_priv(dev);
	fecp = fep->hwp;

	if (fep->phy_addr < 32) {
		if ((phytype = (mii_reg & 0xffff)) != 0xffff && phytype != 0) {

			/* Got first part of ID, now get remainder.
			*/
			fep->phy_id = phytype << 16;
			mii_queue(dev, mk_mii_read(MII_REG_PHYIR2),
							mii_discover_phy3);
		} else {
			fep->phy_addr++;
			mii_queue(dev, mk_mii_read(MII_REG_PHYIR1),
							mii_discover_phy);
		}
	} else {
		printk("FEC: No PHY device found.\n");
		/* Disable external MII interface */
		fecp->fec_mii_speed = fep->phy_speed = 0;
		fec_disable_phy_intr();
	}
}
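/*
 * Discovery flow, illustrated: fec_enet_init() below seeds the process with
 * mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy).  Each
 * non-response bumps phy_addr and re-queues the PHYIR1 read; a response
 * chains a PHYIR2 read through mii_discover_phy3(), which assembles the
 * 32-bit ID and looks it up in phy_info[].
 */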
/* This interrupt occurs when the PHY detects a link change.
*/
#ifdef CONFIG_RPXCLASSIC
static void
mii_link_interrupt(void *dev_id)
#else
static irqreturn_t
mii_link_interrupt(int irq, void * dev_id, struct pt_regs * regs)
#endif
{
	struct	net_device *dev = dev_id;
	struct fec_enet_private *fep = netdev_priv(dev);

	fec_phy_ack_intr();

#if 0
	disable_irq(fep->mii_irq);  /* disable now, enable later */
#endif

	mii_do_cmd(dev, fep->phy->ack_int);
	mii_do_cmd(dev, phy_cmd_relink);  /* restart and display status */

#ifndef CONFIG_RPXCLASSIC
	return IRQ_HANDLED;
#endif
}

static int
fec_enet_open(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/* I should reset the ring buffers here, but I don't yet know
	 * a simple way to do that.
	 */
	fec_set_mac_address(dev);

	fep->sequence_done = 0;
	fep->link = 0;

	if (fep->phy) {
		mii_do_cmd(dev, fep->phy->ack_int);
		mii_do_cmd(dev, fep->phy->config);
		mii_do_cmd(dev, phy_cmd_config);  /* display configuration */

		/* FIXME: use netif_carrier_{on,off} ; this polls
		 * until link is up which is wrong...  could be
		 * 30 seconds or more we are trapped in here. -jgarzik
		 */
		while(!fep->sequence_done)
			schedule();

		mii_do_cmd(dev, fep->phy->startup);

		/* Set the initial link state to true.  A lot of hardware
		 * based on this device does not implement a PHY interrupt,
		 * so we are never notified of link change.
		 */
		fep->link = 1;
	} else {
		fep->link = 1; /* let's just try it and see */
		/* no phy, go full duplex, it's most likely a hub chip */
		fec_restart(dev, 1);
	}

	netif_start_queue(dev);
	fep->opened = 1;
	return 0;		/* Success */
}

static int
fec_enet_close(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/* Don't know what to do yet.
	*/
	fep->opened = 0;
	netif_stop_queue(dev);
	fec_stop(dev);

	return 0;
}

static struct net_device_stats *fec_enet_get_stats(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	return &fep->stats;
}

/* Set or clear the multicast filter for this adaptor.
 * Skeleton taken from sunlance driver.
 * The CPM Ethernet implementation allows Multicast as well as individual
 * MAC address filtering.  Some of the drivers check to make sure it is
 * a group multicast address, and discard those that are not.  I guess I
 * will do the same for now, but just remove the test if you want
 * individual filtering as well (do the upper net layers want or support
 * this kind of feature?).
 */

#define HASH_BITS	6		/* #bits in hash */
#define CRC32_POLY	0xEDB88320

static void set_multicast_list(struct net_device *dev)
{
	struct fec_enet_private *fep;
	volatile fec_t *ep;
	struct dev_mc_list *dmi;
	unsigned int i, j, bit, data, crc;
	unsigned char hash;

	fep = netdev_priv(dev);
	ep = fep->hwp;

	if (dev->flags&IFF_PROMISC) {
		/* Log any net taps. */
		printk("%s: Promiscuous mode enabled.\n", dev->name);
		ep->fec_r_cntrl |= 0x0008;
	} else {

		ep->fec_r_cntrl &= ~0x0008;

		if (dev->flags & IFF_ALLMULTI) {
			/* Catch all multicast addresses, so set the
			 * filter to all 1's.
			 */
			ep->fec_hash_table_high = 0xffffffff;
			ep->fec_hash_table_low = 0xffffffff;
		} else {
			/* Clear filter and add the addresses in hash register.
			*/
			ep->fec_hash_table_high = 0;
			ep->fec_hash_table_low = 0;

			dmi = dev->mc_list;

			for (j = 0; j < dev->mc_count; j++, dmi = dmi->next)
			{
				/* Only support group multicast for now.
				*/
				if (!(dmi->dmi_addr[0] & 1))
					continue;

				/* calculate crc32 value of mac address
				*/
				crc = 0xffffffff;

				for (i = 0; i < dmi->dmi_addrlen; i++)
				{
					data = dmi->dmi_addr[i];
					for (bit = 0; bit < 8; bit++, data >>= 1)
					{
						crc = (crc >> 1) ^
						(((crc ^ data) & 1) ? CRC32_POLY : 0);
					}
				}

				/* only upper 6 bits (HASH_BITS) are used
				 * which point to a specific bit in the hash registers
				 */
				hash = (crc >> (32 - HASH_BITS)) & 0x3f;

				if (hash > 31)
					ep->fec_hash_table_high |= 1 << (hash - 32);
				else
					ep->fec_hash_table_low |= 1 << hash;
			}
		}
	}
}
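/*
 * Worked example of the hash mapping above (the CRC value itself is
 * illustrative): if crc = 0xabcd1234, then hash = (crc >> 26) & 0x3f = 42.
 * Since 42 > 31, bit (42 - 32) = 10 of fec_hash_table_high is set; values
 * 0..31 land in fec_hash_table_low instead.
 */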
/* Set a MAC change in hardware.
 */
static void
fec_set_mac_address(struct net_device *dev)
{
	struct fec_enet_private *fep;
	volatile fec_t *fecp;

	fep = netdev_priv(dev);
	fecp = fep->hwp;

	/* Set station address. */
	fecp->fec_addr_low = fep->mac_addr[3] | (fep->mac_addr[2] << 8) |
		(fep->mac_addr[1] << 16) | (fep->mac_addr[0] << 24);
	fecp->fec_addr_high = (fep->mac_addr[5] << 16) |
		(fep->mac_addr[4] << 24);

}
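/*
 * Example of the register packing above (illustrative address): for MAC
 * 00:11:22:33:44:55, fec_addr_low becomes 0x00112233 (the first four
 * octets, most significant first) and fec_addr_high becomes 0x44550000
 * (the last two octets in its upper half).
 */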

/* Initialize the FEC Ethernet on 860T (or ColdFire 5272).
 */
 /*
  * XXX: We need to clean up on failure exits here.
  */
int __init fec_enet_init(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	unsigned long mem_addr;
	volatile cbd_t *bdp;
	cbd_t *cbd_base;
	volatile fec_t *fecp;
	int i, j;
	static int index = 0;

	/* Only allow us to be probed once. */
	if (index >= FEC_MAX_PORTS)
		return -ENXIO;

	/* Create an Ethernet device instance.
	*/
	fecp = (volatile fec_t *) fec_hw[index];

	fep->index = index;
	fep->hwp = fecp;

	/* Whack a reset.  We should wait for this.
	*/
	fecp->fec_ecntrl = 1;
	udelay(10);

	/* Clear and enable interrupts */
	fecp->fec_ievent = 0xffc0;
	fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_TXB |
		FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII);
	fecp->fec_hash_table_high = 0;
	fecp->fec_hash_table_low = 0;
	fecp->fec_r_buff_size = PKT_MAXBLR_SIZE;
	fecp->fec_ecntrl = 2;
	fecp->fec_r_des_active = 0x01000000;

	/* Set the Ethernet address.  If using multiple Enets on the 8xx,
	 * this needs some work to get unique addresses.
	 *
	 * This is our default MAC address unless the user changes
	 * it via eth_mac_addr (our dev->set_mac_addr handler).
	 */
	fec_get_mac(dev);

	/* Allocate memory for buffer descriptors.
	*/
	if (((RX_RING_SIZE + TX_RING_SIZE) * sizeof(cbd_t)) > PAGE_SIZE) {
		printk(KERN_ERR "FEC init error: descriptor rings do not fit in one page.\n");
		return -ENOMEM;
	}
	mem_addr = __get_free_page(GFP_KERNEL);
	if (!mem_addr)
		return -ENOMEM;
	cbd_base = (cbd_t *)mem_addr;

	fec_uncache(mem_addr);

	/* Set receive and transmit descriptor base.
	*/
	fep->rx_bd_base = cbd_base;
	fep->tx_bd_base = cbd_base + RX_RING_SIZE;

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->cur_rx = fep->rx_bd_base;

	fep->skb_cur = fep->skb_dirty = 0;

	/* Initialize the receive buffer descriptors.
	*/
	bdp = fep->rx_bd_base;
	for (i=0; i<FEC_ENET_RX_PAGES; i++) {

		/* Allocate a page.
		*/
		mem_addr = __get_free_page(GFP_KERNEL);
		if (!mem_addr)
			return -ENOMEM;	/* XXX: leaks the pages already allocated */

		fec_uncache(mem_addr);

		/* Initialize the BD for every fragment in the page.
		*/
		for (j=0; j<FEC_ENET_RX_FRPPG; j++) {
			bdp->cbd_sc = BD_ENET_RX_EMPTY;
			bdp->cbd_bufaddr = __pa(mem_addr);
			mem_addr += FEC_ENET_RX_FRSIZE;
			bdp++;
		}
	}

	/* Set the last buffer to wrap.
	*/
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* ...and the same for transmit.
	*/
	bdp = fep->tx_bd_base;
	for (i=0, j=FEC_ENET_TX_FRPPG; i<TX_RING_SIZE; i++) {
		if (j >= FEC_ENET_TX_FRPPG) {
			/* The current page is full; start a new one. */
			mem_addr = __get_free_page(GFP_KERNEL);
			if (!mem_addr)
				return -ENOMEM;	/* XXX: leaks the pages already allocated */
			j = 1;
		} else {
			mem_addr += FEC_ENET_TX_FRSIZE;
			j++;
		}
		fep->tx_bounce[i] = (unsigned char *) mem_addr;

		/* Initialize the BD for every fragment in the page.
		*/
		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}

	/* Set the last buffer to wrap.
	*/
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* Set receive and transmit descriptor base.
	*/
	fecp->fec_r_des_start = __pa((uint)(fep->rx_bd_base));
	fecp->fec_x_des_start = __pa((uint)(fep->tx_bd_base));

	/* Install our interrupt handlers.  This varies depending on
	 * the architecture.
	*/
	fec_request_intrs(dev);

	dev->base_addr = (unsigned long)fecp;

	/* The FEC Ethernet specific entries in the device structure. */
	dev->open = fec_enet_open;
	dev->hard_start_xmit = fec_enet_start_xmit;
	dev->tx_timeout = fec_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->stop = fec_enet_close;
	dev->get_stats = fec_enet_get_stats;
	dev->set_multicast_list = set_multicast_list;

	for (i=0; i<NMII-1; i++)
		mii_cmds[i].mii_next = &mii_cmds[i+1];
	mii_free = mii_cmds;

	/* Set up the MII interface. */
	fec_set_mii(dev, fep);

	printk(KERN_INFO "%s: FEC ENET Version 0.2, ", dev->name);
	for (i=0; i<5; i++)
		printk("%02x:", dev->dev_addr[i]);
	printk("%02x\n", dev->dev_addr[5]);

	/* Queue up command to detect the PHY and initialize the
	 * remainder of the interface.
	 */
	fep->phy_id_done = 0;
	fep->phy_addr = 0;
	mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy);

	index++;
	return 0;
}
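
/* A minimal sketch of the missing failure-path cleanup noted in the
 * XXX comments above, assuming both rings were carved out of whole
 * pages exactly as fec_enet_init() does it and were fully initialized.
 * FEC_INIT_CLEANUP_EXAMPLE and fec_free_ring_pages() are illustrative
 * names, not part of the driver; a real fix would also have to handle
 * partially initialized rings and undo fec_request_intrs().
 */
#ifdef FEC_INIT_CLEANUP_EXAMPLE
static void fec_free_ring_pages(struct fec_enet_private *fep)
{
	volatile cbd_t *bdp = fep->rx_bd_base;
	int i;

	/* Every FEC_ENET_RX_FRPPG-th receive descriptor points at the
	 * start of one of the receive pages allocated above.
	 */
	for (i = 0; i < RX_RING_SIZE; i += FEC_ENET_RX_FRPPG)
		free_page((unsigned long) __va(bdp[i].cbd_bufaddr));

	/* The transmit bounce buffers were carved from pages the same
	 * way, but their virtual addresses were kept in tx_bounce[].
	 */
	for (i = 0; i < TX_RING_SIZE; i += FEC_ENET_TX_FRPPG)
		free_page((unsigned long) fep->tx_bounce[i]);

	/* Finally, the page holding both descriptor rings. */
	free_page((unsigned long) fep->rx_bd_base);
}
#endif /* FEC_INIT_CLEANUP_EXAMPLE */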

/* This function is called to start or restart the FEC during a link
 * change.  This only happens when switching between half and full
 * duplex.
 */
static void
fec_restart(struct net_device *dev, int duplex)
{
	struct fec_enet_private *fep;
	volatile cbd_t *bdp;
	volatile fec_t *fecp;
	int i;

	fep = netdev_priv(dev);
	fecp = fep->hwp;

	/* Whack a reset.  We should wait for this.
	*/
	fecp->fec_ecntrl = 1;
	udelay(10);

	/* Enable interrupts we wish to service.
	*/
	fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_TXB |
		FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII);

	/* Clear any outstanding interrupt.
	*/
	fecp->fec_ievent = 0xffc0;
	fec_enable_phy_intr();

	/* Set station address.
	*/
	fecp->fec_addr_low = fep->mac_addr[3] | (fep->mac_addr[2] << 8) |
		(fep->mac_addr[1] << 16) | (fep->mac_addr[0] << 24);
	fecp->fec_addr_high = (fep->mac_addr[5] << 16) |
		(fep->mac_addr[4] << 24);

	for (i=0; i<ETH_ALEN; i++)
		dev->dev_addr[i] = fep->mac_addr[i];

	/* Reset all multicast.
	*/
	fecp->fec_hash_table_high = 0;
	fecp->fec_hash_table_low = 0;

	/* Set maximum receive buffer size.
	*/
	fecp->fec_r_buff_size = PKT_MAXBLR_SIZE;

	fec_localhw_setup();

	/* Set receive and transmit descriptor base.
	*/
	fecp->fec_r_des_start = __pa((uint)(fep->rx_bd_base));
	fecp->fec_x_des_start = __pa((uint)(fep->tx_bd_base));

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->cur_rx = fep->rx_bd_base;

	/* Reset SKB transmit buffers.
	*/
	fep->skb_cur = fep->skb_dirty = 0;
	for (i=0; i<=TX_RING_MOD_MASK; i++) {
		if (fep->tx_skbuff[i] != NULL) {
			dev_kfree_skb_any(fep->tx_skbuff[i]);
			fep->tx_skbuff[i] = NULL;
		}
	}

	/* Initialize the receive buffer descriptors.
	*/
	bdp = fep->rx_bd_base;
	for (i=0; i<RX_RING_SIZE; i++) {

		/* Initialize the BD for every fragment in the page.
		*/
		bdp->cbd_sc = BD_ENET_RX_EMPTY;
		bdp++;
	}

	/* Set the last buffer to wrap.
	*/
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* ...and the same for transmit.
	*/
	bdp = fep->tx_bd_base;
	for (i=0; i<TX_RING_SIZE; i++) {

		/* Initialize the BD for every fragment in the page.
		*/
		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}

	/* Set the last buffer to wrap.
	*/
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* Enable MII mode.
	*/
	if (duplex) {
		fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04;/* MII enable */
		fecp->fec_x_cntrl = 0x04;		  /* FD enable */
	}
	else {
		/* MII enable|No Rcv on Xmit */
		fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x06;
		fecp->fec_x_cntrl = 0x00;
	}
	fep->full_duplex = duplex;

	/* Set MII speed.
	*/
	fecp->fec_mii_speed = fep->phy_speed;

	/* And last, enable the transmit and receive processing.
	*/
	fecp->fec_ecntrl = 2;
	fecp->fec_r_des_active = 0x01000000;
}

static void
fec_stop(struct net_device *dev)
{
	volatile fec_t *fecp;
	struct fec_enet_private *fep;
	int i;

	fep = netdev_priv(dev);
	fecp = fep->hwp;

	fecp->fec_x_cntrl = 0x01;	/* Graceful transmit stop */

	/* Wait for the graceful-stop acknowledge, but don't spin
	 * forever if the controller never sets it.
	 */
	for (i = 0; !(fecp->fec_ievent & 0x10000000) && (i < 1000); i++)
		udelay(10);
	if (i == 1000)
		printk(KERN_ERR "FEC: graceful transmit stop timed out!\n");

	/* Whack a reset.  We should wait for this.
	*/
	fecp->fec_ecntrl = 1;
	udelay(10);

	/* Clear outstanding MII command interrupts.
	*/
	fecp->fec_ievent = FEC_ENET_MII;
	fec_enable_phy_intr();

	fecp->fec_imask = FEC_ENET_MII;
	fecp->fec_mii_speed = fep->phy_speed;
}

static int __init fec_enet_module_init(void)
{
	struct net_device *dev;
	int i, err;

	for (i = 0; (i < FEC_MAX_PORTS); i++) {
		dev = alloc_etherdev(sizeof(struct fec_enet_private));
		if (!dev)
			return -ENOMEM;
		err = fec_enet_init(dev);
		if (err) {
			free_netdev(dev);
			continue;
		}
		if (register_netdev(dev) != 0) {
			/* XXX: missing cleanup here */
			free_netdev(dev);
			return -EIO;
		}
	}
	return 0;
}

module_init(fec_enet_module_init);

MODULE_LICENSE("GPL");
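
/* As written, the module has no unload path: fec_enet_module_init()
 * does not keep the net_device pointers and there is no module_exit(),
 * so the module cannot safely be removed.  A minimal sketch of what an
 * unload path might look like, assuming the probed devices had been
 * saved in an array at init time; FEC_MODULE_EXIT_EXAMPLE and
 * fec_enet_devs[] are illustrative names, not part of the driver:
 */
#ifdef FEC_MODULE_EXIT_EXAMPLE
static struct net_device *fec_enet_devs[FEC_MAX_PORTS];	/* hypothetical */

static void __exit fec_enet_module_exit(void)
{
	int i;

	for (i = 0; i < FEC_MAX_PORTS; i++) {
		if (fec_enet_devs[i] == NULL)
			continue;
		unregister_netdev(fec_enet_devs[i]);
		/* XXX: ring pages and interrupts would also need freeing. */
		free_netdev(fec_enet_devs[i]);
	}
}
module_exit(fec_enet_module_exit);
#endif /* FEC_MODULE_EXIT_EXAMPLE */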