/* Source snapshot: Linux v2.6.26-rc7 drivers/net/macb.c (1304 lines, 31 kB) */
1/* 2 * Atmel MACB Ethernet Controller driver 3 * 4 * Copyright (C) 2004-2006 Atmel Corporation 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 */ 10 11#include <linux/clk.h> 12#include <linux/module.h> 13#include <linux/moduleparam.h> 14#include <linux/kernel.h> 15#include <linux/types.h> 16#include <linux/slab.h> 17#include <linux/init.h> 18#include <linux/netdevice.h> 19#include <linux/etherdevice.h> 20#include <linux/dma-mapping.h> 21#include <linux/platform_device.h> 22#include <linux/phy.h> 23 24#include <asm/arch/board.h> 25#include <asm/arch/cpu.h> 26 27#include "macb.h" 28 29#define RX_BUFFER_SIZE 128 30#define RX_RING_SIZE 512 31#define RX_RING_BYTES (sizeof(struct dma_desc) * RX_RING_SIZE) 32 33/* Make the IP header word-aligned (the ethernet header is 14 bytes) */ 34#define RX_OFFSET 2 35 36#define TX_RING_SIZE 128 37#define DEF_TX_RING_PENDING (TX_RING_SIZE - 1) 38#define TX_RING_BYTES (sizeof(struct dma_desc) * TX_RING_SIZE) 39 40#define TX_RING_GAP(bp) \ 41 (TX_RING_SIZE - (bp)->tx_pending) 42#define TX_BUFFS_AVAIL(bp) \ 43 (((bp)->tx_tail <= (bp)->tx_head) ? 
\ 44 (bp)->tx_tail + (bp)->tx_pending - (bp)->tx_head : \ 45 (bp)->tx_tail - (bp)->tx_head - TX_RING_GAP(bp)) 46#define NEXT_TX(n) (((n) + 1) & (TX_RING_SIZE - 1)) 47 48#define NEXT_RX(n) (((n) + 1) & (RX_RING_SIZE - 1)) 49 50/* minimum number of free TX descriptors before waking up TX process */ 51#define MACB_TX_WAKEUP_THRESH (TX_RING_SIZE / 4) 52 53#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ 54 | MACB_BIT(ISR_ROVR)) 55 56static void __macb_set_hwaddr(struct macb *bp) 57{ 58 u32 bottom; 59 u16 top; 60 61 bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr)); 62 macb_writel(bp, SA1B, bottom); 63 top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4))); 64 macb_writel(bp, SA1T, top); 65} 66 67static void __init macb_get_hwaddr(struct macb *bp) 68{ 69 u32 bottom; 70 u16 top; 71 u8 addr[6]; 72 73 bottom = macb_readl(bp, SA1B); 74 top = macb_readl(bp, SA1T); 75 76 addr[0] = bottom & 0xff; 77 addr[1] = (bottom >> 8) & 0xff; 78 addr[2] = (bottom >> 16) & 0xff; 79 addr[3] = (bottom >> 24) & 0xff; 80 addr[4] = top & 0xff; 81 addr[5] = (top >> 8) & 0xff; 82 83 if (is_valid_ether_addr(addr)) 84 memcpy(bp->dev->dev_addr, addr, sizeof(addr)); 85} 86 87static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 88{ 89 struct macb *bp = bus->priv; 90 int value; 91 92 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) 93 | MACB_BF(RW, MACB_MAN_READ) 94 | MACB_BF(PHYA, mii_id) 95 | MACB_BF(REGA, regnum) 96 | MACB_BF(CODE, MACB_MAN_CODE))); 97 98 /* wait for end of transfer */ 99 while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) 100 cpu_relax(); 101 102 value = MACB_BFEXT(DATA, macb_readl(bp, MAN)); 103 104 return value; 105} 106 107static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum, 108 u16 value) 109{ 110 struct macb *bp = bus->priv; 111 112 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) 113 | MACB_BF(RW, MACB_MAN_WRITE) 114 | MACB_BF(PHYA, mii_id) 115 | MACB_BF(REGA, regnum) 116 | MACB_BF(CODE, MACB_MAN_CODE) 117 | MACB_BF(DATA, value))); 
118 119 /* wait for end of transfer */ 120 while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) 121 cpu_relax(); 122 123 return 0; 124} 125 126static int macb_mdio_reset(struct mii_bus *bus) 127{ 128 return 0; 129} 130 131static void macb_handle_link_change(struct net_device *dev) 132{ 133 struct macb *bp = netdev_priv(dev); 134 struct phy_device *phydev = bp->phy_dev; 135 unsigned long flags; 136 137 int status_change = 0; 138 139 spin_lock_irqsave(&bp->lock, flags); 140 141 if (phydev->link) { 142 if ((bp->speed != phydev->speed) || 143 (bp->duplex != phydev->duplex)) { 144 u32 reg; 145 146 reg = macb_readl(bp, NCFGR); 147 reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD)); 148 149 if (phydev->duplex) 150 reg |= MACB_BIT(FD); 151 if (phydev->speed == SPEED_100) 152 reg |= MACB_BIT(SPD); 153 154 macb_writel(bp, NCFGR, reg); 155 156 bp->speed = phydev->speed; 157 bp->duplex = phydev->duplex; 158 status_change = 1; 159 } 160 } 161 162 if (phydev->link != bp->link) { 163 if (phydev->link) 164 netif_schedule(dev); 165 else { 166 bp->speed = 0; 167 bp->duplex = -1; 168 } 169 bp->link = phydev->link; 170 171 status_change = 1; 172 } 173 174 spin_unlock_irqrestore(&bp->lock, flags); 175 176 if (status_change) { 177 if (phydev->link) 178 printk(KERN_INFO "%s: link up (%d/%s)\n", 179 dev->name, phydev->speed, 180 DUPLEX_FULL == phydev->duplex ? "Full":"Half"); 181 else 182 printk(KERN_INFO "%s: link down\n", dev->name); 183 } 184} 185 186/* based on au1000_eth. 
c*/ 187static int macb_mii_probe(struct net_device *dev) 188{ 189 struct macb *bp = netdev_priv(dev); 190 struct phy_device *phydev = NULL; 191 struct eth_platform_data *pdata; 192 int phy_addr; 193 194 /* find the first phy */ 195 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { 196 if (bp->mii_bus.phy_map[phy_addr]) { 197 phydev = bp->mii_bus.phy_map[phy_addr]; 198 break; 199 } 200 } 201 202 if (!phydev) { 203 printk (KERN_ERR "%s: no PHY found\n", dev->name); 204 return -1; 205 } 206 207 pdata = bp->pdev->dev.platform_data; 208 /* TODO : add pin_irq */ 209 210 /* attach the mac to the phy */ 211 if (pdata && pdata->is_rmii) { 212 phydev = phy_connect(dev, phydev->dev.bus_id, 213 &macb_handle_link_change, 0, PHY_INTERFACE_MODE_RMII); 214 } else { 215 phydev = phy_connect(dev, phydev->dev.bus_id, 216 &macb_handle_link_change, 0, PHY_INTERFACE_MODE_MII); 217 } 218 219 if (IS_ERR(phydev)) { 220 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); 221 return PTR_ERR(phydev); 222 } 223 224 /* mask with MAC supported features */ 225 phydev->supported &= PHY_BASIC_FEATURES; 226 227 phydev->advertising = phydev->supported; 228 229 bp->link = 0; 230 bp->speed = 0; 231 bp->duplex = -1; 232 bp->phy_dev = phydev; 233 234 return 0; 235} 236 237static int macb_mii_init(struct macb *bp) 238{ 239 struct eth_platform_data *pdata; 240 int err = -ENXIO, i; 241 242 /* Enable managment port */ 243 macb_writel(bp, NCR, MACB_BIT(MPE)); 244 245 bp->mii_bus.name = "MACB_mii_bus"; 246 bp->mii_bus.read = &macb_mdio_read; 247 bp->mii_bus.write = &macb_mdio_write; 248 bp->mii_bus.reset = &macb_mdio_reset; 249 snprintf(bp->mii_bus.id, MII_BUS_ID_SIZE, "%x", bp->pdev->id); 250 bp->mii_bus.priv = bp; 251 bp->mii_bus.dev = &bp->dev->dev; 252 pdata = bp->pdev->dev.platform_data; 253 254 if (pdata) 255 bp->mii_bus.phy_mask = pdata->phy_mask; 256 257 bp->mii_bus.irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); 258 if (!bp->mii_bus.irq) { 259 err = -ENOMEM; 260 goto err_out; 261 
} 262 263 for (i = 0; i < PHY_MAX_ADDR; i++) 264 bp->mii_bus.irq[i] = PHY_POLL; 265 266 platform_set_drvdata(bp->dev, &bp->mii_bus); 267 268 if (mdiobus_register(&bp->mii_bus)) 269 goto err_out_free_mdio_irq; 270 271 if (macb_mii_probe(bp->dev) != 0) { 272 goto err_out_unregister_bus; 273 } 274 275 return 0; 276 277err_out_unregister_bus: 278 mdiobus_unregister(&bp->mii_bus); 279err_out_free_mdio_irq: 280 kfree(bp->mii_bus.irq); 281err_out: 282 return err; 283} 284 285static void macb_update_stats(struct macb *bp) 286{ 287 u32 __iomem *reg = bp->regs + MACB_PFR; 288 u32 *p = &bp->hw_stats.rx_pause_frames; 289 u32 *end = &bp->hw_stats.tx_pause_frames + 1; 290 291 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); 292 293 for(; p < end; p++, reg++) 294 *p += __raw_readl(reg); 295} 296 297static void macb_tx(struct macb *bp) 298{ 299 unsigned int tail; 300 unsigned int head; 301 u32 status; 302 303 status = macb_readl(bp, TSR); 304 macb_writel(bp, TSR, status); 305 306 dev_dbg(&bp->pdev->dev, "macb_tx status = %02lx\n", 307 (unsigned long)status); 308 309 if (status & MACB_BIT(UND)) { 310 int i; 311 printk(KERN_ERR "%s: TX underrun, resetting buffers\n", 312 bp->dev->name); 313 314 head = bp->tx_head; 315 316 /*Mark all the buffer as used to avoid sending a lost buffer*/ 317 for (i = 0; i < TX_RING_SIZE; i++) 318 bp->tx_ring[i].ctrl = MACB_BIT(TX_USED); 319 320 /* free transmit buffer in upper layer*/ 321 for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) { 322 struct ring_info *rp = &bp->tx_skb[tail]; 323 struct sk_buff *skb = rp->skb; 324 325 BUG_ON(skb == NULL); 326 327 rmb(); 328 329 dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len, 330 DMA_TO_DEVICE); 331 rp->skb = NULL; 332 dev_kfree_skb_irq(skb); 333 } 334 335 bp->tx_head = bp->tx_tail = 0; 336 } 337 338 if (!(status & MACB_BIT(COMP))) 339 /* 340 * This may happen when a buffer becomes complete 341 * between reading the ISR and scanning the 342 * descriptors. 
Nothing to worry about. 343 */ 344 return; 345 346 head = bp->tx_head; 347 for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) { 348 struct ring_info *rp = &bp->tx_skb[tail]; 349 struct sk_buff *skb = rp->skb; 350 u32 bufstat; 351 352 BUG_ON(skb == NULL); 353 354 rmb(); 355 bufstat = bp->tx_ring[tail].ctrl; 356 357 if (!(bufstat & MACB_BIT(TX_USED))) 358 break; 359 360 dev_dbg(&bp->pdev->dev, "skb %u (data %p) TX complete\n", 361 tail, skb->data); 362 dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len, 363 DMA_TO_DEVICE); 364 bp->stats.tx_packets++; 365 bp->stats.tx_bytes += skb->len; 366 rp->skb = NULL; 367 dev_kfree_skb_irq(skb); 368 } 369 370 bp->tx_tail = tail; 371 if (netif_queue_stopped(bp->dev) && 372 TX_BUFFS_AVAIL(bp) > MACB_TX_WAKEUP_THRESH) 373 netif_wake_queue(bp->dev); 374} 375 376static int macb_rx_frame(struct macb *bp, unsigned int first_frag, 377 unsigned int last_frag) 378{ 379 unsigned int len; 380 unsigned int frag; 381 unsigned int offset = 0; 382 struct sk_buff *skb; 383 384 len = MACB_BFEXT(RX_FRMLEN, bp->rx_ring[last_frag].ctrl); 385 386 dev_dbg(&bp->pdev->dev, "macb_rx_frame frags %u - %u (len %u)\n", 387 first_frag, last_frag, len); 388 389 skb = dev_alloc_skb(len + RX_OFFSET); 390 if (!skb) { 391 bp->stats.rx_dropped++; 392 for (frag = first_frag; ; frag = NEXT_RX(frag)) { 393 bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED); 394 if (frag == last_frag) 395 break; 396 } 397 wmb(); 398 return 1; 399 } 400 401 skb_reserve(skb, RX_OFFSET); 402 skb->ip_summed = CHECKSUM_NONE; 403 skb_put(skb, len); 404 405 for (frag = first_frag; ; frag = NEXT_RX(frag)) { 406 unsigned int frag_len = RX_BUFFER_SIZE; 407 408 if (offset + frag_len > len) { 409 BUG_ON(frag != last_frag); 410 frag_len = len - offset; 411 } 412 skb_copy_to_linear_data_offset(skb, offset, 413 (bp->rx_buffers + 414 (RX_BUFFER_SIZE * frag)), 415 frag_len); 416 offset += RX_BUFFER_SIZE; 417 bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED); 418 wmb(); 419 420 if (frag == last_frag) 
421 break; 422 } 423 424 skb->protocol = eth_type_trans(skb, bp->dev); 425 426 bp->stats.rx_packets++; 427 bp->stats.rx_bytes += len; 428 bp->dev->last_rx = jiffies; 429 dev_dbg(&bp->pdev->dev, "received skb of length %u, csum: %08x\n", 430 skb->len, skb->csum); 431 netif_receive_skb(skb); 432 433 return 0; 434} 435 436/* Mark DMA descriptors from begin up to and not including end as unused */ 437static void discard_partial_frame(struct macb *bp, unsigned int begin, 438 unsigned int end) 439{ 440 unsigned int frag; 441 442 for (frag = begin; frag != end; frag = NEXT_RX(frag)) 443 bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED); 444 wmb(); 445 446 /* 447 * When this happens, the hardware stats registers for 448 * whatever caused this is updated, so we don't have to record 449 * anything. 450 */ 451} 452 453static int macb_rx(struct macb *bp, int budget) 454{ 455 int received = 0; 456 unsigned int tail = bp->rx_tail; 457 int first_frag = -1; 458 459 for (; budget > 0; tail = NEXT_RX(tail)) { 460 u32 addr, ctrl; 461 462 rmb(); 463 addr = bp->rx_ring[tail].addr; 464 ctrl = bp->rx_ring[tail].ctrl; 465 466 if (!(addr & MACB_BIT(RX_USED))) 467 break; 468 469 if (ctrl & MACB_BIT(RX_SOF)) { 470 if (first_frag != -1) 471 discard_partial_frame(bp, first_frag, tail); 472 first_frag = tail; 473 } 474 475 if (ctrl & MACB_BIT(RX_EOF)) { 476 int dropped; 477 BUG_ON(first_frag == -1); 478 479 dropped = macb_rx_frame(bp, first_frag, tail); 480 first_frag = -1; 481 if (!dropped) { 482 received++; 483 budget--; 484 } 485 } 486 } 487 488 if (first_frag != -1) 489 bp->rx_tail = first_frag; 490 else 491 bp->rx_tail = tail; 492 493 return received; 494} 495 496static int macb_poll(struct napi_struct *napi, int budget) 497{ 498 struct macb *bp = container_of(napi, struct macb, napi); 499 struct net_device *dev = bp->dev; 500 int work_done; 501 u32 status; 502 503 status = macb_readl(bp, RSR); 504 macb_writel(bp, RSR, status); 505 506 work_done = 0; 507 if (!status) { 508 /* 509 * This may 
happen if an interrupt was pending before 510 * this function was called last time, and no packets 511 * have been received since. 512 */ 513 netif_rx_complete(dev, napi); 514 goto out; 515 } 516 517 dev_dbg(&bp->pdev->dev, "poll: status = %08lx, budget = %d\n", 518 (unsigned long)status, budget); 519 520 if (!(status & MACB_BIT(REC))) { 521 dev_warn(&bp->pdev->dev, 522 "No RX buffers complete, status = %02lx\n", 523 (unsigned long)status); 524 netif_rx_complete(dev, napi); 525 goto out; 526 } 527 528 work_done = macb_rx(bp, budget); 529 if (work_done < budget) 530 netif_rx_complete(dev, napi); 531 532 /* 533 * We've done what we can to clean the buffers. Make sure we 534 * get notified when new packets arrive. 535 */ 536out: 537 macb_writel(bp, IER, MACB_RX_INT_FLAGS); 538 539 /* TODO: Handle errors */ 540 541 return work_done; 542} 543 544static irqreturn_t macb_interrupt(int irq, void *dev_id) 545{ 546 struct net_device *dev = dev_id; 547 struct macb *bp = netdev_priv(dev); 548 u32 status; 549 550 status = macb_readl(bp, ISR); 551 552 if (unlikely(!status)) 553 return IRQ_NONE; 554 555 spin_lock(&bp->lock); 556 557 while (status) { 558 /* close possible race with dev_close */ 559 if (unlikely(!netif_running(dev))) { 560 macb_writel(bp, IDR, ~0UL); 561 break; 562 } 563 564 if (status & MACB_RX_INT_FLAGS) { 565 if (netif_rx_schedule_prep(dev, &bp->napi)) { 566 /* 567 * There's no point taking any more interrupts 568 * until we have processed the buffers 569 */ 570 macb_writel(bp, IDR, MACB_RX_INT_FLAGS); 571 dev_dbg(&bp->pdev->dev, 572 "scheduling RX softirq\n"); 573 __netif_rx_schedule(dev, &bp->napi); 574 } 575 } 576 577 if (status & (MACB_BIT(TCOMP) | MACB_BIT(ISR_TUND))) 578 macb_tx(bp); 579 580 /* 581 * Link change detection isn't possible with RMII, so we'll 582 * add that if/when we get our hands on a full-blown MII PHY. 
583 */ 584 585 if (status & MACB_BIT(HRESP)) { 586 /* 587 * TODO: Reset the hardware, and maybe move the printk 588 * to a lower-priority context as well (work queue?) 589 */ 590 printk(KERN_ERR "%s: DMA bus error: HRESP not OK\n", 591 dev->name); 592 } 593 594 status = macb_readl(bp, ISR); 595 } 596 597 spin_unlock(&bp->lock); 598 599 return IRQ_HANDLED; 600} 601 602static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) 603{ 604 struct macb *bp = netdev_priv(dev); 605 dma_addr_t mapping; 606 unsigned int len, entry; 607 u32 ctrl; 608 609#ifdef DEBUG 610 int i; 611 dev_dbg(&bp->pdev->dev, 612 "start_xmit: len %u head %p data %p tail %p end %p\n", 613 skb->len, skb->head, skb->data, 614 skb_tail_pointer(skb), skb_end_pointer(skb)); 615 dev_dbg(&bp->pdev->dev, 616 "data:"); 617 for (i = 0; i < 16; i++) 618 printk(" %02x", (unsigned int)skb->data[i]); 619 printk("\n"); 620#endif 621 622 len = skb->len; 623 spin_lock_irq(&bp->lock); 624 625 /* This is a hard error, log it. */ 626 if (TX_BUFFS_AVAIL(bp) < 1) { 627 netif_stop_queue(dev); 628 spin_unlock_irq(&bp->lock); 629 dev_err(&bp->pdev->dev, 630 "BUG! 
Tx Ring full when queue awake!\n"); 631 dev_dbg(&bp->pdev->dev, "tx_head = %u, tx_tail = %u\n", 632 bp->tx_head, bp->tx_tail); 633 return 1; 634 } 635 636 entry = bp->tx_head; 637 dev_dbg(&bp->pdev->dev, "Allocated ring entry %u\n", entry); 638 mapping = dma_map_single(&bp->pdev->dev, skb->data, 639 len, DMA_TO_DEVICE); 640 bp->tx_skb[entry].skb = skb; 641 bp->tx_skb[entry].mapping = mapping; 642 dev_dbg(&bp->pdev->dev, "Mapped skb data %p to DMA addr %08lx\n", 643 skb->data, (unsigned long)mapping); 644 645 ctrl = MACB_BF(TX_FRMLEN, len); 646 ctrl |= MACB_BIT(TX_LAST); 647 if (entry == (TX_RING_SIZE - 1)) 648 ctrl |= MACB_BIT(TX_WRAP); 649 650 bp->tx_ring[entry].addr = mapping; 651 bp->tx_ring[entry].ctrl = ctrl; 652 wmb(); 653 654 entry = NEXT_TX(entry); 655 bp->tx_head = entry; 656 657 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); 658 659 if (TX_BUFFS_AVAIL(bp) < 1) 660 netif_stop_queue(dev); 661 662 spin_unlock_irq(&bp->lock); 663 664 dev->trans_start = jiffies; 665 666 return 0; 667} 668 669static void macb_free_consistent(struct macb *bp) 670{ 671 if (bp->tx_skb) { 672 kfree(bp->tx_skb); 673 bp->tx_skb = NULL; 674 } 675 if (bp->rx_ring) { 676 dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES, 677 bp->rx_ring, bp->rx_ring_dma); 678 bp->rx_ring = NULL; 679 } 680 if (bp->tx_ring) { 681 dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES, 682 bp->tx_ring, bp->tx_ring_dma); 683 bp->tx_ring = NULL; 684 } 685 if (bp->rx_buffers) { 686 dma_free_coherent(&bp->pdev->dev, 687 RX_RING_SIZE * RX_BUFFER_SIZE, 688 bp->rx_buffers, bp->rx_buffers_dma); 689 bp->rx_buffers = NULL; 690 } 691} 692 693static int macb_alloc_consistent(struct macb *bp) 694{ 695 int size; 696 697 size = TX_RING_SIZE * sizeof(struct ring_info); 698 bp->tx_skb = kmalloc(size, GFP_KERNEL); 699 if (!bp->tx_skb) 700 goto out_err; 701 702 size = RX_RING_BYTES; 703 bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, 704 &bp->rx_ring_dma, GFP_KERNEL); 705 if (!bp->rx_ring) 706 goto out_err; 707 
dev_dbg(&bp->pdev->dev, 708 "Allocated RX ring of %d bytes at %08lx (mapped %p)\n", 709 size, (unsigned long)bp->rx_ring_dma, bp->rx_ring); 710 711 size = TX_RING_BYTES; 712 bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, 713 &bp->tx_ring_dma, GFP_KERNEL); 714 if (!bp->tx_ring) 715 goto out_err; 716 dev_dbg(&bp->pdev->dev, 717 "Allocated TX ring of %d bytes at %08lx (mapped %p)\n", 718 size, (unsigned long)bp->tx_ring_dma, bp->tx_ring); 719 720 size = RX_RING_SIZE * RX_BUFFER_SIZE; 721 bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, 722 &bp->rx_buffers_dma, GFP_KERNEL); 723 if (!bp->rx_buffers) 724 goto out_err; 725 dev_dbg(&bp->pdev->dev, 726 "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", 727 size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers); 728 729 return 0; 730 731out_err: 732 macb_free_consistent(bp); 733 return -ENOMEM; 734} 735 736static void macb_init_rings(struct macb *bp) 737{ 738 int i; 739 dma_addr_t addr; 740 741 addr = bp->rx_buffers_dma; 742 for (i = 0; i < RX_RING_SIZE; i++) { 743 bp->rx_ring[i].addr = addr; 744 bp->rx_ring[i].ctrl = 0; 745 addr += RX_BUFFER_SIZE; 746 } 747 bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP); 748 749 for (i = 0; i < TX_RING_SIZE; i++) { 750 bp->tx_ring[i].addr = 0; 751 bp->tx_ring[i].ctrl = MACB_BIT(TX_USED); 752 } 753 bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); 754 755 bp->rx_tail = bp->tx_head = bp->tx_tail = 0; 756} 757 758static void macb_reset_hw(struct macb *bp) 759{ 760 /* Make sure we have the write buffer for ourselves */ 761 wmb(); 762 763 /* 764 * Disable RX and TX (XXX: Should we halt the transmission 765 * more gracefully?) 766 */ 767 macb_writel(bp, NCR, 0); 768 769 /* Clear the stats registers (XXX: Update stats first?) 
*/ 770 macb_writel(bp, NCR, MACB_BIT(CLRSTAT)); 771 772 /* Clear all status flags */ 773 macb_writel(bp, TSR, ~0UL); 774 macb_writel(bp, RSR, ~0UL); 775 776 /* Disable all interrupts */ 777 macb_writel(bp, IDR, ~0UL); 778 macb_readl(bp, ISR); 779} 780 781static void macb_init_hw(struct macb *bp) 782{ 783 u32 config; 784 785 macb_reset_hw(bp); 786 __macb_set_hwaddr(bp); 787 788 config = macb_readl(bp, NCFGR) & MACB_BF(CLK, -1L); 789 config |= MACB_BIT(PAE); /* PAuse Enable */ 790 config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ 791 if (bp->dev->flags & IFF_PROMISC) 792 config |= MACB_BIT(CAF); /* Copy All Frames */ 793 if (!(bp->dev->flags & IFF_BROADCAST)) 794 config |= MACB_BIT(NBC); /* No BroadCast */ 795 macb_writel(bp, NCFGR, config); 796 797 /* Initialize TX and RX buffers */ 798 macb_writel(bp, RBQP, bp->rx_ring_dma); 799 macb_writel(bp, TBQP, bp->tx_ring_dma); 800 801 /* Enable TX and RX */ 802 macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE)); 803 804 /* Enable interrupts */ 805 macb_writel(bp, IER, (MACB_BIT(RCOMP) 806 | MACB_BIT(RXUBR) 807 | MACB_BIT(ISR_TUND) 808 | MACB_BIT(ISR_RLE) 809 | MACB_BIT(TXERR) 810 | MACB_BIT(TCOMP) 811 | MACB_BIT(ISR_ROVR) 812 | MACB_BIT(HRESP))); 813 814} 815 816/* 817 * The hash address register is 64 bits long and takes up two 818 * locations in the memory map. The least significant bits are stored 819 * in EMAC_HSL and the most significant bits in EMAC_HSH. 820 * 821 * The unicast hash enable and the multicast hash enable bits in the 822 * network configuration register enable the reception of hash matched 823 * frames. The destination address is reduced to a 6 bit index into 824 * the 64 bit hash register using the following hash function. The 825 * hash function is an exclusive or of every sixth bit of the 826 * destination address. 
827 * 828 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47] 829 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46] 830 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45] 831 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44] 832 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43] 833 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42] 834 * 835 * da[0] represents the least significant bit of the first byte 836 * received, that is, the multicast/unicast indicator, and da[47] 837 * represents the most significant bit of the last byte received. If 838 * the hash index, hi[n], points to a bit that is set in the hash 839 * register then the frame will be matched according to whether the 840 * frame is multicast or unicast. A multicast match will be signalled 841 * if the multicast hash enable bit is set, da[0] is 1 and the hash 842 * index points to a bit set in the hash register. A unicast match 843 * will be signalled if the unicast hash enable bit is set, da[0] is 0 844 * and the hash index points to a bit set in the hash register. To 845 * receive all multicast frames, the hash register should be set with 846 * all ones and the multicast hash enable bit should be set in the 847 * network configuration register. 848 */ 849 850static inline int hash_bit_value(int bitnr, __u8 *addr) 851{ 852 if (addr[bitnr / 8] & (1 << (bitnr % 8))) 853 return 1; 854 return 0; 855} 856 857/* 858 * Return the hash index value for the specified address. 859 */ 860static int hash_get_index(__u8 *addr) 861{ 862 int i, j, bitval; 863 int hash_index = 0; 864 865 for (j = 0; j < 6; j++) { 866 for (i = 0, bitval = 0; i < 8; i++) 867 bitval ^= hash_bit_value(i*6 + j, addr); 868 869 hash_index |= (bitval << j); 870 } 871 872 return hash_index; 873} 874 875/* 876 * Add multicast addresses to the internal multicast-hash table. 
877 */ 878static void macb_sethashtable(struct net_device *dev) 879{ 880 struct dev_mc_list *curr; 881 unsigned long mc_filter[2]; 882 unsigned int i, bitnr; 883 struct macb *bp = netdev_priv(dev); 884 885 mc_filter[0] = mc_filter[1] = 0; 886 887 curr = dev->mc_list; 888 for (i = 0; i < dev->mc_count; i++, curr = curr->next) { 889 if (!curr) break; /* unexpected end of list */ 890 891 bitnr = hash_get_index(curr->dmi_addr); 892 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); 893 } 894 895 macb_writel(bp, HRB, mc_filter[0]); 896 macb_writel(bp, HRT, mc_filter[1]); 897} 898 899/* 900 * Enable/Disable promiscuous and multicast modes. 901 */ 902static void macb_set_rx_mode(struct net_device *dev) 903{ 904 unsigned long cfg; 905 struct macb *bp = netdev_priv(dev); 906 907 cfg = macb_readl(bp, NCFGR); 908 909 if (dev->flags & IFF_PROMISC) 910 /* Enable promiscuous mode */ 911 cfg |= MACB_BIT(CAF); 912 else if (dev->flags & (~IFF_PROMISC)) 913 /* Disable promiscuous mode */ 914 cfg &= ~MACB_BIT(CAF); 915 916 if (dev->flags & IFF_ALLMULTI) { 917 /* Enable all multicast mode */ 918 macb_writel(bp, HRB, -1); 919 macb_writel(bp, HRT, -1); 920 cfg |= MACB_BIT(NCFGR_MTI); 921 } else if (dev->mc_count > 0) { 922 /* Enable specific multicasts */ 923 macb_sethashtable(dev); 924 cfg |= MACB_BIT(NCFGR_MTI); 925 } else if (dev->flags & (~IFF_ALLMULTI)) { 926 /* Disable all multicast mode */ 927 macb_writel(bp, HRB, 0); 928 macb_writel(bp, HRT, 0); 929 cfg &= ~MACB_BIT(NCFGR_MTI); 930 } 931 932 macb_writel(bp, NCFGR, cfg); 933} 934 935static int macb_open(struct net_device *dev) 936{ 937 struct macb *bp = netdev_priv(dev); 938 int err; 939 940 dev_dbg(&bp->pdev->dev, "open\n"); 941 942 /* if the phy is not yet register, retry later*/ 943 if (!bp->phy_dev) 944 return -EAGAIN; 945 946 if (!is_valid_ether_addr(dev->dev_addr)) 947 return -EADDRNOTAVAIL; 948 949 err = macb_alloc_consistent(bp); 950 if (err) { 951 printk(KERN_ERR 952 "%s: Unable to allocate DMA memory (error %d)\n", 953 
dev->name, err); 954 return err; 955 } 956 957 napi_enable(&bp->napi); 958 959 macb_init_rings(bp); 960 macb_init_hw(bp); 961 962 /* schedule a link state check */ 963 phy_start(bp->phy_dev); 964 965 netif_start_queue(dev); 966 967 return 0; 968} 969 970static int macb_close(struct net_device *dev) 971{ 972 struct macb *bp = netdev_priv(dev); 973 unsigned long flags; 974 975 netif_stop_queue(dev); 976 napi_disable(&bp->napi); 977 978 if (bp->phy_dev) 979 phy_stop(bp->phy_dev); 980 981 spin_lock_irqsave(&bp->lock, flags); 982 macb_reset_hw(bp); 983 netif_carrier_off(dev); 984 spin_unlock_irqrestore(&bp->lock, flags); 985 986 macb_free_consistent(bp); 987 988 return 0; 989} 990 991static struct net_device_stats *macb_get_stats(struct net_device *dev) 992{ 993 struct macb *bp = netdev_priv(dev); 994 struct net_device_stats *nstat = &bp->stats; 995 struct macb_stats *hwstat = &bp->hw_stats; 996 997 /* read stats from hardware */ 998 macb_update_stats(bp); 999 1000 /* Convert HW stats into netdevice stats */ 1001 nstat->rx_errors = (hwstat->rx_fcs_errors + 1002 hwstat->rx_align_errors + 1003 hwstat->rx_resource_errors + 1004 hwstat->rx_overruns + 1005 hwstat->rx_oversize_pkts + 1006 hwstat->rx_jabbers + 1007 hwstat->rx_undersize_pkts + 1008 hwstat->sqe_test_errors + 1009 hwstat->rx_length_mismatch); 1010 nstat->tx_errors = (hwstat->tx_late_cols + 1011 hwstat->tx_excessive_cols + 1012 hwstat->tx_underruns + 1013 hwstat->tx_carrier_errors); 1014 nstat->collisions = (hwstat->tx_single_cols + 1015 hwstat->tx_multiple_cols + 1016 hwstat->tx_excessive_cols); 1017 nstat->rx_length_errors = (hwstat->rx_oversize_pkts + 1018 hwstat->rx_jabbers + 1019 hwstat->rx_undersize_pkts + 1020 hwstat->rx_length_mismatch); 1021 nstat->rx_over_errors = hwstat->rx_resource_errors; 1022 nstat->rx_crc_errors = hwstat->rx_fcs_errors; 1023 nstat->rx_frame_errors = hwstat->rx_align_errors; 1024 nstat->rx_fifo_errors = hwstat->rx_overruns; 1025 /* XXX: What does "missed" mean? 
*/ 1026 nstat->tx_aborted_errors = hwstat->tx_excessive_cols; 1027 nstat->tx_carrier_errors = hwstat->tx_carrier_errors; 1028 nstat->tx_fifo_errors = hwstat->tx_underruns; 1029 /* Don't know about heartbeat or window errors... */ 1030 1031 return nstat; 1032} 1033 1034static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1035{ 1036 struct macb *bp = netdev_priv(dev); 1037 struct phy_device *phydev = bp->phy_dev; 1038 1039 if (!phydev) 1040 return -ENODEV; 1041 1042 return phy_ethtool_gset(phydev, cmd); 1043} 1044 1045static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1046{ 1047 struct macb *bp = netdev_priv(dev); 1048 struct phy_device *phydev = bp->phy_dev; 1049 1050 if (!phydev) 1051 return -ENODEV; 1052 1053 return phy_ethtool_sset(phydev, cmd); 1054} 1055 1056static void macb_get_drvinfo(struct net_device *dev, 1057 struct ethtool_drvinfo *info) 1058{ 1059 struct macb *bp = netdev_priv(dev); 1060 1061 strcpy(info->driver, bp->pdev->dev.driver->name); 1062 strcpy(info->version, "$Revision: 1.14 $"); 1063 strcpy(info->bus_info, bp->pdev->dev.bus_id); 1064} 1065 1066static struct ethtool_ops macb_ethtool_ops = { 1067 .get_settings = macb_get_settings, 1068 .set_settings = macb_set_settings, 1069 .get_drvinfo = macb_get_drvinfo, 1070 .get_link = ethtool_op_get_link, 1071}; 1072 1073static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1074{ 1075 struct macb *bp = netdev_priv(dev); 1076 struct phy_device *phydev = bp->phy_dev; 1077 1078 if (!netif_running(dev)) 1079 return -EINVAL; 1080 1081 if (!phydev) 1082 return -ENODEV; 1083 1084 return phy_mii_ioctl(phydev, if_mii(rq), cmd); 1085} 1086 1087static int __init macb_probe(struct platform_device *pdev) 1088{ 1089 struct eth_platform_data *pdata; 1090 struct resource *regs; 1091 struct net_device *dev; 1092 struct macb *bp; 1093 struct phy_device *phydev; 1094 unsigned long pclk_hz; 1095 u32 config; 1096 int err = -ENXIO; 1097 
DECLARE_MAC_BUF(mac); 1098 1099 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1100 if (!regs) { 1101 dev_err(&pdev->dev, "no mmio resource defined\n"); 1102 goto err_out; 1103 } 1104 1105 err = -ENOMEM; 1106 dev = alloc_etherdev(sizeof(*bp)); 1107 if (!dev) { 1108 dev_err(&pdev->dev, "etherdev alloc failed, aborting.\n"); 1109 goto err_out; 1110 } 1111 1112 SET_NETDEV_DEV(dev, &pdev->dev); 1113 1114 /* TODO: Actually, we have some interesting features... */ 1115 dev->features |= 0; 1116 1117 bp = netdev_priv(dev); 1118 bp->pdev = pdev; 1119 bp->dev = dev; 1120 1121 spin_lock_init(&bp->lock); 1122 1123#if defined(CONFIG_ARCH_AT91) 1124 bp->pclk = clk_get(&pdev->dev, "macb_clk"); 1125 if (IS_ERR(bp->pclk)) { 1126 dev_err(&pdev->dev, "failed to get macb_clk\n"); 1127 goto err_out_free_dev; 1128 } 1129 clk_enable(bp->pclk); 1130#else 1131 bp->pclk = clk_get(&pdev->dev, "pclk"); 1132 if (IS_ERR(bp->pclk)) { 1133 dev_err(&pdev->dev, "failed to get pclk\n"); 1134 goto err_out_free_dev; 1135 } 1136 bp->hclk = clk_get(&pdev->dev, "hclk"); 1137 if (IS_ERR(bp->hclk)) { 1138 dev_err(&pdev->dev, "failed to get hclk\n"); 1139 goto err_out_put_pclk; 1140 } 1141 1142 clk_enable(bp->pclk); 1143 clk_enable(bp->hclk); 1144#endif 1145 1146 bp->regs = ioremap(regs->start, regs->end - regs->start + 1); 1147 if (!bp->regs) { 1148 dev_err(&pdev->dev, "failed to map registers, aborting.\n"); 1149 err = -ENOMEM; 1150 goto err_out_disable_clocks; 1151 } 1152 1153 dev->irq = platform_get_irq(pdev, 0); 1154 err = request_irq(dev->irq, macb_interrupt, IRQF_SAMPLE_RANDOM, 1155 dev->name, dev); 1156 if (err) { 1157 printk(KERN_ERR 1158 "%s: Unable to request IRQ %d (error %d)\n", 1159 dev->name, dev->irq, err); 1160 goto err_out_iounmap; 1161 } 1162 1163 dev->open = macb_open; 1164 dev->stop = macb_close; 1165 dev->hard_start_xmit = macb_start_xmit; 1166 dev->get_stats = macb_get_stats; 1167 dev->set_multicast_list = macb_set_rx_mode; 1168 dev->do_ioctl = macb_ioctl; 1169 
netif_napi_add(dev, &bp->napi, macb_poll, 64); 1170 dev->ethtool_ops = &macb_ethtool_ops; 1171 1172 dev->base_addr = regs->start; 1173 1174 /* Set MII management clock divider */ 1175 pclk_hz = clk_get_rate(bp->pclk); 1176 if (pclk_hz <= 20000000) 1177 config = MACB_BF(CLK, MACB_CLK_DIV8); 1178 else if (pclk_hz <= 40000000) 1179 config = MACB_BF(CLK, MACB_CLK_DIV16); 1180 else if (pclk_hz <= 80000000) 1181 config = MACB_BF(CLK, MACB_CLK_DIV32); 1182 else 1183 config = MACB_BF(CLK, MACB_CLK_DIV64); 1184 macb_writel(bp, NCFGR, config); 1185 1186 macb_get_hwaddr(bp); 1187 pdata = pdev->dev.platform_data; 1188 1189 if (pdata && pdata->is_rmii) 1190#if defined(CONFIG_ARCH_AT91) 1191 macb_writel(bp, USRIO, (MACB_BIT(RMII) | MACB_BIT(CLKEN)) ); 1192#else 1193 macb_writel(bp, USRIO, 0); 1194#endif 1195 else 1196#if defined(CONFIG_ARCH_AT91) 1197 macb_writel(bp, USRIO, MACB_BIT(CLKEN)); 1198#else 1199 macb_writel(bp, USRIO, MACB_BIT(MII)); 1200#endif 1201 1202 bp->tx_pending = DEF_TX_RING_PENDING; 1203 1204 err = register_netdev(dev); 1205 if (err) { 1206 dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); 1207 goto err_out_free_irq; 1208 } 1209 1210 if (macb_mii_init(bp) != 0) { 1211 goto err_out_unregister_netdev; 1212 } 1213 1214 platform_set_drvdata(pdev, dev); 1215 1216 printk(KERN_INFO "%s: Atmel MACB at 0x%08lx irq %d " 1217 "(%s)\n", 1218 dev->name, dev->base_addr, dev->irq, 1219 print_mac(mac, dev->dev_addr)); 1220 1221 phydev = bp->phy_dev; 1222 printk(KERN_INFO "%s: attached PHY driver [%s] " 1223 "(mii_bus:phy_addr=%s, irq=%d)\n", 1224 dev->name, phydev->drv->name, phydev->dev.bus_id, phydev->irq); 1225 1226 return 0; 1227 1228err_out_unregister_netdev: 1229 unregister_netdev(dev); 1230err_out_free_irq: 1231 free_irq(dev->irq, dev); 1232err_out_iounmap: 1233 iounmap(bp->regs); 1234err_out_disable_clocks: 1235#ifndef CONFIG_ARCH_AT91 1236 clk_disable(bp->hclk); 1237 clk_put(bp->hclk); 1238#endif 1239 clk_disable(bp->pclk); 1240#ifndef CONFIG_ARCH_AT91 
1241err_out_put_pclk: 1242#endif 1243 clk_put(bp->pclk); 1244err_out_free_dev: 1245 free_netdev(dev); 1246err_out: 1247 platform_set_drvdata(pdev, NULL); 1248 return err; 1249} 1250 1251static int __exit macb_remove(struct platform_device *pdev) 1252{ 1253 struct net_device *dev; 1254 struct macb *bp; 1255 1256 dev = platform_get_drvdata(pdev); 1257 1258 if (dev) { 1259 bp = netdev_priv(dev); 1260 if (bp->phy_dev) 1261 phy_disconnect(bp->phy_dev); 1262 mdiobus_unregister(&bp->mii_bus); 1263 kfree(bp->mii_bus.irq); 1264 unregister_netdev(dev); 1265 free_irq(dev->irq, dev); 1266 iounmap(bp->regs); 1267#ifndef CONFIG_ARCH_AT91 1268 clk_disable(bp->hclk); 1269 clk_put(bp->hclk); 1270#endif 1271 clk_disable(bp->pclk); 1272 clk_put(bp->pclk); 1273 free_netdev(dev); 1274 platform_set_drvdata(pdev, NULL); 1275 } 1276 1277 return 0; 1278} 1279 1280static struct platform_driver macb_driver = { 1281 .remove = __exit_p(macb_remove), 1282 .driver = { 1283 .name = "macb", 1284 .owner = THIS_MODULE, 1285 }, 1286}; 1287 1288static int __init macb_init(void) 1289{ 1290 return platform_driver_probe(&macb_driver, macb_probe); 1291} 1292 1293static void __exit macb_exit(void) 1294{ 1295 platform_driver_unregister(&macb_driver); 1296} 1297 1298module_init(macb_init); 1299module_exit(macb_exit); 1300 1301MODULE_LICENSE("GPL"); 1302MODULE_DESCRIPTION("Atmel MACB Ethernet driver"); 1303MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>"); 1304MODULE_ALIAS("platform:macb");