/*
 * Network device driver for the BMAC ethernet controller on
 * Apple Powermacs. Assumes it's under a DBDMA controller.
 *
 * Copyright (C) 1998 Randy Gobbel.
 *
 * May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to
 * dynamic procfs inode.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/macio.h>
#include <asm/irq.h>

#include "bmac.h"

#define trunc_page(x)   ((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
#define round_page(x)   trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))

/*
 * CRC polynomial - used in working out multicast filter bits.
 */
#define ENET_CRCPOLY 0x04c11db7

/* switch to use multicast code lifted from sunhme driver */
#define SUNHME_MULTICAST

#define N_RX_RING       64
#define N_TX_RING       32
#define MAX_TX_ACTIVE   1
#define ETHERCRC        4
#define ETHERMINPACKET  64
#define ETHERMTU        1500
#define RX_BUFLEN       (ETHERMTU + 14 + ETHERCRC + 2)
#define TX_TIMEOUT      HZ      /* 1 second */

/* Bits in transmit DMA status */
#define TX_DMA_ERR      0x80

#define XXDEBUG(args)

struct bmac_data {
        /* volatile struct bmac *bmac; */
        struct sk_buff_head *queue;
        volatile struct dbdma_regs __iomem *tx_dma;
        int tx_dma_intr;
        volatile struct dbdma_regs __iomem *rx_dma;
        int rx_dma_intr;
        volatile struct dbdma_cmd *tx_cmds;     /* xmit dma command list */
        volatile struct dbdma_cmd *rx_cmds;     /* recv dma command list */
        struct macio_dev *mdev;
        int is_bmac_plus;
        struct sk_buff *rx_bufs[N_RX_RING];
        int rx_fill;
        int rx_empty;
        struct sk_buff *tx_bufs[N_TX_RING];
        int tx_fill;
        int tx_empty;
        unsigned char tx_fullup;
        struct net_device_stats stats;
        struct timer_list tx_timeout;
        int timeout_active;
        int sleeping;
        int opened;
        unsigned short hash_use_count[64];
        unsigned short hash_table_mask[4];
        spinlock_t lock;
};

#if 0 /* Move that to ethtool */

typedef struct bmac_reg_entry {
        char *name;
        unsigned short reg_offset;
} bmac_reg_entry_t;

#define N_REG_ENTRIES 31

static bmac_reg_entry_t reg_entries[N_REG_ENTRIES] = {
        {"MEMADD", MEMADD},
        {"MEMDATAHI", MEMDATAHI},
        {"MEMDATALO", MEMDATALO},
        {"TXPNTR", TXPNTR},
        {"RXPNTR", RXPNTR},
        {"IPG1", IPG1},
        {"IPG2", IPG2},
        {"ALIMIT", ALIMIT},
        {"SLOT", SLOT},
        {"PALEN", PALEN},
        {"PAPAT", PAPAT},
        {"TXSFD", TXSFD},
        {"JAM", JAM},
        {"TXCFG", TXCFG},
        {"TXMAX", TXMAX},
        {"TXMIN", TXMIN},
        {"PAREG", PAREG},
        {"DCNT", DCNT},
        {"NCCNT", NCCNT},
        {"NTCNT", NTCNT},
        {"EXCNT", EXCNT},
        {"LTCNT", LTCNT},
        {"TXSM", TXSM},
        {"RXCFG", RXCFG},
        {"RXMAX", RXMAX},
        {"RXMIN", RXMIN},
        {"FRCNT", FRCNT},
        {"AECNT", AECNT},
        {"FECNT", FECNT},
        {"RXSM", RXSM},
        {"RXCV", RXCV}
};

#endif

static unsigned char *bmac_emergency_rxbuf;

/*
 * Number of bytes of private data per BMAC: allow enough for
 * the rx and tx dma commands plus a branch dma command each,
 * and another 16 bytes to allow us to align the dma command
 * buffers on a 16 byte boundary.
 */
#define PRIV_BYTES      (sizeof(struct bmac_data) \
        + (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \
        + sizeof(struct sk_buff_head))
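/*
 * A sketch of how bmac_probe() carves up that single allocation (this
 * mirrors the pointer arithmetic done there, it is not a layout the
 * hardware dictates):
 *
 *      bp                          struct bmac_data
 *      DBDMA_ALIGN(bp + 1)      -> tx_cmds[N_TX_RING + 1]  (last = branch)
 *      tx_cmds + N_TX_RING + 1  -> rx_cmds[N_RX_RING + 1]  (last = branch)
 *      rx_cmds + N_RX_RING + 1  -> queue (struct sk_buff_head)
 */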
static unsigned char bitrev(unsigned char b);
static int bmac_open(struct net_device *dev);
static int bmac_close(struct net_device *dev);
static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
static struct net_device_stats *bmac_stats(struct net_device *dev);
static void bmac_set_multicast(struct net_device *dev);
static void bmac_reset_and_enable(struct net_device *dev);
static void bmac_start_chip(struct net_device *dev);
static void bmac_init_chip(struct net_device *dev);
static void bmac_init_registers(struct net_device *dev);
static void bmac_enable_and_reset_chip(struct net_device *dev);
static int bmac_set_address(struct net_device *dev, void *addr);
static irqreturn_t bmac_misc_intr(int irq, void *dev_id, struct pt_regs *regs);
static irqreturn_t bmac_txdma_intr(int irq, void *dev_id, struct pt_regs *regs);
static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id, struct pt_regs *regs);
static void bmac_set_timeout(struct net_device *dev);
static void bmac_tx_timeout(unsigned long data);
static int bmac_output(struct sk_buff *skb, struct net_device *dev);
static void bmac_start(struct net_device *dev);

#define DBDMA_SET(x)    ( ((x) | (x) << 16) )
#define DBDMA_CLEAR(x)  ( (x) << 16)

static inline void
dbdma_st32(volatile __u32 __iomem *a, unsigned long x)
{
        __asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory");
        return;
}

static inline unsigned long
dbdma_ld32(volatile __u32 __iomem *a)
{
        __u32 swap;
        __asm__ volatile ("lwbrx %0,0,%1" : "=r" (swap) : "r" (a));
        return swap;
}

static void
dbdma_continue(volatile struct dbdma_regs __iomem *dmap)
{
        dbdma_st32(&dmap->control,
                   DBDMA_SET(RUN|WAKE) | DBDMA_CLEAR(PAUSE|DEAD));
        eieio();
}

static void
dbdma_reset(volatile struct dbdma_regs __iomem *dmap)
{
        dbdma_st32(&dmap->control,
                   DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN));
        eieio();
        while (dbdma_ld32(&dmap->status) & RUN)
                eieio();
}

static void
dbdma_setcmd(volatile struct dbdma_cmd *cp,
             unsigned short cmd, unsigned count, unsigned long addr,
             unsigned long cmd_dep)
{
        out_le16(&cp->command, cmd);
        out_le16(&cp->req_count, count);
        out_le32(&cp->phy_addr, addr);
        out_le32(&cp->cmd_dep, cmd_dep);
        out_le16(&cp->xfer_status, 0);
        out_le16(&cp->res_count, 0);
}

static inline
void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data )
{
        out_le16((void __iomem *)dev->base_addr + reg_offset, data);
}


static inline
volatile unsigned short bmread(struct net_device *dev, unsigned long reg_offset )
{
        return in_le16((void __iomem *)dev->base_addr + reg_offset);
}

static void
bmac_enable_and_reset_chip(struct net_device *dev)
{
        struct bmac_data *bp = netdev_priv(dev);
        volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
        volatile struct dbdma_regs __iomem *td = bp->tx_dma;

        if (rd)
                dbdma_reset(rd);
        if (td)
                dbdma_reset(td);

        pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 1);
}
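/*
 * PHY access is bit-banged through the MIF (MII management) register.
 * Judging from how the helpers below toggle MIFCSR (an inference from
 * the code, not from a datasheet): bit 0 is the MDC clock, bit 2 the
 * MDIO output and bit 3 the MDIO input.  bmac_mif_read()/_write() then
 * clock out a standard IEEE 802.3 clause-22 frame: 32 preamble ones,
 * start/opcode bits (0110 = read, 0101 = write), a 10-bit PHY+register
 * address, a turnaround, and 16 data bits.
 */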
#define MIFDELAY        udelay(10)

static unsigned int
bmac_mif_readbits(struct net_device *dev, int nb)
{
        unsigned int val = 0;

        while (--nb >= 0) {
                bmwrite(dev, MIFCSR, 0);
                MIFDELAY;
                if (bmread(dev, MIFCSR) & 8)
                        val |= 1 << nb;
                bmwrite(dev, MIFCSR, 1);
                MIFDELAY;
        }
        bmwrite(dev, MIFCSR, 0);
        MIFDELAY;
        bmwrite(dev, MIFCSR, 1);
        MIFDELAY;
        return val;
}

static void
bmac_mif_writebits(struct net_device *dev, unsigned int val, int nb)
{
        int b;

        while (--nb >= 0) {
                b = (val & (1 << nb))? 6: 4;
                bmwrite(dev, MIFCSR, b);
                MIFDELAY;
                bmwrite(dev, MIFCSR, b|1);
                MIFDELAY;
        }
}

static unsigned int
bmac_mif_read(struct net_device *dev, unsigned int addr)
{
        unsigned int val;

        bmwrite(dev, MIFCSR, 4);
        MIFDELAY;
        bmac_mif_writebits(dev, ~0U, 32);
        bmac_mif_writebits(dev, 6, 4);
        bmac_mif_writebits(dev, addr, 10);
        bmwrite(dev, MIFCSR, 2);
        MIFDELAY;
        bmwrite(dev, MIFCSR, 1);
        MIFDELAY;
        val = bmac_mif_readbits(dev, 17);
        bmwrite(dev, MIFCSR, 4);
        MIFDELAY;
        return val;
}

static void
bmac_mif_write(struct net_device *dev, unsigned int addr, unsigned int val)
{
        bmwrite(dev, MIFCSR, 4);
        MIFDELAY;
        bmac_mif_writebits(dev, ~0U, 32);
        bmac_mif_writebits(dev, 5, 4);
        bmac_mif_writebits(dev, addr, 10);
        bmac_mif_writebits(dev, 2, 2);
        bmac_mif_writebits(dev, val, 16);
        bmac_mif_writebits(dev, 3, 2);
}
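/*
 * The PHY registers poked by bmac_init_phy() below follow the standard
 * MII layout (stated here for reference, the driver itself has no
 * symbolic names for them): register 0 is control -- writing 0x1200
 * sets the autonegotiation-enable and restart-autonegotiation bits,
 * 0x1000 enable only -- register 1 is status, and register 4 is the
 * autonegotiation advertisement register, into which the code shifts
 * the ability bits from the status register and ORs in the CSMA
 * selector bit.
 */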
static void
bmac_init_registers(struct net_device *dev)
{
        struct bmac_data *bp = netdev_priv(dev);
        volatile unsigned short regValue;
        unsigned short *pWord16;
        int i;

        /* XXDEBUG(("bmac: enter init_registers\n")); */

        bmwrite(dev, RXRST, RxResetValue);
        bmwrite(dev, TXRST, TxResetBit);

        i = 100;
        do {
                --i;
                udelay(10000);
                regValue = bmread(dev, TXRST); /* wait for reset to clear..acknowledge */
        } while ((regValue & TxResetBit) && i > 0);

        if (!bp->is_bmac_plus) {
                regValue = bmread(dev, XCVRIF);
                regValue |= ClkBit | SerialMode | COLActiveLow;
                bmwrite(dev, XCVRIF, regValue);
                udelay(10000);
        }

        bmwrite(dev, RSEED, (unsigned short)0x1968);

        regValue = bmread(dev, XIFC);
        regValue |= TxOutputEnable;
        bmwrite(dev, XIFC, regValue);

        bmread(dev, PAREG);

        /* set collision counters to 0 */
        bmwrite(dev, NCCNT, 0);
        bmwrite(dev, NTCNT, 0);
        bmwrite(dev, EXCNT, 0);
        bmwrite(dev, LTCNT, 0);

        /* set rx counters to 0 */
        bmwrite(dev, FRCNT, 0);
        bmwrite(dev, LECNT, 0);
        bmwrite(dev, AECNT, 0);
        bmwrite(dev, FECNT, 0);
        bmwrite(dev, RXCV, 0);

        /* set tx fifo information */
        bmwrite(dev, TXTH, 4);  /* 4 octets before tx starts */

        bmwrite(dev, TXFIFOCSR, 0);     /* first disable txFIFO */
        bmwrite(dev, TXFIFOCSR, TxFIFOEnable );

        /* set rx fifo information */
        bmwrite(dev, RXFIFOCSR, 0);     /* first disable rxFIFO */
        bmwrite(dev, RXFIFOCSR, RxFIFOEnable );

        //bmwrite(dev, TXCFG, TxMACEnable);     /* TxNeverGiveUp maybe later */
        bmread(dev, STATUS);            /* read it just to clear it */

        /* zero out the chip Hash Filter registers */
        for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
        bmwrite(dev, BHASH3, bp->hash_table_mask[0]);   /* bits 15 - 0 */
        bmwrite(dev, BHASH2, bp->hash_table_mask[1]);   /* bits 31 - 16 */
        bmwrite(dev, BHASH1, bp->hash_table_mask[2]);   /* bits 47 - 32 */
        bmwrite(dev, BHASH0, bp->hash_table_mask[3]);   /* bits 63 - 48 */

        pWord16 = (unsigned short *)dev->dev_addr;
        bmwrite(dev, MADD0, *pWord16++);
        bmwrite(dev, MADD1, *pWord16++);
        bmwrite(dev, MADD2, *pWord16);

        bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets);

        bmwrite(dev, INTDISABLE, EnableNormal);

        return;
}

#if 0
static void
bmac_disable_interrupts(struct net_device *dev)
{
        bmwrite(dev, INTDISABLE, DisableAll);
}

static void
bmac_enable_interrupts(struct net_device *dev)
{
        bmwrite(dev, INTDISABLE, EnableNormal);
}
#endif


static void
bmac_start_chip(struct net_device *dev)
{
        struct bmac_data *bp = netdev_priv(dev);
        volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
        unsigned short oldConfig;

        /* enable rx dma channel */
        dbdma_continue(rd);

        oldConfig = bmread(dev, TXCFG);
        bmwrite(dev, TXCFG, oldConfig | TxMACEnable );

        /* turn on rx plus any other bits already on (promiscuous possibly) */
        oldConfig = bmread(dev, RXCFG);
        bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
        udelay(20000);
}

static void
bmac_init_phy(struct net_device *dev)
{
        unsigned int addr;
        struct bmac_data *bp = netdev_priv(dev);

        printk(KERN_DEBUG "phy registers:");
        for (addr = 0; addr < 32; ++addr) {
                if ((addr & 7) == 0)
                        printk("\n" KERN_DEBUG);
                printk(" %.4x", bmac_mif_read(dev, addr));
        }
        printk("\n");
        if (bp->is_bmac_plus) {
                unsigned int capable, ctrl;

                ctrl = bmac_mif_read(dev, 0);
                capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1;
                if (bmac_mif_read(dev, 4) != capable
                    || (ctrl & 0x1000) == 0) {
                        bmac_mif_write(dev, 4, capable);
                        bmac_mif_write(dev, 0, 0x1200);
                } else
                        bmac_mif_write(dev, 0, 0x1000);
        }
}

static void bmac_init_chip(struct net_device *dev)
{
        bmac_init_phy(dev);
        bmac_init_registers(dev);
}

#ifdef CONFIG_PM
static int bmac_suspend(struct macio_dev *mdev, u32 state)
{
        struct net_device* dev = macio_get_drvdata(mdev);
        struct bmac_data *bp = netdev_priv(dev);
        unsigned long flags;
        unsigned short config;
        int i;

        netif_device_detach(dev);
        /* prolly should wait for dma to finish & turn off the chip */
        spin_lock_irqsave(&bp->lock, flags);
        if (bp->timeout_active) {
                del_timer(&bp->tx_timeout);
                bp->timeout_active = 0;
        }
        disable_irq(dev->irq);
        disable_irq(bp->tx_dma_intr);
        disable_irq(bp->rx_dma_intr);
        bp->sleeping = 1;
        spin_unlock_irqrestore(&bp->lock, flags);
        if (bp->opened) {
                volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
                volatile struct dbdma_regs __iomem *td = bp->tx_dma;

                config = bmread(dev, RXCFG);
                bmwrite(dev, RXCFG, (config & ~RxMACEnable));
                config = bmread(dev, TXCFG);
                bmwrite(dev, TXCFG, (config & ~TxMACEnable));
                bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
                /* disable rx and tx dma */
                st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));       /* clear run bit */
                st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));       /* clear run bit */
                /* free some skb's */
                for (i=0; i<N_RX_RING; i++) {
                        if (bp->rx_bufs[i] != NULL) {
                                dev_kfree_skb(bp->rx_bufs[i]);
                                bp->rx_bufs[i] = NULL;
                        }
                }
                for (i = 0; i<N_TX_RING; i++) {
                        if (bp->tx_bufs[i] != NULL) {
                                dev_kfree_skb(bp->tx_bufs[i]);
                                bp->tx_bufs[i] = NULL;
                        }
                }
        }
        pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
        return 0;
}
static int bmac_resume(struct macio_dev *mdev)
{
        struct net_device* dev = macio_get_drvdata(mdev);
        struct bmac_data *bp = netdev_priv(dev);

        /* see if this is enough */
        if (bp->opened)
                bmac_reset_and_enable(dev);

        enable_irq(dev->irq);
        enable_irq(bp->tx_dma_intr);
        enable_irq(bp->rx_dma_intr);
        netif_device_attach(dev);

        return 0;
}
#endif /* CONFIG_PM */

static int bmac_set_address(struct net_device *dev, void *addr)
{
        struct bmac_data *bp = netdev_priv(dev);
        unsigned char *p = addr;
        unsigned short *pWord16;
        unsigned long flags;
        int i;

        XXDEBUG(("bmac: enter set_address\n"));
        spin_lock_irqsave(&bp->lock, flags);

        for (i = 0; i < 6; ++i) {
                dev->dev_addr[i] = p[i];
        }
        /* load up the hardware address */
        pWord16 = (unsigned short *)dev->dev_addr;
        bmwrite(dev, MADD0, *pWord16++);
        bmwrite(dev, MADD1, *pWord16++);
        bmwrite(dev, MADD2, *pWord16);

        spin_unlock_irqrestore(&bp->lock, flags);
        XXDEBUG(("bmac: exit set_address\n"));
        return 0;
}

static inline void bmac_set_timeout(struct net_device *dev)
{
        struct bmac_data *bp = netdev_priv(dev);
        unsigned long flags;

        spin_lock_irqsave(&bp->lock, flags);
        if (bp->timeout_active)
                del_timer(&bp->tx_timeout);
        bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
        bp->tx_timeout.function = bmac_tx_timeout;
        bp->tx_timeout.data = (unsigned long) dev;
        add_timer(&bp->tx_timeout);
        bp->timeout_active = 1;
        spin_unlock_irqrestore(&bp->lock, flags);
}

static void
bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
{
        void *vaddr;
        unsigned long baddr;
        unsigned long len;

        len = skb->len;
        vaddr = skb->data;
        baddr = virt_to_bus(vaddr);

        dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0);
}

static void
bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
{
        unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf;

        dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN,
                     virt_to_bus(addr), 0);
}
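/*
 * A note on the ring bookkeeping used from here on (descriptive only,
 * the logic lives in bmac_init_tx_ring(), bmac_transmit_packet() and
 * the DMA interrupt handlers): tx_fill/tx_empty and rx_fill/rx_empty
 * are producer/consumer indices into circular DBDMA command lists
 * whose extra final entry is a branch back to the start.  The transmit
 * path first writes a DBDMA_STOP into the *next* slot and only then
 * builds the OUTPUT_LAST command in the current one, so the channel
 * always halts on a stop command instead of running into stale
 * descriptors.  A ring counts as full when advancing fill would make
 * it equal to empty, leaving one slot unused by construction.
 */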
/* Bit-reverse one byte of an ethernet hardware address. */
static unsigned char
bitrev(unsigned char b)
{
        int d = 0, i;

        for (i = 0; i < 8; ++i, b >>= 1)
                d = (d << 1) | (b & 1);
        return d;
}


static void
bmac_init_tx_ring(struct bmac_data *bp)
{
        volatile struct dbdma_regs __iomem *td = bp->tx_dma;

        memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd));

        bp->tx_empty = 0;
        bp->tx_fill = 0;
        bp->tx_fullup = 0;

        /* put a branch at the end of the tx command list */
        dbdma_setcmd(&bp->tx_cmds[N_TX_RING],
                     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds));

        /* reset tx dma */
        dbdma_reset(td);
        out_le32(&td->wait_sel, 0x00200020);
        out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds));
}

static int
bmac_init_rx_ring(struct bmac_data *bp)
{
        volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
        int i;
        struct sk_buff *skb;

        /* initialize list of sk_buffs for receiving and set up recv dma */
        memset((char *)bp->rx_cmds, 0,
               (N_RX_RING + 1) * sizeof(struct dbdma_cmd));
        for (i = 0; i < N_RX_RING; i++) {
                if ((skb = bp->rx_bufs[i]) == NULL) {
                        bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
                        if (skb != NULL)
                                skb_reserve(skb, 2);
                }
                bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
        }

        bp->rx_empty = 0;
        bp->rx_fill = i;

        /* Put a branch back to the beginning of the receive command list */
        dbdma_setcmd(&bp->rx_cmds[N_RX_RING],
                     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds));

        /* start rx dma */
        dbdma_reset(rd);
        out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds));

        return 1;
}


static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev)
{
        struct bmac_data *bp = netdev_priv(dev);
        volatile struct dbdma_regs __iomem *td = bp->tx_dma;
        int i;

        /* see if there's a free slot in the tx ring */
        /* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */
        /*         bp->tx_empty, bp->tx_fill)); */
        i = bp->tx_fill + 1;
        if (i >= N_TX_RING)
                i = 0;
        if (i == bp->tx_empty) {
                netif_stop_queue(dev);
                bp->tx_fullup = 1;
                XXDEBUG(("bmac_transmit_packet: tx ring full\n"));
                return -1;      /* can't take it at the moment */
        }

        dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0);

        bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]);

        bp->tx_bufs[bp->tx_fill] = skb;
        bp->tx_fill = i;

        bp->stats.tx_bytes += skb->len;

        dbdma_continue(td);

        return 0;
}

static int rxintcount;

static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id, struct pt_regs *regs)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct bmac_data *bp = netdev_priv(dev);
        volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
        volatile struct dbdma_cmd *cp;
        int i, nb, stat;
        struct sk_buff *skb;
        unsigned int residual;
        int last;
        unsigned long flags;

        spin_lock_irqsave(&bp->lock, flags);

        if (++rxintcount < 10) {
                XXDEBUG(("bmac_rxdma_intr\n"));
        }

        last = -1;
        i = bp->rx_empty;

        while (1) {
                cp = &bp->rx_cmds[i];
                stat = ld_le16(&cp->xfer_status);
                residual = ld_le16(&cp->res_count);
                if ((stat & ACTIVE) == 0)
                        break;
                nb = RX_BUFLEN - residual - 2;
                if (nb < (ETHERMINPACKET - ETHERCRC)) {
                        skb = NULL;
                        bp->stats.rx_length_errors++;
                        bp->stats.rx_errors++;
                } else {
                        skb = bp->rx_bufs[i];
                        bp->rx_bufs[i] = NULL;
                }
                if (skb != NULL) {
                        nb -= ETHERCRC;
                        skb_put(skb, nb);
                        skb->dev = dev;
                        skb->protocol = eth_type_trans(skb, dev);
                        netif_rx(skb);
                        ++bp->stats.rx_packets;
                        bp->stats.rx_bytes += nb;
                } else {
                        ++bp->stats.rx_dropped;
                }
                dev->last_rx = jiffies;
                if ((skb = bp->rx_bufs[i]) == NULL) {
                        bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
                        if (skb != NULL)
                                skb_reserve(bp->rx_bufs[i], 2);
                }
                bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
                st_le16(&cp->res_count, 0);
                st_le16(&cp->xfer_status, 0);
                last = i;
                if (++i >= N_RX_RING) i = 0;
        }

        if (last != -1) {
                bp->rx_fill = last;
                bp->rx_empty = i;
        }

        dbdma_continue(rd);
        spin_unlock_irqrestore(&bp->lock, flags);

        if (rxintcount < 10) {
                XXDEBUG(("bmac_rxdma_intr done\n"));
        }
        return IRQ_HANDLED;
}

static int txintcount;

static irqreturn_t bmac_txdma_intr(int irq, void *dev_id, struct pt_regs *regs)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct bmac_data *bp = netdev_priv(dev);
        volatile struct dbdma_cmd *cp;
        int stat;
        unsigned long flags;

        spin_lock_irqsave(&bp->lock, flags);

        if (txintcount++ < 10) {
                XXDEBUG(("bmac_txdma_intr\n"));
        }

        /* del_timer(&bp->tx_timeout); */
        /* bp->timeout_active = 0; */

        while (1) {
                cp = &bp->tx_cmds[bp->tx_empty];
                stat = ld_le16(&cp->xfer_status);
                if (txintcount < 10) {
                        XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat));
                }
                if (!(stat & ACTIVE)) {
                        /*
                         * status field might not have been filled by DBDMA
                         */
                        if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr)))
                                break;
                }

                if (bp->tx_bufs[bp->tx_empty]) {
                        ++bp->stats.tx_packets;
                        dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]);
                }
                bp->tx_bufs[bp->tx_empty] = NULL;
                bp->tx_fullup = 0;
                netif_wake_queue(dev);
                if (++bp->tx_empty >= N_TX_RING)
                        bp->tx_empty = 0;
                if (bp->tx_empty == bp->tx_fill)
                        break;
        }

        spin_unlock_irqrestore(&bp->lock, flags);

        if (txintcount < 10) {
                XXDEBUG(("bmac_txdma_intr done->bmac_start\n"));
        }

        bmac_start(dev);
        return IRQ_HANDLED;
}

static struct net_device_stats *bmac_stats(struct net_device *dev)
{
        struct bmac_data *p = netdev_priv(dev);

        return &p->stats;
}

#ifndef SUNHME_MULTICAST
/* Real fast bit-reversal algorithm, 6-bit values */
static int reverse6[64] = {
        0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
        0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
        0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
        0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
        0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
        0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
        0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
        0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
};
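/*
 * How this (currently compiled-out, see SUNHME_MULTICAST above) hash
 * path picks a filter bit: crc416() folds 16 address bits at a time
 * into a CRC-32 using ENET_CRCPOLY, bmac_crc() runs that over the full
 * 48-bit address, the top six bits of the result are bit-reversed via
 * reverse6[], and the resulting 0..63 index selects one bit spread
 * across the four 16-bit BHASH registers (word = crc/16, bit = crc%16).
 */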
static unsigned int
crc416(unsigned int curval, unsigned short nxtval)
{
        register unsigned int counter, cur = curval, next = nxtval;
        register int high_crc_set, low_data_set;

        /* Swap bytes */
        next = ((next & 0x00FF) << 8) | (next >> 8);

        /* Compute bit-by-bit */
        for (counter = 0; counter < 16; ++counter) {
                /* is high CRC bit set? */
                if ((cur & 0x80000000) == 0) high_crc_set = 0;
                else high_crc_set = 1;

                cur = cur << 1;

                if ((next & 0x0001) == 0) low_data_set = 0;
                else low_data_set = 1;

                next = next >> 1;

                /* do the XOR */
                if (high_crc_set ^ low_data_set) cur = cur ^ ENET_CRCPOLY;
        }
        return cur;
}

static unsigned int
bmac_crc(unsigned short *address)
{
        unsigned int newcrc;

        XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
        newcrc = crc416(0xffffffff, *address);  /* address bits 47 - 32 */
        newcrc = crc416(newcrc, address[1]);    /* address bits 31 - 16 */
        newcrc = crc416(newcrc, address[2]);    /* address bits 15 - 0 */

        return(newcrc);
}

/*
 * Add requested mcast addr to BMac's hash table filter.
 *
 */

static void
bmac_addhash(struct bmac_data *bp, unsigned char *addr)
{
        unsigned int crc;
        unsigned short mask;

        if (!(*addr)) return;
        crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
        crc = reverse6[crc];    /* Hyperfast bit-reversing algorithm */
        if (bp->hash_use_count[crc]++) return;  /* This bit is already set */
        mask = crc % 16;
        mask = (unsigned short)1 << mask;
        bp->hash_table_mask[crc/16] |= mask;
}

static void
bmac_removehash(struct bmac_data *bp, unsigned char *addr)
{
        unsigned int crc;
        unsigned short mask;

        /* Now, delete the address from the filter copy, as indicated */
        crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
        crc = reverse6[crc];    /* Hyperfast bit-reversing algorithm */
        if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */
        if (--bp->hash_use_count[crc]) return;  /* That bit is still in use */
        mask = crc % 16;
        mask = ((unsigned short)1 << mask) ^ 0xffff; /* To turn off bit */
        bp->hash_table_mask[crc/16] &= mask;
}
/*
 * Sync the adapter with the software copy of the multicast mask
 * (logical address filter).
 */

static void
bmac_rx_off(struct net_device *dev)
{
        unsigned short rx_cfg;

        rx_cfg = bmread(dev, RXCFG);
        rx_cfg &= ~RxMACEnable;
        bmwrite(dev, RXCFG, rx_cfg);
        do {
                rx_cfg = bmread(dev, RXCFG);
        } while (rx_cfg & RxMACEnable);
}

unsigned short
bmac_rx_on(struct net_device *dev, int hash_enable, int promisc_enable)
{
        unsigned short rx_cfg;

        rx_cfg = bmread(dev, RXCFG);
        rx_cfg |= RxMACEnable;
        if (hash_enable) rx_cfg |= RxHashFilterEnable;
        else rx_cfg &= ~RxHashFilterEnable;
        if (promisc_enable) rx_cfg |= RxPromiscEnable;
        else rx_cfg &= ~RxPromiscEnable;
        bmwrite(dev, RXRST, RxResetValue);
        bmwrite(dev, RXFIFOCSR, 0);     /* first disable rxFIFO */
        bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
        bmwrite(dev, RXCFG, rx_cfg );
        return rx_cfg;
}

static void
bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp)
{
        bmwrite(dev, BHASH3, bp->hash_table_mask[0]);   /* bits 15 - 0 */
        bmwrite(dev, BHASH2, bp->hash_table_mask[1]);   /* bits 31 - 16 */
        bmwrite(dev, BHASH1, bp->hash_table_mask[2]);   /* bits 47 - 32 */
        bmwrite(dev, BHASH0, bp->hash_table_mask[3]);   /* bits 63 - 48 */
}

#if 0
static void
bmac_add_multi(struct net_device *dev,
               struct bmac_data *bp, unsigned char *addr)
{
        /* XXDEBUG(("bmac: enter bmac_add_multi\n")); */
        bmac_addhash(bp, addr);
        bmac_rx_off(dev);
        bmac_update_hash_table_mask(dev, bp);
        bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
        /* XXDEBUG(("bmac: exit bmac_add_multi\n")); */
}

static void
bmac_remove_multi(struct net_device *dev,
                  struct bmac_data *bp, unsigned char *addr)
{
        bmac_removehash(bp, addr);
        bmac_rx_off(dev);
        bmac_update_hash_table_mask(dev, bp);
        bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
}
#endif
/* Set or clear the multicast filter for this adaptor.
    num_addrs == -1     Promiscuous mode, receive all packets
    num_addrs == 0      Normal mode, clear multicast list
    num_addrs > 0       Multicast mode, receive normal and MC packets, and do
                        best-effort filtering.
 */
static void bmac_set_multicast(struct net_device *dev)
{
        struct dev_mc_list *dmi;
        struct bmac_data *bp = netdev_priv(dev);
        int num_addrs = dev->mc_count;
        unsigned short rx_cfg;
        int i;

        if (bp->sleeping)
                return;

        XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));

        if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
                for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff;
                bmac_update_hash_table_mask(dev, bp);
                rx_cfg = bmac_rx_on(dev, 1, 0);
                XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n", rx_cfg));
        } else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) {
                rx_cfg = bmread(dev, RXCFG);
                rx_cfg |= RxPromiscEnable;
                bmwrite(dev, RXCFG, rx_cfg);
                rx_cfg = bmac_rx_on(dev, 0, 1);
                XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg));
        } else {
                for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
                for (i=0; i<64; i++) bp->hash_use_count[i] = 0;
                if (num_addrs == 0) {
                        rx_cfg = bmac_rx_on(dev, 0, 0);
                        XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
                } else {
                        for (dmi=dev->mc_list; dmi!=NULL; dmi=dmi->next)
                                bmac_addhash(bp, dmi->dmi_addr);
                        bmac_update_hash_table_mask(dev, bp);
                        rx_cfg = bmac_rx_on(dev, 1, 0);
                        XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
                }
        }
        /* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */
}
#else /* ifdef SUNHME_MULTICAST */

/* The version of set_multicast below was lifted from sunhme.c */

static void bmac_set_multicast(struct net_device *dev)
{
        struct dev_mc_list *dmi = dev->mc_list;
        char *addrs;
        int i;
        unsigned short rx_cfg;
        u32 crc;

        if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
                bmwrite(dev, BHASH0, 0xffff);
                bmwrite(dev, BHASH1, 0xffff);
                bmwrite(dev, BHASH2, 0xffff);
                bmwrite(dev, BHASH3, 0xffff);
        } else if(dev->flags & IFF_PROMISC) {
                rx_cfg = bmread(dev, RXCFG);
                rx_cfg |= RxPromiscEnable;
                bmwrite(dev, RXCFG, rx_cfg);
        } else {
                u16 hash_table[4];

                rx_cfg = bmread(dev, RXCFG);
                rx_cfg &= ~RxPromiscEnable;
                bmwrite(dev, RXCFG, rx_cfg);

                for(i = 0; i < 4; i++) hash_table[i] = 0;

                for(i = 0; i < dev->mc_count; i++) {
                        addrs = dmi->dmi_addr;
                        dmi = dmi->next;

                        if(!(*addrs & 1))
                                continue;

                        crc = ether_crc_le(6, addrs);
                        crc >>= 26;
                        hash_table[crc >> 4] |= 1 << (crc & 0xf);
                }
                bmwrite(dev, BHASH0, hash_table[0]);
                bmwrite(dev, BHASH1, hash_table[1]);
                bmwrite(dev, BHASH2, hash_table[2]);
                bmwrite(dev, BHASH3, hash_table[3]);
        }
}
#endif /* SUNHME_MULTICAST */

static int miscintcount;

static irqreturn_t bmac_misc_intr(int irq, void *dev_id, struct pt_regs *regs)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct bmac_data *bp = netdev_priv(dev);
        unsigned int status = bmread(dev, STATUS);
        if (miscintcount++ < 10) {
                XXDEBUG(("bmac_misc_intr\n"));
        }
        /* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */
        /* bmac_txdma_intr_inner(irq, dev_id, regs); */
        /* if (status & FrameReceived) bp->stats.rx_dropped++; */
        if (status & RxErrorMask) bp->stats.rx_errors++;
        if (status & RxCRCCntExp) bp->stats.rx_crc_errors++;
        if (status & RxLenCntExp) bp->stats.rx_length_errors++;
        if (status & RxOverFlow) bp->stats.rx_over_errors++;
        if (status & RxAlignCntExp) bp->stats.rx_frame_errors++;

        /* if (status & FrameSent) bp->stats.tx_dropped++; */
        if (status & TxErrorMask) bp->stats.tx_errors++;
        if (status & TxUnderrun) bp->stats.tx_fifo_errors++;
        if (status & TxNormalCollExp) bp->stats.collisions++;
        return IRQ_HANDLED;
}

/*
 * Procedure for reading EEPROM
 */
#define SROMAddressLength       5
#define DataInOn                0x0008
#define DataInOff               0x0000
#define Clk                     0x0002
#define ChipSelect              0x0001
#define SDIShiftCount           3
#define SD0ShiftCount           2
#define DelayValue              1000    /* number of microseconds */
#define SROMStartOffset         10      /* this is in words */
#define SROMReadCount           3       /* number of words to read from SROM */
#define SROMAddressBits         6
#define EnetAddressOffset       20
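/*
 * The defines above drive a microwire-style serial EEPROM through the
 * SROMCSR register: bit 0 is chip select, bit 1 the clock, data is
 * shifted out to the part on bit SDIShiftCount and read back on bit
 * SD0ShiftCount.  A read, as implemented below, is: raise chip select,
 * clock in the read opcode (110, see reset_and_select_srom()), clock
 * in a SROMAddressBits-bit word address, then clock out 16 data bits
 * MSB first.  The station address sits at word offset
 * EnetAddressOffset/2 with each byte stored bit-reversed, hence the
 * bitrev() calls in bmac_get_station_address().
 */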
static unsigned char
bmac_clock_out_bit(struct net_device *dev)
{
        unsigned short data;
        unsigned short val;

        bmwrite(dev, SROMCSR, ChipSelect | Clk);
        udelay(DelayValue);

        data = bmread(dev, SROMCSR);
        udelay(DelayValue);
        val = (data >> SD0ShiftCount) & 1;

        bmwrite(dev, SROMCSR, ChipSelect);
        udelay(DelayValue);

        return val;
}

static void
bmac_clock_in_bit(struct net_device *dev, unsigned int val)
{
        unsigned short data;

        if (val != 0 && val != 1) return;

        data = (val << SDIShiftCount);
        bmwrite(dev, SROMCSR, data | ChipSelect );
        udelay(DelayValue);

        bmwrite(dev, SROMCSR, data | ChipSelect | Clk );
        udelay(DelayValue);

        bmwrite(dev, SROMCSR, data | ChipSelect);
        udelay(DelayValue);
}

static void
reset_and_select_srom(struct net_device *dev)
{
        /* first reset */
        bmwrite(dev, SROMCSR, 0);
        udelay(DelayValue);

        /* send it the read command (110) */
        bmac_clock_in_bit(dev, 1);
        bmac_clock_in_bit(dev, 1);
        bmac_clock_in_bit(dev, 0);
}

static unsigned short
read_srom(struct net_device *dev, unsigned int addr, unsigned int addr_len)
{
        unsigned short data, val;
        int i;

        /* send out the address we want to read from */
        for (i = 0; i < addr_len; i++) {
                val = addr >> (addr_len-i-1);
                bmac_clock_in_bit(dev, val & 1);
        }

        /* Now read in the 16-bit data */
        data = 0;
        for (i = 0; i < 16; i++) {
                val = bmac_clock_out_bit(dev);
                data <<= 1;
                data |= val;
        }
        bmwrite(dev, SROMCSR, 0);

        return data;
}
/*
 * It looks like Cogent and SMC use different methods for calculating
 * checksums. What a pain..
 */

static int
bmac_verify_checksum(struct net_device *dev)
{
        unsigned short data, storedCS;

        reset_and_select_srom(dev);
        data = read_srom(dev, 3, SROMAddressBits);
        storedCS = ((data >> 8) & 0x0ff) | ((data << 8) & 0xff00);

        return 0;
}


static void
bmac_get_station_address(struct net_device *dev, unsigned char *ea)
{
        int i;
        unsigned short data;

        for (i = 0; i < 3; i++)
        {
                reset_and_select_srom(dev);
                data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
                ea[2*i]   = bitrev(data & 0x0ff);
                ea[2*i+1] = bitrev((data >> 8) & 0x0ff);
        }
}

static void bmac_reset_and_enable(struct net_device *dev)
{
        struct bmac_data *bp = netdev_priv(dev);
        unsigned long flags;
        struct sk_buff *skb;
        unsigned char *data;

        spin_lock_irqsave(&bp->lock, flags);
        bmac_enable_and_reset_chip(dev);
        bmac_init_tx_ring(bp);
        bmac_init_rx_ring(bp);
        bmac_init_chip(dev);
        bmac_start_chip(dev);
        bmwrite(dev, INTDISABLE, EnableNormal);
        bp->sleeping = 0;

        /*
         * It seems that the bmac can't receive until it's transmitted
         * a packet. So we give it a dummy packet to transmit.
         */
        skb = dev_alloc_skb(ETHERMINPACKET);
        if (skb != NULL) {
                data = skb_put(skb, ETHERMINPACKET);
                memset(data, 0, ETHERMINPACKET);
                memcpy(data, dev->dev_addr, 6);
                memcpy(data+6, dev->dev_addr, 6);
                bmac_transmit_packet(skb, dev);
        }
        spin_unlock_irqrestore(&bp->lock, flags);
}
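/*
 * Probe expectations, as enforced at the top of bmac_probe() below:
 * the macio device must export exactly three MMIO resources (MAC
 * registers, TX DBDMA channel, RX DBDMA channel) and three interrupts
 * (MAC misc, TX DMA, RX DMA), in that order.  The station address
 * comes from the OF "mac-address" (or "local-mac-address") property;
 * when the 00:A0 prefix check fires, the bytes are assumed to be
 * stored bit-reversed and are flipped with bitrev().
 */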
static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_match *match)
{
        int j, rev, ret;
        struct bmac_data *bp;
        unsigned char *addr;
        struct net_device *dev;
        int is_bmac_plus = ((int)match->data) != 0;

        if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
                printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n");
                return -ENODEV;
        }
        addr = get_property(macio_get_of_node(mdev), "mac-address", NULL);
        if (addr == NULL) {
                addr = get_property(macio_get_of_node(mdev), "local-mac-address", NULL);
                if (addr == NULL) {
                        printk(KERN_ERR "BMAC: Can't get mac-address\n");
                        return -ENODEV;
                }
        }

        dev = alloc_etherdev(PRIV_BYTES);
        if (!dev) {
                printk(KERN_ERR "BMAC: alloc_etherdev failed, out of memory\n");
                return -ENOMEM;
        }

        bp = netdev_priv(dev);
        SET_MODULE_OWNER(dev);
        SET_NETDEV_DEV(dev, &mdev->ofdev.dev);
        macio_set_drvdata(mdev, dev);

        bp->mdev = mdev;
        spin_lock_init(&bp->lock);

        if (macio_request_resources(mdev, "bmac")) {
                printk(KERN_ERR "BMAC: can't request IO resource !\n");
                goto out_free;
        }

        dev->base_addr = (unsigned long)
                ioremap(macio_resource_start(mdev, 0), macio_resource_len(mdev, 0));
        if (dev->base_addr == 0)
                goto out_release;

        dev->irq = macio_irq(mdev, 0);

        bmac_enable_and_reset_chip(dev);
        bmwrite(dev, INTDISABLE, DisableAll);

        rev = addr[0] == 0 && addr[1] == 0xA0;
        for (j = 0; j < 6; ++j)
                dev->dev_addr[j] = rev? bitrev(addr[j]): addr[j];

        /* Enable chip without interrupts for now */
        bmac_enable_and_reset_chip(dev);
        bmwrite(dev, INTDISABLE, DisableAll);

        dev->open = bmac_open;
        dev->stop = bmac_close;
        dev->hard_start_xmit = bmac_output;
        dev->get_stats = bmac_stats;
        dev->set_multicast_list = bmac_set_multicast;
        dev->set_mac_address = bmac_set_address;

        bmac_get_station_address(dev, addr);
        if (bmac_verify_checksum(dev) != 0)
                goto err_out_iounmap;

        bp->is_bmac_plus = is_bmac_plus;
        bp->tx_dma = ioremap(macio_resource_start(mdev, 1), macio_resource_len(mdev, 1));
        if (!bp->tx_dma)
                goto err_out_iounmap;
        bp->tx_dma_intr = macio_irq(mdev, 1);
        bp->rx_dma = ioremap(macio_resource_start(mdev, 2), macio_resource_len(mdev, 2));
        if (!bp->rx_dma)
                goto err_out_iounmap_tx;
        bp->rx_dma_intr = macio_irq(mdev, 2);

        bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1);
        bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1;

        bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1);
        skb_queue_head_init(bp->queue);

        init_timer(&bp->tx_timeout);

        ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
        if (ret) {
                printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
                goto err_out_iounmap_rx;
        }
        ret = request_irq(bp->tx_dma_intr, bmac_txdma_intr, 0, "BMAC-txdma", dev);
        if (ret) {
                printk(KERN_ERR "BMAC: can't get irq %d\n", bp->tx_dma_intr);
                goto err_out_irq0;
        }
        ret = request_irq(bp->rx_dma_intr, bmac_rxdma_intr, 0, "BMAC-rxdma", dev);
        if (ret) {
                printk(KERN_ERR "BMAC: can't get irq %d\n", bp->rx_dma_intr);
                goto err_out_irq1;
        }

        /* Mask chip interrupts and disable chip, will be
         * re-enabled on open()
         */
        disable_irq(dev->irq);
        pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);

        if (register_netdev(dev) != 0) {
                printk(KERN_ERR "BMAC: Ethernet registration failed\n");
                goto err_out_irq2;
        }

        printk(KERN_INFO "%s: BMAC%s at", dev->name, (is_bmac_plus? "+": ""));
        for (j = 0; j < 6; ++j)
                printk("%c%.2x", (j? ':': ' '), dev->dev_addr[j]);
        XXDEBUG((", base_addr=%#0lx", dev->base_addr));
        printk("\n");

        return 0;

err_out_irq2:
        free_irq(bp->rx_dma_intr, dev);
err_out_irq1:
        free_irq(bp->tx_dma_intr, dev);
err_out_irq0:
        free_irq(dev->irq, dev);
err_out_iounmap_rx:
        iounmap(bp->rx_dma);
err_out_iounmap_tx:
        iounmap(bp->tx_dma);
err_out_iounmap:
        iounmap((void __iomem *)dev->base_addr);
out_release:
        macio_release_resources(mdev);
out_free:
        pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
        free_netdev(dev);

        return -ENODEV;
}
static int bmac_open(struct net_device *dev)
{
        struct bmac_data *bp = netdev_priv(dev);
        /* XXDEBUG(("bmac: enter open\n")); */
        /* reset the chip */
        bp->opened = 1;
        bmac_reset_and_enable(dev);
        enable_irq(dev->irq);
        dev->flags |= IFF_RUNNING;
        return 0;
}

static int bmac_close(struct net_device *dev)
{
        struct bmac_data *bp = netdev_priv(dev);
        volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
        volatile struct dbdma_regs __iomem *td = bp->tx_dma;
        unsigned short config;
        int i;

        bp->sleeping = 1;
        dev->flags &= ~(IFF_UP | IFF_RUNNING);

        /* disable rx and tx */
        config = bmread(dev, RXCFG);
        bmwrite(dev, RXCFG, (config & ~RxMACEnable));

        config = bmread(dev, TXCFG);
        bmwrite(dev, TXCFG, (config & ~TxMACEnable));

        bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */

        /* disable rx and tx dma */
        st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));       /* clear run bit */
        st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));       /* clear run bit */

        /* free some skb's */
        XXDEBUG(("bmac: free rx bufs\n"));
        for (i=0; i<N_RX_RING; i++) {
                if (bp->rx_bufs[i] != NULL) {
                        dev_kfree_skb(bp->rx_bufs[i]);
                        bp->rx_bufs[i] = NULL;
                }
        }
        XXDEBUG(("bmac: free tx bufs\n"));
        for (i = 0; i<N_TX_RING; i++) {
                if (bp->tx_bufs[i] != NULL) {
                        dev_kfree_skb(bp->tx_bufs[i]);
                        bp->tx_bufs[i] = NULL;
                }
        }
        XXDEBUG(("bmac: all bufs freed\n"));

        bp->opened = 0;
        disable_irq(dev->irq);
        pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);

        return 0;
}
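/*
 * Transmit is two-stage: bmac_output() (the hard_start_xmit hook)
 * only queues the skb on bp->queue, and bmac_start() drains that
 * queue into free TX ring slots under bp->lock.  bmac_start() is also
 * called from the TX DMA interrupt, so completions automatically pull
 * more queued packets into the ring.
 */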
static void
bmac_start(struct net_device *dev)
{
        struct bmac_data *bp = netdev_priv(dev);
        int i;
        struct sk_buff *skb;
        unsigned long flags;

        if (bp->sleeping)
                return;

        spin_lock_irqsave(&bp->lock, flags);
        while (1) {
                i = bp->tx_fill + 1;
                if (i >= N_TX_RING)
                        i = 0;
                if (i == bp->tx_empty)
                        break;
                skb = skb_dequeue(bp->queue);
                if (skb == NULL)
                        break;
                bmac_transmit_packet(skb, dev);
        }
        spin_unlock_irqrestore(&bp->lock, flags);
}

static int
bmac_output(struct sk_buff *skb, struct net_device *dev)
{
        struct bmac_data *bp = netdev_priv(dev);
        skb_queue_tail(bp->queue, skb);
        bmac_start(dev);
        return 0;
}

static void bmac_tx_timeout(unsigned long data)
{
        struct net_device *dev = (struct net_device *) data;
        struct bmac_data *bp = netdev_priv(dev);
        volatile struct dbdma_regs __iomem *td = bp->tx_dma;
        volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
        volatile struct dbdma_cmd *cp;
        unsigned long flags;
        unsigned short config, oldConfig;
        int i;

        XXDEBUG(("bmac: tx_timeout called\n"));
        spin_lock_irqsave(&bp->lock, flags);
        bp->timeout_active = 0;

        /* update various counters */
/*      bmac_handle_misc_intrs(bp, 0); */

        cp = &bp->tx_cmds[bp->tx_empty];
/*      XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */
/*         ld_le32(&td->status), ld_le16(&cp->xfer_status), bp->tx_bad_runt, */
/*         mb->pr, mb->xmtfs, mb->fifofc)); */

        /* turn off both tx and rx and reset the chip */
        config = bmread(dev, RXCFG);
        bmwrite(dev, RXCFG, (config & ~RxMACEnable));
        config = bmread(dev, TXCFG);
        bmwrite(dev, TXCFG, (config & ~TxMACEnable));
        out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
        printk(KERN_ERR "bmac: transmit timeout - resetting\n");
        bmac_enable_and_reset_chip(dev);

        /* restart rx dma */
        cp = bus_to_virt(ld_le32(&rd->cmdptr));
        out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
        out_le16(&cp->xfer_status, 0);
        out_le32(&rd->cmdptr, virt_to_bus(cp));
        out_le32(&rd->control, DBDMA_SET(RUN|WAKE));

        /* fix up the transmit side */
        XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n",
                 bp->tx_empty, bp->tx_fill, bp->tx_fullup));
        i = bp->tx_empty;
        ++bp->stats.tx_errors;
        if (i != bp->tx_fill) {
                dev_kfree_skb(bp->tx_bufs[i]);
                bp->tx_bufs[i] = NULL;
                if (++i >= N_TX_RING) i = 0;
                bp->tx_empty = i;
        }
        bp->tx_fullup = 0;
        netif_wake_queue(dev);
        if (i != bp->tx_fill) {
                cp = &bp->tx_cmds[i];
                out_le16(&cp->xfer_status, 0);
                out_le16(&cp->command, OUTPUT_LAST);
                out_le32(&td->cmdptr, virt_to_bus(cp));
                out_le32(&td->control, DBDMA_SET(RUN));
                /* bmac_set_timeout(dev); */
                XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i));
        }

        /* turn it back on */
        oldConfig = bmread(dev, RXCFG);
        bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
        oldConfig = bmread(dev, TXCFG);
        bmwrite(dev, TXCFG, oldConfig | TxMACEnable );

        spin_unlock_irqrestore(&bp->lock, flags);
}

#if 0
static void dump_dbdma(volatile struct dbdma_cmd *cp, int count)
{
        int i,*ip;

        for (i=0;i< count;i++) {
                ip = (int*)(cp+i);

                printk("dbdma req 0x%x addr 0x%x baddr 0x%x xfer/res 0x%x\n",
                       ld_le32(ip+0),
                       ld_le32(ip+1),
                       ld_le32(ip+2),
                       ld_le32(ip+3));
        }

}
#endif

#if 0
static int
bmac_proc_info(char *buffer, char **start, off_t offset, int length)
{
        int len = 0;
        off_t pos = 0;
        off_t begin = 0;
        int i;

        if (bmac_devs == NULL)
                return (-ENOSYS);

        len += sprintf(buffer, "BMAC counters & registers\n");

        for (i = 0; i<N_REG_ENTRIES; i++) {
                len += sprintf(buffer + len, "%s: %#08x\n",
                               reg_entries[i].name,
                               bmread(bmac_devs, reg_entries[i].reg_offset));
                pos = begin + len;

                if (pos < offset) {
                        len = 0;
                        begin = pos;
                }

                if (pos > offset+length) break;
        }

        *start = buffer + (offset - begin);
        len -= (offset - begin);

        if (len > length) len = length;

        return len;
}
#endif

static int __devexit bmac_remove(struct macio_dev *mdev)
{
        struct net_device *dev = macio_get_drvdata(mdev);
        struct bmac_data *bp = netdev_priv(dev);

        unregister_netdev(dev);

        free_irq(dev->irq, dev);
        free_irq(bp->tx_dma_intr, dev);
        free_irq(bp->rx_dma_intr, dev);

        iounmap((void __iomem *)dev->base_addr);
        iounmap(bp->tx_dma);
        iounmap(bp->rx_dma);

        macio_release_resources(mdev);

        free_netdev(dev);

        return 0;
}

static struct of_match bmac_match[] =
{
        {
        .name           = "bmac",
        .type           = OF_ANY_MATCH,
        .compatible     = OF_ANY_MATCH,
        .data           = (void *)0,
        },
        {
        .name           = OF_ANY_MATCH,
        .type           = "network",
        .compatible     = "bmac+",
        .data           = (void *)1,
        },
        {},
};

static struct macio_driver bmac_driver =
{
        .name           = "bmac",
        .match_table    = bmac_match,
        .probe          = bmac_probe,
        .remove         = bmac_remove,
#ifdef CONFIG_PM
        .suspend        = bmac_suspend,
        .resume         = bmac_resume,
#endif
};


static int __init bmac_init(void)
{
        if (bmac_emergency_rxbuf == NULL) {
                bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL);
                if (bmac_emergency_rxbuf == NULL) {
                        printk(KERN_ERR "BMAC: can't allocate emergency RX buffer\n");
                        return -ENOMEM;
                }
        }

        return macio_register_driver(&bmac_driver);
}

static void __exit bmac_exit(void)
{
        macio_unregister_driver(&bmac_driver);

        if (bmac_emergency_rxbuf != NULL) {
                kfree(bmac_emergency_rxbuf);
                bmac_emergency_rxbuf = NULL;
        }
}

MODULE_AUTHOR("Randy Gobbel/Paul Mackerras");
MODULE_DESCRIPTION("PowerMac BMAC ethernet driver.");
MODULE_LICENSE("GPL");

module_init(bmac_init);
module_exit(bmac_exit);