/*
 * Network device driver for the BMAC ethernet controller on
 * Apple Powermacs.  Assumes it's under a DBDMA controller.
 *
 * Copyright (C) 1998 Randy Gobbel.
 *
 * May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to
 * dynamic procfs inode.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
#include <linux/bitrev.h>
#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/macio.h>
#include <asm/irq.h>

#include "bmac.h"

#define trunc_page(x)	((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
#define round_page(x)	trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))

/*
 * CRC polynomial - used in working out multicast filter bits.
 */
#define ENET_CRCPOLY 0x04c11db7

/* switch to use multicast code lifted from sunhme driver */
#define SUNHME_MULTICAST

#define N_RX_RING	64
#define N_TX_RING	32
#define MAX_TX_ACTIVE	1
#define ETHERCRC	4
#define ETHERMINPACKET	64
#define ETHERMTU	1500
#define RX_BUFLEN	(ETHERMTU + 14 + ETHERCRC + 2)
#define TX_TIMEOUT	HZ	/* 1 second */

/* Bits in transmit DMA status */
#define TX_DMA_ERR	0x80

#define XXDEBUG(args)

struct bmac_data {
	/* volatile struct bmac *bmac; */
	struct sk_buff_head *queue;
	volatile struct dbdma_regs __iomem *tx_dma;
	int tx_dma_intr;
	volatile struct dbdma_regs __iomem *rx_dma;
	int rx_dma_intr;
	volatile struct dbdma_cmd *tx_cmds;	/* xmit dma command list */
	volatile struct dbdma_cmd *rx_cmds;	/* recv dma command list */
	struct macio_dev *mdev;
	int is_bmac_plus;
	struct sk_buff *rx_bufs[N_RX_RING];
	int rx_fill;
	int rx_empty;
	struct sk_buff *tx_bufs[N_TX_RING];
	int tx_fill;
	int tx_empty;
	unsigned char tx_fullup;
	struct net_device_stats stats;
	struct timer_list tx_timeout;
	int timeout_active;
	int sleeping;
	int opened;
	unsigned short hash_use_count[64];
	unsigned short hash_table_mask[4];
	spinlock_t lock;
};

#if 0 /* Move that to ethtool */

typedef struct bmac_reg_entry {
	char *name;
	unsigned short reg_offset;
} bmac_reg_entry_t;

#define N_REG_ENTRIES 31

static bmac_reg_entry_t reg_entries[N_REG_ENTRIES] = {
	{"MEMADD", MEMADD},
	{"MEMDATAHI", MEMDATAHI},
	{"MEMDATALO", MEMDATALO},
	{"TXPNTR", TXPNTR},
	{"RXPNTR", RXPNTR},
	{"IPG1", IPG1},
	{"IPG2", IPG2},
	{"ALIMIT", ALIMIT},
	{"SLOT", SLOT},
	{"PALEN", PALEN},
	{"PAPAT", PAPAT},
	{"TXSFD", TXSFD},
	{"JAM", JAM},
	{"TXCFG", TXCFG},
	{"TXMAX", TXMAX},
	{"TXMIN", TXMIN},
	{"PAREG", PAREG},
	{"DCNT", DCNT},
	{"NCCNT", NCCNT},
	{"NTCNT", NTCNT},
	{"EXCNT", EXCNT},
	{"LTCNT", LTCNT},
	{"TXSM", TXSM},
	{"RXCFG", RXCFG},
	{"RXMAX", RXMAX},
	{"RXMIN", RXMIN},
	{"FRCNT", FRCNT},
	{"AECNT", AECNT},
	{"FECNT", FECNT},
	{"RXSM", RXSM},
	{"RXCV", RXCV}
};

#endif

static unsigned char *bmac_emergency_rxbuf;

/*
 * Number of bytes of private data per BMAC: allow enough for
 * the rx and tx dma commands plus a branch dma command each,
 * and another 16 bytes to allow us to align the dma command
 * buffers on a 16 byte boundary.
 */
#define PRIV_BYTES	(sizeof(struct bmac_data) \
	+ (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \
	+ sizeof(struct sk_buff_head))
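/*
 * Layout sketch (illustrative, mirroring what bmac_probe() does with
 * the private area below): bmac_data itself comes first, then the
 * 16-byte-aligned DBDMA command lists, then the sk_buff queue head.
 */
#if 0
	bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1);
	bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1;
	bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1);
#endif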
static int bmac_open(struct net_device *dev);
static int bmac_close(struct net_device *dev);
static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
static struct net_device_stats *bmac_stats(struct net_device *dev);
static void bmac_set_multicast(struct net_device *dev);
static void bmac_reset_and_enable(struct net_device *dev);
static void bmac_start_chip(struct net_device *dev);
static void bmac_init_chip(struct net_device *dev);
static void bmac_init_registers(struct net_device *dev);
static void bmac_enable_and_reset_chip(struct net_device *dev);
static int bmac_set_address(struct net_device *dev, void *addr);
static irqreturn_t bmac_misc_intr(int irq, void *dev_id);
static irqreturn_t bmac_txdma_intr(int irq, void *dev_id);
static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id);
static void bmac_set_timeout(struct net_device *dev);
static void bmac_tx_timeout(unsigned long data);
static int bmac_output(struct sk_buff *skb, struct net_device *dev);
static void bmac_start(struct net_device *dev);

#define	DBDMA_SET(x)	( ((x) | (x) << 16) )
#define	DBDMA_CLEAR(x)	( (x) << 16)

/*
 * DBDMA registers are little-endian; on these big-endian machines the
 * byte-reversing load/store instructions give cheap LE accessors.
 */
static inline void
dbdma_st32(volatile __u32 __iomem *a, unsigned long x)
{
	__asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory");
	return;
}

static inline unsigned long
dbdma_ld32(volatile __u32 __iomem *a)
{
	__u32 swap;
	__asm__ volatile ("lwbrx %0,0,%1" : "=r" (swap) : "r" (a));
	return swap;
}

static void
dbdma_continue(volatile struct dbdma_regs __iomem *dmap)
{
	dbdma_st32(&dmap->control,
		   DBDMA_SET(RUN|WAKE) | DBDMA_CLEAR(PAUSE|DEAD));
	eieio();
}

static void
dbdma_reset(volatile struct dbdma_regs __iomem *dmap)
{
	dbdma_st32(&dmap->control,
		   DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN));
	eieio();
	while (dbdma_ld32(&dmap->status) & RUN)
		eieio();
}

static void
dbdma_setcmd(volatile struct dbdma_cmd *cp,
	     unsigned short cmd, unsigned count, unsigned long addr,
	     unsigned long cmd_dep)
{
	out_le16(&cp->command, cmd);
	out_le16(&cp->req_count, count);
	out_le32(&cp->phy_addr, addr);
	out_le32(&cp->cmd_dep, cmd_dep);
	out_le16(&cp->xfer_status, 0);
	out_le16(&cp->res_count, 0);
}
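/*
 * A minimal sketch (illustrative, not built): the DBDMA control
 * register takes a bit mask in its high 16 bits and the new bit
 * values in its low 16 bits, so DBDMA_SET(x) selects and sets bits
 * while DBDMA_CLEAR(x) selects them with zero values.
 */
#if 0
static void dbdma_example(volatile struct dbdma_regs __iomem *dmap)
{
	/* same effect as dbdma_continue(): set RUN and WAKE, clear
	   PAUSE and DEAD, leave all other control bits untouched */
	dbdma_st32(&dmap->control,
		   (((RUN|WAKE) << 16) | (RUN|WAKE)) | ((PAUSE|DEAD) << 16));
}
#endif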
static inline
void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data )
{
	out_le16((void __iomem *)dev->base_addr + reg_offset, data);
}


static inline
unsigned short bmread(struct net_device *dev, unsigned long reg_offset )
{
	return in_le16((void __iomem *)dev->base_addr + reg_offset);
}

static void
bmac_enable_and_reset_chip(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;

	if (rd)
		dbdma_reset(rd);
	if (td)
		dbdma_reset(td);

	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 1);
}

#define MIFDELAY	udelay(10)

static unsigned int
bmac_mif_readbits(struct net_device *dev, int nb)
{
	unsigned int val = 0;

	while (--nb >= 0) {
		bmwrite(dev, MIFCSR, 0);
		MIFDELAY;
		if (bmread(dev, MIFCSR) & 8)
			val |= 1 << nb;
		bmwrite(dev, MIFCSR, 1);
		MIFDELAY;
	}
	bmwrite(dev, MIFCSR, 0);
	MIFDELAY;
	bmwrite(dev, MIFCSR, 1);
	MIFDELAY;
	return val;
}

static void
bmac_mif_writebits(struct net_device *dev, unsigned int val, int nb)
{
	int b;

	while (--nb >= 0) {
		b = (val & (1 << nb))? 6: 4;
		bmwrite(dev, MIFCSR, b);
		MIFDELAY;
		bmwrite(dev, MIFCSR, b|1);
		MIFDELAY;
	}
}

static unsigned int
bmac_mif_read(struct net_device *dev, unsigned int addr)
{
	unsigned int val;

	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	bmac_mif_writebits(dev, ~0U, 32);
	bmac_mif_writebits(dev, 6, 4);
	bmac_mif_writebits(dev, addr, 10);
	bmwrite(dev, MIFCSR, 2);
	MIFDELAY;
	bmwrite(dev, MIFCSR, 1);
	MIFDELAY;
	val = bmac_mif_readbits(dev, 17);
	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	return val;
}

static void
bmac_mif_write(struct net_device *dev, unsigned int addr, unsigned int val)
{
	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	bmac_mif_writebits(dev, ~0U, 32);
	bmac_mif_writebits(dev, 5, 4);
	bmac_mif_writebits(dev, addr, 10);
	bmac_mif_writebits(dev, 2, 2);
	bmac_mif_writebits(dev, val, 16);
	bmac_mif_writebits(dev, 3, 2);
}
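/*
 * For reference (standard IEEE 802.3 clause-22 framing; the MIFCSR bit
 * assignments are inferred from the routines above, not from chip
 * documentation): each management frame is a 32-bit preamble of ones,
 * 4 bits of start + opcode (0110 = read, 0101 = write), 10 bits of PHY
 * and register address, a turnaround, and 16 data bits; reads clock in
 * 17 bits so the turnaround bit is consumed along with the data.
 */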
static void
bmac_init_registers(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile unsigned short regValue;
	unsigned short *pWord16;
	int i;

	/* XXDEBUG(("bmac: enter init_registers\n")); */

	bmwrite(dev, RXRST, RxResetValue);
	bmwrite(dev, TXRST, TxResetBit);

	i = 100;
	do {
		--i;
		udelay(10000);
		regValue = bmread(dev, TXRST); /* wait for reset to clear..acknowledge */
	} while ((regValue & TxResetBit) && i > 0);

	if (!bp->is_bmac_plus) {
		regValue = bmread(dev, XCVRIF);
		regValue |= ClkBit | SerialMode | COLActiveLow;
		bmwrite(dev, XCVRIF, regValue);
		udelay(10000);
	}

	bmwrite(dev, RSEED, (unsigned short)0x1968);

	regValue = bmread(dev, XIFC);
	regValue |= TxOutputEnable;
	bmwrite(dev, XIFC, regValue);

	bmread(dev, PAREG);

	/* set collision counters to 0 */
	bmwrite(dev, NCCNT, 0);
	bmwrite(dev, NTCNT, 0);
	bmwrite(dev, EXCNT, 0);
	bmwrite(dev, LTCNT, 0);

	/* set rx counters to 0 */
	bmwrite(dev, FRCNT, 0);
	bmwrite(dev, LECNT, 0);
	bmwrite(dev, AECNT, 0);
	bmwrite(dev, FECNT, 0);
	bmwrite(dev, RXCV, 0);

	/* set tx fifo information */
	bmwrite(dev, TXTH, 4);	/* 4 octets before tx starts */

	bmwrite(dev, TXFIFOCSR, 0);	/* first disable txFIFO */
	bmwrite(dev, TXFIFOCSR, TxFIFOEnable );

	/* set rx fifo information */
	bmwrite(dev, RXFIFOCSR, 0);	/* first disable rxFIFO */
	bmwrite(dev, RXFIFOCSR, RxFIFOEnable );

	//bmwrite(dev, TXCFG, TxMACEnable);	/* TxNeverGiveUp maybe later */
	bmread(dev, STATUS);		/* read it just to clear it */

	/* zero out the chip Hash Filter registers */
	for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
	bmwrite(dev, BHASH3, bp->hash_table_mask[0]);	/* bits 15 - 0 */
	bmwrite(dev, BHASH2, bp->hash_table_mask[1]);	/* bits 31 - 16 */
	bmwrite(dev, BHASH1, bp->hash_table_mask[2]);	/* bits 47 - 32 */
	bmwrite(dev, BHASH0, bp->hash_table_mask[3]);	/* bits 63 - 48 */

	pWord16 = (unsigned short *)dev->dev_addr;
	bmwrite(dev, MADD0, *pWord16++);
	bmwrite(dev, MADD1, *pWord16++);
	bmwrite(dev, MADD2, *pWord16);

	bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets);

	bmwrite(dev, INTDISABLE, EnableNormal);

	return;
}

#if 0
static void
bmac_disable_interrupts(struct net_device *dev)
{
	bmwrite(dev, INTDISABLE, DisableAll);
}

static void
bmac_enable_interrupts(struct net_device *dev)
{
	bmwrite(dev, INTDISABLE, EnableNormal);
}
#endif


static void
bmac_start_chip(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	unsigned short	oldConfig;

	/* enable rx dma channel */
	dbdma_continue(rd);

	oldConfig = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, oldConfig | TxMACEnable );

	/* turn on rx plus any other bits already on (promiscuous possibly) */
	oldConfig = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
	udelay(20000);
}

static void
bmac_init_phy(struct net_device *dev)
{
	unsigned int addr;
	struct bmac_data *bp = netdev_priv(dev);

	printk(KERN_DEBUG "phy registers:");
	for (addr = 0; addr < 32; ++addr) {
		if ((addr & 7) == 0)
			printk("\n" KERN_DEBUG);
		printk(" %.4x", bmac_mif_read(dev, addr));
	}
	printk("\n");
	if (bp->is_bmac_plus) {
		unsigned int capable, ctrl;

		ctrl = bmac_mif_read(dev, 0);
		capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1;
		if (bmac_mif_read(dev, 4) != capable
		    || (ctrl & 0x1000) == 0) {
			bmac_mif_write(dev, 4, capable);
			bmac_mif_write(dev, 0, 0x1200);
		} else
			bmac_mif_write(dev, 0, 0x1000);
	}
}

static void bmac_init_chip(struct net_device *dev)
{
	bmac_init_phy(dev);
	bmac_init_registers(dev);
}
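/*
 * MII notes on bmac_init_phy() above (standard register semantics,
 * assumed rather than taken from Apple documentation): writing 0x1200
 * to PHY register 0 (BMCR) sets the autonegotiation-enable (0x1000)
 * and restart-autonegotiation (0x0200) bits, and the advertisement in
 * register 4 is built by shifting the capability bits of register 1
 * (BMSR bits 15:11) down into ANAR bits 9:5, plus the 802.3 selector.
 */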
#ifdef CONFIG_PM
static int bmac_suspend(struct macio_dev *mdev, pm_message_t state)
{
	struct net_device* dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);
	unsigned long flags;
	unsigned short config;
	int i;

	netif_device_detach(dev);
	/* prolly should wait for dma to finish & turn off the chip */
	spin_lock_irqsave(&bp->lock, flags);
	if (bp->timeout_active) {
		del_timer(&bp->tx_timeout);
		bp->timeout_active = 0;
	}
	disable_irq(dev->irq);
	disable_irq(bp->tx_dma_intr);
	disable_irq(bp->rx_dma_intr);
	bp->sleeping = 1;
	spin_unlock_irqrestore(&bp->lock, flags);
	if (bp->opened) {
		volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
		volatile struct dbdma_regs __iomem *td = bp->tx_dma;

		config = bmread(dev, RXCFG);
		bmwrite(dev, RXCFG, (config & ~RxMACEnable));
		config = bmread(dev, TXCFG);
		bmwrite(dev, TXCFG, (config & ~TxMACEnable));
		bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
		/* disable rx and tx dma */
		st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
		st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
		/* free some skb's */
		for (i=0; i<N_RX_RING; i++) {
			if (bp->rx_bufs[i] != NULL) {
				dev_kfree_skb(bp->rx_bufs[i]);
				bp->rx_bufs[i] = NULL;
			}
		}
		for (i = 0; i<N_TX_RING; i++) {
			if (bp->tx_bufs[i] != NULL) {
				dev_kfree_skb(bp->tx_bufs[i]);
				bp->tx_bufs[i] = NULL;
			}
		}
	}
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
	return 0;
}

static int bmac_resume(struct macio_dev *mdev)
{
	struct net_device* dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);

	/* see if this is enough */
	if (bp->opened)
		bmac_reset_and_enable(dev);

	enable_irq(dev->irq);
	enable_irq(bp->tx_dma_intr);
	enable_irq(bp->rx_dma_intr);
	netif_device_attach(dev);

	return 0;
}
#endif /* CONFIG_PM */

static int bmac_set_address(struct net_device *dev, void *addr)
{
	struct bmac_data *bp = netdev_priv(dev);
	unsigned char *p = addr;
	unsigned short *pWord16;
	unsigned long flags;
	int i;

	XXDEBUG(("bmac: enter set_address\n"));
	spin_lock_irqsave(&bp->lock, flags);

	for (i = 0; i < 6; ++i) {
		dev->dev_addr[i] = p[i];
	}
	/* load up the hardware address */
	pWord16 = (unsigned short *)dev->dev_addr;
	bmwrite(dev, MADD0, *pWord16++);
	bmwrite(dev, MADD1, *pWord16++);
	bmwrite(dev, MADD2, *pWord16);

	spin_unlock_irqrestore(&bp->lock, flags);
	XXDEBUG(("bmac: exit set_address\n"));
	return 0;
}

static inline void bmac_set_timeout(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);
	if (bp->timeout_active)
		del_timer(&bp->tx_timeout);
	bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
	bp->tx_timeout.function = bmac_tx_timeout;
	bp->tx_timeout.data = (unsigned long) dev;
	add_timer(&bp->tx_timeout);
	bp->timeout_active = 1;
	spin_unlock_irqrestore(&bp->lock, flags);
}
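/*
 * A minimal alternative sketch (not driver code): since the timer's
 * function and data fields are the same on every arming, the
 * del_timer()/add_timer() pair above could also be written as a single
 * atomic update, assuming function and data were set up once at init:
 *
 *	mod_timer(&bp->tx_timeout, jiffies + TX_TIMEOUT);
 */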
static void
bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
{
	void *vaddr;
	unsigned long baddr;
	unsigned long len;

	len = skb->len;
	vaddr = skb->data;
	baddr = virt_to_bus(vaddr);

	dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0);
}

static void
bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
{
	unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf;

	dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN,
		     virt_to_bus(addr), 0);
}

static void
bmac_init_tx_ring(struct bmac_data *bp)
{
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;

	memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd));

	bp->tx_empty = 0;
	bp->tx_fill = 0;
	bp->tx_fullup = 0;

	/* put a branch at the end of the tx command list */
	dbdma_setcmd(&bp->tx_cmds[N_TX_RING],
		     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds));

	/* reset tx dma */
	dbdma_reset(td);
	out_le32(&td->wait_sel, 0x00200020);
	out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds));
}

static int
bmac_init_rx_ring(struct bmac_data *bp)
{
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	int i;
	struct sk_buff *skb;

	/* initialize list of sk_buffs for receiving and set up recv dma */
	memset((char *)bp->rx_cmds, 0,
	       (N_RX_RING + 1) * sizeof(struct dbdma_cmd));
	for (i = 0; i < N_RX_RING; i++) {
		if ((skb = bp->rx_bufs[i]) == NULL) {
			bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
			if (skb != NULL)
				skb_reserve(skb, 2);
		}
		bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
	}

	bp->rx_empty = 0;
	bp->rx_fill = i;

	/* Put a branch back to the beginning of the receive command list */
	dbdma_setcmd(&bp->rx_cmds[N_RX_RING],
		     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds));

	/* start rx dma */
	dbdma_reset(rd);
	out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds));

	return 1;
}


static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
	int i;

	/* see if there's a free slot in the tx ring */
	/* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */
	/* 	     bp->tx_empty, bp->tx_fill)); */
	i = bp->tx_fill + 1;
	if (i >= N_TX_RING)
		i = 0;
	if (i == bp->tx_empty) {
		netif_stop_queue(dev);
		bp->tx_fullup = 1;
		XXDEBUG(("bmac_transmit_packet: tx ring full\n"));
		return -1;		/* can't take it at the moment */
	}

	dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0);

	bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]);

	bp->tx_bufs[bp->tx_fill] = skb;
	bp->tx_fill = i;

	bp->stats.tx_bytes += skb->len;

	dbdma_continue(td);

	return 0;
}
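/*
 * Illustrative helper (not built): bmac_transmit_packet() treats the
 * ring as full when advancing tx_fill would collide with tx_empty, so
 * one descriptor slot is always left unused as a separator.
 */
#if 0
static int bmac_tx_ring_full(struct bmac_data *bp)
{
	return ((bp->tx_fill + 1) % N_TX_RING) == bp->tx_empty;
}
#endif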
static int rxintcount;

static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_cmd *cp;
	int i, nb, stat;
	struct sk_buff *skb;
	unsigned int residual;
	int last;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (++rxintcount < 10) {
		XXDEBUG(("bmac_rxdma_intr\n"));
	}

	last = -1;
	i = bp->rx_empty;

	while (1) {
		cp = &bp->rx_cmds[i];
		stat = ld_le16(&cp->xfer_status);
		residual = ld_le16(&cp->res_count);
		if ((stat & ACTIVE) == 0)
			break;
		nb = RX_BUFLEN - residual - 2;
		if (nb < (ETHERMINPACKET - ETHERCRC)) {
			skb = NULL;
			bp->stats.rx_length_errors++;
			bp->stats.rx_errors++;
		} else {
			skb = bp->rx_bufs[i];
			bp->rx_bufs[i] = NULL;
		}
		if (skb != NULL) {
			nb -= ETHERCRC;
			skb_put(skb, nb);
			skb->dev = dev;
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			++bp->stats.rx_packets;
			bp->stats.rx_bytes += nb;
		} else {
			++bp->stats.rx_dropped;
		}
		dev->last_rx = jiffies;
		if ((skb = bp->rx_bufs[i]) == NULL) {
			bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
			if (skb != NULL)
				skb_reserve(bp->rx_bufs[i], 2);
		}
		bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
		st_le16(&cp->res_count, 0);
		st_le16(&cp->xfer_status, 0);
		last = i;
		if (++i >= N_RX_RING) i = 0;
	}

	if (last != -1) {
		bp->rx_fill = last;
		bp->rx_empty = i;
	}

	dbdma_continue(rd);
	spin_unlock_irqrestore(&bp->lock, flags);

	if (rxintcount < 10) {
		XXDEBUG(("bmac_rxdma_intr done\n"));
	}
	return IRQ_HANDLED;
}

static int txintcount;

static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_cmd *cp;
	int stat;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (txintcount++ < 10) {
		XXDEBUG(("bmac_txdma_intr\n"));
	}

	/*     del_timer(&bp->tx_timeout); */
	/*     bp->timeout_active = 0; */

	while (1) {
		cp = &bp->tx_cmds[bp->tx_empty];
		stat = ld_le16(&cp->xfer_status);
		if (txintcount < 10) {
			XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat));
		}
		if (!(stat & ACTIVE)) {
			/*
			 * status field might not have been filled by DBDMA
			 */
			if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr)))
				break;
		}

		if (bp->tx_bufs[bp->tx_empty]) {
			++bp->stats.tx_packets;
			dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]);
		}
		bp->tx_bufs[bp->tx_empty] = NULL;
		bp->tx_fullup = 0;
		netif_wake_queue(dev);
		if (++bp->tx_empty >= N_TX_RING)
			bp->tx_empty = 0;
		if (bp->tx_empty == bp->tx_fill)
			break;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (txintcount < 10) {
		XXDEBUG(("bmac_txdma_intr done->bmac_start\n"));
	}

	bmac_start(dev);
	return IRQ_HANDLED;
}

static struct net_device_stats *bmac_stats(struct net_device *dev)
{
	struct bmac_data *p = netdev_priv(dev);

	return &p->stats;
}
#ifndef SUNHME_MULTICAST
/* Real fast bit-reversal algorithm, 6-bit values */
static int reverse6[64] = {
	0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
	0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
	0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
	0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
	0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
	0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
	0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
	0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
};

static unsigned int
crc416(unsigned int curval, unsigned short nxtval)
{
	register unsigned int counter, cur = curval, next = nxtval;
	register int high_crc_set, low_data_set;

	/* Swap bytes */
	next = ((next & 0x00FF) << 8) | (next >> 8);

	/* Compute bit-by-bit */
	for (counter = 0; counter < 16; ++counter) {
		/* is high CRC bit set? */
		if ((cur & 0x80000000) == 0) high_crc_set = 0;
		else high_crc_set = 1;

		cur = cur << 1;

		if ((next & 0x0001) == 0) low_data_set = 0;
		else low_data_set = 1;

		next = next >> 1;

		/* do the XOR */
		if (high_crc_set ^ low_data_set) cur = cur ^ ENET_CRCPOLY;
	}
	return cur;
}

static unsigned int
bmac_crc(unsigned short *address)
{
	unsigned int newcrc;

	XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
	newcrc = crc416(0xffffffff, *address);	/* address bits 47 - 32 */
	newcrc = crc416(newcrc, address[1]);	/* address bits 31 - 16 */
	newcrc = crc416(newcrc, address[2]);	/* address bits 15 - 0  */

	return(newcrc);
}

/*
 * Add requested mcast addr to BMac's hash table filter.
 *
 */

static void
bmac_addhash(struct bmac_data *bp, unsigned char *addr)
{
	unsigned int	 crc;
	unsigned short	 mask;

	if (!(*addr)) return;
	crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
	crc = reverse6[crc];	/* Hyperfast bit-reversing algorithm */
	if (bp->hash_use_count[crc]++) return;	    /* This bit is already set */
	mask = crc % 16;
	mask = (unsigned short)1 << mask;
	bp->hash_table_mask[crc/16] |= mask;
}

static void
bmac_removehash(struct bmac_data *bp, unsigned char *addr)
{
	unsigned int crc;
	unsigned short mask;

	/* Now, delete the address from the filter copy, as indicated */
	crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
	crc = reverse6[crc];	/* Hyperfast bit-reversing algorithm */
	if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */
	if (--bp->hash_use_count[crc]) return;	/* That bit is still in use */
	mask = crc % 16;
	mask = ((unsigned short)1 << mask) ^ 0xffff; /* To turn off bit */
	bp->hash_table_mask[crc/16] &= mask;
}
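/*
 * Illustrative helper (not built): how a 6-bit hash value selects one
 * bit in the four 16-bit filter words (BHASH3 holds bits 15-0, BHASH0
 * bits 63-48, matching bmac_update_hash_table_mask() below).
 */
#if 0
static int bmac_hash_bit_set(struct bmac_data *bp, unsigned int crc6)
{
	return (bp->hash_table_mask[crc6 / 16] >> (crc6 % 16)) & 1;
}
#endif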
/*
 * Sync the adapter with the software copy of the multicast mask
 * (logical address filter).
 */

static void
bmac_rx_off(struct net_device *dev)
{
	unsigned short rx_cfg;

	rx_cfg = bmread(dev, RXCFG);
	rx_cfg &= ~RxMACEnable;
	bmwrite(dev, RXCFG, rx_cfg);
	do {
		rx_cfg = bmread(dev, RXCFG);
	} while (rx_cfg & RxMACEnable);
}

unsigned short
bmac_rx_on(struct net_device *dev, int hash_enable, int promisc_enable)
{
	unsigned short rx_cfg;

	rx_cfg = bmread(dev, RXCFG);
	rx_cfg |= RxMACEnable;
	if (hash_enable) rx_cfg |= RxHashFilterEnable;
	else rx_cfg &= ~RxHashFilterEnable;
	if (promisc_enable) rx_cfg |= RxPromiscEnable;
	else rx_cfg &= ~RxPromiscEnable;
	bmwrite(dev, RXRST, RxResetValue);
	bmwrite(dev, RXFIFOCSR, 0);	/* first disable rxFIFO */
	bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
	bmwrite(dev, RXCFG, rx_cfg );
	return rx_cfg;
}

static void
bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp)
{
	bmwrite(dev, BHASH3, bp->hash_table_mask[0]);	/* bits 15 - 0 */
	bmwrite(dev, BHASH2, bp->hash_table_mask[1]);	/* bits 31 - 16 */
	bmwrite(dev, BHASH1, bp->hash_table_mask[2]);	/* bits 47 - 32 */
	bmwrite(dev, BHASH0, bp->hash_table_mask[3]);	/* bits 63 - 48 */
}

#if 0
static void
bmac_add_multi(struct net_device *dev,
	       struct bmac_data *bp, unsigned char *addr)
{
	/* XXDEBUG(("bmac: enter bmac_add_multi\n")); */
	bmac_addhash(bp, addr);
	bmac_rx_off(dev);
	bmac_update_hash_table_mask(dev, bp);
	bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
	/* XXDEBUG(("bmac: exit bmac_add_multi\n")); */
}

static void
bmac_remove_multi(struct net_device *dev,
		  struct bmac_data *bp, unsigned char *addr)
{
	bmac_removehash(bp, addr);
	bmac_rx_off(dev);
	bmac_update_hash_table_mask(dev, bp);
	bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
}
#endif
/* Set or clear the multicast filter for this adaptor.
    num_addrs == -1	Promiscuous mode, receive all packets
    num_addrs == 0	Normal mode, clear multicast list
    num_addrs > 0	Multicast mode, receive normal and MC packets, and do
			best-effort filtering.
 */
static void bmac_set_multicast(struct net_device *dev)
{
	struct dev_mc_list *dmi;
	struct bmac_data *bp = netdev_priv(dev);
	int num_addrs = dev->mc_count;
	unsigned short rx_cfg;
	int i;

	if (bp->sleeping)
		return;

	XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));

	if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
		for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff;
		bmac_update_hash_table_mask(dev, bp);
		rx_cfg = bmac_rx_on(dev, 1, 0);
		XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n", rx_cfg));
	} else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) {
		rx_cfg = bmread(dev, RXCFG);
		rx_cfg |= RxPromiscEnable;
		bmwrite(dev, RXCFG, rx_cfg);
		rx_cfg = bmac_rx_on(dev, 0, 1);
		XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg));
	} else {
		for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
		for (i=0; i<64; i++) bp->hash_use_count[i] = 0;
		if (num_addrs == 0) {
			rx_cfg = bmac_rx_on(dev, 0, 0);
			XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
		} else {
			for (dmi=dev->mc_list; dmi!=NULL; dmi=dmi->next)
				bmac_addhash(bp, dmi->dmi_addr);
			bmac_update_hash_table_mask(dev, bp);
			rx_cfg = bmac_rx_on(dev, 1, 0);
			XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
		}
	}
	/* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */
}
#else /* ifdef SUNHME_MULTICAST */

/* The version of set_multicast below was lifted from sunhme.c */

static void bmac_set_multicast(struct net_device *dev)
{
	struct dev_mc_list *dmi = dev->mc_list;
	char *addrs;
	int i;
	unsigned short rx_cfg;
	u32 crc;

	if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
		bmwrite(dev, BHASH0, 0xffff);
		bmwrite(dev, BHASH1, 0xffff);
		bmwrite(dev, BHASH2, 0xffff);
		bmwrite(dev, BHASH3, 0xffff);
	} else if(dev->flags & IFF_PROMISC) {
		rx_cfg = bmread(dev, RXCFG);
		rx_cfg |= RxPromiscEnable;
		bmwrite(dev, RXCFG, rx_cfg);
	} else {
		u16 hash_table[4];

		rx_cfg = bmread(dev, RXCFG);
		rx_cfg &= ~RxPromiscEnable;
		bmwrite(dev, RXCFG, rx_cfg);

		for(i = 0; i < 4; i++) hash_table[i] = 0;

		for(i = 0; i < dev->mc_count; i++) {
			addrs = dmi->dmi_addr;
			dmi = dmi->next;

			if(!(*addrs & 1))
				continue;

			crc = ether_crc_le(6, addrs);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
		bmwrite(dev, BHASH0, hash_table[0]);
		bmwrite(dev, BHASH1, hash_table[1]);
		bmwrite(dev, BHASH2, hash_table[2]);
		bmwrite(dev, BHASH3, hash_table[3]);
	}
}
#endif /* SUNHME_MULTICAST */
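/*
 * Note (generic Ethernet, not BMAC-specific): only group addresses --
 * those with the least significant bit of the first octet set -- can
 * match the hash filter, which is why the sunhme-derived loop above
 * skips any list entry where (*addrs & 1) is clear.
 */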
static int miscintcount;

static irqreturn_t bmac_misc_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct bmac_data *bp = netdev_priv(dev);
	unsigned int status = bmread(dev, STATUS);
	if (miscintcount++ < 10) {
		XXDEBUG(("bmac_misc_intr\n"));
	}
	/* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */
	/*     bmac_txdma_intr_inner(irq, dev_id); */
	/*   if (status & FrameReceived) bp->stats.rx_dropped++; */
	if (status & RxErrorMask) bp->stats.rx_errors++;
	if (status & RxCRCCntExp) bp->stats.rx_crc_errors++;
	if (status & RxLenCntExp) bp->stats.rx_length_errors++;
	if (status & RxOverFlow) bp->stats.rx_over_errors++;
	if (status & RxAlignCntExp) bp->stats.rx_frame_errors++;
	/*   if (status & FrameSent) bp->stats.tx_dropped++; */
	if (status & TxErrorMask) bp->stats.tx_errors++;
	if (status & TxUnderrun) bp->stats.tx_fifo_errors++;
	if (status & TxNormalCollExp) bp->stats.collisions++;
	return IRQ_HANDLED;
}

/*
 * Procedure for reading EEPROM
 */
#define SROMAddressLength	5
#define DataInOn		0x0008
#define DataInOff		0x0000
#define Clk			0x0002
#define ChipSelect		0x0001
#define SDIShiftCount		3
#define SD0ShiftCount		2
#define	DelayValue		1000	/* number of microseconds */
#define SROMStartOffset		10	/* this is in words */
#define SROMReadCount		3	/* number of words to read from SROM */
#define SROMAddressBits		6
#define EnetAddressOffset	20

static unsigned char
bmac_clock_out_bit(struct net_device *dev)
{
	unsigned short data;
	unsigned short val;

	bmwrite(dev, SROMCSR, ChipSelect | Clk);
	udelay(DelayValue);

	data = bmread(dev, SROMCSR);
	udelay(DelayValue);
	val = (data >> SD0ShiftCount) & 1;

	bmwrite(dev, SROMCSR, ChipSelect);
	udelay(DelayValue);

	return val;
}

static void
bmac_clock_in_bit(struct net_device *dev, unsigned int val)
{
	unsigned short data;

	if (val != 0 && val != 1) return;

	data = (val << SDIShiftCount);
	bmwrite(dev, SROMCSR, data | ChipSelect );
	udelay(DelayValue);

	bmwrite(dev, SROMCSR, data | ChipSelect | Clk );
	udelay(DelayValue);

	bmwrite(dev, SROMCSR, data | ChipSelect);
	udelay(DelayValue);
}

static void
reset_and_select_srom(struct net_device *dev)
{
	/* first reset */
	bmwrite(dev, SROMCSR, 0);
	udelay(DelayValue);

	/* send it the read command (110) */
	bmac_clock_in_bit(dev, 1);
	bmac_clock_in_bit(dev, 1);
	bmac_clock_in_bit(dev, 0);
}

static unsigned short
read_srom(struct net_device *dev, unsigned int addr, unsigned int addr_len)
{
	unsigned short data, val;
	int i;

	/* send out the address we want to read from */
	for (i = 0; i < addr_len; i++) {
		val = addr >> (addr_len-i-1);
		bmac_clock_in_bit(dev, val & 1);
	}

	/* Now read in the 16-bit data */
	data = 0;
	for (i = 0; i < 16; i++) {
		val = bmac_clock_out_bit(dev);
		data <<= 1;
		data |= val;
	}
	bmwrite(dev, SROMCSR, 0);

	return data;
}
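/*
 * Usage sketch (illustrative): reading one 16-bit SROM word combines
 * the helpers above -- reset_and_select_srom() clocks in the start bit
 * and read opcode (110), then read_srom() shifts out the address bits
 * and shifts the data back in, exactly as bmac_get_station_address()
 * does below.
 */
#if 0
	reset_and_select_srom(dev);
	data = read_srom(dev, EnetAddressOffset/2, SROMAddressBits);
#endif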
/*
 * It looks like Cogent and SMC use different methods for calculating
 * checksums. What a pain..
 */

static int
bmac_verify_checksum(struct net_device *dev)
{
	unsigned short data, storedCS;

	reset_and_select_srom(dev);
	data = read_srom(dev, 3, SROMAddressBits);
	storedCS = ((data >> 8) & 0x0ff) | ((data << 8) & 0xff00);

	return 0;
}


static void
bmac_get_station_address(struct net_device *dev, unsigned char *ea)
{
	int i;
	unsigned short data;

	for (i = 0; i < 6; i++)
	{
		reset_and_select_srom(dev);
		data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
		ea[2*i]   = bitrev8(data & 0x0ff);
		ea[2*i+1] = bitrev8((data >> 8) & 0x0ff);
	}
}
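/*
 * Worked example (illustrative numbers): the SROM stores each address
 * byte bit-reversed, so a word that reads back as 0xA1B2 yields
 * ea[2*i] = bitrev8(0xB2) = 0x4D and ea[2*i+1] = bitrev8(0xA1) = 0x85.
 */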
static void bmac_reset_and_enable(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	unsigned long flags;
	struct sk_buff *skb;
	unsigned char *data;

	spin_lock_irqsave(&bp->lock, flags);
	bmac_enable_and_reset_chip(dev);
	bmac_init_tx_ring(bp);
	bmac_init_rx_ring(bp);
	bmac_init_chip(dev);
	bmac_start_chip(dev);
	bmwrite(dev, INTDISABLE, EnableNormal);
	bp->sleeping = 0;

	/*
	 * It seems that the bmac can't receive until it's transmitted
	 * a packet.  So we give it a dummy packet to transmit.
	 */
	skb = dev_alloc_skb(ETHERMINPACKET);
	if (skb != NULL) {
		data = skb_put(skb, ETHERMINPACKET);
		memset(data, 0, ETHERMINPACKET);
		memcpy(data, dev->dev_addr, 6);
		memcpy(data+6, dev->dev_addr, 6);
		bmac_transmit_packet(skb, dev);
	}
	spin_unlock_irqrestore(&bp->lock, flags);
}

static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
	int j, rev, ret;
	struct bmac_data *bp;
	const unsigned char *prop_addr;
	unsigned char addr[6];
	struct net_device *dev;
	int is_bmac_plus = ((int)match->data) != 0;

	if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
		printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n");
		return -ENODEV;
	}
	prop_addr = get_property(macio_get_of_node(mdev), "mac-address", NULL);
	if (prop_addr == NULL) {
		prop_addr = get_property(macio_get_of_node(mdev),
					 "local-mac-address", NULL);
		if (prop_addr == NULL) {
			printk(KERN_ERR "BMAC: Can't get mac-address\n");
			return -ENODEV;
		}
	}
	memcpy(addr, prop_addr, sizeof(addr));

	dev = alloc_etherdev(PRIV_BYTES);
	if (!dev) {
		printk(KERN_ERR "BMAC: alloc_etherdev failed, out of memory\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &mdev->ofdev.dev);
	macio_set_drvdata(mdev, dev);

	bp->mdev = mdev;
	spin_lock_init(&bp->lock);

	if (macio_request_resources(mdev, "bmac")) {
		printk(KERN_ERR "BMAC: can't request IO resource !\n");
		goto out_free;
	}

	dev->base_addr = (unsigned long)
		ioremap(macio_resource_start(mdev, 0), macio_resource_len(mdev, 0));
	if (dev->base_addr == 0)
		goto out_release;

	dev->irq = macio_irq(mdev, 0);

	bmac_enable_and_reset_chip(dev);
	bmwrite(dev, INTDISABLE, DisableAll);

	rev = addr[0] == 0 && addr[1] == 0xA0;
	for (j = 0; j < 6; ++j)
		dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];

	/* Enable chip without interrupts for now */
	bmac_enable_and_reset_chip(dev);
	bmwrite(dev, INTDISABLE, DisableAll);

	dev->open = bmac_open;
	dev->stop = bmac_close;
	dev->hard_start_xmit = bmac_output;
	dev->get_stats = bmac_stats;
	dev->set_multicast_list = bmac_set_multicast;
	dev->set_mac_address = bmac_set_address;

	bmac_get_station_address(dev, addr);
	if (bmac_verify_checksum(dev) != 0)
		goto err_out_iounmap;

	bp->is_bmac_plus = is_bmac_plus;
	bp->tx_dma = ioremap(macio_resource_start(mdev, 1), macio_resource_len(mdev, 1));
	if (!bp->tx_dma)
		goto err_out_iounmap;
	bp->tx_dma_intr = macio_irq(mdev, 1);
	bp->rx_dma = ioremap(macio_resource_start(mdev, 2), macio_resource_len(mdev, 2));
	if (!bp->rx_dma)
		goto err_out_iounmap_tx;
	bp->rx_dma_intr = macio_irq(mdev, 2);

	bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1);
	bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1;

	bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1);
	skb_queue_head_init(bp->queue);

	init_timer(&bp->tx_timeout);

	ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
	if (ret) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
		goto err_out_iounmap_rx;
	}
	ret = request_irq(bp->tx_dma_intr, bmac_txdma_intr, 0, "BMAC-txdma", dev);
	if (ret) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", bp->tx_dma_intr);
		goto err_out_irq0;
	}
	ret = request_irq(bp->rx_dma_intr, bmac_rxdma_intr, 0, "BMAC-rxdma", dev);
	if (ret) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", bp->rx_dma_intr);
		goto err_out_irq1;
	}

	/* Mask chip interrupts and disable chip, will be
	 * re-enabled on open()
	 */
	disable_irq(dev->irq);
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);

	if (register_netdev(dev) != 0) {
		printk(KERN_ERR "BMAC: Ethernet registration failed\n");
		goto err_out_irq2;
	}

	printk(KERN_INFO "%s: BMAC%s at", dev->name, (is_bmac_plus? "+": ""));
	for (j = 0; j < 6; ++j)
		printk("%c%.2x", (j? ':': ' '), dev->dev_addr[j]);
	XXDEBUG((", base_addr=%#0lx", dev->base_addr));
	printk("\n");

	return 0;

err_out_irq2:
	free_irq(bp->rx_dma_intr, dev);
err_out_irq1:
	free_irq(bp->tx_dma_intr, dev);
err_out_irq0:
	free_irq(dev->irq, dev);
err_out_iounmap_rx:
	iounmap(bp->rx_dma);
err_out_iounmap_tx:
	iounmap(bp->tx_dma);
err_out_iounmap:
	iounmap((void __iomem *)dev->base_addr);
out_release:
	macio_release_resources(mdev);
out_free:
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
	free_netdev(dev);

	return -ENODEV;
}
static int bmac_open(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	/* XXDEBUG(("bmac: enter open\n")); */
	/* reset the chip */
	bp->opened = 1;
	bmac_reset_and_enable(dev);
	enable_irq(dev->irq);
	return 0;
}

static int bmac_close(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
	unsigned short config;
	int i;

	bp->sleeping = 1;

	/* disable rx and tx */
	config = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, (config & ~RxMACEnable));

	config = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, (config & ~TxMACEnable));

	bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */

	/* disable rx and tx dma */
	st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
	st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */

	/* free some skb's */
	XXDEBUG(("bmac: free rx bufs\n"));
	for (i=0; i<N_RX_RING; i++) {
		if (bp->rx_bufs[i] != NULL) {
			dev_kfree_skb(bp->rx_bufs[i]);
			bp->rx_bufs[i] = NULL;
		}
	}
	XXDEBUG(("bmac: free tx bufs\n"));
	for (i = 0; i<N_TX_RING; i++) {
		if (bp->tx_bufs[i] != NULL) {
			dev_kfree_skb(bp->tx_bufs[i]);
			bp->tx_bufs[i] = NULL;
		}
	}
	XXDEBUG(("bmac: all bufs freed\n"));

	bp->opened = 0;
	disable_irq(dev->irq);
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);

	return 0;
}

static void
bmac_start(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	int i;
	struct sk_buff *skb;
	unsigned long flags;

	if (bp->sleeping)
		return;

	spin_lock_irqsave(&bp->lock, flags);
	while (1) {
		i = bp->tx_fill + 1;
		if (i >= N_TX_RING)
			i = 0;
		if (i == bp->tx_empty)
			break;
		skb = skb_dequeue(bp->queue);
		if (skb == NULL)
			break;
		bmac_transmit_packet(skb, dev);
	}
	spin_unlock_irqrestore(&bp->lock, flags);
}

static int
bmac_output(struct sk_buff *skb, struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	skb_queue_tail(bp->queue, skb);
	bmac_start(dev);
	return 0;
}
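/*
 * Note: bmac_output() is the hard_start_xmit hook; it unconditionally
 * queues the skb and lets bmac_start() feed the DBDMA ring as slots
 * free up.  Returning 0 (NETDEV_TX_OK) is correct even when the ring
 * is momentarily full, since the driver has taken ownership of the skb.
 */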
static void bmac_tx_timeout(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_cmd *cp;
	unsigned long flags;
	unsigned short config, oldConfig;
	int i;

	XXDEBUG(("bmac: tx_timeout called\n"));
	spin_lock_irqsave(&bp->lock, flags);
	bp->timeout_active = 0;

	/* update various counters */
/*     	bmac_handle_misc_intrs(bp, 0); */

	cp = &bp->tx_cmds[bp->tx_empty];
/*	XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */
/* 	   ld_le32(&td->status), ld_le16(&cp->xfer_status), bp->tx_bad_runt, */
/* 	   mb->pr, mb->xmtfs, mb->fifofc)); */

	/* turn off both tx and rx and reset the chip */
	config = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, (config & ~RxMACEnable));
	config = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, (config & ~TxMACEnable));
	out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
	printk(KERN_ERR "bmac: transmit timeout - resetting\n");
	bmac_enable_and_reset_chip(dev);

	/* restart rx dma */
	cp = bus_to_virt(ld_le32(&rd->cmdptr));
	out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
	out_le16(&cp->xfer_status, 0);
	out_le32(&rd->cmdptr, virt_to_bus(cp));
	out_le32(&rd->control, DBDMA_SET(RUN|WAKE));

	/* fix up the transmit side */
	XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n",
		 bp->tx_empty, bp->tx_fill, bp->tx_fullup));
	i = bp->tx_empty;
	++bp->stats.tx_errors;
	if (i != bp->tx_fill) {
		dev_kfree_skb(bp->tx_bufs[i]);
		bp->tx_bufs[i] = NULL;
		if (++i >= N_TX_RING) i = 0;
		bp->tx_empty = i;
	}
	bp->tx_fullup = 0;
	netif_wake_queue(dev);
	if (i != bp->tx_fill) {
		cp = &bp->tx_cmds[i];
		out_le16(&cp->xfer_status, 0);
		out_le16(&cp->command, OUTPUT_LAST);
		out_le32(&td->cmdptr, virt_to_bus(cp));
		out_le32(&td->control, DBDMA_SET(RUN));
		/* 	bmac_set_timeout(dev); */
		XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i));
	}

	/* turn it back on */
	oldConfig = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
	oldConfig = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, oldConfig | TxMACEnable );

	spin_unlock_irqrestore(&bp->lock, flags);
}

#if 0
static void dump_dbdma(volatile struct dbdma_cmd *cp,int count)
{
	int i,*ip;

	for (i=0;i< count;i++) {
		ip = (int*)(cp+i);

		printk("dbdma req 0x%x addr 0x%x baddr 0x%x xfer/res 0x%x\n",
		       ld_le32(ip+0),
		       ld_le32(ip+1),
		       ld_le32(ip+2),
		       ld_le32(ip+3));
	}

}
#endif

#if 0
static int
bmac_proc_info(char *buffer, char **start, off_t offset, int length)
{
	int len = 0;
	off_t pos   = 0;
	off_t begin = 0;
	int i;

	if (bmac_devs == NULL)
		return (-ENOSYS);

	len += sprintf(buffer, "BMAC counters & registers\n");

	for (i = 0; i<N_REG_ENTRIES; i++) {
		len += sprintf(buffer + len, "%s: %#08x\n",
			       reg_entries[i].name,
			       bmread(bmac_devs, reg_entries[i].reg_offset));
		pos = begin + len;

		if (pos < offset) {
			len = 0;
			begin = pos;
		}

		if (pos > offset+length) break;
	}

	*start = buffer + (offset - begin);
	len -= (offset - begin);

	if (len > length) len = length;

	return len;
}
#endif
static int __devexit bmac_remove(struct macio_dev *mdev)
{
	struct net_device *dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);

	unregister_netdev(dev);

	free_irq(dev->irq, dev);
	free_irq(bp->tx_dma_intr, dev);
	free_irq(bp->rx_dma_intr, dev);

	iounmap((void __iomem *)dev->base_addr);
	iounmap(bp->tx_dma);
	iounmap(bp->rx_dma);

	macio_release_resources(mdev);

	free_netdev(dev);

	return 0;
}

static struct of_device_id bmac_match[] =
{
	{
	.name		= "bmac",
	.data		= (void *)0,
	},
	{
	.type		= "network",
	.compatible	= "bmac+",
	.data		= (void *)1,
	},
	{},
};
MODULE_DEVICE_TABLE (of, bmac_match);

static struct macio_driver bmac_driver =
{
	.name		= "bmac",
	.match_table	= bmac_match,
	.probe		= bmac_probe,
	.remove		= bmac_remove,
#ifdef CONFIG_PM
	.suspend	= bmac_suspend,
	.resume		= bmac_resume,
#endif
};


static int __init bmac_init(void)
{
	if (bmac_emergency_rxbuf == NULL) {
		bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL);
		if (bmac_emergency_rxbuf == NULL) {
			printk(KERN_ERR "BMAC: can't allocate emergency RX buffer\n");
			return -ENOMEM;
		}
	}

	return macio_register_driver(&bmac_driver);
}

static void __exit bmac_exit(void)
{
	macio_unregister_driver(&bmac_driver);

	kfree(bmac_emergency_rxbuf);
	bmac_emergency_rxbuf = NULL;
}

MODULE_AUTHOR("Randy Gobbel/Paul Mackerras");
MODULE_DESCRIPTION("PowerMac BMAC ethernet driver.");
MODULE_LICENSE("GPL");

module_init(bmac_init);
module_exit(bmac_exit);