at v2.6.26-rc7 1694 lines 42 kB view raw
1/* 2 * Network device driver for the BMAC ethernet controller on 3 * Apple Powermacs. Assumes it's under a DBDMA controller. 4 * 5 * Copyright (C) 1998 Randy Gobbel. 6 * 7 * May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to 8 * dynamic procfs inode. 9 */ 10#include <linux/module.h> 11#include <linux/kernel.h> 12#include <linux/netdevice.h> 13#include <linux/etherdevice.h> 14#include <linux/delay.h> 15#include <linux/string.h> 16#include <linux/timer.h> 17#include <linux/proc_fs.h> 18#include <linux/init.h> 19#include <linux/spinlock.h> 20#include <linux/crc32.h> 21#include <linux/bitrev.h> 22#include <linux/ethtool.h> 23#include <asm/prom.h> 24#include <asm/dbdma.h> 25#include <asm/io.h> 26#include <asm/page.h> 27#include <asm/pgtable.h> 28#include <asm/machdep.h> 29#include <asm/pmac_feature.h> 30#include <asm/macio.h> 31#include <asm/irq.h> 32 33#include "bmac.h" 34 35#define trunc_page(x) ((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1)))) 36#define round_page(x) trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1))) 37 38/* 39 * CRC polynomial - used in working out multicast filter bits. 40 */ 41#define ENET_CRCPOLY 0x04c11db7 42 43/* switch to use multicast code lifted from sunhme driver */ 44#define SUNHME_MULTICAST 45 46#define N_RX_RING 64 47#define N_TX_RING 32 48#define MAX_TX_ACTIVE 1 49#define ETHERCRC 4 50#define ETHERMINPACKET 64 51#define ETHERMTU 1500 52#define RX_BUFLEN (ETHERMTU + 14 + ETHERCRC + 2) 53#define TX_TIMEOUT HZ /* 1 second */ 54 55/* Bits in transmit DMA status */ 56#define TX_DMA_ERR 0x80 57 58#define XXDEBUG(args) 59 60struct bmac_data { 61 /* volatile struct bmac *bmac; */ 62 struct sk_buff_head *queue; 63 volatile struct dbdma_regs __iomem *tx_dma; 64 int tx_dma_intr; 65 volatile struct dbdma_regs __iomem *rx_dma; 66 int rx_dma_intr; 67 volatile struct dbdma_cmd *tx_cmds; /* xmit dma command list */ 68 volatile struct dbdma_cmd *rx_cmds; /* recv dma command list */ 69 struct macio_dev *mdev; 70 int is_bmac_plus; 71 struct sk_buff *rx_bufs[N_RX_RING]; 72 int rx_fill; 73 int rx_empty; 74 struct sk_buff *tx_bufs[N_TX_RING]; 75 int tx_fill; 76 int tx_empty; 77 unsigned char tx_fullup; 78 struct timer_list tx_timeout; 79 int timeout_active; 80 int sleeping; 81 int opened; 82 unsigned short hash_use_count[64]; 83 unsigned short hash_table_mask[4]; 84 spinlock_t lock; 85}; 86 87#if 0 /* Move that to ethtool */ 88 89typedef struct bmac_reg_entry { 90 char *name; 91 unsigned short reg_offset; 92} bmac_reg_entry_t; 93 94#define N_REG_ENTRIES 31 95 96static bmac_reg_entry_t reg_entries[N_REG_ENTRIES] = { 97 {"MEMADD", MEMADD}, 98 {"MEMDATAHI", MEMDATAHI}, 99 {"MEMDATALO", MEMDATALO}, 100 {"TXPNTR", TXPNTR}, 101 {"RXPNTR", RXPNTR}, 102 {"IPG1", IPG1}, 103 {"IPG2", IPG2}, 104 {"ALIMIT", ALIMIT}, 105 {"SLOT", SLOT}, 106 {"PALEN", PALEN}, 107 {"PAPAT", PAPAT}, 108 {"TXSFD", TXSFD}, 109 {"JAM", JAM}, 110 {"TXCFG", TXCFG}, 111 {"TXMAX", TXMAX}, 112 {"TXMIN", TXMIN}, 113 {"PAREG", PAREG}, 114 {"DCNT", DCNT}, 115 {"NCCNT", NCCNT}, 116 {"NTCNT", NTCNT}, 117 {"EXCNT", EXCNT}, 118 {"LTCNT", LTCNT}, 119 {"TXSM", TXSM}, 120 {"RXCFG", RXCFG}, 121 {"RXMAX", RXMAX}, 122 {"RXMIN", RXMIN}, 123 {"FRCNT", FRCNT}, 124 {"AECNT", AECNT}, 125 {"FECNT", FECNT}, 126 {"RXSM", RXSM}, 127 {"RXCV", RXCV} 128}; 129 130#endif 131 132static unsigned char *bmac_emergency_rxbuf; 133 134/* 135 * Number of bytes of private data per BMAC: allow enough for 136 * the rx and tx dma commands plus a branch dma command each, 137 * and another 16 bytes to allow us to align the dma command 138 * buffers on a 16 byte boundary. 139 */ 140#define PRIV_BYTES (sizeof(struct bmac_data) \ 141 + (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \ 142 + sizeof(struct sk_buff_head)) 143 144static int bmac_open(struct net_device *dev); 145static int bmac_close(struct net_device *dev); 146static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev); 147static void bmac_set_multicast(struct net_device *dev); 148static void bmac_reset_and_enable(struct net_device *dev); 149static void bmac_start_chip(struct net_device *dev); 150static void bmac_init_chip(struct net_device *dev); 151static void bmac_init_registers(struct net_device *dev); 152static void bmac_enable_and_reset_chip(struct net_device *dev); 153static int bmac_set_address(struct net_device *dev, void *addr); 154static irqreturn_t bmac_misc_intr(int irq, void *dev_id); 155static irqreturn_t bmac_txdma_intr(int irq, void *dev_id); 156static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id); 157static void bmac_set_timeout(struct net_device *dev); 158static void bmac_tx_timeout(unsigned long data); 159static int bmac_output(struct sk_buff *skb, struct net_device *dev); 160static void bmac_start(struct net_device *dev); 161 162#define DBDMA_SET(x) ( ((x) | (x) << 16) ) 163#define DBDMA_CLEAR(x) ( (x) << 16) 164 165static inline void 166dbdma_st32(volatile __u32 __iomem *a, unsigned long x) 167{ 168 __asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory"); 169 return; 170} 171 172static inline unsigned long 173dbdma_ld32(volatile __u32 __iomem *a) 174{ 175 __u32 swap; 176 __asm__ volatile ("lwbrx %0,0,%1" : "=r" (swap) : "r" (a)); 177 return swap; 178} 179 180static void 181dbdma_continue(volatile struct dbdma_regs __iomem *dmap) 182{ 183 dbdma_st32(&dmap->control, 184 DBDMA_SET(RUN|WAKE) | DBDMA_CLEAR(PAUSE|DEAD)); 185 eieio(); 186} 187 188static void 189dbdma_reset(volatile struct dbdma_regs __iomem *dmap) 190{ 191 dbdma_st32(&dmap->control, 192 DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN)); 193 eieio(); 194 while (dbdma_ld32(&dmap->status) & RUN) 195 eieio(); 196} 197 198static void 199dbdma_setcmd(volatile struct dbdma_cmd *cp, 200 unsigned short cmd, unsigned count, unsigned long addr, 201 unsigned long cmd_dep) 202{ 203 out_le16(&cp->command, cmd); 204 out_le16(&cp->req_count, count); 205 out_le32(&cp->phy_addr, addr); 206 out_le32(&cp->cmd_dep, cmd_dep); 207 out_le16(&cp->xfer_status, 0); 208 out_le16(&cp->res_count, 0); 209} 210 211static inline 212void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data ) 213{ 214 out_le16((void __iomem *)dev->base_addr + reg_offset, data); 215} 216 217 218static inline 219unsigned short bmread(struct net_device *dev, unsigned long reg_offset ) 220{ 221 return in_le16((void __iomem *)dev->base_addr + reg_offset); 222} 223 224static void 225bmac_enable_and_reset_chip(struct net_device *dev) 226{ 227 struct bmac_data *bp = netdev_priv(dev); 228 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; 229 volatile struct dbdma_regs __iomem *td = bp->tx_dma; 230 231 if (rd) 232 dbdma_reset(rd); 233 if (td) 234 dbdma_reset(td); 235 236 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 1); 237} 238 239#define MIFDELAY udelay(10) 240 241static unsigned int 242bmac_mif_readbits(struct net_device *dev, int nb) 243{ 244 unsigned int val = 0; 245 246 while (--nb >= 0) { 247 bmwrite(dev, MIFCSR, 0); 248 MIFDELAY; 249 if (bmread(dev, MIFCSR) & 8) 250 val |= 1 << nb; 251 bmwrite(dev, MIFCSR, 1); 252 MIFDELAY; 253 } 254 bmwrite(dev, MIFCSR, 0); 255 MIFDELAY; 256 bmwrite(dev, MIFCSR, 1); 257 MIFDELAY; 258 return val; 259} 260 261static void 262bmac_mif_writebits(struct net_device *dev, unsigned int val, int nb) 263{ 264 int b; 265 266 while (--nb >= 0) { 267 b = (val & (1 << nb))? 6: 4; 268 bmwrite(dev, MIFCSR, b); 269 MIFDELAY; 270 bmwrite(dev, MIFCSR, b|1); 271 MIFDELAY; 272 } 273} 274 275static unsigned int 276bmac_mif_read(struct net_device *dev, unsigned int addr) 277{ 278 unsigned int val; 279 280 bmwrite(dev, MIFCSR, 4); 281 MIFDELAY; 282 bmac_mif_writebits(dev, ~0U, 32); 283 bmac_mif_writebits(dev, 6, 4); 284 bmac_mif_writebits(dev, addr, 10); 285 bmwrite(dev, MIFCSR, 2); 286 MIFDELAY; 287 bmwrite(dev, MIFCSR, 1); 288 MIFDELAY; 289 val = bmac_mif_readbits(dev, 17); 290 bmwrite(dev, MIFCSR, 4); 291 MIFDELAY; 292 return val; 293} 294 295static void 296bmac_mif_write(struct net_device *dev, unsigned int addr, unsigned int val) 297{ 298 bmwrite(dev, MIFCSR, 4); 299 MIFDELAY; 300 bmac_mif_writebits(dev, ~0U, 32); 301 bmac_mif_writebits(dev, 5, 4); 302 bmac_mif_writebits(dev, addr, 10); 303 bmac_mif_writebits(dev, 2, 2); 304 bmac_mif_writebits(dev, val, 16); 305 bmac_mif_writebits(dev, 3, 2); 306} 307 308static void 309bmac_init_registers(struct net_device *dev) 310{ 311 struct bmac_data *bp = netdev_priv(dev); 312 volatile unsigned short regValue; 313 unsigned short *pWord16; 314 int i; 315 316 /* XXDEBUG(("bmac: enter init_registers\n")); */ 317 318 bmwrite(dev, RXRST, RxResetValue); 319 bmwrite(dev, TXRST, TxResetBit); 320 321 i = 100; 322 do { 323 --i; 324 udelay(10000); 325 regValue = bmread(dev, TXRST); /* wait for reset to clear..acknowledge */ 326 } while ((regValue & TxResetBit) && i > 0); 327 328 if (!bp->is_bmac_plus) { 329 regValue = bmread(dev, XCVRIF); 330 regValue |= ClkBit | SerialMode | COLActiveLow; 331 bmwrite(dev, XCVRIF, regValue); 332 udelay(10000); 333 } 334 335 bmwrite(dev, RSEED, (unsigned short)0x1968); 336 337 regValue = bmread(dev, XIFC); 338 regValue |= TxOutputEnable; 339 bmwrite(dev, XIFC, regValue); 340 341 bmread(dev, PAREG); 342 343 /* set collision counters to 0 */ 344 bmwrite(dev, NCCNT, 0); 345 bmwrite(dev, NTCNT, 0); 346 bmwrite(dev, EXCNT, 0); 347 bmwrite(dev, LTCNT, 0); 348 349 /* set rx counters to 0 */ 350 bmwrite(dev, FRCNT, 0); 351 bmwrite(dev, LECNT, 0); 352 bmwrite(dev, AECNT, 0); 353 bmwrite(dev, FECNT, 0); 354 bmwrite(dev, RXCV, 0); 355 356 /* set tx fifo information */ 357 bmwrite(dev, TXTH, 4); /* 4 octets before tx starts */ 358 359 bmwrite(dev, TXFIFOCSR, 0); /* first disable txFIFO */ 360 bmwrite(dev, TXFIFOCSR, TxFIFOEnable ); 361 362 /* set rx fifo information */ 363 bmwrite(dev, RXFIFOCSR, 0); /* first disable rxFIFO */ 364 bmwrite(dev, RXFIFOCSR, RxFIFOEnable ); 365 366 //bmwrite(dev, TXCFG, TxMACEnable); /* TxNeverGiveUp maybe later */ 367 bmread(dev, STATUS); /* read it just to clear it */ 368 369 /* zero out the chip Hash Filter registers */ 370 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0; 371 bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */ 372 bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */ 373 bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */ 374 bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */ 375 376 pWord16 = (unsigned short *)dev->dev_addr; 377 bmwrite(dev, MADD0, *pWord16++); 378 bmwrite(dev, MADD1, *pWord16++); 379 bmwrite(dev, MADD2, *pWord16); 380 381 bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets); 382 383 bmwrite(dev, INTDISABLE, EnableNormal); 384 385 return; 386} 387 388#if 0 389static void 390bmac_disable_interrupts(struct net_device *dev) 391{ 392 bmwrite(dev, INTDISABLE, DisableAll); 393} 394 395static void 396bmac_enable_interrupts(struct net_device *dev) 397{ 398 bmwrite(dev, INTDISABLE, EnableNormal); 399} 400#endif 401 402 403static void 404bmac_start_chip(struct net_device *dev) 405{ 406 struct bmac_data *bp = netdev_priv(dev); 407 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; 408 unsigned short oldConfig; 409 410 /* enable rx dma channel */ 411 dbdma_continue(rd); 412 413 oldConfig = bmread(dev, TXCFG); 414 bmwrite(dev, TXCFG, oldConfig | TxMACEnable ); 415 416 /* turn on rx plus any other bits already on (promiscuous possibly) */ 417 oldConfig = bmread(dev, RXCFG); 418 bmwrite(dev, RXCFG, oldConfig | RxMACEnable ); 419 udelay(20000); 420} 421 422static void 423bmac_init_phy(struct net_device *dev) 424{ 425 unsigned int addr; 426 struct bmac_data *bp = netdev_priv(dev); 427 428 printk(KERN_DEBUG "phy registers:"); 429 for (addr = 0; addr < 32; ++addr) { 430 if ((addr & 7) == 0) 431 printk("\n" KERN_DEBUG); 432 printk(" %.4x", bmac_mif_read(dev, addr)); 433 } 434 printk("\n"); 435 if (bp->is_bmac_plus) { 436 unsigned int capable, ctrl; 437 438 ctrl = bmac_mif_read(dev, 0); 439 capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1; 440 if (bmac_mif_read(dev, 4) != capable 441 || (ctrl & 0x1000) == 0) { 442 bmac_mif_write(dev, 4, capable); 443 bmac_mif_write(dev, 0, 0x1200); 444 } else 445 bmac_mif_write(dev, 0, 0x1000); 446 } 447} 448 449static void bmac_init_chip(struct net_device *dev) 450{ 451 bmac_init_phy(dev); 452 bmac_init_registers(dev); 453} 454 455#ifdef CONFIG_PM 456static int bmac_suspend(struct macio_dev *mdev, pm_message_t state) 457{ 458 struct net_device* dev = macio_get_drvdata(mdev); 459 struct bmac_data *bp = netdev_priv(dev); 460 unsigned long flags; 461 unsigned short config; 462 int i; 463 464 netif_device_detach(dev); 465 /* prolly should wait for dma to finish & turn off the chip */ 466 spin_lock_irqsave(&bp->lock, flags); 467 if (bp->timeout_active) { 468 del_timer(&bp->tx_timeout); 469 bp->timeout_active = 0; 470 } 471 disable_irq(dev->irq); 472 disable_irq(bp->tx_dma_intr); 473 disable_irq(bp->rx_dma_intr); 474 bp->sleeping = 1; 475 spin_unlock_irqrestore(&bp->lock, flags); 476 if (bp->opened) { 477 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; 478 volatile struct dbdma_regs __iomem *td = bp->tx_dma; 479 480 config = bmread(dev, RXCFG); 481 bmwrite(dev, RXCFG, (config & ~RxMACEnable)); 482 config = bmread(dev, TXCFG); 483 bmwrite(dev, TXCFG, (config & ~TxMACEnable)); 484 bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */ 485 /* disable rx and tx dma */ 486 st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */ 487 st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */ 488 /* free some skb's */ 489 for (i=0; i<N_RX_RING; i++) { 490 if (bp->rx_bufs[i] != NULL) { 491 dev_kfree_skb(bp->rx_bufs[i]); 492 bp->rx_bufs[i] = NULL; 493 } 494 } 495 for (i = 0; i<N_TX_RING; i++) { 496 if (bp->tx_bufs[i] != NULL) { 497 dev_kfree_skb(bp->tx_bufs[i]); 498 bp->tx_bufs[i] = NULL; 499 } 500 } 501 } 502 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); 503 return 0; 504} 505 506static int bmac_resume(struct macio_dev *mdev) 507{ 508 struct net_device* dev = macio_get_drvdata(mdev); 509 struct bmac_data *bp = netdev_priv(dev); 510 511 /* see if this is enough */ 512 if (bp->opened) 513 bmac_reset_and_enable(dev); 514 515 enable_irq(dev->irq); 516 enable_irq(bp->tx_dma_intr); 517 enable_irq(bp->rx_dma_intr); 518 netif_device_attach(dev); 519 520 return 0; 521} 522#endif /* CONFIG_PM */ 523 524static int bmac_set_address(struct net_device *dev, void *addr) 525{ 526 struct bmac_data *bp = netdev_priv(dev); 527 unsigned char *p = addr; 528 unsigned short *pWord16; 529 unsigned long flags; 530 int i; 531 532 XXDEBUG(("bmac: enter set_address\n")); 533 spin_lock_irqsave(&bp->lock, flags); 534 535 for (i = 0; i < 6; ++i) { 536 dev->dev_addr[i] = p[i]; 537 } 538 /* load up the hardware address */ 539 pWord16 = (unsigned short *)dev->dev_addr; 540 bmwrite(dev, MADD0, *pWord16++); 541 bmwrite(dev, MADD1, *pWord16++); 542 bmwrite(dev, MADD2, *pWord16); 543 544 spin_unlock_irqrestore(&bp->lock, flags); 545 XXDEBUG(("bmac: exit set_address\n")); 546 return 0; 547} 548 549static inline void bmac_set_timeout(struct net_device *dev) 550{ 551 struct bmac_data *bp = netdev_priv(dev); 552 unsigned long flags; 553 554 spin_lock_irqsave(&bp->lock, flags); 555 if (bp->timeout_active) 556 del_timer(&bp->tx_timeout); 557 bp->tx_timeout.expires = jiffies + TX_TIMEOUT; 558 bp->tx_timeout.function = bmac_tx_timeout; 559 bp->tx_timeout.data = (unsigned long) dev; 560 add_timer(&bp->tx_timeout); 561 bp->timeout_active = 1; 562 spin_unlock_irqrestore(&bp->lock, flags); 563} 564 565static void 566bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp) 567{ 568 void *vaddr; 569 unsigned long baddr; 570 unsigned long len; 571 572 len = skb->len; 573 vaddr = skb->data; 574 baddr = virt_to_bus(vaddr); 575 576 dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0); 577} 578 579static void 580bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp) 581{ 582 unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf; 583 584 dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN, 585 virt_to_bus(addr), 0); 586} 587 588static void 589bmac_init_tx_ring(struct bmac_data *bp) 590{ 591 volatile struct dbdma_regs __iomem *td = bp->tx_dma; 592 593 memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd)); 594 595 bp->tx_empty = 0; 596 bp->tx_fill = 0; 597 bp->tx_fullup = 0; 598 599 /* put a branch at the end of the tx command list */ 600 dbdma_setcmd(&bp->tx_cmds[N_TX_RING], 601 (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds)); 602 603 /* reset tx dma */ 604 dbdma_reset(td); 605 out_le32(&td->wait_sel, 0x00200020); 606 out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds)); 607} 608 609static int 610bmac_init_rx_ring(struct bmac_data *bp) 611{ 612 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; 613 int i; 614 struct sk_buff *skb; 615 616 /* initialize list of sk_buffs for receiving and set up recv dma */ 617 memset((char *)bp->rx_cmds, 0, 618 (N_RX_RING + 1) * sizeof(struct dbdma_cmd)); 619 for (i = 0; i < N_RX_RING; i++) { 620 if ((skb = bp->rx_bufs[i]) == NULL) { 621 bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2); 622 if (skb != NULL) 623 skb_reserve(skb, 2); 624 } 625 bmac_construct_rxbuff(skb, &bp->rx_cmds[i]); 626 } 627 628 bp->rx_empty = 0; 629 bp->rx_fill = i; 630 631 /* Put a branch back to the beginning of the receive command list */ 632 dbdma_setcmd(&bp->rx_cmds[N_RX_RING], 633 (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds)); 634 635 /* start rx dma */ 636 dbdma_reset(rd); 637 out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds)); 638 639 return 1; 640} 641 642 643static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev) 644{ 645 struct bmac_data *bp = netdev_priv(dev); 646 volatile struct dbdma_regs __iomem *td = bp->tx_dma; 647 int i; 648 649 /* see if there's a free slot in the tx ring */ 650 /* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */ 651 /* bp->tx_empty, bp->tx_fill)); */ 652 i = bp->tx_fill + 1; 653 if (i >= N_TX_RING) 654 i = 0; 655 if (i == bp->tx_empty) { 656 netif_stop_queue(dev); 657 bp->tx_fullup = 1; 658 XXDEBUG(("bmac_transmit_packet: tx ring full\n")); 659 return -1; /* can't take it at the moment */ 660 } 661 662 dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0); 663 664 bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]); 665 666 bp->tx_bufs[bp->tx_fill] = skb; 667 bp->tx_fill = i; 668 669 dev->stats.tx_bytes += skb->len; 670 671 dbdma_continue(td); 672 673 return 0; 674} 675 676static int rxintcount; 677 678static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id) 679{ 680 struct net_device *dev = (struct net_device *) dev_id; 681 struct bmac_data *bp = netdev_priv(dev); 682 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; 683 volatile struct dbdma_cmd *cp; 684 int i, nb, stat; 685 struct sk_buff *skb; 686 unsigned int residual; 687 int last; 688 unsigned long flags; 689 690 spin_lock_irqsave(&bp->lock, flags); 691 692 if (++rxintcount < 10) { 693 XXDEBUG(("bmac_rxdma_intr\n")); 694 } 695 696 last = -1; 697 i = bp->rx_empty; 698 699 while (1) { 700 cp = &bp->rx_cmds[i]; 701 stat = ld_le16(&cp->xfer_status); 702 residual = ld_le16(&cp->res_count); 703 if ((stat & ACTIVE) == 0) 704 break; 705 nb = RX_BUFLEN - residual - 2; 706 if (nb < (ETHERMINPACKET - ETHERCRC)) { 707 skb = NULL; 708 dev->stats.rx_length_errors++; 709 dev->stats.rx_errors++; 710 } else { 711 skb = bp->rx_bufs[i]; 712 bp->rx_bufs[i] = NULL; 713 } 714 if (skb != NULL) { 715 nb -= ETHERCRC; 716 skb_put(skb, nb); 717 skb->protocol = eth_type_trans(skb, dev); 718 netif_rx(skb); 719 dev->last_rx = jiffies; 720 ++dev->stats.rx_packets; 721 dev->stats.rx_bytes += nb; 722 } else { 723 ++dev->stats.rx_dropped; 724 } 725 dev->last_rx = jiffies; 726 if ((skb = bp->rx_bufs[i]) == NULL) { 727 bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2); 728 if (skb != NULL) 729 skb_reserve(bp->rx_bufs[i], 2); 730 } 731 bmac_construct_rxbuff(skb, &bp->rx_cmds[i]); 732 st_le16(&cp->res_count, 0); 733 st_le16(&cp->xfer_status, 0); 734 last = i; 735 if (++i >= N_RX_RING) i = 0; 736 } 737 738 if (last != -1) { 739 bp->rx_fill = last; 740 bp->rx_empty = i; 741 } 742 743 dbdma_continue(rd); 744 spin_unlock_irqrestore(&bp->lock, flags); 745 746 if (rxintcount < 10) { 747 XXDEBUG(("bmac_rxdma_intr done\n")); 748 } 749 return IRQ_HANDLED; 750} 751 752static int txintcount; 753 754static irqreturn_t bmac_txdma_intr(int irq, void *dev_id) 755{ 756 struct net_device *dev = (struct net_device *) dev_id; 757 struct bmac_data *bp = netdev_priv(dev); 758 volatile struct dbdma_cmd *cp; 759 int stat; 760 unsigned long flags; 761 762 spin_lock_irqsave(&bp->lock, flags); 763 764 if (txintcount++ < 10) { 765 XXDEBUG(("bmac_txdma_intr\n")); 766 } 767 768 /* del_timer(&bp->tx_timeout); */ 769 /* bp->timeout_active = 0; */ 770 771 while (1) { 772 cp = &bp->tx_cmds[bp->tx_empty]; 773 stat = ld_le16(&cp->xfer_status); 774 if (txintcount < 10) { 775 XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat)); 776 } 777 if (!(stat & ACTIVE)) { 778 /* 779 * status field might not have been filled by DBDMA 780 */ 781 if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr))) 782 break; 783 } 784 785 if (bp->tx_bufs[bp->tx_empty]) { 786 ++dev->stats.tx_packets; 787 dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]); 788 } 789 bp->tx_bufs[bp->tx_empty] = NULL; 790 bp->tx_fullup = 0; 791 netif_wake_queue(dev); 792 if (++bp->tx_empty >= N_TX_RING) 793 bp->tx_empty = 0; 794 if (bp->tx_empty == bp->tx_fill) 795 break; 796 } 797 798 spin_unlock_irqrestore(&bp->lock, flags); 799 800 if (txintcount < 10) { 801 XXDEBUG(("bmac_txdma_intr done->bmac_start\n")); 802 } 803 804 bmac_start(dev); 805 return IRQ_HANDLED; 806} 807 808#ifndef SUNHME_MULTICAST 809/* Real fast bit-reversal algorithm, 6-bit values */ 810static int reverse6[64] = { 811 0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38, 812 0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c, 813 0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a, 814 0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e, 815 0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39, 816 0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d, 817 0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b, 818 0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f 819}; 820 821static unsigned int 822crc416(unsigned int curval, unsigned short nxtval) 823{ 824 register unsigned int counter, cur = curval, next = nxtval; 825 register int high_crc_set, low_data_set; 826 827 /* Swap bytes */ 828 next = ((next & 0x00FF) << 8) | (next >> 8); 829 830 /* Compute bit-by-bit */ 831 for (counter = 0; counter < 16; ++counter) { 832 /* is high CRC bit set? */ 833 if ((cur & 0x80000000) == 0) high_crc_set = 0; 834 else high_crc_set = 1; 835 836 cur = cur << 1; 837 838 if ((next & 0x0001) == 0) low_data_set = 0; 839 else low_data_set = 1; 840 841 next = next >> 1; 842 843 /* do the XOR */ 844 if (high_crc_set ^ low_data_set) cur = cur ^ ENET_CRCPOLY; 845 } 846 return cur; 847} 848 849static unsigned int 850bmac_crc(unsigned short *address) 851{ 852 unsigned int newcrc; 853 854 XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2])); 855 newcrc = crc416(0xffffffff, *address); /* address bits 47 - 32 */ 856 newcrc = crc416(newcrc, address[1]); /* address bits 31 - 16 */ 857 newcrc = crc416(newcrc, address[2]); /* address bits 15 - 0 */ 858 859 return(newcrc); 860} 861 862/* 863 * Add requested mcast addr to BMac's hash table filter. 864 * 865 */ 866 867static void 868bmac_addhash(struct bmac_data *bp, unsigned char *addr) 869{ 870 unsigned int crc; 871 unsigned short mask; 872 873 if (!(*addr)) return; 874 crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */ 875 crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */ 876 if (bp->hash_use_count[crc]++) return; /* This bit is already set */ 877 mask = crc % 16; 878 mask = (unsigned char)1 << mask; 879 bp->hash_use_count[crc/16] |= mask; 880} 881 882static void 883bmac_removehash(struct bmac_data *bp, unsigned char *addr) 884{ 885 unsigned int crc; 886 unsigned char mask; 887 888 /* Now, delete the address from the filter copy, as indicated */ 889 crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */ 890 crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */ 891 if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */ 892 if (--bp->hash_use_count[crc]) return; /* That bit is still in use */ 893 mask = crc % 16; 894 mask = ((unsigned char)1 << mask) ^ 0xffff; /* To turn off bit */ 895 bp->hash_table_mask[crc/16] &= mask; 896} 897 898/* 899 * Sync the adapter with the software copy of the multicast mask 900 * (logical address filter). 901 */ 902 903static void 904bmac_rx_off(struct net_device *dev) 905{ 906 unsigned short rx_cfg; 907 908 rx_cfg = bmread(dev, RXCFG); 909 rx_cfg &= ~RxMACEnable; 910 bmwrite(dev, RXCFG, rx_cfg); 911 do { 912 rx_cfg = bmread(dev, RXCFG); 913 } while (rx_cfg & RxMACEnable); 914} 915 916unsigned short 917bmac_rx_on(struct net_device *dev, int hash_enable, int promisc_enable) 918{ 919 unsigned short rx_cfg; 920 921 rx_cfg = bmread(dev, RXCFG); 922 rx_cfg |= RxMACEnable; 923 if (hash_enable) rx_cfg |= RxHashFilterEnable; 924 else rx_cfg &= ~RxHashFilterEnable; 925 if (promisc_enable) rx_cfg |= RxPromiscEnable; 926 else rx_cfg &= ~RxPromiscEnable; 927 bmwrite(dev, RXRST, RxResetValue); 928 bmwrite(dev, RXFIFOCSR, 0); /* first disable rxFIFO */ 929 bmwrite(dev, RXFIFOCSR, RxFIFOEnable ); 930 bmwrite(dev, RXCFG, rx_cfg ); 931 return rx_cfg; 932} 933 934static void 935bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp) 936{ 937 bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */ 938 bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */ 939 bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */ 940 bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */ 941} 942 943#if 0 944static void 945bmac_add_multi(struct net_device *dev, 946 struct bmac_data *bp, unsigned char *addr) 947{ 948 /* XXDEBUG(("bmac: enter bmac_add_multi\n")); */ 949 bmac_addhash(bp, addr); 950 bmac_rx_off(dev); 951 bmac_update_hash_table_mask(dev, bp); 952 bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0); 953 /* XXDEBUG(("bmac: exit bmac_add_multi\n")); */ 954} 955 956static void 957bmac_remove_multi(struct net_device *dev, 958 struct bmac_data *bp, unsigned char *addr) 959{ 960 bmac_removehash(bp, addr); 961 bmac_rx_off(dev); 962 bmac_update_hash_table_mask(dev, bp); 963 bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0); 964} 965#endif 966 967/* Set or clear the multicast filter for this adaptor. 968 num_addrs == -1 Promiscuous mode, receive all packets 969 num_addrs == 0 Normal mode, clear multicast list 970 num_addrs > 0 Multicast mode, receive normal and MC packets, and do 971 best-effort filtering. 972 */ 973static void bmac_set_multicast(struct net_device *dev) 974{ 975 struct dev_mc_list *dmi; 976 struct bmac_data *bp = netdev_priv(dev); 977 int num_addrs = dev->mc_count; 978 unsigned short rx_cfg; 979 int i; 980 981 if (bp->sleeping) 982 return; 983 984 XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs)); 985 986 if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) { 987 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff; 988 bmac_update_hash_table_mask(dev, bp); 989 rx_cfg = bmac_rx_on(dev, 1, 0); 990 XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n")); 991 } else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) { 992 rx_cfg = bmread(dev, RXCFG); 993 rx_cfg |= RxPromiscEnable; 994 bmwrite(dev, RXCFG, rx_cfg); 995 rx_cfg = bmac_rx_on(dev, 0, 1); 996 XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg)); 997 } else { 998 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0; 999 for (i=0; i<64; i++) bp->hash_use_count[i] = 0; 1000 if (num_addrs == 0) { 1001 rx_cfg = bmac_rx_on(dev, 0, 0); 1002 XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg)); 1003 } else { 1004 for (dmi=dev->mc_list; dmi!=NULL; dmi=dmi->next) 1005 bmac_addhash(bp, dmi->dmi_addr); 1006 bmac_update_hash_table_mask(dev, bp); 1007 rx_cfg = bmac_rx_on(dev, 1, 0); 1008 XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg)); 1009 } 1010 } 1011 /* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */ 1012} 1013#else /* ifdef SUNHME_MULTICAST */ 1014 1015/* The version of set_multicast below was lifted from sunhme.c */ 1016 1017static void bmac_set_multicast(struct net_device *dev) 1018{ 1019 struct dev_mc_list *dmi = dev->mc_list; 1020 char *addrs; 1021 int i; 1022 unsigned short rx_cfg; 1023 u32 crc; 1024 1025 if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) { 1026 bmwrite(dev, BHASH0, 0xffff); 1027 bmwrite(dev, BHASH1, 0xffff); 1028 bmwrite(dev, BHASH2, 0xffff); 1029 bmwrite(dev, BHASH3, 0xffff); 1030 } else if(dev->flags & IFF_PROMISC) { 1031 rx_cfg = bmread(dev, RXCFG); 1032 rx_cfg |= RxPromiscEnable; 1033 bmwrite(dev, RXCFG, rx_cfg); 1034 } else { 1035 u16 hash_table[4]; 1036 1037 rx_cfg = bmread(dev, RXCFG); 1038 rx_cfg &= ~RxPromiscEnable; 1039 bmwrite(dev, RXCFG, rx_cfg); 1040 1041 for(i = 0; i < 4; i++) hash_table[i] = 0; 1042 1043 for(i = 0; i < dev->mc_count; i++) { 1044 addrs = dmi->dmi_addr; 1045 dmi = dmi->next; 1046 1047 if(!(*addrs & 1)) 1048 continue; 1049 1050 crc = ether_crc_le(6, addrs); 1051 crc >>= 26; 1052 hash_table[crc >> 4] |= 1 << (crc & 0xf); 1053 } 1054 bmwrite(dev, BHASH0, hash_table[0]); 1055 bmwrite(dev, BHASH1, hash_table[1]); 1056 bmwrite(dev, BHASH2, hash_table[2]); 1057 bmwrite(dev, BHASH3, hash_table[3]); 1058 } 1059} 1060#endif /* SUNHME_MULTICAST */ 1061 1062static int miscintcount; 1063 1064static irqreturn_t bmac_misc_intr(int irq, void *dev_id) 1065{ 1066 struct net_device *dev = (struct net_device *) dev_id; 1067 struct bmac_data *bp = netdev_priv(dev); 1068 unsigned int status = bmread(dev, STATUS); 1069 if (miscintcount++ < 10) { 1070 XXDEBUG(("bmac_misc_intr\n")); 1071 } 1072 /* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */ 1073 /* bmac_txdma_intr_inner(irq, dev_id); */ 1074 /* if (status & FrameReceived) dev->stats.rx_dropped++; */ 1075 if (status & RxErrorMask) dev->stats.rx_errors++; 1076 if (status & RxCRCCntExp) dev->stats.rx_crc_errors++; 1077 if (status & RxLenCntExp) dev->stats.rx_length_errors++; 1078 if (status & RxOverFlow) dev->stats.rx_over_errors++; 1079 if (status & RxAlignCntExp) dev->stats.rx_frame_errors++; 1080 1081 /* if (status & FrameSent) dev->stats.tx_dropped++; */ 1082 if (status & TxErrorMask) dev->stats.tx_errors++; 1083 if (status & TxUnderrun) dev->stats.tx_fifo_errors++; 1084 if (status & TxNormalCollExp) dev->stats.collisions++; 1085 return IRQ_HANDLED; 1086} 1087 1088/* 1089 * Procedure for reading EEPROM 1090 */ 1091#define SROMAddressLength 5 1092#define DataInOn 0x0008 1093#define DataInOff 0x0000 1094#define Clk 0x0002 1095#define ChipSelect 0x0001 1096#define SDIShiftCount 3 1097#define SD0ShiftCount 2 1098#define DelayValue 1000 /* number of microseconds */ 1099#define SROMStartOffset 10 /* this is in words */ 1100#define SROMReadCount 3 /* number of words to read from SROM */ 1101#define SROMAddressBits 6 1102#define EnetAddressOffset 20 1103 1104static unsigned char 1105bmac_clock_out_bit(struct net_device *dev) 1106{ 1107 unsigned short data; 1108 unsigned short val; 1109 1110 bmwrite(dev, SROMCSR, ChipSelect | Clk); 1111 udelay(DelayValue); 1112 1113 data = bmread(dev, SROMCSR); 1114 udelay(DelayValue); 1115 val = (data >> SD0ShiftCount) & 1; 1116 1117 bmwrite(dev, SROMCSR, ChipSelect); 1118 udelay(DelayValue); 1119 1120 return val; 1121} 1122 1123static void 1124bmac_clock_in_bit(struct net_device *dev, unsigned int val) 1125{ 1126 unsigned short data; 1127 1128 if (val != 0 && val != 1) return; 1129 1130 data = (val << SDIShiftCount); 1131 bmwrite(dev, SROMCSR, data | ChipSelect ); 1132 udelay(DelayValue); 1133 1134 bmwrite(dev, SROMCSR, data | ChipSelect | Clk ); 1135 udelay(DelayValue); 1136 1137 bmwrite(dev, SROMCSR, data | ChipSelect); 1138 udelay(DelayValue); 1139} 1140 1141static void 1142reset_and_select_srom(struct net_device *dev) 1143{ 1144 /* first reset */ 1145 bmwrite(dev, SROMCSR, 0); 1146 udelay(DelayValue); 1147 1148 /* send it the read command (110) */ 1149 bmac_clock_in_bit(dev, 1); 1150 bmac_clock_in_bit(dev, 1); 1151 bmac_clock_in_bit(dev, 0); 1152} 1153 1154static unsigned short 1155read_srom(struct net_device *dev, unsigned int addr, unsigned int addr_len) 1156{ 1157 unsigned short data, val; 1158 int i; 1159 1160 /* send out the address we want to read from */ 1161 for (i = 0; i < addr_len; i++) { 1162 val = addr >> (addr_len-i-1); 1163 bmac_clock_in_bit(dev, val & 1); 1164 } 1165 1166 /* Now read in the 16-bit data */ 1167 data = 0; 1168 for (i = 0; i < 16; i++) { 1169 val = bmac_clock_out_bit(dev); 1170 data <<= 1; 1171 data |= val; 1172 } 1173 bmwrite(dev, SROMCSR, 0); 1174 1175 return data; 1176} 1177 1178/* 1179 * It looks like Cogent and SMC use different methods for calculating 1180 * checksums. What a pain.. 1181 */ 1182 1183static int 1184bmac_verify_checksum(struct net_device *dev) 1185{ 1186 unsigned short data, storedCS; 1187 1188 reset_and_select_srom(dev); 1189 data = read_srom(dev, 3, SROMAddressBits); 1190 storedCS = ((data >> 8) & 0x0ff) | ((data << 8) & 0xff00); 1191 1192 return 0; 1193} 1194 1195 1196static void 1197bmac_get_station_address(struct net_device *dev, unsigned char *ea) 1198{ 1199 int i; 1200 unsigned short data; 1201 1202 for (i = 0; i < 6; i++) 1203 { 1204 reset_and_select_srom(dev); 1205 data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits); 1206 ea[2*i] = bitrev8(data & 0x0ff); 1207 ea[2*i+1] = bitrev8((data >> 8) & 0x0ff); 1208 } 1209} 1210 1211static void bmac_reset_and_enable(struct net_device *dev) 1212{ 1213 struct bmac_data *bp = netdev_priv(dev); 1214 unsigned long flags; 1215 struct sk_buff *skb; 1216 unsigned char *data; 1217 1218 spin_lock_irqsave(&bp->lock, flags); 1219 bmac_enable_and_reset_chip(dev); 1220 bmac_init_tx_ring(bp); 1221 bmac_init_rx_ring(bp); 1222 bmac_init_chip(dev); 1223 bmac_start_chip(dev); 1224 bmwrite(dev, INTDISABLE, EnableNormal); 1225 bp->sleeping = 0; 1226 1227 /* 1228 * It seems that the bmac can't receive until it's transmitted 1229 * a packet. So we give it a dummy packet to transmit. 1230 */ 1231 skb = dev_alloc_skb(ETHERMINPACKET); 1232 if (skb != NULL) { 1233 data = skb_put(skb, ETHERMINPACKET); 1234 memset(data, 0, ETHERMINPACKET); 1235 memcpy(data, dev->dev_addr, 6); 1236 memcpy(data+6, dev->dev_addr, 6); 1237 bmac_transmit_packet(skb, dev); 1238 } 1239 spin_unlock_irqrestore(&bp->lock, flags); 1240} 1241static void bmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 1242{ 1243 struct bmac_data *bp = netdev_priv(dev); 1244 strcpy(info->driver, "bmac"); 1245 strcpy(info->bus_info, bp->mdev->ofdev.dev.bus_id); 1246} 1247 1248static const struct ethtool_ops bmac_ethtool_ops = { 1249 .get_drvinfo = bmac_get_drvinfo, 1250 .get_link = ethtool_op_get_link, 1251}; 1252 1253static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_id *match) 1254{ 1255 int j, rev, ret; 1256 struct bmac_data *bp; 1257 const unsigned char *prop_addr; 1258 unsigned char addr[6]; 1259 struct net_device *dev; 1260 int is_bmac_plus = ((int)match->data) != 0; 1261 DECLARE_MAC_BUF(mac); 1262 1263 if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) { 1264 printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n"); 1265 return -ENODEV; 1266 } 1267 prop_addr = of_get_property(macio_get_of_node(mdev), 1268 "mac-address", NULL); 1269 if (prop_addr == NULL) { 1270 prop_addr = of_get_property(macio_get_of_node(mdev), 1271 "local-mac-address", NULL); 1272 if (prop_addr == NULL) { 1273 printk(KERN_ERR "BMAC: Can't get mac-address\n"); 1274 return -ENODEV; 1275 } 1276 } 1277 memcpy(addr, prop_addr, sizeof(addr)); 1278 1279 dev = alloc_etherdev(PRIV_BYTES); 1280 if (!dev) { 1281 printk(KERN_ERR "BMAC: alloc_etherdev failed, out of memory\n"); 1282 return -ENOMEM; 1283 } 1284 1285 bp = netdev_priv(dev); 1286 SET_NETDEV_DEV(dev, &mdev->ofdev.dev); 1287 macio_set_drvdata(mdev, dev); 1288 1289 bp->mdev = mdev; 1290 spin_lock_init(&bp->lock); 1291 1292 if (macio_request_resources(mdev, "bmac")) { 1293 printk(KERN_ERR "BMAC: can't request IO resource !\n"); 1294 goto out_free; 1295 } 1296 1297 dev->base_addr = (unsigned long) 1298 ioremap(macio_resource_start(mdev, 0), macio_resource_len(mdev, 0)); 1299 if (dev->base_addr == 0) 1300 goto out_release; 1301 1302 dev->irq = macio_irq(mdev, 0); 1303 1304 bmac_enable_and_reset_chip(dev); 1305 bmwrite(dev, INTDISABLE, DisableAll); 1306 1307 rev = addr[0] == 0 && addr[1] == 0xA0; 1308 for (j = 0; j < 6; ++j) 1309 dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j]; 1310 1311 /* Enable chip without interrupts for now */ 1312 bmac_enable_and_reset_chip(dev); 1313 bmwrite(dev, INTDISABLE, DisableAll); 1314 1315 dev->open = bmac_open; 1316 dev->stop = bmac_close; 1317 dev->ethtool_ops = &bmac_ethtool_ops; 1318 dev->hard_start_xmit = bmac_output; 1319 dev->set_multicast_list = bmac_set_multicast; 1320 dev->set_mac_address = bmac_set_address; 1321 1322 bmac_get_station_address(dev, addr); 1323 if (bmac_verify_checksum(dev) != 0) 1324 goto err_out_iounmap; 1325 1326 bp->is_bmac_plus = is_bmac_plus; 1327 bp->tx_dma = ioremap(macio_resource_start(mdev, 1), macio_resource_len(mdev, 1)); 1328 if (!bp->tx_dma) 1329 goto err_out_iounmap; 1330 bp->tx_dma_intr = macio_irq(mdev, 1); 1331 bp->rx_dma = ioremap(macio_resource_start(mdev, 2), macio_resource_len(mdev, 2)); 1332 if (!bp->rx_dma) 1333 goto err_out_iounmap_tx; 1334 bp->rx_dma_intr = macio_irq(mdev, 2); 1335 1336 bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1); 1337 bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1; 1338 1339 bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1); 1340 skb_queue_head_init(bp->queue); 1341 1342 init_timer(&bp->tx_timeout); 1343 1344 ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev); 1345 if (ret) { 1346 printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq); 1347 goto err_out_iounmap_rx; 1348 } 1349 ret = request_irq(bp->tx_dma_intr, bmac_txdma_intr, 0, "BMAC-txdma", dev); 1350 if (ret) { 1351 printk(KERN_ERR "BMAC: can't get irq %d\n", bp->tx_dma_intr); 1352 goto err_out_irq0; 1353 } 1354 ret = request_irq(bp->rx_dma_intr, bmac_rxdma_intr, 0, "BMAC-rxdma", dev); 1355 if (ret) { 1356 printk(KERN_ERR "BMAC: can't get irq %d\n", bp->rx_dma_intr); 1357 goto err_out_irq1; 1358 } 1359 1360 /* Mask chip interrupts and disable chip, will be 1361 * re-enabled on open() 1362 */ 1363 disable_irq(dev->irq); 1364 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); 1365 1366 if (register_netdev(dev) != 0) { 1367 printk(KERN_ERR "BMAC: Ethernet registration failed\n"); 1368 goto err_out_irq2; 1369 } 1370 1371 printk(KERN_INFO "%s: BMAC%s at %s", 1372 dev->name, (is_bmac_plus ? "+" : ""), print_mac(mac, dev->dev_addr)); 1373 XXDEBUG((", base_addr=%#0lx", dev->base_addr)); 1374 printk("\n"); 1375 1376 return 0; 1377 1378err_out_irq2: 1379 free_irq(bp->rx_dma_intr, dev); 1380err_out_irq1: 1381 free_irq(bp->tx_dma_intr, dev); 1382err_out_irq0: 1383 free_irq(dev->irq, dev); 1384err_out_iounmap_rx: 1385 iounmap(bp->rx_dma); 1386err_out_iounmap_tx: 1387 iounmap(bp->tx_dma); 1388err_out_iounmap: 1389 iounmap((void __iomem *)dev->base_addr); 1390out_release: 1391 macio_release_resources(mdev); 1392out_free: 1393 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); 1394 free_netdev(dev); 1395 1396 return -ENODEV; 1397} 1398 1399static int bmac_open(struct net_device *dev) 1400{ 1401 struct bmac_data *bp = netdev_priv(dev); 1402 /* XXDEBUG(("bmac: enter open\n")); */ 1403 /* reset the chip */ 1404 bp->opened = 1; 1405 bmac_reset_and_enable(dev); 1406 enable_irq(dev->irq); 1407 return 0; 1408} 1409 1410static int bmac_close(struct net_device *dev) 1411{ 1412 struct bmac_data *bp = netdev_priv(dev); 1413 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; 1414 volatile struct dbdma_regs __iomem *td = bp->tx_dma; 1415 unsigned short config; 1416 int i; 1417 1418 bp->sleeping = 1; 1419 1420 /* disable rx and tx */ 1421 config = bmread(dev, RXCFG); 1422 bmwrite(dev, RXCFG, (config & ~RxMACEnable)); 1423 1424 config = bmread(dev, TXCFG); 1425 bmwrite(dev, TXCFG, (config & ~TxMACEnable)); 1426 1427 bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */ 1428 1429 /* disable rx and tx dma */ 1430 st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */ 1431 st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */ 1432 1433 /* free some skb's */ 1434 XXDEBUG(("bmac: free rx bufs\n")); 1435 for (i=0; i<N_RX_RING; i++) { 1436 if (bp->rx_bufs[i] != NULL) { 1437 dev_kfree_skb(bp->rx_bufs[i]); 1438 bp->rx_bufs[i] = NULL; 1439 } 1440 } 1441 XXDEBUG(("bmac: free tx bufs\n")); 1442 for (i = 0; i<N_TX_RING; i++) { 1443 if (bp->tx_bufs[i] != NULL) { 1444 dev_kfree_skb(bp->tx_bufs[i]); 1445 bp->tx_bufs[i] = NULL; 1446 } 1447 } 1448 XXDEBUG(("bmac: all bufs freed\n")); 1449 1450 bp->opened = 0; 1451 disable_irq(dev->irq); 1452 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); 1453 1454 return 0; 1455} 1456 1457static void 1458bmac_start(struct net_device *dev) 1459{ 1460 struct bmac_data *bp = netdev_priv(dev); 1461 int i; 1462 struct sk_buff *skb; 1463 unsigned long flags; 1464 1465 if (bp->sleeping) 1466 return; 1467 1468 spin_lock_irqsave(&bp->lock, flags); 1469 while (1) { 1470 i = bp->tx_fill + 1; 1471 if (i >= N_TX_RING) 1472 i = 0; 1473 if (i == bp->tx_empty) 1474 break; 1475 skb = skb_dequeue(bp->queue); 1476 if (skb == NULL) 1477 break; 1478 bmac_transmit_packet(skb, dev); 1479 } 1480 spin_unlock_irqrestore(&bp->lock, flags); 1481} 1482 1483static int 1484bmac_output(struct sk_buff *skb, struct net_device *dev) 1485{ 1486 struct bmac_data *bp = netdev_priv(dev); 1487 skb_queue_tail(bp->queue, skb); 1488 bmac_start(dev); 1489 return 0; 1490} 1491 1492static void bmac_tx_timeout(unsigned long data) 1493{ 1494 struct net_device *dev = (struct net_device *) data; 1495 struct bmac_data *bp = netdev_priv(dev); 1496 volatile struct dbdma_regs __iomem *td = bp->tx_dma; 1497 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; 1498 volatile struct dbdma_cmd *cp; 1499 unsigned long flags; 1500 unsigned short config, oldConfig; 1501 int i; 1502 1503 XXDEBUG(("bmac: tx_timeout called\n")); 1504 spin_lock_irqsave(&bp->lock, flags); 1505 bp->timeout_active = 0; 1506 1507 /* update various counters */ 1508/* bmac_handle_misc_intrs(bp, 0); */ 1509 1510 cp = &bp->tx_cmds[bp->tx_empty]; 1511/* XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */ 1512/* ld_le32(&td->status), ld_le16(&cp->xfer_status), bp->tx_bad_runt, */ 1513/* mb->pr, mb->xmtfs, mb->fifofc)); */ 1514 1515 /* turn off both tx and rx and reset the chip */ 1516 config = bmread(dev, RXCFG); 1517 bmwrite(dev, RXCFG, (config & ~RxMACEnable)); 1518 config = bmread(dev, TXCFG); 1519 bmwrite(dev, TXCFG, (config & ~TxMACEnable)); 1520 out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD)); 1521 printk(KERN_ERR "bmac: transmit timeout - resetting\n"); 1522 bmac_enable_and_reset_chip(dev); 1523 1524 /* restart rx dma */ 1525 cp = bus_to_virt(ld_le32(&rd->cmdptr)); 1526 out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD)); 1527 out_le16(&cp->xfer_status, 0); 1528 out_le32(&rd->cmdptr, virt_to_bus(cp)); 1529 out_le32(&rd->control, DBDMA_SET(RUN|WAKE)); 1530 1531 /* fix up the transmit side */ 1532 XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n", 1533 bp->tx_empty, bp->tx_fill, bp->tx_fullup)); 1534 i = bp->tx_empty; 1535 ++dev->stats.tx_errors; 1536 if (i != bp->tx_fill) { 1537 dev_kfree_skb(bp->tx_bufs[i]); 1538 bp->tx_bufs[i] = NULL; 1539 if (++i >= N_TX_RING) i = 0; 1540 bp->tx_empty = i; 1541 } 1542 bp->tx_fullup = 0; 1543 netif_wake_queue(dev); 1544 if (i != bp->tx_fill) { 1545 cp = &bp->tx_cmds[i]; 1546 out_le16(&cp->xfer_status, 0); 1547 out_le16(&cp->command, OUTPUT_LAST); 1548 out_le32(&td->cmdptr, virt_to_bus(cp)); 1549 out_le32(&td->control, DBDMA_SET(RUN)); 1550 /* bmac_set_timeout(dev); */ 1551 XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i)); 1552 } 1553 1554 /* turn it back on */ 1555 oldConfig = bmread(dev, RXCFG); 1556 bmwrite(dev, RXCFG, oldConfig | RxMACEnable ); 1557 oldConfig = bmread(dev, TXCFG); 1558 bmwrite(dev, TXCFG, oldConfig | TxMACEnable ); 1559 1560 spin_unlock_irqrestore(&bp->lock, flags); 1561} 1562 1563#if 0 1564static void dump_dbdma(volatile struct dbdma_cmd *cp,int count) 1565{ 1566 int i,*ip; 1567 1568 for (i=0;i< count;i++) { 1569 ip = (int*)(cp+i); 1570 1571 printk("dbdma req 0x%x addr 0x%x baddr 0x%x xfer/res 0x%x\n", 1572 ld_le32(ip+0), 1573 ld_le32(ip+1), 1574 ld_le32(ip+2), 1575 ld_le32(ip+3)); 1576 } 1577 1578} 1579#endif 1580 1581#if 0 1582static int 1583bmac_proc_info(char *buffer, char **start, off_t offset, int length) 1584{ 1585 int len = 0; 1586 off_t pos = 0; 1587 off_t begin = 0; 1588 int i; 1589 1590 if (bmac_devs == NULL) 1591 return (-ENOSYS); 1592 1593 len += sprintf(buffer, "BMAC counters & registers\n"); 1594 1595 for (i = 0; i<N_REG_ENTRIES; i++) { 1596 len += sprintf(buffer + len, "%s: %#08x\n", 1597 reg_entries[i].name, 1598 bmread(bmac_devs, reg_entries[i].reg_offset)); 1599 pos = begin + len; 1600 1601 if (pos < offset) { 1602 len = 0; 1603 begin = pos; 1604 } 1605 1606 if (pos > offset+length) break; 1607 } 1608 1609 *start = buffer + (offset - begin); 1610 len -= (offset - begin); 1611 1612 if (len > length) len = length; 1613 1614 return len; 1615} 1616#endif 1617 1618static int __devexit bmac_remove(struct macio_dev *mdev) 1619{ 1620 struct net_device *dev = macio_get_drvdata(mdev); 1621 struct bmac_data *bp = netdev_priv(dev); 1622 1623 unregister_netdev(dev); 1624 1625 free_irq(dev->irq, dev); 1626 free_irq(bp->tx_dma_intr, dev); 1627 free_irq(bp->rx_dma_intr, dev); 1628 1629 iounmap((void __iomem *)dev->base_addr); 1630 iounmap(bp->tx_dma); 1631 iounmap(bp->rx_dma); 1632 1633 macio_release_resources(mdev); 1634 1635 free_netdev(dev); 1636 1637 return 0; 1638} 1639 1640static struct of_device_id bmac_match[] = 1641{ 1642 { 1643 .name = "bmac", 1644 .data = (void *)0, 1645 }, 1646 { 1647 .type = "network", 1648 .compatible = "bmac+", 1649 .data = (void *)1, 1650 }, 1651 {}, 1652}; 1653MODULE_DEVICE_TABLE (of, bmac_match); 1654 1655static struct macio_driver bmac_driver = 1656{ 1657 .name = "bmac", 1658 .match_table = bmac_match, 1659 .probe = bmac_probe, 1660 .remove = bmac_remove, 1661#ifdef CONFIG_PM 1662 .suspend = bmac_suspend, 1663 .resume = bmac_resume, 1664#endif 1665}; 1666 1667 1668static int __init bmac_init(void) 1669{ 1670 if (bmac_emergency_rxbuf == NULL) { 1671 bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL); 1672 if (bmac_emergency_rxbuf == NULL) { 1673 printk(KERN_ERR "BMAC: can't allocate emergency RX buffer\n"); 1674 return -ENOMEM; 1675 } 1676 } 1677 1678 return macio_register_driver(&bmac_driver); 1679} 1680 1681static void __exit bmac_exit(void) 1682{ 1683 macio_unregister_driver(&bmac_driver); 1684 1685 kfree(bmac_emergency_rxbuf); 1686 bmac_emergency_rxbuf = NULL; 1687} 1688 1689MODULE_AUTHOR("Randy Gobbel/Paul Mackerras"); 1690MODULE_DESCRIPTION("PowerMac BMAC ethernet driver."); 1691MODULE_LICENSE("GPL"); 1692 1693module_init(bmac_init); 1694module_exit(bmac_exit);