at v3.0 1690 lines 42 kB view raw
1/* 2 * Network device driver for the BMAC ethernet controller on 3 * Apple Powermacs. Assumes it's under a DBDMA controller. 4 * 5 * Copyright (C) 1998 Randy Gobbel. 6 * 7 * May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to 8 * dynamic procfs inode. 9 */ 10#include <linux/module.h> 11#include <linux/kernel.h> 12#include <linux/netdevice.h> 13#include <linux/etherdevice.h> 14#include <linux/delay.h> 15#include <linux/string.h> 16#include <linux/timer.h> 17#include <linux/proc_fs.h> 18#include <linux/init.h> 19#include <linux/spinlock.h> 20#include <linux/crc32.h> 21#include <linux/bitrev.h> 22#include <linux/ethtool.h> 23#include <linux/slab.h> 24#include <asm/prom.h> 25#include <asm/dbdma.h> 26#include <asm/io.h> 27#include <asm/page.h> 28#include <asm/pgtable.h> 29#include <asm/machdep.h> 30#include <asm/pmac_feature.h> 31#include <asm/macio.h> 32#include <asm/irq.h> 33 34#include "bmac.h" 35 36#define trunc_page(x) ((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1)))) 37#define round_page(x) trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1))) 38 39/* 40 * CRC polynomial - used in working out multicast filter bits. 41 */ 42#define ENET_CRCPOLY 0x04c11db7 43 44/* switch to use multicast code lifted from sunhme driver */ 45#define SUNHME_MULTICAST 46 47#define N_RX_RING 64 48#define N_TX_RING 32 49#define MAX_TX_ACTIVE 1 50#define ETHERCRC 4 51#define ETHERMINPACKET 64 52#define ETHERMTU 1500 53#define RX_BUFLEN (ETHERMTU + 14 + ETHERCRC + 2) 54#define TX_TIMEOUT HZ /* 1 second */ 55 56/* Bits in transmit DMA status */ 57#define TX_DMA_ERR 0x80 58 59#define XXDEBUG(args) 60 61struct bmac_data { 62 /* volatile struct bmac *bmac; */ 63 struct sk_buff_head *queue; 64 volatile struct dbdma_regs __iomem *tx_dma; 65 int tx_dma_intr; 66 volatile struct dbdma_regs __iomem *rx_dma; 67 int rx_dma_intr; 68 volatile struct dbdma_cmd *tx_cmds; /* xmit dma command list */ 69 volatile struct dbdma_cmd *rx_cmds; /* recv dma command list */ 70 struct macio_dev *mdev; 71 int is_bmac_plus; 72 struct sk_buff *rx_bufs[N_RX_RING]; 73 int rx_fill; 74 int rx_empty; 75 struct sk_buff *tx_bufs[N_TX_RING]; 76 int tx_fill; 77 int tx_empty; 78 unsigned char tx_fullup; 79 struct timer_list tx_timeout; 80 int timeout_active; 81 int sleeping; 82 int opened; 83 unsigned short hash_use_count[64]; 84 unsigned short hash_table_mask[4]; 85 spinlock_t lock; 86}; 87 88#if 0 /* Move that to ethtool */ 89 90typedef struct bmac_reg_entry { 91 char *name; 92 unsigned short reg_offset; 93} bmac_reg_entry_t; 94 95#define N_REG_ENTRIES 31 96 97static bmac_reg_entry_t reg_entries[N_REG_ENTRIES] = { 98 {"MEMADD", MEMADD}, 99 {"MEMDATAHI", MEMDATAHI}, 100 {"MEMDATALO", MEMDATALO}, 101 {"TXPNTR", TXPNTR}, 102 {"RXPNTR", RXPNTR}, 103 {"IPG1", IPG1}, 104 {"IPG2", IPG2}, 105 {"ALIMIT", ALIMIT}, 106 {"SLOT", SLOT}, 107 {"PALEN", PALEN}, 108 {"PAPAT", PAPAT}, 109 {"TXSFD", TXSFD}, 110 {"JAM", JAM}, 111 {"TXCFG", TXCFG}, 112 {"TXMAX", TXMAX}, 113 {"TXMIN", TXMIN}, 114 {"PAREG", PAREG}, 115 {"DCNT", DCNT}, 116 {"NCCNT", NCCNT}, 117 {"NTCNT", NTCNT}, 118 {"EXCNT", EXCNT}, 119 {"LTCNT", LTCNT}, 120 {"TXSM", TXSM}, 121 {"RXCFG", RXCFG}, 122 {"RXMAX", RXMAX}, 123 {"RXMIN", RXMIN}, 124 {"FRCNT", FRCNT}, 125 {"AECNT", AECNT}, 126 {"FECNT", FECNT}, 127 {"RXSM", RXSM}, 128 {"RXCV", RXCV} 129}; 130 131#endif 132 133static unsigned char *bmac_emergency_rxbuf; 134 135/* 136 * Number of bytes of private data per BMAC: allow enough for 137 * the rx and tx dma commands plus a branch dma command each, 138 * and another 16 bytes to allow us to align the dma command 139 * buffers on a 16 byte boundary. 140 */ 141#define PRIV_BYTES (sizeof(struct bmac_data) \ 142 + (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \ 143 + sizeof(struct sk_buff_head)) 144 145static int bmac_open(struct net_device *dev); 146static int bmac_close(struct net_device *dev); 147static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev); 148static void bmac_set_multicast(struct net_device *dev); 149static void bmac_reset_and_enable(struct net_device *dev); 150static void bmac_start_chip(struct net_device *dev); 151static void bmac_init_chip(struct net_device *dev); 152static void bmac_init_registers(struct net_device *dev); 153static void bmac_enable_and_reset_chip(struct net_device *dev); 154static int bmac_set_address(struct net_device *dev, void *addr); 155static irqreturn_t bmac_misc_intr(int irq, void *dev_id); 156static irqreturn_t bmac_txdma_intr(int irq, void *dev_id); 157static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id); 158static void bmac_set_timeout(struct net_device *dev); 159static void bmac_tx_timeout(unsigned long data); 160static int bmac_output(struct sk_buff *skb, struct net_device *dev); 161static void bmac_start(struct net_device *dev); 162 163#define DBDMA_SET(x) ( ((x) | (x) << 16) ) 164#define DBDMA_CLEAR(x) ( (x) << 16) 165 166static inline void 167dbdma_st32(volatile __u32 __iomem *a, unsigned long x) 168{ 169 __asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory"); 170} 171 172static inline unsigned long 173dbdma_ld32(volatile __u32 __iomem *a) 174{ 175 __u32 swap; 176 __asm__ volatile ("lwbrx %0,0,%1" : "=r" (swap) : "r" (a)); 177 return swap; 178} 179 180static void 181dbdma_continue(volatile struct dbdma_regs __iomem *dmap) 182{ 183 dbdma_st32(&dmap->control, 184 DBDMA_SET(RUN|WAKE) | DBDMA_CLEAR(PAUSE|DEAD)); 185 eieio(); 186} 187 188static void 189dbdma_reset(volatile struct dbdma_regs __iomem *dmap) 190{ 191 dbdma_st32(&dmap->control, 192 DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN)); 193 eieio(); 194 while (dbdma_ld32(&dmap->status) & RUN) 195 eieio(); 196} 197 198static void 199dbdma_setcmd(volatile struct dbdma_cmd *cp, 200 unsigned short cmd, unsigned count, unsigned long addr, 201 unsigned long cmd_dep) 202{ 203 out_le16(&cp->command, cmd); 204 out_le16(&cp->req_count, count); 205 out_le32(&cp->phy_addr, addr); 206 out_le32(&cp->cmd_dep, cmd_dep); 207 out_le16(&cp->xfer_status, 0); 208 out_le16(&cp->res_count, 0); 209} 210 211static inline 212void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data ) 213{ 214 out_le16((void __iomem *)dev->base_addr + reg_offset, data); 215} 216 217 218static inline 219unsigned short bmread(struct net_device *dev, unsigned long reg_offset ) 220{ 221 return in_le16((void __iomem *)dev->base_addr + reg_offset); 222} 223 224static void 225bmac_enable_and_reset_chip(struct net_device *dev) 226{ 227 struct bmac_data *bp = netdev_priv(dev); 228 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; 229 volatile struct dbdma_regs __iomem *td = bp->tx_dma; 230 231 if (rd) 232 dbdma_reset(rd); 233 if (td) 234 dbdma_reset(td); 235 236 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 1); 237} 238 239#define MIFDELAY udelay(10) 240 241static unsigned int 242bmac_mif_readbits(struct net_device *dev, int nb) 243{ 244 unsigned int val = 0; 245 246 while (--nb >= 0) { 247 bmwrite(dev, MIFCSR, 0); 248 MIFDELAY; 249 if (bmread(dev, MIFCSR) & 8) 250 val |= 1 << nb; 251 bmwrite(dev, MIFCSR, 1); 252 MIFDELAY; 253 } 254 bmwrite(dev, MIFCSR, 0); 255 MIFDELAY; 256 bmwrite(dev, MIFCSR, 1); 257 MIFDELAY; 258 return val; 259} 260 261static void 262bmac_mif_writebits(struct net_device *dev, unsigned int val, int nb) 263{ 264 int b; 265 266 while (--nb >= 0) { 267 b = (val & (1 << nb))? 6: 4; 268 bmwrite(dev, MIFCSR, b); 269 MIFDELAY; 270 bmwrite(dev, MIFCSR, b|1); 271 MIFDELAY; 272 } 273} 274 275static unsigned int 276bmac_mif_read(struct net_device *dev, unsigned int addr) 277{ 278 unsigned int val; 279 280 bmwrite(dev, MIFCSR, 4); 281 MIFDELAY; 282 bmac_mif_writebits(dev, ~0U, 32); 283 bmac_mif_writebits(dev, 6, 4); 284 bmac_mif_writebits(dev, addr, 10); 285 bmwrite(dev, MIFCSR, 2); 286 MIFDELAY; 287 bmwrite(dev, MIFCSR, 1); 288 MIFDELAY; 289 val = bmac_mif_readbits(dev, 17); 290 bmwrite(dev, MIFCSR, 4); 291 MIFDELAY; 292 return val; 293} 294 295static void 296bmac_mif_write(struct net_device *dev, unsigned int addr, unsigned int val) 297{ 298 bmwrite(dev, MIFCSR, 4); 299 MIFDELAY; 300 bmac_mif_writebits(dev, ~0U, 32); 301 bmac_mif_writebits(dev, 5, 4); 302 bmac_mif_writebits(dev, addr, 10); 303 bmac_mif_writebits(dev, 2, 2); 304 bmac_mif_writebits(dev, val, 16); 305 bmac_mif_writebits(dev, 3, 2); 306} 307 308static void 309bmac_init_registers(struct net_device *dev) 310{ 311 struct bmac_data *bp = netdev_priv(dev); 312 volatile unsigned short regValue; 313 unsigned short *pWord16; 314 int i; 315 316 /* XXDEBUG(("bmac: enter init_registers\n")); */ 317 318 bmwrite(dev, RXRST, RxResetValue); 319 bmwrite(dev, TXRST, TxResetBit); 320 321 i = 100; 322 do { 323 --i; 324 udelay(10000); 325 regValue = bmread(dev, TXRST); /* wait for reset to clear..acknowledge */ 326 } while ((regValue & TxResetBit) && i > 0); 327 328 if (!bp->is_bmac_plus) { 329 regValue = bmread(dev, XCVRIF); 330 regValue |= ClkBit | SerialMode | COLActiveLow; 331 bmwrite(dev, XCVRIF, regValue); 332 udelay(10000); 333 } 334 335 bmwrite(dev, RSEED, (unsigned short)0x1968); 336 337 regValue = bmread(dev, XIFC); 338 regValue |= TxOutputEnable; 339 bmwrite(dev, XIFC, regValue); 340 341 bmread(dev, PAREG); 342 343 /* set collision counters to 0 */ 344 bmwrite(dev, NCCNT, 0); 345 bmwrite(dev, NTCNT, 0); 346 bmwrite(dev, EXCNT, 0); 347 bmwrite(dev, LTCNT, 0); 348 349 /* set rx counters to 0 */ 350 bmwrite(dev, FRCNT, 0); 351 bmwrite(dev, LECNT, 0); 352 bmwrite(dev, AECNT, 0); 353 bmwrite(dev, FECNT, 0); 354 bmwrite(dev, RXCV, 0); 355 356 /* set tx fifo information */ 357 bmwrite(dev, TXTH, 4); /* 4 octets before tx starts */ 358 359 bmwrite(dev, TXFIFOCSR, 0); /* first disable txFIFO */ 360 bmwrite(dev, TXFIFOCSR, TxFIFOEnable ); 361 362 /* set rx fifo information */ 363 bmwrite(dev, RXFIFOCSR, 0); /* first disable rxFIFO */ 364 bmwrite(dev, RXFIFOCSR, RxFIFOEnable ); 365 366 //bmwrite(dev, TXCFG, TxMACEnable); /* TxNeverGiveUp maybe later */ 367 bmread(dev, STATUS); /* read it just to clear it */ 368 369 /* zero out the chip Hash Filter registers */ 370 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0; 371 bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */ 372 bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */ 373 bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */ 374 bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */ 375 376 pWord16 = (unsigned short *)dev->dev_addr; 377 bmwrite(dev, MADD0, *pWord16++); 378 bmwrite(dev, MADD1, *pWord16++); 379 bmwrite(dev, MADD2, *pWord16); 380 381 bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets); 382 383 bmwrite(dev, INTDISABLE, EnableNormal); 384} 385 386#if 0 387static void 388bmac_disable_interrupts(struct net_device *dev) 389{ 390 bmwrite(dev, INTDISABLE, DisableAll); 391} 392 393static void 394bmac_enable_interrupts(struct net_device *dev) 395{ 396 bmwrite(dev, INTDISABLE, EnableNormal); 397} 398#endif 399 400 401static void 402bmac_start_chip(struct net_device *dev) 403{ 404 struct bmac_data *bp = netdev_priv(dev); 405 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; 406 unsigned short oldConfig; 407 408 /* enable rx dma channel */ 409 dbdma_continue(rd); 410 411 oldConfig = bmread(dev, TXCFG); 412 bmwrite(dev, TXCFG, oldConfig | TxMACEnable ); 413 414 /* turn on rx plus any other bits already on (promiscuous possibly) */ 415 oldConfig = bmread(dev, RXCFG); 416 bmwrite(dev, RXCFG, oldConfig | RxMACEnable ); 417 udelay(20000); 418} 419 420static void 421bmac_init_phy(struct net_device *dev) 422{ 423 unsigned int addr; 424 struct bmac_data *bp = netdev_priv(dev); 425 426 printk(KERN_DEBUG "phy registers:"); 427 for (addr = 0; addr < 32; ++addr) { 428 if ((addr & 7) == 0) 429 printk(KERN_DEBUG); 430 printk(KERN_CONT " %.4x", bmac_mif_read(dev, addr)); 431 } 432 printk(KERN_CONT "\n"); 433 434 if (bp->is_bmac_plus) { 435 unsigned int capable, ctrl; 436 437 ctrl = bmac_mif_read(dev, 0); 438 capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1; 439 if (bmac_mif_read(dev, 4) != capable || 440 (ctrl & 0x1000) == 0) { 441 bmac_mif_write(dev, 4, capable); 442 bmac_mif_write(dev, 0, 0x1200); 443 } else 444 bmac_mif_write(dev, 0, 0x1000); 445 } 446} 447 448static void bmac_init_chip(struct net_device *dev) 449{ 450 bmac_init_phy(dev); 451 bmac_init_registers(dev); 452} 453 454#ifdef CONFIG_PM 455static int bmac_suspend(struct macio_dev *mdev, pm_message_t state) 456{ 457 struct net_device* dev = macio_get_drvdata(mdev); 458 struct bmac_data *bp = netdev_priv(dev); 459 unsigned long flags; 460 unsigned short config; 461 int i; 462 463 netif_device_detach(dev); 464 /* prolly should wait for dma to finish & turn off the chip */ 465 spin_lock_irqsave(&bp->lock, flags); 466 if (bp->timeout_active) { 467 del_timer(&bp->tx_timeout); 468 bp->timeout_active = 0; 469 } 470 disable_irq(dev->irq); 471 disable_irq(bp->tx_dma_intr); 472 disable_irq(bp->rx_dma_intr); 473 bp->sleeping = 1; 474 spin_unlock_irqrestore(&bp->lock, flags); 475 if (bp->opened) { 476 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; 477 volatile struct dbdma_regs __iomem *td = bp->tx_dma; 478 479 config = bmread(dev, RXCFG); 480 bmwrite(dev, RXCFG, (config & ~RxMACEnable)); 481 config = bmread(dev, TXCFG); 482 bmwrite(dev, TXCFG, (config & ~TxMACEnable)); 483 bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */ 484 /* disable rx and tx dma */ 485 st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */ 486 st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */ 487 /* free some skb's */ 488 for (i=0; i<N_RX_RING; i++) { 489 if (bp->rx_bufs[i] != NULL) { 490 dev_kfree_skb(bp->rx_bufs[i]); 491 bp->rx_bufs[i] = NULL; 492 } 493 } 494 for (i = 0; i<N_TX_RING; i++) { 495 if (bp->tx_bufs[i] != NULL) { 496 dev_kfree_skb(bp->tx_bufs[i]); 497 bp->tx_bufs[i] = NULL; 498 } 499 } 500 } 501 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); 502 return 0; 503} 504 505static int bmac_resume(struct macio_dev *mdev) 506{ 507 struct net_device* dev = macio_get_drvdata(mdev); 508 struct bmac_data *bp = netdev_priv(dev); 509 510 /* see if this is enough */ 511 if (bp->opened) 512 bmac_reset_and_enable(dev); 513 514 enable_irq(dev->irq); 515 enable_irq(bp->tx_dma_intr); 516 enable_irq(bp->rx_dma_intr); 517 netif_device_attach(dev); 518 519 return 0; 520} 521#endif /* CONFIG_PM */ 522 523static int bmac_set_address(struct net_device *dev, void *addr) 524{ 525 struct bmac_data *bp = netdev_priv(dev); 526 unsigned char *p = addr; 527 unsigned short *pWord16; 528 unsigned long flags; 529 int i; 530 531 XXDEBUG(("bmac: enter set_address\n")); 532 spin_lock_irqsave(&bp->lock, flags); 533 534 for (i = 0; i < 6; ++i) { 535 dev->dev_addr[i] = p[i]; 536 } 537 /* load up the hardware address */ 538 pWord16 = (unsigned short *)dev->dev_addr; 539 bmwrite(dev, MADD0, *pWord16++); 540 bmwrite(dev, MADD1, *pWord16++); 541 bmwrite(dev, MADD2, *pWord16); 542 543 spin_unlock_irqrestore(&bp->lock, flags); 544 XXDEBUG(("bmac: exit set_address\n")); 545 return 0; 546} 547 548static inline void bmac_set_timeout(struct net_device *dev) 549{ 550 struct bmac_data *bp = netdev_priv(dev); 551 unsigned long flags; 552 553 spin_lock_irqsave(&bp->lock, flags); 554 if (bp->timeout_active) 555 del_timer(&bp->tx_timeout); 556 bp->tx_timeout.expires = jiffies + TX_TIMEOUT; 557 bp->tx_timeout.function = bmac_tx_timeout; 558 bp->tx_timeout.data = (unsigned long) dev; 559 add_timer(&bp->tx_timeout); 560 bp->timeout_active = 1; 561 spin_unlock_irqrestore(&bp->lock, flags); 562} 563 564static void 565bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp) 566{ 567 void *vaddr; 568 unsigned long baddr; 569 unsigned long len; 570 571 len = skb->len; 572 vaddr = skb->data; 573 baddr = virt_to_bus(vaddr); 574 575 dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0); 576} 577 578static void 579bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp) 580{ 581 unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf; 582 583 dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN, 584 virt_to_bus(addr), 0); 585} 586 587static void 588bmac_init_tx_ring(struct bmac_data *bp) 589{ 590 volatile struct dbdma_regs __iomem *td = bp->tx_dma; 591 592 memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd)); 593 594 bp->tx_empty = 0; 595 bp->tx_fill = 0; 596 bp->tx_fullup = 0; 597 598 /* put a branch at the end of the tx command list */ 599 dbdma_setcmd(&bp->tx_cmds[N_TX_RING], 600 (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds)); 601 602 /* reset tx dma */ 603 dbdma_reset(td); 604 out_le32(&td->wait_sel, 0x00200020); 605 out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds)); 606} 607 608static int 609bmac_init_rx_ring(struct bmac_data *bp) 610{ 611 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; 612 int i; 613 struct sk_buff *skb; 614 615 /* initialize list of sk_buffs for receiving and set up recv dma */ 616 memset((char *)bp->rx_cmds, 0, 617 (N_RX_RING + 1) * sizeof(struct dbdma_cmd)); 618 for (i = 0; i < N_RX_RING; i++) { 619 if ((skb = bp->rx_bufs[i]) == NULL) { 620 bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2); 621 if (skb != NULL) 622 skb_reserve(skb, 2); 623 } 624 bmac_construct_rxbuff(skb, &bp->rx_cmds[i]); 625 } 626 627 bp->rx_empty = 0; 628 bp->rx_fill = i; 629 630 /* Put a branch back to the beginning of the receive command list */ 631 dbdma_setcmd(&bp->rx_cmds[N_RX_RING], 632 (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds)); 633 634 /* start rx dma */ 635 dbdma_reset(rd); 636 out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds)); 637 638 return 1; 639} 640 641 642static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev) 643{ 644 struct bmac_data *bp = netdev_priv(dev); 645 volatile struct dbdma_regs __iomem *td = bp->tx_dma; 646 int i; 647 648 /* see if there's a free slot in the tx ring */ 649 /* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */ 650 /* bp->tx_empty, bp->tx_fill)); */ 651 i = bp->tx_fill + 1; 652 if (i >= N_TX_RING) 653 i = 0; 654 if (i == bp->tx_empty) { 655 netif_stop_queue(dev); 656 bp->tx_fullup = 1; 657 XXDEBUG(("bmac_transmit_packet: tx ring full\n")); 658 return -1; /* can't take it at the moment */ 659 } 660 661 dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0); 662 663 bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]); 664 665 bp->tx_bufs[bp->tx_fill] = skb; 666 bp->tx_fill = i; 667 668 dev->stats.tx_bytes += skb->len; 669 670 dbdma_continue(td); 671 672 return 0; 673} 674 675static int rxintcount; 676 677static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id) 678{ 679 struct net_device *dev = (struct net_device *) dev_id; 680 struct bmac_data *bp = netdev_priv(dev); 681 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; 682 volatile struct dbdma_cmd *cp; 683 int i, nb, stat; 684 struct sk_buff *skb; 685 unsigned int residual; 686 int last; 687 unsigned long flags; 688 689 spin_lock_irqsave(&bp->lock, flags); 690 691 if (++rxintcount < 10) { 692 XXDEBUG(("bmac_rxdma_intr\n")); 693 } 694 695 last = -1; 696 i = bp->rx_empty; 697 698 while (1) { 699 cp = &bp->rx_cmds[i]; 700 stat = ld_le16(&cp->xfer_status); 701 residual = ld_le16(&cp->res_count); 702 if ((stat & ACTIVE) == 0) 703 break; 704 nb = RX_BUFLEN - residual - 2; 705 if (nb < (ETHERMINPACKET - ETHERCRC)) { 706 skb = NULL; 707 dev->stats.rx_length_errors++; 708 dev->stats.rx_errors++; 709 } else { 710 skb = bp->rx_bufs[i]; 711 bp->rx_bufs[i] = NULL; 712 } 713 if (skb != NULL) { 714 nb -= ETHERCRC; 715 skb_put(skb, nb); 716 skb->protocol = eth_type_trans(skb, dev); 717 netif_rx(skb); 718 ++dev->stats.rx_packets; 719 dev->stats.rx_bytes += nb; 720 } else { 721 ++dev->stats.rx_dropped; 722 } 723 if ((skb = bp->rx_bufs[i]) == NULL) { 724 bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2); 725 if (skb != NULL) 726 skb_reserve(bp->rx_bufs[i], 2); 727 } 728 bmac_construct_rxbuff(skb, &bp->rx_cmds[i]); 729 st_le16(&cp->res_count, 0); 730 st_le16(&cp->xfer_status, 0); 731 last = i; 732 if (++i >= N_RX_RING) i = 0; 733 } 734 735 if (last != -1) { 736 bp->rx_fill = last; 737 bp->rx_empty = i; 738 } 739 740 dbdma_continue(rd); 741 spin_unlock_irqrestore(&bp->lock, flags); 742 743 if (rxintcount < 10) { 744 XXDEBUG(("bmac_rxdma_intr done\n")); 745 } 746 return IRQ_HANDLED; 747} 748 749static int txintcount; 750 751static irqreturn_t bmac_txdma_intr(int irq, void *dev_id) 752{ 753 struct net_device *dev = (struct net_device *) dev_id; 754 struct bmac_data *bp = netdev_priv(dev); 755 volatile struct dbdma_cmd *cp; 756 int stat; 757 unsigned long flags; 758 759 spin_lock_irqsave(&bp->lock, flags); 760 761 if (txintcount++ < 10) { 762 XXDEBUG(("bmac_txdma_intr\n")); 763 } 764 765 /* del_timer(&bp->tx_timeout); */ 766 /* bp->timeout_active = 0; */ 767 768 while (1) { 769 cp = &bp->tx_cmds[bp->tx_empty]; 770 stat = ld_le16(&cp->xfer_status); 771 if (txintcount < 10) { 772 XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat)); 773 } 774 if (!(stat & ACTIVE)) { 775 /* 776 * status field might not have been filled by DBDMA 777 */ 778 if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr))) 779 break; 780 } 781 782 if (bp->tx_bufs[bp->tx_empty]) { 783 ++dev->stats.tx_packets; 784 dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]); 785 } 786 bp->tx_bufs[bp->tx_empty] = NULL; 787 bp->tx_fullup = 0; 788 netif_wake_queue(dev); 789 if (++bp->tx_empty >= N_TX_RING) 790 bp->tx_empty = 0; 791 if (bp->tx_empty == bp->tx_fill) 792 break; 793 } 794 795 spin_unlock_irqrestore(&bp->lock, flags); 796 797 if (txintcount < 10) { 798 XXDEBUG(("bmac_txdma_intr done->bmac_start\n")); 799 } 800 801 bmac_start(dev); 802 return IRQ_HANDLED; 803} 804 805#ifndef SUNHME_MULTICAST 806/* Real fast bit-reversal algorithm, 6-bit values */ 807static int reverse6[64] = { 808 0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38, 809 0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c, 810 0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a, 811 0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e, 812 0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39, 813 0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d, 814 0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b, 815 0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f 816}; 817 818static unsigned int 819crc416(unsigned int curval, unsigned short nxtval) 820{ 821 register unsigned int counter, cur = curval, next = nxtval; 822 register int high_crc_set, low_data_set; 823 824 /* Swap bytes */ 825 next = ((next & 0x00FF) << 8) | (next >> 8); 826 827 /* Compute bit-by-bit */ 828 for (counter = 0; counter < 16; ++counter) { 829 /* is high CRC bit set? */ 830 if ((cur & 0x80000000) == 0) high_crc_set = 0; 831 else high_crc_set = 1; 832 833 cur = cur << 1; 834 835 if ((next & 0x0001) == 0) low_data_set = 0; 836 else low_data_set = 1; 837 838 next = next >> 1; 839 840 /* do the XOR */ 841 if (high_crc_set ^ low_data_set) cur = cur ^ ENET_CRCPOLY; 842 } 843 return cur; 844} 845 846static unsigned int 847bmac_crc(unsigned short *address) 848{ 849 unsigned int newcrc; 850 851 XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2])); 852 newcrc = crc416(0xffffffff, *address); /* address bits 47 - 32 */ 853 newcrc = crc416(newcrc, address[1]); /* address bits 31 - 16 */ 854 newcrc = crc416(newcrc, address[2]); /* address bits 15 - 0 */ 855 856 return(newcrc); 857} 858 859/* 860 * Add requested mcast addr to BMac's hash table filter. 861 * 862 */ 863 864static void 865bmac_addhash(struct bmac_data *bp, unsigned char *addr) 866{ 867 unsigned int crc; 868 unsigned short mask; 869 870 if (!(*addr)) return; 871 crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */ 872 crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */ 873 if (bp->hash_use_count[crc]++) return; /* This bit is already set */ 874 mask = crc % 16; 875 mask = (unsigned char)1 << mask; 876 bp->hash_use_count[crc/16] |= mask; 877} 878 879static void 880bmac_removehash(struct bmac_data *bp, unsigned char *addr) 881{ 882 unsigned int crc; 883 unsigned char mask; 884 885 /* Now, delete the address from the filter copy, as indicated */ 886 crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */ 887 crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */ 888 if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */ 889 if (--bp->hash_use_count[crc]) return; /* That bit is still in use */ 890 mask = crc % 16; 891 mask = ((unsigned char)1 << mask) ^ 0xffff; /* To turn off bit */ 892 bp->hash_table_mask[crc/16] &= mask; 893} 894 895/* 896 * Sync the adapter with the software copy of the multicast mask 897 * (logical address filter). 898 */ 899 900static void 901bmac_rx_off(struct net_device *dev) 902{ 903 unsigned short rx_cfg; 904 905 rx_cfg = bmread(dev, RXCFG); 906 rx_cfg &= ~RxMACEnable; 907 bmwrite(dev, RXCFG, rx_cfg); 908 do { 909 rx_cfg = bmread(dev, RXCFG); 910 } while (rx_cfg & RxMACEnable); 911} 912 913unsigned short 914bmac_rx_on(struct net_device *dev, int hash_enable, int promisc_enable) 915{ 916 unsigned short rx_cfg; 917 918 rx_cfg = bmread(dev, RXCFG); 919 rx_cfg |= RxMACEnable; 920 if (hash_enable) rx_cfg |= RxHashFilterEnable; 921 else rx_cfg &= ~RxHashFilterEnable; 922 if (promisc_enable) rx_cfg |= RxPromiscEnable; 923 else rx_cfg &= ~RxPromiscEnable; 924 bmwrite(dev, RXRST, RxResetValue); 925 bmwrite(dev, RXFIFOCSR, 0); /* first disable rxFIFO */ 926 bmwrite(dev, RXFIFOCSR, RxFIFOEnable ); 927 bmwrite(dev, RXCFG, rx_cfg ); 928 return rx_cfg; 929} 930 931static void 932bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp) 933{ 934 bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */ 935 bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */ 936 bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */ 937 bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */ 938} 939 940#if 0 941static void 942bmac_add_multi(struct net_device *dev, 943 struct bmac_data *bp, unsigned char *addr) 944{ 945 /* XXDEBUG(("bmac: enter bmac_add_multi\n")); */ 946 bmac_addhash(bp, addr); 947 bmac_rx_off(dev); 948 bmac_update_hash_table_mask(dev, bp); 949 bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0); 950 /* XXDEBUG(("bmac: exit bmac_add_multi\n")); */ 951} 952 953static void 954bmac_remove_multi(struct net_device *dev, 955 struct bmac_data *bp, unsigned char *addr) 956{ 957 bmac_removehash(bp, addr); 958 bmac_rx_off(dev); 959 bmac_update_hash_table_mask(dev, bp); 960 bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0); 961} 962#endif 963 964/* Set or clear the multicast filter for this adaptor. 965 num_addrs == -1 Promiscuous mode, receive all packets 966 num_addrs == 0 Normal mode, clear multicast list 967 num_addrs > 0 Multicast mode, receive normal and MC packets, and do 968 best-effort filtering. 969 */ 970static void bmac_set_multicast(struct net_device *dev) 971{ 972 struct netdev_hw_addr *ha; 973 struct bmac_data *bp = netdev_priv(dev); 974 int num_addrs = netdev_mc_count(dev); 975 unsigned short rx_cfg; 976 int i; 977 978 if (bp->sleeping) 979 return; 980 981 XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs)); 982 983 if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) { 984 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff; 985 bmac_update_hash_table_mask(dev, bp); 986 rx_cfg = bmac_rx_on(dev, 1, 0); 987 XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n")); 988 } else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) { 989 rx_cfg = bmread(dev, RXCFG); 990 rx_cfg |= RxPromiscEnable; 991 bmwrite(dev, RXCFG, rx_cfg); 992 rx_cfg = bmac_rx_on(dev, 0, 1); 993 XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg)); 994 } else { 995 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0; 996 for (i=0; i<64; i++) bp->hash_use_count[i] = 0; 997 if (num_addrs == 0) { 998 rx_cfg = bmac_rx_on(dev, 0, 0); 999 XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg)); 1000 } else { 1001 netdev_for_each_mc_addr(ha, dev) 1002 bmac_addhash(bp, ha->addr); 1003 bmac_update_hash_table_mask(dev, bp); 1004 rx_cfg = bmac_rx_on(dev, 1, 0); 1005 XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg)); 1006 } 1007 } 1008 /* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */ 1009} 1010#else /* ifdef SUNHME_MULTICAST */ 1011 1012/* The version of set_multicast below was lifted from sunhme.c */ 1013 1014static void bmac_set_multicast(struct net_device *dev) 1015{ 1016 struct netdev_hw_addr *ha; 1017 char *addrs; 1018 int i; 1019 unsigned short rx_cfg; 1020 u32 crc; 1021 1022 if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) { 1023 bmwrite(dev, BHASH0, 0xffff); 1024 bmwrite(dev, BHASH1, 0xffff); 1025 bmwrite(dev, BHASH2, 0xffff); 1026 bmwrite(dev, BHASH3, 0xffff); 1027 } else if(dev->flags & IFF_PROMISC) { 1028 rx_cfg = bmread(dev, RXCFG); 1029 rx_cfg |= RxPromiscEnable; 1030 bmwrite(dev, RXCFG, rx_cfg); 1031 } else { 1032 u16 hash_table[4]; 1033 1034 rx_cfg = bmread(dev, RXCFG); 1035 rx_cfg &= ~RxPromiscEnable; 1036 bmwrite(dev, RXCFG, rx_cfg); 1037 1038 for(i = 0; i < 4; i++) hash_table[i] = 0; 1039 1040 netdev_for_each_mc_addr(ha, dev) { 1041 addrs = ha->addr; 1042 1043 if(!(*addrs & 1)) 1044 continue; 1045 1046 crc = ether_crc_le(6, addrs); 1047 crc >>= 26; 1048 hash_table[crc >> 4] |= 1 << (crc & 0xf); 1049 } 1050 bmwrite(dev, BHASH0, hash_table[0]); 1051 bmwrite(dev, BHASH1, hash_table[1]); 1052 bmwrite(dev, BHASH2, hash_table[2]); 1053 bmwrite(dev, BHASH3, hash_table[3]); 1054 } 1055} 1056#endif /* SUNHME_MULTICAST */ 1057 1058static int miscintcount; 1059 1060static irqreturn_t bmac_misc_intr(int irq, void *dev_id) 1061{ 1062 struct net_device *dev = (struct net_device *) dev_id; 1063 unsigned int status = bmread(dev, STATUS); 1064 if (miscintcount++ < 10) { 1065 XXDEBUG(("bmac_misc_intr\n")); 1066 } 1067 /* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */ 1068 /* bmac_txdma_intr_inner(irq, dev_id); */ 1069 /* if (status & FrameReceived) dev->stats.rx_dropped++; */ 1070 if (status & RxErrorMask) dev->stats.rx_errors++; 1071 if (status & RxCRCCntExp) dev->stats.rx_crc_errors++; 1072 if (status & RxLenCntExp) dev->stats.rx_length_errors++; 1073 if (status & RxOverFlow) dev->stats.rx_over_errors++; 1074 if (status & RxAlignCntExp) dev->stats.rx_frame_errors++; 1075 1076 /* if (status & FrameSent) dev->stats.tx_dropped++; */ 1077 if (status & TxErrorMask) dev->stats.tx_errors++; 1078 if (status & TxUnderrun) dev->stats.tx_fifo_errors++; 1079 if (status & TxNormalCollExp) dev->stats.collisions++; 1080 return IRQ_HANDLED; 1081} 1082 1083/* 1084 * Procedure for reading EEPROM 1085 */ 1086#define SROMAddressLength 5 1087#define DataInOn 0x0008 1088#define DataInOff 0x0000 1089#define Clk 0x0002 1090#define ChipSelect 0x0001 1091#define SDIShiftCount 3 1092#define SD0ShiftCount 2 1093#define DelayValue 1000 /* number of microseconds */ 1094#define SROMStartOffset 10 /* this is in words */ 1095#define SROMReadCount 3 /* number of words to read from SROM */ 1096#define SROMAddressBits 6 1097#define EnetAddressOffset 20 1098 1099static unsigned char 1100bmac_clock_out_bit(struct net_device *dev) 1101{ 1102 unsigned short data; 1103 unsigned short val; 1104 1105 bmwrite(dev, SROMCSR, ChipSelect | Clk); 1106 udelay(DelayValue); 1107 1108 data = bmread(dev, SROMCSR); 1109 udelay(DelayValue); 1110 val = (data >> SD0ShiftCount) & 1; 1111 1112 bmwrite(dev, SROMCSR, ChipSelect); 1113 udelay(DelayValue); 1114 1115 return val; 1116} 1117 1118static void 1119bmac_clock_in_bit(struct net_device *dev, unsigned int val) 1120{ 1121 unsigned short data; 1122 1123 if (val != 0 && val != 1) return; 1124 1125 data = (val << SDIShiftCount); 1126 bmwrite(dev, SROMCSR, data | ChipSelect ); 1127 udelay(DelayValue); 1128 1129 bmwrite(dev, SROMCSR, data | ChipSelect | Clk ); 1130 udelay(DelayValue); 1131 1132 bmwrite(dev, SROMCSR, data | ChipSelect); 1133 udelay(DelayValue); 1134} 1135 1136static void 1137reset_and_select_srom(struct net_device *dev) 1138{ 1139 /* first reset */ 1140 bmwrite(dev, SROMCSR, 0); 1141 udelay(DelayValue); 1142 1143 /* send it the read command (110) */ 1144 bmac_clock_in_bit(dev, 1); 1145 bmac_clock_in_bit(dev, 1); 1146 bmac_clock_in_bit(dev, 0); 1147} 1148 1149static unsigned short 1150read_srom(struct net_device *dev, unsigned int addr, unsigned int addr_len) 1151{ 1152 unsigned short data, val; 1153 int i; 1154 1155 /* send out the address we want to read from */ 1156 for (i = 0; i < addr_len; i++) { 1157 val = addr >> (addr_len-i-1); 1158 bmac_clock_in_bit(dev, val & 1); 1159 } 1160 1161 /* Now read in the 16-bit data */ 1162 data = 0; 1163 for (i = 0; i < 16; i++) { 1164 val = bmac_clock_out_bit(dev); 1165 data <<= 1; 1166 data |= val; 1167 } 1168 bmwrite(dev, SROMCSR, 0); 1169 1170 return data; 1171} 1172 1173/* 1174 * It looks like Cogent and SMC use different methods for calculating 1175 * checksums. What a pain.. 1176 */ 1177 1178static int 1179bmac_verify_checksum(struct net_device *dev) 1180{ 1181 unsigned short data, storedCS; 1182 1183 reset_and_select_srom(dev); 1184 data = read_srom(dev, 3, SROMAddressBits); 1185 storedCS = ((data >> 8) & 0x0ff) | ((data << 8) & 0xff00); 1186 1187 return 0; 1188} 1189 1190 1191static void 1192bmac_get_station_address(struct net_device *dev, unsigned char *ea) 1193{ 1194 int i; 1195 unsigned short data; 1196 1197 for (i = 0; i < 6; i++) 1198 { 1199 reset_and_select_srom(dev); 1200 data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits); 1201 ea[2*i] = bitrev8(data & 0x0ff); 1202 ea[2*i+1] = bitrev8((data >> 8) & 0x0ff); 1203 } 1204} 1205 1206static void bmac_reset_and_enable(struct net_device *dev) 1207{ 1208 struct bmac_data *bp = netdev_priv(dev); 1209 unsigned long flags; 1210 struct sk_buff *skb; 1211 unsigned char *data; 1212 1213 spin_lock_irqsave(&bp->lock, flags); 1214 bmac_enable_and_reset_chip(dev); 1215 bmac_init_tx_ring(bp); 1216 bmac_init_rx_ring(bp); 1217 bmac_init_chip(dev); 1218 bmac_start_chip(dev); 1219 bmwrite(dev, INTDISABLE, EnableNormal); 1220 bp->sleeping = 0; 1221 1222 /* 1223 * It seems that the bmac can't receive until it's transmitted 1224 * a packet. So we give it a dummy packet to transmit. 1225 */ 1226 skb = dev_alloc_skb(ETHERMINPACKET); 1227 if (skb != NULL) { 1228 data = skb_put(skb, ETHERMINPACKET); 1229 memset(data, 0, ETHERMINPACKET); 1230 memcpy(data, dev->dev_addr, 6); 1231 memcpy(data+6, dev->dev_addr, 6); 1232 bmac_transmit_packet(skb, dev); 1233 } 1234 spin_unlock_irqrestore(&bp->lock, flags); 1235} 1236 1237static const struct ethtool_ops bmac_ethtool_ops = { 1238 .get_link = ethtool_op_get_link, 1239}; 1240 1241static const struct net_device_ops bmac_netdev_ops = { 1242 .ndo_open = bmac_open, 1243 .ndo_stop = bmac_close, 1244 .ndo_start_xmit = bmac_output, 1245 .ndo_set_multicast_list = bmac_set_multicast, 1246 .ndo_set_mac_address = bmac_set_address, 1247 .ndo_change_mtu = eth_change_mtu, 1248 .ndo_validate_addr = eth_validate_addr, 1249}; 1250 1251static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_id *match) 1252{ 1253 int j, rev, ret; 1254 struct bmac_data *bp; 1255 const unsigned char *prop_addr; 1256 unsigned char addr[6]; 1257 struct net_device *dev; 1258 int is_bmac_plus = ((int)match->data) != 0; 1259 1260 if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) { 1261 printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n"); 1262 return -ENODEV; 1263 } 1264 prop_addr = of_get_property(macio_get_of_node(mdev), 1265 "mac-address", NULL); 1266 if (prop_addr == NULL) { 1267 prop_addr = of_get_property(macio_get_of_node(mdev), 1268 "local-mac-address", NULL); 1269 if (prop_addr == NULL) { 1270 printk(KERN_ERR "BMAC: Can't get mac-address\n"); 1271 return -ENODEV; 1272 } 1273 } 1274 memcpy(addr, prop_addr, sizeof(addr)); 1275 1276 dev = alloc_etherdev(PRIV_BYTES); 1277 if (!dev) { 1278 printk(KERN_ERR "BMAC: alloc_etherdev failed, out of memory\n"); 1279 return -ENOMEM; 1280 } 1281 1282 bp = netdev_priv(dev); 1283 SET_NETDEV_DEV(dev, &mdev->ofdev.dev); 1284 macio_set_drvdata(mdev, dev); 1285 1286 bp->mdev = mdev; 1287 spin_lock_init(&bp->lock); 1288 1289 if (macio_request_resources(mdev, "bmac")) { 1290 printk(KERN_ERR "BMAC: can't request IO resource !\n"); 1291 goto out_free; 1292 } 1293 1294 dev->base_addr = (unsigned long) 1295 ioremap(macio_resource_start(mdev, 0), macio_resource_len(mdev, 0)); 1296 if (dev->base_addr == 0) 1297 goto out_release; 1298 1299 dev->irq = macio_irq(mdev, 0); 1300 1301 bmac_enable_and_reset_chip(dev); 1302 bmwrite(dev, INTDISABLE, DisableAll); 1303 1304 rev = addr[0] == 0 && addr[1] == 0xA0; 1305 for (j = 0; j < 6; ++j) 1306 dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j]; 1307 1308 /* Enable chip without interrupts for now */ 1309 bmac_enable_and_reset_chip(dev); 1310 bmwrite(dev, INTDISABLE, DisableAll); 1311 1312 dev->netdev_ops = &bmac_netdev_ops; 1313 dev->ethtool_ops = &bmac_ethtool_ops; 1314 1315 bmac_get_station_address(dev, addr); 1316 if (bmac_verify_checksum(dev) != 0) 1317 goto err_out_iounmap; 1318 1319 bp->is_bmac_plus = is_bmac_plus; 1320 bp->tx_dma = ioremap(macio_resource_start(mdev, 1), macio_resource_len(mdev, 1)); 1321 if (!bp->tx_dma) 1322 goto err_out_iounmap; 1323 bp->tx_dma_intr = macio_irq(mdev, 1); 1324 bp->rx_dma = ioremap(macio_resource_start(mdev, 2), macio_resource_len(mdev, 2)); 1325 if (!bp->rx_dma) 1326 goto err_out_iounmap_tx; 1327 bp->rx_dma_intr = macio_irq(mdev, 2); 1328 1329 bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1); 1330 bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1; 1331 1332 bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1); 1333 skb_queue_head_init(bp->queue); 1334 1335 init_timer(&bp->tx_timeout); 1336 1337 ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev); 1338 if (ret) { 1339 printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq); 1340 goto err_out_iounmap_rx; 1341 } 1342 ret = request_irq(bp->tx_dma_intr, bmac_txdma_intr, 0, "BMAC-txdma", dev); 1343 if (ret) { 1344 printk(KERN_ERR "BMAC: can't get irq %d\n", bp->tx_dma_intr); 1345 goto err_out_irq0; 1346 } 1347 ret = request_irq(bp->rx_dma_intr, bmac_rxdma_intr, 0, "BMAC-rxdma", dev); 1348 if (ret) { 1349 printk(KERN_ERR "BMAC: can't get irq %d\n", bp->rx_dma_intr); 1350 goto err_out_irq1; 1351 } 1352 1353 /* Mask chip interrupts and disable chip, will be 1354 * re-enabled on open() 1355 */ 1356 disable_irq(dev->irq); 1357 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); 1358 1359 if (register_netdev(dev) != 0) { 1360 printk(KERN_ERR "BMAC: Ethernet registration failed\n"); 1361 goto err_out_irq2; 1362 } 1363 1364 printk(KERN_INFO "%s: BMAC%s at %pM", 1365 dev->name, (is_bmac_plus ? "+" : ""), dev->dev_addr); 1366 XXDEBUG((", base_addr=%#0lx", dev->base_addr)); 1367 printk("\n"); 1368 1369 return 0; 1370 1371err_out_irq2: 1372 free_irq(bp->rx_dma_intr, dev); 1373err_out_irq1: 1374 free_irq(bp->tx_dma_intr, dev); 1375err_out_irq0: 1376 free_irq(dev->irq, dev); 1377err_out_iounmap_rx: 1378 iounmap(bp->rx_dma); 1379err_out_iounmap_tx: 1380 iounmap(bp->tx_dma); 1381err_out_iounmap: 1382 iounmap((void __iomem *)dev->base_addr); 1383out_release: 1384 macio_release_resources(mdev); 1385out_free: 1386 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); 1387 free_netdev(dev); 1388 1389 return -ENODEV; 1390} 1391 1392static int bmac_open(struct net_device *dev) 1393{ 1394 struct bmac_data *bp = netdev_priv(dev); 1395 /* XXDEBUG(("bmac: enter open\n")); */ 1396 /* reset the chip */ 1397 bp->opened = 1; 1398 bmac_reset_and_enable(dev); 1399 enable_irq(dev->irq); 1400 return 0; 1401} 1402 1403static int bmac_close(struct net_device *dev) 1404{ 1405 struct bmac_data *bp = netdev_priv(dev); 1406 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; 1407 volatile struct dbdma_regs __iomem *td = bp->tx_dma; 1408 unsigned short config; 1409 int i; 1410 1411 bp->sleeping = 1; 1412 1413 /* disable rx and tx */ 1414 config = bmread(dev, RXCFG); 1415 bmwrite(dev, RXCFG, (config & ~RxMACEnable)); 1416 1417 config = bmread(dev, TXCFG); 1418 bmwrite(dev, TXCFG, (config & ~TxMACEnable)); 1419 1420 bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */ 1421 1422 /* disable rx and tx dma */ 1423 st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */ 1424 st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */ 1425 1426 /* free some skb's */ 1427 XXDEBUG(("bmac: free rx bufs\n")); 1428 for (i=0; i<N_RX_RING; i++) { 1429 if (bp->rx_bufs[i] != NULL) { 1430 dev_kfree_skb(bp->rx_bufs[i]); 1431 bp->rx_bufs[i] = NULL; 1432 } 1433 } 1434 XXDEBUG(("bmac: free tx bufs\n")); 1435 for (i = 0; i<N_TX_RING; i++) { 1436 if (bp->tx_bufs[i] != NULL) { 1437 dev_kfree_skb(bp->tx_bufs[i]); 1438 bp->tx_bufs[i] = NULL; 1439 } 1440 } 1441 XXDEBUG(("bmac: all bufs freed\n")); 1442 1443 bp->opened = 0; 1444 disable_irq(dev->irq); 1445 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); 1446 1447 return 0; 1448} 1449 1450static void 1451bmac_start(struct net_device *dev) 1452{ 1453 struct bmac_data *bp = netdev_priv(dev); 1454 int i; 1455 struct sk_buff *skb; 1456 unsigned long flags; 1457 1458 if (bp->sleeping) 1459 return; 1460 1461 spin_lock_irqsave(&bp->lock, flags); 1462 while (1) { 1463 i = bp->tx_fill + 1; 1464 if (i >= N_TX_RING) 1465 i = 0; 1466 if (i == bp->tx_empty) 1467 break; 1468 skb = skb_dequeue(bp->queue); 1469 if (skb == NULL) 1470 break; 1471 bmac_transmit_packet(skb, dev); 1472 } 1473 spin_unlock_irqrestore(&bp->lock, flags); 1474} 1475 1476static int 1477bmac_output(struct sk_buff *skb, struct net_device *dev) 1478{ 1479 struct bmac_data *bp = netdev_priv(dev); 1480 skb_queue_tail(bp->queue, skb); 1481 bmac_start(dev); 1482 return NETDEV_TX_OK; 1483} 1484 1485static void bmac_tx_timeout(unsigned long data) 1486{ 1487 struct net_device *dev = (struct net_device *) data; 1488 struct bmac_data *bp = netdev_priv(dev); 1489 volatile struct dbdma_regs __iomem *td = bp->tx_dma; 1490 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; 1491 volatile struct dbdma_cmd *cp; 1492 unsigned long flags; 1493 unsigned short config, oldConfig; 1494 int i; 1495 1496 XXDEBUG(("bmac: tx_timeout called\n")); 1497 spin_lock_irqsave(&bp->lock, flags); 1498 bp->timeout_active = 0; 1499 1500 /* update various counters */ 1501/* bmac_handle_misc_intrs(bp, 0); */ 1502 1503 cp = &bp->tx_cmds[bp->tx_empty]; 1504/* XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */ 1505/* ld_le32(&td->status), ld_le16(&cp->xfer_status), bp->tx_bad_runt, */ 1506/* mb->pr, mb->xmtfs, mb->fifofc)); */ 1507 1508 /* turn off both tx and rx and reset the chip */ 1509 config = bmread(dev, RXCFG); 1510 bmwrite(dev, RXCFG, (config & ~RxMACEnable)); 1511 config = bmread(dev, TXCFG); 1512 bmwrite(dev, TXCFG, (config & ~TxMACEnable)); 1513 out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD)); 1514 printk(KERN_ERR "bmac: transmit timeout - resetting\n"); 1515 bmac_enable_and_reset_chip(dev); 1516 1517 /* restart rx dma */ 1518 cp = bus_to_virt(ld_le32(&rd->cmdptr)); 1519 out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD)); 1520 out_le16(&cp->xfer_status, 0); 1521 out_le32(&rd->cmdptr, virt_to_bus(cp)); 1522 out_le32(&rd->control, DBDMA_SET(RUN|WAKE)); 1523 1524 /* fix up the transmit side */ 1525 XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n", 1526 bp->tx_empty, bp->tx_fill, bp->tx_fullup)); 1527 i = bp->tx_empty; 1528 ++dev->stats.tx_errors; 1529 if (i != bp->tx_fill) { 1530 dev_kfree_skb(bp->tx_bufs[i]); 1531 bp->tx_bufs[i] = NULL; 1532 if (++i >= N_TX_RING) i = 0; 1533 bp->tx_empty = i; 1534 } 1535 bp->tx_fullup = 0; 1536 netif_wake_queue(dev); 1537 if (i != bp->tx_fill) { 1538 cp = &bp->tx_cmds[i]; 1539 out_le16(&cp->xfer_status, 0); 1540 out_le16(&cp->command, OUTPUT_LAST); 1541 out_le32(&td->cmdptr, virt_to_bus(cp)); 1542 out_le32(&td->control, DBDMA_SET(RUN)); 1543 /* bmac_set_timeout(dev); */ 1544 XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i)); 1545 } 1546 1547 /* turn it back on */ 1548 oldConfig = bmread(dev, RXCFG); 1549 bmwrite(dev, RXCFG, oldConfig | RxMACEnable ); 1550 oldConfig = bmread(dev, TXCFG); 1551 bmwrite(dev, TXCFG, oldConfig | TxMACEnable ); 1552 1553 spin_unlock_irqrestore(&bp->lock, flags); 1554} 1555 1556#if 0 1557static void dump_dbdma(volatile struct dbdma_cmd *cp,int count) 1558{ 1559 int i,*ip; 1560 1561 for (i=0;i< count;i++) { 1562 ip = (int*)(cp+i); 1563 1564 printk("dbdma req 0x%x addr 0x%x baddr 0x%x xfer/res 0x%x\n", 1565 ld_le32(ip+0), 1566 ld_le32(ip+1), 1567 ld_le32(ip+2), 1568 ld_le32(ip+3)); 1569 } 1570 1571} 1572#endif 1573 1574#if 0 1575static int 1576bmac_proc_info(char *buffer, char **start, off_t offset, int length) 1577{ 1578 int len = 0; 1579 off_t pos = 0; 1580 off_t begin = 0; 1581 int i; 1582 1583 if (bmac_devs == NULL) 1584 return -ENOSYS; 1585 1586 len += sprintf(buffer, "BMAC counters & registers\n"); 1587 1588 for (i = 0; i<N_REG_ENTRIES; i++) { 1589 len += sprintf(buffer + len, "%s: %#08x\n", 1590 reg_entries[i].name, 1591 bmread(bmac_devs, reg_entries[i].reg_offset)); 1592 pos = begin + len; 1593 1594 if (pos < offset) { 1595 len = 0; 1596 begin = pos; 1597 } 1598 1599 if (pos > offset+length) break; 1600 } 1601 1602 *start = buffer + (offset - begin); 1603 len -= (offset - begin); 1604 1605 if (len > length) len = length; 1606 1607 return len; 1608} 1609#endif 1610 1611static int __devexit bmac_remove(struct macio_dev *mdev) 1612{ 1613 struct net_device *dev = macio_get_drvdata(mdev); 1614 struct bmac_data *bp = netdev_priv(dev); 1615 1616 unregister_netdev(dev); 1617 1618 free_irq(dev->irq, dev); 1619 free_irq(bp->tx_dma_intr, dev); 1620 free_irq(bp->rx_dma_intr, dev); 1621 1622 iounmap((void __iomem *)dev->base_addr); 1623 iounmap(bp->tx_dma); 1624 iounmap(bp->rx_dma); 1625 1626 macio_release_resources(mdev); 1627 1628 free_netdev(dev); 1629 1630 return 0; 1631} 1632 1633static struct of_device_id bmac_match[] = 1634{ 1635 { 1636 .name = "bmac", 1637 .data = (void *)0, 1638 }, 1639 { 1640 .type = "network", 1641 .compatible = "bmac+", 1642 .data = (void *)1, 1643 }, 1644 {}, 1645}; 1646MODULE_DEVICE_TABLE (of, bmac_match); 1647 1648static struct macio_driver bmac_driver = 1649{ 1650 .driver = { 1651 .name = "bmac", 1652 .owner = THIS_MODULE, 1653 .of_match_table = bmac_match, 1654 }, 1655 .probe = bmac_probe, 1656 .remove = bmac_remove, 1657#ifdef CONFIG_PM 1658 .suspend = bmac_suspend, 1659 .resume = bmac_resume, 1660#endif 1661}; 1662 1663 1664static int __init bmac_init(void) 1665{ 1666 if (bmac_emergency_rxbuf == NULL) { 1667 bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL); 1668 if (bmac_emergency_rxbuf == NULL) { 1669 printk(KERN_ERR "BMAC: can't allocate emergency RX buffer\n"); 1670 return -ENOMEM; 1671 } 1672 } 1673 1674 return macio_register_driver(&bmac_driver); 1675} 1676 1677static void __exit bmac_exit(void) 1678{ 1679 macio_unregister_driver(&bmac_driver); 1680 1681 kfree(bmac_emergency_rxbuf); 1682 bmac_emergency_rxbuf = NULL; 1683} 1684 1685MODULE_AUTHOR("Randy Gobbel/Paul Mackerras"); 1686MODULE_DESCRIPTION("PowerMac BMAC ethernet driver."); 1687MODULE_LICENSE("GPL"); 1688 1689module_init(bmac_init); 1690module_exit(bmac_exit);