Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.15-rc3 (1567 lines, 41 kB)
/*
 * Copyright 2000, 2001 MontaVista Software Inc.
 * Author: MontaVista Software, Inc.
 *         stevel@mvista.com or source@mvista.com
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Ethernet driver for the MIPS GT96100 Advanced Communication Controller.
 *
 * Revision history
 *
 * 11.11.2001  Moved to 2.4.14, ppopov@mvista.com. Modified driver to add
 *             proper gt96100A support.
 * 12.05.2001  Moved eth port 0 to irq 3 (mapped to GT_SERINT0 on EV96100A)
 *             in order for both ports to work. Also cleaned up boot
 *             option support (mac address string parsing), fleshed out
 *             gt96100_cleanup_module(), and other general code cleanups
 *             <stevel@mvista.com>.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/bitops.h>

#include <asm/irq.h>
#include <asm/io.h>

#define DESC_BE 1
#define DESC_DATA_BE 1

#define GT96100_DEBUG 2

#include "gt96100eth.h"

// prototypes
static void* dmaalloc(size_t size, dma_addr_t *dma_handle);
static void dmafree(size_t size, void *vaddr);
static void gt96100_delay(int msec);
static int gt96100_add_hash_entry(struct net_device *dev,
				  unsigned char* addr);
static void read_mib_counters(struct gt96100_private *gp);
static int read_MII(int phy_addr, u32 reg);
static int write_MII(int phy_addr, u32 reg, u16 data);
static int gt96100_init_module(void);
static void gt96100_cleanup_module(void);
static void dump_MII(int dbg_lvl, struct net_device *dev);
static void dump_tx_desc(int dbg_lvl, struct net_device *dev, int i);
static void dump_rx_desc(int dbg_lvl, struct net_device *dev, int i);
static void dump_skb(int dbg_lvl, struct net_device *dev,
		     struct sk_buff *skb);
static void update_stats(struct gt96100_private *gp);
static void abort(struct net_device *dev, u32 abort_bits);
static void hard_stop(struct net_device *dev);
static void enable_ether_irq(struct net_device *dev);
static void disable_ether_irq(struct net_device *dev);
static int gt96100_probe1(struct pci_dev *pci, int port_num);
static void reset_tx(struct net_device *dev);
static void reset_rx(struct net_device *dev);
static int gt96100_check_tx_consistent(struct gt96100_private *gp);
static int gt96100_init(struct net_device *dev);
static int gt96100_open(struct net_device *dev);
static int gt96100_close(struct net_device *dev);
static int gt96100_tx(struct sk_buff *skb, struct net_device *dev);
static int gt96100_rx(struct net_device *dev, u32 status);
static irqreturn_t
gt96100_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static void gt96100_tx_timeout(struct net_device *dev);
static void gt96100_set_rx_mode(struct net_device *dev);
static struct net_device_stats* gt96100_get_stats(struct net_device *dev);

extern char * __init prom_getcmdline(void);

static int max_interrupt_work = 32;

#define nibswap(x) ((((x) >> 4) & 0x0f) | (((x) << 4) & 0xf0))

#define RUN_AT(x) (jiffies + (x))

// For reading/writing 32-bit words and half-words from/to DMA memory
#ifdef DESC_BE
#define cpu_to_dma32 cpu_to_be32
#define dma32_to_cpu be32_to_cpu
#define cpu_to_dma16 cpu_to_be16
#define dma16_to_cpu be16_to_cpu
#else
#define cpu_to_dma32 cpu_to_le32
#define dma32_to_cpu le32_to_cpu
#define cpu_to_dma16 cpu_to_le16
#define dma16_to_cpu le16_to_cpu
#endif

static char mac0[18] = "00.02.03.04.05.06";
static char mac1[18] = "00.01.02.03.04.05";
MODULE_PARM(mac0, "c18");
MODULE_PARM(mac1, "c18");
MODULE_PARM_DESC(mac0, "MAC address for GT96100 ethernet port 0");
MODULE_PARM_DESC(mac1, "MAC address for GT96100 ethernet port 1");

/*
 * Info for the GT96100 ethernet controller's ports.
 */
static struct gt96100_if_t {
	struct net_device *dev;
	unsigned int iobase;	// IO Base address of this port
	int irq;		// IRQ number of this port
	char *mac_str;
} gt96100_iflist[NUM_INTERFACES] = {
	{
		NULL,
		GT96100_ETH0_BASE, GT96100_ETHER0_IRQ,
		mac0
	},
	{
		NULL,
		GT96100_ETH1_BASE, GT96100_ETHER1_IRQ,
		mac1
	}
};

static inline const char*
chip_name(int chip_rev)
{
	switch (chip_rev) {
	case REV_GT96100:
		return "GT96100";
	case REV_GT96100A_1:
	case REV_GT96100A:
		return "GT96100A";
	default:
		return "Unknown GT96100";
	}
}

/*
  DMA memory allocation, derived from pci_alloc_consistent.
*/
static void * dmaalloc(size_t size, dma_addr_t *dma_handle)
{
	void *ret;

	ret = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, get_order(size));

	if (ret != NULL) {
		dma_cache_inv((unsigned long)ret, size);
		if (dma_handle != NULL)
			*dma_handle = virt_to_phys(ret);

		/* bump virtual address up to non-cached area */
		ret = (void*)KSEG1ADDR(ret);
	}

	return ret;
}

static void dmafree(size_t size, void *vaddr)
{
	vaddr = (void*)KSEG0ADDR(vaddr);
	free_pages((unsigned long)vaddr, get_order(size));
}

static void gt96100_delay(int ms)
{
	if (in_interrupt())
		return;
	else
		msleep_interruptible(ms);
}

static int
parse_mac_addr(struct net_device *dev, char* macstr)
{
	int i, j;
	unsigned char result, value;

	for (i=0; i<6; i++) {
		result = 0;
		if (i != 5 && *(macstr+2) != '.') {
			err(__FILE__ "invalid mac address format: %d %c\n",
			    i, *(macstr+2));
			return -EINVAL;
		}

		for (j=0; j<2; j++) {
			if (isxdigit(*macstr) &&
			    (value = isdigit(*macstr) ? *macstr-'0' :
			     toupper(*macstr)-'A'+10) < 16) {
				result = result*16 + value;
				macstr++;
			} else {
				err(__FILE__ "invalid mac address "
				    "character: %c\n", *macstr);
				return -EINVAL;
			}
		}

		macstr++; // step over '.'
		dev->dev_addr[i] = result;
	}

	return 0;
}


static int
read_MII(int phy_addr, u32 reg)
{
	int timedout = 20;
	u32 smir = smirOpCode | (phy_addr << smirPhyAdBit) |
		(reg << smirRegAdBit);

	// wait for last operation to complete
	while (GT96100_READ(GT96100_ETH_SMI_REG) & smirBusy) {
		// snooze for 1 msec and check again
		gt96100_delay(1);

		if (--timedout == 0) {
			printk(KERN_ERR "%s: busy timeout!!\n", __FUNCTION__);
			return -ENODEV;
		}
	}

	GT96100_WRITE(GT96100_ETH_SMI_REG, smir);

	timedout = 20;
	// wait for read to complete
	while (!((smir = GT96100_READ(GT96100_ETH_SMI_REG)) & smirReadValid)) {
		// snooze for 1 msec and check again
		gt96100_delay(1);

		if (--timedout == 0) {
			printk(KERN_ERR "%s: timeout!!\n", __FUNCTION__);
			return -ENODEV;
		}
	}

	return (int)(smir & smirDataMask);
}

static void
dump_tx_desc(int dbg_lvl, struct net_device *dev, int i)
{
	struct gt96100_private *gp = netdev_priv(dev);
	gt96100_td_t *td = &gp->tx_ring[i];

	dbg(dbg_lvl, "Tx descriptor at 0x%08lx:\n", virt_to_phys(td));
	dbg(dbg_lvl,
	    " cmdstat=%04x, byte_cnt=%04x, buff_ptr=%04x, next=%04x\n",
	    dma32_to_cpu(td->cmdstat),
	    dma16_to_cpu(td->byte_cnt),
	    dma32_to_cpu(td->buff_ptr),
	    dma32_to_cpu(td->next));
}

static void
dump_rx_desc(int dbg_lvl, struct net_device *dev, int i)
{
	struct gt96100_private *gp = netdev_priv(dev);
	gt96100_rd_t *rd = &gp->rx_ring[i];

	dbg(dbg_lvl, "Rx descriptor at 0x%08lx:\n", virt_to_phys(rd));
	dbg(dbg_lvl, " cmdstat=%04x, buff_sz=%04x, byte_cnt=%04x, "
	    "buff_ptr=%04x, next=%04x\n",
	    dma32_to_cpu(rd->cmdstat),
	    dma16_to_cpu(rd->buff_sz),
	    dma16_to_cpu(rd->byte_cnt),
	    dma32_to_cpu(rd->buff_ptr),
	    dma32_to_cpu(rd->next));
}

static int
write_MII(int phy_addr, u32 reg, u16 data)
{
	int timedout = 20;
	u32 smir = (phy_addr << smirPhyAdBit) |
		(reg << smirRegAdBit) | data;

	// wait for last operation to complete
	while (GT96100_READ(GT96100_ETH_SMI_REG) & smirBusy) {
		// snooze for 1 msec and check again
		gt96100_delay(1);

		if (--timedout == 0) {
			printk(KERN_ERR "%s: busy timeout!!\n", __FUNCTION__);
			return -1;
		}
	}

	GT96100_WRITE(GT96100_ETH_SMI_REG, smir);
	return 0;
}

static void
dump_MII(int dbg_lvl, struct net_device *dev)
{
	int i, val;
	struct gt96100_private *gp = netdev_priv(dev);

	if (dbg_lvl <= GT96100_DEBUG) {
		for (i=0; i<7; i++) {
			if ((val = read_MII(gp->phy_addr, i)) >= 0)
				printk("MII Reg %d=%x\n", i, val);
		}
		for (i=16; i<21; i++) {
			if ((val = read_MII(gp->phy_addr, i)) >= 0)
				printk("MII Reg %d=%x\n", i, val);
		}
	}
}

static void
dump_hw_addr(int dbg_lvl, struct net_device *dev, const char* pfx,
	     const char* func, unsigned char* addr_str)
{
	int i;
	char buf[100], octet[5];

	if (dbg_lvl <= GT96100_DEBUG) {
		sprintf(buf, pfx, func);
		for (i = 0; i < 6; i++) {
			sprintf(octet, "%2.2x%s",
				addr_str[i], i<5 ? ":" : "\n");
			strcat(buf, octet);
		}
		info("%s", buf);
	}
}


static void
dump_skb(int dbg_lvl, struct net_device *dev, struct sk_buff *skb)
{
	int i;
	unsigned char* skbdata;

	if (dbg_lvl <= GT96100_DEBUG) {
		dbg(dbg_lvl, "%s: skb=%p, skb->data=%p, skb->len=%d\n",
		    __FUNCTION__, skb, skb->data, skb->len);

		skbdata = (unsigned char*)KSEG1ADDR(skb->data);

		for (i=0; i<skb->len; i++) {
			if (!(i % 16))
				printk(KERN_DEBUG "\n %3.3x: %2.2x,",
				       i, skbdata[i]);
			else
				printk(KERN_DEBUG "%2.2x,", skbdata[i]);
		}
		printk(KERN_DEBUG "\n");
	}
}


static int
gt96100_add_hash_entry(struct net_device *dev, unsigned char* addr)
{
	struct gt96100_private *gp = netdev_priv(dev);
	//u16 hashResult, stmp;
	//unsigned char ctmp, hash_ea[6];
	u32 tblEntry1, tblEntry0, *tblEntryAddr;
	int i;

	tblEntry1 = hteValid | hteRD;
	tblEntry1 |= (u32)addr[5] << 3;
	tblEntry1 |= (u32)addr[4] << 11;
	tblEntry1 |= (u32)addr[3] << 19;
	tblEntry1 |= ((u32)addr[2] & 0x1f) << 27;
	dbg(3, "%s: tblEntry1=%x\n", __FUNCTION__, tblEntry1);
	tblEntry0 = ((u32)addr[2] >> 5) & 0x07;
	tblEntry0 |= (u32)addr[1] << 3;
	tblEntry0 |= (u32)addr[0] << 11;
	dbg(3, "%s: tblEntry0=%x\n", __FUNCTION__, tblEntry0);

#if 0

	for (i=0; i<6; i++) {
		// nibble swap
		ctmp = nibswap(addr[i]);
		// invert every nibble
		hash_ea[i] = ((ctmp&1)<<3) | ((ctmp&8)>>3) |
			((ctmp&2)<<1) | ((ctmp&4)>>1);
		hash_ea[i] |= ((ctmp&0x10)<<3) | ((ctmp&0x80)>>3) |
			((ctmp&0x20)<<1) | ((ctmp&0x40)>>1);
	}

	dump_hw_addr(3, dev, "%s: nib swap/invt addr=", __FUNCTION__, hash_ea);

	if (gp->hash_mode == 0) {
		hashResult = ((u16)hash_ea[0] & 0xfc) << 7;
		stmp = ((u16)hash_ea[0] & 0x03) |
			(((u16)hash_ea[1] & 0x7f) << 2);
		stmp ^= (((u16)hash_ea[1] >> 7) & 0x01) |
			((u16)hash_ea[2] << 1);
		stmp ^= (u16)hash_ea[3] | (((u16)hash_ea[4] & 1) << 8);
		hashResult |= stmp;
	} else {
		return -1; // don't support hash mode 1
	}

	dbg(3, "%s: hashResult=%x\n", __FUNCTION__, hashResult);

	tblEntryAddr =
		(u32 *)(&gp->hash_table[((u32)hashResult & 0x7ff) << 3]);

	dbg(3, "%s: tblEntryAddr=%p\n", tblEntryAddr, __FUNCTION__);

	for (i=0; i<HASH_HOP_NUMBER; i++) {
		if ((*tblEntryAddr & hteValid) &&
		    !(*tblEntryAddr & hteSkip)) {
			// This entry is already occupied, go to next entry
			tblEntryAddr += 2;
			dbg(3, "%s: skipping to %p\n", __FUNCTION__,
			    tblEntryAddr);
		} else {
			memset(tblEntryAddr, 0, 8);
			tblEntryAddr[1] = cpu_to_dma32(tblEntry1);
			tblEntryAddr[0] = cpu_to_dma32(tblEntry0);
			break;
		}
	}

	if (i >= HASH_HOP_NUMBER) {
		err("%s: expired!\n", __FUNCTION__);
		return -1; // Couldn't find an unused entry
	}

#else

	tblEntryAddr = (u32 *)gp->hash_table;
	for (i=0; i<RX_HASH_TABLE_SIZE/4; i+=2) {
		tblEntryAddr[i+1] = cpu_to_dma32(tblEntry1);
		tblEntryAddr[i] = cpu_to_dma32(tblEntry0);
	}

#endif

	return 0;
}


static void
read_mib_counters(struct gt96100_private *gp)
{
	u32* mib_regs = (u32*)&gp->mib;
	int i;

	for (i=0; i<sizeof(mib_counters_t)/sizeof(u32); i++)
		mib_regs[i] = GT96100ETH_READ(gp, GT96100_ETH_MIB_COUNT_BASE +
					      i*sizeof(u32));
}


static void
update_stats(struct gt96100_private *gp)
{
	mib_counters_t *mib = &gp->mib;
	struct net_device_stats *stats = &gp->stats;

	read_mib_counters(gp);

	stats->rx_packets = mib->totalFramesReceived;
	stats->tx_packets = mib->framesSent;
	stats->rx_bytes = mib->totalByteReceived;
	stats->tx_bytes = mib->byteSent;
	stats->rx_errors = mib->totalFramesReceived - mib->framesReceived;
	//the tx error counters are incremented by the ISR
	//rx_dropped incremented by gt96100_rx
	//tx_dropped incremented by gt96100_tx
	stats->multicast = mib->multicastFramesReceived;
	// collisions incremented by gt96100_tx_complete
	stats->rx_length_errors = mib->oversizeFrames + mib->fragments;
	// The RxError condition means the Rx DMA encountered a
	// CPU owned descriptor, which, if things are working as
	// they should, means the Rx ring has overflowed.
	stats->rx_over_errors = mib->macRxError;
	stats->rx_crc_errors = mib->cRCError;
}

static void
abort(struct net_device *dev, u32 abort_bits)
{
	struct gt96100_private *gp = netdev_priv(dev);
	int timedout = 100; // wait up to 100 msec for hard stop to complete

	dbg(3, "%s\n", __FUNCTION__);

	// Return if neither Rx or Tx abort bits are set
	if (!(abort_bits & (sdcmrAR | sdcmrAT)))
		return;

	// make sure only the Rx/Tx abort bits are set
	abort_bits &= (sdcmrAR | sdcmrAT);

	spin_lock(&gp->lock);

	// abort any Rx/Tx DMA immediately
	GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_COMM, abort_bits);

	dbg(3, "%s: SDMA comm = %x\n", __FUNCTION__,
	    GT96100ETH_READ(gp, GT96100_ETH_SDMA_COMM));

	// wait for abort to complete
	while (GT96100ETH_READ(gp, GT96100_ETH_SDMA_COMM) & abort_bits) {
		// snooze for 1 msec and check again
		gt96100_delay(1);

		if (--timedout == 0) {
			err("%s: timeout!!\n", __FUNCTION__);
			break;
		}
	}

	spin_unlock(&gp->lock);
}


static void
hard_stop(struct net_device *dev)
{
	struct gt96100_private *gp = netdev_priv(dev);

	dbg(3, "%s\n", __FUNCTION__);

	disable_ether_irq(dev);

	abort(dev, sdcmrAR | sdcmrAT);

	// disable port
	GT96100ETH_WRITE(gp, GT96100_ETH_PORT_CONFIG, 0);
}


static void
enable_ether_irq(struct net_device *dev)
{
	struct gt96100_private *gp = netdev_priv(dev);
	u32 intMask;
	/*
	 * route ethernet interrupt to GT_SERINT0 for port 0,
	 * GT_INT0 for port 1.
	 */
	int intr_mask_reg = (gp->port_num == 0) ?
		GT96100_SERINT0_MASK : GT96100_INT0_HIGH_MASK;

	if (gp->chip_rev >= REV_GT96100A_1) {
		intMask = icrTxBufferLow | icrTxEndLow |
			icrTxErrorLow | icrRxOVR | icrTxUdr |
			icrRxBufferQ0 | icrRxErrorQ0 |
			icrMIIPhySTC | icrEtherIntSum;
	}
	else {
		intMask = icrTxBufferLow | icrTxEndLow |
			icrTxErrorLow | icrRxOVR | icrTxUdr |
			icrRxBuffer | icrRxError |
			icrMIIPhySTC | icrEtherIntSum;
	}

	// unmask interrupts
	GT96100ETH_WRITE(gp, GT96100_ETH_INT_MASK, intMask);

	intMask = GT96100_READ(intr_mask_reg);
	intMask |= 1<<gp->port_num;
	GT96100_WRITE(intr_mask_reg, intMask);
}

static void
disable_ether_irq(struct net_device *dev)
{
	struct gt96100_private *gp = netdev_priv(dev);
	u32 intMask;
	int intr_mask_reg = (gp->port_num == 0) ?
		GT96100_SERINT0_MASK : GT96100_INT0_HIGH_MASK;

	intMask = GT96100_READ(intr_mask_reg);
	intMask &= ~(1<<gp->port_num);
	GT96100_WRITE(intr_mask_reg, intMask);

	GT96100ETH_WRITE(gp, GT96100_ETH_INT_MASK, 0);
}


/*
 * Init GT96100 ethernet controller driver
 */
static int gt96100_init_module(void)
{
	struct pci_dev *pci;
	int i, retval=0;
	u32 cpuConfig;

	/*
	 * Stupid probe because this really isn't a PCI device
	 */
	if (!(pci = pci_find_device(PCI_VENDOR_ID_MARVELL,
				    PCI_DEVICE_ID_MARVELL_GT96100, NULL)) &&
	    !(pci = pci_find_device(PCI_VENDOR_ID_MARVELL,
				    PCI_DEVICE_ID_MARVELL_GT96100A, NULL))) {
		printk(KERN_ERR __FILE__ ": GT96100 not found!\n");
		return -ENODEV;
	}

	cpuConfig = GT96100_READ(GT96100_CPU_INTERF_CONFIG);
	if (cpuConfig & (1<<12)) {
		printk(KERN_ERR __FILE__
		       ": must be in Big Endian mode!\n");
		return -ENODEV;
	}

	for (i=0; i < NUM_INTERFACES; i++)
		retval |= gt96100_probe1(pci, i);

	return retval;
}

static int __init gt96100_probe1(struct pci_dev *pci, int port_num)
{
	struct gt96100_private *gp = NULL;
	struct gt96100_if_t *gtif = &gt96100_iflist[port_num];
	int phy_addr, phy_id1, phy_id2;
	u32 phyAD;
	int retval;
	unsigned char chip_rev;
	struct net_device *dev = NULL;

	if (gtif->irq < 0) {
		printk(KERN_ERR "%s: irq unknown - probing not supported\n",
		       __FUNCTION__);
		return -ENODEV;
	}

	pci_read_config_byte(pci, PCI_REVISION_ID, &chip_rev);

	if (chip_rev >= REV_GT96100A_1) {
		phyAD = GT96100_READ(GT96100_ETH_PHY_ADDR_REG);
		phy_addr = (phyAD >> (5*port_num)) & 0x1f;
	} else {
		/*
		 * not sure what's this about -- probably a gt bug
		 */
		phy_addr = port_num;
		phyAD = GT96100_READ(GT96100_ETH_PHY_ADDR_REG);
		phyAD &= ~(0x1f << (port_num*5));
		phyAD |= phy_addr << (port_num*5);
		GT96100_WRITE(GT96100_ETH_PHY_ADDR_REG, phyAD);
	}

	// probe for the external PHY
	if ((phy_id1 = read_MII(phy_addr, 2)) <= 0 ||
	    (phy_id2 = read_MII(phy_addr, 3)) <= 0) {
		printk(KERN_ERR "%s: no PHY found on MII%d\n", __FUNCTION__, port_num);
		return -ENODEV;
	}

	if (!request_region(gtif->iobase, GT96100_ETH_IO_SIZE, "GT96100ETH")) {
		printk(KERN_ERR "%s: request_region failed\n", __FUNCTION__);
		return -EBUSY;
	}

	dev = alloc_etherdev(sizeof(struct gt96100_private));
	if (!dev)
		goto out;
	gtif->dev = dev;

	/* private struct aligned and zeroed by alloc_etherdev */
	/* Fill in the 'dev' fields.
	 */
	dev->base_addr = gtif->iobase;
	dev->irq = gtif->irq;

	if ((retval = parse_mac_addr(dev, gtif->mac_str))) {
		err("%s: MAC address parse failed\n", __FUNCTION__);
		retval = -EINVAL;
		goto out1;
	}

	gp = netdev_priv(dev);

	memset(gp, 0, sizeof(*gp)); // clear it

	gp->port_num = port_num;
	gp->io_size = GT96100_ETH_IO_SIZE;
	gp->port_offset = port_num * GT96100_ETH_IO_SIZE;
	gp->phy_addr = phy_addr;
	gp->chip_rev = chip_rev;

	info("%s found at 0x%x, irq %d\n",
	     chip_name(gp->chip_rev), gtif->iobase, gtif->irq);
	dump_hw_addr(0, dev, "%s: HW Address ", __FUNCTION__, dev->dev_addr);
	info("%s chip revision=%d\n", chip_name(gp->chip_rev), gp->chip_rev);
	info("%s ethernet port %d\n", chip_name(gp->chip_rev), gp->port_num);
	info("external PHY ID1=0x%04x, ID2=0x%04x\n", phy_id1, phy_id2);

	// Allocate Rx and Tx descriptor rings
	if (gp->rx_ring == NULL) {
		// All descriptors in ring must be 16-byte aligned
		gp->rx_ring = dmaalloc(sizeof(gt96100_rd_t) * RX_RING_SIZE
				       + sizeof(gt96100_td_t) * TX_RING_SIZE,
				       &gp->rx_ring_dma);
		if (gp->rx_ring == NULL) {
			retval = -ENOMEM;
			goto out1;
		}

		gp->tx_ring = (gt96100_td_t *)(gp->rx_ring + RX_RING_SIZE);
		gp->tx_ring_dma =
			gp->rx_ring_dma + sizeof(gt96100_rd_t) * RX_RING_SIZE;
	}

	// Allocate the Rx Data Buffers
	if (gp->rx_buff == NULL) {
		gp->rx_buff = dmaalloc(PKT_BUF_SZ*RX_RING_SIZE,
				       &gp->rx_buff_dma);
		if (gp->rx_buff == NULL) {
			retval = -ENOMEM;
			goto out2;
		}
	}

	dbg(3, "%s: rx_ring=%p, tx_ring=%p\n", __FUNCTION__,
	    gp->rx_ring, gp->tx_ring);

	// Allocate Rx Hash Table
	if (gp->hash_table == NULL) {
		gp->hash_table = (char*)dmaalloc(RX_HASH_TABLE_SIZE,
						 &gp->hash_table_dma);
		if (gp->hash_table == NULL) {
			retval = -ENOMEM;
			goto out3;
		}
	}

	dbg(3, "%s: hash=%p\n", __FUNCTION__, gp->hash_table);

	spin_lock_init(&gp->lock);

	dev->open = gt96100_open;
	dev->hard_start_xmit = gt96100_tx;
	dev->stop = gt96100_close;
	dev->get_stats = gt96100_get_stats;
	//dev->do_ioctl = gt96100_ioctl;
	dev->set_multicast_list = gt96100_set_rx_mode;
	dev->tx_timeout = gt96100_tx_timeout;
	dev->watchdog_timeo = GT96100ETH_TX_TIMEOUT;

	retval = register_netdev(dev);
	if (retval)
		goto out4;
	return 0;

out4:
	dmafree(RX_HASH_TABLE_SIZE, gp->hash_table_dma);
out3:
	dmafree(PKT_BUF_SZ*RX_RING_SIZE, gp->rx_buff);
out2:
	dmafree(sizeof(gt96100_rd_t) * RX_RING_SIZE
		+ sizeof(gt96100_td_t) * TX_RING_SIZE,
		gp->rx_ring);
out1:
	free_netdev (dev);
out:
	release_region(gtif->iobase, GT96100_ETH_IO_SIZE);

	err("%s failed. Returns %d\n", __FUNCTION__, retval);
	return retval;
}


static void
reset_tx(struct net_device *dev)
{
	struct gt96100_private *gp = netdev_priv(dev);
	int i;

	abort(dev, sdcmrAT);

	for (i=0; i<TX_RING_SIZE; i++) {
		if (gp->tx_skbuff[i]) {
			if (in_interrupt())
				dev_kfree_skb_irq(gp->tx_skbuff[i]);
			else
				dev_kfree_skb(gp->tx_skbuff[i]);
			gp->tx_skbuff[i] = NULL;
		}

		gp->tx_ring[i].cmdstat = 0; // CPU owns
		gp->tx_ring[i].byte_cnt = 0;
		gp->tx_ring[i].buff_ptr = 0;
		gp->tx_ring[i].next =
			cpu_to_dma32(gp->tx_ring_dma +
				     sizeof(gt96100_td_t) * (i+1));
		dump_tx_desc(4, dev, i);
	}
	/* Wrap the ring.
	 */
	gp->tx_ring[i-1].next = cpu_to_dma32(gp->tx_ring_dma);

	// setup only the lowest priority TxCDP reg
	GT96100ETH_WRITE(gp, GT96100_ETH_CURR_TX_DESC_PTR0, gp->tx_ring_dma);
	GT96100ETH_WRITE(gp, GT96100_ETH_CURR_TX_DESC_PTR1, 0);

	// init Tx indeces and pkt counter
	gp->tx_next_in = gp->tx_next_out = 0;
	gp->tx_count = 0;

}

static void
reset_rx(struct net_device *dev)
{
	struct gt96100_private *gp = netdev_priv(dev);
	int i;

	abort(dev, sdcmrAR);

	for (i=0; i<RX_RING_SIZE; i++) {
		gp->rx_ring[i].next =
			cpu_to_dma32(gp->rx_ring_dma +
				     sizeof(gt96100_rd_t) * (i+1));
		gp->rx_ring[i].buff_ptr =
			cpu_to_dma32(gp->rx_buff_dma + i*PKT_BUF_SZ);
		gp->rx_ring[i].buff_sz = cpu_to_dma16(PKT_BUF_SZ);
		// Give ownership to device, set first and last, enable intr
		gp->rx_ring[i].cmdstat =
			cpu_to_dma32((u32)(rxFirst | rxLast | rxOwn | rxEI));
		dump_rx_desc(4, dev, i);
	}
	/* Wrap the ring. */
	gp->rx_ring[i-1].next = cpu_to_dma32(gp->rx_ring_dma);

	// Setup only the lowest priority RxFDP and RxCDP regs
	for (i=0; i<4; i++) {
		if (i == 0) {
			GT96100ETH_WRITE(gp, GT96100_ETH_1ST_RX_DESC_PTR0,
					 gp->rx_ring_dma);
			GT96100ETH_WRITE(gp, GT96100_ETH_CURR_RX_DESC_PTR0,
					 gp->rx_ring_dma);
		} else {
			GT96100ETH_WRITE(gp,
					 GT96100_ETH_1ST_RX_DESC_PTR0 + i*4,
					 0);
			GT96100ETH_WRITE(gp,
					 GT96100_ETH_CURR_RX_DESC_PTR0 + i*4,
					 0);
		}
	}

	// init Rx NextOut index
	gp->rx_next_out = 0;
}


// Returns 1 if the Tx counter and indeces don't gel
static int
gt96100_check_tx_consistent(struct gt96100_private *gp)
{
	int diff = gp->tx_next_in - gp->tx_next_out;

	diff = diff<0 ? TX_RING_SIZE + diff : diff;
	diff = gp->tx_count == TX_RING_SIZE ? diff + TX_RING_SIZE : diff;

	return (diff != gp->tx_count);
}

static int
gt96100_init(struct net_device *dev)
{
	struct gt96100_private *gp = netdev_priv(dev);
	u32 tmp;
	u16 mii_reg;

	dbg(3, "%s: dev=%p\n", __FUNCTION__, dev);
	dbg(3, "%s: scs10_lo=%4x, scs10_hi=%4x\n", __FUNCTION__,
	    GT96100_READ(0x8), GT96100_READ(0x10));
	dbg(3, "%s: scs32_lo=%4x, scs32_hi=%4x\n", __FUNCTION__,
	    GT96100_READ(0x18), GT96100_READ(0x20));

	// Stop and disable Port
	hard_stop(dev);

	// Setup CIU Arbiter
	tmp = GT96100_READ(GT96100_CIU_ARBITER_CONFIG);
	tmp |= (0x0c << (gp->port_num*2)); // set Ether DMA req priority to hi
#ifndef DESC_BE
	tmp &= ~(1<<31); // set desc endianess to little
#else
	tmp |= (1<<31);
#endif
	GT96100_WRITE(GT96100_CIU_ARBITER_CONFIG, tmp);
	dbg(3, "%s: CIU Config=%x/%x\n", __FUNCTION__,
	    tmp, GT96100_READ(GT96100_CIU_ARBITER_CONFIG));

	// Set routing.
	tmp = GT96100_READ(GT96100_ROUTE_MAIN) & (0x3f << 18);
	tmp |= (0x07 << (18 + gp->port_num*3));
	GT96100_WRITE(GT96100_ROUTE_MAIN, tmp);

	/* set MII as peripheral func */
	tmp = GT96100_READ(GT96100_GPP_CONFIG2);
	tmp |= 0x7fff << (gp->port_num*16);
	GT96100_WRITE(GT96100_GPP_CONFIG2, tmp);

	/* Set up MII port pin directions */
	tmp = GT96100_READ(GT96100_GPP_IO2);
	tmp |= 0x003d << (gp->port_num*16);
	GT96100_WRITE(GT96100_GPP_IO2, tmp);

	// Set-up hash table
	memset(gp->hash_table, 0, RX_HASH_TABLE_SIZE); // clear it
	gp->hash_mode = 0;
	// Add a single entry to hash table - our ethernet address
	gt96100_add_hash_entry(dev, dev->dev_addr);
	// Set-up DMA ptr to hash table
	GT96100ETH_WRITE(gp, GT96100_ETH_HASH_TBL_PTR, gp->hash_table_dma);
	dbg(3, "%s: Hash Tbl Ptr=%x\n", __FUNCTION__,
	    GT96100ETH_READ(gp, GT96100_ETH_HASH_TBL_PTR));

	// Setup Tx
	reset_tx(dev);

	dbg(3, "%s: Curr Tx Desc Ptr0=%x\n", __FUNCTION__,
	    GT96100ETH_READ(gp, GT96100_ETH_CURR_TX_DESC_PTR0));

	// Setup Rx
	reset_rx(dev);

	dbg(3, "%s: 1st/Curr Rx Desc Ptr0=%x/%x\n", __FUNCTION__,
	    GT96100ETH_READ(gp, GT96100_ETH_1ST_RX_DESC_PTR0),
	    GT96100ETH_READ(gp, GT96100_ETH_CURR_RX_DESC_PTR0));

	// eth port config register
	GT96100ETH_WRITE(gp, GT96100_ETH_PORT_CONFIG_EXT,
			 pcxrFCTL | pcxrFCTLen | pcxrFLP | pcxrDPLXen);

	mii_reg = read_MII(gp->phy_addr, 0x11); /* int enable register */
	mii_reg |= 2; /* enable mii interrupt */
	write_MII(gp->phy_addr, 0x11, mii_reg);

	dbg(3, "%s: PhyAD=%x\n", __FUNCTION__,
	    GT96100_READ(GT96100_ETH_PHY_ADDR_REG));

	// setup DMA

	// We want the Rx/Tx DMA to write/read data to/from memory in
	// Big Endian mode. Also set DMA Burst Size to 8 64Bit words.
#ifdef DESC_DATA_BE
	GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_CONFIG,
			 (0xf<<sdcrRCBit) | sdcrRIFB | (3<<sdcrBSZBit));
#else
	GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_CONFIG,
			 sdcrBLMR | sdcrBLMT |
			 (0xf<<sdcrRCBit) | sdcrRIFB | (3<<sdcrBSZBit));
#endif
	dbg(3, "%s: SDMA Config=%x\n", __FUNCTION__,
	    GT96100ETH_READ(gp, GT96100_ETH_SDMA_CONFIG));

	// start Rx DMA
	GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_COMM, sdcmrERD);
	dbg(3, "%s: SDMA Comm=%x\n", __FUNCTION__,
	    GT96100ETH_READ(gp, GT96100_ETH_SDMA_COMM));

	// enable this port (set hash size to 1/2K)
	GT96100ETH_WRITE(gp, GT96100_ETH_PORT_CONFIG, pcrEN | pcrHS);
	dbg(3, "%s: Port Config=%x\n", __FUNCTION__,
	    GT96100ETH_READ(gp, GT96100_ETH_PORT_CONFIG));

	/*
	 * Disable all Type-of-Service queueing. All Rx packets will be
	 * treated normally and will be sent to the lowest priority
	 * queue.
	 *
	 * Disable flow-control for now. FIXME: support flow control?
	 */

	// clear all the MIB ctr regs
	GT96100ETH_WRITE(gp, GT96100_ETH_PORT_CONFIG_EXT,
			 pcxrFCTL | pcxrFCTLen | pcxrFLP |
			 pcxrPRIOrxOverride);
	read_mib_counters(gp);
	GT96100ETH_WRITE(gp, GT96100_ETH_PORT_CONFIG_EXT,
			 pcxrFCTL | pcxrFCTLen | pcxrFLP |
			 pcxrPRIOrxOverride | pcxrMIBclrMode);

	dbg(3, "%s: Port Config Ext=%x\n", __FUNCTION__,
	    GT96100ETH_READ(gp, GT96100_ETH_PORT_CONFIG_EXT));

	netif_start_queue(dev);

	dump_MII(4, dev);

	// enable interrupts
	enable_ether_irq(dev);

	// we should now be receiving frames
	return 0;
}


static int
gt96100_open(struct net_device *dev)
{
	int retval;

	dbg(2, "%s: dev=%p\n", __FUNCTION__, dev);

	// Initialize and startup the GT-96100 ethernet port
	if ((retval = gt96100_init(dev))) {
		err("error in gt96100_init\n");
		free_irq(dev->irq, dev);
		return retval;
	}

	if ((retval = request_irq(dev->irq, &gt96100_interrupt,
				  SA_SHIRQ, dev->name, dev))) {
		err("unable to get IRQ %d\n", dev->irq);
		return retval;
	}

	dbg(2, "%s: Initialization done.\n", __FUNCTION__);

	return 0;
}

static int
gt96100_close(struct net_device *dev)
{
	dbg(3, "%s: dev=%p\n", __FUNCTION__, dev);

	// stop the device
	if (netif_device_present(dev)) {
		netif_stop_queue(dev);
		hard_stop(dev);
	}

	free_irq(dev->irq, dev);

	return 0;
}


static int
gt96100_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct gt96100_private *gp = netdev_priv(dev);
	unsigned long flags;
	int nextIn;

	spin_lock_irqsave(&gp->lock, flags);

	nextIn = gp->tx_next_in;

	dbg(3, "%s: nextIn=%d\n", __FUNCTION__, nextIn);

	if (gp->tx_count >= TX_RING_SIZE) {
		warn("Tx Ring full, pkt dropped.\n");
		gp->stats.tx_dropped++;
		spin_unlock_irqrestore(&gp->lock, flags);
		return 1;
	}

	if (!(gp->last_psr & psrLink)) {
		err("%s: Link down, pkt dropped.\n", __FUNCTION__);
		gp->stats.tx_dropped++;
		spin_unlock_irqrestore(&gp->lock, flags);
		return 1;
	}

	if (dma32_to_cpu(gp->tx_ring[nextIn].cmdstat) & txOwn) {
		err("%s: device owns descriptor, pkt dropped.\n", __FUNCTION__);
		gp->stats.tx_dropped++;
		// stop the queue, so Tx timeout can fix it
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&gp->lock, flags);
		return 1;
	}

	// Prepare the Descriptor at tx_next_in
	gp->tx_skbuff[nextIn] = skb;
	gp->tx_ring[nextIn].byte_cnt = cpu_to_dma16(skb->len);
	gp->tx_ring[nextIn].buff_ptr = cpu_to_dma32(virt_to_phys(skb->data));
	// make sure packet gets written back to memory
	dma_cache_wback_inv((unsigned long)(skb->data), skb->len);
	// Give ownership to device, set first and last desc, enable interrupt
	// Setting of ownership bit must be *last*!
	gp->tx_ring[nextIn].cmdstat =
		cpu_to_dma32((u32)(txOwn | txGenCRC | txEI |
				   txPad | txFirst | txLast));

	dump_tx_desc(4, dev, nextIn);
	dump_skb(4, dev, skb);

	// increment tx_next_in with wrap
	gp->tx_next_in = (nextIn + 1) % TX_RING_SIZE;
	// If DMA is stopped, restart
	if (!(GT96100ETH_READ(gp, GT96100_ETH_PORT_STATUS) & psrTxLow))
		GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_COMM,
				 sdcmrERD | sdcmrTXDL);

	// increment count and stop queue if full
	if (++gp->tx_count == TX_RING_SIZE) {
		gp->tx_full = 1;
		netif_stop_queue(dev);
		dbg(2, "Tx Ring now full, queue stopped.\n");
	}

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&gp->lock, flags);

	return 0;
}


static int
gt96100_rx(struct net_device *dev, u32 status)
{
	struct gt96100_private *gp = netdev_priv(dev);
	struct sk_buff *skb;
	int pkt_len, nextOut, cdp;
	gt96100_rd_t *rd;
	u32 cmdstat;

	dbg(3, "%s: dev=%p, status=%x\n", __FUNCTION__, dev, status);

	cdp = (GT96100ETH_READ(gp, GT96100_ETH_1ST_RX_DESC_PTR0)
	       - gp->rx_ring_dma) / sizeof(gt96100_rd_t);

	// Continue until we reach 1st descriptor pointer
	for (nextOut = gp->rx_next_out; nextOut != cdp;
	     nextOut = (nextOut + 1) % RX_RING_SIZE) {

		if (--gp->intr_work_done == 0)
			break;

		rd = &gp->rx_ring[nextOut];
		cmdstat = dma32_to_cpu(rd->cmdstat);

		dbg(4, "%s: Rx desc cmdstat=%x, nextOut=%d\n", __FUNCTION__,
		    cmdstat, nextOut);

		if (cmdstat & (u32)rxOwn) {
			//err("%s: device owns descriptor!\n", __FUNCTION__);
			// DMA is not finished updating descriptor???
			// Leave and come back later to pick-up where
			// we left off.
			break;
		}

		// Drop this received pkt if there were any errors
		if (((cmdstat & (u32)(rxErrorSummary)) &&
		     (cmdstat & (u32)(rxFirst))) || (status & icrRxError)) {
			// update the detailed rx error counters that
			// are not covered by the MIB counters.
			if (cmdstat & (u32)rxOverrun)
				gp->stats.rx_fifo_errors++;
			cmdstat |= (u32)rxOwn;
			rd->cmdstat = cpu_to_dma32(cmdstat);
			continue;
		}

		/*
		 * Must be first and last (ie only) descriptor of packet. We
		 * ignore (drop) any packets that do not fit in one descriptor.
		 * Every descriptor's receive buffer is large enough to hold
		 * the maximum 802.3 frame size, so a multi-descriptor packet
		 * indicates an error. Most if not all corrupted packets will
		 * have already been dropped by the above check for the
		 * rxErrorSummary status bit.
		 */
		if (!(cmdstat & (u32)rxFirst) || !(cmdstat & (u32)rxLast)) {
			if (cmdstat & (u32)rxFirst) {
				/*
				 * This is the first descriptor of a
				 * multi-descriptor packet. It isn't corrupted
				 * because the above check for rxErrorSummary
				 * would have dropped it already, so what's
				 * the deal with this packet? Good question,
				 * let's dump it out.
				 */
				err("%s: desc not first and last!\n", __FUNCTION__);
				dump_rx_desc(0, dev, nextOut);
			}
			cmdstat |= (u32)rxOwn;
			rd->cmdstat = cpu_to_dma32(cmdstat);
			// continue to drop every descriptor of this packet
			continue;
		}

		pkt_len = dma16_to_cpu(rd->byte_cnt);

		/* Create new skb.
		 */
		skb = dev_alloc_skb(pkt_len+2);
		if (skb == NULL) {
			err("%s: Memory squeeze, dropping packet.\n", __FUNCTION__);
			gp->stats.rx_dropped++;
			cmdstat |= (u32)rxOwn;
			rd->cmdstat = cpu_to_dma32(cmdstat);
			continue;
		}
		skb->dev = dev;
		skb_reserve(skb, 2); /* 16 byte IP header align */
		memcpy(skb_put(skb, pkt_len),
		       &gp->rx_buff[nextOut*PKT_BUF_SZ], pkt_len);
		skb->protocol = eth_type_trans(skb, dev);
		dump_skb(4, dev, skb);

		netif_rx(skb); /* pass the packet to upper layers */
		dev->last_rx = jiffies;

		// now we can release ownership of this desc back to device
		cmdstat |= (u32)rxOwn;
		rd->cmdstat = cpu_to_dma32(cmdstat);
	}

	if (nextOut == gp->rx_next_out)
		dbg(3, "%s: RxCDP did not increment?\n", __FUNCTION__);

	gp->rx_next_out = nextOut;
	return 0;
}


static void
gt96100_tx_complete(struct net_device *dev, u32 status)
{
	struct gt96100_private *gp = netdev_priv(dev);
	int nextOut, cdp;
	gt96100_td_t *td;
	u32 cmdstat;

	cdp = (GT96100ETH_READ(gp, GT96100_ETH_CURR_TX_DESC_PTR0)
	       - gp->tx_ring_dma) / sizeof(gt96100_td_t);

	// Continue until we reach the current descriptor pointer
	for (nextOut = gp->tx_next_out; nextOut != cdp;
	     nextOut = (nextOut + 1) % TX_RING_SIZE) {

		if (--gp->intr_work_done == 0)
			break;

		td = &gp->tx_ring[nextOut];
		cmdstat = dma32_to_cpu(td->cmdstat);

		dbg(3, "%s: Tx desc cmdstat=%x, nextOut=%d\n", __FUNCTION__,
		    cmdstat, nextOut);

		if (cmdstat & (u32)txOwn) {
			/*
			 * DMA is not finished writing descriptor???
			 * Leave and come back later to pick-up where
			 * we left off.
			 */
			break;
		}

		// increment Tx error stats
		if (cmdstat & (u32)txErrorSummary) {
			dbg(2, "%s: Tx error, cmdstat = %x\n", __FUNCTION__,
			    cmdstat);
			gp->stats.tx_errors++;
			if (cmdstat & (u32)txReTxLimit)
				gp->stats.tx_aborted_errors++;
			if (cmdstat & (u32)txUnderrun)
				gp->stats.tx_fifo_errors++;
			if (cmdstat & (u32)txLateCollision)
				gp->stats.tx_window_errors++;
		}

		if (cmdstat & (u32)txCollision)
			gp->stats.collisions +=
				(u32)((cmdstat & txReTxCntMask) >>
				      txReTxCntBit);

		// Wake the queue if the ring was full
		if (gp->tx_full) {
			gp->tx_full = 0;
			if (gp->last_psr & psrLink) {
				netif_wake_queue(dev);
				dbg(2, "%s: Tx Ring was full, queue waked\n",
				    __FUNCTION__);
			}
		}

		// decrement tx ring buffer count
		if (gp->tx_count) gp->tx_count--;

		// free the skb
		if (gp->tx_skbuff[nextOut]) {
			dbg(3, "%s: good Tx, skb=%p\n", __FUNCTION__,
			    gp->tx_skbuff[nextOut]);
			dev_kfree_skb_irq(gp->tx_skbuff[nextOut]);
			gp->tx_skbuff[nextOut] = NULL;
		} else {
			err("%s: no skb!\n", __FUNCTION__);
		}
	}

	gp->tx_next_out = nextOut;

	if (gt96100_check_tx_consistent(gp)) {
		err("%s: Tx queue inconsistent!\n", __FUNCTION__);
	}

	if ((status & icrTxEndLow) && gp->tx_count != 0) {
		// we must restart the DMA
		dbg(3, "%s: Restarting Tx DMA\n", __FUNCTION__);
		GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_COMM,
				 sdcmrERD | sdcmrTXDL);
	}
}


static irqreturn_t
gt96100_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct gt96100_private *gp =
		netdev_priv(dev);
	u32 status;
	int handled = 0;

	if (dev == NULL) {
		err("%s: null dev ptr\n", __FUNCTION__);
		return IRQ_NONE;
	}

	dbg(3, "%s: entry, icr=%x\n", __FUNCTION__,
	    GT96100ETH_READ(gp, GT96100_ETH_INT_CAUSE));

	spin_lock(&gp->lock);

	gp->intr_work_done = max_interrupt_work;

	while (gp->intr_work_done > 0) {

		status = GT96100ETH_READ(gp, GT96100_ETH_INT_CAUSE);
		// ACK interrupts
		GT96100ETH_WRITE(gp, GT96100_ETH_INT_CAUSE, ~status);

		if ((status & icrEtherIntSum) == 0 &&
		    !(status & (icrTxBufferLow|icrTxBufferHigh|icrRxBuffer)))
			break;

		handled = 1;

		if (status & icrMIIPhySTC) {
			u32 psr = GT96100ETH_READ(gp, GT96100_ETH_PORT_STATUS);
			if (gp->last_psr != psr) {
				dbg(0, "port status:\n");
				dbg(0, " %s MBit/s, %s-duplex, "
				    "flow-control %s, link is %s,\n",
				    psr & psrSpeed ? "100":"10",
				    psr & psrDuplex ? "full":"half",
				    psr & psrFctl ? "disabled":"enabled",
				    psr & psrLink ? "up":"down");
				dbg(0, " TxLowQ is %s, TxHighQ is %s, "
				    "Transmitter is %s\n",
				    psr & psrTxLow ? "running":"stopped",
				    psr & psrTxHigh ? "running":"stopped",
				    psr & psrTxInProg ? "on":"off");

				if ((psr & psrLink) && !gp->tx_full &&
				    netif_queue_stopped(dev)) {
					dbg(0, "%s: Link up, waking queue.\n",
					    __FUNCTION__);
					netif_wake_queue(dev);
				} else if (!(psr & psrLink) &&
					   !netif_queue_stopped(dev)) {
					dbg(0, "%s: Link down, stopping queue.\n",
					    __FUNCTION__);
					netif_stop_queue(dev);
				}

				gp->last_psr = psr;
			}

			if (--gp->intr_work_done == 0)
				break;
		}

		if (status & (icrTxBufferLow | icrTxEndLow))
			gt96100_tx_complete(dev, status);

		if (status & (icrRxBuffer | icrRxError)) {
			gt96100_rx(dev, status);
		}

		// Now check TX errors (RX errors were handled in gt96100_rx)
		if (status & icrTxErrorLow) {
			err("%s: Tx resource error\n", __FUNCTION__);
			if (--gp->intr_work_done == 0)
				break;
		}

		if (status & icrTxUdr) {
			err("%s: Tx underrun error\n", __FUNCTION__);
			if (--gp->intr_work_done == 0)
				break;
		}
	}

	if (gp->intr_work_done == 0) {
		// ACK any remaining pending interrupts
		GT96100ETH_WRITE(gp, GT96100_ETH_INT_CAUSE, 0);
		dbg(3, "%s: hit max work\n", __FUNCTION__);
	}

	dbg(3, "%s: exit, icr=%x\n", __FUNCTION__,
	    GT96100ETH_READ(gp, GT96100_ETH_INT_CAUSE));

	spin_unlock(&gp->lock);
	return IRQ_RETVAL(handled);
}


static void
gt96100_tx_timeout(struct net_device *dev)
{
	struct gt96100_private *gp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&gp->lock, flags);

	if (!(gp->last_psr & psrLink)) {
		err("tx_timeout: link down.\n");
		spin_unlock_irqrestore(&gp->lock, flags);
	} else {
		if (gt96100_check_tx_consistent(gp))
			err("tx_timeout: Tx ring error.\n");

		disable_ether_irq(dev);
		spin_unlock_irqrestore(&gp->lock, flags);
		reset_tx(dev);
		enable_ether_irq(dev);

		netif_wake_queue(dev);
	}
}


static void
gt96100_set_rx_mode(struct net_device *dev)
{
	struct gt96100_private *gp = netdev_priv(dev);
	unsigned long flags;
	//struct dev_mc_list *mcptr;

	dbg(3, "%s: dev=%p, flags=%x\n", __FUNCTION__, dev, dev->flags);

	// stop the Receiver DMA
	abort(dev,
	      sdcmrAR);

	spin_lock_irqsave(&gp->lock, flags);

	if (dev->flags & IFF_PROMISC) {
		GT96100ETH_WRITE(gp, GT96100_ETH_PORT_CONFIG,
				 pcrEN | pcrHS | pcrPM);
	}

#if 0
	/*
	  FIXME: currently multicast doesn't work - need to get hash table
	  working first.
	*/
	if (dev->mc_count) {
		// clear hash table
		memset(gp->hash_table, 0, RX_HASH_TABLE_SIZE);
		// Add our ethernet address
		gt96100_add_hash_entry(dev, dev->dev_addr);

		for (mcptr = dev->mc_list; mcptr; mcptr = mcptr->next) {
			dump_hw_addr(2, dev, "%s: addr=", __FUNCTION__,
				     mcptr->dmi_addr);
			gt96100_add_hash_entry(dev, mcptr->dmi_addr);
		}
	}
#endif

	// restart Rx DMA
	GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_COMM, sdcmrERD);

	spin_unlock_irqrestore(&gp->lock, flags);
}

static struct net_device_stats *
gt96100_get_stats(struct net_device *dev)
{
	struct gt96100_private *gp = netdev_priv(dev);
	unsigned long flags;

	dbg(3, "%s: dev=%p\n", __FUNCTION__, dev);

	if (netif_device_present(dev)) {
		spin_lock_irqsave (&gp->lock, flags);
		update_stats(gp);
		spin_unlock_irqrestore (&gp->lock, flags);
	}

	return &gp->stats;
}

static void gt96100_cleanup_module(void)
{
	int i;
	for (i=0; i<NUM_INTERFACES; i++) {
		struct gt96100_if_t *gtif = &gt96100_iflist[i];
		if (gtif->dev != NULL) {
			struct gt96100_private *gp = (struct gt96100_private *)
				netdev_priv(gtif->dev);
			unregister_netdev(gtif->dev);
			dmafree(RX_HASH_TABLE_SIZE, gp->hash_table_dma);
			dmafree(PKT_BUF_SZ*RX_RING_SIZE, gp->rx_buff);
			dmafree(sizeof(gt96100_rd_t) * RX_RING_SIZE
				+ sizeof(gt96100_td_t) * TX_RING_SIZE,
				gp->rx_ring);
			free_netdev(gtif->dev);
			release_region(gtif->iobase, gp->io_size);
		}
	}
}

static int __init gt96100_setup(char *options)
{
	char *this_opt;

	if (!options || !*options)
		return 0;

	while ((this_opt = strsep (&options, ",")) != NULL) {
		if (!*this_opt)
			continue;
		if (!strncmp(this_opt, "mac0:", 5)) {
			memcpy(mac0, this_opt+5, 17);
			mac0[17]= '\0';
		} else if (!strncmp(this_opt, "mac1:", 5)) {
			memcpy(mac1, this_opt+5, 17);
			mac1[17]= '\0';
		}
	}

	return 1;
}

__setup("gt96100eth=", gt96100_setup);

module_init(gt96100_init_module);
module_exit(gt96100_cleanup_module);

MODULE_AUTHOR("Steve Longerbeam <stevel@mvista.com>");
MODULE_DESCRIPTION("GT96100 Ethernet driver");