Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
dl2k.c at v2.6.33-rc7 (1840 lines, 49 kB)
/* D-Link DL2000-based Gigabit Ethernet Adapter Linux driver */
/*
    Copyright (c) 2001, 2002 by D-Link Corporation
    Written by Edward Peng.<edward_peng@dlink.com.tw>
    Created 03-May-2001, based on Linux' sundance.c.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.
*/

#define DRV_NAME	"DL2000/TC902x-based linux driver"
#define DRV_VERSION	"v1.19"
#define DRV_RELDATE	"2007/08/12"
#include "dl2k.h"
#include <linux/dma-mapping.h>

static char version[] __devinitdata =
	KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";
#define MAX_UNITS 8
static int mtu[MAX_UNITS];
static int vlan[MAX_UNITS];
static int jumbo[MAX_UNITS];
static char *media[MAX_UNITS];
static int tx_flow=-1;
static int rx_flow=-1;
static int copy_thresh;
static int rx_coalesce=10;	/* Rx frame count each interrupt */
static int rx_timeout=200;	/* Rx DMA wait time in 640ns increments */
static int tx_coalesce=16;	/* HW xmit count each TxDMAComplete */


MODULE_AUTHOR ("Edward Peng");
MODULE_DESCRIPTION ("D-Link DL2000-based Gigabit Ethernet Adapter");
MODULE_LICENSE("GPL");
module_param_array(mtu, int, NULL, 0);
module_param_array(media, charp, NULL, 0);
module_param_array(vlan, int, NULL, 0);
module_param_array(jumbo, int, NULL, 0);
module_param(tx_flow, int, 0);
module_param(rx_flow, int, 0);
module_param(copy_thresh, int, 0);
module_param(rx_coalesce, int, 0);	/* Rx frame count each interrupt */
module_param(rx_timeout, int, 0);	/* Rx DMA wait time in 640ns increments */
module_param(tx_coalesce, int, 0);	/* HW xmit count each TxDMAComplete */


/* Enable the default interrupts */
#define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete| \
       UpdateStats | LinkEvent)
#define EnableInt() \
writew(DEFAULT_INTR, ioaddr + IntEnable)

static const int max_intrloop = 50;
static const int multicast_filter_limit = 0x40;

static int rio_open (struct net_device *dev);
static void rio_timer (unsigned long data);
static void rio_tx_timeout (struct net_device *dev);
static void alloc_list (struct net_device *dev);
static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rio_interrupt (int irq, void *dev_instance);
static void rio_free_tx (struct net_device *dev, int irq);
static void tx_error (struct net_device *dev, int tx_status);
static int receive_packet (struct net_device *dev);
static void rio_error (struct net_device *dev, int int_status);
static int change_mtu (struct net_device *dev, int new_mtu);
static void set_multicast (struct net_device *dev);
static struct net_device_stats *get_stats (struct net_device *dev);
static int clear_stats (struct net_device *dev);
static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
static int rio_close (struct net_device *dev);
static int find_miiphy (struct net_device *dev);
static int parse_eeprom (struct net_device *dev);
static int read_eeprom (long ioaddr, int eep_addr);
static int mii_wait_link (struct net_device *dev, int wait);
static int mii_set_media (struct net_device *dev);
static int mii_get_media (struct net_device *dev);
static int mii_set_media_pcs (struct net_device *dev);
static int mii_get_media_pcs (struct net_device *dev);
static int mii_read (struct net_device *dev, int phy_addr, int reg_num);
static int mii_write (struct net_device *dev, int phy_addr, int reg_num,
		      u16 data);

static const struct ethtool_ops ethtool_ops;

static const struct net_device_ops netdev_ops = {
	.ndo_open		= rio_open,
	.ndo_start_xmit		= start_xmit,
	.ndo_stop		= rio_close,
	.ndo_get_stats		= get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_set_multicast_list = set_multicast,
	.ndo_do_ioctl		= rio_ioctl,
	.ndo_tx_timeout		= rio_tx_timeout,
	.ndo_change_mtu		= change_mtu,
};

static int __devinit
rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int card_idx;
	int chip_idx = ent->driver_data;
	int err, irq;
	long ioaddr;
	static int version_printed;
	void *ring_space;
	dma_addr_t ring_dma;

	if (!version_printed++)
		printk ("%s", version);

	err = pci_enable_device (pdev);
	if (err)
		return err;

	irq = pdev->irq;
	err = pci_request_regions (pdev, "dl2k");
	if (err)
		goto err_out_disable;

	pci_set_master (pdev);
	dev = alloc_etherdev (sizeof (*np));
	if (!dev) {
		err = -ENOMEM;
		goto err_out_res;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

#ifdef MEM_MAPPING
	ioaddr = pci_resource_start (pdev, 1);
	ioaddr = (long) ioremap (ioaddr, RIO_IO_SIZE);
	if (!ioaddr) {
		err = -ENOMEM;
		goto err_out_dev;
	}
#else
	ioaddr = pci_resource_start (pdev, 0);
#endif
	dev->base_addr = ioaddr;
	dev->irq = irq;
	np = netdev_priv(dev);
	np->chip_id = chip_idx;
	np->pdev = pdev;
	spin_lock_init (&np->tx_lock);
	spin_lock_init (&np->rx_lock);

	/* Parse manual configuration */
	np->an_enable = 1;
	np->tx_coalesce = 1;
	if (card_idx < MAX_UNITS) {
		if (media[card_idx] != NULL) {
			np->an_enable = 0;
			if (strcmp (media[card_idx], "auto") == 0 ||
			    strcmp (media[card_idx], "autosense") == 0 ||
			    strcmp (media[card_idx], "0") == 0 ) {
				np->an_enable = 2;
			} else if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
			    strcmp (media[card_idx], "4") == 0) {
				np->speed = 100;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
				   strcmp (media[card_idx], "3") == 0) {
				np->speed = 100;
				np->full_duplex = 0;
			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
				   strcmp (media[card_idx], "2") == 0) {
				np->speed = 10;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
				   strcmp (media[card_idx], "1") == 0) {
				np->speed = 10;
				np->full_duplex = 0;
			} else if (strcmp (media[card_idx], "1000mbps_fd") == 0 ||
				   strcmp (media[card_idx], "6") == 0) {
				np->speed=1000;
				np->full_duplex=1;
			} else if (strcmp (media[card_idx], "1000mbps_hd") == 0 ||
				   strcmp (media[card_idx], "5") == 0) {
				np->speed = 1000;
				np->full_duplex = 0;
			} else {
				np->an_enable = 1;
			}
		}
		if (jumbo[card_idx] != 0) {
			np->jumbo = 1;
			dev->mtu = MAX_JUMBO;
		} else {
			np->jumbo = 0;
			if (mtu[card_idx] > 0 && mtu[card_idx] < PACKET_SIZE)
				dev->mtu = mtu[card_idx];
		}
		np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ?
		    vlan[card_idx] : 0;
		if (rx_coalesce > 0 && rx_timeout > 0) {
			np->rx_coalesce = rx_coalesce;
			np->rx_timeout = rx_timeout;
			np->coalesce = 1;
		}
		np->tx_flow = (tx_flow == 0) ? 0 : 1;
		np->rx_flow = (rx_flow == 0) ? 0 : 1;

		if (tx_coalesce < 1)
			tx_coalesce = 1;
		else if (tx_coalesce > TX_RING_SIZE-1)
			tx_coalesce = TX_RING_SIZE - 1;
	}
	dev->netdev_ops = &netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	SET_ETHTOOL_OPS(dev, &ethtool_ops);
#if 0
	dev->features = NETIF_F_IP_CSUM;
#endif
	pci_set_drvdata (pdev, dev);

	ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_iounmap;
	np->tx_ring = (struct netdev_desc *) ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	np->rx_ring = (struct netdev_desc *) ring_space;
	np->rx_ring_dma = ring_dma;

	/* Parse eeprom data */
	parse_eeprom (dev);

	/* Find PHY address */
	err = find_miiphy (dev);
	if (err)
		goto err_out_unmap_rx;

	/* Fiber device? */
	np->phy_media = (readw(ioaddr + ASICCtrl) & PhyMedia) ? 1 : 0;
	np->link_status = 0;
	/* Set media and reset PHY */
	if (np->phy_media) {
		/* default Auto-Negotiation for fiber devices */
		if (np->an_enable == 2) {
			np->an_enable = 1;
		}
		mii_set_media_pcs (dev);
	} else {
		/* Auto-Negotiation is mandatory for 1000BASE-T,
		   IEEE 802.3ab Annex 28D page 14 */
		if (np->speed == 1000)
			np->an_enable = 1;
		mii_set_media (dev);
	}

	err = register_netdev (dev);
	if (err)
		goto err_out_unmap_rx;

	card_idx++;

	printk (KERN_INFO "%s: %s, %pM, IRQ %d\n",
		dev->name, np->name, dev->dev_addr, irq);
	if (tx_coalesce > 1)
		printk(KERN_INFO "tx_coalesce:\t%d packets\n",
				tx_coalesce);
	if (np->coalesce)
		printk(KERN_INFO
		       "rx_coalesce:\t%d packets\n"
		       "rx_timeout: \t%d ns\n",
				np->rx_coalesce, np->rx_timeout*640);
	if (np->vlan)
		printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
	return 0;

      err_out_unmap_rx:
	pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
      err_out_unmap_tx:
	pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
      err_out_iounmap:
#ifdef MEM_MAPPING
	iounmap ((void *) ioaddr);

      err_out_dev:
#endif
	free_netdev (dev);

      err_out_res:
	pci_release_regions (pdev);

      err_out_disable:
	pci_disable_device (pdev);
	return err;
}
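/*
 * PHY discovery: scan all 32 MDIO addresses and read register 1 (BMSR)
 * at each; a value of all ones or all zeros means nothing responded at
 * that address.  Because the loop counts down, the lowest responding
 * address is the one kept, with address 1 as the fallback default.
 */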
static int
find_miiphy (struct net_device *dev)
{
	int i, phy_found = 0;
	struct netdev_private *np;
	long ioaddr;
	np = netdev_priv(dev);
	ioaddr = dev->base_addr;
	np->phy_addr = 1;

	for (i = 31; i >= 0; i--) {
		int mii_status = mii_read (dev, i, 1);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			np->phy_addr = i;
			phy_found++;
		}
	}
	if (!phy_found) {
		printk (KERN_ERR "%s: No MII PHY found!\n", dev->name);
		return -ENODEV;
	}
	return 0;
}
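/*
 * The first 128 EEPROM words are read into sromdata[].  On D-Link
 * boards the final 32-bit word carries a little-endian CRC over the
 * preceding 252 bytes, the MAC address sits in the PSROM_t header, and
 * a Software Information Block of (cell id, next offset) records
 * starts at offset 0x30.
 */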
static int
parse_eeprom (struct net_device *dev)
{
	int i, j;
	long ioaddr = dev->base_addr;
	u8 sromdata[256];
	u8 *psib;
	u32 crc;
	PSROM_t psrom = (PSROM_t) sromdata;
	struct netdev_private *np = netdev_priv(dev);

	int cid, next;

#ifdef MEM_MAPPING
	ioaddr = pci_resource_start (np->pdev, 0);
#endif
	/* Read eeprom */
	for (i = 0; i < 128; i++) {
		((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom (ioaddr, i));
	}
#ifdef MEM_MAPPING
	ioaddr = dev->base_addr;
#endif
	if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) {	/* D-Link Only */
		/* Check CRC */
		crc = ~ether_crc_le (256 - 4, sromdata);
		if (psrom->crc != crc) {
			printk (KERN_ERR "%s: EEPROM data CRC error.\n",
				dev->name);
			return -1;
		}
	}

	/* Set MAC address */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = psrom->mac_addr[i];

	if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) {
		return 0;
	}

	/* Parse Software Information Block */
	i = 0x30;
	psib = (u8 *) sromdata;
	do {
		cid = psib[i++];
		next = psib[i++];
		if ((cid == 0 && next == 0) || (cid == 0xff && next == 0xff)) {
			printk (KERN_ERR "Cell data error\n");
			return -1;
		}
		switch (cid) {
		case 0:	/* Format version */
			break;
		case 1:	/* End of cell */
			return 0;
		case 2:	/* Duplex Polarity */
			np->duplex_polarity = psib[i];
			writeb (readb (ioaddr + PhyCtrl) | psib[i],
				ioaddr + PhyCtrl);
			break;
		case 3:	/* Wake Polarity */
			np->wake_polarity = psib[i];
			break;
		case 9:	/* Adapter description */
			j = (next - i > 255) ? 255 : next - i;
			memcpy (np->name, &(psib[i]), j);
			break;
		case 4:
		case 5:
		case 6:
		case 7:
		case 8:	/* Reserved */
			break;
		default:	/* Unknown cell */
			return -1;
		}
		i = next;
	} while (1);

	return 0;
}

static int
rio_open (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	int i;
	u16 macctrl;

	i = request_irq (dev->irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
	if (i)
		return i;

	/* Reset all logic functions */
	writew (GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset,
		ioaddr + ASICCtrl + 2);
	mdelay(10);

	/* DebugCtrl bit 4, 5, 9 must set */
	writel (readl (ioaddr + DebugCtrl) | 0x0230, ioaddr + DebugCtrl);

	/* Jumbo frame */
	if (np->jumbo != 0)
		writew (MAX_JUMBO+14, ioaddr + MaxFrameSize);

	alloc_list (dev);

	/* Get station address */
	for (i = 0; i < 6; i++)
		writeb (dev->dev_addr[i], ioaddr + StationAddr0 + i);

	set_multicast (dev);
	if (np->coalesce) {
		writel (np->rx_coalesce | np->rx_timeout << 16,
			ioaddr + RxDMAIntCtrl);
	}
	/* Set RIO to poll every N*320nsec. */
	writeb (0x20, ioaddr + RxDMAPollPeriod);
	writeb (0xff, ioaddr + TxDMAPollPeriod);
	writeb (0x30, ioaddr + RxDMABurstThresh);
	writeb (0x30, ioaddr + RxDMAUrgentThresh);
	writel (0x0007ffff, ioaddr + RmonStatMask);
	/* clear statistics */
	clear_stats (dev);

	/* VLAN supported */
	if (np->vlan) {
		/* priority field in RxDMAIntCtrl */
		writel (readl(ioaddr + RxDMAIntCtrl) | 0x7 << 10,
			ioaddr + RxDMAIntCtrl);
		/* VLANId */
		writew (np->vlan, ioaddr + VLANId);
		/* Length/Type should be 0x8100 */
		writel (0x8100 << 16 | np->vlan, ioaddr + VLANTag);
		/* Enable AutoVLANuntagging, but disable AutoVLANtagging.
		   VLAN information tagged by TFC' VID, CFI fields. */
		writel (readl (ioaddr + MACCtrl) | AutoVLANuntagging,
			ioaddr + MACCtrl);
	}

	init_timer (&np->timer);
	np->timer.expires = jiffies + 1*HZ;
	np->timer.data = (unsigned long) dev;
	np->timer.function = &rio_timer;
	add_timer (&np->timer);

	/* Start Tx/Rx */
	writel (readl (ioaddr + MACCtrl) | StatsEnable | RxEnable | TxEnable,
			ioaddr + MACCtrl);

	macctrl = 0;
	macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
	macctrl |= (np->full_duplex) ? DuplexSelect : 0;
	macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
	macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
	writew(macctrl,	ioaddr + MACCtrl);

	netif_start_queue (dev);

	/* Enable default interrupts */
	EnableInt ();
	return 0;
}

static void
rio_timer (unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	unsigned int entry;
	int next_tick = 1*HZ;
	unsigned long flags;

	spin_lock_irqsave(&np->rx_lock, flags);
	/* Recover rx ring exhausted error */
	if (np->cur_rx - np->old_rx >= RX_RING_SIZE) {
		printk(KERN_INFO "Try to recover rx ring exhausted...\n");
		/* Re-allocate skbuffs to fill the descriptor ring */
		for (; np->cur_rx - np->old_rx > 0; np->old_rx++) {
			struct sk_buff *skb;
			entry = np->old_rx % RX_RING_SIZE;
			/* Dropped packets don't need to re-allocate */
			if (np->rx_skbuff[entry] == NULL) {
				skb = netdev_alloc_skb_ip_align(dev,
								np->rx_buf_sz);
				if (skb == NULL) {
					np->rx_ring[entry].fraginfo = 0;
					printk (KERN_INFO
						"%s: Still unable to re-allocate Rx skbuff.#%d\n",
						dev->name, entry);
					break;
				}
				np->rx_skbuff[entry] = skb;
				np->rx_ring[entry].fraginfo =
				    cpu_to_le64 (pci_map_single
						 (np->pdev, skb->data, np->rx_buf_sz,
						  PCI_DMA_FROMDEVICE));
			}
			np->rx_ring[entry].fraginfo |=
			    cpu_to_le64((u64)np->rx_buf_sz << 48);
			np->rx_ring[entry].status = 0;
		} /* end for */
	} /* end if */
	spin_unlock_irqrestore (&np->rx_lock, flags);
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}

static void
rio_tx_timeout (struct net_device *dev)
{
	long ioaddr = dev->base_addr;

	printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n",
		dev->name, readl (ioaddr + TxStatus));
	rio_free_tx(dev, 0);
	dev->if_port = 0;
	dev->trans_start = jiffies; /* prevent tx timeout */
}
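/*
 * Ring layout: tx_ring and rx_ring are circular lists of little-endian
 * 64-bit descriptors chained through next_desc.  Each fraginfo word
 * packs the buffer's DMA address into bits 0-47 (see desc_to_dma())
 * and the buffer length into bits 48-63.
 */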
 /* allocate and initialize Tx and Rx descriptors */
static void
alloc_list (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->cur_tx = 0;
	np->old_rx = np->old_tx = 0;
	np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);

	/* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = cpu_to_le64 (TFDDone);
		np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma +
					      ((i+1)%TX_RING_SIZE) *
					      sizeof (struct netdev_desc));
	}

	/* Initialize Rx descriptors */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le64 (np->rx_ring_dma +
						((i + 1) % RX_RING_SIZE) *
						sizeof (struct netdev_desc));
		np->rx_ring[i].status = 0;
		np->rx_ring[i].fraginfo = 0;
		np->rx_skbuff[i] = NULL;
	}

	/* Allocate the rx buffers */
	for (i = 0; i < RX_RING_SIZE; i++) {
		/* Allocated fixed size of skbuff */
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL) {
			printk (KERN_ERR
				"%s: alloc_list: allocate Rx buffer error! ",
				dev->name);
			break;
		}
		/* Rubicon now supports 40 bits of addressing space. */
		np->rx_ring[i].fraginfo =
		    cpu_to_le64 ( pci_map_single (
				  np->pdev, skb->data, np->rx_buf_sz,
				  PCI_DMA_FROMDEVICE));
		np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
	}

	/* Set RFDListPtr */
	writel (np->rx_ring_dma, dev->base_addr + RFDListPtr0);
	writel (0, dev->base_addr + RFDListPtr1);

	return;
}

static netdev_tx_t
start_xmit (struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct netdev_desc *txdesc;
	unsigned entry;
	u32 ioaddr;
	u64 tfc_vlan_tag = 0;

	if (np->link_status == 0) {	/* Link Down */
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	ioaddr = dev->base_addr;
	entry = np->cur_tx % TX_RING_SIZE;
	np->tx_skbuff[entry] = skb;
	txdesc = &np->tx_ring[entry];

#if 0
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdesc->status |=
		    cpu_to_le64 (TCPChecksumEnable | UDPChecksumEnable |
				 IPChecksumEnable);
	}
#endif
	if (np->vlan) {
		tfc_vlan_tag = VLANTagInsert |
		    ((u64)np->vlan << 32) |
		    ((u64)skb->priority << 45);
	}
	txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data,
							skb->len,
							PCI_DMA_TODEVICE));
	txdesc->fraginfo |= cpu_to_le64((u64)skb->len << 48);

	/* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode
	 * Work around: Always use 1 descriptor in 10Mbps mode */
	if (entry % np->tx_coalesce == 0 || np->speed == 10)
		txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
					      WordAlignDisable |
					      TxDMAIndicate |
					      (1 << FragCountShift));
	else
		txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
					      WordAlignDisable |
					      (1 << FragCountShift));

	/* TxDMAPollNow */
	writel (readl (ioaddr + DMACtrl) | 0x00001000, ioaddr + DMACtrl);
	/* Schedule ISR */
	writel(10000, ioaddr + CountDown);
	np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
	if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
			< TX_QUEUE_LEN - 1 && np->speed != 10) {
		/* do nothing */
	} else if (!netif_queue_stopped(dev)) {
		netif_stop_queue (dev);
	}

	/* The first TFDListPtr */
	if (readl (dev->base_addr + TFDListPtr0) == 0) {
		writel (np->tx_ring_dma + entry * sizeof (struct netdev_desc),
			dev->base_addr + TFDListPtr0);
		writel (0, dev->base_addr + TFDListPtr1);
	}

	return NETDEV_TX_OK;
}
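/*
 * Interrupt handling: reading IntStatus and writing the value straight
 * back acknowledges all pending sources at once.  The handler then
 * loops until no DEFAULT_INTR bits remain, bounded by max_intrloop so
 * a stuck source cannot keep the CPU in interrupt context forever.
 */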
static irqreturn_t
rio_interrupt (int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct netdev_private *np;
	unsigned int_status;
	long ioaddr;
	int cnt = max_intrloop;
	int handled = 0;

	ioaddr = dev->base_addr;
	np = netdev_priv(dev);
	while (1) {
		int_status = readw (ioaddr + IntStatus);
		writew (int_status, ioaddr + IntStatus);
		int_status &= DEFAULT_INTR;
		if (int_status == 0 || --cnt < 0)
			break;
		handled = 1;
		/* Processing received packets */
		if (int_status & RxDMAComplete)
			receive_packet (dev);
		/* TxDMAComplete interrupt */
		if ((int_status & (TxDMAComplete|IntRequested))) {
			int tx_status;
			tx_status = readl (ioaddr + TxStatus);
			if (tx_status & 0x01)
				tx_error (dev, tx_status);
			/* Free used tx skbuffs */
			rio_free_tx (dev, 1);
		}

		/* Handle uncommon events */
		if (int_status &
		    (HostError | LinkEvent | UpdateStats))
			rio_error (dev, int_status);
	}
	if (np->cur_tx != np->old_tx)
		writel (100, ioaddr + CountDown);
	return IRQ_RETVAL(handled);
}

static inline dma_addr_t desc_to_dma(struct netdev_desc *desc)
{
	return le64_to_cpu(desc->fraginfo) & DMA_BIT_MASK(48);
}

static void
rio_free_tx (struct net_device *dev, int irq)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->old_tx % TX_RING_SIZE;
	int tx_use = 0;
	unsigned long flag = 0;

	if (irq)
		spin_lock(&np->tx_lock);
	else
		spin_lock_irqsave(&np->tx_lock, flag);

	/* Free used tx skbuffs */
	while (entry != np->cur_tx) {
		struct sk_buff *skb;

		if (!(np->tx_ring[entry].status & cpu_to_le64(TFDDone)))
			break;
		skb = np->tx_skbuff[entry];
		pci_unmap_single (np->pdev,
				  desc_to_dma(&np->tx_ring[entry]),
				  skb->len, PCI_DMA_TODEVICE);
		if (irq)
			dev_kfree_skb_irq (skb);
		else
			dev_kfree_skb (skb);

		np->tx_skbuff[entry] = NULL;
		entry = (entry + 1) % TX_RING_SIZE;
		tx_use++;
	}
	if (irq)
		spin_unlock(&np->tx_lock);
	else
		spin_unlock_irqrestore(&np->tx_lock, flag);
	np->old_tx = entry;

	/* If the ring is no longer full, clear tx_full and
	   call netif_wake_queue() */

	if (netif_queue_stopped(dev) &&
	    ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
	    < TX_QUEUE_LEN - 1 || np->speed == 10)) {
		netif_wake_queue (dev);
	}
}
static void
tx_error (struct net_device *dev, int tx_status)
{
	struct netdev_private *np;
	long ioaddr = dev->base_addr;
	int frame_id;
	int i;

	np = netdev_priv(dev);

	frame_id = (tx_status & 0xffff0000);
	printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
		dev->name, tx_status, frame_id);
	np->stats.tx_errors++;
	/* Transmit Underrun */
	if (tx_status & 0x10) {
		np->stats.tx_fifo_errors++;
		writew (readw (ioaddr + TxStartThresh) + 0x10,
			ioaddr + TxStartThresh);
		/* Transmit Underrun need to set TxReset, DMAReset, FIFOReset */
		writew (TxReset | DMAReset | FIFOReset | NetworkReset,
			ioaddr + ASICCtrl + 2);
		/* Wait for ResetBusy bit clear */
		for (i = 50; i > 0; i--) {
			if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
				break;
			mdelay (1);
		}
		rio_free_tx (dev, 1);
		/* Reset TFDListPtr */
		writel (np->tx_ring_dma +
			np->old_tx * sizeof (struct netdev_desc),
			dev->base_addr + TFDListPtr0);
		writel (0, dev->base_addr + TFDListPtr1);

		/* Let TxStartThresh stay default value */
	}
	/* Late Collision */
	if (tx_status & 0x04) {
		np->stats.tx_fifo_errors++;
		/* TxReset and clear FIFO */
		writew (TxReset | FIFOReset, ioaddr + ASICCtrl + 2);
		/* Wait reset done */
		for (i = 50; i > 0; i--) {
			if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
				break;
			mdelay (1);
		}
		/* Let TxStartThresh stay default value */
	}
	/* Maximum Collisions */
#ifdef ETHER_STATS
	if (tx_status & 0x08)
		np->stats.collisions16++;
#else
	if (tx_status & 0x08)
		np->stats.collisions++;
#endif
	/* Restart the Tx */
	writel (readw (dev->base_addr + MACCtrl) | TxEnable, ioaddr + MACCtrl);
}

static int
receive_packet (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int cnt = 30;

	/* If RFDDone, FrameStart and FrameEnd set, there is a new packet in. */
	while (1) {
		struct netdev_desc *desc = &np->rx_ring[entry];
		int pkt_len;
		u64 frame_status;

		if (!(desc->status & cpu_to_le64(RFDDone)) ||
		    !(desc->status & cpu_to_le64(FrameStart)) ||
		    !(desc->status & cpu_to_le64(FrameEnd)))
			break;

		/* Chip omits the CRC. */
		frame_status = le64_to_cpu(desc->status);
		pkt_len = frame_status & 0xffff;
		if (--cnt < 0)
			break;
		/* Update rx error statistics, drop packet. */
		if (frame_status & RFS_Errors) {
			np->stats.rx_errors++;
			if (frame_status & (RxRuntFrame | RxLengthError))
				np->stats.rx_length_errors++;
			if (frame_status & RxFCSError)
				np->stats.rx_crc_errors++;
			if (frame_status & RxAlignmentError && np->speed != 1000)
				np->stats.rx_frame_errors++;
			if (frame_status & RxFIFOOverrun)
				np->stats.rx_fifo_errors++;
		} else {
			struct sk_buff *skb;

			/* Small skbuffs for short packets */
			if (pkt_len > copy_thresh) {
				pci_unmap_single (np->pdev,
						  desc_to_dma(desc),
						  np->rx_buf_sz,
						  PCI_DMA_FROMDEVICE);
				skb_put (skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			} else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
				pci_dma_sync_single_for_cpu(np->pdev,
							    desc_to_dma(desc),
							    np->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);
				skb_copy_to_linear_data (skb,
						  np->rx_skbuff[entry]->data,
						  pkt_len);
				skb_put (skb, pkt_len);
				pci_dma_sync_single_for_device(np->pdev,
							       desc_to_dma(desc),
							       np->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			}
			skb->protocol = eth_type_trans (skb, dev);
#if 0
			/* Checksum done by hw, but csum value unavailable. */
			if (np->pdev->pci_rev_id >= 0x0c &&
			    !(frame_status & (TCPError | UDPError | IPError))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			}
#endif
			netif_rx (skb);
		}
		entry = (entry + 1) % RX_RING_SIZE;
	}
	spin_lock(&np->rx_lock);
	np->cur_rx = entry;
	/* Re-allocate skbuffs to fill the descriptor ring */
	entry = np->old_rx;
	while (entry != np->cur_rx) {
		struct sk_buff *skb;
		/* Dropped packets don't need to re-allocate */
		if (np->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
			if (skb == NULL) {
				np->rx_ring[entry].fraginfo = 0;
				printk (KERN_INFO
					"%s: receive_packet: "
					"Unable to re-allocate Rx skbuff.#%d\n",
					dev->name, entry);
				break;
			}
			np->rx_skbuff[entry] = skb;
			np->rx_ring[entry].fraginfo =
			    cpu_to_le64 (pci_map_single
					 (np->pdev, skb->data, np->rx_buf_sz,
					  PCI_DMA_FROMDEVICE));
		}
		np->rx_ring[entry].fraginfo |=
		    cpu_to_le64((u64)np->rx_buf_sz << 48);
		np->rx_ring[entry].status = 0;
		entry = (entry + 1) % RX_RING_SIZE;
	}
	np->old_rx = entry;
	spin_unlock(&np->rx_lock);
	return 0;
}
static void
rio_error (struct net_device *dev, int int_status)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = netdev_priv(dev);
	u16 macctrl;

	/* Link change event */
	if (int_status & LinkEvent) {
		if (mii_wait_link (dev, 10) == 0) {
			printk (KERN_INFO "%s: Link up\n", dev->name);
			if (np->phy_media)
				mii_get_media_pcs (dev);
			else
				mii_get_media (dev);
			if (np->speed == 1000)
				np->tx_coalesce = tx_coalesce;
			else
				np->tx_coalesce = 1;
			macctrl = 0;
			macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
			macctrl |= (np->full_duplex) ? DuplexSelect : 0;
			macctrl |= (np->tx_flow) ?
				TxFlowControlEnable : 0;
			macctrl |= (np->rx_flow) ?
				RxFlowControlEnable : 0;
			writew(macctrl,	ioaddr + MACCtrl);
			np->link_status = 1;
			netif_carrier_on(dev);
		} else {
			printk (KERN_INFO "%s: Link off\n", dev->name);
			np->link_status = 0;
			netif_carrier_off(dev);
		}
	}

	/* UpdateStats statistics registers */
	if (int_status & UpdateStats) {
		get_stats (dev);
	}

	/* PCI Error, a catastrophic error related to the bus interface
	   occurs, set GlobalReset and HostReset to reset. */
	if (int_status & HostError) {
		printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n",
			dev->name, int_status);
		writew (GlobalReset | HostReset, ioaddr + ASICCtrl + 2);
		mdelay (500);
	}
}

static struct net_device_stats *
get_stats (struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = netdev_priv(dev);
#ifdef MEM_MAPPING
	int i;
#endif
	unsigned int stat_reg;

	/* All statistics registers need to be acknowledged,
	   else statistic overflow could cause problems */

	np->stats.rx_packets += readl (ioaddr + FramesRcvOk);
	np->stats.tx_packets += readl (ioaddr + FramesXmtOk);
	np->stats.rx_bytes += readl (ioaddr + OctetRcvOk);
	np->stats.tx_bytes += readl (ioaddr + OctetXmtOk);

	np->stats.multicast = readl (ioaddr + McstFramesRcvdOk);
	np->stats.collisions += readl (ioaddr + SingleColFrames)
			     +  readl (ioaddr + MultiColFrames);

	/* detailed tx errors */
	stat_reg = readw (ioaddr + FramesAbortXSColls);
	np->stats.tx_aborted_errors += stat_reg;
	np->stats.tx_errors += stat_reg;

	stat_reg = readw (ioaddr + CarrierSenseErrors);
	np->stats.tx_carrier_errors += stat_reg;
	np->stats.tx_errors += stat_reg;

	/* Clear all other statistic register. */
	readl (ioaddr + McstOctetXmtOk);
	readw (ioaddr + BcstFramesXmtdOk);
	readl (ioaddr + McstFramesXmtdOk);
	readw (ioaddr + BcstFramesRcvdOk);
	readw (ioaddr + MacControlFramesRcvd);
	readw (ioaddr + FrameTooLongErrors);
	readw (ioaddr + InRangeLengthErrors);
	readw (ioaddr + FramesCheckSeqErrors);
	readw (ioaddr + FramesLostRxErrors);
	readl (ioaddr + McstOctetXmtOk);
	readl (ioaddr + BcstOctetXmtOk);
	readl (ioaddr + McstFramesXmtdOk);
	readl (ioaddr + FramesWDeferredXmt);
	readl (ioaddr + LateCollisions);
	readw (ioaddr + BcstFramesXmtdOk);
	readw (ioaddr + MacControlFramesXmtd);
	readw (ioaddr + FramesWEXDeferal);

#ifdef MEM_MAPPING
	for (i = 0x100; i <= 0x150; i += 4)
		readl (ioaddr + i);
#endif
	readw (ioaddr + TxJumboFrames);
	readw (ioaddr + RxJumboFrames);
	readw (ioaddr + TCPCheckSumErrors);
	readw (ioaddr + UDPCheckSumErrors);
	readw (ioaddr + IPCheckSumErrors);
	return &np->stats;
}

static int
clear_stats (struct net_device *dev)
{
	long ioaddr = dev->base_addr;
#ifdef MEM_MAPPING
	int i;
#endif

	/* All statistics registers need to be acknowledged,
	   else statistic overflow could cause problems */
	readl (ioaddr + FramesRcvOk);
	readl (ioaddr + FramesXmtOk);
	readl (ioaddr + OctetRcvOk);
	readl (ioaddr + OctetXmtOk);

	readl (ioaddr + McstFramesRcvdOk);
	readl (ioaddr + SingleColFrames);
	readl (ioaddr + MultiColFrames);
	readl (ioaddr + LateCollisions);
	/* detailed rx errors */
	readw (ioaddr + FrameTooLongErrors);
	readw (ioaddr + InRangeLengthErrors);
	readw (ioaddr + FramesCheckSeqErrors);
	readw (ioaddr + FramesLostRxErrors);

	/* detailed tx errors */
	readw (ioaddr + FramesAbortXSColls);
	readw (ioaddr + CarrierSenseErrors);

	/* Clear all other statistic register. */
	readl (ioaddr + McstOctetXmtOk);
	readw (ioaddr + BcstFramesXmtdOk);
	readl (ioaddr + McstFramesXmtdOk);
	readw (ioaddr + BcstFramesRcvdOk);
	readw (ioaddr + MacControlFramesRcvd);
	readl (ioaddr + McstOctetXmtOk);
	readl (ioaddr + BcstOctetXmtOk);
	readl (ioaddr + McstFramesXmtdOk);
	readl (ioaddr + FramesWDeferredXmt);
	readw (ioaddr + BcstFramesXmtdOk);
	readw (ioaddr + MacControlFramesXmtd);
	readw (ioaddr + FramesWEXDeferal);
#ifdef MEM_MAPPING
	for (i = 0x100; i <= 0x150; i += 4)
		readl (ioaddr + i);
#endif
	readw (ioaddr + TxJumboFrames);
	readw (ioaddr + RxJumboFrames);
	readw (ioaddr + TCPCheckSumErrors);
	readw (ioaddr + UDPCheckSumErrors);
	readw (ioaddr + IPCheckSumErrors);
	return 0;
}

static int
change_mtu (struct net_device *dev, int new_mtu)
{
	struct netdev_private *np = netdev_priv(dev);
	int max = (np->jumbo) ? MAX_JUMBO : 1536;

	if ((new_mtu < 68) || (new_mtu > max)) {
		return -EINVAL;
	}

	dev->mtu = new_mtu;

	return 0;
}
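/*
 * Multicast filtering uses a 64-bin hash: the index is the bit-reversed
 * top six bits of the little-endian CRC-32 of the destination address.
 * The flow-control DA 01-80-C2-00-00-01 hashes to index 0x39, which is
 * why bit 25 of hash_table[1] is pre-set below.
 */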
static void
set_multicast (struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	u32 hash_table[2];
	u16 rx_mode = 0;
	struct netdev_private *np = netdev_priv(dev);

	hash_table[0] = hash_table[1] = 0;
	/* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */
	hash_table[1] |= 0x02000000;
	if (dev->flags & IFF_PROMISC) {
		/* Receive all frames promiscuously. */
		rx_mode = ReceiveAllFrames;
	} else if ((dev->flags & IFF_ALLMULTI) ||
			(dev->mc_count > multicast_filter_limit)) {
		/* Receive broadcast and multicast frames */
		rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
	} else if (dev->mc_count > 0) {
		int i;
		struct dev_mc_list *mclist;
		/* Receive broadcast frames and multicast frames filtering
		   by Hashtable */
		rx_mode =
		    ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
		for (i=0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist=mclist->next)
		{
			int bit, index = 0;
			int crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
			/* The inverted high significant 6 bits of CRC are
			   used as an index to hashtable */
			for (bit = 0; bit < 6; bit++)
				if (crc & (1 << (31 - bit)))
					index |= (1 << bit);
			hash_table[index / 32] |= (1 << (index % 32));
		}
	} else {
		rx_mode = ReceiveBroadcast | ReceiveUnicast;
	}
	if (np->vlan) {
		/* ReceiveVLANMatch field in ReceiveMode */
		rx_mode |= ReceiveVLANMatch;
	}

	writel (hash_table[0], ioaddr + HashTable0);
	writel (hash_table[1], ioaddr + HashTable1);
	writew (rx_mode, ioaddr + ReceiveMode);
}

static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);
	strcpy(info->driver, "dl2k");
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(np->pdev));
}

static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	if (np->phy_media) {
		/* fiber device */
		cmd->supported = SUPPORTED_Autoneg | SUPPORTED_FIBRE;
		cmd->advertising = ADVERTISED_Autoneg | ADVERTISED_FIBRE;
		cmd->port = PORT_FIBRE;
		cmd->transceiver = XCVR_INTERNAL;
	} else {
		/* copper device */
		cmd->supported = SUPPORTED_10baseT_Half |
		    SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half
		    | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full |
		    SUPPORTED_Autoneg | SUPPORTED_MII;
		cmd->advertising = ADVERTISED_10baseT_Half |
		    ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half |
		    ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full |
		    ADVERTISED_Autoneg | ADVERTISED_MII;
		cmd->port = PORT_MII;
		cmd->transceiver = XCVR_INTERNAL;
	}
	if (np->link_status) {
		cmd->speed = np->speed;
		cmd->duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	if (np->an_enable)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->phy_address = np->phy_addr;
	return 0;
}

static int rio_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	netif_carrier_off(dev);
	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (np->an_enable)
			return 0;
		else {
			np->an_enable = 1;
			mii_set_media(dev);
			return 0;
		}
	} else {
		np->an_enable = 0;
		if (np->speed == 1000) {
			cmd->speed = SPEED_100;
			cmd->duplex = DUPLEX_FULL;
			printk("Warning!! Can't disable Auto negotiation in 1000Mbps, change to Manual 100Mbps, Full duplex.\n");
		}
		switch (cmd->speed + cmd->duplex) {

		case SPEED_10 + DUPLEX_HALF:
			np->speed = 10;
			np->full_duplex = 0;
			break;

		case SPEED_10 + DUPLEX_FULL:
			np->speed = 10;
			np->full_duplex = 1;
			break;
		case SPEED_100 + DUPLEX_HALF:
			np->speed = 100;
			np->full_duplex = 0;
			break;
		case SPEED_100 + DUPLEX_FULL:
			np->speed = 100;
			np->full_duplex = 1;
			break;
		case SPEED_1000 + DUPLEX_HALF: /* not supported */
		case SPEED_1000 + DUPLEX_FULL: /* not supported */
		default:
			return -EINVAL;
		}
		mii_set_media(dev);
	}
	return 0;
}

static u32 rio_get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return np->link_status;
}

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo = rio_get_drvinfo,
	.get_settings = rio_get_settings,
	.set_settings = rio_set_settings,
	.get_link = rio_get_link,
};
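/*
 * Private ioctls: SIOCDEVPRIVATE+1/+2 expose raw MII register reads
 * and writes through struct mii_data, +5/+6 stop and wake the Tx
 * queue, and +7/+8 dump queue counters and the Tx descriptor ring for
 * debugging.
 */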
static int
rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
	int phy_addr;
	struct netdev_private *np = netdev_priv(dev);
	struct mii_data *miidata = (struct mii_data *) &rq->ifr_ifru;

	struct netdev_desc *desc;
	int i;

	phy_addr = np->phy_addr;
	switch (cmd) {
	case SIOCDEVPRIVATE:
		break;

	case SIOCDEVPRIVATE + 1:
		miidata->out_value = mii_read (dev, phy_addr, miidata->reg_num);
		break;
	case SIOCDEVPRIVATE + 2:
		mii_write (dev, phy_addr, miidata->reg_num, miidata->in_value);
		break;
	case SIOCDEVPRIVATE + 3:
		break;
	case SIOCDEVPRIVATE + 4:
		break;
	case SIOCDEVPRIVATE + 5:
		netif_stop_queue (dev);
		break;
	case SIOCDEVPRIVATE + 6:
		netif_wake_queue (dev);
		break;
	case SIOCDEVPRIVATE + 7:
		printk
		    ("tx_full=%x cur_tx=%lx old_tx=%lx cur_rx=%lx old_rx=%lx\n",
		     netif_queue_stopped(dev), np->cur_tx, np->old_tx, np->cur_rx,
		     np->old_rx);
		break;
	case SIOCDEVPRIVATE + 8:
		printk("TX ring:\n");
		for (i = 0; i < TX_RING_SIZE; i++) {
			desc = &np->tx_ring[i];
			printk
			    ("%02x:cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
			     i,
			     (u32) (np->tx_ring_dma + i * sizeof (*desc)),
			     (u32)le64_to_cpu(desc->next_desc),
			     (u32)le64_to_cpu(desc->status),
			     (u32)(le64_to_cpu(desc->fraginfo) >> 32),
			     (u32)le64_to_cpu(desc->fraginfo));
			printk ("\n");
		}
		printk ("\n");
		break;

	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
#define EEP_READ 0x0200
#define EEP_BUSY 0x8000
/* Read the EEPROM word */
/* We use I/O instruction to read/write eeprom to avoid fail on some machines */
static int
read_eeprom (long ioaddr, int eep_addr)
{
	int i = 1000;
	outw (EEP_READ | (eep_addr & 0xff), ioaddr + EepromCtrl);
	while (i-- > 0) {
		if (!(inw (ioaddr + EepromCtrl) & EEP_BUSY)) {
			return inw (ioaddr + EepromData);
		}
	}
	return 0;
}
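/*
 * Software MDIO (IEEE 802.3 clause 22) bit-banged through PhyCtrl:
 * each bit is driven on MII_DATA1 with MII_WRITE set, then latched by
 * toggling MII_CLK; reads sample bit 1 of PhyCtrl after the clock
 * edge.  A read frame is a 32-bit preamble, ST/OP bits 0110, five PHY
 * address bits and five register bits, after which the PHY supplies
 * the turnaround and 16 data bits.
 */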
enum phy_ctrl_bits {
	MII_READ = 0x00, MII_CLK = 0x01, MII_DATA1 = 0x02, MII_WRITE = 0x04,
	MII_DUPLEX = 0x08,
};

#define mii_delay() readb(ioaddr)
static void
mii_sendbit (struct net_device *dev, u32 data)
{
	long ioaddr = dev->base_addr + PhyCtrl;
	data = (data) ? MII_DATA1 : 0;
	data |= MII_WRITE;
	data |= (readb (ioaddr) & 0xf8) | MII_WRITE;
	writeb (data, ioaddr);
	mii_delay ();
	writeb (data | MII_CLK, ioaddr);
	mii_delay ();
}

static int
mii_getbit (struct net_device *dev)
{
	long ioaddr = dev->base_addr + PhyCtrl;
	u8 data;

	data = (readb (ioaddr) & 0xf8) | MII_READ;
	writeb (data, ioaddr);
	mii_delay ();
	writeb (data | MII_CLK, ioaddr);
	mii_delay ();
	return ((readb (ioaddr) >> 1) & 1);
}

static void
mii_send_bits (struct net_device *dev, u32 data, int len)
{
	int i;
	for (i = len - 1; i >= 0; i--) {
		mii_sendbit (dev, data & (1 << i));
	}
}

static int
mii_read (struct net_device *dev, int phy_addr, int reg_num)
{
	u32 cmd;
	int i;
	u32 retval = 0;

	/* Preamble */
	mii_send_bits (dev, 0xffffffff, 32);
	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
	/* ST,OP = 0110'b for read operation */
	cmd = (0x06 << 10 | phy_addr << 5 | reg_num);
	mii_send_bits (dev, cmd, 14);
	/* Turnaround */
	if (mii_getbit (dev))
		goto err_out;
	/* Read data */
	for (i = 0; i < 16; i++) {
		retval |= mii_getbit (dev);
		retval <<= 1;
	}
	/* End cycle */
	mii_getbit (dev);
	return (retval >> 1) & 0xffff;

      err_out:
	return 0;
}
static int
mii_write (struct net_device *dev, int phy_addr, int reg_num, u16 data)
{
	u32 cmd;

	/* Preamble */
	mii_send_bits (dev, 0xffffffff, 32);
	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
	/* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */
	cmd = (0x5002 << 16) | (phy_addr << 23) | (reg_num << 18) | data;
	mii_send_bits (dev, cmd, 32);
	/* End cycle */
	mii_getbit (dev);
	return 0;
}
static int
mii_wait_link (struct net_device *dev, int wait)
{
	__u16 bmsr;
	int phy_addr;
	struct netdev_private *np;

	np = netdev_priv(dev);
	phy_addr = np->phy_addr;

	do {
		bmsr = mii_read (dev, phy_addr, MII_BMSR);
		if (bmsr & MII_BMSR_LINK_STATUS)
			return 0;
		mdelay (1);
	} while (--wait > 0);
	return -1;
}
static int
mii_get_media (struct net_device *dev)
{
	__u16 negotiate;
	__u16 bmsr;
	__u16 mscr;
	__u16 mssr;
	int phy_addr;
	struct netdev_private *np;

	np = netdev_priv(dev);
	phy_addr = np->phy_addr;

	bmsr = mii_read (dev, phy_addr, MII_BMSR);
	if (np->an_enable) {
		if (!(bmsr & MII_BMSR_AN_COMPLETE)) {
			/* Auto-Negotiation not completed */
			return -1;
		}
		negotiate = mii_read (dev, phy_addr, MII_ANAR) &
			mii_read (dev, phy_addr, MII_ANLPAR);
		mscr = mii_read (dev, phy_addr, MII_MSCR);
		mssr = mii_read (dev, phy_addr, MII_MSSR);
		if (mscr & MII_MSCR_1000BT_FD && mssr & MII_MSSR_LP_1000BT_FD) {
			np->speed = 1000;
			np->full_duplex = 1;
			printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
		} else if (mscr & MII_MSCR_1000BT_HD && mssr & MII_MSSR_LP_1000BT_HD) {
			np->speed = 1000;
			np->full_duplex = 0;
			printk (KERN_INFO "Auto 1000 Mbps, Half duplex\n");
		} else if (negotiate & MII_ANAR_100BX_FD) {
			np->speed = 100;
			np->full_duplex = 1;
			printk (KERN_INFO "Auto 100 Mbps, Full duplex\n");
		} else if (negotiate & MII_ANAR_100BX_HD) {
			np->speed = 100;
			np->full_duplex = 0;
			printk (KERN_INFO "Auto 100 Mbps, Half duplex\n");
		} else if (negotiate & MII_ANAR_10BT_FD) {
			np->speed = 10;
			np->full_duplex = 1;
			printk (KERN_INFO "Auto 10 Mbps, Full duplex\n");
		} else if (negotiate & MII_ANAR_10BT_HD) {
			np->speed = 10;
			np->full_duplex = 0;
			printk (KERN_INFO "Auto 10 Mbps, Half duplex\n");
		}
		if (negotiate & MII_ANAR_PAUSE) {
			np->tx_flow &= 1;
			np->rx_flow &= 1;
		} else if (negotiate & MII_ANAR_ASYMMETRIC) {
			np->tx_flow = 0;
			np->rx_flow &= 1;
		}
		/* else tx_flow, rx_flow = user select */
	} else {
		__u16 bmcr = mii_read (dev, phy_addr, MII_BMCR);
		switch (bmcr & (MII_BMCR_SPEED_100 | MII_BMCR_SPEED_1000)) {
		case MII_BMCR_SPEED_1000:
			printk (KERN_INFO "Operating at 1000 Mbps, ");
			break;
		case MII_BMCR_SPEED_100:
			printk (KERN_INFO "Operating at 100 Mbps, ");
			break;
		case 0:
			printk (KERN_INFO "Operating at 10 Mbps, ");
		}
		if (bmcr & MII_BMCR_DUPLEX_MODE) {
			printk (KERN_CONT "Full duplex\n");
		} else {
			printk (KERN_CONT "Half duplex\n");
		}
	}
	if (np->tx_flow)
		printk(KERN_INFO "Enable Tx Flow Control\n");
	else
		printk(KERN_INFO "Disable Tx Flow Control\n");
	if (np->rx_flow)
		printk(KERN_INFO "Enable Rx Flow Control\n");
	else
		printk(KERN_INFO "Disable Rx Flow Control\n");

	return 0;
}
static int
mii_set_media (struct net_device *dev)
{
	__u16 pscr;
	__u16 bmcr;
	__u16 bmsr;
	__u16 anar;
	int phy_addr;
	struct netdev_private *np;
	np = netdev_priv(dev);
	phy_addr = np->phy_addr;

	/* Does user set speed? */
	if (np->an_enable) {
		/* Advertise capabilities */
		bmsr = mii_read (dev, phy_addr, MII_BMSR);
		anar = mii_read (dev, phy_addr, MII_ANAR) &
			~MII_ANAR_100BX_FD &
			~MII_ANAR_100BX_HD &
			~MII_ANAR_100BT4 &
			~MII_ANAR_10BT_FD &
			~MII_ANAR_10BT_HD;
		if (bmsr & MII_BMSR_100BX_FD)
			anar |= MII_ANAR_100BX_FD;
		if (bmsr & MII_BMSR_100BX_HD)
			anar |= MII_ANAR_100BX_HD;
		if (bmsr & MII_BMSR_100BT4)
			anar |= MII_ANAR_100BT4;
		if (bmsr & MII_BMSR_10BT_FD)
			anar |= MII_ANAR_10BT_FD;
		if (bmsr & MII_BMSR_10BT_HD)
			anar |= MII_ANAR_10BT_HD;
		anar |= MII_ANAR_PAUSE | MII_ANAR_ASYMMETRIC;
		mii_write (dev, phy_addr, MII_ANAR, anar);

		/* Enable Auto crossover */
		pscr = mii_read (dev, phy_addr, MII_PHY_SCR);
		pscr |= 3 << 5;	/* 11'b */
		mii_write (dev, phy_addr, MII_PHY_SCR, pscr);

		/* Soft reset PHY */
		mii_write (dev, phy_addr, MII_BMCR, MII_BMCR_RESET);
		bmcr = MII_BMCR_AN_ENABLE | MII_BMCR_RESTART_AN | MII_BMCR_RESET;
		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		mdelay(1);
	} else {
		/* Force speed setting */
		/* 1) Disable Auto crossover */
		pscr = mii_read (dev, phy_addr, MII_PHY_SCR);
		pscr &= ~(3 << 5);
		mii_write (dev, phy_addr, MII_PHY_SCR, pscr);

		/* 2) PHY Reset */
		bmcr = mii_read (dev, phy_addr, MII_BMCR);
		bmcr |= MII_BMCR_RESET;
		mii_write (dev, phy_addr, MII_BMCR, bmcr);

		/* 3) Power Down */
		bmcr = 0x1940;	/* must be 0x1940 */
		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		mdelay (100);	/* wait a certain time */

		/* 4) Advertise nothing */
		mii_write (dev, phy_addr, MII_ANAR, 0);

		/* 5) Set media and Power Up */
		bmcr = MII_BMCR_POWER_DOWN;
		if (np->speed == 100) {
			bmcr |= MII_BMCR_SPEED_100;
			printk (KERN_INFO "Manual 100 Mbps, ");
		} else if (np->speed == 10) {
			printk (KERN_INFO "Manual 10 Mbps, ");
		}
		if (np->full_duplex) {
			bmcr |= MII_BMCR_DUPLEX_MODE;
			printk (KERN_CONT "Full duplex\n");
		} else {
			printk (KERN_CONT "Half duplex\n");
		}
#if 0
		/* Set 1000BaseT Master/Slave setting */
		mscr = mii_read (dev, phy_addr, MII_MSCR);
		mscr |= MII_MSCR_CFG_ENABLE;
		mscr &= ~MII_MSCR_CFG_VALUE = 0;
#endif
		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		mdelay(10);
	}
	return 0;
}
static int
mii_get_media_pcs (struct net_device *dev)
{
	__u16 negotiate;
	__u16 bmsr;
	int phy_addr;
	struct netdev_private *np;

	np = netdev_priv(dev);
	phy_addr = np->phy_addr;

	bmsr = mii_read (dev, phy_addr, PCS_BMSR);
	if (np->an_enable) {
		if (!(bmsr & MII_BMSR_AN_COMPLETE)) {
			/* Auto-Negotiation not completed */
			return -1;
		}
		negotiate = mii_read (dev, phy_addr, PCS_ANAR) &
			mii_read (dev, phy_addr, PCS_ANLPAR);
		np->speed = 1000;
		if (negotiate & PCS_ANAR_FULL_DUPLEX) {
			printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
			np->full_duplex = 1;
		} else {
			printk (KERN_INFO "Auto 1000 Mbps, half duplex\n");
			np->full_duplex = 0;
		}
		if (negotiate & PCS_ANAR_PAUSE) {
			np->tx_flow &= 1;
			np->rx_flow &= 1;
		} else if (negotiate & PCS_ANAR_ASYMMETRIC) {
			np->tx_flow = 0;
			np->rx_flow &= 1;
		}
		/* else tx_flow, rx_flow = user select */
	} else {
		__u16 bmcr = mii_read (dev, phy_addr, PCS_BMCR);
		printk (KERN_INFO "Operating at 1000 Mbps, ");
		if (bmcr & MII_BMCR_DUPLEX_MODE) {
			printk (KERN_CONT "Full duplex\n");
		} else {
			printk (KERN_CONT "Half duplex\n");
		}
	}
	if (np->tx_flow)
		printk(KERN_INFO "Enable Tx Flow Control\n");
	else
		printk(KERN_INFO "Disable Tx Flow Control\n");
	if (np->rx_flow)
		printk(KERN_INFO "Enable Rx Flow Control\n");
	else
		printk(KERN_INFO "Disable Rx Flow Control\n");

	return 0;
}

static int
mii_set_media_pcs (struct net_device *dev)
{
	__u16 bmcr;
	__u16 esr;
	__u16 anar;
	int phy_addr;
	struct netdev_private *np;
	np = netdev_priv(dev);
	phy_addr = np->phy_addr;

	/* Auto-Negotiation? */
	if (np->an_enable) {
		/* Advertise capabilities */
		esr = mii_read (dev, phy_addr, PCS_ESR);
		anar = mii_read (dev, phy_addr, MII_ANAR) &
			~PCS_ANAR_HALF_DUPLEX &
			~PCS_ANAR_FULL_DUPLEX;
		if (esr & (MII_ESR_1000BT_HD | MII_ESR_1000BX_HD))
			anar |= PCS_ANAR_HALF_DUPLEX;
		if (esr & (MII_ESR_1000BT_FD | MII_ESR_1000BX_FD))
			anar |= PCS_ANAR_FULL_DUPLEX;
		anar |= PCS_ANAR_PAUSE | PCS_ANAR_ASYMMETRIC;
		mii_write (dev, phy_addr, MII_ANAR, anar);

		/* Soft reset PHY */
		mii_write (dev, phy_addr, MII_BMCR, MII_BMCR_RESET);
		bmcr = MII_BMCR_AN_ENABLE | MII_BMCR_RESTART_AN |
		       MII_BMCR_RESET;
		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		mdelay(1);
	} else {
		/* Force speed setting */
		/* PHY Reset */
		bmcr = MII_BMCR_RESET;
		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		mdelay(10);
		if (np->full_duplex) {
			bmcr = MII_BMCR_DUPLEX_MODE;
			printk (KERN_INFO "Manual full duplex\n");
		} else {
			bmcr = 0;
			printk (KERN_INFO "Manual half duplex\n");
		}
		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		mdelay(10);

		/* Advertise nothing */
		mii_write (dev, phy_addr, MII_ANAR, 0);
	}
	return 0;
}

static int
rio_close (struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = netdev_priv(dev);
	struct sk_buff *skb;
	int i;

	netif_stop_queue (dev);

	/* Disable interrupts */
	writew (0, ioaddr + IntEnable);

	/* Stop Tx and Rx logics */
	writel (TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl);

	free_irq (dev->irq, dev);
	del_timer_sync (&np->timer);

	/* Free all the skbuffs in the queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		np->rx_ring[i].fraginfo = 0;
		skb = np->rx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pdev,
					 desc_to_dma(&np->rx_ring[i]),
					 skb->len, PCI_DMA_FROMDEVICE);
			dev_kfree_skb (skb);
			np->rx_skbuff[i] = NULL;
		}
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = np->tx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pdev,
					 desc_to_dma(&np->tx_ring[i]),
					 skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb (skb);
			np->tx_skbuff[i] = NULL;
		}
	}

	return 0;
}

static void __devexit
rio_remove1 (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);

		unregister_netdev (dev);
		pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring,
				     np->rx_ring_dma);
		pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring,
				     np->tx_ring_dma);
#ifdef MEM_MAPPING
		iounmap ((char *) (dev->base_addr));
#endif
		free_netdev (dev);
		pci_release_regions (pdev);
		pci_disable_device (pdev);
	}
	pci_set_drvdata (pdev, NULL);
}
static struct pci_driver rio_driver = {
	.name		= "dl2k",
	.id_table	= rio_pci_tbl,
	.probe		= rio_probe1,
	.remove		= __devexit_p(rio_remove1),
};

static int __init
rio_init (void)
{
	return pci_register_driver(&rio_driver);
}

static void __exit
rio_exit (void)
{
	pci_unregister_driver (&rio_driver);
}

module_init (rio_init);
module_exit (rio_exit);

/*

Compile command:

gcc -D__KERNEL__ -DMODULE -I/usr/src/linux/include -Wall -Wstrict-prototypes -O2 -c dl2k.c

Read Documentation/networking/dl2k.txt for details.

*/