Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/net/dl2k.c at v2.6.23-rc2 (1834 lines, 49 kB)
/* D-Link DL2000-based Gigabit Ethernet Adapter Linux driver */
/*
    Copyright (c) 2001, 2002 by D-Link Corporation
    Written by Edward Peng.<edward_peng@dlink.com.tw>
    Created 03-May-2001, based on Linux' sundance.c.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.
*/

#define DRV_NAME	"D-Link DL2000-based Linux driver"
#define DRV_VERSION	"v1.18"
#define DRV_RELDATE	"2006/06/27"
#include "dl2k.h"
#include <linux/dma-mapping.h>

static char version[] __devinitdata =
	KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";
#define MAX_UNITS 8
static int mtu[MAX_UNITS];
static int vlan[MAX_UNITS];
static int jumbo[MAX_UNITS];
static char *media[MAX_UNITS];
static int tx_flow = -1;
static int rx_flow = -1;
static int copy_thresh;
static int rx_coalesce = 10;	/* Rx frame count each interrupt */
static int rx_timeout = 200;	/* Rx DMA wait time in 640ns increments */
static int tx_coalesce = 16;	/* HW xmit count each TxDMAComplete */


MODULE_AUTHOR ("Edward Peng");
MODULE_DESCRIPTION ("D-Link DL2000-based Gigabit Ethernet Adapter");
MODULE_LICENSE("GPL");
module_param_array(mtu, int, NULL, 0);
module_param_array(media, charp, NULL, 0);
module_param_array(vlan, int, NULL, 0);
module_param_array(jumbo, int, NULL, 0);
module_param(tx_flow, int, 0);
module_param(rx_flow, int, 0);
module_param(copy_thresh, int, 0);
module_param(rx_coalesce, int, 0);	/* Rx frame count each interrupt */
module_param(rx_timeout, int, 0);	/* Rx DMA wait time in 640ns increments */
module_param(tx_coalesce, int, 0);	/* HW xmit count each TxDMAComplete */


/* Enable the default interrupts */
#define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete| \
       UpdateStats | LinkEvent)
#define EnableInt() \
writew(DEFAULT_INTR, ioaddr + IntEnable)

static const int max_intrloop = 50;
static const int multicast_filter_limit = 0x40;

static int rio_open (struct net_device *dev);
static void rio_timer (unsigned long data);
static void rio_tx_timeout (struct net_device *dev);
static void alloc_list (struct net_device *dev);
static int start_xmit (struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rio_interrupt (int irq, void *dev_instance);
static void rio_free_tx (struct net_device *dev, int irq);
static void tx_error (struct net_device *dev, int tx_status);
static int receive_packet (struct net_device *dev);
static void rio_error (struct net_device *dev, int int_status);
static int change_mtu (struct net_device *dev, int new_mtu);
static void set_multicast (struct net_device *dev);
static struct net_device_stats *get_stats (struct net_device *dev);
static int clear_stats (struct net_device *dev);
static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
static int rio_close (struct net_device *dev);
static int find_miiphy (struct net_device *dev);
static int parse_eeprom (struct net_device *dev);
static int read_eeprom (long ioaddr, int eep_addr);
static int mii_wait_link (struct net_device *dev, int wait);
static int mii_set_media (struct net_device *dev);
static int mii_get_media (struct net_device *dev);
static int mii_set_media_pcs (struct net_device *dev);
static int mii_get_media_pcs (struct net_device *dev);
static int mii_read (struct net_device *dev, int phy_addr, int reg_num);
static int mii_write (struct net_device *dev, int phy_addr, int reg_num,
		      u16 data);

static const struct ethtool_ops ethtool_ops;
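/*
 * Usage sketch (illustrative, not from the original source): the module
 * parameters above are per-unit arrays indexed by probe order, e.g.
 *   modprobe dl2k media=auto,100mbps_fd jumbo=1,0
 * configures the first two adapters independently.
 */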
static int __devinit
rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int card_idx;
	int chip_idx = ent->driver_data;
	int err, irq;
	long ioaddr;
	static int version_printed;
	void *ring_space;
	dma_addr_t ring_dma;

	if (!version_printed++)
		printk ("%s", version);

	err = pci_enable_device (pdev);
	if (err)
		return err;

	irq = pdev->irq;
	err = pci_request_regions (pdev, "dl2k");
	if (err)
		goto err_out_disable;

	pci_set_master (pdev);
	dev = alloc_etherdev (sizeof (*np));
	if (!dev) {
		err = -ENOMEM;
		goto err_out_res;
	}
	SET_MODULE_OWNER (dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

#ifdef MEM_MAPPING
	ioaddr = pci_resource_start (pdev, 1);
	ioaddr = (long) ioremap (ioaddr, RIO_IO_SIZE);
	if (!ioaddr) {
		err = -ENOMEM;
		goto err_out_dev;
	}
#else
	ioaddr = pci_resource_start (pdev, 0);
#endif
	dev->base_addr = ioaddr;
	dev->irq = irq;
	np = netdev_priv(dev);
	np->chip_id = chip_idx;
	np->pdev = pdev;
	spin_lock_init (&np->tx_lock);
	spin_lock_init (&np->rx_lock);

	/* Parse manual configuration */
	np->an_enable = 1;
	np->tx_coalesce = 1;
	if (card_idx < MAX_UNITS) {
		if (media[card_idx] != NULL) {
			np->an_enable = 0;
			if (strcmp (media[card_idx], "auto") == 0 ||
			    strcmp (media[card_idx], "autosense") == 0 ||
			    strcmp (media[card_idx], "0") == 0 ) {
				np->an_enable = 2;
			} else if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
				   strcmp (media[card_idx], "4") == 0) {
				np->speed = 100;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
				   strcmp (media[card_idx], "3") == 0) {
				np->speed = 100;
				np->full_duplex = 0;
			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
				   strcmp (media[card_idx], "2") == 0) {
				np->speed = 10;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
				   strcmp (media[card_idx], "1") == 0) {
				np->speed = 10;
				np->full_duplex = 0;
			} else if (strcmp (media[card_idx], "1000mbps_fd") == 0 ||
				   strcmp (media[card_idx], "6") == 0) {
				np->speed = 1000;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "1000mbps_hd") == 0 ||
				   strcmp (media[card_idx], "5") == 0) {
				np->speed = 1000;
				np->full_duplex = 0;
			} else {
				np->an_enable = 1;
			}
		}
		if (jumbo[card_idx] != 0) {
			np->jumbo = 1;
			dev->mtu = MAX_JUMBO;
		} else {
			np->jumbo = 0;
			if (mtu[card_idx] > 0 && mtu[card_idx] < PACKET_SIZE)
				dev->mtu = mtu[card_idx];
		}
		np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ?
		    vlan[card_idx] : 0;
		if (rx_coalesce > 0 && rx_timeout > 0) {
			np->rx_coalesce = rx_coalesce;
			np->rx_timeout = rx_timeout;
			np->coalesce = 1;
		}
		np->tx_flow = (tx_flow == 0) ? 0 : 1;
		np->rx_flow = (rx_flow == 0) ? 0 : 1;
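		/* tx_flow/rx_flow default to -1 (enabled); only an
		 * explicit 0 disables flow control.  tx_coalesce is
		 * clamped below to the valid range [1, TX_RING_SIZE-1].
		 */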
		if (tx_coalesce < 1)
			tx_coalesce = 1;
		else if (tx_coalesce > TX_RING_SIZE-1)
			tx_coalesce = TX_RING_SIZE - 1;
	}
	dev->open = &rio_open;
	dev->hard_start_xmit = &start_xmit;
	dev->stop = &rio_close;
	dev->get_stats = &get_stats;
	dev->set_multicast_list = &set_multicast;
	dev->do_ioctl = &rio_ioctl;
	dev->tx_timeout = &rio_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->change_mtu = &change_mtu;
	SET_ETHTOOL_OPS(dev, &ethtool_ops);
#if 0
	dev->features = NETIF_F_IP_CSUM;
#endif
	pci_set_drvdata (pdev, dev);

	ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_iounmap;
	np->tx_ring = (struct netdev_desc *) ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	np->rx_ring = (struct netdev_desc *) ring_space;
	np->rx_ring_dma = ring_dma;

	/* Parse eeprom data */
	parse_eeprom (dev);

	/* Find PHY address */
	err = find_miiphy (dev);
	if (err)
		goto err_out_unmap_rx;

	/* Fiber device? */
	np->phy_media = (readw(ioaddr + ASICCtrl) & PhyMedia) ? 1 : 0;
	np->link_status = 0;
	/* Set media and reset PHY */
	if (np->phy_media) {
		/* default Auto-Negotiation for fiber devices */
		if (np->an_enable == 2) {
			np->an_enable = 1;
		}
		mii_set_media_pcs (dev);
	} else {
		/* Auto-Negotiation is mandatory for 1000BASE-T,
		   IEEE 802.3ab Annex 28D page 14 */
		if (np->speed == 1000)
			np->an_enable = 1;
		mii_set_media (dev);
	}

	err = register_netdev (dev);
	if (err)
		goto err_out_unmap_rx;

	card_idx++;

	printk (KERN_INFO "%s: %s, %02x:%02x:%02x:%02x:%02x:%02x, IRQ %d\n",
		dev->name, np->name,
		dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
		dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5], irq);
	if (tx_coalesce > 1)
		printk(KERN_INFO "tx_coalesce:\t%d packets\n",
				tx_coalesce);
	if (np->coalesce)
		printk(KERN_INFO "rx_coalesce:\t%d packets\n"
		       KERN_INFO "rx_timeout: \t%d ns\n",
				np->rx_coalesce, np->rx_timeout*640);
	if (np->vlan)
		printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
	return 0;

      err_out_unmap_rx:
	pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
      err_out_unmap_tx:
	pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
      err_out_iounmap:
#ifdef MEM_MAPPING
	iounmap ((void *) ioaddr);

      err_out_dev:
#endif
	free_netdev (dev);

      err_out_res:
	pci_release_regions (pdev);

      err_out_disable:
	pci_disable_device (pdev);
	return err;
}

int
find_miiphy (struct net_device *dev)
{
	int i, phy_found = 0;
	struct netdev_private *np;
	long ioaddr;
	np = netdev_priv(dev);
	ioaddr = dev->base_addr;
	np->phy_addr = 1;

	for (i = 31; i >= 0; i--) {
		int mii_status = mii_read (dev, i, 1);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			np->phy_addr = i;
			phy_found++;
		}
	}
	if (!phy_found) {
		printk (KERN_ERR "%s: No MII PHY found!\n", dev->name);
		return -ENODEV;
	}
	return 0;
}
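/*
 * SROM layout as assumed by parse_eeprom() below (inferred from the
 * code, not from a datasheet): 256 bytes total, CRC-32 in the last
 * four bytes, the MAC address in the PSROM header, and a Software
 * Information Block of {cell-id, next-offset, data...} cells starting
 * at offset 0x30.
 */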
int
parse_eeprom (struct net_device *dev)
{
	int i, j;
	long ioaddr = dev->base_addr;
	u8 sromdata[256];
	u8 *psib;
	u32 crc;
	PSROM_t psrom = (PSROM_t) sromdata;
	struct netdev_private *np = netdev_priv(dev);

	int cid, next;

#ifdef	MEM_MAPPING
	ioaddr = pci_resource_start (np->pdev, 0);
#endif
	/* Read eeprom */
	for (i = 0; i < 128; i++) {
		((u16 *) sromdata)[i] = le16_to_cpu (read_eeprom (ioaddr, i));
	}
#ifdef	MEM_MAPPING
	ioaddr = dev->base_addr;
#endif
	/* Check CRC */
	crc = ~ether_crc_le (256 - 4, sromdata);
	if (psrom->crc != crc) {
		printk (KERN_ERR "%s: EEPROM data CRC error.\n", dev->name);
		return -1;
	}

	/* Set MAC address */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = psrom->mac_addr[i];

	/* Parse Software Information Block */
	i = 0x30;
	psib = (u8 *) sromdata;
	do {
		cid = psib[i++];
		next = psib[i++];
		if ((cid == 0 && next == 0) || (cid == 0xff && next == 0xff)) {
			printk (KERN_ERR "Cell data error\n");
			return -1;
		}
		switch (cid) {
		case 0:	/* Format version */
			break;
		case 1:	/* End of cell */
			return 0;
		case 2:	/* Duplex Polarity */
			np->duplex_polarity = psib[i];
			writeb (readb (ioaddr + PhyCtrl) | psib[i],
				ioaddr + PhyCtrl);
			break;
		case 3:	/* Wake Polarity */
			np->wake_polarity = psib[i];
			break;
		case 9:	/* Adapter description */
			j = (next - i > 255) ? 255 : next - i;
			memcpy (np->name, &(psib[i]), j);
			break;
		case 4:
		case 5:
		case 6:
		case 7:
		case 8:	/* Reserved */
			break;
		default:	/* Unknown cell */
			return -1;
		}
		i = next;
	} while (1);

	return 0;
}

static int
rio_open (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	int i;
	u16 macctrl;

	i = request_irq (dev->irq, &rio_interrupt, IRQF_SHARED, dev->name, dev);
	if (i)
		return i;

	/* Reset all logic functions */
	writew (GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset,
		ioaddr + ASICCtrl + 2);
	mdelay(10);

	/* DebugCtrl bits 4, 5, 9 must be set */
	writel (readl (ioaddr + DebugCtrl) | 0x0230, ioaddr + DebugCtrl);

	/* Jumbo frame */
	if (np->jumbo != 0)
		writew (MAX_JUMBO+14, ioaddr + MaxFrameSize);

	alloc_list (dev);

	/* Get station address */
	for (i = 0; i < 6; i++)
		writeb (dev->dev_addr[i], ioaddr + StationAddr0 + i);

	set_multicast (dev);
	if (np->coalesce) {
		writel (np->rx_coalesce | np->rx_timeout << 16,
			ioaddr + RxDMAIntCtrl);
	}
	/* Set RIO to poll every N*320nsec. */
	writeb (0x20, ioaddr + RxDMAPollPeriod);
	writeb (0xff, ioaddr + TxDMAPollPeriod);
	writeb (0x30, ioaddr + RxDMABurstThresh);
	writeb (0x30, ioaddr + RxDMAUrgentThresh);
	writel (0x0007ffff, ioaddr + RmonStatMask);
	/* clear statistics */
	clear_stats (dev);

	/* VLAN supported */
	if (np->vlan) {
		/* priority field in RxDMAIntCtrl */
		writel (readl(ioaddr + RxDMAIntCtrl) | 0x7 << 10,
			ioaddr + RxDMAIntCtrl);
		/* VLANId */
		writew (np->vlan, ioaddr + VLANId);
		/* Length/Type should be 0x8100 */
		writel (0x8100 << 16 | np->vlan, ioaddr + VLANTag);
		/* Enable AutoVLANuntagging, but disable AutoVLANtagging.
		   VLAN information tagged by TFC's VID, CFI fields. */
		writel (readl (ioaddr + MACCtrl) | AutoVLANuntagging,
			ioaddr + MACCtrl);
	}
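	/* A one-second timer (rio_timer below) periodically refills the
	 * Rx ring if it was ever exhausted.
	 */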
	init_timer (&np->timer);
	np->timer.expires = jiffies + 1*HZ;
	np->timer.data = (unsigned long) dev;
	np->timer.function = &rio_timer;
	add_timer (&np->timer);

	/* Start Tx/Rx */
	writel (readl (ioaddr + MACCtrl) | StatsEnable | RxEnable | TxEnable,
		ioaddr + MACCtrl);

	macctrl = 0;
	macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
	macctrl |= (np->full_duplex) ? DuplexSelect : 0;
	macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
	macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
	writew(macctrl, ioaddr + MACCtrl);

	netif_start_queue (dev);

	/* Enable default interrupts */
	EnableInt ();
	return 0;
}

static void
rio_timer (unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	unsigned int entry;
	int next_tick = 1*HZ;
	unsigned long flags;

	spin_lock_irqsave(&np->rx_lock, flags);
	/* Recover rx ring exhausted error */
	if (np->cur_rx - np->old_rx >= RX_RING_SIZE) {
		printk(KERN_INFO "Try to recover rx ring exhausted...\n");
		/* Re-allocate skbuffs to fill the descriptor ring */
		for (; np->cur_rx - np->old_rx > 0; np->old_rx++) {
			struct sk_buff *skb;
			entry = np->old_rx % RX_RING_SIZE;
			/* Dropped packets don't need to re-allocate */
			if (np->rx_skbuff[entry] == NULL) {
				skb = dev_alloc_skb (np->rx_buf_sz);
				if (skb == NULL) {
					np->rx_ring[entry].fraginfo = 0;
					printk (KERN_INFO
						"%s: Still unable to re-allocate Rx skbuff.#%d\n",
						dev->name, entry);
					break;
				}
				np->rx_skbuff[entry] = skb;
				/* 16 byte align the IP header */
				skb_reserve (skb, 2);
				np->rx_ring[entry].fraginfo =
				    cpu_to_le64 (pci_map_single
						 (np->pdev, skb->data, np->rx_buf_sz,
						  PCI_DMA_FROMDEVICE));
			}
			np->rx_ring[entry].fraginfo |=
			    cpu_to_le64 (np->rx_buf_sz) << 48;
			np->rx_ring[entry].status = 0;
		} /* end for */
	} /* end if */
	spin_unlock_irqrestore (&np->rx_lock, flags);
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}

static void
rio_tx_timeout (struct net_device *dev)
{
	long ioaddr = dev->base_addr;

	printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n",
		dev->name, readl (ioaddr + TxStatus));
	rio_free_tx(dev, 0);
	dev->if_port = 0;
	dev->trans_start = jiffies;
}

 /* allocate and initialize Tx and Rx descriptors */
static void
alloc_list (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->cur_tx = 0;
	np->old_rx = np->old_tx = 0;
	np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);
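	/* Both rings are circular: each descriptor's next_desc points to
	 * entry (i + 1) modulo the ring size.  fraginfo packs the DMA
	 * address in its low 48 bits and the buffer length in bits
	 * 48-63, hence the (len << 48) OR below.
	 */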
	/* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = cpu_to_le64 (TFDDone);
		np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma +
					      ((i+1)%TX_RING_SIZE) *
					      sizeof (struct netdev_desc));
	}

	/* Initialize Rx descriptors */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le64 (np->rx_ring_dma +
						((i + 1) % RX_RING_SIZE) *
						sizeof (struct netdev_desc));
		np->rx_ring[i].status = 0;
		np->rx_ring[i].fraginfo = 0;
		np->rx_skbuff[i] = NULL;
	}

	/* Allocate the rx buffers */
	for (i = 0; i < RX_RING_SIZE; i++) {
		/* Allocated fixed size of skbuff */
		struct sk_buff *skb = dev_alloc_skb (np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL) {
			printk (KERN_ERR
				"%s: alloc_list: allocate Rx buffer error! ",
				dev->name);
			break;
		}
		skb_reserve (skb, 2);	/* 16 byte align the IP header. */
		/* Rubicon now supports 40 bits of addressing space. */
		np->rx_ring[i].fraginfo =
		    cpu_to_le64 (pci_map_single (
				 np->pdev, skb->data, np->rx_buf_sz,
				 PCI_DMA_FROMDEVICE));
		np->rx_ring[i].fraginfo |= cpu_to_le64 (np->rx_buf_sz) << 48;
	}

	/* Set RFDListPtr */
	writel (cpu_to_le32 (np->rx_ring_dma), dev->base_addr + RFDListPtr0);
	writel (0, dev->base_addr + RFDListPtr1);

	return;
}

static int
start_xmit (struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct netdev_desc *txdesc;
	unsigned entry;
	u32 ioaddr;
	u64 tfc_vlan_tag = 0;

	if (np->link_status == 0) {	/* Link Down */
		dev_kfree_skb(skb);
		return 0;
	}
	ioaddr = dev->base_addr;
	entry = np->cur_tx % TX_RING_SIZE;
	np->tx_skbuff[entry] = skb;
	txdesc = &np->tx_ring[entry];

#if 0
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdesc->status |=
		    cpu_to_le64 (TCPChecksumEnable | UDPChecksumEnable |
				 IPChecksumEnable);
	}
#endif
	if (np->vlan) {
		tfc_vlan_tag =
		    cpu_to_le64 (VLANTagInsert) |
		    (cpu_to_le64 (np->vlan) << 32) |
		    (cpu_to_le64 (skb->priority) << 45);
	}
	txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data,
							skb->len,
							PCI_DMA_TODEVICE));
	txdesc->fraginfo |= cpu_to_le64 (skb->len) << 48;

	/* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode
	 * Work around: Always use 1 descriptor in 10Mbps mode */
	if (entry % np->tx_coalesce == 0 || np->speed == 10)
		txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
					      WordAlignDisable |
					      TxDMAIndicate |
					      (1 << FragCountShift));
	else
		txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
					      WordAlignDisable |
					      (1 << FragCountShift));

	/* TxDMAPollNow */
	writel (readl (ioaddr + DMACtrl) | 0x00001000, ioaddr + DMACtrl);
	/* Schedule ISR */
	writel(10000, ioaddr + CountDown);
	np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
	if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
			< TX_QUEUE_LEN - 1 && np->speed != 10) {
		/* do nothing */
	} else if (!netif_queue_stopped(dev)) {
		netif_stop_queue (dev);
	}

	/* The first TFDListPtr */
	if (readl (dev->base_addr + TFDListPtr0) == 0) {
		writel (np->tx_ring_dma + entry * sizeof (struct netdev_desc),
			dev->base_addr + TFDListPtr0);
		writel (0, dev->base_addr + TFDListPtr1);
	}

	/* NETDEV WATCHDOG timer */
	dev->trans_start = jiffies;
	return 0;
}
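/*
 * Interrupt mitigation note (inferred from the code, not documented):
 * start_xmit() arms the CountDown register so an IntRequested interrupt
 * fires even when TxDMAComplete is coalesced away; rio_interrupt()
 * re-arms it with a short count whenever Tx work is still outstanding.
 */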
static irqreturn_t
rio_interrupt (int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct netdev_private *np;
	unsigned int_status;
	long ioaddr;
	int cnt = max_intrloop;
	int handled = 0;

	ioaddr = dev->base_addr;
	np = netdev_priv(dev);
	while (1) {
		int_status = readw (ioaddr + IntStatus);
		writew (int_status, ioaddr + IntStatus);
		int_status &= DEFAULT_INTR;
		if (int_status == 0 || --cnt < 0)
			break;
		handled = 1;
		/* Processing received packets */
		if (int_status & RxDMAComplete)
			receive_packet (dev);
		/* TxDMAComplete interrupt */
		if ((int_status & (TxDMAComplete|IntRequested))) {
			int tx_status;
			tx_status = readl (ioaddr + TxStatus);
			if (tx_status & 0x01)
				tx_error (dev, tx_status);
			/* Free used tx skbuffs */
			rio_free_tx (dev, 1);
		}

		/* Handle uncommon events */
		if (int_status &
		    (HostError | LinkEvent | UpdateStats))
			rio_error (dev, int_status);
	}
	if (np->cur_tx != np->old_tx)
		writel (100, ioaddr + CountDown);
	return IRQ_RETVAL(handled);
}

static void
rio_free_tx (struct net_device *dev, int irq)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->old_tx % TX_RING_SIZE;
	int tx_use = 0;
	unsigned long flag = 0;

	if (irq)
		spin_lock(&np->tx_lock);
	else
		spin_lock_irqsave(&np->tx_lock, flag);

	/* Free used tx skbuffs */
	while (entry != np->cur_tx) {
		struct sk_buff *skb;

		if (!(np->tx_ring[entry].status & TFDDone))
			break;
		skb = np->tx_skbuff[entry];
		pci_unmap_single (np->pdev,
				  np->tx_ring[entry].fraginfo & DMA_48BIT_MASK,
				  skb->len, PCI_DMA_TODEVICE);
		if (irq)
			dev_kfree_skb_irq (skb);
		else
			dev_kfree_skb (skb);

		np->tx_skbuff[entry] = NULL;
		entry = (entry + 1) % TX_RING_SIZE;
		tx_use++;
	}
	if (irq)
		spin_unlock(&np->tx_lock);
	else
		spin_unlock_irqrestore(&np->tx_lock, flag);
	np->old_tx = entry;

	/* If the ring is no longer full, clear tx_full and
	   call netif_wake_queue() */

	if (netif_queue_stopped(dev) &&
	    ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
	    < TX_QUEUE_LEN - 1 || np->speed == 10)) {
		netif_wake_queue (dev);
	}
}
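/*
 * tx_error recovery (summarizing the code below): on a Tx underrun the
 * start threshold is raised and the Tx/DMA/FIFO blocks are reset,
 * after which the TFD list pointer is rewound to the first unsent
 * descriptor; a late collision only resets Tx and the FIFO.
 */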
static void
tx_error (struct net_device *dev, int tx_status)
{
	struct netdev_private *np;
	long ioaddr = dev->base_addr;
	int frame_id;
	int i;

	np = netdev_priv(dev);

	frame_id = (tx_status & 0xffff0000);
	printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
		dev->name, tx_status, frame_id);
	np->stats.tx_errors++;
	/* Transmit Underrun */
	if (tx_status & 0x10) {
		np->stats.tx_fifo_errors++;
		writew (readw (ioaddr + TxStartThresh) + 0x10,
			ioaddr + TxStartThresh);
		/* Transmit Underrun need to set TxReset, DMARest, FIFOReset */
		writew (TxReset | DMAReset | FIFOReset | NetworkReset,
			ioaddr + ASICCtrl + 2);
		/* Wait for ResetBusy bit clear */
		for (i = 50; i > 0; i--) {
			if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
				break;
			mdelay (1);
		}
		rio_free_tx (dev, 1);
		/* Reset TFDListPtr */
		writel (np->tx_ring_dma +
			np->old_tx * sizeof (struct netdev_desc),
			dev->base_addr + TFDListPtr0);
		writel (0, dev->base_addr + TFDListPtr1);

		/* Let TxStartThresh stay default value */
	}
	/* Late Collision */
	if (tx_status & 0x04) {
		np->stats.tx_fifo_errors++;
		/* TxReset and clear FIFO */
		writew (TxReset | FIFOReset, ioaddr + ASICCtrl + 2);
		/* Wait reset done */
		for (i = 50; i > 0; i--) {
			if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
				break;
			mdelay (1);
		}
		/* Let TxStartThresh stay default value */
	}
	/* Maximum Collisions */
#ifdef ETHER_STATS
	if (tx_status & 0x08)
		np->stats.collisions16++;
#else
	if (tx_status & 0x08)
		np->stats.collisions++;
#endif
	/* Restart the Tx */
	writel (readw (dev->base_addr + MACCtrl) | TxEnable, ioaddr + MACCtrl);
}
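/*
 * Rx copybreak (as implemented below): frames longer than copy_thresh
 * keep their mapped ring buffer and hand it to the stack; shorter
 * frames are copied into a freshly allocated small skb so the large
 * ring buffer can be recycled in place.
 */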
static int
receive_packet (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int cnt = 30;

	/* If RFDDone, FrameStart and FrameEnd set, there is a new packet in. */
	while (1) {
		struct netdev_desc *desc = &np->rx_ring[entry];
		int pkt_len;
		u64 frame_status;

		if (!(desc->status & RFDDone) ||
		    !(desc->status & FrameStart) || !(desc->status & FrameEnd))
			break;

		/* Chip omits the CRC. */
		pkt_len = le64_to_cpu (desc->status & 0xffff);
		frame_status = le64_to_cpu (desc->status);
		if (--cnt < 0)
			break;
		/* Update rx error statistics, drop packet. */
		if (frame_status & RFS_Errors) {
			np->stats.rx_errors++;
			if (frame_status & (RxRuntFrame | RxLengthError))
				np->stats.rx_length_errors++;
			if (frame_status & RxFCSError)
				np->stats.rx_crc_errors++;
			if (frame_status & RxAlignmentError && np->speed != 1000)
				np->stats.rx_frame_errors++;
			if (frame_status & RxFIFOOverrun)
				np->stats.rx_fifo_errors++;
		} else {
			struct sk_buff *skb;

			/* Small skbuffs for short packets */
			if (pkt_len > copy_thresh) {
				pci_unmap_single (np->pdev,
						  desc->fraginfo & DMA_48BIT_MASK,
						  np->rx_buf_sz,
						  PCI_DMA_FROMDEVICE);
				skb_put (skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			} else if ((skb = dev_alloc_skb (pkt_len + 2)) != NULL) {
				pci_dma_sync_single_for_cpu(np->pdev,
							    desc->fraginfo &
							    DMA_48BIT_MASK,
							    np->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);
				/* 16 byte align the IP header */
				skb_reserve (skb, 2);
				skb_copy_to_linear_data (skb,
						  np->rx_skbuff[entry]->data,
						  pkt_len);
				skb_put (skb, pkt_len);
				pci_dma_sync_single_for_device(np->pdev,
							       desc->fraginfo &
							       DMA_48BIT_MASK,
							       np->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			}
			skb->protocol = eth_type_trans (skb, dev);
#if 0
			/* Checksum done by hw, but csum value unavailable. */
			if (np->pdev->pci_rev_id >= 0x0c &&
			    !(frame_status & (TCPError | UDPError | IPError))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			}
#endif
			netif_rx (skb);
			dev->last_rx = jiffies;
		}
		entry = (entry + 1) % RX_RING_SIZE;
	}
	spin_lock(&np->rx_lock);
	np->cur_rx = entry;
	/* Re-allocate skbuffs to fill the descriptor ring */
	entry = np->old_rx;
	while (entry != np->cur_rx) {
		struct sk_buff *skb;
		/* Dropped packets don't need to re-allocate */
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb (np->rx_buf_sz);
			if (skb == NULL) {
				np->rx_ring[entry].fraginfo = 0;
				printk (KERN_INFO
					"%s: receive_packet: "
					"Unable to re-allocate Rx skbuff.#%d\n",
					dev->name, entry);
				break;
			}
			np->rx_skbuff[entry] = skb;
			/* 16 byte align the IP header */
			skb_reserve (skb, 2);
			np->rx_ring[entry].fraginfo =
			    cpu_to_le64 (pci_map_single
					 (np->pdev, skb->data, np->rx_buf_sz,
					  PCI_DMA_FROMDEVICE));
		}
		np->rx_ring[entry].fraginfo |=
		    cpu_to_le64 (np->rx_buf_sz) << 48;
		np->rx_ring[entry].status = 0;
		entry = (entry + 1) % RX_RING_SIZE;
	}
	np->old_rx = entry;
	spin_unlock(&np->rx_lock);
	return 0;
}

static void
rio_error (struct net_device *dev, int int_status)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = netdev_priv(dev);
	u16 macctrl;

	/* Link change event */
	if (int_status & LinkEvent) {
		if (mii_wait_link (dev, 10) == 0) {
			printk (KERN_INFO "%s: Link up\n", dev->name);
			if (np->phy_media)
				mii_get_media_pcs (dev);
			else
				mii_get_media (dev);
			if (np->speed == 1000)
				np->tx_coalesce = tx_coalesce;
			else
				np->tx_coalesce = 1;
			macctrl = 0;
			macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
			macctrl |= (np->full_duplex) ? DuplexSelect : 0;
			macctrl |= (np->tx_flow) ?
				TxFlowControlEnable : 0;
			macctrl |= (np->rx_flow) ?
				RxFlowControlEnable : 0;
			writew(macctrl, ioaddr + MACCtrl);
			np->link_status = 1;
			netif_carrier_on(dev);
		} else {
			printk (KERN_INFO "%s: Link off\n", dev->name);
			np->link_status = 0;
			netif_carrier_off(dev);
		}
	}

	/* UpdateStats statistics registers */
	if (int_status & UpdateStats) {
		get_stats (dev);
	}

	/* PCI Error, a catastrophic error related to the bus interface
	   occurs, set GlobalReset and HostReset to reset. */
	if (int_status & HostError) {
		printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n",
			dev->name, int_status);
		writew (GlobalReset | HostReset, ioaddr + ASICCtrl + 2);
		mdelay (500);
	}
}
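/*
 * The hardware statistics registers appear to be clear-on-read: both
 * get_stats() and clear_stats() therefore touch every counter so that
 * none can overflow unacknowledged.
 */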
static struct net_device_stats *
get_stats (struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = netdev_priv(dev);
#ifdef MEM_MAPPING
	int i;
#endif
	unsigned int stat_reg;

	/* All statistics registers need to be acknowledged,
	   else statistic overflow could cause problems */

	np->stats.rx_packets += readl (ioaddr + FramesRcvOk);
	np->stats.tx_packets += readl (ioaddr + FramesXmtOk);
	np->stats.rx_bytes += readl (ioaddr + OctetRcvOk);
	np->stats.tx_bytes += readl (ioaddr + OctetXmtOk);

	np->stats.multicast = readl (ioaddr + McstFramesRcvdOk);
	np->stats.collisions += readl (ioaddr + SingleColFrames)
			     +  readl (ioaddr + MultiColFrames);

	/* detailed tx errors */
	stat_reg = readw (ioaddr + FramesAbortXSColls);
	np->stats.tx_aborted_errors += stat_reg;
	np->stats.tx_errors += stat_reg;

	stat_reg = readw (ioaddr + CarrierSenseErrors);
	np->stats.tx_carrier_errors += stat_reg;
	np->stats.tx_errors += stat_reg;

	/* Clear all other statistic register. */
	readl (ioaddr + McstOctetXmtOk);
	readw (ioaddr + BcstFramesXmtdOk);
	readl (ioaddr + McstFramesXmtdOk);
	readw (ioaddr + BcstFramesRcvdOk);
	readw (ioaddr + MacControlFramesRcvd);
	readw (ioaddr + FrameTooLongErrors);
	readw (ioaddr + InRangeLengthErrors);
	readw (ioaddr + FramesCheckSeqErrors);
	readw (ioaddr + FramesLostRxErrors);
	readl (ioaddr + McstOctetXmtOk);
	readl (ioaddr + BcstOctetXmtOk);
	readl (ioaddr + McstFramesXmtdOk);
	readl (ioaddr + FramesWDeferredXmt);
	readl (ioaddr + LateCollisions);
	readw (ioaddr + BcstFramesXmtdOk);
	readw (ioaddr + MacControlFramesXmtd);
	readw (ioaddr + FramesWEXDeferal);

#ifdef MEM_MAPPING
	for (i = 0x100; i <= 0x150; i += 4)
		readl (ioaddr + i);
#endif
	readw (ioaddr + TxJumboFrames);
	readw (ioaddr + RxJumboFrames);
	readw (ioaddr + TCPCheckSumErrors);
	readw (ioaddr + UDPCheckSumErrors);
	readw (ioaddr + IPCheckSumErrors);
	return &np->stats;
}

static int
clear_stats (struct net_device *dev)
{
	long ioaddr = dev->base_addr;
#ifdef MEM_MAPPING
	int i;
#endif

	/* All statistics registers need to be acknowledged,
	   else statistic overflow could cause problems */
	readl (ioaddr + FramesRcvOk);
	readl (ioaddr + FramesXmtOk);
	readl (ioaddr + OctetRcvOk);
	readl (ioaddr + OctetXmtOk);

	readl (ioaddr + McstFramesRcvdOk);
	readl (ioaddr + SingleColFrames);
	readl (ioaddr + MultiColFrames);
	readl (ioaddr + LateCollisions);
	/* detailed rx errors */
	readw (ioaddr + FrameTooLongErrors);
	readw (ioaddr + InRangeLengthErrors);
	readw (ioaddr + FramesCheckSeqErrors);
	readw (ioaddr + FramesLostRxErrors);

	/* detailed tx errors */
	readw (ioaddr + FramesAbortXSColls);
	readw (ioaddr + CarrierSenseErrors);

	/* Clear all other statistic register. */
	readl (ioaddr + McstOctetXmtOk);
	readw (ioaddr + BcstFramesXmtdOk);
	readl (ioaddr + McstFramesXmtdOk);
	readw (ioaddr + BcstFramesRcvdOk);
	readw (ioaddr + MacControlFramesRcvd);
	readl (ioaddr + McstOctetXmtOk);
	readl (ioaddr + BcstOctetXmtOk);
	readl (ioaddr + McstFramesXmtdOk);
	readl (ioaddr + FramesWDeferredXmt);
	readw (ioaddr + BcstFramesXmtdOk);
	readw (ioaddr + MacControlFramesXmtd);
	readw (ioaddr + FramesWEXDeferal);
#ifdef MEM_MAPPING
	for (i = 0x100; i <= 0x150; i += 4)
		readl (ioaddr + i);
#endif
	readw (ioaddr + TxJumboFrames);
	readw (ioaddr + RxJumboFrames);
	readw (ioaddr + TCPCheckSumErrors);
	readw (ioaddr + UDPCheckSumErrors);
	readw (ioaddr + IPCheckSumErrors);
	return 0;
}
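/* MTU bounds below: 68 bytes is the usual Linux driver minimum; the
 * maximum is 1536, or MAX_JUMBO when jumbo mode was enabled at load
 * time.
 */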
int
change_mtu (struct net_device *dev, int new_mtu)
{
	struct netdev_private *np = netdev_priv(dev);
	int max = (np->jumbo) ? MAX_JUMBO : 1536;

	if ((new_mtu < 68) || (new_mtu > max)) {
		return -EINVAL;
	}

	dev->mtu = new_mtu;

	return 0;
}

static void
set_multicast (struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	u32 hash_table[2];
	u16 rx_mode = 0;
	struct netdev_private *np = netdev_priv(dev);

	hash_table[0] = hash_table[1] = 0;
	/* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */
	hash_table[1] |= cpu_to_le32(0x02000000);
	if (dev->flags & IFF_PROMISC) {
		/* Receive all frames promiscuously. */
		rx_mode = ReceiveAllFrames;
	} else if ((dev->flags & IFF_ALLMULTI) ||
			(dev->mc_count > multicast_filter_limit)) {
		/* Receive broadcast and multicast frames */
		rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
	} else if (dev->mc_count > 0) {
		int i;
		struct dev_mc_list *mclist;
		/* Receive broadcast frames and multicast frames filtering
		   by Hashtable */
		rx_mode =
		    ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
		for (i=0, mclist = dev->mc_list; mclist && i < dev->mc_count;
				i++, mclist=mclist->next)
		{
			int bit, index = 0;
			int crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
			/* The inverted high significant 6 bits of CRC are
			   used as an index to hashtable */
			for (bit = 0; bit < 6; bit++)
				if (crc & (1 << (31 - bit)))
					index |= (1 << bit);
			hash_table[index / 32] |= (1 << (index % 32));
		}
	} else {
		rx_mode = ReceiveBroadcast | ReceiveUnicast;
	}
	if (np->vlan) {
		/* ReceiveVLANMatch field in ReceiveMode */
		rx_mode |= ReceiveVLANMatch;
	}

	writel (hash_table[0], ioaddr + HashTable0);
	writel (hash_table[1], ioaddr + HashTable1);
	writew (rx_mode, ioaddr + ReceiveMode);
}

static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);
	strcpy(info->driver, "dl2k");
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(np->pdev));
}

static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	if (np->phy_media) {
		/* fiber device */
		cmd->supported = SUPPORTED_Autoneg | SUPPORTED_FIBRE;
		cmd->advertising = ADVERTISED_Autoneg | ADVERTISED_FIBRE;
		cmd->port = PORT_FIBRE;
		cmd->transceiver = XCVR_INTERNAL;
	} else {
		/* copper device */
		cmd->supported = SUPPORTED_10baseT_Half |
		    SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half
		    | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full |
		    SUPPORTED_Autoneg | SUPPORTED_MII;
		cmd->advertising = ADVERTISED_10baseT_Half |
		    ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half |
		    ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full |
		    ADVERTISED_Autoneg | ADVERTISED_MII;
		cmd->port = PORT_MII;
		cmd->transceiver = XCVR_INTERNAL;
	}
	if (np->link_status) {
		cmd->speed = np->speed;
		cmd->duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	if (np->an_enable)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->phy_address = np->phy_addr;
	return 0;
}
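/*
 * ethtool set_settings sketch: turning autoneg off while running at
 * 1000 Mbps is not supported; the code below falls back to forced
 * 100/full with a warning, and an explicitly forced gigabit mode is
 * rejected with -EINVAL.
 */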
static int rio_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	netif_carrier_off(dev);
	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (np->an_enable)
			return 0;
		else {
			np->an_enable = 1;
			mii_set_media(dev);
			return 0;
		}
	} else {
		np->an_enable = 0;
		if (np->speed == 1000) {
			cmd->speed = SPEED_100;
			cmd->duplex = DUPLEX_FULL;
			printk("Warning!! Can't disable Auto negotiation in 1000Mbps, change to Manual 100Mbps, Full duplex.\n");
		}
		switch(cmd->speed + cmd->duplex) {

		case SPEED_10 + DUPLEX_HALF:
			np->speed = 10;
			np->full_duplex = 0;
			break;

		case SPEED_10 + DUPLEX_FULL:
			np->speed = 10;
			np->full_duplex = 1;
			break;
		case SPEED_100 + DUPLEX_HALF:
			np->speed = 100;
			np->full_duplex = 0;
			break;
		case SPEED_100 + DUPLEX_FULL:
			np->speed = 100;
			np->full_duplex = 1;
			break;
		case SPEED_1000 + DUPLEX_HALF:/* not supported */
		case SPEED_1000 + DUPLEX_FULL:/* not supported */
		default:
			return -EINVAL;
		}
		mii_set_media(dev);
	}
	return 0;
}

static u32 rio_get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return np->link_status;
}

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo = rio_get_drvinfo,
	.get_settings = rio_get_settings,
	.set_settings = rio_set_settings,
	.get_link = rio_get_link,
};
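/*
 * The SIOCDEVPRIVATE ioctls below are driver-private debug hooks
 * (raw MII access, queue start/stop, ring dumps); they are not a
 * stable userspace interface.
 */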
static int
rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
	int phy_addr;
	struct netdev_private *np = netdev_priv(dev);
	struct mii_data *miidata = (struct mii_data *) &rq->ifr_ifru;

	struct netdev_desc *desc;
	int i;

	phy_addr = np->phy_addr;
	switch (cmd) {
	case SIOCDEVPRIVATE:
		break;

	case SIOCDEVPRIVATE + 1:
		miidata->out_value = mii_read (dev, phy_addr, miidata->reg_num);
		break;
	case SIOCDEVPRIVATE + 2:
		mii_write (dev, phy_addr, miidata->reg_num, miidata->in_value);
		break;
	case SIOCDEVPRIVATE + 3:
		break;
	case SIOCDEVPRIVATE + 4:
		break;
	case SIOCDEVPRIVATE + 5:
		netif_stop_queue (dev);
		break;
	case SIOCDEVPRIVATE + 6:
		netif_wake_queue (dev);
		break;
	case SIOCDEVPRIVATE + 7:
		printk
		    ("tx_full=%x cur_tx=%lx old_tx=%lx cur_rx=%lx old_rx=%lx\n",
		     netif_queue_stopped(dev), np->cur_tx, np->old_tx, np->cur_rx,
		     np->old_rx);
		break;
	case SIOCDEVPRIVATE + 8:
		printk("TX ring:\n");
		for (i = 0; i < TX_RING_SIZE; i++) {
			desc = &np->tx_ring[i];
			printk
			    ("%02x:cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
			     i,
			     (u32) (np->tx_ring_dma + i * sizeof (*desc)),
			     (u32) desc->next_desc,
			     (u32) desc->status, (u32) (desc->fraginfo >> 32),
			     (u32) desc->fraginfo);
			printk ("\n");
		}
		printk ("\n");
		break;

	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

#define EEP_READ 0x0200
#define EEP_BUSY 0x8000
/* Read the EEPROM word */
/* We use I/O instructions to read/write the eeprom to avoid failures on some machines */
int
read_eeprom (long ioaddr, int eep_addr)
{
	int i = 1000;
	outw (EEP_READ | (eep_addr & 0xff), ioaddr + EepromCtrl);
	while (i-- > 0) {
		if (!(inw (ioaddr + EepromCtrl) & EEP_BUSY)) {
			return inw (ioaddr + EepromData);
		}
	}
	return 0;
}

enum phy_ctrl_bits {
	MII_READ = 0x00, MII_CLK = 0x01, MII_DATA1 = 0x02, MII_WRITE = 0x04,
	MII_DUPLEX = 0x08,
};

#define mii_delay() readb(ioaddr)
static void
mii_sendbit (struct net_device *dev, u32 data)
{
	long ioaddr = dev->base_addr + PhyCtrl;
	data = (data) ? MII_DATA1 : 0;
	data |= MII_WRITE;
	data |= (readb (ioaddr) & 0xf8) | MII_WRITE;
	writeb (data, ioaddr);
	mii_delay ();
	writeb (data | MII_CLK, ioaddr);
	mii_delay ();
}

static int
mii_getbit (struct net_device *dev)
{
	long ioaddr = dev->base_addr + PhyCtrl;
	u8 data;

	data = (readb (ioaddr) & 0xf8) | MII_READ;
	writeb (data, ioaddr);
	mii_delay ();
	writeb (data | MII_CLK, ioaddr);
	mii_delay ();
	return ((readb (ioaddr) >> 1) & 1);
}

static void
mii_send_bits (struct net_device *dev, u32 data, int len)
{
	int i;
	for (i = len - 1; i >= 0; i--) {
		mii_sendbit (dev, data & (1 << i));
	}
}
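/*
 * MDIO frame format (IEEE 802.3 clause 22), bit-banged through PhyCtrl
 * by the helpers above: 32 preamble one-bits, ST=01, OP (10 = read,
 * 01 = write), 5-bit PHY address, 5-bit register number, 2-bit
 * turnaround, then 16 data bits.
 */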
static int
mii_read (struct net_device *dev, int phy_addr, int reg_num)
{
	u32 cmd;
	int i;
	u32 retval = 0;

	/* Preamble */
	mii_send_bits (dev, 0xffffffff, 32);
	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
	/* ST,OP = 0110'b for read operation */
	cmd = (0x06 << 10 | phy_addr << 5 | reg_num);
	mii_send_bits (dev, cmd, 14);
	/* Turnaround */
	if (mii_getbit (dev))
		goto err_out;
	/* Read data */
	for (i = 0; i < 16; i++) {
		retval |= mii_getbit (dev);
		retval <<= 1;
	}
	/* End cycle */
	mii_getbit (dev);
	return (retval >> 1) & 0xffff;

      err_out:
	return 0;
}

static int
mii_write (struct net_device *dev, int phy_addr, int reg_num, u16 data)
{
	u32 cmd;

	/* Preamble */
	mii_send_bits (dev, 0xffffffff, 32);
	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
	/* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */
	cmd = (0x5002 << 16) | (phy_addr << 23) | (reg_num << 18) | data;
	mii_send_bits (dev, cmd, 32);
	/* End cycle */
	mii_getbit (dev);
	return 0;
}

static int
mii_wait_link (struct net_device *dev, int wait)
{
	BMSR_t bmsr;
	int phy_addr;
	struct netdev_private *np;

	np = netdev_priv(dev);
	phy_addr = np->phy_addr;

	do {
		bmsr.image = mii_read (dev, phy_addr, MII_BMSR);
		if (bmsr.bits.link_status)
			return 0;
		mdelay (1);
	} while (--wait > 0);
	return -1;
}

static int
mii_get_media (struct net_device *dev)
{
	ANAR_t negotiate;
	BMSR_t bmsr;
	BMCR_t bmcr;
	MSCR_t mscr;
	MSSR_t mssr;
	int phy_addr;
	struct netdev_private *np;

	np = netdev_priv(dev);
	phy_addr = np->phy_addr;

	bmsr.image = mii_read (dev, phy_addr, MII_BMSR);
	if (np->an_enable) {
		if (!bmsr.bits.an_complete) {
			/* Auto-Negotiation not completed */
			return -1;
		}
		negotiate.image = mii_read (dev, phy_addr, MII_ANAR) &
			mii_read (dev, phy_addr, MII_ANLPAR);
		mscr.image = mii_read (dev, phy_addr, MII_MSCR);
		mssr.image = mii_read (dev, phy_addr, MII_MSSR);
		if (mscr.bits.media_1000BT_FD & mssr.bits.lp_1000BT_FD) {
			np->speed = 1000;
			np->full_duplex = 1;
			printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
		} else if (mscr.bits.media_1000BT_HD & mssr.bits.lp_1000BT_HD) {
			np->speed = 1000;
			np->full_duplex = 0;
			printk (KERN_INFO "Auto 1000 Mbps, Half duplex\n");
		} else if (negotiate.bits.media_100BX_FD) {
			np->speed = 100;
			np->full_duplex = 1;
			printk (KERN_INFO "Auto 100 Mbps, Full duplex\n");
		} else if (negotiate.bits.media_100BX_HD) {
			np->speed = 100;
			np->full_duplex = 0;
			printk (KERN_INFO "Auto 100 Mbps, Half duplex\n");
		} else if (negotiate.bits.media_10BT_FD) {
			np->speed = 10;
			np->full_duplex = 1;
			printk (KERN_INFO "Auto 10 Mbps, Full duplex\n");
		} else if (negotiate.bits.media_10BT_HD) {
			np->speed = 10;
			np->full_duplex = 0;
			printk (KERN_INFO "Auto 10 Mbps, Half duplex\n");
		}
		if (negotiate.bits.pause) {
			np->tx_flow &= 1;
			np->rx_flow &= 1;
		} else if (negotiate.bits.asymmetric) {
			np->tx_flow = 0;
			np->rx_flow &= 1;
		}
		/* else tx_flow, rx_flow = user select */
	} else {
		bmcr.image = mii_read (dev, phy_addr, MII_BMCR);
		if (bmcr.bits.speed100 == 1 && bmcr.bits.speed1000 == 0) {
			printk (KERN_INFO "Operating at 100 Mbps, ");
		} else if (bmcr.bits.speed100 == 0 && bmcr.bits.speed1000 == 0) {
			printk (KERN_INFO "Operating at 10 Mbps, ");
		} else if (bmcr.bits.speed100 == 0 && bmcr.bits.speed1000 == 1) {
			printk (KERN_INFO "Operating at 1000 Mbps, ");
		}
		if (bmcr.bits.duplex_mode) {
			printk ("Full duplex\n");
		} else {
			printk ("Half duplex\n");
		}
	}
	if (np->tx_flow)
		printk(KERN_INFO "Enable Tx Flow Control\n");
	else
		printk(KERN_INFO "Disable Tx Flow Control\n");
	if (np->rx_flow)
		printk(KERN_INFO "Enable Rx Flow Control\n");
	else
		printk(KERN_INFO "Disable Rx Flow Control\n");

	return 0;
}
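/*
 * mii_set_media: with autoneg enabled it mirrors the BMSR capability
 * bits into ANAR (plus the pause bits) and restarts negotiation; the
 * forced path follows the numbered sequence in the comments below.
 */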
static int
mii_set_media (struct net_device *dev)
{
	PHY_SCR_t pscr;
	BMCR_t bmcr;
	BMSR_t bmsr;
	ANAR_t anar;
	int phy_addr;
	struct netdev_private *np;
	np = netdev_priv(dev);
	phy_addr = np->phy_addr;

	/* Does user set speed? */
	if (np->an_enable) {
		/* Advertise capabilities */
		bmsr.image = mii_read (dev, phy_addr, MII_BMSR);
		anar.image = mii_read (dev, phy_addr, MII_ANAR);
		anar.bits.media_100BX_FD = bmsr.bits.media_100BX_FD;
		anar.bits.media_100BX_HD = bmsr.bits.media_100BX_HD;
		anar.bits.media_100BT4 = bmsr.bits.media_100BT4;
		anar.bits.media_10BT_FD = bmsr.bits.media_10BT_FD;
		anar.bits.media_10BT_HD = bmsr.bits.media_10BT_HD;
		anar.bits.pause = 1;
		anar.bits.asymmetric = 1;
		mii_write (dev, phy_addr, MII_ANAR, anar.image);

		/* Enable Auto crossover */
		pscr.image = mii_read (dev, phy_addr, MII_PHY_SCR);
		pscr.bits.mdi_crossover_mode = 3;	/* 11'b */
		mii_write (dev, phy_addr, MII_PHY_SCR, pscr.image);

		/* Soft reset PHY */
		mii_write (dev, phy_addr, MII_BMCR, MII_BMCR_RESET);
		bmcr.image = 0;
		bmcr.bits.an_enable = 1;
		bmcr.bits.restart_an = 1;
		bmcr.bits.reset = 1;
		mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
		mdelay(1);
	} else {
		/* Force speed setting */
		/* 1) Disable Auto crossover */
		pscr.image = mii_read (dev, phy_addr, MII_PHY_SCR);
		pscr.bits.mdi_crossover_mode = 0;
		mii_write (dev, phy_addr, MII_PHY_SCR, pscr.image);

		/* 2) PHY Reset */
		bmcr.image = mii_read (dev, phy_addr, MII_BMCR);
		bmcr.bits.reset = 1;
		mii_write (dev, phy_addr, MII_BMCR, bmcr.image);

		/* 3) Power Down */
		bmcr.image = 0x1940;	/* must be 0x1940 */
		mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
		mdelay (100);	/* wait a certain time */

		/* 4) Advertise nothing */
		mii_write (dev, phy_addr, MII_ANAR, 0);

		/* 5) Set media and Power Up */
		bmcr.image = 0;
		bmcr.bits.power_down = 1;
		if (np->speed == 100) {
			bmcr.bits.speed100 = 1;
			bmcr.bits.speed1000 = 0;
			printk (KERN_INFO "Manual 100 Mbps, ");
		} else if (np->speed == 10) {
			bmcr.bits.speed100 = 0;
			bmcr.bits.speed1000 = 0;
			printk (KERN_INFO "Manual 10 Mbps, ");
		}
		if (np->full_duplex) {
			bmcr.bits.duplex_mode = 1;
			printk ("Full duplex\n");
		} else {
			bmcr.bits.duplex_mode = 0;
			printk ("Half duplex\n");
		}
#if 0
		/* Set 1000BaseT Master/Slave setting */
		mscr.image = mii_read (dev, phy_addr, MII_MSCR);
		mscr.bits.cfg_enable = 1;
		mscr.bits.cfg_value = 0;
#endif
		mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
		mdelay(10);
	}
	return 0;
}
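/*
 * PCS (fiber) variants: 1000BASE-X links run at a fixed 1000 Mbps, so
 * only duplex and pause settings are negotiated below.
 */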
static int
mii_get_media_pcs (struct net_device *dev)
{
	ANAR_PCS_t negotiate;
	BMSR_t bmsr;
	BMCR_t bmcr;
	int phy_addr;
	struct netdev_private *np;

	np = netdev_priv(dev);
	phy_addr = np->phy_addr;

	bmsr.image = mii_read (dev, phy_addr, PCS_BMSR);
	if (np->an_enable) {
		if (!bmsr.bits.an_complete) {
			/* Auto-Negotiation not completed */
			return -1;
		}
		negotiate.image = mii_read (dev, phy_addr, PCS_ANAR) &
			mii_read (dev, phy_addr, PCS_ANLPAR);
		np->speed = 1000;
		if (negotiate.bits.full_duplex) {
			printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
			np->full_duplex = 1;
		} else {
			printk (KERN_INFO "Auto 1000 Mbps, half duplex\n");
			np->full_duplex = 0;
		}
		if (negotiate.bits.pause) {
			np->tx_flow &= 1;
			np->rx_flow &= 1;
		} else if (negotiate.bits.asymmetric) {
			np->tx_flow = 0;
			np->rx_flow &= 1;
		}
		/* else tx_flow, rx_flow = user select */
	} else {
		bmcr.image = mii_read (dev, phy_addr, PCS_BMCR);
		printk (KERN_INFO "Operating at 1000 Mbps, ");
		if (bmcr.bits.duplex_mode) {
			printk ("Full duplex\n");
		} else {
			printk ("Half duplex\n");
		}
	}
	if (np->tx_flow)
		printk(KERN_INFO "Enable Tx Flow Control\n");
	else
		printk(KERN_INFO "Disable Tx Flow Control\n");
	if (np->rx_flow)
		printk(KERN_INFO "Enable Rx Flow Control\n");
	else
		printk(KERN_INFO "Disable Rx Flow Control\n");

	return 0;
}

static int
mii_set_media_pcs (struct net_device *dev)
{
	BMCR_t bmcr;
	ESR_t esr;
	ANAR_PCS_t anar;
	int phy_addr;
	struct netdev_private *np;
	np = netdev_priv(dev);
	phy_addr = np->phy_addr;

	/* Auto-Negotiation? */
	if (np->an_enable) {
		/* Advertise capabilities */
		esr.image = mii_read (dev, phy_addr, PCS_ESR);
		anar.image = mii_read (dev, phy_addr, MII_ANAR);
		anar.bits.half_duplex =
		    esr.bits.media_1000BT_HD | esr.bits.media_1000BX_HD;
		anar.bits.full_duplex =
		    esr.bits.media_1000BT_FD | esr.bits.media_1000BX_FD;
		anar.bits.pause = 1;
		anar.bits.asymmetric = 1;
		mii_write (dev, phy_addr, MII_ANAR, anar.image);

		/* Soft reset PHY */
		mii_write (dev, phy_addr, MII_BMCR, MII_BMCR_RESET);
		bmcr.image = 0;
		bmcr.bits.an_enable = 1;
		bmcr.bits.restart_an = 1;
		bmcr.bits.reset = 1;
		mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
		mdelay(1);
	} else {
		/* Force speed setting */
		/* PHY Reset */
		bmcr.image = 0;
		bmcr.bits.reset = 1;
		mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
		mdelay(10);
		bmcr.image = 0;
		bmcr.bits.an_enable = 0;
		if (np->full_duplex) {
			bmcr.bits.duplex_mode = 1;
			printk (KERN_INFO "Manual full duplex\n");
		} else {
			bmcr.bits.duplex_mode = 0;
			printk (KERN_INFO "Manual half duplex\n");
		}
		mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
		mdelay(10);

		/* Advertise nothing */
		mii_write (dev, phy_addr, MII_ANAR, 0);
	}
	return 0;
}
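/*
 * Teardown order in rio_close() below: quiesce the queue, mask
 * interrupts, stop the MAC, release the IRQ and timer, then unmap and
 * free every outstanding ring buffer.
 */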
static int
rio_close (struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = netdev_priv(dev);
	struct sk_buff *skb;
	int i;

	netif_stop_queue (dev);

	/* Disable interrupts */
	writew (0, ioaddr + IntEnable);

	/* Stop Tx and Rx logics */
	writel (TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl);
	synchronize_irq (dev->irq);
	free_irq (dev->irq, dev);
	del_timer_sync (&np->timer);

	/* Free all the skbuffs in the queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		np->rx_ring[i].fraginfo = 0;
		skb = np->rx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pdev,
					 np->rx_ring[i].fraginfo & DMA_48BIT_MASK,
					 skb->len, PCI_DMA_FROMDEVICE);
			dev_kfree_skb (skb);
			np->rx_skbuff[i] = NULL;
		}
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = np->tx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pdev,
					 np->tx_ring[i].fraginfo & DMA_48BIT_MASK,
					 skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb (skb);
			np->tx_skbuff[i] = NULL;
		}
	}

	return 0;
}

static void __devexit
rio_remove1 (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);

		unregister_netdev (dev);
		pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring,
				     np->rx_ring_dma);
		pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring,
				     np->tx_ring_dma);
#ifdef MEM_MAPPING
		iounmap ((char *) (dev->base_addr));
#endif
		free_netdev (dev);
		pci_release_regions (pdev);
		pci_disable_device (pdev);
	}
	pci_set_drvdata (pdev, NULL);
}

static struct pci_driver rio_driver = {
	.name		= "dl2k",
	.id_table	= rio_pci_tbl,
	.probe		= rio_probe1,
	.remove		= __devexit_p(rio_remove1),
};

static int __init
rio_init (void)
{
	return pci_register_driver(&rio_driver);
}

static void __exit
rio_exit (void)
{
	pci_unregister_driver (&rio_driver);
}

module_init (rio_init);
module_exit (rio_exit);

/*

Compile command:

gcc -D__KERNEL__ -DMODULE -I/usr/src/linux/include -Wall -Wstrict-prototypes -O2 -c dl2k.c

Read Documentation/networking/dl2k.txt for details.

*/