/*
 * acenic.c: Linux driver for the Alteon AceNIC Gigabit Ethernet card
 *           and other Tigon based cards.
 *
 * Copyright 1998-2002 by Jes Sorensen, <jes@trained-monkey.org>.
 *
 * Thanks to Alteon and 3Com for providing hardware and documentation
 * enabling me to write this driver.
 *
 * A mailing list for discussing the use of this driver has been
 * set up, please subscribe to the lists if you have any questions
 * about the driver. Send mail to linux-acenic-help@sunsite.auc.dk to
 * see how to subscribe.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Additional credits:
 *   Pete Wyckoff <wyckoff@ca.sandia.gov>: Initial Linux/Alpha and trace
 *       dump support. The trace dump support has not been
 *       integrated yet however.
 *   Troy Benjegerdes: Big Endian (PPC) patches.
 *   Nate Stahl: Better out of memory handling and stats support.
 *   Aman Singla: Nasty race between interrupt handler and tx code dealing
 *                with 'testing the tx_ret_csm and setting tx_full'
 *   David S. Miller <davem@redhat.com>: conversion to new PCI dma mapping
 *                                       infrastructure and Sparc support
 *   Pierrick Pinasseau (CERN): For lending me an Ultra 5 to test the
 *                              driver under Linux/Sparc64
 *   Matt Domsch <Matt_Domsch@dell.com>: Detect Alteon 1000baseT cards
 *                                       ETHTOOL_GDRVINFO support
 *   Chip Salzenberg <chip@valinux.com>: Fix race condition between tx
 *                                       handler and close() cleanup.
 *   Ken Aaker <kdaaker@rchland.vnet.ibm.com>: Correct check for whether
 *                                       memory mapped IO is enabled to
 *                                       make the driver work on RS/6000.
 *   Takayoshi Kouchi <kouchi@hpc.bs1.fc.nec.co.jp>: Identifying problem
 *                                       where the driver would disable
 *                                       bus master mode if it had to disable
 *                                       write and invalidate.
 *   Stephen Hack <stephen_hack@hp.com>: Fixed ace_set_mac_addr for little
 *                                       endian systems.
 *   Val Henson <vhenson@esscom.com>: Reset Jumbo skb producer and
 *                                    rx producer index when
 *                                    flushing the Jumbo ring.
 *   Hans Grobler <grobh@sun.ac.za>: Memory leak fixes in the
 *                                   driver init path.
 *   Grant Grundler <grundler@cup.hp.com>: PCI write posting fixes.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sockios.h>

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#include <linux/if_vlan.h>
#endif

#ifdef SIOCETHTOOL
#include <linux/ethtool.h>
#endif

#include <net/sock.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>


#define DRV_NAME "acenic"

#undef INDEX_DEBUG

#ifdef CONFIG_ACENIC_OMIT_TIGON_I
#define ACE_IS_TIGON_I(ap)	0
#define ACE_TX_RING_ENTRIES(ap)	MAX_TX_RING_ENTRIES
#else
#define ACE_IS_TIGON_I(ap)	(ap->version == 1)
#define ACE_TX_RING_ENTRIES(ap)	ap->tx_ring_entries
#endif

#ifndef PCI_VENDOR_ID_ALTEON
#define PCI_VENDOR_ID_ALTEON		0x12ae
#endif
#ifndef PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE
#define PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE  0x0001
#define PCI_DEVICE_ID_ALTEON_ACENIC_COPPER 0x0002
#endif
#ifndef PCI_DEVICE_ID_3COM_3C985
#define PCI_DEVICE_ID_3COM_3C985	0x0001
#endif
#ifndef PCI_VENDOR_ID_NETGEAR
#define PCI_VENDOR_ID_NETGEAR		0x1385
#define PCI_DEVICE_ID_NETGEAR_GA620	0x620a
#endif
#ifndef PCI_DEVICE_ID_NETGEAR_GA620T
#define PCI_DEVICE_ID_NETGEAR_GA620T	0x630a
#endif


/*
 * Farallon used the DEC vendor ID by mistake and they seem not
 * to care - stinky!
 */
#ifndef PCI_DEVICE_ID_FARALLON_PN9000SX
#define PCI_DEVICE_ID_FARALLON_PN9000SX	0x1a
#endif
#ifndef PCI_DEVICE_ID_FARALLON_PN9100T
#define PCI_DEVICE_ID_FARALLON_PN9100T	0xfa
#endif
#ifndef PCI_VENDOR_ID_SGI
#define PCI_VENDOR_ID_SGI		0x10a9
#endif
#ifndef PCI_DEVICE_ID_SGI_ACENIC
#define PCI_DEVICE_ID_SGI_ACENIC	0x0009
#endif

static struct pci_device_id acenic_pci_tbl[] = {
	{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_COPPER,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C985,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620T,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	/*
	 * Farallon used the DEC vendor ID on their cards incorrectly,
	 * then later Alteon's ID.
	 */
	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_FARALLON_PN9000SX,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_FARALLON_PN9100T,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_ACENIC,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ }
};
MODULE_DEVICE_TABLE(pci, acenic_pci_tbl);

#define ace_sync_irq(irq)	synchronize_irq(irq)

#ifndef offset_in_page
#define offset_in_page(ptr)	((unsigned long)(ptr) & ~PAGE_MASK)
#endif

#define ACE_MAX_MOD_PARMS	8
#define BOARD_IDX_STATIC	0
#define BOARD_IDX_OVERFLOW	-1

#if (defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)) && \
	defined(NETIF_F_HW_VLAN_RX)
#define ACENIC_DO_VLAN		1
#define ACE_RCB_VLAN_FLAG	RCB_FLG_VLAN_ASSIST
#else
#define ACENIC_DO_VLAN		0
#define ACE_RCB_VLAN_FLAG	0
#endif

#include "acenic.h"

/*
 * These must be defined before the firmware is included.
 */
#define MAX_TEXT_LEN	96*1024
#define MAX_RODATA_LEN	8*1024
#define MAX_DATA_LEN	2*1024

#include "acenic_firmware.h"

#ifndef tigon2FwReleaseLocal
#define tigon2FwReleaseLocal 0
#endif

/*
 * This driver currently supports Tigon I and Tigon II based cards
 * including the Alteon AceNIC, the 3Com 3C985[B] and NetGear
 * GA620. The driver should also work on the SGI, DEC and Farallon
 * versions of the card, however I have not been able to test that
 * myself.
 *
 * This card is really neat, it supports receive hardware checksumming
 * and jumbo frames (up to 9000 bytes) and does a lot of work in the
 * firmware. Also the programming interface is quite neat, except for
 * the parts dealing with the i2c eeprom on the card ;-)
 *
 * Using jumbo frames:
 *
 * To enable jumbo frames, simply specify an mtu between 1500 and 9000
 * bytes to ifconfig. Jumbo frames can be enabled or disabled at any time
 * by running `ifconfig eth<X> mtu <MTU>' with <X> being the Ethernet
 * interface number and <MTU> being the MTU value.
 *
 * Module parameters:
 *
 * When compiled as a loadable module, the driver allows for a number
 * of module parameters to be specified. The driver supports the
 * following module parameters:
 *
 *  trace=<val> - Firmware trace level. This requires special traced
 *                firmware to replace the firmware supplied with
 *                the driver - for debugging purposes only.
 *
 *  link=<val>  - Link state. Normally you want to use the default link
 *                parameters set by the driver. This can be used to
 *                override these in case your switch doesn't negotiate
 *                the link properly. Valid values are:
 *         0x0001 - Force half duplex link.
 *         0x0002 - Do not negotiate line speed with the other end.
 *         0x0010 - 10Mbit/sec link.
 *         0x0020 - 100Mbit/sec link.
 *         0x0040 - 1000Mbit/sec link.
 *         0x0100 - Do not negotiate flow control.
 *         0x0200 - Enable RX flow control Y
 *         0x0400 - Enable TX flow control Y (Tigon II NICs only).
 *                Default value is 0x0270, ie. enable link and flow
 *                control negotiation, negotiating the highest
 *                possible link speed with RX flow control enabled.
 *
 *                When disabling link speed negotiation, only one link
 *                speed is allowed to be specified!
 *
 *  tx_coal_tick=<val> - number of coalescing clock ticks (us) allowed
 *                to wait for more packets to arrive before
 *                interrupting the host, from the time the first
 *                packet arrives.
 *
 *  rx_coal_tick=<val> - number of coalescing clock ticks (us) allowed
 *                to wait for more packets to arrive in the receive ring,
 *                before interrupting the host, after receiving the
 *                first packet in the ring.
 *
 *  max_tx_desc=<val> - maximum number of transmit descriptors
 *                (packets) transmitted before interrupting the host.
 *
 *  max_rx_desc=<val> - maximum number of receive descriptors
 *                (packets) received before interrupting the host.
 *
 *  tx_ratio=<val> - a value in the range 0 - 63 specifying the split, in
 *                64th increments, of the NIC's on board memory to be used
 *                for transmit and receive buffers. For the 1MB NIC
 *                app. 800KB is available, on the 1/2MB NIC app. 300KB is
 *                available. 68KB will always be available as a minimum
 *                for both directions. The default value is a 50/50 split.
 *
 *  dis_pci_mem_inval=<val> - disable PCI memory write and invalidate
 *                operations, default (1) is to always disable this as
 *                that is what Alteon does on NT. I have not been able
 *                to measure any real performance differences with
 *                this on my systems. Set <val>=0 if you want to
 *                enable these operations.
 *
 * If you use more than one NIC, separate the parameters for the
 * individual NICs with commas, ie. trace=0,0x00001fff,0 if you want to
 * run tracing on NIC #2 but not on NIC #1 and #3.
 *
 * TODO:
 *
 * - Proper multicast support.
 * - NIC dump support.
 * - More tuning parameters.
 *
 * The mini ring is not used under Linux and I am not sure it makes sense
 * to actually use it.
 *
 * New interrupt handler strategy:
 *
 * The old interrupt handler worked using the traditional method of
 * replacing an skbuff with a new one when a packet arrives. However
 * the rx rings do not need to contain a static number of buffer
 * descriptors, thus it makes sense to move the memory allocation out
 * of the main interrupt handler and do it in a bottom half handler
 * and only allocate new buffers when the number of buffers in the
 * ring is below a certain threshold. In order to avoid starving the
 * NIC under heavy load it is however necessary to force allocation
 * when hitting a minimum threshold. The strategy for allocation is as
 * follows:
 *
 *     RX_LOW_BUF_THRES    - allocate buffers in the bottom half
 *     RX_PANIC_LOW_THRES  - we are very low on buffers, allocate
 *                           the buffers in the interrupt handler
 *     RX_RING_THRES       - maximum number of buffers in the rx ring
 *     RX_MINI_THRES       - maximum number of buffers in the mini ring
 *     RX_JUMBO_THRES      - maximum number of buffers in the jumbo ring
 *
 * One advantageous side effect of this allocation approach is that the
 * entire rx processing can be done without holding any spin lock
 * since the rx rings and registers are totally independent of the tx
 * ring and its registers. This of course includes the kmalloc's of
 * new skb's. Thus start_xmit can run in parallel with rx processing
 * and the memory allocation on SMP systems.
 *
 * Note that running the skb reallocation in a bottom half opens up
 * another can of races which needs to be handled properly. In
 * particular it can happen that the interrupt handler tries to run
 * the reallocation while the bottom half is either running on another
 * CPU or was interrupted on the same CPU. To get around this the
 * driver uses bitops to prevent the reallocation routines from being
 * reentered.
 *
 * TX handling can also be done without holding any spin lock - wheee,
 * this is fun! - since tx_ret_csm is only written to by the interrupt
 * handler. The case to be aware of is when shutting down the device
 * and cleaning up where it is necessary to make sure that
 * start_xmit() is not running while this is happening. Well DaveM
 * informs me that this case is already protected against ... bye bye
 * Mr. Spin Lock, it was nice to know you.
 *
 * TX interrupts are now partly disabled so the NIC will only generate
 * TX interrupts for the number of coal ticks, not for the number of
 * TX packets in the queue. This should reduce the number of TX-only
 * interrupts, ie. interrupts seen when no RX processing is done.
 */
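
/*
 * Worked examples for the documentation above (illustrative values,
 * not taken from the original driver documentation):
 *
 *	# enable jumbo frames on the first AceNIC interface
 *	ifconfig eth0 mtu 9000
 *
 *	# three NICs: run firmware tracing on NIC #2 only, and request a
 *	# 100Mbit/sec link on NIC #1 while leaving the others at defaults
 *	modprobe acenic trace=0,0x00001fff,0 link=0x0020,0,0
 */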

/*
 * Threshold values for RX buffer allocation - the low water marks for
 * when to start refilling the rings are set to 75% of the ring
 * sizes. It seems to make sense to refill the rings entirely from the
 * interrupt handler once it gets below the panic threshold, that way
 * we don't risk that the refilling is moved to another CPU when the
 * one running the interrupt handler just got the slab code hot in its
 * cache.
 */
#define RX_RING_SIZE		72
#define RX_MINI_SIZE		64
#define RX_JUMBO_SIZE		48

#define RX_PANIC_STD_THRES	16
#define RX_PANIC_STD_REFILL	(3*RX_PANIC_STD_THRES)/2
#define RX_LOW_STD_THRES	(3*RX_RING_SIZE)/4
#define RX_PANIC_MINI_THRES	12
#define RX_PANIC_MINI_REFILL	(3*RX_PANIC_MINI_THRES)/2
#define RX_LOW_MINI_THRES	(3*RX_MINI_SIZE)/4
#define RX_PANIC_JUMBO_THRES	6
#define RX_PANIC_JUMBO_REFILL	(3*RX_PANIC_JUMBO_THRES)/2
#define RX_LOW_JUMBO_THRES	(3*RX_JUMBO_SIZE)/4
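
/*
 * With the ring sizes above these expressions work out to: low-water
 * marks of 54 (std), 48 (mini) and 36 (jumbo) buffers, and panic
 * refill targets of 24, 18 and 9 buffers respectively.
 */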

/*
 * Size of the mini ring entries, basically these just should be big
 * enough to take TCP ACKs
 */
#define ACE_MINI_SIZE		100

#define ACE_MINI_BUFSIZE	ACE_MINI_SIZE
#define ACE_STD_BUFSIZE		(ACE_STD_MTU + ETH_HLEN + 4)
#define ACE_JUMBO_BUFSIZE	(ACE_JUMBO_MTU + ETH_HLEN + 4)

/*
 * There seems to be a magic difference in the effect between 995 and 996
 * but little difference between 900 and 995 ... no idea why.
 *
 * There is now a default set of tuning parameters which is set, depending
 * on whether or not the user enables Jumbo frames. It's assumed that if
 * Jumbo frames are enabled, the user wants optimal tuning for that case.
 */
#define DEF_TX_COAL		400 /* 996 */
#define DEF_TX_MAX_DESC		60  /* was 40 */
#define DEF_RX_COAL		120 /* 1000 */
#define DEF_RX_MAX_DESC		25
#define DEF_TX_RATIO		21 /* 24 */

#define DEF_JUMBO_TX_COAL	20
#define DEF_JUMBO_TX_MAX_DESC	60
#define DEF_JUMBO_RX_COAL	30
#define DEF_JUMBO_RX_MAX_DESC	6
#define DEF_JUMBO_TX_RATIO	21

#if tigon2FwReleaseLocal < 20001118
/*
 * Standard firmware and early modifications duplicate
 * IRQ load without this flag (coal timer is never reset).
 * Note that with this flag tx_coal should be less than
 * time to xmit full tx ring.
 * 400usec is not so bad for tx ring size of 128.
 */
#define TX_COAL_INTS_ONLY	1	/* worth it */
#else
/*
 * With modified firmware, this is not necessary, but still useful.
 */
#define TX_COAL_INTS_ONLY	1
#endif

#define DEF_TRACE		0
#define DEF_STAT		(2 * TICKS_PER_SEC)


static int link_state[ACE_MAX_MOD_PARMS];
static int trace[ACE_MAX_MOD_PARMS];
static int tx_coal_tick[ACE_MAX_MOD_PARMS];
static int rx_coal_tick[ACE_MAX_MOD_PARMS];
static int max_tx_desc[ACE_MAX_MOD_PARMS];
static int max_rx_desc[ACE_MAX_MOD_PARMS];
static int tx_ratio[ACE_MAX_MOD_PARMS];
static int dis_pci_mem_inval[ACE_MAX_MOD_PARMS] = {1, 1, 1, 1, 1, 1, 1, 1};

MODULE_AUTHOR("Jes Sorensen <jes@trained-monkey.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AceNIC/3C985/GA620 Gigabit Ethernet driver");

module_param_array_named(link, link_state, int, NULL, 0);
module_param_array(trace, int, NULL, 0);
module_param_array(tx_coal_tick, int, NULL, 0);
module_param_array(max_tx_desc, int, NULL, 0);
module_param_array(rx_coal_tick, int, NULL, 0);
module_param_array(max_rx_desc, int, NULL, 0);
module_param_array(tx_ratio, int, NULL, 0);
MODULE_PARM_DESC(link, "AceNIC/3C985/NetGear link state");
MODULE_PARM_DESC(trace, "AceNIC/3C985/NetGear firmware trace level");
MODULE_PARM_DESC(tx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait from first tx descriptor arrives");
MODULE_PARM_DESC(max_tx_desc, "AceNIC/3C985/GA620 max number of transmit descriptors to wait");
MODULE_PARM_DESC(rx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait from first rx descriptor arrives");
MODULE_PARM_DESC(max_rx_desc, "AceNIC/3C985/GA620 max number of receive descriptors to wait");
MODULE_PARM_DESC(tx_ratio, "AceNIC/3C985/GA620 ratio of NIC memory used for TX/RX descriptors (range 0-63)");


static char version[] __devinitdata =
  "acenic.c: v0.92 08/05/2002  Jes Sorensen, linux-acenic@SunSITE.dk\n"
  "                            http://home.cern.ch/~jes/gige/acenic.html\n";

static int ace_get_settings(struct net_device *, struct ethtool_cmd *);
static int ace_set_settings(struct net_device *, struct ethtool_cmd *);
static void ace_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);

static const struct ethtool_ops ace_ethtool_ops = {
	.get_settings = ace_get_settings,
	.set_settings = ace_set_settings,
	.get_drvinfo = ace_get_drvinfo,
};

static void ace_watchdog(struct net_device *dev);

static int __devinit acenic_probe_one(struct pci_dev *pdev,
				      const struct pci_device_id *id)
{
	struct net_device *dev;
	struct ace_private *ap;
	static int boards_found;

	dev = alloc_etherdev(sizeof(struct ace_private));
	if (dev == NULL) {
		printk(KERN_ERR "acenic: Unable to allocate "
		       "net_device structure!\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	ap = dev->priv;
	ap->pdev = pdev;
	ap->name = pci_name(pdev);

	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
#if ACENIC_DO_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = ace_vlan_rx_register;
#endif

	dev->tx_timeout = &ace_watchdog;
	dev->watchdog_timeo = 5*HZ;

	dev->open = &ace_open;
	dev->stop = &ace_close;
	dev->hard_start_xmit = &ace_start_xmit;
	dev->get_stats = &ace_get_stats;
	dev->set_multicast_list = &ace_set_multicast_list;
	SET_ETHTOOL_OPS(dev, &ace_ethtool_ops);
	dev->set_mac_address = &ace_set_mac_addr;
	dev->change_mtu = &ace_change_mtu;

	/* we only display this string ONCE */
	if (!boards_found)
		printk(version);

	if (pci_enable_device(pdev))
		goto fail_free_netdev;

	/*
	 * Enable master mode before we start playing with the
	 * pci_command word since pci_set_master() will modify
	 * it.
	 */
	pci_set_master(pdev);

	pci_read_config_word(pdev, PCI_COMMAND, &ap->pci_command);

	/* OpenFirmware on Macs does not set this - DOH.. */
	if (!(ap->pci_command & PCI_COMMAND_MEMORY)) {
		printk(KERN_INFO "%s: Enabling PCI Memory Mapped "
		       "access - was not enabled by BIOS/Firmware\n",
		       ap->name);
		ap->pci_command = ap->pci_command | PCI_COMMAND_MEMORY;
		pci_write_config_word(ap->pdev, PCI_COMMAND,
				      ap->pci_command);
		wmb();
	}

	pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &ap->pci_latency);
	if (ap->pci_latency <= 0x40) {
		ap->pci_latency = 0x40;
		pci_write_config_byte(pdev, PCI_LATENCY_TIMER, ap->pci_latency);
	}

	/*
	 * Remap the regs into kernel space - this is abuse of
	 * dev->base_addr since it was meant for I/O port
	 * addresses but who gives a damn.
	 */
	dev->base_addr = pci_resource_start(pdev, 0);
	ap->regs = ioremap(dev->base_addr, 0x4000);
	if (!ap->regs) {
		printk(KERN_ERR "%s: Unable to map I/O register, "
		       "AceNIC %i will be disabled.\n",
		       ap->name, boards_found);
		goto fail_free_netdev;
	}

	switch(pdev->vendor) {
	case PCI_VENDOR_ID_ALTEON:
		if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9100T) {
			printk(KERN_INFO "%s: Farallon PN9100-T ",
			       ap->name);
		} else {
			printk(KERN_INFO "%s: Alteon AceNIC ",
			       ap->name);
		}
		break;
	case PCI_VENDOR_ID_3COM:
		printk(KERN_INFO "%s: 3Com 3C985 ", ap->name);
		break;
	case PCI_VENDOR_ID_NETGEAR:
		printk(KERN_INFO "%s: NetGear GA620 ", ap->name);
		break;
	case PCI_VENDOR_ID_DEC:
		if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9000SX) {
			printk(KERN_INFO "%s: Farallon PN9000-SX ",
			       ap->name);
			break;
		}
	case PCI_VENDOR_ID_SGI:
		printk(KERN_INFO "%s: SGI AceNIC ", ap->name);
		break;
	default:
		printk(KERN_INFO "%s: Unknown AceNIC ", ap->name);
		break;
	}

	printk("Gigabit Ethernet at 0x%08lx, ", dev->base_addr);
	printk("irq %d\n", pdev->irq);

#ifdef CONFIG_ACENIC_OMIT_TIGON_I
	if ((readl(&ap->regs->HostCtrl) >> 28) == 4) {
		printk(KERN_ERR "%s: Driver compiled without Tigon I"
		       " support - NIC disabled\n", dev->name);
		goto fail_uninit;
	}
#endif

	if (ace_allocate_descriptors(dev))
		goto fail_free_netdev;

#ifdef MODULE
	if (boards_found >= ACE_MAX_MOD_PARMS)
		ap->board_idx = BOARD_IDX_OVERFLOW;
	else
		ap->board_idx = boards_found;
#else
	ap->board_idx = BOARD_IDX_STATIC;
#endif

	if (ace_init(dev))
		goto fail_free_netdev;

	if (register_netdev(dev)) {
		printk(KERN_ERR "acenic: device registration failed\n");
		goto fail_uninit;
	}
	ap->name = dev->name;

	if (ap->pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;

	pci_set_drvdata(pdev, dev);

	boards_found++;
	return 0;

 fail_uninit:
	ace_init_cleanup(dev);
 fail_free_netdev:
	free_netdev(dev);
	return -ENODEV;
}

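/*
 * Teardown mirrors acenic_probe_one(): halt the NIC CPU(s), clear
 * pending interrupts, wait out in-flight handlers with ace_sync_irq(),
 * and only then unmap and free the RX buffers and descriptor memory.
 */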
static void __devexit acenic_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	short i;

	unregister_netdev(dev);

	writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
	if (ap->version >= 2)
		writel(readl(&regs->CpuBCtrl) | CPU_HALT, &regs->CpuBCtrl);

	/*
	 * This clears any pending interrupts
	 */
	writel(1, &regs->Mb0Lo);
	readl(&regs->CpuCtrl);	/* flush */

	/*
	 * Make sure no other CPUs are processing interrupts
	 * on the card before the buffers are being released.
	 * Otherwise one might experience some `interesting'
	 * effects.
	 *
	 * Then release the RX buffers - jumbo buffers were
	 * already released in ace_close().
	 */
	ace_sync_irq(dev->irq);

	for (i = 0; i < RX_STD_RING_ENTRIES; i++) {
		struct sk_buff *skb = ap->skb->rx_std_skbuff[i].skb;

		if (skb) {
			struct ring_info *ringp;
			dma_addr_t mapping;

			ringp = &ap->skb->rx_std_skbuff[i];
			mapping = pci_unmap_addr(ringp, mapping);
			pci_unmap_page(ap->pdev, mapping,
				       ACE_STD_BUFSIZE,
				       PCI_DMA_FROMDEVICE);

			ap->rx_std_ring[i].size = 0;
			ap->skb->rx_std_skbuff[i].skb = NULL;
			dev_kfree_skb(skb);
		}
	}

	if (ap->version >= 2) {
		for (i = 0; i < RX_MINI_RING_ENTRIES; i++) {
			struct sk_buff *skb = ap->skb->rx_mini_skbuff[i].skb;

			if (skb) {
				struct ring_info *ringp;
				dma_addr_t mapping;

				ringp = &ap->skb->rx_mini_skbuff[i];
				mapping = pci_unmap_addr(ringp, mapping);
				pci_unmap_page(ap->pdev, mapping,
					       ACE_MINI_BUFSIZE,
					       PCI_DMA_FROMDEVICE);

				ap->rx_mini_ring[i].size = 0;
				ap->skb->rx_mini_skbuff[i].skb = NULL;
				dev_kfree_skb(skb);
			}
		}
	}

	for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
		struct sk_buff *skb = ap->skb->rx_jumbo_skbuff[i].skb;
		if (skb) {
			struct ring_info *ringp;
			dma_addr_t mapping;

			ringp = &ap->skb->rx_jumbo_skbuff[i];
			mapping = pci_unmap_addr(ringp, mapping);
			pci_unmap_page(ap->pdev, mapping,
				       ACE_JUMBO_BUFSIZE,
				       PCI_DMA_FROMDEVICE);

			ap->rx_jumbo_ring[i].size = 0;
			ap->skb->rx_jumbo_skbuff[i].skb = NULL;
			dev_kfree_skb(skb);
		}
	}

	ace_init_cleanup(dev);
	free_netdev(dev);
}

static struct pci_driver acenic_pci_driver = {
	.name		= "acenic",
	.id_table	= acenic_pci_tbl,
	.probe		= acenic_probe_one,
	.remove		= __devexit_p(acenic_remove_one),
};

static int __init acenic_init(void)
{
	return pci_register_driver(&acenic_pci_driver);
}

static void __exit acenic_exit(void)
{
	pci_unregister_driver(&acenic_pci_driver);
}

module_init(acenic_init);
module_exit(acenic_exit);

static void ace_free_descriptors(struct net_device *dev)
{
	struct ace_private *ap = netdev_priv(dev);
	int size;

	if (ap->rx_std_ring != NULL) {
		size = (sizeof(struct rx_desc) *
			(RX_STD_RING_ENTRIES +
			 RX_JUMBO_RING_ENTRIES +
			 RX_MINI_RING_ENTRIES +
			 RX_RETURN_RING_ENTRIES));
		pci_free_consistent(ap->pdev, size, ap->rx_std_ring,
				    ap->rx_ring_base_dma);
		ap->rx_std_ring = NULL;
		ap->rx_jumbo_ring = NULL;
		ap->rx_mini_ring = NULL;
		ap->rx_return_ring = NULL;
	}
	if (ap->evt_ring != NULL) {
		size = (sizeof(struct event) * EVT_RING_ENTRIES);
		pci_free_consistent(ap->pdev, size, ap->evt_ring,
				    ap->evt_ring_dma);
		ap->evt_ring = NULL;
	}
	if (ap->tx_ring != NULL && !ACE_IS_TIGON_I(ap)) {
		size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES);
		pci_free_consistent(ap->pdev, size, ap->tx_ring,
				    ap->tx_ring_dma);
	}

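	/*
	 * ap->tx_ring is cleared unconditionally below: on the Tigon I
	 * the TX ring lives in NIC-local memory (ace_init() points it at
	 * regs->Window), so there is no host allocation to free, but the
	 * stale pointer still needs to be dropped.
	 */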
	ap->tx_ring = NULL;

	if (ap->evt_prd != NULL) {
		pci_free_consistent(ap->pdev, sizeof(u32),
				    (void *)ap->evt_prd, ap->evt_prd_dma);
		ap->evt_prd = NULL;
	}
	if (ap->rx_ret_prd != NULL) {
		pci_free_consistent(ap->pdev, sizeof(u32),
				    (void *)ap->rx_ret_prd,
				    ap->rx_ret_prd_dma);
		ap->rx_ret_prd = NULL;
	}
	if (ap->tx_csm != NULL) {
		pci_free_consistent(ap->pdev, sizeof(u32),
				    (void *)ap->tx_csm, ap->tx_csm_dma);
		ap->tx_csm = NULL;
	}
}


static int ace_allocate_descriptors(struct net_device *dev)
{
	struct ace_private *ap = netdev_priv(dev);
	int size;

	size = (sizeof(struct rx_desc) *
		(RX_STD_RING_ENTRIES +
		 RX_JUMBO_RING_ENTRIES +
		 RX_MINI_RING_ENTRIES +
		 RX_RETURN_RING_ENTRIES));

	ap->rx_std_ring = pci_alloc_consistent(ap->pdev, size,
					       &ap->rx_ring_base_dma);
	if (ap->rx_std_ring == NULL)
		goto fail;

	ap->rx_jumbo_ring = ap->rx_std_ring + RX_STD_RING_ENTRIES;
	ap->rx_mini_ring = ap->rx_jumbo_ring + RX_JUMBO_RING_ENTRIES;
	ap->rx_return_ring = ap->rx_mini_ring + RX_MINI_RING_ENTRIES;

	size = (sizeof(struct event) * EVT_RING_ENTRIES);

	ap->evt_ring = pci_alloc_consistent(ap->pdev, size, &ap->evt_ring_dma);

	if (ap->evt_ring == NULL)
		goto fail;

	/*
	 * Only allocate a host TX ring for the Tigon II, the Tigon I
	 * has to use PCI registers for this ;-(
	 */
	if (!ACE_IS_TIGON_I(ap)) {
		size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES);

		ap->tx_ring = pci_alloc_consistent(ap->pdev, size,
						   &ap->tx_ring_dma);

		if (ap->tx_ring == NULL)
			goto fail;
	}

	ap->evt_prd = pci_alloc_consistent(ap->pdev, sizeof(u32),
					   &ap->evt_prd_dma);
	if (ap->evt_prd == NULL)
		goto fail;

	ap->rx_ret_prd = pci_alloc_consistent(ap->pdev, sizeof(u32),
					      &ap->rx_ret_prd_dma);
	if (ap->rx_ret_prd == NULL)
		goto fail;

	ap->tx_csm = pci_alloc_consistent(ap->pdev, sizeof(u32),
					  &ap->tx_csm_dma);
	if (ap->tx_csm == NULL)
		goto fail;

	return 0;

fail:
	/* Clean up. */
	ace_init_cleanup(dev);
	return 1;
}


/*
 * Generic cleanup handling data allocated during init. Used when the
 * module is unloaded or if an error occurs during initialization
 */
static void ace_init_cleanup(struct net_device *dev)
{
	struct ace_private *ap;

	ap = netdev_priv(dev);

	ace_free_descriptors(dev);

	if (ap->info)
		pci_free_consistent(ap->pdev, sizeof(struct ace_info),
				    ap->info, ap->info_dma);
	kfree(ap->skb);
	kfree(ap->trace_buf);

	if (dev->irq)
		free_irq(dev->irq, dev);

	iounmap(ap->regs);
}


/*
 * Commands are considered to be slow.
 */
static inline void ace_issue_cmd(struct ace_regs __iomem *regs, struct cmd *cmd)
{
	u32 idx;

	idx = readl(&regs->CmdPrd);

	writel(*(u32 *)(cmd), &regs->CmdRng[idx]);
	idx = (idx + 1) % CMD_RING_ENTRIES;

	writel(idx, &regs->CmdPrd);
}

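/*
 * A minimal usage sketch, mirroring the calls made later in this file:
 * a command is a single 32-bit word written into the NIC-resident
 * command ring, and the producer index wraps modulo CMD_RING_ENTRIES.
 *
 *	struct cmd cmd;
 *
 *	cmd.evt = C_SET_RX_PRD_IDX;
 *	cmd.code = 0;
 *	cmd.idx = ap->rx_std_skbprd;
 *	ace_issue_cmd(regs, &cmd);
 */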

static int __devinit ace_init(struct net_device *dev)
{
	struct ace_private *ap;
	struct ace_regs __iomem *regs;
	struct ace_info *info = NULL;
	struct pci_dev *pdev;
	unsigned long myjif;
	u64 tmp_ptr;
	u32 tig_ver, mac1, mac2, tmp, pci_state;
	int board_idx, ecode = 0;
	short i;
	unsigned char cache_size;
	DECLARE_MAC_BUF(mac);

	ap = netdev_priv(dev);
	regs = ap->regs;

	board_idx = ap->board_idx;

	/*
	 * aman@sgi.com - it's useful to do a NIC reset here to
	 * address the `Firmware not running' problem subsequent
	 * to any crashes involving the NIC
	 */
	writel(HW_RESET | (HW_RESET << 24), &regs->HostCtrl);
	readl(&regs->HostCtrl);		/* PCI write posting */
	udelay(5);

	/*
	 * Don't access any other registers before this point!
	 */
#ifdef __BIG_ENDIAN
	/*
	 * This will most likely need BYTE_SWAP once we switch
	 * to using __raw_writel()
	 */
	writel((WORD_SWAP | CLR_INT | ((WORD_SWAP | CLR_INT) << 24)),
	       &regs->HostCtrl);
#else
	writel((CLR_INT | WORD_SWAP | ((CLR_INT | WORD_SWAP) << 24)),
	       &regs->HostCtrl);
#endif
	readl(&regs->HostCtrl);		/* PCI write posting */

	/*
	 * Stop the NIC CPU and clear pending interrupts
	 */
	writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
	readl(&regs->CpuCtrl);		/* PCI write posting */
	writel(0, &regs->Mb0Lo);

	tig_ver = readl(&regs->HostCtrl) >> 28;

	switch(tig_ver){
#ifndef CONFIG_ACENIC_OMIT_TIGON_I
	case 4:
	case 5:
		printk(KERN_INFO "  Tigon I  (Rev. %i), Firmware: %i.%i.%i, ",
		       tig_ver, tigonFwReleaseMajor, tigonFwReleaseMinor,
		       tigonFwReleaseFix);
		writel(0, &regs->LocalCtrl);
		ap->version = 1;
		ap->tx_ring_entries = TIGON_I_TX_RING_ENTRIES;
		break;
#endif
	case 6:
		printk(KERN_INFO "  Tigon II (Rev. %i), Firmware: %i.%i.%i, ",
		       tig_ver, tigon2FwReleaseMajor, tigon2FwReleaseMinor,
		       tigon2FwReleaseFix);
		writel(readl(&regs->CpuBCtrl) | CPU_HALT, &regs->CpuBCtrl);
		readl(&regs->CpuBCtrl);		/* PCI write posting */
		/*
		 * The SRAM bank size does _not_ indicate the amount
		 * of memory on the card, it controls the _bank_ size!
		 * Ie. a 1MB AceNIC will have two banks of 512KB.
		 */
		writel(SRAM_BANK_512K, &regs->LocalCtrl);
		writel(SYNC_SRAM_TIMING, &regs->MiscCfg);
		ap->version = 2;
		ap->tx_ring_entries = MAX_TX_RING_ENTRIES;
		break;
	default:
		printk(KERN_WARNING "  Unsupported Tigon version detected "
		       "(%i)\n", tig_ver);
		ecode = -ENODEV;
		goto init_error;
	}

	/*
	 * ModeStat _must_ be set after the SRAM settings as this change
	 * seems to corrupt the ModeStat and possibly other registers.
	 * The SRAM settings survive resets and setting it to the same
	 * value a second time works as well. This is what caused the
	 * `Firmware not running' problem on the Tigon II.
	 */
#ifdef __BIG_ENDIAN
	writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL | ACE_BYTE_SWAP_BD |
	       ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
#else
	writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL |
	       ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
#endif
	readl(&regs->ModeStat);		/* PCI write posting */

	mac1 = 0;
	for(i = 0; i < 4; i++) {
		int t;

		mac1 = mac1 << 8;
		t = read_eeprom_byte(dev, 0x8c+i);
		if (t < 0) {
			ecode = -EIO;
			goto init_error;
		} else
			mac1 |= (t & 0xff);
	}
	mac2 = 0;
	for(i = 4; i < 8; i++) {
		int t;

		mac2 = mac2 << 8;
		t = read_eeprom_byte(dev, 0x8c+i);
		if (t < 0) {
			ecode = -EIO;
			goto init_error;
		} else
			mac2 |= (t & 0xff);
	}

	writel(mac1, &regs->MacAddrHi);
	writel(mac2, &regs->MacAddrLo);

	dev->dev_addr[0] = (mac1 >> 8) & 0xff;
	dev->dev_addr[1] = mac1 & 0xff;
	dev->dev_addr[2] = (mac2 >> 24) & 0xff;
	dev->dev_addr[3] = (mac2 >> 16) & 0xff;
	dev->dev_addr[4] = (mac2 >> 8) & 0xff;
	dev->dev_addr[5] = mac2 & 0xff;

	printk("MAC: %s\n", print_mac(mac, dev->dev_addr));

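	/*
	 * Layout note: eight EEPROM bytes (0x8c..0x93) are read above,
	 * but only the last six form the station address - the low 16
	 * bits of mac1 plus all 32 bits of mac2; the upper bits written
	 * to MacAddrHi appear to be ignored by the hardware.
	 */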
	/*
	 * Looks like this is necessary to deal with on all architectures,
	 * even this %$#%$# N440BX Intel based thing doesn't get it right.
	 * Ie. having two NICs in the machine, one will have the cache
	 * line set at boot time, the other will not.
	 */
	pdev = ap->pdev;
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_size);
	cache_size <<= 2;
	if (cache_size != SMP_CACHE_BYTES) {
		printk(KERN_INFO "  PCI cache line size set incorrectly "
		       "(%i bytes) by BIOS/FW, ", cache_size);
		if (cache_size > SMP_CACHE_BYTES)
			printk("expecting %i\n", SMP_CACHE_BYTES);
		else {
			printk("correcting to %i\n", SMP_CACHE_BYTES);
			pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
					      SMP_CACHE_BYTES >> 2);
		}
	}

	pci_state = readl(&regs->PciState);
	printk(KERN_INFO "  PCI bus width: %i bits, speed: %iMHz, "
	       "latency: %i clks\n",
	       (pci_state & PCI_32BIT) ? 32 : 64,
	       (pci_state & PCI_66MHZ) ? 66 : 33,
	       ap->pci_latency);

	/*
	 * Set the max DMA transfer size. Seems that for most systems
	 * the performance is better when no MAX parameter is
	 * set. However for systems enabling PCI write and invalidate,
	 * DMA writes must be set to the L1 cache line size to get
	 * optimal performance.
	 *
	 * The default is now to turn the PCI write and invalidate off
	 * - that is what Alteon does for NT.
	 */
	tmp = READ_CMD_MEM | WRITE_CMD_MEM;
	if (ap->version >= 2) {
		tmp |= (MEM_READ_MULTIPLE | (pci_state & PCI_66MHZ));
		/*
		 * Tuning parameters only supported for 8 cards
		 */
		if (board_idx == BOARD_IDX_OVERFLOW ||
		    dis_pci_mem_inval[board_idx]) {
			if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
				ap->pci_command &= ~PCI_COMMAND_INVALIDATE;
				pci_write_config_word(pdev, PCI_COMMAND,
						      ap->pci_command);
				printk(KERN_INFO "  Disabling PCI memory "
				       "write and invalidate\n");
			}
		} else if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
			printk(KERN_INFO "  PCI memory write & invalidate "
			       "enabled by BIOS, enabling counter measures\n");

			switch(SMP_CACHE_BYTES) {
			case 16:
				tmp |= DMA_WRITE_MAX_16;
				break;
			case 32:
				tmp |= DMA_WRITE_MAX_32;
				break;
			case 64:
				tmp |= DMA_WRITE_MAX_64;
				break;
			case 128:
				tmp |= DMA_WRITE_MAX_128;
				break;
			default:
				printk(KERN_INFO "  Cache line size %i not "
				       "supported, PCI write and invalidate "
				       "disabled\n", SMP_CACHE_BYTES);
				ap->pci_command &= ~PCI_COMMAND_INVALIDATE;
				pci_write_config_word(pdev, PCI_COMMAND,
						      ap->pci_command);
			}
		}
	}

#ifdef __sparc__
	/*
	 * On this platform, we know what the best dma settings
	 * are. We use 64-byte maximum bursts, because if we
	 * burst larger than the cache line size (or even cross
	 * a 64byte boundary in a single burst) the UltraSparc
	 * PCI controller will disconnect at 64-byte multiples.
	 *
	 * Read-multiple will be properly enabled above, and when
	 * set will give the PCI controller proper hints about
	 * prefetching.
	 */
	tmp &= ~DMA_READ_WRITE_MASK;
	tmp |= DMA_READ_MAX_64;
	tmp |= DMA_WRITE_MAX_64;
#endif
#ifdef __alpha__
	tmp &= ~DMA_READ_WRITE_MASK;
	tmp |= DMA_READ_MAX_128;
	/*
	 * All the docs say MUST NOT. Well, I did.
	 * Nothing terrible happens, if we load wrong size.
	 * Bit w&i still works better!
	 */
	tmp |= DMA_WRITE_MAX_128;
#endif
	writel(tmp, &regs->PciState);

#if 0
	/*
	 * The Host PCI bus controller driver has to set FBB.
	 * If all devices on that PCI bus support FBB, then the controller
	 * can enable FBB support in the Host PCI Bus controller (or on
	 * the PCI-PCI bridge if that applies).
	 * -ggg
	 */
	/*
	 * I have received reports from people having problems when this
	 * bit is enabled.
	 */
	if (!(ap->pci_command & PCI_COMMAND_FAST_BACK)) {
		printk(KERN_INFO "  Enabling PCI Fast Back to Back\n");
		ap->pci_command |= PCI_COMMAND_FAST_BACK;
		pci_write_config_word(pdev, PCI_COMMAND, ap->pci_command);
	}
#endif

	/*
	 * Configure DMA attributes.
	 */
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		ap->pci_using_dac = 1;
	} else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
		ap->pci_using_dac = 0;
	} else {
		ecode = -ENODEV;
		goto init_error;
	}

	/*
	 * Initialize the generic info block and the command+event rings
	 * and the control blocks for the transmit and receive rings
	 * as they need to be setup once and for all.
	 */
	if (!(info = pci_alloc_consistent(ap->pdev, sizeof(struct ace_info),
					  &ap->info_dma))) {
		ecode = -EAGAIN;
		goto init_error;
	}
	ap->info = info;

	/*
	 * Get the memory for the skb rings.
	 */
	if (!(ap->skb = kmalloc(sizeof(struct ace_skb), GFP_KERNEL))) {
		ecode = -EAGAIN;
		goto init_error;
	}

	ecode = request_irq(pdev->irq, ace_interrupt, IRQF_SHARED,
			    DRV_NAME, dev);
	if (ecode) {
		printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
		       DRV_NAME, pdev->irq);
		goto init_error;
	} else
		dev->irq = pdev->irq;

#ifdef INDEX_DEBUG
	spin_lock_init(&ap->debug_lock);
	ap->last_tx = ACE_TX_RING_ENTRIES(ap) - 1;
	ap->last_std_rx = 0;
	ap->last_mini_rx = 0;
#endif

	memset(ap->info, 0, sizeof(struct ace_info));
	memset(ap->skb, 0, sizeof(struct ace_skb));

	ace_load_firmware(dev);
	ap->fw_running = 0;

	tmp_ptr = ap->info_dma;
	writel(tmp_ptr >> 32, &regs->InfoPtrHi);
	writel(tmp_ptr & 0xffffffff, &regs->InfoPtrLo);

	memset(ap->evt_ring, 0, EVT_RING_ENTRIES * sizeof(struct event));

	set_aceaddr(&info->evt_ctrl.rngptr, ap->evt_ring_dma);
	info->evt_ctrl.flags = 0;

	*(ap->evt_prd) = 0;
	wmb();
	set_aceaddr(&info->evt_prd_ptr, ap->evt_prd_dma);
	writel(0, &regs->EvtCsm);

	set_aceaddr(&info->cmd_ctrl.rngptr, 0x100);
	info->cmd_ctrl.flags = 0;
	info->cmd_ctrl.max_len = 0;

	for (i = 0; i < CMD_RING_ENTRIES; i++)
		writel(0, &regs->CmdRng[i]);

	writel(0, &regs->CmdPrd);
	writel(0, &regs->CmdCsm);

	tmp_ptr = ap->info_dma;
	tmp_ptr += (unsigned long) &(((struct ace_info *)0)->s.stats);
	set_aceaddr(&info->stats2_ptr, (dma_addr_t) tmp_ptr);

	set_aceaddr(&info->rx_std_ctrl.rngptr, ap->rx_ring_base_dma);
	info->rx_std_ctrl.max_len = ACE_STD_BUFSIZE;
	info->rx_std_ctrl.flags =
	  RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | ACE_RCB_VLAN_FLAG;

	memset(ap->rx_std_ring, 0,
	       RX_STD_RING_ENTRIES * sizeof(struct rx_desc));

	for (i = 0; i < RX_STD_RING_ENTRIES; i++)
		ap->rx_std_ring[i].flags = BD_FLG_TCP_UDP_SUM;

	ap->rx_std_skbprd = 0;
	atomic_set(&ap->cur_rx_bufs, 0);

	set_aceaddr(&info->rx_jumbo_ctrl.rngptr,
		    (ap->rx_ring_base_dma +
		     (sizeof(struct rx_desc) * RX_STD_RING_ENTRIES)));
	info->rx_jumbo_ctrl.max_len = 0;
	info->rx_jumbo_ctrl.flags =
	  RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | ACE_RCB_VLAN_FLAG;

	memset(ap->rx_jumbo_ring, 0,
	       RX_JUMBO_RING_ENTRIES * sizeof(struct rx_desc));

	for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++)
		ap->rx_jumbo_ring[i].flags = BD_FLG_TCP_UDP_SUM | BD_FLG_JUMBO;

	ap->rx_jumbo_skbprd = 0;
	atomic_set(&ap->cur_jumbo_bufs, 0);

	memset(ap->rx_mini_ring, 0,
	       RX_MINI_RING_ENTRIES * sizeof(struct rx_desc));

	if (ap->version >= 2) {
		set_aceaddr(&info->rx_mini_ctrl.rngptr,
			    (ap->rx_ring_base_dma +
			     (sizeof(struct rx_desc) *
			      (RX_STD_RING_ENTRIES +
			       RX_JUMBO_RING_ENTRIES))));
		info->rx_mini_ctrl.max_len = ACE_MINI_SIZE;
		info->rx_mini_ctrl.flags =
		  RCB_FLG_TCP_UDP_SUM|RCB_FLG_NO_PSEUDO_HDR|ACE_RCB_VLAN_FLAG;

		for (i = 0; i < RX_MINI_RING_ENTRIES; i++)
			ap->rx_mini_ring[i].flags =
				BD_FLG_TCP_UDP_SUM | BD_FLG_MINI;
	} else {
		set_aceaddr(&info->rx_mini_ctrl.rngptr, 0);
		info->rx_mini_ctrl.flags = RCB_FLG_RNG_DISABLE;
		info->rx_mini_ctrl.max_len = 0;
	}

	ap->rx_mini_skbprd = 0;
	atomic_set(&ap->cur_mini_bufs, 0);

	set_aceaddr(&info->rx_return_ctrl.rngptr,
		    (ap->rx_ring_base_dma +
		     (sizeof(struct rx_desc) *
		      (RX_STD_RING_ENTRIES +
		       RX_JUMBO_RING_ENTRIES +
		       RX_MINI_RING_ENTRIES))));
	info->rx_return_ctrl.flags = 0;
	info->rx_return_ctrl.max_len = RX_RETURN_RING_ENTRIES;

	memset(ap->rx_return_ring, 0,
	       RX_RETURN_RING_ENTRIES * sizeof(struct rx_desc));

	set_aceaddr(&info->rx_ret_prd_ptr, ap->rx_ret_prd_dma);
	*(ap->rx_ret_prd) = 0;

	writel(TX_RING_BASE, &regs->WinBase);

	if (ACE_IS_TIGON_I(ap)) {
		ap->tx_ring = (__force struct tx_desc *) regs->Window;
		for (i = 0; i < (TIGON_I_TX_RING_ENTRIES
				 * sizeof(struct tx_desc)) / sizeof(u32); i++)
			writel(0, (__force void __iomem *)ap->tx_ring + i * 4);

		set_aceaddr(&info->tx_ctrl.rngptr, TX_RING_BASE);
	} else {
		memset(ap->tx_ring, 0,
		       MAX_TX_RING_ENTRIES * sizeof(struct tx_desc));

		set_aceaddr(&info->tx_ctrl.rngptr, ap->tx_ring_dma);
	}

	info->tx_ctrl.max_len = ACE_TX_RING_ENTRIES(ap);
	tmp = RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | ACE_RCB_VLAN_FLAG;

	/*
	 * The Tigon I does not like having the TX ring in host memory ;-(
	 */
	if (!ACE_IS_TIGON_I(ap))
		tmp |= RCB_FLG_TX_HOST_RING;
#if TX_COAL_INTS_ONLY
	tmp |= RCB_FLG_COAL_INT_ONLY;
#endif
	info->tx_ctrl.flags = tmp;

	set_aceaddr(&info->tx_csm_ptr, ap->tx_csm_dma);

	/*
	 * Potential item for tuning parameter
	 */
#if 0 /* NO */
	writel(DMA_THRESH_16W, &regs->DmaReadCfg);
	writel(DMA_THRESH_16W, &regs->DmaWriteCfg);
#else
	writel(DMA_THRESH_8W, &regs->DmaReadCfg);
	writel(DMA_THRESH_8W, &regs->DmaWriteCfg);
#endif

	writel(0, &regs->MaskInt);
	writel(1, &regs->IfIdx);
#if 0
	/*
	 * McKinley boxes do not like us fiddling with AssistState
	 * this early
	 */
	writel(1, &regs->AssistState);
#endif

	writel(DEF_STAT, &regs->TuneStatTicks);
	writel(DEF_TRACE, &regs->TuneTrace);

	ace_set_rxtx_parms(dev, 0);

	if (board_idx == BOARD_IDX_OVERFLOW) {
		printk(KERN_WARNING "%s: more than %i NICs detected, "
		       "ignoring module parameters!\n",
		       ap->name, ACE_MAX_MOD_PARMS);
	} else if (board_idx >= 0) {
		if (tx_coal_tick[board_idx])
			writel(tx_coal_tick[board_idx],
			       &regs->TuneTxCoalTicks);
		if (max_tx_desc[board_idx])
			writel(max_tx_desc[board_idx], &regs->TuneMaxTxDesc);

		if (rx_coal_tick[board_idx])
			writel(rx_coal_tick[board_idx],
			       &regs->TuneRxCoalTicks);
		if (max_rx_desc[board_idx])
			writel(max_rx_desc[board_idx], &regs->TuneMaxRxDesc);

		if (trace[board_idx])
			writel(trace[board_idx], &regs->TuneTrace);

		if ((tx_ratio[board_idx] > 0) && (tx_ratio[board_idx] < 64))
			writel(tx_ratio[board_idx], &regs->TxBufRat);
	}

	/*
	 * Default link parameters
	 */
	tmp = LNK_ENABLE | LNK_FULL_DUPLEX | LNK_1000MB | LNK_100MB |
		LNK_10MB | LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL | LNK_NEGOTIATE;
	if(ap->version >= 2)
		tmp |= LNK_TX_FLOW_CTL_Y;

	/*
	 * Override link default parameters
	 */
	if ((board_idx >= 0) && link_state[board_idx]) {
		int option = link_state[board_idx];

		tmp = LNK_ENABLE;

		if (option & 0x01) {
			printk(KERN_INFO "%s: Setting half duplex link\n",
			       ap->name);
			tmp &= ~LNK_FULL_DUPLEX;
		}
		if (option & 0x02)
			tmp &= ~LNK_NEGOTIATE;
		if (option & 0x10)
			tmp |= LNK_10MB;
		if (option & 0x20)
			tmp |= LNK_100MB;
		if (option & 0x40)
			tmp |= LNK_1000MB;
		if ((option & 0x70) == 0) {
			printk(KERN_WARNING "%s: No media speed specified, "
			       "forcing auto negotiation\n", ap->name);
			tmp |= LNK_NEGOTIATE | LNK_1000MB |
				LNK_100MB | LNK_10MB;
		}
		if ((option & 0x100) == 0)
			tmp |= LNK_NEG_FCTL;
		else
			printk(KERN_INFO "%s: Disabling flow control "
			       "negotiation\n", ap->name);
		if (option & 0x200)
			tmp |= LNK_RX_FLOW_CTL_Y;
		if ((option & 0x400) && (ap->version >= 2)) {
			printk(KERN_INFO "%s: Enabling TX flow control\n",
			       ap->name);
			tmp |= LNK_TX_FLOW_CTL_Y;
		}
	}

	ap->link = tmp;
	writel(tmp, &regs->TuneLink);
	if (ap->version >= 2)
		writel(tmp, &regs->TuneFastLink);

	if (ACE_IS_TIGON_I(ap))
		writel(tigonFwStartAddr, &regs->Pc);
	if (ap->version == 2)
		writel(tigon2FwStartAddr, &regs->Pc);

	writel(0, &regs->Mb0Lo);

	/*
	 * Set tx_csm before we start receiving interrupts, otherwise
	 * the interrupt handler might think it is supposed to process
	 * tx ints before we are up and running, which may cause a null
	 * pointer access in the int handler.
	 */
	ap->cur_rx = 0;
	ap->tx_prd = *(ap->tx_csm) = ap->tx_ret_csm = 0;

	wmb();
	ace_set_txprd(regs, ap, 0);
	writel(0, &regs->RxRetCsm);

	/*
	 * Enable DMA engine now.
	 * If we do this sooner, Mckinley box pukes.
	 * I assume it's because Tigon II DMA engine wants to check
	 * *something* even before the CPU is started.
	 */
	writel(1, &regs->AssistState);	/* enable DMA */

	/*
	 * Start the NIC CPU
	 */
	writel(readl(&regs->CpuCtrl) & ~(CPU_HALT|CPU_TRACE), &regs->CpuCtrl);
	readl(&regs->CpuCtrl);

	/*
	 * Wait for the firmware to spin up - max 3 seconds.
	 */
	myjif = jiffies + 3 * HZ;
	while (time_before(jiffies, myjif) && !ap->fw_running)
		cpu_relax();

	if (!ap->fw_running) {
		printk(KERN_ERR "%s: Firmware NOT running!\n", ap->name);

		ace_dump_trace(ap);
		writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
		readl(&regs->CpuCtrl);

		/* aman@sgi.com - account for badly behaving firmware/NIC:
		 * - have observed that the NIC may continue to generate
		 *   interrupts for some reason; attempt to stop it - halt
		 *   second CPU for Tigon II cards, and also clear Mb0
		 * - if we're a module, we'll fail to load if this was
		 *   the only GbE card in the system => if the kernel does
		 *   see an interrupt from the NIC, code to handle it is
		 *   gone and OOps! - so free_irq also
		 */
		if (ap->version >= 2)
			writel(readl(&regs->CpuBCtrl) | CPU_HALT,
			       &regs->CpuBCtrl);
		writel(0, &regs->Mb0Lo);
		readl(&regs->Mb0Lo);

		ecode = -EBUSY;
		goto init_error;
	}

	/*
	 * We load the ring here as there seems to be no way to tell the
	 * firmware to wipe the ring without re-initializing it.
	 */
	if (!test_and_set_bit(0, &ap->std_refill_busy))
		ace_load_std_rx_ring(ap, RX_RING_SIZE);
	else
		printk(KERN_ERR "%s: Someone is busy refilling the RX ring\n",
		       ap->name);
	if (ap->version >= 2) {
		if (!test_and_set_bit(0, &ap->mini_refill_busy))
			ace_load_mini_rx_ring(ap, RX_MINI_SIZE);
		else
			printk(KERN_ERR "%s: Someone is busy refilling "
			       "the RX mini ring\n", ap->name);
	}
	return 0;

 init_error:
	ace_init_cleanup(dev);
	return ecode;
}


static void ace_set_rxtx_parms(struct net_device *dev, int jumbo)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	int board_idx = ap->board_idx;

	if (board_idx >= 0) {
		if (!jumbo) {
			if (!tx_coal_tick[board_idx])
				writel(DEF_TX_COAL, &regs->TuneTxCoalTicks);
			if (!max_tx_desc[board_idx])
				writel(DEF_TX_MAX_DESC, &regs->TuneMaxTxDesc);
			if (!rx_coal_tick[board_idx])
				writel(DEF_RX_COAL, &regs->TuneRxCoalTicks);
			if (!max_rx_desc[board_idx])
				writel(DEF_RX_MAX_DESC, &regs->TuneMaxRxDesc);
			if (!tx_ratio[board_idx])
				writel(DEF_TX_RATIO, &regs->TxBufRat);
		} else {
			if (!tx_coal_tick[board_idx])
				writel(DEF_JUMBO_TX_COAL,
				       &regs->TuneTxCoalTicks);
			if (!max_tx_desc[board_idx])
				writel(DEF_JUMBO_TX_MAX_DESC,
				       &regs->TuneMaxTxDesc);
			if (!rx_coal_tick[board_idx])
				writel(DEF_JUMBO_RX_COAL,
				       &regs->TuneRxCoalTicks);
			if (!max_rx_desc[board_idx])
				writel(DEF_JUMBO_RX_MAX_DESC,
				       &regs->TuneMaxRxDesc);
			if (!tx_ratio[board_idx])
				writel(DEF_JUMBO_TX_RATIO, &regs->TxBufRat);
		}
	}
}


static void ace_watchdog(struct net_device *data)
{
	struct net_device *dev = data;
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;

	/*
	 * We haven't received a stats update event for more than 2.5
	 * seconds and there is data in the transmit queue, thus we
	 * assume the card is stuck.
	 */
	if (*ap->tx_csm != ap->tx_ret_csm) {
		printk(KERN_WARNING "%s: Transmitter is stuck, %08x\n",
		       dev->name, (unsigned int)readl(&regs->HostCtrl));
		/* This can happen due to ieee flow control. */
	} else {
		printk(KERN_DEBUG "%s: BUG... transmitter died. Kicking it.\n",
		       dev->name);
#if 0
		netif_wake_queue(dev);
#endif
	}
}


static void ace_tasklet(unsigned long dev)
{
	struct ace_private *ap = netdev_priv((struct net_device *)dev);
	int cur_size;

	cur_size = atomic_read(&ap->cur_rx_bufs);
	if ((cur_size < RX_LOW_STD_THRES) &&
	    !test_and_set_bit(0, &ap->std_refill_busy)) {
#ifdef DEBUG
		printk("refilling buffers (current %i)\n", cur_size);
#endif
		ace_load_std_rx_ring(ap, RX_RING_SIZE - cur_size);
	}

	if (ap->version >= 2) {
		cur_size = atomic_read(&ap->cur_mini_bufs);
		if ((cur_size < RX_LOW_MINI_THRES) &&
		    !test_and_set_bit(0, &ap->mini_refill_busy)) {
#ifdef DEBUG
			printk("refilling mini buffers (current %i)\n",
			       cur_size);
#endif
			ace_load_mini_rx_ring(ap, RX_MINI_SIZE - cur_size);
		}
	}

	cur_size = atomic_read(&ap->cur_jumbo_bufs);
	if (ap->jumbo && (cur_size < RX_LOW_JUMBO_THRES) &&
	    !test_and_set_bit(0, &ap->jumbo_refill_busy)) {
#ifdef DEBUG
		printk("refilling jumbo buffers (current %i)\n", cur_size);
#endif
		ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE - cur_size);
	}
	ap->tasklet_pending = 0;
}


/*
 * Copy the contents of the NIC's trace buffer to kernel memory.
 */
static void ace_dump_trace(struct ace_private *ap)
{
#if 0
	if (!ap->trace_buf)
		if (!(ap->trace_buf = kmalloc(ACE_TRACE_SIZE, GFP_KERNEL)))
			return;
#endif
}


/*
 * Load the standard rx ring.
 *
 * Loading rings is safe without holding the spin lock since this is
 * done only before the device is enabled, thus no interrupts are
 * generated by the device and the refill cannot be raced by the
 * interrupt handler/tasklet handler.
 */
static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs)
{
	struct ace_regs __iomem *regs = ap->regs;
	short i, idx;


	prefetchw(&ap->cur_rx_bufs);

	idx = ap->rx_std_skbprd;

	for (i = 0; i < nr_bufs; i++) {
		struct sk_buff *skb;
		struct rx_desc *rd;
		dma_addr_t mapping;

		skb = alloc_skb(ACE_STD_BUFSIZE + NET_IP_ALIGN, GFP_ATOMIC);
		if (!skb)
			break;

		skb_reserve(skb, NET_IP_ALIGN);
		mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
				       offset_in_page(skb->data),
				       ACE_STD_BUFSIZE,
				       PCI_DMA_FROMDEVICE);
		ap->skb->rx_std_skbuff[idx].skb = skb;
		pci_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
				   mapping, mapping);

		rd = &ap->rx_std_ring[idx];
		set_aceaddr(&rd->addr, mapping);
		rd->size = ACE_STD_BUFSIZE;
		rd->idx = idx;
		idx = (idx + 1) % RX_STD_RING_ENTRIES;
	}

	if (!i)
		goto error_out;

	atomic_add(i, &ap->cur_rx_bufs);
	ap->rx_std_skbprd = idx;

	if (ACE_IS_TIGON_I(ap)) {
		struct cmd cmd;
		cmd.evt = C_SET_RX_PRD_IDX;
		cmd.code = 0;
		cmd.idx = ap->rx_std_skbprd;
		ace_issue_cmd(regs, &cmd);
	} else {
		writel(idx, &regs->RxStdPrd);
		wmb();
	}

 out:
	clear_bit(0, &ap->std_refill_busy);
	return;

 error_out:
	printk(KERN_INFO "Out of memory when allocating "
	       "standard receive buffers\n");
	goto out;
}


static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs)
{
	struct ace_regs __iomem *regs = ap->regs;
	short i, idx;

	prefetchw(&ap->cur_mini_bufs);

	idx = ap->rx_mini_skbprd;
	for (i = 0; i < nr_bufs; i++) {
		struct sk_buff *skb;
		struct rx_desc *rd;
		dma_addr_t mapping;

		skb = alloc_skb(ACE_MINI_BUFSIZE + NET_IP_ALIGN, GFP_ATOMIC);
		if (!skb)
			break;

		skb_reserve(skb, NET_IP_ALIGN);
		mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
				       offset_in_page(skb->data),
				       ACE_MINI_BUFSIZE,
				       PCI_DMA_FROMDEVICE);
		ap->skb->rx_mini_skbuff[idx].skb = skb;
		pci_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
				   mapping, mapping);

		rd = &ap->rx_mini_ring[idx];
		set_aceaddr(&rd->addr, mapping);
		rd->size = ACE_MINI_BUFSIZE;
		rd->idx = idx;
		idx = (idx + 1) % RX_MINI_RING_ENTRIES;
	}

	if (!i)
		goto error_out;

	atomic_add(i, &ap->cur_mini_bufs);

	ap->rx_mini_skbprd = idx;

	writel(idx, &regs->RxMiniPrd);
	wmb();

 out:
	clear_bit(0, &ap->mini_refill_busy);
	return;
 error_out:
	printk(KERN_INFO "Out of memory when allocating "
	       "mini receive buffers\n");
	goto out;
}


/*
 * Load the jumbo rx ring, this may happen at any time if the MTU
 * is changed to a value > 1500.
 */
static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs)
{
	struct ace_regs __iomem *regs = ap->regs;
	short i, idx;

	idx = ap->rx_jumbo_skbprd;

	for (i = 0; i < nr_bufs; i++) {
		struct sk_buff *skb;
		struct rx_desc *rd;
		dma_addr_t mapping;

		skb = alloc_skb(ACE_JUMBO_BUFSIZE + NET_IP_ALIGN, GFP_ATOMIC);
		if (!skb)
			break;

		skb_reserve(skb, NET_IP_ALIGN);
		mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
				       offset_in_page(skb->data),
				       ACE_JUMBO_BUFSIZE,
				       PCI_DMA_FROMDEVICE);
		ap->skb->rx_jumbo_skbuff[idx].skb = skb;
		pci_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
				   mapping, mapping);

		rd = &ap->rx_jumbo_ring[idx];
		set_aceaddr(&rd->addr, mapping);
		rd->size = ACE_JUMBO_BUFSIZE;
		rd->idx = idx;
		idx = (idx + 1) % RX_JUMBO_RING_ENTRIES;
	}

	if (!i)
		goto error_out;

	atomic_add(i, &ap->cur_jumbo_bufs);
	ap->rx_jumbo_skbprd = idx;

	if (ACE_IS_TIGON_I(ap)) {
		struct cmd cmd;
		cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
		cmd.code = 0;
		cmd.idx = ap->rx_jumbo_skbprd;
		ace_issue_cmd(regs, &cmd);
	} else {
		writel(idx, &regs->RxJumboPrd);
		wmb();
	}

 out:
	clear_bit(0, &ap->jumbo_refill_busy);
	return;
 error_out:
	if (net_ratelimit())
		printk(KERN_INFO "Out of memory when allocating "
		       "jumbo receive buffers\n");
	goto out;
}


/*
 * All events are considered to be slow (RX/TX ints do not generate
 * events) and are handled here, outside the main interrupt handler,
 * to reduce the size of the handler.
 */
static u32 ace_handle_event(struct net_device *dev, u32 evtcsm, u32 evtprd)
{
	struct ace_private *ap;

	ap = netdev_priv(dev);

	while (evtcsm != evtprd) {
		switch (ap->evt_ring[evtcsm].evt) {
		case E_FW_RUNNING:
			printk(KERN_INFO "%s: Firmware up and running\n",
			       ap->name);
			ap->fw_running = 1;
			wmb();
			break;
		case E_STATS_UPDATED:
			break;
		case E_LNK_STATE:
		{
			u16 code = ap->evt_ring[evtcsm].code;
			switch (code) {
			case E_C_LINK_UP:
			{
				u32 state = readl(&ap->regs->GigLnkState);
				printk(KERN_WARNING "%s: Optical link UP "
				       "(%s Duplex, Flow Control: %s%s)\n",
				       ap->name,
				       state & LNK_FULL_DUPLEX ? "Full":"Half",
				       state & LNK_TX_FLOW_CTL_Y ? "TX " : "",
"RX" : ""); 1865 break; 1866 } 1867 case E_C_LINK_DOWN: 1868 printk(KERN_WARNING "%s: Optical link DOWN\n", 1869 ap->name); 1870 break; 1871 case E_C_LINK_10_100: 1872 printk(KERN_WARNING "%s: 10/100BaseT link " 1873 "UP\n", ap->name); 1874 break; 1875 default: 1876 printk(KERN_ERR "%s: Unknown optical link " 1877 "state %02x\n", ap->name, code); 1878 } 1879 break; 1880 } 1881 case E_ERROR: 1882 switch(ap->evt_ring[evtcsm].code) { 1883 case E_C_ERR_INVAL_CMD: 1884 printk(KERN_ERR "%s: invalid command error\n", 1885 ap->name); 1886 break; 1887 case E_C_ERR_UNIMP_CMD: 1888 printk(KERN_ERR "%s: unimplemented command " 1889 "error\n", ap->name); 1890 break; 1891 case E_C_ERR_BAD_CFG: 1892 printk(KERN_ERR "%s: bad config error\n", 1893 ap->name); 1894 break; 1895 default: 1896 printk(KERN_ERR "%s: unknown error %02x\n", 1897 ap->name, ap->evt_ring[evtcsm].code); 1898 } 1899 break; 1900 case E_RESET_JUMBO_RNG: 1901 { 1902 int i; 1903 for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) { 1904 if (ap->skb->rx_jumbo_skbuff[i].skb) { 1905 ap->rx_jumbo_ring[i].size = 0; 1906 set_aceaddr(&ap->rx_jumbo_ring[i].addr, 0); 1907 dev_kfree_skb(ap->skb->rx_jumbo_skbuff[i].skb); 1908 ap->skb->rx_jumbo_skbuff[i].skb = NULL; 1909 } 1910 } 1911 1912 if (ACE_IS_TIGON_I(ap)) { 1913 struct cmd cmd; 1914 cmd.evt = C_SET_RX_JUMBO_PRD_IDX; 1915 cmd.code = 0; 1916 cmd.idx = 0; 1917 ace_issue_cmd(ap->regs, &cmd); 1918 } else { 1919 writel(0, &((ap->regs)->RxJumboPrd)); 1920 wmb(); 1921 } 1922 1923 ap->jumbo = 0; 1924 ap->rx_jumbo_skbprd = 0; 1925 printk(KERN_INFO "%s: Jumbo ring flushed\n", 1926 ap->name); 1927 clear_bit(0, &ap->jumbo_refill_busy); 1928 break; 1929 } 1930 default: 1931 printk(KERN_ERR "%s: Unhandled event 0x%02x\n", 1932 ap->name, ap->evt_ring[evtcsm].evt); 1933 } 1934 evtcsm = (evtcsm + 1) % EVT_RING_ENTRIES; 1935 } 1936 1937 return evtcsm; 1938} 1939 1940 1941static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm) 1942{ 1943 struct ace_private *ap = netdev_priv(dev); 1944 u32 idx; 1945 int mini_count = 0, std_count = 0; 1946 1947 idx = rxretcsm; 1948 1949 prefetchw(&ap->cur_rx_bufs); 1950 prefetchw(&ap->cur_mini_bufs); 1951 1952 while (idx != rxretprd) { 1953 struct ring_info *rip; 1954 struct sk_buff *skb; 1955 struct rx_desc *rxdesc, *retdesc; 1956 u32 skbidx; 1957 int bd_flags, desc_type, mapsize; 1958 u16 csum; 1959 1960 1961 /* make sure the rx descriptor isn't read before rxretprd */ 1962 if (idx == rxretcsm) 1963 rmb(); 1964 1965 retdesc = &ap->rx_return_ring[idx]; 1966 skbidx = retdesc->idx; 1967 bd_flags = retdesc->flags; 1968 desc_type = bd_flags & (BD_FLG_JUMBO | BD_FLG_MINI); 1969 1970 switch(desc_type) { 1971 /* 1972 * Normal frames do not have any flags set 1973 * 1974 * Mini and normal frames arrive frequently, 1975 * so use a local counter to avoid doing 1976 * atomic operations for each packet arriving. 
1977 */ 1978 case 0: 1979 rip = &ap->skb->rx_std_skbuff[skbidx]; 1980 mapsize = ACE_STD_BUFSIZE; 1981 rxdesc = &ap->rx_std_ring[skbidx]; 1982 std_count++; 1983 break; 1984 case BD_FLG_JUMBO: 1985 rip = &ap->skb->rx_jumbo_skbuff[skbidx]; 1986 mapsize = ACE_JUMBO_BUFSIZE; 1987 rxdesc = &ap->rx_jumbo_ring[skbidx]; 1988 atomic_dec(&ap->cur_jumbo_bufs); 1989 break; 1990 case BD_FLG_MINI: 1991 rip = &ap->skb->rx_mini_skbuff[skbidx]; 1992 mapsize = ACE_MINI_BUFSIZE; 1993 rxdesc = &ap->rx_mini_ring[skbidx]; 1994 mini_count++; 1995 break; 1996 default: 1997 printk(KERN_INFO "%s: unknown frame type (0x%02x) " 1998 "returned by NIC\n", dev->name, 1999 retdesc->flags); 2000 goto error; 2001 } 2002 2003 skb = rip->skb; 2004 rip->skb = NULL; 2005 pci_unmap_page(ap->pdev, 2006 pci_unmap_addr(rip, mapping), 2007 mapsize, 2008 PCI_DMA_FROMDEVICE); 2009 skb_put(skb, retdesc->size); 2010 2011 /* 2012 * Fly baby, fly! 2013 */ 2014 csum = retdesc->tcp_udp_csum; 2015 2016 skb->protocol = eth_type_trans(skb, dev); 2017 2018 /* 2019 * Instead of forcing the poor tigon mips cpu to calculate the 2020 * pseudo hdr checksum, we do this ourselves. 2021 */ 2022 if (bd_flags & BD_FLG_TCP_UDP_SUM) { 2023 skb->csum = htons(csum); 2024 skb->ip_summed = CHECKSUM_COMPLETE; 2025 } else { 2026 skb->ip_summed = CHECKSUM_NONE; 2027 } 2028 2029 /* send it up */ 2030#if ACENIC_DO_VLAN 2031 if (ap->vlgrp && (bd_flags & BD_FLG_VLAN_TAG)) { 2032 vlan_hwaccel_rx(skb, ap->vlgrp, retdesc->vlan); 2033 } else 2034#endif 2035 netif_rx(skb); 2036 2037 dev->last_rx = jiffies; 2038 dev->stats.rx_packets++; 2039 dev->stats.rx_bytes += retdesc->size; 2040 2041 idx = (idx + 1) % RX_RETURN_RING_ENTRIES; 2042 } 2043 2044 atomic_sub(std_count, &ap->cur_rx_bufs); 2045 if (!ACE_IS_TIGON_I(ap)) 2046 atomic_sub(mini_count, &ap->cur_mini_bufs); 2047 2048 out: 2049 /* 2050 * According to the documentation RxRetCsm is obsolete with 2051 * the 12.3.x Firmware - my Tigon I NICs seem to disagree! 2052 */ 2053 if (ACE_IS_TIGON_I(ap)) { 2054 writel(idx, &ap->regs->RxRetCsm); 2055 } 2056 ap->cur_rx = idx; 2057 2058 return; 2059 error: 2060 idx = rxretprd; 2061 goto out; 2062} 2063 2064 2065static inline void ace_tx_int(struct net_device *dev, 2066 u32 txcsm, u32 idx) 2067{ 2068 struct ace_private *ap = netdev_priv(dev); 2069 2070 do { 2071 struct sk_buff *skb; 2072 dma_addr_t mapping; 2073 struct tx_ring_info *info; 2074 2075 info = ap->skb->tx_skbuff + idx; 2076 skb = info->skb; 2077 mapping = pci_unmap_addr(info, mapping); 2078 2079 if (mapping) { 2080 pci_unmap_page(ap->pdev, mapping, 2081 pci_unmap_len(info, maplen), 2082 PCI_DMA_TODEVICE); 2083 pci_unmap_addr_set(info, mapping, 0); 2084 } 2085 2086 if (skb) { 2087 dev->stats.tx_packets++; 2088 dev->stats.tx_bytes += skb->len; 2089 dev_kfree_skb_irq(skb); 2090 info->skb = NULL; 2091 } 2092 2093 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap); 2094 } while (idx != txcsm); 2095 2096 if (netif_queue_stopped(dev)) 2097 netif_wake_queue(dev); 2098 2099 wmb(); 2100 ap->tx_ret_csm = txcsm; 2101 2102 /* So... tx_ret_csm is advanced _after_ the check for device wakeup. 2103 * 2104 * We could try to advance it before the check. In that case we would 2105 * get the following race condition: hard_start_xmit on another cpu 2106 * enters after we advance tx_ret_csm and fills the space 2107 * which we have just freed, so that we perform an illegal device wakeup. 2108 * There is no good way to work around this (the check at entry 2109 * to ace_start_xmit detects this condition and prevents 2110 * ring corruption, but it is not a good workaround.)
2111 * 2112 * When tx_ret_csm is advanced afterwards, we wake the device up _only_ 2113 * if we really have some space in the ring (though the core doing 2114 * hard_start_xmit can see a full ring for some period and has to 2115 * synchronize). Superb. 2116 * BUT! We get another subtle race condition. hard_start_xmit 2117 * may think that the ring is full between the wakeup and the advancing 2118 * of tx_ret_csm and will stop the device instantly! This is not so bad: 2119 * we are guaranteed that there is something in the ring, so that 2120 * the next irq will resume transmission. To speed this up we could 2121 * mark the descriptor which closes the ring with BD_FLG_COAL_NOW 2122 * (see ace_start_xmit). 2123 * 2124 * Well, this dilemma exists in all lock-free devices. 2125 * Following the scheme used in drivers by Donald Becker, we 2126 * select the least dangerous option. 2127 * --ANK 2128 */ 2129} 2130 2131 2132static irqreturn_t ace_interrupt(int irq, void *dev_id) 2133{ 2134 struct net_device *dev = (struct net_device *)dev_id; 2135 struct ace_private *ap = netdev_priv(dev); 2136 struct ace_regs __iomem *regs = ap->regs; 2137 u32 idx; 2138 u32 txcsm, rxretcsm, rxretprd; 2139 u32 evtcsm, evtprd; 2140 2141 /* 2142 * In case of PCI shared interrupts or spurious interrupts, 2143 * we want to make sure it is actually our interrupt before 2144 * spending any time in here. 2145 */ 2146 if (!(readl(&regs->HostCtrl) & IN_INT)) 2147 return IRQ_NONE; 2148 2149 /* 2150 * ACK the intr now. Otherwise we will lose updates to rx_ret_prd 2151 * which happened _after_ rxretprd = *ap->rx_ret_prd; but before 2152 * writel(0, &regs->Mb0Lo). 2153 * 2154 * The "IRQ avoidance" recommended in the docs applies to IRQs served 2155 * by threads, and it is wrong even for that case. 2156 */ 2157 writel(0, &regs->Mb0Lo); 2158 readl(&regs->Mb0Lo); 2159 2160 /* 2161 * There is no conflict between transmit handling in 2162 * start_xmit and receive processing, thus there is no reason 2163 * to take a spin lock for RX handling. Wait until we start 2164 * working on the other stuff - hey we don't need a spin lock 2165 * anymore. 2166 */ 2167 rxretprd = *ap->rx_ret_prd; 2168 rxretcsm = ap->cur_rx; 2169 2170 if (rxretprd != rxretcsm) 2171 ace_rx_int(dev, rxretprd, rxretcsm); 2172 2173 txcsm = *ap->tx_csm; 2174 idx = ap->tx_ret_csm; 2175 2176 if (txcsm != idx) { 2177 /* 2178 * If each skb takes only one descriptor this check degenerates 2179 * to identity, because new space has just been opened. 2180 * But if skbs are fragmented we must check that this index 2181 * update releases enough space; otherwise we just 2182 * wait for the device to do more work. 2183 */ 2184 if (!tx_ring_full(ap, txcsm, ap->tx_prd)) 2185 ace_tx_int(dev, txcsm, idx); 2186 } 2187 2188 evtcsm = readl(&regs->EvtCsm); 2189 evtprd = *ap->evt_prd; 2190 2191 if (evtcsm != evtprd) { 2192 evtcsm = ace_handle_event(dev, evtcsm, evtprd); 2193 writel(evtcsm, &regs->EvtCsm); 2194 } 2195 2196 /* 2197 * This has to go last in the interrupt handler and run with 2198 * the spin lock released ... what lock?
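 *
 * The refill policy below is two-level: between the LOW and PANIC
 * watermarks the work is deferred to the tasklet, and only below
 * RX_PANIC_{STD,MINI,JUMBO}_THRES is the ring topped up directly
 * from the handler. Roughly, per ring:
 *
 *	if (cur_size < panic threshold && refill not already running)
 *		refill inline;
 *	else
 *		run_tasklet = 1;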
2199 */ 2200 if (netif_running(dev)) { 2201 int cur_size; 2202 int run_tasklet = 0; 2203 2204 cur_size = atomic_read(&ap->cur_rx_bufs); 2205 if (cur_size < RX_LOW_STD_THRES) { 2206 if ((cur_size < RX_PANIC_STD_THRES) && 2207 !test_and_set_bit(0, &ap->std_refill_busy)) { 2208#ifdef DEBUG 2209 printk("low on std buffers %i\n", cur_size); 2210#endif 2211 ace_load_std_rx_ring(ap, 2212 RX_RING_SIZE - cur_size); 2213 } else 2214 run_tasklet = 1; 2215 } 2216 2217 if (!ACE_IS_TIGON_I(ap)) { 2218 cur_size = atomic_read(&ap->cur_mini_bufs); 2219 if (cur_size < RX_LOW_MINI_THRES) { 2220 if ((cur_size < RX_PANIC_MINI_THRES) && 2221 !test_and_set_bit(0, 2222 &ap->mini_refill_busy)) { 2223#ifdef DEBUG 2224 printk("low on mini buffers %i\n", 2225 cur_size); 2226#endif 2227 ace_load_mini_rx_ring(ap, RX_MINI_SIZE - cur_size); 2228 } else 2229 run_tasklet = 1; 2230 } 2231 } 2232 2233 if (ap->jumbo) { 2234 cur_size = atomic_read(&ap->cur_jumbo_bufs); 2235 if (cur_size < RX_LOW_JUMBO_THRES) { 2236 if ((cur_size < RX_PANIC_JUMBO_THRES) && 2237 !test_and_set_bit(0, 2238 &ap->jumbo_refill_busy)) { 2239#ifdef DEBUG 2240 printk("low on jumbo buffers %i\n", 2241 cur_size); 2242#endif 2243 ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE - cur_size); 2244 } else 2245 run_tasklet = 1; 2246 } 2247 } 2248 if (run_tasklet && !ap->tasklet_pending) { 2249 ap->tasklet_pending = 1; 2250 tasklet_schedule(&ap->ace_tasklet); 2251 } 2252 } 2253 2254 return IRQ_HANDLED; 2255} 2256 2257 2258#if ACENIC_DO_VLAN 2259static void ace_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) 2260{ 2261 struct ace_private *ap = netdev_priv(dev); 2262 unsigned long flags; 2263 2264 local_irq_save(flags); 2265 ace_mask_irq(dev); 2266 2267 ap->vlgrp = grp; 2268 2269 ace_unmask_irq(dev); 2270 local_irq_restore(flags); 2271} 2272#endif /* ACENIC_DO_VLAN */ 2273 2274 2275static int ace_open(struct net_device *dev) 2276{ 2277 struct ace_private *ap = netdev_priv(dev); 2278 struct ace_regs __iomem *regs = ap->regs; 2279 struct cmd cmd; 2280 2281 if (!(ap->fw_running)) { 2282 printk(KERN_WARNING "%s: Firmware not running!\n", dev->name); 2283 return -EBUSY; 2284 } 2285 2286 writel(dev->mtu + ETH_HLEN + 4, &regs->IfMtu); 2287 2288 cmd.evt = C_CLEAR_STATS; 2289 cmd.code = 0; 2290 cmd.idx = 0; 2291 ace_issue_cmd(regs, &cmd); 2292 2293 cmd.evt = C_HOST_STATE; 2294 cmd.code = C_C_STACK_UP; 2295 cmd.idx = 0; 2296 ace_issue_cmd(regs, &cmd); 2297 2298 if (ap->jumbo && 2299 !test_and_set_bit(0, &ap->jumbo_refill_busy)) 2300 ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE); 2301 2302 if (dev->flags & IFF_PROMISC) { 2303 cmd.evt = C_SET_PROMISC_MODE; 2304 cmd.code = C_C_PROMISC_ENABLE; 2305 cmd.idx = 0; 2306 ace_issue_cmd(regs, &cmd); 2307 2308 ap->promisc = 1; 2309 } else 2310 ap->promisc = 0; 2311 ap->mcast_all = 0; 2312 2313#if 0 2314 cmd.evt = C_LNK_NEGOTIATION; 2315 cmd.code = 0; 2316 cmd.idx = 0; 2317 ace_issue_cmd(regs, &cmd); 2318#endif 2319 2320 netif_start_queue(dev); 2321 2322 /* 2323 * Set up the bottom half rx ring refill handler 2324 */ 2325 tasklet_init(&ap->ace_tasklet, ace_tasklet, (unsigned long)dev); 2326 return 0; 2327} 2328 2329 2330static int ace_close(struct net_device *dev) 2331{ 2332 struct ace_private *ap = netdev_priv(dev); 2333 struct ace_regs __iomem *regs = ap->regs; 2334 struct cmd cmd; 2335 unsigned long flags; 2336 short i; 2337 2338 /* 2339 * Without (or before) releasing the irq and stopping the hardware 2340 * this is pointless, by the way: the stopped queue will be woken 2341 * right back up by the first irq.
2342 */ 2343 netif_stop_queue(dev); 2344 2345 2346 if (ap->promisc) { 2347 cmd.evt = C_SET_PROMISC_MODE; 2348 cmd.code = C_C_PROMISC_DISABLE; 2349 cmd.idx = 0; 2350 ace_issue_cmd(regs, &cmd); 2351 ap->promisc = 0; 2352 } 2353 2354 cmd.evt = C_HOST_STATE; 2355 cmd.code = C_C_STACK_DOWN; 2356 cmd.idx = 0; 2357 ace_issue_cmd(regs, &cmd); 2358 2359 tasklet_kill(&ap->ace_tasklet); 2360 2361 /* 2362 * Make sure one CPU is not processing packets while 2363 * buffers are being released by another. 2364 */ 2365 2366 local_irq_save(flags); 2367 ace_mask_irq(dev); 2368 2369 for (i = 0; i < ACE_TX_RING_ENTRIES(ap); i++) { 2370 struct sk_buff *skb; 2371 dma_addr_t mapping; 2372 struct tx_ring_info *info; 2373 2374 info = ap->skb->tx_skbuff + i; 2375 skb = info->skb; 2376 mapping = pci_unmap_addr(info, mapping); 2377 2378 if (mapping) { 2379 if (ACE_IS_TIGON_I(ap)) { 2380 /* NB: TIGON_1 is special, tx_ring is in io space */ 2381 struct tx_desc __iomem *tx; 2382 tx = (__force struct tx_desc __iomem *) &ap->tx_ring[i]; 2383 writel(0, &tx->addr.addrhi); 2384 writel(0, &tx->addr.addrlo); 2385 writel(0, &tx->flagsize); 2386 } else 2387 memset(ap->tx_ring + i, 0, 2388 sizeof(struct tx_desc)); 2389 pci_unmap_page(ap->pdev, mapping, 2390 pci_unmap_len(info, maplen), 2391 PCI_DMA_TODEVICE); 2392 pci_unmap_addr_set(info, mapping, 0); 2393 } 2394 if (skb) { 2395 dev_kfree_skb(skb); 2396 info->skb = NULL; 2397 } 2398 } 2399 2400 if (ap->jumbo) { 2401 cmd.evt = C_RESET_JUMBO_RNG; 2402 cmd.code = 0; 2403 cmd.idx = 0; 2404 ace_issue_cmd(regs, &cmd); 2405 } 2406 2407 ace_unmask_irq(dev); 2408 local_irq_restore(flags); 2409 2410 return 0; 2411} 2412 2413 2414static inline dma_addr_t 2415ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb, 2416 struct sk_buff *tail, u32 idx) 2417{ 2418 dma_addr_t mapping; 2419 struct tx_ring_info *info; 2420 2421 mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), 2422 offset_in_page(skb->data), 2423 skb->len, PCI_DMA_TODEVICE); 2424 2425 info = ap->skb->tx_skbuff + idx; 2426 info->skb = tail; 2427 pci_unmap_addr_set(info, mapping, mapping); 2428 pci_unmap_len_set(info, maplen, skb->len); 2429 return mapping; 2430} 2431 2432 2433static inline void 2434ace_load_tx_bd(struct ace_private *ap, struct tx_desc *desc, u64 addr, 2435 u32 flagsize, u32 vlan_tag) 2436{ 2437#if !USE_TX_COAL_NOW 2438 flagsize &= ~BD_FLG_COAL_NOW; 2439#endif 2440 2441 if (ACE_IS_TIGON_I(ap)) { 2442 struct tx_desc __iomem *io = (__force struct tx_desc __iomem *) desc; 2443 writel(addr >> 32, &io->addr.addrhi); 2444 writel(addr & 0xffffffff, &io->addr.addrlo); 2445 writel(flagsize, &io->flagsize); 2446#if ACENIC_DO_VLAN 2447 writel(vlan_tag, &io->vlanres); 2448#endif 2449 } else { 2450 desc->addr.addrhi = addr >> 32; 2451 desc->addr.addrlo = addr; 2452 desc->flagsize = flagsize; 2453#if ACENIC_DO_VLAN 2454 desc->vlanres = vlan_tag; 2455#endif 2456 } 2457} 2458 2459 2460static int ace_start_xmit(struct sk_buff *skb, struct net_device *dev) 2461{ 2462 struct ace_private *ap = netdev_priv(dev); 2463 struct ace_regs __iomem *regs = ap->regs; 2464 struct tx_desc *desc; 2465 u32 idx, flagsize; 2466 unsigned long maxjiff = jiffies + 3*HZ; 2467 2468restart: 2469 idx = ap->tx_prd; 2470 2471 if (tx_ring_full(ap, ap->tx_ret_csm, idx)) 2472 goto overflow; 2473 2474 if (!skb_shinfo(skb)->nr_frags) { 2475 dma_addr_t mapping; 2476 u32 vlan_tag = 0; 2477 2478 mapping = ace_map_tx_skb(ap, skb, skb, idx); 2479 flagsize = (skb->len << 16) | (BD_FLG_END); 2480 if (skb->ip_summed == CHECKSUM_PARTIAL) 2481 flagsize |= 
BD_FLG_TCP_UDP_SUM; 2482#if ACENIC_DO_VLAN 2483 if (vlan_tx_tag_present(skb)) { 2484 flagsize |= BD_FLG_VLAN_TAG; 2485 vlan_tag = vlan_tx_tag_get(skb); 2486 } 2487#endif 2488 desc = ap->tx_ring + idx; 2489 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap); 2490 2491 /* Look at ace_tx_int for explanations. */ 2492 if (tx_ring_full(ap, ap->tx_ret_csm, idx)) 2493 flagsize |= BD_FLG_COAL_NOW; 2494 2495 ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag); 2496 } else { 2497 dma_addr_t mapping; 2498 u32 vlan_tag = 0; 2499 int i, len = 0; 2500 2501 mapping = ace_map_tx_skb(ap, skb, NULL, idx); 2502 flagsize = (skb_headlen(skb) << 16); 2503 if (skb->ip_summed == CHECKSUM_PARTIAL) 2504 flagsize |= BD_FLG_TCP_UDP_SUM; 2505#if ACENIC_DO_VLAN 2506 if (vlan_tx_tag_present(skb)) { 2507 flagsize |= BD_FLG_VLAN_TAG; 2508 vlan_tag = vlan_tx_tag_get(skb); 2509 } 2510#endif 2511 2512 ace_load_tx_bd(ap, ap->tx_ring + idx, mapping, flagsize, vlan_tag); 2513 2514 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap); 2515 2516 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2517 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2518 struct tx_ring_info *info; 2519 2520 len += frag->size; 2521 info = ap->skb->tx_skbuff + idx; 2522 desc = ap->tx_ring + idx; 2523 2524 mapping = pci_map_page(ap->pdev, frag->page, 2525 frag->page_offset, frag->size, 2526 PCI_DMA_TODEVICE); 2527 2528 flagsize = (frag->size << 16); 2529 if (skb->ip_summed == CHECKSUM_PARTIAL) 2530 flagsize |= BD_FLG_TCP_UDP_SUM; 2531 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap); 2532 2533 if (i == skb_shinfo(skb)->nr_frags - 1) { 2534 flagsize |= BD_FLG_END; 2535 if (tx_ring_full(ap, ap->tx_ret_csm, idx)) 2536 flagsize |= BD_FLG_COAL_NOW; 2537 2538 /* 2539 * Only the last fragment frees 2540 * the skb! 2541 */ 2542 info->skb = skb; 2543 } else { 2544 info->skb = NULL; 2545 } 2546 pci_unmap_addr_set(info, mapping, mapping); 2547 pci_unmap_len_set(info, maplen, frag->size); 2548 ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag); 2549 } 2550 } 2551 2552 wmb(); 2553 ap->tx_prd = idx; 2554 ace_set_txprd(regs, ap, idx); 2555 2556 if (flagsize & BD_FLG_COAL_NOW) { 2557 netif_stop_queue(dev); 2558 2559 /* 2560 * A TX-descriptor producer (an IRQ) might have gotten 2561 * in between, making the ring free again. Since xmit is 2562 * serialized, this is the only situation we have to 2563 * re-test. 2564 */ 2565 if (!tx_ring_full(ap, ap->tx_ret_csm, idx)) 2566 netif_wake_queue(dev); 2567 } 2568 2569 dev->trans_start = jiffies; 2570 return NETDEV_TX_OK; 2571 2572overflow: 2573 /* 2574 * This race condition is unavoidable with lock-free drivers. 2575 * We wake up the queue _before_ tx_prd is advanced, so we can 2576 * enter hard_start_xmit too early, while the tx ring still looks closed. 2577 * This happens ~1-4 times per 100000 packets, so it is acceptable 2578 * to loop here, synchronizing with the other CPU. We probably need 2579 * an additional wmb() in ace_tx_int as well. 2580 * 2581 * Note that this race is relieved by reserving one more entry 2582 * in the tx ring than is necessary (see the original non-SG driver). 2583 * However, with SG we would need to reserve 2*MAX_SKB_FRAGS+1, which 2584 * is already overkill. 2585 * 2586 * The alternative is to return 1 without throttling the queue. In that 2587 * case the loop just becomes longer, with no more useful effect. 2588 */ 2589 if (time_before(jiffies, maxjiff)) { 2590 barrier(); 2591 cpu_relax(); 2592 goto restart; 2593 } 2594 2595 /* The ring is stuck full.
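 * That is, roughly three seconds (the maxjiff deadline set up at
 * entry) have passed without the interrupt handler freeing any
 * space.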
*/ 2596 printk(KERN_WARNING "%s: Transmit ring stuck full\n", dev->name); 2597 return NETDEV_TX_BUSY; 2598} 2599 2600 2601static int ace_change_mtu(struct net_device *dev, int new_mtu) 2602{ 2603 struct ace_private *ap = netdev_priv(dev); 2604 struct ace_regs __iomem *regs = ap->regs; 2605 2606 if (new_mtu > ACE_JUMBO_MTU) 2607 return -EINVAL; 2608 2609 writel(new_mtu + ETH_HLEN + 4, &regs->IfMtu); 2610 dev->mtu = new_mtu; 2611 2612 if (new_mtu > ACE_STD_MTU) { 2613 if (!(ap->jumbo)) { 2614 printk(KERN_INFO "%s: Enabling Jumbo frame " 2615 "support\n", dev->name); 2616 ap->jumbo = 1; 2617 if (!test_and_set_bit(0, &ap->jumbo_refill_busy)) 2618 ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE); 2619 ace_set_rxtx_parms(dev, 1); 2620 } 2621 } else { 2622 while (test_and_set_bit(0, &ap->jumbo_refill_busy)); 2623 ace_sync_irq(dev->irq); 2624 ace_set_rxtx_parms(dev, 0); 2625 if (ap->jumbo) { 2626 struct cmd cmd; 2627 2628 cmd.evt = C_RESET_JUMBO_RNG; 2629 cmd.code = 0; 2630 cmd.idx = 0; 2631 ace_issue_cmd(regs, &cmd); 2632 } 2633 } 2634 2635 return 0; 2636} 2637 2638static int ace_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 2639{ 2640 struct ace_private *ap = netdev_priv(dev); 2641 struct ace_regs __iomem *regs = ap->regs; 2642 u32 link; 2643 2644 memset(ecmd, 0, sizeof(struct ethtool_cmd)); 2645 ecmd->supported = 2646 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | 2647 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | 2648 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | 2649 SUPPORTED_Autoneg | SUPPORTED_FIBRE); 2650 2651 ecmd->port = PORT_FIBRE; 2652 ecmd->transceiver = XCVR_INTERNAL; 2653 2654 link = readl(&regs->GigLnkState); 2655 if (link & LNK_1000MB) 2656 ecmd->speed = SPEED_1000; 2657 else { 2658 link = readl(&regs->FastLnkState); 2659 if (link & LNK_100MB) 2660 ecmd->speed = SPEED_100; 2661 else if (link & LNK_10MB) 2662 ecmd->speed = SPEED_10; 2663 else 2664 ecmd->speed = 0; 2665 } 2666 if (link & LNK_FULL_DUPLEX) 2667 ecmd->duplex = DUPLEX_FULL; 2668 else 2669 ecmd->duplex = DUPLEX_HALF; 2670 2671 if (link & LNK_NEGOTIATE) 2672 ecmd->autoneg = AUTONEG_ENABLE; 2673 else 2674 ecmd->autoneg = AUTONEG_DISABLE; 2675 2676#if 0 2677 /* 2678 * Current struct ethtool_cmd is insufficient 2679 */ 2680 ecmd->trace = readl(&regs->TuneTrace); 2681 2682 ecmd->txcoal = readl(&regs->TuneTxCoalTicks); 2683 ecmd->rxcoal = readl(&regs->TuneRxCoalTicks); 2684#endif 2685 ecmd->maxtxpkt = readl(&regs->TuneMaxTxDesc); 2686 ecmd->maxrxpkt = readl(&regs->TuneMaxRxDesc); 2687 2688 return 0; 2689} 2690 2691static int ace_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 2692{ 2693 struct ace_private *ap = netdev_priv(dev); 2694 struct ace_regs __iomem *regs = ap->regs; 2695 u32 link, speed; 2696 2697 link = readl(&regs->GigLnkState); 2698 if (link & LNK_1000MB) 2699 speed = SPEED_1000; 2700 else { 2701 link = readl(&regs->FastLnkState); 2702 if (link & LNK_100MB) 2703 speed = SPEED_100; 2704 else if (link & LNK_10MB) 2705 speed = SPEED_10; 2706 else 2707 speed = SPEED_100; 2708 } 2709 2710 link = LNK_ENABLE | LNK_1000MB | LNK_100MB | LNK_10MB | 2711 LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL; 2712 if (!ACE_IS_TIGON_I(ap)) 2713 link |= LNK_TX_FLOW_CTL_Y; 2714 if (ecmd->autoneg == AUTONEG_ENABLE) 2715 link |= LNK_NEGOTIATE; 2716 if (ecmd->speed != speed) { 2717 link &= ~(LNK_1000MB | LNK_100MB | LNK_10MB); 2718 switch (speed) { 2719 case SPEED_1000: 2720 link |= LNK_1000MB; 2721 break; 2722 case SPEED_100: 2723 link |= LNK_100MB; 2724 break; 2725 case SPEED_10: 2726 link |= LNK_10MB; 2727 
break; 2728 } 2729 } 2730 2731 if (ecmd->duplex == DUPLEX_FULL) 2732 link |= LNK_FULL_DUPLEX; 2733 2734 if (link != ap->link) { 2735 struct cmd cmd; 2736 printk(KERN_INFO "%s: Renegotiating link state\n", 2737 dev->name); 2738 2739 ap->link = link; 2740 writel(link, &regs->TuneLink); 2741 if (!ACE_IS_TIGON_I(ap)) 2742 writel(link, &regs->TuneFastLink); 2743 wmb(); 2744 2745 cmd.evt = C_LNK_NEGOTIATION; 2746 cmd.code = 0; 2747 cmd.idx = 0; 2748 ace_issue_cmd(regs, &cmd); 2749 } 2750 return 0; 2751} 2752 2753static void ace_get_drvinfo(struct net_device *dev, 2754 struct ethtool_drvinfo *info) 2755{ 2756 struct ace_private *ap = netdev_priv(dev); 2757 2758 strlcpy(info->driver, "acenic", sizeof(info->driver)); 2759 snprintf(info->version, sizeof(info->version), "%i.%i.%i", 2760 tigonFwReleaseMajor, tigonFwReleaseMinor, 2761 tigonFwReleaseFix); 2762 2763 if (ap->pdev) 2764 strlcpy(info->bus_info, pci_name(ap->pdev), 2765 sizeof(info->bus_info)); 2766 2767} 2768 2769/* 2770 * Set the hardware MAC address. 2771 */ 2772static int ace_set_mac_addr(struct net_device *dev, void *p) 2773{ 2774 struct ace_private *ap = netdev_priv(dev); 2775 struct ace_regs __iomem *regs = ap->regs; 2776 struct sockaddr *addr = p; 2777 u8 *da; 2778 struct cmd cmd; 2779 2780 if (netif_running(dev)) 2781 return -EBUSY; 2782 2783 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 2784 2785 da = (u8 *)dev->dev_addr; 2786 2787 writel(da[0] << 8 | da[1], &regs->MacAddrHi); 2788 writel((da[2] << 24) | (da[3] << 16) | (da[4] << 8) | da[5], 2789 &regs->MacAddrLo); 2790 2791 cmd.evt = C_SET_MAC_ADDR; 2792 cmd.code = 0; 2793 cmd.idx = 0; 2794 ace_issue_cmd(regs, &cmd); 2795 2796 return 0; 2797} 2798 2799 2800static void ace_set_multicast_list(struct net_device *dev) 2801{ 2802 struct ace_private *ap = netdev_priv(dev); 2803 struct ace_regs __iomem *regs = ap->regs; 2804 struct cmd cmd; 2805 2806 if ((dev->flags & IFF_ALLMULTI) && !(ap->mcast_all)) { 2807 cmd.evt = C_SET_MULTICAST_MODE; 2808 cmd.code = C_C_MCAST_ENABLE; 2809 cmd.idx = 0; 2810 ace_issue_cmd(regs, &cmd); 2811 ap->mcast_all = 1; 2812 } else if (ap->mcast_all) { 2813 cmd.evt = C_SET_MULTICAST_MODE; 2814 cmd.code = C_C_MCAST_DISABLE; 2815 cmd.idx = 0; 2816 ace_issue_cmd(regs, &cmd); 2817 ap->mcast_all = 0; 2818 } 2819 2820 if ((dev->flags & IFF_PROMISC) && !(ap->promisc)) { 2821 cmd.evt = C_SET_PROMISC_MODE; 2822 cmd.code = C_C_PROMISC_ENABLE; 2823 cmd.idx = 0; 2824 ace_issue_cmd(regs, &cmd); 2825 ap->promisc = 1; 2826 } else if (!(dev->flags & IFF_PROMISC) && (ap->promisc)) { 2827 cmd.evt = C_SET_PROMISC_MODE; 2828 cmd.code = C_C_PROMISC_DISABLE; 2829 cmd.idx = 0; 2830 ace_issue_cmd(regs, &cmd); 2831 ap->promisc = 0; 2832 } 2833 2834 /* 2835 * For the time being multicast relies on the upper layers 2836 * filtering it properly. The Firmware does not allow one to 2837 * set the entire multicast list at a time and keeping track of 2838 * it here is going to be messy.
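 *
 * So the code below simply asks the firmware to receive all
 * multicast frames whenever the interface has any multicast
 * addresses configured; in essence (ignoring the mcast_all guard):
 *
 *	cmd.evt = C_SET_MULTICAST_MODE;
 *	cmd.code = dev->mc_count ? C_C_MCAST_ENABLE : C_C_MCAST_DISABLE;
 *	ace_issue_cmd(regs, &cmd);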
2839 */ 2840 if ((dev->mc_count) && !(ap->mcast_all)) { 2841 cmd.evt = C_SET_MULTICAST_MODE; 2842 cmd.code = C_C_MCAST_ENABLE; 2843 cmd.idx = 0; 2844 ace_issue_cmd(regs, &cmd); 2845 } else if (!ap->mcast_all) { 2846 cmd.evt = C_SET_MULTICAST_MODE; 2847 cmd.code = C_C_MCAST_DISABLE; 2848 cmd.idx = 0; 2849 ace_issue_cmd(regs, &cmd); 2850 } 2851} 2852 2853 2854static struct net_device_stats *ace_get_stats(struct net_device *dev) 2855{ 2856 struct ace_private *ap = netdev_priv(dev); 2857 struct ace_mac_stats __iomem *mac_stats = 2858 (struct ace_mac_stats __iomem *)ap->regs->Stats; 2859 2860 dev->stats.rx_missed_errors = readl(&mac_stats->drop_space); 2861 dev->stats.multicast = readl(&mac_stats->kept_mc); 2862 dev->stats.collisions = readl(&mac_stats->coll); 2863 2864 return &dev->stats; 2865} 2866 2867 2868static void __devinit ace_copy(struct ace_regs __iomem *regs, void *src, 2869 u32 dest, int size) 2870{ 2871 void __iomem *tdest; 2872 u32 *wsrc; 2873 short tsize, i; 2874 2875 if (size <= 0) 2876 return; 2877 2878 while (size > 0) { 2879 tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1), 2880 min_t(u32, size, ACE_WINDOW_SIZE)); 2881 tdest = (void __iomem *) &regs->Window + 2882 (dest & (ACE_WINDOW_SIZE - 1)); 2883 writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase); 2884 /* 2885 * This requires byte swapping on big endian; however, 2886 * writel does that for us 2887 */ 2888 wsrc = src; 2889 for (i = 0; i < (tsize / 4); i++) { 2890 writel(wsrc[i], tdest + i*4); 2891 } 2892 dest += tsize; 2893 src += tsize; 2894 size -= tsize; 2895 } 2896 2897 return; 2898} 2899 2900 2901static void __devinit ace_clear(struct ace_regs __iomem *regs, u32 dest, int size) 2902{ 2903 void __iomem *tdest; 2904 short tsize = 0, i; 2905 2906 if (size <= 0) 2907 return; 2908 2909 while (size > 0) { 2910 tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1), 2911 min_t(u32, size, ACE_WINDOW_SIZE)); 2912 tdest = (void __iomem *) &regs->Window + 2913 (dest & (ACE_WINDOW_SIZE - 1)); 2914 writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase); 2915 2916 for (i = 0; i < (tsize / 4); i++) { 2917 writel(0, tdest + i*4); 2918 } 2919 2920 dest += tsize; 2921 size -= tsize; 2922 } 2923 2924 return; 2925} 2926 2927 2928/* 2929 * Download the firmware into the SRAM on the NIC 2930 * 2931 * This operation requires the NIC to be halted and is performed with 2932 * interrupts disabled and with the spinlock held.
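 *
 * ace_copy() and ace_clear() above reach the SRAM through a sliding
 * window: regs->WinBase selects an ACE_WINDOW_SIZE-aligned region
 * and the data is then pushed as 32-bit words through regs->Window,
 * one window-sized chunk at a time, roughly:
 *
 *	writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
 *	writel(word, (void __iomem *)&regs->Window +
 *	       (dest & (ACE_WINDOW_SIZE - 1)));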
2933 */ 2934static int __devinit ace_load_firmware(struct net_device *dev) 2935{ 2936 struct ace_private *ap = netdev_priv(dev); 2937 struct ace_regs __iomem *regs = ap->regs; 2938 2939 if (!(readl(&regs->CpuCtrl) & CPU_HALTED)) { 2940 printk(KERN_ERR "%s: trying to download firmware while the " 2941 "CPU is running!\n", ap->name); 2942 return -EFAULT; 2943 } 2944 2945 /* 2946 * Do not try to clear more than 512KB or we end up seeing 2947 * funny things on NICs with only 512KB SRAM 2948 */ 2949 ace_clear(regs, 0x2000, 0x80000-0x2000); 2950 if (ACE_IS_TIGON_I(ap)) { 2951 ace_copy(regs, tigonFwText, tigonFwTextAddr, tigonFwTextLen); 2952 ace_copy(regs, tigonFwData, tigonFwDataAddr, tigonFwDataLen); 2953 ace_copy(regs, tigonFwRodata, tigonFwRodataAddr, 2954 tigonFwRodataLen); 2955 ace_clear(regs, tigonFwBssAddr, tigonFwBssLen); 2956 ace_clear(regs, tigonFwSbssAddr, tigonFwSbssLen); 2957 } else if (ap->version == 2) { 2958 ace_clear(regs, tigon2FwBssAddr, tigon2FwBssLen); 2959 ace_clear(regs, tigon2FwSbssAddr, tigon2FwSbssLen); 2960 ace_copy(regs, tigon2FwText, tigon2FwTextAddr, tigon2FwTextLen); 2961 ace_copy(regs, tigon2FwRodata, tigon2FwRodataAddr, 2962 tigon2FwRodataLen); 2963 ace_copy(regs, tigon2FwData, tigon2FwDataAddr, tigon2FwDataLen); 2964 } 2965 2966 return 0; 2967} 2968 2969 2970/* 2971 * The eeprom on the AceNIC is an Atmel i2c EEPROM. 2972 * 2973 * Accessing the EEPROM is `interesting' to say the least - don't read 2974 * this code right after dinner. 2975 * 2976 * This is all about black magic and bit-banging the device .... I 2977 * wonder in what hospital they have put the guy who designed the i2c 2978 * specs. 2979 * 2980 * Oh yes, this is only the beginning! 2981 * 2982 * Thanks to Stevarino Webinski for helping to track down the bugs in 2983 * the i2c readout code by beta testing all my hacks.
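 *
 * For all the bit-banging below, the read sequence itself is the
 * usual i2c random read, as implemented by read_eeprom_byte():
 *
 *	eeprom_start(regs);
 *	eeprom_prep(regs, EEPROM_WRITE_SELECT);	    <- address the device
 *	eeprom_prep(regs, (offset >> 8) & 0xff);    <- high address byte
 *	eeprom_prep(regs, offset & 0xff);	    <- low address byte
 *	eeprom_start(regs);			    <- repeated start
 *	eeprom_prep(regs, EEPROM_READ_SELECT);
 *	... clock in 8 data bits, then eeprom_stop(regs);
 *
 * with eeprom_check_ack() expected to return 0 after every prep.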
2984 */ 2985static void __devinit eeprom_start(struct ace_regs __iomem *regs) 2986{ 2987 u32 local; 2988 2989 readl(&regs->LocalCtrl); 2990 udelay(ACE_SHORT_DELAY); 2991 local = readl(&regs->LocalCtrl); 2992 local |= EEPROM_DATA_OUT | EEPROM_WRITE_ENABLE; 2993 writel(local, &regs->LocalCtrl); 2994 readl(&regs->LocalCtrl); 2995 mb(); 2996 udelay(ACE_SHORT_DELAY); 2997 local |= EEPROM_CLK_OUT; 2998 writel(local, &regs->LocalCtrl); 2999 readl(&regs->LocalCtrl); 3000 mb(); 3001 udelay(ACE_SHORT_DELAY); 3002 local &= ~EEPROM_DATA_OUT; 3003 writel(local, &regs->LocalCtrl); 3004 readl(&regs->LocalCtrl); 3005 mb(); 3006 udelay(ACE_SHORT_DELAY); 3007 local &= ~EEPROM_CLK_OUT; 3008 writel(local, &regs->LocalCtrl); 3009 readl(&regs->LocalCtrl); 3010 mb(); 3011} 3012 3013 3014static void __devinit eeprom_prep(struct ace_regs __iomem *regs, u8 magic) 3015{ 3016 short i; 3017 u32 local; 3018 3019 udelay(ACE_SHORT_DELAY); 3020 local = readl(&regs->LocalCtrl); 3021 local &= ~EEPROM_DATA_OUT; 3022 local |= EEPROM_WRITE_ENABLE; 3023 writel(local, &regs->LocalCtrl); 3024 readl(&regs->LocalCtrl); 3025 mb(); 3026 3027 for (i = 0; i < 8; i++, magic <<= 1) { 3028 udelay(ACE_SHORT_DELAY); 3029 if (magic & 0x80) 3030 local |= EEPROM_DATA_OUT; 3031 else 3032 local &= ~EEPROM_DATA_OUT; 3033 writel(local, &regs->LocalCtrl); 3034 readl(&regs->LocalCtrl); 3035 mb(); 3036 3037 udelay(ACE_SHORT_DELAY); 3038 local |= EEPROM_CLK_OUT; 3039 writel(local, &regs->LocalCtrl); 3040 readl(&regs->LocalCtrl); 3041 mb(); 3042 udelay(ACE_SHORT_DELAY); 3043 local &= ~(EEPROM_CLK_OUT | EEPROM_DATA_OUT); 3044 writel(local, &regs->LocalCtrl); 3045 readl(&regs->LocalCtrl); 3046 mb(); 3047 } 3048} 3049 3050 3051static int __devinit eeprom_check_ack(struct ace_regs __iomem *regs) 3052{ 3053 int state; 3054 u32 local; 3055 3056 local = readl(&regs->LocalCtrl); 3057 local &= ~EEPROM_WRITE_ENABLE; 3058 writel(local, &regs->LocalCtrl); 3059 readl(&regs->LocalCtrl); 3060 mb(); 3061 udelay(ACE_LONG_DELAY); 3062 local |= EEPROM_CLK_OUT; 3063 writel(local, &regs->LocalCtrl); 3064 readl(&regs->LocalCtrl); 3065 mb(); 3066 udelay(ACE_SHORT_DELAY); 3067 /* sample data in middle of high clk */ 3068 state = (readl(&regs->LocalCtrl) & EEPROM_DATA_IN) != 0; 3069 udelay(ACE_SHORT_DELAY); 3070 mb(); 3071 writel(readl(&regs->LocalCtrl) & ~EEPROM_CLK_OUT, &regs->LocalCtrl); 3072 readl(&regs->LocalCtrl); 3073 mb(); 3074 3075 return state; 3076} 3077 3078 3079static void __devinit eeprom_stop(struct ace_regs __iomem *regs) 3080{ 3081 u32 local; 3082 3083 udelay(ACE_SHORT_DELAY); 3084 local = readl(&regs->LocalCtrl); 3085 local |= EEPROM_WRITE_ENABLE; 3086 writel(local, &regs->LocalCtrl); 3087 readl(&regs->LocalCtrl); 3088 mb(); 3089 udelay(ACE_SHORT_DELAY); 3090 local &= ~EEPROM_DATA_OUT; 3091 writel(local, &regs->LocalCtrl); 3092 readl(&regs->LocalCtrl); 3093 mb(); 3094 udelay(ACE_SHORT_DELAY); 3095 local |= EEPROM_CLK_OUT; 3096 writel(local, &regs->LocalCtrl); 3097 readl(&regs->LocalCtrl); 3098 mb(); 3099 udelay(ACE_SHORT_DELAY); 3100 local |= EEPROM_DATA_OUT; 3101 writel(local, &regs->LocalCtrl); 3102 readl(&regs->LocalCtrl); 3103 mb(); 3104 udelay(ACE_LONG_DELAY); 3105 local &= ~EEPROM_CLK_OUT; 3106 writel(local, &regs->LocalCtrl); 3107 mb(); 3108} 3109 3110 3111/* 3112 * Read a whole byte from the EEPROM. 
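 *
 * On success the return value is the byte read (0-255); on failure
 * it is -EIO. A caller is expected to check the sign, along these
 * lines (offset here is just a placeholder):
 *
 *	int b = read_eeprom_byte(dev, offset);
 *	if (b < 0)
 *		goto fail;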
3113 */ 3114static int __devinit read_eeprom_byte(struct net_device *dev, 3115 unsigned long offset) 3116{ 3117 struct ace_private *ap = netdev_priv(dev); 3118 struct ace_regs __iomem *regs = ap->regs; 3119 unsigned long flags; 3120 u32 local; 3121 int result = 0; 3122 short i; 3123 3124 /* 3125 * Don't take interrupts on this CPU while bit banging 3126 * the %#%#@$ I2C device 3127 */ 3128 local_irq_save(flags); 3129 3130 eeprom_start(regs); 3131 3132 eeprom_prep(regs, EEPROM_WRITE_SELECT); 3133 if (eeprom_check_ack(regs)) { 3134 local_irq_restore(flags); 3135 printk(KERN_ERR "%s: Unable to sync eeprom\n", ap->name); 3136 result = -EIO; 3137 goto eeprom_read_error; 3138 } 3139 3140 eeprom_prep(regs, (offset >> 8) & 0xff); 3141 if (eeprom_check_ack(regs)) { 3142 local_irq_restore(flags); 3143 printk(KERN_ERR "%s: Unable to set address byte 0\n", 3144 ap->name); 3145 result = -EIO; 3146 goto eeprom_read_error; 3147 } 3148 3149 eeprom_prep(regs, offset & 0xff); 3150 if (eeprom_check_ack(regs)) { 3151 local_irq_restore(flags); 3152 printk(KERN_ERR "%s: Unable to set address byte 1\n", 3153 ap->name); 3154 result = -EIO; 3155 goto eeprom_read_error; 3156 } 3157 3158 eeprom_start(regs); 3159 eeprom_prep(regs, EEPROM_READ_SELECT); 3160 if (eeprom_check_ack(regs)) { 3161 local_irq_restore(flags); 3162 printk(KERN_ERR "%s: Unable to set READ_SELECT\n", 3163 ap->name); 3164 result = -EIO; 3165 goto eeprom_read_error; 3166 } 3167 3168 for (i = 0; i < 8; i++) { 3169 local = readl(&regs->LocalCtrl); 3170 local &= ~EEPROM_WRITE_ENABLE; 3171 writel(local, &regs->LocalCtrl); 3172 readl(&regs->LocalCtrl); 3173 udelay(ACE_LONG_DELAY); 3174 mb(); 3175 local |= EEPROM_CLK_OUT; 3176 writel(local, &regs->LocalCtrl); 3177 readl(&regs->LocalCtrl); 3178 mb(); 3179 udelay(ACE_SHORT_DELAY); 3180 /* sample data mid high clk */ 3181 result = (result << 1) | 3182 ((readl(&regs->LocalCtrl) & EEPROM_DATA_IN) != 0); 3183 udelay(ACE_SHORT_DELAY); 3184 mb(); 3185 local = readl(&regs->LocalCtrl); 3186 local &= ~EEPROM_CLK_OUT; 3187 writel(local, &regs->LocalCtrl); 3188 readl(&regs->LocalCtrl); 3189 udelay(ACE_SHORT_DELAY); 3190 mb(); 3191 if (i == 7) { 3192 local |= EEPROM_WRITE_ENABLE; 3193 writel(local, &regs->LocalCtrl); 3194 readl(&regs->LocalCtrl); 3195 mb(); 3196 udelay(ACE_SHORT_DELAY); 3197 } 3198 } 3199 3200 local |= EEPROM_DATA_OUT; 3201 writel(local, &regs->LocalCtrl); 3202 readl(&regs->LocalCtrl); 3203 mb(); 3204 udelay(ACE_SHORT_DELAY); 3205 writel(readl(&regs->LocalCtrl) | EEPROM_CLK_OUT, &regs->LocalCtrl); 3206 readl(&regs->LocalCtrl); 3207 udelay(ACE_LONG_DELAY); 3208 writel(readl(&regs->LocalCtrl) & ~EEPROM_CLK_OUT, &regs->LocalCtrl); 3209 readl(&regs->LocalCtrl); 3210 mb(); 3211 udelay(ACE_SHORT_DELAY); 3212 eeprom_stop(regs); 3213 3214 local_irq_restore(flags); 3215 out: 3216 return result; 3217 3218 eeprom_read_error: 3219 printk(KERN_ERR "%s: Unable to read eeprom byte 0x%02lx\n", 3220 ap->name, offset); 3221 goto out; 3222} 3223 3224 3225/* 3226 * Local variables: 3227 * compile-command: "gcc -D__SMP__ -D__KERNEL__ -DMODULE -I../../include -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -pipe -fno-strength-reduce -DMODVERSIONS -include ../../include/linux/modversions.h -c -o acenic.o acenic.c" 3228 * End: 3229 */