cassini.c, from the Linux kernel mirror at git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, tag v2.6.28-rc4 (5370 lines, 145 kB; excerpt below)
/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
 *
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 *
 * This driver uses the sungem driver (c) David Miller
 * (davem@redhat.com) as its basis.
 *
 * The cassini chip has a number of features that distinguish it from
 * the gem chip:
 *  4 transmit descriptor rings that are used for either QoS (VLAN) or
 *      load balancing (non-VLAN mode)
 *  batching of multiple packets
 *  multiple CPU dispatching
 *  page-based RX descriptor engine with separate completion rings
 *  Gigabit support (GMII and PCS interface)
 *  MIF link up/down detection works
 *
 * RX is handled by page sized buffers that are attached as fragments to
 * the skb. here's what's done:
 *  -- driver allocates pages at a time and keeps reference counts
 *     on them.
 *  -- the upper protocol layers assume that the header is in the skb
 *     itself. as a result, cassini will copy a small amount (64 bytes)
 *     to make them happy.
 *  -- driver appends the rest of the data pages as frags to skbuffs
 *     and increments the reference count
 *  -- on page reclamation, the driver swaps the page with a spare page.
 *     if that page is still in use, it frees its reference to that page,
 *     and allocates a new page for use. otherwise, it just recycles the
 *     page.
 *
 * NOTE: cassini can parse the header. however, it's not worth it
 *       as long as the network stack requires a header copy.
 *
 * TX has 4 queues. currently these queues are used in a round-robin
 * fashion for load balancing. They can also be used for QoS. for that
 * to work, however, QoS information needs to be exposed down to the driver
 * level so that subqueues get targeted to particular transmit rings.
 * alternatively, the queues can be configured via use of the all-purpose
 * ioctl.
 *
 * RX DATA: the rx completion ring has all the info, but the rx desc
 * ring has all of the data. RX can conceivably come in under multiple
 * interrupts, but the INT# assignment needs to be set up properly by
 * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
 * that. also, the two descriptor rings are designed to distinguish between
 * encrypted and non-encrypted packets, but we use them for buffering
 * instead.
 *
 * by default, the selective clear mask is set up to process rx packets.
 */
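/* Illustrative sketch only (not part of the driver, kept out of the
 * build with #if 0): the RX scheme described above, reduced to its
 * essentials. Copy a small header into the skb so the stack finds it
 * in skb->data, then attach the rest of the page as a fragment and
 * take an extra reference on the page. Assumes a lowmem page, and
 * omits DMA syncs and error handling; names are simplified.
 */
#if 0
static struct sk_buff *rx_frag_sketch(struct page *pg, int off, int len)
{
	struct sk_buff *skb = dev_alloc_skb(RX_COPY_MIN);

	if (!skb)
		return NULL;
	/* small copy keeps the header where the stack expects it */
	memcpy(skb_put(skb, RX_COPY_MIN), page_address(pg) + off, RX_COPY_MIN);
	/* the remainder rides along as a page fragment */
	get_page(pg);
	skb_fill_page_desc(skb, 0, pg, off + RX_COPY_MIN, len - RX_COPY_MIN);
	skb->len      += len - RX_COPY_MIN;
	skb->data_len += len - RX_COPY_MIN;
	skb->truesize += len - RX_COPY_MIN;
	return skb;
}
#endif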

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/mii.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/firmware.h>

#include <net/checksum.h>

#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#define cas_page_map(x)      kmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
#define cas_page_unmap(x)    kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
#define CAS_NCPUS            num_online_cpus()

#if defined(CONFIG_CASSINI_NAPI) && defined(HAVE_NETDEV_POLL)
#define USE_NAPI
#define cas_skb_release(x)  netif_receive_skb(x)
#else
#define cas_skb_release(x)  netif_rx(x)
#endif

/* select which firmware to use */
#define USE_HP_WORKAROUND
#define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */
#define CAS_HP_ALT_FIRMWARE   cas_prog_null /* alternate firmware */

#include "cassini.h"

#define USE_TX_COMPWB      /* use completion writeback registers */
#define USE_CSMA_CD_PROTO  /* standard CSMA/CD */
#define USE_RX_BLANK       /* hw interrupt mitigation */
#undef USE_ENTROPY_DEV     /* don't test for entropy device */

/* NOTE: these aren't useable unless PCI interrupts can be assigned.
 * also, we need to make cp->lock finer-grained.
 */
#undef  USE_PCI_INTB
#undef  USE_PCI_INTC
#undef  USE_PCI_INTD
#undef  USE_QOS

#undef  USE_VPD_DEBUG       /* debug vpd information if defined */

/* rx processing options */
#define USE_PAGE_ORDER      /* specify to allocate large rx pages */
#define RX_DONT_BATCH  0    /* if 1, don't batch flows */
#define RX_COPY_ALWAYS 0    /* if 0, use frags */
#define RX_COPY_MIN    64   /* copy a little to make upper layers happy */
#undef  RX_COUNT_BUFFERS    /* define to calculate RX buffer stats */

#define DRV_MODULE_NAME		"cassini"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.6"
#define DRV_MODULE_RELDATE	"21 May 2008"

#define CAS_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define CAS_TX_TIMEOUT			(HZ)
#define CAS_LINK_TIMEOUT                (22*HZ/10)
#define CAS_LINK_FAST_TIMEOUT           (1)

/* timeout values for state changing. these specify the number
 * of 10us delays to be used before giving up.
 */
#define STOP_TRIES_PHY 1000
#define STOP_TRIES     5000

/* specify a minimum frame size to deal with some fifo issues
 * max mtu == 2 * page size - ethernet header - 64 - swivel =
 *            2 * page_size - 0x50
 */
#define CAS_MIN_FRAME			97
#define CAS_1000MB_MIN_FRAME            255
#define CAS_MIN_MTU			60
#define CAS_MAX_MTU			min(((cp->page_size << 1) - 0x50), 9000)
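/* Worked example of the limits above (illustrative): the 0x50 (80 byte)
 * reduction is the 14-byte ethernet header + 64 + a 2-byte swivel, so
 * with 8K (0x2000) pages CAS_MAX_MTU = min((0x2000 << 1) - 0x50, 9000)
 * = min(16304, 9000) = 9000, i.e. jumbo frames cap at 9000 regardless
 * of page size.
 */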

#if 1
/*
 * Eliminate these and use separate atomic counters for each, to
 * avoid a race condition.
 */
#else
#define CAS_RESET_MTU                   1
#define CAS_RESET_ALL                   2
#define CAS_RESET_SPARE                 3
#endif

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

static int cassini_debug = -1;	/* -1 == use CAS_DEF_MSG_ENABLE as value */
static int link_mode;

MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("sun/cassini.bin");
module_param(cassini_debug, int, 0);
MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
module_param(link_mode, int, 0);
MODULE_PARM_DESC(link_mode, "default link mode");

/*
 * Work around for a PCS bug in which the link goes down due to the chip
 * being confused and never showing a link status of "up."
 */
#define DEFAULT_LINKDOWN_TIMEOUT 5
/*
 * Value in seconds, for user input.
 */
static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
module_param(linkdown_timeout, int, 0);
MODULE_PARM_DESC(linkdown_timeout,
"min reset interval in sec. for PCS linkdown issue; disabled if not positive");

/*
 * value in 'ticks' (units used by jiffies). Set when we init the
 * module because 'HZ' is actually a function call on some flavors of
 * Linux.  This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ.
 */
static int link_transition_timeout;



static u16 link_modes[] __devinitdata = {
	BMCR_ANENABLE,			 /* 0 : autoneg */
	0,				 /* 1 : 10bt half duplex */
	BMCR_SPEED100,			 /* 2 : 100bt half duplex */
	BMCR_FULLDPLX,			 /* 3 : 10bt full duplex */
	BMCR_SPEED100|BMCR_FULLDPLX,	 /* 4 : 100bt full duplex */
	CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
};

static struct pci_device_id cas_pci_tbl[] __devinitdata = {
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, cas_pci_tbl);

static void cas_set_link_modes(struct cas *cp);

static inline void cas_lock_tx(struct cas *cp)
{
	int i;

	for (i = 0; i < N_TX_RINGS; i++)
		spin_lock(&cp->tx_lock[i]);
}

static inline void cas_lock_all(struct cas *cp)
{
	spin_lock_irq(&cp->lock);
	cas_lock_tx(cp);
}

/* WTZ: QA was finding deadlock problems with the previous
 * versions after long test runs with multiple cards per machine.
 * See if replacing cas_lock_all with safer versions helps. The
 * symptoms QA is reporting match those we'd expect if interrupts
 * aren't being properly restored, and we fixed a previous deadlock
 * with similar symptoms by using save/restore versions in other
 * places.
 */
#define cas_lock_all_save(cp, flags) \
do { \
	struct cas *xxxcp = (cp); \
	spin_lock_irqsave(&xxxcp->lock, flags); \
	cas_lock_tx(xxxcp); \
} while (0)

static inline void cas_unlock_tx(struct cas *cp)
{
	int i;

	for (i = N_TX_RINGS; i > 0; i--)
		spin_unlock(&cp->tx_lock[i - 1]);
}

static inline void cas_unlock_all(struct cas *cp)
{
	cas_unlock_tx(cp);
	spin_unlock_irq(&cp->lock);
}

#define cas_unlock_all_restore(cp, flags) \
do { \
	struct cas *xxxcp = (cp); \
	cas_unlock_tx(xxxcp); \
	spin_unlock_irqrestore(&xxxcp->lock, flags); \
} while (0)
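/* Illustrative usage of the helpers above (not compiled): callers that
 * may run with interrupts enabled use the _save/_restore forms so the
 * IRQ state is preserved, per the deadlock note above.
 */
#if 0
static void cas_locked_op_example(struct cas *cp)
{
	unsigned long flags;

	cas_lock_all_save(cp, flags);
	/* ... touch state covered by cp->lock and the TX ring locks ... */
	cas_unlock_all_restore(cp, flags);
}
#endif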

static void cas_disable_irq(struct cas *cp, const int ring)
{
	/* Make sure we won't get any more interrupts */
	if (ring == 0) {
		writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
		return;
	}

	/* disable completion interrupts and selectively mask */
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		switch (ring) {
#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
		case 1:
#endif
#ifdef USE_PCI_INTC
		case 2:
#endif
#ifdef USE_PCI_INTD
		case 3:
#endif
			writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
			       cp->regs + REG_PLUS_INTRN_MASK(ring));
			break;
#endif
		default:
			writel(INTRN_MASK_CLEAR_ALL, cp->regs +
			       REG_PLUS_INTRN_MASK(ring));
			break;
		}
	}
}

static inline void cas_mask_intr(struct cas *cp)
{
	int i;

	for (i = 0; i < N_RX_COMP_RINGS; i++)
		cas_disable_irq(cp, i);
}

static void cas_enable_irq(struct cas *cp, const int ring)
{
	if (ring == 0) { /* all but TX_DONE */
		writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
		return;
	}

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		switch (ring) {
#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
		case 1:
#endif
#ifdef USE_PCI_INTC
		case 2:
#endif
#ifdef USE_PCI_INTD
		case 3:
#endif
			writel(INTRN_MASK_RX_EN, cp->regs +
			       REG_PLUS_INTRN_MASK(ring));
			break;
#endif
		default:
			break;
		}
	}
}

static inline void cas_unmask_intr(struct cas *cp)
{
	int i;

	for (i = 0; i < N_RX_COMP_RINGS; i++)
		cas_enable_irq(cp, i);
}

static inline void cas_entropy_gather(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
		return;

	batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
			    readl(cp->regs + REG_ENTROPY_IV),
			    sizeof(uint64_t)*8);
#endif
}

static inline void cas_entropy_reset(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
		return;

	writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
	       cp->regs + REG_BIM_LOCAL_DEV_EN);
	writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
	writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);

	/* if we read back 0x0, we don't have an entropy device */
	if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
		cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
#endif
}

/* access to the phy. the following assumes that we've initialized the MIF to
 * be in frame rather than bit-bang mode
 */
static u16 cas_phy_read(struct cas *cp, int reg)
{
	u32 cmd;
	int limit = STOP_TRIES_PHY;

	cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
	cmd |= MIF_FRAME_TURN_AROUND_MSB;
	writel(cmd, cp->regs + REG_MIF_FRAME);

	/* poll for completion */
	while (limit-- > 0) {
		udelay(10);
		cmd = readl(cp->regs + REG_MIF_FRAME);
		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
			return (cmd & MIF_FRAME_DATA_MASK);
	}
	return 0xFFFF; /* -1 */
}

static int cas_phy_write(struct cas *cp, int reg, u16 val)
{
	int limit = STOP_TRIES_PHY;
	u32 cmd;

	cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
	cmd |= MIF_FRAME_TURN_AROUND_MSB;
	cmd |= val & MIF_FRAME_DATA_MASK;
	writel(cmd, cp->regs + REG_MIF_FRAME);

	/* poll for completion */
	while (limit-- > 0) {
		udelay(10);
		cmd = readl(cp->regs + REG_MIF_FRAME);
		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
			return 0;
	}
	return -1;
}

static void cas_phy_powerup(struct cas *cp)
{
	u16 ctl = cas_phy_read(cp, MII_BMCR);

	if ((ctl & BMCR_PDOWN) == 0)
		return;
	ctl &= ~BMCR_PDOWN;
	cas_phy_write(cp, MII_BMCR, ctl);
}

static void cas_phy_powerdown(struct cas *cp)
{
	u16 ctl = cas_phy_read(cp, MII_BMCR);

	if (ctl & BMCR_PDOWN)
		return;
	ctl |= BMCR_PDOWN;
	cas_phy_write(cp, MII_BMCR, ctl);
}

/* cp->lock held. note: the last put_page will free the buffer */
static int cas_page_free(struct cas *cp, cas_page_t *page)
{
	pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
		       PCI_DMA_FROMDEVICE);
	__free_pages(page->buffer, cp->page_order);
	kfree(page);
	return 0;
}

#ifdef RX_COUNT_BUFFERS
#define RX_USED_ADD(x, y)       ((x)->used += (y))
#define RX_USED_SET(x, y)       ((x)->used  = (y))
#else
#define RX_USED_ADD(x, y)
#define RX_USED_SET(x, y)
#endif

/* local page allocation routines for the receive buffers. jumbo pages
 * require at least 8K contiguous and 8K aligned buffers.
 */
static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
{
	cas_page_t *page;

	page = kmalloc(sizeof(cas_page_t), flags);
	if (!page)
		return NULL;

	INIT_LIST_HEAD(&page->list);
	RX_USED_SET(page, 0);
	page->buffer = alloc_pages(flags, cp->page_order);
	if (!page->buffer)
		goto page_err;
	page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
				      cp->page_size, PCI_DMA_FROMDEVICE);
	return page;

page_err:
	kfree(page);
	return NULL;
}

/* initialize spare pool of rx buffers, but allocate during the open */
static void cas_spare_init(struct cas *cp)
{
	spin_lock(&cp->rx_inuse_lock);
	INIT_LIST_HEAD(&cp->rx_inuse_list);
	spin_unlock(&cp->rx_inuse_lock);

	spin_lock(&cp->rx_spare_lock);
	INIT_LIST_HEAD(&cp->rx_spare_list);
	cp->rx_spares_needed = RX_SPARE_COUNT;
	spin_unlock(&cp->rx_spare_lock);
}

/* used on close. free all the spare buffers. */
static void cas_spare_free(struct cas *cp)
{
	struct list_head list, *elem, *tmp;

	/* free spare buffers */
	INIT_LIST_HEAD(&list);
	spin_lock(&cp->rx_spare_lock);
	list_splice_init(&cp->rx_spare_list, &list);
	spin_unlock(&cp->rx_spare_lock);
	list_for_each_safe(elem, tmp, &list) {
		cas_page_free(cp, list_entry(elem, cas_page_t, list));
	}

	INIT_LIST_HEAD(&list);
#if 1
	/*
	 * Looks like Adrian had protected this with a different
	 * lock than used everywhere else to manipulate this list.
	 */
	spin_lock(&cp->rx_inuse_lock);
	list_splice_init(&cp->rx_inuse_list, &list);
	spin_unlock(&cp->rx_inuse_lock);
#else
	spin_lock(&cp->rx_spare_lock);
	list_splice_init(&cp->rx_inuse_list, &list);
	spin_unlock(&cp->rx_spare_lock);
#endif
	list_for_each_safe(elem, tmp, &list) {
		cas_page_free(cp, list_entry(elem, cas_page_t, list));
	}
}

/* replenish spares if needed */
static void cas_spare_recover(struct cas *cp, const gfp_t flags)
{
	struct list_head list, *elem, *tmp;
	int needed, i;

	/* check inuse list. if we don't need any more free buffers,
	 * just free it
	 */

	/* make a local copy of the list */
	INIT_LIST_HEAD(&list);
	spin_lock(&cp->rx_inuse_lock);
	list_splice_init(&cp->rx_inuse_list, &list);
	spin_unlock(&cp->rx_inuse_lock);

	list_for_each_safe(elem, tmp, &list) {
		cas_page_t *page = list_entry(elem, cas_page_t, list);

		/*
		 * With the lockless pagecache, cassini buffering scheme gets
		 * slightly less accurate: we might find that a page has an
		 * elevated reference count here, due to a speculative ref,
		 * and skip it as in-use. Ideally we would be able to reclaim
		 * it. However this would be such a rare case, it doesn't
		 * matter too much as we should pick it up the next time round.
		 *
		 * Importantly, if we find that the page has a refcount of 1
		 * here (our refcount), then we know it is definitely not inuse
		 * so we can reuse it.
		 */
		if (page_count(page->buffer) > 1)
			continue;

		list_del(elem);
		spin_lock(&cp->rx_spare_lock);
		if (cp->rx_spares_needed > 0) {
			list_add(elem, &cp->rx_spare_list);
			cp->rx_spares_needed--;
			spin_unlock(&cp->rx_spare_lock);
		} else {
			spin_unlock(&cp->rx_spare_lock);
			cas_page_free(cp, page);
		}
	}

	/* put any inuse buffers back on the list */
	if (!list_empty(&list)) {
		spin_lock(&cp->rx_inuse_lock);
		list_splice(&list, &cp->rx_inuse_list);
		spin_unlock(&cp->rx_inuse_lock);
	}

	spin_lock(&cp->rx_spare_lock);
	needed = cp->rx_spares_needed;
	spin_unlock(&cp->rx_spare_lock);
	if (!needed)
		return;

	/* we still need spares, so try to allocate some */
	INIT_LIST_HEAD(&list);
	i = 0;
	while (i < needed) {
		cas_page_t *spare = cas_page_alloc(cp, flags);
		if (!spare)
			break;
		list_add(&spare->list, &list);
		i++;
	}

	spin_lock(&cp->rx_spare_lock);
	list_splice(&list, &cp->rx_spare_list);
	cp->rx_spares_needed -= i;
	spin_unlock(&cp->rx_spare_lock);
}

/* pull a page from the list. */
static cas_page_t *cas_page_dequeue(struct cas *cp)
{
	struct list_head *entry;
	int recover;

	spin_lock(&cp->rx_spare_lock);
	if (list_empty(&cp->rx_spare_list)) {
		/* try to do a quick recovery */
		spin_unlock(&cp->rx_spare_lock);
		cas_spare_recover(cp, GFP_ATOMIC);
		spin_lock(&cp->rx_spare_lock);
		if (list_empty(&cp->rx_spare_list)) {
			if (netif_msg_rx_err(cp))
				printk(KERN_ERR "%s: no spare buffers "
				       "available.\n", cp->dev->name);
			spin_unlock(&cp->rx_spare_lock);
			return NULL;
		}
	}

	entry = cp->rx_spare_list.next;
	list_del(entry);
	recover = ++cp->rx_spares_needed;
	spin_unlock(&cp->rx_spare_lock);

	/* trigger the timer to do the recovery */
	if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
#if 1
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_spare);
		schedule_work(&cp->reset_task);
#else
		atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
		schedule_work(&cp->reset_task);
#endif
	}
	return list_entry(entry, cas_page_t, list);
}


static void cas_mif_poll(struct cas *cp, const int enable)
{
	u32 cfg;

	cfg  = readl(cp->regs + REG_MIF_CFG);
	cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);

	if (cp->phy_type & CAS_PHY_MII_MDIO1)
		cfg |= MIF_CFG_PHY_SELECT;

	/* poll and interrupt on link status change. */
	if (enable) {
		cfg |= MIF_CFG_POLL_EN;
		cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
		cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
	}
	writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
	       cp->regs + REG_MIF_MASK);
	writel(cfg, cp->regs + REG_MIF_CFG);
}

/* Must be invoked under cp->lock */
static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
{
	u16 ctl;
#if 1
	int lcntl;
	int changed = 0;
	int oldstate = cp->lstate;
	int link_was_not_down = !(oldstate == link_down);
#endif
	/* Setup link parameters */
	if (!ep)
		goto start_aneg;
	lcntl = cp->link_cntl;
	if (ep->autoneg == AUTONEG_ENABLE)
		cp->link_cntl = BMCR_ANENABLE;
	else {
		cp->link_cntl = 0;
		if (ep->speed == SPEED_100)
			cp->link_cntl |= BMCR_SPEED100;
		else if (ep->speed == SPEED_1000)
			cp->link_cntl |= CAS_BMCR_SPEED1000;
		if (ep->duplex == DUPLEX_FULL)
			cp->link_cntl |= BMCR_FULLDPLX;
	}
#if 1
	changed = (lcntl != cp->link_cntl);
#endif
start_aneg:
	if (cp->lstate == link_up) {
		printk(KERN_INFO "%s: PCS link down.\n",
		       cp->dev->name);
	} else {
		if (changed) {
			printk(KERN_INFO "%s: link configuration changed\n",
			       cp->dev->name);
		}
	}
	cp->lstate = link_down;
	cp->link_transition = LINK_TRANSITION_LINK_DOWN;
	if (!cp->hw_running)
		return;
#if 1
	/*
	 * WTZ: If the old state was link_up, we turn off the carrier
	 * to replicate everything we do elsewhere on a link-down
	 * event when we were already in a link-up state.
	 */
	if (oldstate == link_up)
		netif_carrier_off(cp->dev);
	if (changed && link_was_not_down) {
		/*
		 * WTZ: This branch will simply schedule a full reset after
		 * we explicitly changed link modes in an ioctl. See if this
		 * fixes the link-problems we were having for forced mode.
		 */
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_all);
		schedule_work(&cp->reset_task);
		cp->timer_ticks = 0;
		mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
		return;
	}
#endif
	if (cp->phy_type & CAS_PHY_SERDES) {
		u32 val = readl(cp->regs + REG_PCS_MII_CTRL);

		if (cp->link_cntl & BMCR_ANENABLE) {
			val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
			cp->lstate = link_aneg;
		} else {
			if (cp->link_cntl & BMCR_FULLDPLX)
				val |= PCS_MII_CTRL_DUPLEX;
			val &= ~PCS_MII_AUTONEG_EN;
			cp->lstate = link_force_ok;
		}
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		writel(val, cp->regs + REG_PCS_MII_CTRL);

	} else {
		cas_mif_poll(cp, 0);
		ctl = cas_phy_read(cp, MII_BMCR);
		ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
			 CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
		ctl |= cp->link_cntl;
		if (ctl & BMCR_ANENABLE) {
			ctl |= BMCR_ANRESTART;
			cp->lstate = link_aneg;
		} else {
			cp->lstate = link_force_ok;
		}
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		cas_phy_write(cp, MII_BMCR, ctl);
		cas_mif_poll(cp, 1);
	}

	cp->timer_ticks = 0;
	mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
}

/* Must be invoked under cp->lock. */
static int cas_reset_mii_phy(struct cas *cp)
{
	int limit = STOP_TRIES_PHY;
	u16 val;

	cas_phy_write(cp, MII_BMCR, BMCR_RESET);
	udelay(100);
	while (limit--) {
		val = cas_phy_read(cp, MII_BMCR);
		if ((val & BMCR_RESET) == 0)
			break;
		udelay(10);
	}
	return (limit <= 0);
}

static int cas_saturn_firmware_init(struct cas *cp)
{
	const struct firmware *fw;
	const char fw_name[] = "sun/cassini.bin";
	int err;

	if (PHY_NS_DP83065 != cp->phy_id)
		return 0;

	err = request_firmware(&fw, fw_name, &cp->pdev->dev);
	if (err) {
		printk(KERN_ERR "cassini: Failed to load firmware \"%s\"\n",
		       fw_name);
		return err;
	}
	if (fw->size < 2) {
		printk(KERN_ERR "cassini: bogus length %zu in \"%s\"\n",
		       fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}
	cp->fw_load_addr = fw->data[1] << 8 | fw->data[0];
	cp->fw_size = fw->size - 2;
	cp->fw_data = vmalloc(cp->fw_size);
	if (!cp->fw_data) {
		err = -ENOMEM;
		printk(KERN_ERR "cassini: \"%s\" Failed %d\n", fw_name, err);
		goto out;
	}
	memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
out:
	release_firmware(fw);
	return err;
}

static void cas_saturn_firmware_load(struct cas *cp)
{
	int i;

	cas_phy_powerdown(cp);

	/* expanded memory access mode */
	cas_phy_write(cp, DP83065_MII_MEM, 0x0);

	/* pointer configuration for new firmware */
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
	cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
	cas_phy_write(cp, DP83065_MII_REGD, 0x82);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
	cas_phy_write(cp, DP83065_MII_REGD, 0x0);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
	cas_phy_write(cp, DP83065_MII_REGD, 0x39);

	/* download new firmware */
	cas_phy_write(cp, DP83065_MII_MEM, 0x1);
	cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
	for (i = 0; i < cp->fw_size; i++)
		cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);

	/* enable firmware */
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
	cas_phy_write(cp, DP83065_MII_REGD, 0x1);
}
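/* Layout of "sun/cassini.bin" as parsed and loaded above (for
 * reference): bytes 0-1 hold the little-endian PHY load address
 * (fw_load_addr = data[1] << 8 | data[0]); everything from byte 2 on
 * is raw firmware, written one byte at a time through DP83065_MII_REGD
 * after the 0x8ff9-0x8ffc pointer setup.
 */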


/* phy initialization */
static void cas_phy_init(struct cas *cp)
{
	u16 val;

	/* if we're in MII/GMII mode, set up phy */
	if (CAS_PHY_MII(cp->phy_type)) {
		writel(PCS_DATAPATH_MODE_MII,
		       cp->regs + REG_PCS_DATAPATH_MODE);

		cas_mif_poll(cp, 0);
		cas_reset_mii_phy(cp); /* take out of isolate mode */

		if (PHY_LUCENT_B0 == cp->phy_id) {
			/* workaround link up/down issue with lucent */
			cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
			cas_phy_write(cp, MII_BMCR, 0x00f1);
			cas_phy_write(cp, LUCENT_MII_REG, 0x0);

		} else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
			/* workarounds for broadcom phy */
			cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);

		} else if (PHY_BROADCOM_5411 == cp->phy_id) {
			val = cas_phy_read(cp, BROADCOM_MII_REG4);
			val = cas_phy_read(cp, BROADCOM_MII_REG4);
			if (val & 0x0080) {
				/* link workaround */
				cas_phy_write(cp, BROADCOM_MII_REG4,
					      val & ~0x0080);
			}

		} else if (cp->cas_flags & CAS_FLAG_SATURN) {
			writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
			       SATURN_PCFG_FSI : 0x0,
			       cp->regs + REG_SATURN_PCFG);

			/* load firmware to address 10Mbps auto-negotiation
			 * issue. NOTE: this will need to be changed if the
			 * default firmware gets fixed.
			 */
			if (PHY_NS_DP83065 == cp->phy_id) {
				cas_saturn_firmware_load(cp);
			}
			cas_phy_powerup(cp);
		}

		/* advertise capabilities */
		val = cas_phy_read(cp, MII_BMCR);
		val &= ~BMCR_ANENABLE;
		cas_phy_write(cp, MII_BMCR, val);
		udelay(10);

		cas_phy_write(cp, MII_ADVERTISE,
			      cas_phy_read(cp, MII_ADVERTISE) |
			      (ADVERTISE_10HALF | ADVERTISE_10FULL |
			       ADVERTISE_100HALF | ADVERTISE_100FULL |
			       CAS_ADVERTISE_PAUSE |
			       CAS_ADVERTISE_ASYM_PAUSE));

		if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
			/* make sure that we don't advertise half
			 * duplex to avoid a chip issue
			 */
			val  = cas_phy_read(cp, CAS_MII_1000_CTRL);
			val &= ~CAS_ADVERTISE_1000HALF;
			val |= CAS_ADVERTISE_1000FULL;
			cas_phy_write(cp, CAS_MII_1000_CTRL, val);
		}

	} else {
		/* reset pcs for serdes */
		u32 val;
		int limit;

		writel(PCS_DATAPATH_MODE_SERDES,
		       cp->regs + REG_PCS_DATAPATH_MODE);

		/* enable serdes pins on saturn */
		if (cp->cas_flags & CAS_FLAG_SATURN)
			writel(0, cp->regs + REG_SATURN_PCFG);

		/* Reset PCS unit. */
		val = readl(cp->regs + REG_PCS_MII_CTRL);
		val |= PCS_MII_RESET;
		writel(val, cp->regs + REG_PCS_MII_CTRL);

		limit = STOP_TRIES;
		while (limit-- > 0) {
			udelay(10);
			if ((readl(cp->regs + REG_PCS_MII_CTRL) &
			     PCS_MII_RESET) == 0)
				break;
		}
		if (limit <= 0)
			printk(KERN_WARNING "%s: PCS reset bit would not "
			       "clear [%08x].\n", cp->dev->name,
			       readl(cp->regs + REG_PCS_STATE_MACHINE));

		/* Make sure PCS is disabled while changing advertisement
		 * configuration.
		 */
		writel(0x0, cp->regs + REG_PCS_CFG);

		/* Advertise all capabilities except half-duplex. */
		val  = readl(cp->regs + REG_PCS_MII_ADVERT);
		val &= ~PCS_MII_ADVERT_HD;
		val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
			PCS_MII_ADVERT_ASYM_PAUSE);
		writel(val, cp->regs + REG_PCS_MII_ADVERT);

		/* enable PCS */
		writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);

		/* pcs workaround: enable sync detect */
		writel(PCS_SERDES_CTRL_SYNCD_EN,
		       cp->regs + REG_PCS_SERDES_CTRL);
	}
}


static int cas_pcs_link_check(struct cas *cp)
{
	u32 stat, state_machine;
	int retval = 0;

	/* The link status bit latches on zero, so you must
	 * read it twice in such a case to see a transition
	 * to the link being up.
	 */
	stat = readl(cp->regs + REG_PCS_MII_STATUS);
	if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
		stat = readl(cp->regs + REG_PCS_MII_STATUS);

	/* The remote-fault indication is only valid
	 * when autoneg has completed.
	 */
	if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
		     PCS_MII_STATUS_REMOTE_FAULT)) ==
	    (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT)) {
		if (netif_msg_link(cp))
			printk(KERN_INFO "%s: PCS RemoteFault\n",
			       cp->dev->name);
	}

	/* work around link detection issue by querying the PCS state
	 * machine directly.
	 */
	state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
	if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
		stat &= ~PCS_MII_STATUS_LINK_STATUS;
	} else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
		stat |= PCS_MII_STATUS_LINK_STATUS;
	}

	if (stat & PCS_MII_STATUS_LINK_STATUS) {
		if (cp->lstate != link_up) {
			if (cp->opened) {
				cp->lstate = link_up;
				cp->link_transition = LINK_TRANSITION_LINK_UP;

				cas_set_link_modes(cp);
				netif_carrier_on(cp->dev);
			}
		}
	} else if (cp->lstate == link_up) {
		cp->lstate = link_down;
		if (link_transition_timeout != 0 &&
		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
		    !cp->link_transition_jiffies_valid) {
			/*
			 * force a reset, as a workaround for the
			 * link-failure problem. May want to move this to a
			 * point a bit earlier in the sequence. If we had
			 * generated a reset a short time ago, we'll wait for
			 * the link timer to check the status until a
			 * timer expires (link_transition_jiffies_valid is
			 * true when the timer is running.)  Instead of using
			 * a system timer, we just do a check whenever the
			 * link timer is running - this clears the flag after
			 * a suitable delay.
			 */
			retval = 1;
			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
			cp->link_transition_jiffies = jiffies;
			cp->link_transition_jiffies_valid = 1;
		} else {
			cp->link_transition = LINK_TRANSITION_ON_FAILURE;
		}
		netif_carrier_off(cp->dev);
		if (cp->opened && netif_msg_link(cp)) {
			printk(KERN_INFO "%s: PCS link down.\n",
			       cp->dev->name);
		}

		/* Cassini only: if you force a mode, there can be
		 * sync problems on link down. to fix that, the following
		 * things need to be checked:
		 * 1) read serialink state register
		 * 2) read pcs status register to verify link down.
		 * 3) if link down and serial link == 0x03, then you need
		 *    to global reset the chip.
		 */
		if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
			/* should check to see if we're in a forced mode */
			stat = readl(cp->regs + REG_PCS_SERDES_STATE);
			if (stat == 0x03)
				return 1;
		}
	} else if (cp->lstate == link_down) {
		if (link_transition_timeout != 0 &&
		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
		    !cp->link_transition_jiffies_valid) {
			/* force a reset, as a workaround for the
			 * link-failure problem. May want to move
			 * this to a point a bit earlier in the
			 * sequence.
			 */
			retval = 1;
			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
			cp->link_transition_jiffies = jiffies;
			cp->link_transition_jiffies_valid = 1;
		} else {
			cp->link_transition = LINK_TRANSITION_STILL_FAILED;
		}
	}

	return retval;
}

static int cas_pcs_interrupt(struct net_device *dev,
			     struct cas *cp, u32 status)
{
	u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);

	if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
		return 0;
	return cas_pcs_link_check(cp);
}

static int cas_txmac_interrupt(struct net_device *dev,
			       struct cas *cp, u32 status)
{
	u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);

	if (!txmac_stat)
		return 0;

	if (netif_msg_intr(cp))
		printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
			cp->dev->name, txmac_stat);

	/* Defer timer expiration is quite normal,
	 * don't even log the event.
	 */
	if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
	    !(txmac_stat & ~MAC_TX_DEFER_TIMER))
		return 0;

	spin_lock(&cp->stat_lock[0]);
	if (txmac_stat & MAC_TX_UNDERRUN) {
		printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
		       dev->name);
		cp->net_stats[0].tx_fifo_errors++;
	}

	if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
		printk(KERN_ERR "%s: TX MAC max packet size error.\n",
		       dev->name);
		cp->net_stats[0].tx_errors++;
	}

	/* The rest are all cases of one of the 16-bit TX
	 * counters expiring.
	 */
	if (txmac_stat & MAC_TX_COLL_NORMAL)
		cp->net_stats[0].collisions += 0x10000;

	if (txmac_stat & MAC_TX_COLL_EXCESS) {
		cp->net_stats[0].tx_aborted_errors += 0x10000;
		cp->net_stats[0].collisions += 0x10000;
	}

	if (txmac_stat & MAC_TX_COLL_LATE) {
		cp->net_stats[0].tx_aborted_errors += 0x10000;
		cp->net_stats[0].collisions += 0x10000;
	}
	spin_unlock(&cp->stat_lock[0]);

	/* We do not keep track of MAC_TX_COLL_FIRST and
	 * MAC_TX_PEAK_ATTEMPTS events.
	 */
	return 0;
}

static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
{
	cas_hp_inst_t *inst;
	u32 val;
	int i;

	i = 0;
	while ((inst = firmware) && inst->note) {
		writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);

		val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
		val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);

		val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
		val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
		val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
		val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
		val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
		val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
		val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);

		val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
		++firmware;
		++i;
	}
}

static void cas_init_rx_dma(struct cas *cp)
{
	u64 desc_dma = cp->block_dvma;
	u32 val;
	int i, size;

	/* rx free descriptors */
	val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
	val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
	val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
	if ((N_RX_DESC_RINGS > 1) &&
	    (cp->cas_flags & CAS_FLAG_REG_PLUS))  /* do desc 2 */
		val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
	writel(val, cp->regs + REG_RX_CFG);

	val = (unsigned long) cp->init_rxds[0] -
		(unsigned long) cp->init_block;
	writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
	writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		/* rx desc 2 is for IPSEC packets. however,
		 * we don't use it for that purpose.
		 */
		val = (unsigned long) cp->init_rxds[1] -
			(unsigned long) cp->init_block;
		writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
		writel((desc_dma + val) & 0xffffffff, cp->regs +
		       REG_PLUS_RX_DB1_LOW);
		writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
		       REG_PLUS_RX_KICK1);
	}

	/* rx completion registers */
	val = (unsigned long) cp->init_rxcs[0] -
		(unsigned long) cp->init_block;
	writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		/* rx comp 2-4 */
		for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
			val = (unsigned long) cp->init_rxcs[i] -
				(unsigned long) cp->init_block;
			writel((desc_dma + val) >> 32, cp->regs +
			       REG_PLUS_RX_CBN_HI(i));
			writel((desc_dma + val) & 0xffffffff, cp->regs +
			       REG_PLUS_RX_CBN_LOW(i));
		}
	}

	/* read selective clear regs to prevent spurious interrupts
	 * on reset because complete == kick.
	 * selective clear set up to prevent interrupts on resets
	 */
	readl(cp->regs + REG_INTR_STATUS_ALIAS);
	writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		for (i = 1; i < N_RX_COMP_RINGS; i++)
			readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));

		/* 2 is different from 3 and 4 */
		if (N_RX_COMP_RINGS > 1)
			writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
			       cp->regs + REG_PLUS_ALIASN_CLEAR(1));

		for (i = 2; i < N_RX_COMP_RINGS; i++)
			writel(INTR_RX_DONE_ALT,
			       cp->regs + REG_PLUS_ALIASN_CLEAR(i));
	}

	/* set up pause thresholds */
	val  = CAS_BASE(RX_PAUSE_THRESH_OFF,
			cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
	val |= CAS_BASE(RX_PAUSE_THRESH_ON,
			cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
	writel(val, cp->regs + REG_RX_PAUSE_THRESH);

	/* zero out dma reassembly buffers */
	for (i = 0; i < 64; i++) {
		writel(i, cp->regs + REG_RX_TABLE_ADDR);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
	}

	/* make sure address register is 0 for normal operation */
	writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
	writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);

	/* interrupt mitigation */
#ifdef USE_RX_BLANK
	val  = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
	val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
	writel(val, cp->regs + REG_RX_BLANK);
#else
	writel(0x0, cp->regs + REG_RX_BLANK);
#endif

	/* interrupt generation as a function of low water marks for
	 * free desc and completion entries. these are used to trigger
	 * housekeeping for rx descs. we don't use the free interrupt
	 * as it's not very useful
	 */
	/* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */
	val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
	writel(val, cp->regs + REG_RX_AE_THRESH);
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
		writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
	}

	/* Random early detect registers. useful for congestion avoidance.
	 * this should be tunable.
	 */
	writel(0x0, cp->regs + REG_RX_RED);

	/* receive page sizes. default == 2K (0x800) */
	val = 0;
	if (cp->page_size == 0x1000)
		val = 0x1;
	else if (cp->page_size == 0x2000)
		val = 0x2;
	else if (cp->page_size == 0x4000)
		val = 0x3;

	/* round mtu + offset. constrain to page size. */
	size = cp->dev->mtu + 64;
	if (size > cp->page_size)
		size = cp->page_size;

	if (size <= 0x400)
		i = 0x0;
	else if (size <= 0x800)
		i = 0x1;
	else if (size <= 0x1000)
		i = 0x2;
	else
		i = 0x3;

	cp->mtu_stride = 1 << (i + 10);
	val  = CAS_BASE(RX_PAGE_SIZE, val);
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
	writel(val, cp->regs + REG_RX_PAGE_SIZE);

	/* enable the header parser if desired */
	if (CAS_HP_FIRMWARE == cas_prog_null)
		return;

	val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
	val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
	val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
	writel(val, cp->regs + REG_HP_CFG);
}
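/* Worked example of the sizing above (illustrative): with a 1500-byte
 * MTU, size = 1500 + 64 = 1564, which falls in the <= 0x800 bucket, so
 * i = 1 and mtu_stride = 1 << 11 = 2048; an 8K page then holds
 * page_size >> 11 = 4 MTU-sized buffers.
 */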

static inline void cas_rxc_init(struct cas_rx_comp *rxc)
{
	memset(rxc, 0, sizeof(*rxc));
	rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
}

/* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
 * flipping is protected by the fact that the chip will not
 * hand back the same page index while it's being processed.
 */
static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
{
	cas_page_t *page = cp->rx_pages[1][index];
	cas_page_t *new;

	if (page_count(page->buffer) == 1)
		return page;

	new = cas_page_dequeue(cp);
	if (new) {
		spin_lock(&cp->rx_inuse_lock);
		list_add(&page->list, &cp->rx_inuse_list);
		spin_unlock(&cp->rx_inuse_lock);
	}
	return new;
}

/* this needs to be changed if we actually use the ENC RX DESC ring */
static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
				 const int index)
{
	cas_page_t **page0 = cp->rx_pages[0];
	cas_page_t **page1 = cp->rx_pages[1];

	/* swap if buffer is in use */
	if (page_count(page0[index]->buffer) > 1) {
		cas_page_t *new = cas_page_spare(cp, index);
		if (new) {
			page1[index] = page0[index];
			page0[index] = new;
		}
	}
	RX_USED_SET(page0[index], 0);
	return page0[index];
}

static void cas_clean_rxds(struct cas *cp)
{
	/* only clean ring 0 as ring 1 is used for spare buffers */
	struct cas_rx_desc *rxd = cp->init_rxds[0];
	int i, size;

	/* release all rx flows */
	for (i = 0; i < N_RX_FLOWS; i++) {
		struct sk_buff *skb;
		while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
			cas_skb_release(skb);
		}
	}

	/* initialize descriptors */
	size = RX_DESC_RINGN_SIZE(0);
	for (i = 0; i < size; i++) {
		cas_page_t *page = cas_page_swap(cp, 0, i);
		rxd[i].buffer = cpu_to_le64(page->dma_addr);
		rxd[i].index  = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
					    CAS_BASE(RX_INDEX_RING, 0));
	}

	cp->rx_old[0]  = RX_DESC_RINGN_SIZE(0) - 4;
	cp->rx_last[0] = 0;
	cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
}

static void cas_clean_rxcs(struct cas *cp)
{
	int i, j;

	/* take ownership of rx comp descriptors */
	memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
	memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
	for (i = 0; i < N_RX_COMP_RINGS; i++) {
		struct cas_rx_comp *rxc = cp->init_rxcs[i];
		for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
			cas_rxc_init(rxc + j);
		}
	}
}

#if 0
/* When we get a RX fifo overflow, the RX unit is probably hung
 * so we do the following.
 *
 * If any part of the reset goes wrong, we return 1 and that causes the
 * whole chip to be reset.
 */
static int cas_rxmac_reset(struct cas *cp)
{
	struct net_device *dev = cp->dev;
	int limit;
	u32 val;

	/* First, reset MAC RX. */
	writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	for (limit = 0; limit < STOP_TRIES; limit++) {
		if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
			break;
		udelay(10);
	}
	if (limit == STOP_TRIES) {
		printk(KERN_ERR "%s: RX MAC will not disable, resetting whole "
		       "chip.\n", dev->name);
		return 1;
	}

	/* Second, disable RX DMA. */
	writel(0, cp->regs + REG_RX_CFG);
	for (limit = 0; limit < STOP_TRIES; limit++) {
		if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
			break;
		udelay(10);
	}
	if (limit == STOP_TRIES) {
		printk(KERN_ERR "%s: RX DMA will not disable, resetting whole "
		       "chip.\n", dev->name);
		return 1;
	}

	mdelay(5);

	/* Execute RX reset command. */
	writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
	for (limit = 0; limit < STOP_TRIES; limit++) {
		if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
			break;
		udelay(10);
	}
	if (limit == STOP_TRIES) {
		printk(KERN_ERR "%s: RX reset command will not execute, "
		       "resetting whole chip.\n", dev->name);
		return 1;
	}

	/* reset driver rx state */
	cas_clean_rxds(cp);
	cas_clean_rxcs(cp);

	/* Now, reprogram the rest of RX unit. */
	cas_init_rx_dma(cp);

	/* re-enable */
	val = readl(cp->regs + REG_RX_CFG);
	writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
	writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
	val = readl(cp->regs + REG_MAC_RX_CFG);
	writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	return 0;
}
#endif

static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
			       u32 status)
{
	u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);

	if (!stat)
		return 0;

	if (netif_msg_intr(cp))
		printk(KERN_DEBUG "%s: rxmac interrupt, stat: 0x%x\n",
			cp->dev->name, stat);

	/* these are all rollovers */
	spin_lock(&cp->stat_lock[0]);
	if (stat & MAC_RX_ALIGN_ERR)
		cp->net_stats[0].rx_frame_errors += 0x10000;

	if (stat & MAC_RX_CRC_ERR)
		cp->net_stats[0].rx_crc_errors += 0x10000;

	if (stat & MAC_RX_LEN_ERR)
		cp->net_stats[0].rx_length_errors += 0x10000;

	if (stat & MAC_RX_OVERFLOW) {
		cp->net_stats[0].rx_over_errors++;
		cp->net_stats[0].rx_fifo_errors++;
	}

	/* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR
	 * events.
	 */
	spin_unlock(&cp->stat_lock[0]);
	return 0;
}

static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
			     u32 status)
{
	u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);

	if (!stat)
		return 0;

	if (netif_msg_intr(cp))
		printk(KERN_DEBUG "%s: mac interrupt, stat: 0x%x\n",
			cp->dev->name, stat);

	/* This interrupt is just for pause frame and pause
	 * tracking.  It is useful for diagnostics and debug
	 * but probably by default we will mask these events.
	 */
	if (stat & MAC_CTRL_PAUSE_STATE)
		cp->pause_entered++;

	if (stat & MAC_CTRL_PAUSE_RECEIVED)
		cp->pause_last_time_recvd = (stat >> 16);

	return 0;
}


/* Must be invoked under cp->lock. */
static inline int cas_mdio_link_not_up(struct cas *cp)
{
	u16 val;

	switch (cp->lstate) {
	case link_force_ret:
		if (netif_msg_link(cp))
			printk(KERN_INFO "%s: Autoneg failed again, keeping"
				" forced mode\n", cp->dev->name);
		cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
		cp->timer_ticks = 5;
		cp->lstate = link_force_ok;
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		break;

	case link_aneg:
		val = cas_phy_read(cp, MII_BMCR);

		/* Try forced modes. we try things in the following order:
		 * 1000 full -> 100 full/half -> 10 half
		 */
		val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
		val |= BMCR_FULLDPLX;
		val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
			CAS_BMCR_SPEED1000 : BMCR_SPEED100;
		cas_phy_write(cp, MII_BMCR, val);
		cp->timer_ticks = 5;
		cp->lstate = link_force_try;
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		break;

	case link_force_try:
		/* Downgrade from 1000 to 100 to 10 Mbps if necessary. */
		val = cas_phy_read(cp, MII_BMCR);
		cp->timer_ticks = 5;
		if (val & CAS_BMCR_SPEED1000) { /* gigabit */
			val &= ~CAS_BMCR_SPEED1000;
			val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
			cas_phy_write(cp, MII_BMCR, val);
			break;
		}

		if (val & BMCR_SPEED100) {
			if (val & BMCR_FULLDPLX) /* fd failed */
				val &= ~BMCR_FULLDPLX;
			else { /* 100Mbps failed */
				val &= ~BMCR_SPEED100;
			}
			cas_phy_write(cp, MII_BMCR, val);
			break;
		}
	default:
		break;
	}
	return 0;
}


/* must be invoked with cp->lock held */
static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
{
	int restart;

	if (bmsr & BMSR_LSTATUS) {
		/* Ok, here we got a link. If we had it due to a forced
		 * fallback, and we were configured for autoneg, we
		 * retry a short autoneg pass. If you know your hub is
		 * broken, use ethtool ;)
		 */
		if ((cp->lstate == link_force_try) &&
		    (cp->link_cntl & BMCR_ANENABLE)) {
			cp->lstate = link_force_ret;
			cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
			cas_mif_poll(cp, 0);
			cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
			cp->timer_ticks = 5;
			if (cp->opened && netif_msg_link(cp))
				printk(KERN_INFO "%s: Got link after fallback, retrying"
				       " autoneg once...\n", cp->dev->name);
			cas_phy_write(cp, MII_BMCR,
				      cp->link_fcntl | BMCR_ANENABLE |
				      BMCR_ANRESTART);
			cas_mif_poll(cp, 1);

		} else if (cp->lstate != link_up) {
			cp->lstate = link_up;
			cp->link_transition = LINK_TRANSITION_LINK_UP;

			if (cp->opened) {
				cas_set_link_modes(cp);
				netif_carrier_on(cp->dev);
			}
		}
		return 0;
	}

	/* link not up. if the link was previously up, we restart the
	 * whole process
	 */
	restart = 0;
	if (cp->lstate == link_up) {
		cp->lstate = link_down;
		cp->link_transition = LINK_TRANSITION_LINK_DOWN;

		netif_carrier_off(cp->dev);
		if (cp->opened && netif_msg_link(cp))
			printk(KERN_INFO "%s: Link down\n",
			       cp->dev->name);
		restart = 1;

	} else if (++cp->timer_ticks > 10)
		cas_mdio_link_not_up(cp);

	return restart;
}

static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
			     u32 status)
{
	u32 stat = readl(cp->regs + REG_MIF_STATUS);
	u16 bmsr;

	/* check for a link change */
	if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
		return 0;

	bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
	return cas_mii_link_check(cp, bmsr);
}

static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
			     u32 status)
{
	u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);

	if (!stat)
		return 0;

	printk(KERN_ERR "%s: PCI error [%04x:%04x] ", dev->name, stat,
	       readl(cp->regs + REG_BIM_DIAG));

	/* cassini+ has this reserved */
	if ((stat & PCI_ERR_BADACK) &&
	    ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
		printk("<No ACK64# during ABS64 cycle> ");

	if (stat & PCI_ERR_DTRTO)
		printk("<Delayed transaction timeout> ");
	if (stat & PCI_ERR_OTHER)
		printk("<other> ");
	if (stat & PCI_ERR_BIM_DMA_WRITE)
		printk("<BIM DMA 0 write req> ");
	if (stat & PCI_ERR_BIM_DMA_READ)
		printk("<BIM DMA 0 read req> ");
	printk("\n");

	if (stat & PCI_ERR_OTHER) {
		u16 cfg;

		/* Interrogate PCI config space for the
		 * true cause.
		 */
		pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
		printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n",
		       dev->name, cfg);
		if (cfg & PCI_STATUS_PARITY)
			printk(KERN_ERR "%s: PCI parity error detected.\n",
			       dev->name);
		if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
			printk(KERN_ERR "%s: PCI target abort.\n",
			       dev->name);
		if (cfg & PCI_STATUS_REC_TARGET_ABORT)
			printk(KERN_ERR "%s: PCI master acks target abort.\n",
			       dev->name);
		if (cfg & PCI_STATUS_REC_MASTER_ABORT)
			printk(KERN_ERR "%s: PCI master abort.\n", dev->name);
		if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
			printk(KERN_ERR "%s: PCI system error SERR#.\n",
			       dev->name);
		if (cfg & PCI_STATUS_DETECTED_PARITY)
			printk(KERN_ERR "%s: PCI parity error.\n",
			       dev->name);

		/* Write the error bits back to clear them. */
		cfg &= (PCI_STATUS_PARITY |
			PCI_STATUS_SIG_TARGET_ABORT |
			PCI_STATUS_REC_TARGET_ABORT |
			PCI_STATUS_REC_MASTER_ABORT |
			PCI_STATUS_SIG_SYSTEM_ERROR |
			PCI_STATUS_DETECTED_PARITY);
		pci_write_config_word(cp->pdev, PCI_STATUS, cfg);
	}

	/* For all PCI errors, we should reset the chip. */
	return 1;
}

/* All non-normal interrupt conditions get serviced here.
 * Returns non-zero if we should just exit the interrupt
 * handler right now (ie. if we reset the card which invalidates
 * all of the other original irq status bits).
 */
static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
			    u32 status)
{
	if (status & INTR_RX_TAG_ERROR) {
		/* corrupt RX tag framing */
		if (netif_msg_rx_err(cp))
			printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
				cp->dev->name);
		spin_lock(&cp->stat_lock[0]);
		cp->net_stats[0].rx_errors++;
		spin_unlock(&cp->stat_lock[0]);
		goto do_reset;
	}

	if (status & INTR_RX_LEN_MISMATCH) {
		/* length mismatch. */
		if (netif_msg_rx_err(cp))
			printk(KERN_DEBUG "%s: length mismatch for rx frame\n",
				cp->dev->name);
		spin_lock(&cp->stat_lock[0]);
		cp->net_stats[0].rx_errors++;
		spin_unlock(&cp->stat_lock[0]);
		goto do_reset;
	}

	if (status & INTR_PCS_STATUS) {
		if (cas_pcs_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_TX_MAC_STATUS) {
		if (cas_txmac_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_RX_MAC_STATUS) {
		if (cas_rxmac_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_MAC_CTRL_STATUS) {
		if (cas_mac_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_MIF_STATUS) {
		if (cas_mif_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_PCI_ERROR_STATUS) {
		if (cas_pci_interrupt(dev, cp, status))
			goto do_reset;
	}
	return 0;

do_reset:
#if 1
	atomic_inc(&cp->reset_task_pending);
	atomic_inc(&cp->reset_task_pending_all);
	printk(KERN_ERR "%s: reset called in cas_abnormal_irq [0x%x]\n",
	       dev->name, status);
	schedule_work(&cp->reset_task);
#else
	atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
	printk(KERN_ERR "reset called in cas_abnormal_irq\n");
	schedule_work(&cp->reset_task);
#endif
	return 1;
}

/* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when
 * determining whether to do a netif_stop/wakeup
 */
#define CAS_TABORT(x)      (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
#define CAS_ROUND_PAGE(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)
static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
				  const int len)
{
	unsigned long off = addr + len;

	if (CAS_TABORT(cp) == 1)
		return 0;
	if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
		return 0;
	return TX_TARGET_ABORT_LEN;
}

static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
{
	struct cas_tx_desc *txds;
	struct sk_buff **skbs;
	struct net_device *dev = cp->dev;
	int entry, count;

	spin_lock(&cp->tx_lock[ring]);
	txds = cp->init_txds[ring];
	skbs = cp->tx_skbs[ring];
	entry = cp->tx_old[ring];

	count = TX_BUFF_COUNT(ring, entry, limit);
	while (entry != limit) {
		struct sk_buff *skb = skbs[entry];
		dma_addr_t daddr;
		u32 dlen;
		int frag;

		if (!skb) {
			/* this should never occur */
			entry = TX_DESC_NEXT(ring, entry);
			continue;
		}

		/* however, we might get only a partial skb release. */
*/ 1918 count -= skb_shinfo(skb)->nr_frags + 1919 + cp->tx_tiny_use[ring][entry].nbufs + 1; 1920 if (count < 0) 1921 break; 1922 1923 if (netif_msg_tx_done(cp)) 1924 printk(KERN_DEBUG "%s: tx[%d] done, slot %d\n", 1925 cp->dev->name, ring, entry); 1926 1927 skbs[entry] = NULL; 1928 cp->tx_tiny_use[ring][entry].nbufs = 0; 1929 1930 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { 1931 struct cas_tx_desc *txd = txds + entry; 1932 1933 daddr = le64_to_cpu(txd->buffer); 1934 dlen = CAS_VAL(TX_DESC_BUFLEN, 1935 le64_to_cpu(txd->control)); 1936 pci_unmap_page(cp->pdev, daddr, dlen, 1937 PCI_DMA_TODEVICE); 1938 entry = TX_DESC_NEXT(ring, entry); 1939 1940 /* tiny buffer may follow */ 1941 if (cp->tx_tiny_use[ring][entry].used) { 1942 cp->tx_tiny_use[ring][entry].used = 0; 1943 entry = TX_DESC_NEXT(ring, entry); 1944 } 1945 } 1946 1947 spin_lock(&cp->stat_lock[ring]); 1948 cp->net_stats[ring].tx_packets++; 1949 cp->net_stats[ring].tx_bytes += skb->len; 1950 spin_unlock(&cp->stat_lock[ring]); 1951 dev_kfree_skb_irq(skb); 1952 } 1953 cp->tx_old[ring] = entry; 1954 1955 /* this is wrong for multiple tx rings. the net device needs 1956 * multiple queues for this to do the right thing. we wait 1957 * for 2*packets to be available when using tiny buffers 1958 */ 1959 if (netif_queue_stopped(dev) && 1960 (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))) 1961 netif_wake_queue(dev); 1962 spin_unlock(&cp->tx_lock[ring]); 1963} 1964 1965static void cas_tx(struct net_device *dev, struct cas *cp, 1966 u32 status) 1967{ 1968 int limit, ring; 1969#ifdef USE_TX_COMPWB 1970 u64 compwb = le64_to_cpu(cp->init_block->tx_compwb); 1971#endif 1972 if (netif_msg_intr(cp)) 1973 printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %llx\n", 1974 cp->dev->name, status, (unsigned long long)compwb); 1975 /* process all the rings */ 1976 for (ring = 0; ring < N_TX_RINGS; ring++) { 1977#ifdef USE_TX_COMPWB 1978 /* use the completion writeback registers */ 1979 limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) | 1980 CAS_VAL(TX_COMPWB_LSB, compwb); 1981 compwb = TX_COMPWB_NEXT(compwb); 1982#else 1983 limit = readl(cp->regs + REG_TX_COMPN(ring)); 1984#endif 1985 if (cp->tx_old[ring] != limit) 1986 cas_tx_ringN(cp, ring, limit); 1987 } 1988} 1989 1990 1991static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, 1992 int entry, const u64 *words, 1993 struct sk_buff **skbref) 1994{ 1995 int dlen, hlen, len, i, alloclen; 1996 int off, swivel = RX_SWIVEL_OFF_VAL; 1997 struct cas_page *page; 1998 struct sk_buff *skb; 1999 void *addr, *crcaddr; 2000 __sum16 csum; 2001 char *p; 2002 2003 hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]); 2004 dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]); 2005 len = hlen + dlen; 2006 2007 if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT)) 2008 alloclen = len; 2009 else 2010 alloclen = max(hlen, RX_COPY_MIN); 2011 2012 skb = dev_alloc_skb(alloclen + swivel + cp->crc_size); 2013 if (skb == NULL) 2014 return -1; 2015 2016 *skbref = skb; 2017 skb_reserve(skb, swivel); 2018 2019 p = skb->data; 2020 addr = crcaddr = NULL; 2021 if (hlen) { /* always copy header pages */ 2022 i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]); 2023 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; 2024 off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 + 2025 swivel; 2026 2027 i = hlen; 2028 if (!dlen) /* attach FCS */ 2029 i += cp->crc_size; 2030 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i, 2031 PCI_DMA_FROMDEVICE); 2032 addr = cas_page_map(page->buffer); 2033 
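/* copy the header (plus the FCS when there is no data portion -- i was
 * sized above accordingly) out of the DMA page and into the skb head
 */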
memcpy(p, addr + off, i); 2034 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i, 2035 PCI_DMA_FROMDEVICE); 2036 cas_page_unmap(addr); 2037 RX_USED_ADD(page, 0x100); 2038 p += hlen; 2039 swivel = 0; 2040 } 2041 2042 2043 if (alloclen < (hlen + dlen)) { 2044 skb_frag_t *frag = skb_shinfo(skb)->frags; 2045 2046 /* normal or jumbo packets. we use frags */ 2047 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]); 2048 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; 2049 off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel; 2050 2051 hlen = min(cp->page_size - off, dlen); 2052 if (hlen < 0) { 2053 if (netif_msg_rx_err(cp)) { 2054 printk(KERN_DEBUG "%s: rx page overflow: " 2055 "%d\n", cp->dev->name, hlen); 2056 } 2057 dev_kfree_skb_irq(skb); 2058 return -1; 2059 } 2060 i = hlen; 2061 if (i == dlen) /* attach FCS */ 2062 i += cp->crc_size; 2063 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i, 2064 PCI_DMA_FROMDEVICE); 2065 2066 /* make sure we always copy a header */ 2067 swivel = 0; 2068 if (p == (char *) skb->data) { /* not split */ 2069 addr = cas_page_map(page->buffer); 2070 memcpy(p, addr + off, RX_COPY_MIN); 2071 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i, 2072 PCI_DMA_FROMDEVICE); 2073 cas_page_unmap(addr); 2074 off += RX_COPY_MIN; 2075 swivel = RX_COPY_MIN; 2076 RX_USED_ADD(page, cp->mtu_stride); 2077 } else { 2078 RX_USED_ADD(page, hlen); 2079 } 2080 skb_put(skb, alloclen); 2081 2082 skb_shinfo(skb)->nr_frags++; 2083 skb->data_len += hlen - swivel; 2084 skb->truesize += hlen - swivel; 2085 skb->len += hlen - swivel; 2086 2087 get_page(page->buffer); 2088 frag->page = page->buffer; 2089 frag->page_offset = off; 2090 frag->size = hlen - swivel; 2091 2092 /* any more data? */ 2093 if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) { 2094 hlen = dlen; 2095 off = 0; 2096 2097 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]); 2098 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; 2099 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr, 2100 hlen + cp->crc_size, 2101 PCI_DMA_FROMDEVICE); 2102 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr, 2103 hlen + cp->crc_size, 2104 PCI_DMA_FROMDEVICE); 2105 2106 skb_shinfo(skb)->nr_frags++; 2107 skb->data_len += hlen; 2108 skb->len += hlen; 2109 frag++; 2110 2111 get_page(page->buffer); 2112 frag->page = page->buffer; 2113 frag->page_offset = 0; 2114 frag->size = hlen; 2115 RX_USED_ADD(page, hlen + cp->crc_size); 2116 } 2117 2118 if (cp->crc_size) { 2119 addr = cas_page_map(page->buffer); 2120 crcaddr = addr + off + hlen; 2121 } 2122 2123 } else { 2124 /* copying packet */ 2125 if (!dlen) 2126 goto end_copy_pkt; 2127 2128 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]); 2129 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; 2130 off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel; 2131 hlen = min(cp->page_size - off, dlen); 2132 if (hlen < 0) { 2133 if (netif_msg_rx_err(cp)) { 2134 printk(KERN_DEBUG "%s: rx page overflow: " 2135 "%d\n", cp->dev->name, hlen); 2136 } 2137 dev_kfree_skb_irq(skb); 2138 return -1; 2139 } 2140 i = hlen; 2141 if (i == dlen) /* attach FCS */ 2142 i += cp->crc_size; 2143 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i, 2144 PCI_DMA_FROMDEVICE); 2145 addr = cas_page_map(page->buffer); 2146 memcpy(p, addr + off, i); 2147 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i, 2148 PCI_DMA_FROMDEVICE); 2149 cas_page_unmap(addr); 2150 if (p == (char *) skb->data) /* not split */ 2151 
RX_USED_ADD(page, cp->mtu_stride); 2152 else 2153 RX_USED_ADD(page, i); 2154 2155 /* any more data? */ 2156 if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) { 2157 p += hlen; 2158 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]); 2159 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; 2160 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr, 2161 dlen + cp->crc_size, 2162 PCI_DMA_FROMDEVICE); 2163 addr = cas_page_map(page->buffer); 2164 memcpy(p, addr, dlen + cp->crc_size); 2165 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr, 2166 dlen + cp->crc_size, 2167 PCI_DMA_FROMDEVICE); 2168 cas_page_unmap(addr); 2169 RX_USED_ADD(page, dlen + cp->crc_size); 2170 } 2171end_copy_pkt: 2172 if (cp->crc_size) { 2173 addr = NULL; 2174 crcaddr = skb->data + alloclen; 2175 } 2176 skb_put(skb, alloclen); 2177 } 2178 2179 csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3])); 2180 if (cp->crc_size) { 2181 /* checksum includes FCS. strip it out. */ 2182 csum = csum_fold(csum_partial(crcaddr, cp->crc_size, 2183 csum_unfold(csum))); 2184 if (addr) 2185 cas_page_unmap(addr); 2186 } 2187 skb->protocol = eth_type_trans(skb, cp->dev); 2188 if (skb->protocol == htons(ETH_P_IP)) { 2189 skb->csum = csum_unfold(~csum); 2190 skb->ip_summed = CHECKSUM_COMPLETE; 2191 } else 2192 skb->ip_summed = CHECKSUM_NONE; 2193 return len; 2194} 2195 2196 2197/* we can handle up to 64 rx flows at a time. we do the same thing 2198 * as nonreassm except that we batch up the buffers. 2199 * NOTE: we currently just treat each flow as a bunch of packets that 2200 * we pass up. a better way would be to coalesce the packets 2201 * into a jumbo packet. to do that, we need to do the following: 2202 * 1) the first packet will have a clean split between header and 2203 * data. save both. 2204 * 2) each time the next flow packet comes in, extend the 2205 * data length and merge the checksums. 2206 * 3) on flow release, fix up the header. 2207 * 4) make sure the higher layer doesn't care. 2208 * because packets get coalesced, we shouldn't run into fragment count 2209 * issues. 2210 */ 2211static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words, 2212 struct sk_buff *skb) 2213{ 2214 int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1); 2215 struct sk_buff_head *flow = &cp->rx_flows[flowid]; 2216 2217 /* this is protected at a higher layer, so no need to 2218 * do any additional locking here. stick the buffer 2219 * at the end. 2220 */ 2221 __skb_queue_tail(flow, skb); 2222 if (words[0] & RX_COMP1_RELEASE_FLOW) { 2223 while ((skb = __skb_dequeue(flow))) { 2224 cas_skb_release(skb); 2225 } 2226 } 2227} 2228 2229/* put rx descriptor back on ring. if a buffer is in use by a higher 2230 * layer, this will need to put in a replacement. 
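 * the replacement comes from cas_page_swap(), and the kick register is
 * only written once every 4 entries, so descriptors are handed back to
 * the chip in groups of four.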
2231 */ 2232static void cas_post_page(struct cas *cp, const int ring, const int index) 2233{ 2234 cas_page_t *new; 2235 int entry; 2236 2237 entry = cp->rx_old[ring]; 2238 2239 new = cas_page_swap(cp, ring, index); 2240 cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr); 2241 cp->init_rxds[ring][entry].index = 2242 cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) | 2243 CAS_BASE(RX_INDEX_RING, ring)); 2244 2245 entry = RX_DESC_ENTRY(ring, entry + 1); 2246 cp->rx_old[ring] = entry; 2247 2248 if (entry % 4) 2249 return; 2250 2251 if (ring == 0) 2252 writel(entry, cp->regs + REG_RX_KICK); 2253 else if ((N_RX_DESC_RINGS > 1) && 2254 (cp->cas_flags & CAS_FLAG_REG_PLUS)) 2255 writel(entry, cp->regs + REG_PLUS_RX_KICK1); 2256} 2257 2258 2259/* only when things are bad */ 2260static int cas_post_rxds_ringN(struct cas *cp, int ring, int num) 2261{ 2262 unsigned int entry, last, count, released; 2263 int cluster; 2264 cas_page_t **page = cp->rx_pages[ring]; 2265 2266 entry = cp->rx_old[ring]; 2267 2268 if (netif_msg_intr(cp)) 2269 printk(KERN_DEBUG "%s: rxd[%d] interrupt, done: %d\n", 2270 cp->dev->name, ring, entry); 2271 2272 cluster = -1; 2273 count = entry & 0x3; 2274 last = RX_DESC_ENTRY(ring, num ? entry + num - 4: entry - 4); 2275 released = 0; 2276 while (entry != last) { 2277 /* make a new buffer if it's still in use */ 2278 if (page_count(page[entry]->buffer) > 1) { 2279 cas_page_t *new = cas_page_dequeue(cp); 2280 if (!new) { 2281 /* let the timer know that we need to 2282 * do this again 2283 */ 2284 cp->cas_flags |= CAS_FLAG_RXD_POST(ring); 2285 if (!timer_pending(&cp->link_timer)) 2286 mod_timer(&cp->link_timer, jiffies + 2287 CAS_LINK_FAST_TIMEOUT); 2288 cp->rx_old[ring] = entry; 2289 cp->rx_last[ring] = num ? num - released : 0; 2290 return -ENOMEM; 2291 } 2292 spin_lock(&cp->rx_inuse_lock); 2293 list_add(&page[entry]->list, &cp->rx_inuse_list); 2294 spin_unlock(&cp->rx_inuse_lock); 2295 cp->init_rxds[ring][entry].buffer = 2296 cpu_to_le64(new->dma_addr); 2297 page[entry] = new; 2298 2299 } 2300 2301 if (++count == 4) { 2302 cluster = entry; 2303 count = 0; 2304 } 2305 released++; 2306 entry = RX_DESC_ENTRY(ring, entry + 1); 2307 } 2308 cp->rx_old[ring] = entry; 2309 2310 if (cluster < 0) 2311 return 0; 2312 2313 if (ring == 0) 2314 writel(cluster, cp->regs + REG_RX_KICK); 2315 else if ((N_RX_DESC_RINGS > 1) && 2316 (cp->cas_flags & CAS_FLAG_REG_PLUS)) 2317 writel(cluster, cp->regs + REG_PLUS_RX_KICK1); 2318 return 0; 2319} 2320 2321 2322/* process a completion ring. packets are set up in three basic ways: 2323 * small packets: should be copied header + data in single buffer. 2324 * large packets: header and data in a single buffer. 2325 * split packets: header in a separate buffer from data. 2326 * data may be in multiple pages. data may be > 256 2327 * bytes but in a single page. 2328 * 2329 * NOTE: RX page posting is done in this routine as well. while there's 2330 * the capability of using multiple RX completion rings, it isn't 2331 * really worthwhile due to the fact that the page posting will 2332 * force serialization on the single descriptor ring. 
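 *
 * the loop below walks completion entries until it finds one still
 * owned by hardware (type == 0 or the zero bit still set), hands each
 * packet to cas_rx_process_pkt(), and reposts whichever descriptor
 * pages the entry flags for release.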
2333 */ 2334static int cas_rx_ringN(struct cas *cp, int ring, int budget) 2335{ 2336 struct cas_rx_comp *rxcs = cp->init_rxcs[ring]; 2337 int entry, drops; 2338 int npackets = 0; 2339 2340 if (netif_msg_intr(cp)) 2341 printk(KERN_DEBUG "%s: rx[%d] interrupt, done: %d/%d\n", 2342 cp->dev->name, ring, 2343 readl(cp->regs + REG_RX_COMP_HEAD), 2344 cp->rx_new[ring]); 2345 2346 entry = cp->rx_new[ring]; 2347 drops = 0; 2348 while (1) { 2349 struct cas_rx_comp *rxc = rxcs + entry; 2350 struct sk_buff *skb; 2351 int type, len; 2352 u64 words[4]; 2353 int i, dring; 2354 2355 words[0] = le64_to_cpu(rxc->word1); 2356 words[1] = le64_to_cpu(rxc->word2); 2357 words[2] = le64_to_cpu(rxc->word3); 2358 words[3] = le64_to_cpu(rxc->word4); 2359 2360 /* don't touch if still owned by hw */ 2361 type = CAS_VAL(RX_COMP1_TYPE, words[0]); 2362 if (type == 0) 2363 break; 2364 2365 /* hw hasn't cleared the zero bit yet */ 2366 if (words[3] & RX_COMP4_ZERO) { 2367 break; 2368 } 2369 2370 /* get info on the packet */ 2371 if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) { 2372 spin_lock(&cp->stat_lock[ring]); 2373 cp->net_stats[ring].rx_errors++; 2374 if (words[3] & RX_COMP4_LEN_MISMATCH) 2375 cp->net_stats[ring].rx_length_errors++; 2376 if (words[3] & RX_COMP4_BAD) 2377 cp->net_stats[ring].rx_crc_errors++; 2378 spin_unlock(&cp->stat_lock[ring]); 2379 2380 /* We'll just return it to Cassini. */ 2381 drop_it: 2382 spin_lock(&cp->stat_lock[ring]); 2383 ++cp->net_stats[ring].rx_dropped; 2384 spin_unlock(&cp->stat_lock[ring]); 2385 goto next; 2386 } 2387 2388 len = cas_rx_process_pkt(cp, rxc, entry, words, &skb); 2389 if (len < 0) { 2390 ++drops; 2391 goto drop_it; 2392 } 2393 2394 /* see if it's a flow re-assembly or not. the driver 2395 * itself handles release back up. 2396 */ 2397 if (RX_DONT_BATCH || (type == 0x2)) { 2398 /* non-reassm: these always get released */ 2399 cas_skb_release(skb); 2400 } else { 2401 cas_rx_flow_pkt(cp, words, skb); 2402 } 2403 2404 spin_lock(&cp->stat_lock[ring]); 2405 cp->net_stats[ring].rx_packets++; 2406 cp->net_stats[ring].rx_bytes += len; 2407 spin_unlock(&cp->stat_lock[ring]); 2408 cp->dev->last_rx = jiffies; 2409 2410 next: 2411 npackets++; 2412 2413 /* should it be released? 
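 * a single completion can flag up to three descriptor pages for
 * reposting (header, data, and next-in-split), handled by the three
 * RX_COMP1_RELEASE_* checks below.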
*/ 2414 if (words[0] & RX_COMP1_RELEASE_HDR) { 2415 i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]); 2416 dring = CAS_VAL(RX_INDEX_RING, i); 2417 i = CAS_VAL(RX_INDEX_NUM, i); 2418 cas_post_page(cp, dring, i); 2419 } 2420 2421 if (words[0] & RX_COMP1_RELEASE_DATA) { 2422 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]); 2423 dring = CAS_VAL(RX_INDEX_RING, i); 2424 i = CAS_VAL(RX_INDEX_NUM, i); 2425 cas_post_page(cp, dring, i); 2426 } 2427 2428 if (words[0] & RX_COMP1_RELEASE_NEXT) { 2429 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]); 2430 dring = CAS_VAL(RX_INDEX_RING, i); 2431 i = CAS_VAL(RX_INDEX_NUM, i); 2432 cas_post_page(cp, dring, i); 2433 } 2434 2435 /* skip to the next entry */ 2436 entry = RX_COMP_ENTRY(ring, entry + 1 + 2437 CAS_VAL(RX_COMP1_SKIP, words[0])); 2438#ifdef USE_NAPI 2439 if (budget && (npackets >= budget)) 2440 break; 2441#endif 2442 } 2443 cp->rx_new[ring] = entry; 2444 2445 if (drops) 2446 printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", 2447 cp->dev->name); 2448 return npackets; 2449} 2450 2451 2452/* put completion entries back on the ring */ 2453static void cas_post_rxcs_ringN(struct net_device *dev, 2454 struct cas *cp, int ring) 2455{ 2456 struct cas_rx_comp *rxc = cp->init_rxcs[ring]; 2457 int last, entry; 2458 2459 last = cp->rx_cur[ring]; 2460 entry = cp->rx_new[ring]; 2461 if (netif_msg_intr(cp)) 2462 printk(KERN_DEBUG "%s: rxc[%d] interrupt, done: %d/%d\n", 2463 dev->name, ring, readl(cp->regs + REG_RX_COMP_HEAD), 2464 entry); 2465 2466 /* zero and re-mark descriptors */ 2467 while (last != entry) { 2468 cas_rxc_init(rxc + last); 2469 last = RX_COMP_ENTRY(ring, last + 1); 2470 } 2471 cp->rx_cur[ring] = last; 2472 2473 if (ring == 0) 2474 writel(last, cp->regs + REG_RX_COMP_TAIL); 2475 else if (cp->cas_flags & CAS_FLAG_REG_PLUS) 2476 writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring)); 2477} 2478 2479 2480 2481/* cassini can use all four PCI interrupts for the completion ring. 2482 * rings 3 and 4 are identical 2483 */ 2484#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD) 2485static inline void cas_handle_irqN(struct net_device *dev, 2486 struct cas *cp, const u32 status, 2487 const int ring) 2488{ 2489 if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT)) 2490 cas_post_rxcs_ringN(dev, cp, ring); 2491} 2492 2493static irqreturn_t cas_interruptN(int irq, void *dev_id) 2494{ 2495 struct net_device *dev = dev_id; 2496 struct cas *cp = netdev_priv(dev); 2497 unsigned long flags; 2498 int ring = (irq == cp->pci_irq_INTC) ? 2 : 3; /* needed before the status read */ 2499 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring)); 2500 2501 /* check for shared irq */ 2502 if (status == 0) 2503 return IRQ_NONE; 2504 2505 2506 spin_lock_irqsave(&cp->lock, flags); 2507 if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ 2508#ifdef USE_NAPI 2509 cas_mask_intr(cp); 2510 netif_rx_schedule(dev, &cp->napi); 2511#else 2512 cas_rx_ringN(cp, ring, 0); 2513#endif 2514 status &= ~INTR_RX_DONE_ALT; 2515 } 2516 2517 if (status) 2518 cas_handle_irqN(dev, cp, status, ring); 2519 spin_unlock_irqrestore(&cp->lock, flags); 2520 return IRQ_HANDLED; 2521} 2522#endif 2523 2524#ifdef USE_PCI_INTB 2525/* everything but rx packets */ 2526static inline void cas_handle_irq1(struct cas *cp, const u32 status) 2527{ 2528 if (status & INTR_RX_BUF_UNAVAIL_1) { 2529 /* Frame arrived, no free RX buffers available. 2530 * NOTE: we can get this on a link transition.
*/ 2531 cas_post_rxds_ringN(cp, 1, 0); 2532 spin_lock(&cp->stat_lock[1]); 2533 cp->net_stats[1].rx_dropped++; 2534 spin_unlock(&cp->stat_lock[1]); 2535 } 2536 2537 if (status & INTR_RX_BUF_AE_1) 2538 cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) - 2539 RX_AE_FREEN_VAL(1)); 2540 2541 if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL)) 2542 cas_post_rxcs_ringN(cp, 1); 2543} 2544 2545/* ring 2 handles a few more events than 3 and 4 */ 2546static irqreturn_t cas_interrupt1(int irq, void *dev_id) 2547{ 2548 struct net_device *dev = dev_id; 2549 struct cas *cp = netdev_priv(dev); 2550 unsigned long flags; 2551 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1)); 2552 2553 /* check for shared interrupt */ 2554 if (status == 0) 2555 return IRQ_NONE; 2556 2557 spin_lock_irqsave(&cp->lock, flags); 2558 if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ 2559#ifdef USE_NAPI 2560 cas_mask_intr(cp); 2561 netif_rx_schedule(dev, &cp->napi); 2562#else 2563 cas_rx_ringN(cp, 1, 0); 2564#endif 2565 status &= ~INTR_RX_DONE_ALT; 2566 } 2567 if (status) 2568 cas_handle_irq1(cp, status); 2569 spin_unlock_irqrestore(&cp->lock, flags); 2570 return IRQ_HANDLED; 2571} 2572#endif 2573 2574static inline void cas_handle_irq(struct net_device *dev, 2575 struct cas *cp, const u32 status) 2576{ 2577 /* housekeeping interrupts */ 2578 if (status & INTR_ERROR_MASK) 2579 cas_abnormal_irq(dev, cp, status); 2580 2581 if (status & INTR_RX_BUF_UNAVAIL) { 2582 /* Frame arrived, no free RX buffers available. 2583 * NOTE: we can get this on a link transition. 2584 */ 2585 cas_post_rxds_ringN(cp, 0, 0); 2586 spin_lock(&cp->stat_lock[0]); 2587 cp->net_stats[0].rx_dropped++; 2588 spin_unlock(&cp->stat_lock[0]); 2589 } else if (status & INTR_RX_BUF_AE) { 2590 cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) - 2591 RX_AE_FREEN_VAL(0)); 2592 } 2593 2594 if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL)) 2595 cas_post_rxcs_ringN(dev, cp, 0); 2596} 2597 2598static irqreturn_t cas_interrupt(int irq, void *dev_id) 2599{ 2600 struct net_device *dev = dev_id; 2601 struct cas *cp = netdev_priv(dev); 2602 unsigned long flags; 2603 u32 status = readl(cp->regs + REG_INTR_STATUS); 2604 2605 if (status == 0) 2606 return IRQ_NONE; 2607 2608 spin_lock_irqsave(&cp->lock, flags); 2609 if (status & (INTR_TX_ALL | INTR_TX_INTME)) { 2610 cas_tx(dev, cp, status); 2611 status &= ~(INTR_TX_ALL | INTR_TX_INTME); 2612 } 2613 2614 if (status & INTR_RX_DONE) { 2615#ifdef USE_NAPI 2616 cas_mask_intr(cp); 2617 netif_rx_schedule(dev, &cp->napi); 2618#else 2619 cas_rx_ringN(cp, 0, 0); 2620#endif 2621 status &= ~INTR_RX_DONE; 2622 } 2623 2624 if (status) 2625 cas_handle_irq(dev, cp, status); 2626 spin_unlock_irqrestore(&cp->lock, flags); 2627 return IRQ_HANDLED; 2628} 2629 2630 2631#ifdef USE_NAPI 2632static int cas_poll(struct napi_struct *napi, int budget) 2633{ 2634 struct cas *cp = container_of(napi, struct cas, napi); 2635 struct net_device *dev = cp->dev; 2636 int i, enable_intr, credits; 2637 u32 status = readl(cp->regs + REG_INTR_STATUS); 2638 unsigned long flags; 2639 2640 spin_lock_irqsave(&cp->lock, flags); 2641 cas_tx(dev, cp, status); 2642 spin_unlock_irqrestore(&cp->lock, flags); 2643 2644 /* NAPI rx packets. 
we spread the credits across all of the 2645 * rxc rings 2646 * 2647 * to make sure we're fair with the work we loop through each 2648 * ring N_RX_COMP_RINGS times with a request of 2649 * budget / N_RX_COMP_RINGS 2650 */ 2651 enable_intr = 1; 2652 credits = 0; 2653 for (i = 0; i < N_RX_COMP_RINGS; i++) { 2654 int j; 2655 for (j = 0; j < N_RX_COMP_RINGS; j++) { 2656 credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS); 2657 if (credits >= budget) { 2658 enable_intr = 0; 2659 goto rx_comp; 2660 } 2661 } 2662 } 2663 2664rx_comp: 2665 /* final rx completion */ 2666 spin_lock_irqsave(&cp->lock, flags); 2667 if (status) 2668 cas_handle_irq(dev, cp, status); 2669 2670#ifdef USE_PCI_INTB 2671 if (N_RX_COMP_RINGS > 1) { 2672 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1)); 2673 if (status) 2674 cas_handle_irq1(cp, status); 2675 } 2676#endif 2677 2678#ifdef USE_PCI_INTC 2679 if (N_RX_COMP_RINGS > 2) { 2680 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2)); 2681 if (status) 2682 cas_handle_irqN(dev, cp, status, 2); 2683 } 2684#endif 2685 2686#ifdef USE_PCI_INTD 2687 if (N_RX_COMP_RINGS > 3) { 2688 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3)); 2689 if (status) 2690 cas_handle_irqN(dev, cp, status, 3); 2691 } 2692#endif 2693 spin_unlock_irqrestore(&cp->lock, flags); 2694 if (enable_intr) { 2695 netif_rx_complete(dev, napi); 2696 cas_unmask_intr(cp); 2697 } 2698 return credits; 2699} 2700#endif 2701 2702#ifdef CONFIG_NET_POLL_CONTROLLER 2703static void cas_netpoll(struct net_device *dev) 2704{ 2705 struct cas *cp = netdev_priv(dev); 2706 2707 cas_disable_irq(cp, 0); 2708 cas_interrupt(cp->pdev->irq, dev); 2709 cas_enable_irq(cp, 0); 2710 2711#ifdef USE_PCI_INTB 2712 if (N_RX_COMP_RINGS > 1) { 2713 /* cas_interrupt1(); */ 2714 } 2715#endif 2716#ifdef USE_PCI_INTC 2717 if (N_RX_COMP_RINGS > 2) { 2718 /* cas_interruptN(); */ 2719 } 2720#endif 2721#ifdef USE_PCI_INTD 2722 if (N_RX_COMP_RINGS > 3) { 2723 /* cas_interruptN(); */ 2724 } 2725#endif 2726} 2727#endif 2728 2729static void cas_tx_timeout(struct net_device *dev) 2730{ 2731 struct cas *cp = netdev_priv(dev); 2732 2733 printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name); 2734 if (!cp->hw_running) { 2735 printk(KERN_ERR "%s: hrm..
hw not running!\n", dev->name); 2736 return; 2737 } 2738 2739 printk(KERN_ERR "%s: MIF_STATE[%08x]\n", 2740 dev->name, readl(cp->regs + REG_MIF_STATE_MACHINE)); 2741 2742 printk(KERN_ERR "%s: MAC_STATE[%08x]\n", 2743 dev->name, readl(cp->regs + REG_MAC_STATE_MACHINE)); 2744 2745 printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x] " 2746 "FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n", 2747 dev->name, 2748 readl(cp->regs + REG_TX_CFG), 2749 readl(cp->regs + REG_MAC_TX_STATUS), 2750 readl(cp->regs + REG_MAC_TX_CFG), 2751 readl(cp->regs + REG_TX_FIFO_PKT_CNT), 2752 readl(cp->regs + REG_TX_FIFO_WRITE_PTR), 2753 readl(cp->regs + REG_TX_FIFO_READ_PTR), 2754 readl(cp->regs + REG_TX_SM_1), 2755 readl(cp->regs + REG_TX_SM_2)); 2756 2757 printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n", 2758 dev->name, 2759 readl(cp->regs + REG_RX_CFG), 2760 readl(cp->regs + REG_MAC_RX_STATUS), 2761 readl(cp->regs + REG_MAC_RX_CFG)); 2762 2763 printk(KERN_ERR "%s: HP_STATE[%08x:%08x:%08x:%08x]\n", 2764 dev->name, 2765 readl(cp->regs + REG_HP_STATE_MACHINE), 2766 readl(cp->regs + REG_HP_STATUS0), 2767 readl(cp->regs + REG_HP_STATUS1), 2768 readl(cp->regs + REG_HP_STATUS2)); 2769 2770#if 1 2771 atomic_inc(&cp->reset_task_pending); 2772 atomic_inc(&cp->reset_task_pending_all); 2773 schedule_work(&cp->reset_task); 2774#else 2775 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); 2776 schedule_work(&cp->reset_task); 2777#endif 2778} 2779 2780static inline int cas_intme(int ring, int entry) 2781{ 2782 /* Algorithm: IRQ every 1/2 of descriptors. */ 2783 if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1))) 2784 return 1; 2785 return 0; 2786} 2787 2788 2789static void cas_write_txd(struct cas *cp, int ring, int entry, 2790 dma_addr_t mapping, int len, u64 ctrl, int last) 2791{ 2792 struct cas_tx_desc *txd = cp->init_txds[ring] + entry; 2793 2794 ctrl |= CAS_BASE(TX_DESC_BUFLEN, len); 2795 if (cas_intme(ring, entry)) 2796 ctrl |= TX_DESC_INTME; 2797 if (last) 2798 ctrl |= TX_DESC_EOF; 2799 txd->control = cpu_to_le64(ctrl); 2800 txd->buffer = cpu_to_le64(mapping); 2801} 2802 2803static inline void *tx_tiny_buf(struct cas *cp, const int ring, 2804 const int entry) 2805{ 2806 return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry; 2807} 2808 2809static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring, 2810 const int entry, const int tentry) 2811{ 2812 cp->tx_tiny_use[ring][tentry].nbufs++; 2813 cp->tx_tiny_use[ring][entry].used = 1; 2814 return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry; 2815} 2816 2817static inline int cas_xmit_tx_ringN(struct cas *cp, int ring, 2818 struct sk_buff *skb) 2819{ 2820 struct net_device *dev = cp->dev; 2821 int entry, nr_frags, frag, tabort, tentry; 2822 dma_addr_t mapping; 2823 unsigned long flags; 2824 u64 ctrl; 2825 u32 len; 2826 2827 spin_lock_irqsave(&cp->tx_lock[ring], flags); 2828 2829 /* This is a hard error, log it. */ 2830 if (TX_BUFFS_AVAIL(cp, ring) <= 2831 CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) { 2832 netif_stop_queue(dev); 2833 spin_unlock_irqrestore(&cp->tx_lock[ring], flags); 2834 printk(KERN_ERR PFX "%s: BUG! 
Tx Ring full when " 2835 "queue awake!\n", dev->name); 2836 return 1; 2837 } 2838 2839 ctrl = 0; 2840 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2841 const u64 csum_start_off = skb_transport_offset(skb); 2842 const u64 csum_stuff_off = csum_start_off + skb->csum_offset; 2843 2844 ctrl = TX_DESC_CSUM_EN | 2845 CAS_BASE(TX_DESC_CSUM_START, csum_start_off) | 2846 CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off); 2847 } 2848 2849 entry = cp->tx_new[ring]; 2850 cp->tx_skbs[ring][entry] = skb; 2851 2852 nr_frags = skb_shinfo(skb)->nr_frags; 2853 len = skb_headlen(skb); 2854 mapping = pci_map_page(cp->pdev, virt_to_page(skb->data), 2855 offset_in_page(skb->data), len, 2856 PCI_DMA_TODEVICE); 2857 2858 tentry = entry; 2859 tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len); 2860 if (unlikely(tabort)) { 2861 /* NOTE: len is always > tabort */ 2862 cas_write_txd(cp, ring, entry, mapping, len - tabort, 2863 ctrl | TX_DESC_SOF, 0); 2864 entry = TX_DESC_NEXT(ring, entry); 2865 2866 skb_copy_from_linear_data_offset(skb, len - tabort, 2867 tx_tiny_buf(cp, ring, entry), tabort); 2868 mapping = tx_tiny_map(cp, ring, entry, tentry); 2869 cas_write_txd(cp, ring, entry, mapping, tabort, ctrl, 2870 (nr_frags == 0)); 2871 } else { 2872 cas_write_txd(cp, ring, entry, mapping, len, ctrl | 2873 TX_DESC_SOF, (nr_frags == 0)); 2874 } 2875 entry = TX_DESC_NEXT(ring, entry); 2876 2877 for (frag = 0; frag < nr_frags; frag++) { 2878 skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag]; 2879 2880 len = fragp->size; 2881 mapping = pci_map_page(cp->pdev, fragp->page, 2882 fragp->page_offset, len, 2883 PCI_DMA_TODEVICE); 2884 2885 tabort = cas_calc_tabort(cp, fragp->page_offset, len); 2886 if (unlikely(tabort)) { 2887 void *addr; 2888 2889 /* NOTE: len is always > tabort */ 2890 cas_write_txd(cp, ring, entry, mapping, len - tabort, 2891 ctrl, 0); 2892 entry = TX_DESC_NEXT(ring, entry); 2893 2894 addr = cas_page_map(fragp->page); 2895 memcpy(tx_tiny_buf(cp, ring, entry), 2896 addr + fragp->page_offset + len - tabort, 2897 tabort); 2898 cas_page_unmap(addr); 2899 mapping = tx_tiny_map(cp, ring, entry, tentry); 2900 len = tabort; 2901 } 2902 2903 cas_write_txd(cp, ring, entry, mapping, len, ctrl, 2904 (frag + 1 == nr_frags)); 2905 entry = TX_DESC_NEXT(ring, entry); 2906 } 2907 2908 cp->tx_new[ring] = entry; 2909 if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)) 2910 netif_stop_queue(dev); 2911 2912 if (netif_msg_tx_queued(cp)) 2913 printk(KERN_DEBUG "%s: tx[%d] queued, slot %d, skblen %d, " 2914 "avail %d\n", 2915 dev->name, ring, entry, skb->len, 2916 TX_BUFFS_AVAIL(cp, ring)); 2917 writel(entry, cp->regs + REG_TX_KICKN(ring)); 2918 spin_unlock_irqrestore(&cp->tx_lock[ring], flags); 2919 return 0; 2920} 2921 2922static int cas_start_xmit(struct sk_buff *skb, struct net_device *dev) 2923{ 2924 struct cas *cp = netdev_priv(dev); 2925 2926 /* this is only used as a load-balancing hint, so it doesn't 2927 * need to be SMP safe 2928 */ 2929 static int ring; 2930 2931 if (skb_padto(skb, cp->min_frame_size)) 2932 return 0; 2933 2934 /* XXX: we need some higher-level QoS hooks to steer packets to 2935 * individual queues. 2936 */ 2937 if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb)) 2938 return 1; 2939 dev->trans_start = jiffies; 2940 return 0; 2941} 2942 2943static void cas_init_tx_dma(struct cas *cp) 2944{ 2945 u64 desc_dma = cp->block_dvma; 2946 unsigned long off; 2947 u32 val; 2948 int i; 2949 2950 /* set up tx completion writeback registers. 
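 * (the chip DMA-writes each ring's completion index into the init
 * block's tx_compwb field, which cas_tx() then reads in place of a
 * REG_TX_COMPN register poll); the writeback address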
must be 8-byte aligned */ 2951#ifdef USE_TX_COMPWB 2952 off = offsetof(struct cas_init_block, tx_compwb); 2953 writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI); 2954 writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW); 2955#endif 2956 2957 /* enable completion writebacks, enable paced mode, 2958 * disable read pipe, and disable pre-interrupt compwbs 2959 */ 2960 val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 | 2961 TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 | 2962 TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE | 2963 TX_CFG_INTR_COMPWB_DIS; 2964 2965 /* write out tx ring info and tx desc bases */ 2966 for (i = 0; i < MAX_TX_RINGS; i++) { 2967 off = (unsigned long) cp->init_txds[i] - 2968 (unsigned long) cp->init_block; 2969 2970 val |= CAS_TX_RINGN_BASE(i); 2971 writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i)); 2972 writel((desc_dma + off) & 0xffffffff, cp->regs + 2973 REG_TX_DBN_LOW(i)); 2974 /* don't zero out the kick register here as the system 2975 * will wedge 2976 */ 2977 } 2978 writel(val, cp->regs + REG_TX_CFG); 2979 2980 /* program max burst sizes. these numbers should be different 2981 * if doing QoS. 2982 */ 2983#ifdef USE_QOS 2984 writel(0x800, cp->regs + REG_TX_MAXBURST_0); 2985 writel(0x1600, cp->regs + REG_TX_MAXBURST_1); 2986 writel(0x2400, cp->regs + REG_TX_MAXBURST_2); 2987 writel(0x4800, cp->regs + REG_TX_MAXBURST_3); 2988#else 2989 writel(0x800, cp->regs + REG_TX_MAXBURST_0); 2990 writel(0x800, cp->regs + REG_TX_MAXBURST_1); 2991 writel(0x800, cp->regs + REG_TX_MAXBURST_2); 2992 writel(0x800, cp->regs + REG_TX_MAXBURST_3); 2993#endif 2994} 2995 2996/* Must be invoked under cp->lock. */ 2997static inline void cas_init_dma(struct cas *cp) 2998{ 2999 cas_init_tx_dma(cp); 3000 cas_init_rx_dma(cp); 3001} 3002 3003/* Must be invoked under cp->lock. 
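 * returns the MAC_RX_CFG filter bits to program: promiscuous mode, an
 * all-ones hash for IFF_ALLMULTI, or exact-match registers for the
 * first CAS_MC_EXACT_MATCH_SIZE multicast addresses with the hash
 * table catching the rest.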
*/ 3004static u32 cas_setup_multicast(struct cas *cp) 3005{ 3006 u32 rxcfg = 0; 3007 int i; 3008 3009 if (cp->dev->flags & IFF_PROMISC) { 3010 rxcfg |= MAC_RX_CFG_PROMISC_EN; 3011 3012 } else if (cp->dev->flags & IFF_ALLMULTI) { 3013 for (i=0; i < 16; i++) 3014 writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i)); 3015 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN; 3016 3017 } else { 3018 u16 hash_table[16]; 3019 u32 crc; 3020 struct dev_mc_list *dmi = cp->dev->mc_list; 3021 int i; 3022 3023 /* use the alternate mac address registers for the 3024 * first 15 multicast addresses 3025 */ 3026 for (i = 1; i <= CAS_MC_EXACT_MATCH_SIZE; i++) { 3027 if (!dmi) { 3028 writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 0)); 3029 writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 1)); 3030 writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 2)); 3031 continue; 3032 } 3033 writel((dmi->dmi_addr[4] << 8) | dmi->dmi_addr[5], 3034 cp->regs + REG_MAC_ADDRN(i*3 + 0)); 3035 writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3], 3036 cp->regs + REG_MAC_ADDRN(i*3 + 1)); 3037 writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1], 3038 cp->regs + REG_MAC_ADDRN(i*3 + 2)); 3039 dmi = dmi->next; 3040 } 3041 3042 /* use hw hash table for the next series of 3043 * multicast addresses 3044 */ 3045 memset(hash_table, 0, sizeof(hash_table)); 3046 while (dmi) { 3047 crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr); 3048 crc >>= 24; 3049 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); 3050 dmi = dmi->next; 3051 } 3052 for (i=0; i < 16; i++) 3053 writel(hash_table[i], cp->regs + 3054 REG_MAC_HASH_TABLEN(i)); 3055 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN; 3056 } 3057 3058 return rxcfg; 3059} 3060 3061/* must be invoked under cp->stat_lock[N_TX_RINGS] */ 3062static void cas_clear_mac_err(struct cas *cp) 3063{ 3064 writel(0, cp->regs + REG_MAC_COLL_NORMAL); 3065 writel(0, cp->regs + REG_MAC_COLL_FIRST); 3066 writel(0, cp->regs + REG_MAC_COLL_EXCESS); 3067 writel(0, cp->regs + REG_MAC_COLL_LATE); 3068 writel(0, cp->regs + REG_MAC_TIMER_DEFER); 3069 writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK); 3070 writel(0, cp->regs + REG_MAC_RECV_FRAME); 3071 writel(0, cp->regs + REG_MAC_LEN_ERR); 3072 writel(0, cp->regs + REG_MAC_ALIGN_ERR); 3073 writel(0, cp->regs + REG_MAC_FCS_ERR); 3074 writel(0, cp->regs + REG_MAC_RX_CODE_ERR); 3075} 3076 3077 3078static void cas_mac_reset(struct cas *cp) 3079{ 3080 int i; 3081 3082 /* do both TX and RX reset */ 3083 writel(0x1, cp->regs + REG_MAC_TX_RESET); 3084 writel(0x1, cp->regs + REG_MAC_RX_RESET); 3085 3086 /* wait for TX */ 3087 i = STOP_TRIES; 3088 while (i-- > 0) { 3089 if (readl(cp->regs + REG_MAC_TX_RESET) == 0) 3090 break; 3091 udelay(10); 3092 } 3093 3094 /* wait for RX */ 3095 i = STOP_TRIES; 3096 while (i-- > 0) { 3097 if (readl(cp->regs + REG_MAC_RX_RESET) == 0) 3098 break; 3099 udelay(10); 3100 } 3101 3102 if (readl(cp->regs + REG_MAC_TX_RESET) | 3103 readl(cp->regs + REG_MAC_RX_RESET)) 3104 printk(KERN_ERR "%s: mac tx[%d]/rx[%d] reset failed [%08x]\n", 3105 cp->dev->name, readl(cp->regs + REG_MAC_TX_RESET), 3106 readl(cp->regs + REG_MAC_RX_RESET), 3107 readl(cp->regs + REG_MAC_STATE_MACHINE)); 3108} 3109 3110 3111/* Must be invoked under cp->lock. 
*/ 3112static void cas_init_mac(struct cas *cp) 3113{ 3114 unsigned char *e = &cp->dev->dev_addr[0]; 3115 int i; 3116#ifdef CONFIG_CASSINI_MULTICAST_REG_WRITE 3117 u32 rxcfg; 3118#endif 3119 cas_mac_reset(cp); 3120 3121 /* setup core arbitration weight register */ 3122 writel(CAWR_RR_DIS, cp->regs + REG_CAWR); 3123 3124 /* XXX Use pci_dma_burst_advice() */ 3125#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA) 3126 /* set the infinite burst register for chips that don't have 3127 * pci issues. 3128 */ 3129 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0) 3130 writel(INF_BURST_EN, cp->regs + REG_INF_BURST); 3131#endif 3132 3133 writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE); 3134 3135 writel(0x00, cp->regs + REG_MAC_IPG0); 3136 writel(0x08, cp->regs + REG_MAC_IPG1); 3137 writel(0x04, cp->regs + REG_MAC_IPG2); 3138 3139 /* change later for 802.3z */ 3140 writel(0x40, cp->regs + REG_MAC_SLOT_TIME); 3141 3142 /* min frame + FCS */ 3143 writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN); 3144 3145 /* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we 3146 * specify the maximum frame size to prevent RX tag errors on 3147 * oversized frames. 3148 */ 3149 writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) | 3150 CAS_BASE(MAC_FRAMESIZE_MAX_FRAME, 3151 (CAS_MAX_MTU + ETH_HLEN + 4 + 4)), 3152 cp->regs + REG_MAC_FRAMESIZE_MAX); 3153 3154 /* NOTE: crc_size is used as a surrogate for half-duplex. 3155 * workaround saturn half-duplex issue by increasing preamble 3156 * size to 65 bytes. 3157 */ 3158 if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size) 3159 writel(0x41, cp->regs + REG_MAC_PA_SIZE); 3160 else 3161 writel(0x07, cp->regs + REG_MAC_PA_SIZE); 3162 writel(0x04, cp->regs + REG_MAC_JAM_SIZE); 3163 writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT); 3164 writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE); 3165 3166 writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED); 3167 3168 writel(0, cp->regs + REG_MAC_ADDR_FILTER0); 3169 writel(0, cp->regs + REG_MAC_ADDR_FILTER1); 3170 writel(0, cp->regs + REG_MAC_ADDR_FILTER2); 3171 writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK); 3172 writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK); 3173 3174 /* setup mac address in perfect filter array */ 3175 for (i = 0; i < 45; i++) 3176 writel(0x0, cp->regs + REG_MAC_ADDRN(i)); 3177 3178 writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0)); 3179 writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1)); 3180 writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2)); 3181 3182 writel(0x0001, cp->regs + REG_MAC_ADDRN(42)); 3183 writel(0xc200, cp->regs + REG_MAC_ADDRN(43)); 3184 writel(0x0180, cp->regs + REG_MAC_ADDRN(44)); 3185 3186#ifndef CONFIG_CASSINI_MULTICAST_REG_WRITE 3187 cp->mac_rx_cfg = cas_setup_multicast(cp); 3188#else 3189 /* WTZ: Do what Adrian did in cas_set_multicast. Doing 3190 * a writel does not seem to be necessary because Cassini 3191 * seems to preserve the configuration when we do the reset. 3192 * If the chip is in trouble, though, it is not clear if we 3193 * can really count on this behavior. cas_set_multicast uses 3194 * spin_lock_irqsave, but we are called only in cas_init_hw and 3195 * cas_init_hw is protected by cas_lock_all, which calls 3196 * spin_lock_irq (so it doesn't need to save the flags, and 3197 * we should be OK for the writel, as that is the only 3198 * difference). 
3199 */ 3200 cp->mac_rx_cfg = rxcfg = cas_setup_multicast(cp); 3201 writel(rxcfg, cp->regs + REG_MAC_RX_CFG); 3202#endif 3203 spin_lock(&cp->stat_lock[N_TX_RINGS]); 3204 cas_clear_mac_err(cp); 3205 spin_unlock(&cp->stat_lock[N_TX_RINGS]); 3206 3207 /* Setup MAC interrupts. We want to get all of the interesting 3208 * counter expiration events, but we do not want to hear about 3209 * normal rx/tx as the DMA engine tells us that. 3210 */ 3211 writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK); 3212 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK); 3213 3214 /* Don't enable even the PAUSE interrupts for now, we 3215 * make no use of those events other than to record them. 3216 */ 3217 writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK); 3218} 3219 3220/* Must be invoked under cp->lock. */ 3221static void cas_init_pause_thresholds(struct cas *cp) 3222{ 3223 /* Calculate pause thresholds. Setting the OFF threshold to the 3224 * full RX fifo size effectively disables PAUSE generation 3225 */ 3226 if (cp->rx_fifo_size <= (2 * 1024)) { 3227 cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size; 3228 } else { 3229 int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63; 3230 if (max_frame * 3 > cp->rx_fifo_size) { 3231 cp->rx_pause_off = 7104; 3232 cp->rx_pause_on = 960; 3233 } else { 3234 int off = (cp->rx_fifo_size - (max_frame * 2)); 3235 int on = off - max_frame; 3236 cp->rx_pause_off = off; 3237 cp->rx_pause_on = on; 3238 } 3239 } 3240} 3241 3242static int cas_vpd_match(const void __iomem *p, const char *str) 3243{ 3244 int len = strlen(str) + 1; 3245 int i; 3246 3247 for (i = 0; i < len; i++) { 3248 if (readb(p + i) != str[i]) 3249 return 0; 3250 } 3251 return 1; 3252} 3253 3254 3255/* get the mac address by reading the vpd information in the rom. 3256 * also get the phy type and determine if there's an entropy generator. 3257 * NOTE: this is a bit convoluted for the following reasons: 3258 * 1) vpd info has order-dependent mac addresses for multinic cards 3259 * 2) the only way to determine the nic order is to use the slot 3260 * number. 3261 * 3) fiber cards don't have bridges, so their slot numbers don't 3262 * mean anything. 3263 * 4) we don't actually know we have a fiber card until after 3264 * the mac addresses are parsed. 
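 *
 * the walk below follows the standard PCI expansion ROM layout: verify
 * the 0x55 0xaa signature, find the "PCIR" data structure and follow
 * the VPD pointer in its bytes 8-9, then scan the VPD (0x82 header
 * followed by a 0x90 read-only section) for the keywords of interest.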
3265 */ 3266static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr, 3267 const int offset) 3268{ 3269 void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START; 3270 void __iomem *base, *kstart; 3271 int i, len; 3272 int found = 0; 3273#define VPD_FOUND_MAC 0x01 3274#define VPD_FOUND_PHY 0x02 3275 3276 int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */ 3277 int mac_off = 0; 3278 3279 /* give us access to the PROM */ 3280 writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD, 3281 cp->regs + REG_BIM_LOCAL_DEV_EN); 3282 3283 /* check for an expansion rom */ 3284 if (readb(p) != 0x55 || readb(p + 1) != 0xaa) 3285 goto use_random_mac_addr; 3286 3287 /* search for beginning of vpd */ 3288 base = NULL; 3289 for (i = 2; i < EXPANSION_ROM_SIZE; i++) { 3290 /* check for PCIR */ 3291 if ((readb(p + i + 0) == 0x50) && 3292 (readb(p + i + 1) == 0x43) && 3293 (readb(p + i + 2) == 0x49) && 3294 (readb(p + i + 3) == 0x52)) { 3295 base = p + (readb(p + i + 8) | 3296 (readb(p + i + 9) << 8)); 3297 break; 3298 } 3299 } 3300 3301 if (!base || (readb(base) != 0x82)) 3302 goto use_random_mac_addr; 3303 3304 i = (readb(base + 1) | (readb(base + 2) << 8)) + 3; 3305 while (i < EXPANSION_ROM_SIZE) { 3306 if (readb(base + i) != 0x90) /* no vpd found */ 3307 goto use_random_mac_addr; 3308 3309 /* found a vpd field */ 3310 len = readb(base + i + 1) | (readb(base + i + 2) << 8); 3311 3312 /* extract keywords */ 3313 kstart = base + i + 3; 3314 p = kstart; 3315 while ((p - kstart) < len) { 3316 int klen = readb(p + 2); 3317 int j; 3318 char type; 3319 3320 p += 3; 3321 3322 /* look for the following things: 3323 * -- correct length == 29 3324 * 3 (type) + 2 (size) + 3325 * 18 (strlen("local-mac-address") + 1) + 3326 * 6 (mac addr) 3327 * -- VPD Instance 'I' 3328 * -- VPD Type Bytes 'B' 3329 * -- VPD data length == 6 3330 * -- property string == local-mac-address 3331 * 3332 * -- correct length == 24 3333 * 3 (type) + 2 (size) + 3334 * 12 (strlen("entropy-dev") + 1) + 3335 * 7 (strlen("vms110") + 1) 3336 * -- VPD Instance 'I' 3337 * -- VPD Type String 'B' 3338 * -- VPD data length == 7 3339 * -- property string == entropy-dev 3340 * 3341 * -- correct length == 18 3342 * 3 (type) + 2 (size) + 3343 * 9 (strlen("phy-type") + 1) + 3344 * 4 (strlen("pcs") + 1) 3345 * -- VPD Instance 'I' 3346 * -- VPD Type String 'S' 3347 * -- VPD data length == 4 3348 * -- property string == phy-type 3349 * 3350 * -- correct length == 23 3351 * 3 (type) + 2 (size) + 3352 * 14 (strlen("phy-interface") + 1) + 3353 * 4 (strlen("pcs") + 1) 3354 * -- VPD Instance 'I' 3355 * -- VPD Type String 'S' 3356 * -- VPD data length == 4 3357 * -- property string == phy-interface 3358 */ 3359 if (readb(p) != 'I') 3360 goto next; 3361 3362 /* finally, check string and length */ 3363 type = readb(p + 3); 3364 if (type == 'B') { 3365 if ((klen == 29) && readb(p + 4) == 6 && 3366 cas_vpd_match(p + 5, 3367 "local-mac-address")) { 3368 if (mac_off++ > offset) 3369 goto next; 3370 3371 /* set mac address */ 3372 for (j = 0; j < 6; j++) 3373 dev_addr[j] = 3374 readb(p + 23 + j); 3375 goto found_mac; 3376 } 3377 } 3378 3379 if (type != 'S') 3380 goto next; 3381 3382#ifdef USE_ENTROPY_DEV 3383 if ((klen == 24) && 3384 cas_vpd_match(p + 5, "entropy-dev") && 3385 cas_vpd_match(p + 17, "vms110")) { 3386 cp->cas_flags |= CAS_FLAG_ENTROPY_DEV; 3387 goto next; 3388 } 3389#endif 3390 3391 if (found & VPD_FOUND_PHY) 3392 goto next; 3393 3394 if ((klen == 18) && readb(p + 4) == 4 && 3395 cas_vpd_match(p + 5, "phy-type")) { 3396 if (cas_vpd_match(p + 14, "pcs")) { 
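/* a VPD phy-type of "pcs" means a fiber part driven
 * through the on-chip SERDES rather than an MII phy
 */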
3397 phy_type = CAS_PHY_SERDES; 3398 goto found_phy; 3399 } 3400 } 3401 3402 if ((klen == 23) && readb(p + 4) == 4 && 3403 cas_vpd_match(p + 5, "phy-interface")) { 3404 if (cas_vpd_match(p + 19, "pcs")) { 3405 phy_type = CAS_PHY_SERDES; 3406 goto found_phy; 3407 } 3408 } 3409found_mac: 3410 found |= VPD_FOUND_MAC; 3411 goto next; 3412 3413found_phy: 3414 found |= VPD_FOUND_PHY; 3415 3416next: 3417 p += klen; 3418 } 3419 i += len + 3; 3420 } 3421 3422use_random_mac_addr: 3423 if (found & VPD_FOUND_MAC) 3424 goto done; 3425 3426 /* Sun MAC prefix then 3 random bytes. */ 3427 printk(PFX "MAC address not found in ROM VPD\n"); 3428 dev_addr[0] = 0x08; 3429 dev_addr[1] = 0x00; 3430 dev_addr[2] = 0x20; 3431 get_random_bytes(dev_addr + 3, 3); 3432 3433done: 3434 writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN); 3435 return phy_type; 3436} 3437 3438/* check pci invariants */ 3439static void cas_check_pci_invariants(struct cas *cp) 3440{ 3441 struct pci_dev *pdev = cp->pdev; 3442 3443 cp->cas_flags = 0; 3444 if ((pdev->vendor == PCI_VENDOR_ID_SUN) && 3445 (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) { 3446 if (pdev->revision >= CAS_ID_REVPLUS) 3447 cp->cas_flags |= CAS_FLAG_REG_PLUS; 3448 if (pdev->revision < CAS_ID_REVPLUS02u) 3449 cp->cas_flags |= CAS_FLAG_TARGET_ABORT; 3450 3451 /* Original Cassini supports HW CSUM, but it's not 3452 * enabled by default as it can trigger TX hangs. 3453 */ 3454 if (pdev->revision < CAS_ID_REV2) 3455 cp->cas_flags |= CAS_FLAG_NO_HW_CSUM; 3456 } else { 3457 /* Only sun has original cassini chips. */ 3458 cp->cas_flags |= CAS_FLAG_REG_PLUS; 3459 3460 /* We use a flag because the same phy might be externally 3461 * connected. 3462 */ 3463 if ((pdev->vendor == PCI_VENDOR_ID_NS) && 3464 (pdev->device == PCI_DEVICE_ID_NS_SATURN)) 3465 cp->cas_flags |= CAS_FLAG_SATURN; 3466 } 3467} 3468 3469 3470static int cas_check_invariants(struct cas *cp) 3471{ 3472 struct pci_dev *pdev = cp->pdev; 3473 u32 cfg; 3474 int i; 3475 3476 /* get page size for rx buffers. */ 3477 cp->page_order = 0; 3478#ifdef USE_PAGE_ORDER 3479 if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) { 3480 /* see if we can allocate larger pages */ 3481 struct page *page = alloc_pages(GFP_ATOMIC, 3482 CAS_JUMBO_PAGE_SHIFT - 3483 PAGE_SHIFT); 3484 if (page) { 3485 __free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT); 3486 cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT; 3487 } else { 3488 printk(PFX "MTU limited to %d bytes\n", CAS_MAX_MTU); 3489 } 3490 } 3491#endif 3492 cp->page_size = (PAGE_SIZE << cp->page_order); 3493 3494 /* Fetch the FIFO configurations. */ 3495 cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64; 3496 cp->rx_fifo_size = RX_FIFO_SIZE; 3497 3498 /* finish phy determination. MDIO1 takes precedence over MDIO0 if 3499 * they're both connected. 
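 * if the VPD did not identify a SERDES part, MIF_CFG is consulted and
 * then all 32 MII addresses are probed (3 reads each) for a phy that
 * answers with a valid PHYSID.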
3500 */ 3501 cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr, 3502 PCI_SLOT(pdev->devfn)); 3503 if (cp->phy_type & CAS_PHY_SERDES) { 3504 cp->cas_flags |= CAS_FLAG_1000MB_CAP; 3505 return 0; /* no more checking needed */ 3506 } 3507 3508 /* MII */ 3509 cfg = readl(cp->regs + REG_MIF_CFG); 3510 if (cfg & MIF_CFG_MDIO_1) { 3511 cp->phy_type = CAS_PHY_MII_MDIO1; 3512 } else if (cfg & MIF_CFG_MDIO_0) { 3513 cp->phy_type = CAS_PHY_MII_MDIO0; 3514 } 3515 3516 cas_mif_poll(cp, 0); 3517 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE); 3518 3519 for (i = 0; i < 32; i++) { 3520 u32 phy_id; 3521 int j; 3522 3523 for (j = 0; j < 3; j++) { 3524 cp->phy_addr = i; 3525 phy_id = cas_phy_read(cp, MII_PHYSID1) << 16; 3526 phy_id |= cas_phy_read(cp, MII_PHYSID2); 3527 if (phy_id && (phy_id != 0xFFFFFFFF)) { 3528 cp->phy_id = phy_id; 3529 goto done; 3530 } 3531 } 3532 } 3533 printk(KERN_ERR PFX "MII phy did not respond [%08x]\n", 3534 readl(cp->regs + REG_MIF_STATE_MACHINE)); 3535 return -1; 3536 3537done: 3538 /* see if we can do gigabit */ 3539 cfg = cas_phy_read(cp, MII_BMSR); 3540 if ((cfg & CAS_BMSR_1000_EXTEND) && 3541 cas_phy_read(cp, CAS_MII_1000_EXTEND)) 3542 cp->cas_flags |= CAS_FLAG_1000MB_CAP; 3543 return 0; 3544} 3545 3546/* Must be invoked under cp->lock. */ 3547static inline void cas_start_dma(struct cas *cp) 3548{ 3549 int i; 3550 u32 val; 3551 int txfailed = 0; 3552 3553 /* enable dma */ 3554 val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN; 3555 writel(val, cp->regs + REG_TX_CFG); 3556 val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN; 3557 writel(val, cp->regs + REG_RX_CFG); 3558 3559 /* enable the mac */ 3560 val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN; 3561 writel(val, cp->regs + REG_MAC_TX_CFG); 3562 val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN; 3563 writel(val, cp->regs + REG_MAC_RX_CFG); 3564 3565 i = STOP_TRIES; 3566 while (i-- > 0) { 3567 val = readl(cp->regs + REG_MAC_TX_CFG); 3568 if ((val & MAC_TX_CFG_EN)) 3569 break; 3570 udelay(10); 3571 } 3572 if (i < 0) txfailed = 1; 3573 i = STOP_TRIES; 3574 while (i-- > 0) { 3575 val = readl(cp->regs + REG_MAC_RX_CFG); 3576 if ((val & MAC_RX_CFG_EN)) { 3577 if (txfailed) { 3578 printk(KERN_ERR 3579 "%s: enabling mac failed [tx:%08x:%08x].\n", 3580 cp->dev->name, 3581 readl(cp->regs + REG_MIF_STATE_MACHINE), 3582 readl(cp->regs + REG_MAC_STATE_MACHINE)); 3583 } 3584 goto enable_rx_done; 3585 } 3586 udelay(10); 3587 } 3588 printk(KERN_ERR "%s: enabling mac failed [%s:%08x:%08x].\n", 3589 cp->dev->name, 3590 (txfailed? "tx,rx":"rx"), 3591 readl(cp->regs + REG_MIF_STATE_MACHINE), 3592 readl(cp->regs + REG_MAC_STATE_MACHINE)); 3593 3594enable_rx_done: 3595 cas_unmask_intr(cp); /* enable interrupts */ 3596 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK); 3597 writel(0, cp->regs + REG_RX_COMP_TAIL); 3598 3599 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { 3600 if (N_RX_DESC_RINGS > 1) 3601 writel(RX_DESC_RINGN_SIZE(1) - 4, 3602 cp->regs + REG_PLUS_RX_KICK1); 3603 3604 for (i = 1; i < N_RX_COMP_RINGS; i++) 3605 writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i)); 3606 } 3607} 3608 3609/* Must be invoked under cp->lock. */ 3610static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd, 3611 int *pause) 3612{ 3613 u32 val = readl(cp->regs + REG_PCS_MII_LPA); 3614 *fd = (val & PCS_MII_LPA_FD) ? 1 : 0; 3615 *pause = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00; 3616 if (val & PCS_MII_LPA_ASYM_PAUSE) 3617 *pause |= 0x10; 3618 *spd = 1000; 3619} 3620 3621/* Must be invoked under cp->lock. 
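 * as in the PCS variant above, *pause is a bitmask: 0x01 for symmetric
 * pause, 0x10 for asymmetric pause.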
*/ 3622static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd, 3623 int *pause) 3624{ 3625 u32 val; 3626 3627 *fd = 0; 3628 *spd = 10; 3629 *pause = 0; 3630 3631 /* use GMII registers */ 3632 val = cas_phy_read(cp, MII_LPA); 3633 if (val & CAS_LPA_PAUSE) 3634 *pause = 0x01; 3635 3636 if (val & CAS_LPA_ASYM_PAUSE) 3637 *pause |= 0x10; 3638 3639 if (val & LPA_DUPLEX) 3640 *fd = 1; 3641 if (val & LPA_100) 3642 *spd = 100; 3643 3644 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { 3645 val = cas_phy_read(cp, CAS_MII_1000_STATUS); 3646 if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF)) 3647 *spd = 1000; 3648 if (val & CAS_LPA_1000FULL) 3649 *fd = 1; 3650 } 3651} 3652 3653/* A link-up condition has occurred, initialize and enable the 3654 * rest of the chip. 3655 * 3656 * Must be invoked under cp->lock. 3657 */ 3658static void cas_set_link_modes(struct cas *cp) 3659{ 3660 u32 val; 3661 int full_duplex, speed, pause; 3662 3663 full_duplex = 0; 3664 speed = 10; 3665 pause = 0; 3666 3667 if (CAS_PHY_MII(cp->phy_type)) { 3668 cas_mif_poll(cp, 0); 3669 val = cas_phy_read(cp, MII_BMCR); 3670 if (val & BMCR_ANENABLE) { 3671 cas_read_mii_link_mode(cp, &full_duplex, &speed, 3672 &pause); 3673 } else { 3674 if (val & BMCR_FULLDPLX) 3675 full_duplex = 1; 3676 3677 if (val & BMCR_SPEED100) 3678 speed = 100; 3679 else if (val & CAS_BMCR_SPEED1000) 3680 speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ? 3681 1000 : 100; 3682 } 3683 cas_mif_poll(cp, 1); 3684 3685 } else { 3686 val = readl(cp->regs + REG_PCS_MII_CTRL); 3687 cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause); 3688 if ((val & PCS_MII_AUTONEG_EN) == 0) { 3689 if (val & PCS_MII_CTRL_DUPLEX) 3690 full_duplex = 1; 3691 } 3692 } 3693 3694 if (netif_msg_link(cp)) 3695 printk(KERN_INFO "%s: Link up at %d Mbps, %s-duplex.\n", 3696 cp->dev->name, speed, (full_duplex ? "full" : "half")); 3697 3698 val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED; 3699 if (CAS_PHY_MII(cp->phy_type)) { 3700 val |= MAC_XIF_MII_BUFFER_OUTPUT_EN; 3701 if (!full_duplex) 3702 val |= MAC_XIF_DISABLE_ECHO; 3703 } 3704 if (full_duplex) 3705 val |= MAC_XIF_FDPLX_LED; 3706 if (speed == 1000) 3707 val |= MAC_XIF_GMII_MODE; 3708 writel(val, cp->regs + REG_MAC_XIF_CFG); 3709 3710 /* deal with carrier and collision detect. */ 3711 val = MAC_TX_CFG_IPG_EN; 3712 if (full_duplex) { 3713 val |= MAC_TX_CFG_IGNORE_CARRIER; 3714 val |= MAC_TX_CFG_IGNORE_COLL; 3715 } else { 3716#ifndef USE_CSMA_CD_PROTO 3717 val |= MAC_TX_CFG_NEVER_GIVE_UP_EN; 3718 val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM; 3719#endif 3720 } 3721 /* val now set up for REG_MAC_TX_CFG */ 3722 3723 /* If gigabit and half-duplex, enable carrier extension 3724 * mode. increase slot time to 512 bytes as well. 3725 * else, disable it and make sure slot time is 64 bytes. 3726 * also activate checksum bug workaround 3727 */ 3728 if ((speed == 1000) && !full_duplex) { 3729 writel(val | MAC_TX_CFG_CARRIER_EXTEND, 3730 cp->regs + REG_MAC_TX_CFG); 3731 3732 val = readl(cp->regs + REG_MAC_RX_CFG); 3733 val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */ 3734 writel(val | MAC_RX_CFG_CARRIER_EXTEND, 3735 cp->regs + REG_MAC_RX_CFG); 3736 3737 writel(0x200, cp->regs + REG_MAC_SLOT_TIME); 3738 3739 cp->crc_size = 4; 3740 /* minimum size gigabit frame at half duplex */ 3741 cp->min_frame_size = CAS_1000MB_MIN_FRAME; 3742 3743 } else { 3744 writel(val, cp->regs + REG_MAC_TX_CFG); 3745 3746 /* checksum bug workaround. 
don't strip FCS when in 3747 * half-duplex mode 3748 */ 3749 val = readl(cp->regs + REG_MAC_RX_CFG); 3750 if (full_duplex) { 3751 val |= MAC_RX_CFG_STRIP_FCS; 3752 cp->crc_size = 0; 3753 cp->min_frame_size = CAS_MIN_MTU; 3754 } else { 3755 val &= ~MAC_RX_CFG_STRIP_FCS; 3756 cp->crc_size = 4; 3757 cp->min_frame_size = CAS_MIN_FRAME; 3758 } 3759 writel(val & ~MAC_RX_CFG_CARRIER_EXTEND, 3760 cp->regs + REG_MAC_RX_CFG); 3761 writel(0x40, cp->regs + REG_MAC_SLOT_TIME); 3762 } 3763 3764 if (netif_msg_link(cp)) { 3765 if (pause & 0x01) { 3766 printk(KERN_INFO "%s: Pause is enabled " 3767 "(rxfifo: %d off: %d on: %d)\n", 3768 cp->dev->name, 3769 cp->rx_fifo_size, 3770 cp->rx_pause_off, 3771 cp->rx_pause_on); 3772 } else if (pause & 0x10) { 3773 printk(KERN_INFO "%s: TX pause enabled\n", 3774 cp->dev->name); 3775 } else { 3776 printk(KERN_INFO "%s: Pause is disabled\n", 3777 cp->dev->name); 3778 } 3779 } 3780 3781 val = readl(cp->regs + REG_MAC_CTRL_CFG); 3782 val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN); 3783 if (pause) { /* symmetric or asymmetric pause */ 3784 val |= MAC_CTRL_CFG_SEND_PAUSE_EN; 3785 if (pause & 0x01) { /* symmetric pause */ 3786 val |= MAC_CTRL_CFG_RECV_PAUSE_EN; 3787 } 3788 } 3789 writel(val, cp->regs + REG_MAC_CTRL_CFG); 3790 cas_start_dma(cp); 3791} 3792 3793/* Must be invoked under cp->lock. */ 3794static void cas_init_hw(struct cas *cp, int restart_link) 3795{ 3796 if (restart_link) 3797 cas_phy_init(cp); 3798 3799 cas_init_pause_thresholds(cp); 3800 cas_init_mac(cp); 3801 cas_init_dma(cp); 3802 3803 if (restart_link) { 3804 /* Default aneg parameters */ 3805 cp->timer_ticks = 0; 3806 cas_begin_auto_negotiation(cp, NULL); 3807 } else if (cp->lstate == link_up) { 3808 cas_set_link_modes(cp); 3809 netif_carrier_on(cp->dev); 3810 } 3811} 3812 3813/* Must be invoked under cp->lock. on earlier cassini boards, 3814 * SOFT_0 is tied to PCI reset. we use this to force a pci reset, 3815 * let it settle out, and then restore pci state. 3816 */ 3817static void cas_hard_reset(struct cas *cp) 3818{ 3819 writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN); 3820 udelay(20); 3821 pci_restore_state(cp->pdev); 3822} 3823 3824 3825static void cas_global_reset(struct cas *cp, int blkflag) 3826{ 3827 int limit; 3828 3829 /* issue a global reset. don't use RSTOUT. */ 3830 if (blkflag && !CAS_PHY_MII(cp->phy_type)) { 3831 /* For PCS, when the blkflag is set, we should set the 3832 * SW_RESET_BLOCK_PCS_SLINK bit to prevent the results of 3833 * the last autonegotiation from being cleared. We'll 3834 * need some special handling if the chip is set into a 3835 * loopback mode. 3836 */ 3837 writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK), 3838 cp->regs + REG_SW_RESET); 3839 } else { 3840 writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET); 3841 } 3842 3843 /* need to wait at least 3ms before polling register */ 3844 mdelay(3); 3845 3846 limit = STOP_TRIES; 3847 while (limit-- > 0) { 3848 u32 val = readl(cp->regs + REG_SW_RESET); 3849 if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0) 3850 goto done; 3851 udelay(10); 3852 } 3853 printk(KERN_ERR "%s: sw reset failed.\n", cp->dev->name); 3854 3855done: 3856 /* enable various BIM interrupts */ 3857 writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE | 3858 BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG); 3859 3860 /* clear out pci error status mask for handled errors. 3861 * we don't deal with DMA counter overflows as they happen 3862 * all the time.
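 * the bits left unmasked here are exactly the ones that
 * cas_pci_interrupt() knows how to report.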
3863 */ 3864 writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO | 3865 PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE | 3866 PCI_ERR_BIM_DMA_READ), cp->regs + 3867 REG_PCI_ERR_STATUS_MASK); 3868 3869 /* set up for MII by default to address mac rx reset timeout 3870 * issue 3871 */ 3872 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE); 3873} 3874 3875static void cas_reset(struct cas *cp, int blkflag) 3876{ 3877 u32 val; 3878 3879 cas_mask_intr(cp); 3880 cas_global_reset(cp, blkflag); 3881 cas_mac_reset(cp); 3882 cas_entropy_reset(cp); 3883 3884 /* disable dma engines. */ 3885 val = readl(cp->regs + REG_TX_CFG); 3886 val &= ~TX_CFG_DMA_EN; 3887 writel(val, cp->regs + REG_TX_CFG); 3888 3889 val = readl(cp->regs + REG_RX_CFG); 3890 val &= ~RX_CFG_DMA_EN; 3891 writel(val, cp->regs + REG_RX_CFG); 3892 3893 /* program header parser */ 3894 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) || 3895 (CAS_HP_ALT_FIRMWARE == cas_prog_null)) { 3896 cas_load_firmware(cp, CAS_HP_FIRMWARE); 3897 } else { 3898 cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE); 3899 } 3900 3901 /* clear out error registers */ 3902 spin_lock(&cp->stat_lock[N_TX_RINGS]); 3903 cas_clear_mac_err(cp); 3904 spin_unlock(&cp->stat_lock[N_TX_RINGS]); 3905} 3906 3907/* Shut down the chip, must be called with pm_mutex held. */ 3908static void cas_shutdown(struct cas *cp) 3909{ 3910 unsigned long flags; 3911 3912 /* Make us not-running to avoid timers respawning */ 3913 cp->hw_running = 0; 3914 3915 del_timer_sync(&cp->link_timer); 3916 3917 /* Stop the reset task */ 3918#if 0 3919 while (atomic_read(&cp->reset_task_pending_mtu) || 3920 atomic_read(&cp->reset_task_pending_spare) || 3921 atomic_read(&cp->reset_task_pending_all)) 3922 schedule(); 3923 3924#else 3925 while (atomic_read(&cp->reset_task_pending)) 3926 schedule(); 3927#endif 3928 /* Actually stop the chip */ 3929 cas_lock_all_save(cp, flags); 3930 cas_reset(cp, 0); 3931 if (cp->cas_flags & CAS_FLAG_SATURN) 3932 cas_phy_powerdown(cp); 3933 cas_unlock_all_restore(cp, flags); 3934} 3935 3936static int cas_change_mtu(struct net_device *dev, int new_mtu) 3937{ 3938 struct cas *cp = netdev_priv(dev); 3939 3940 if (new_mtu < CAS_MIN_MTU || new_mtu > CAS_MAX_MTU) 3941 return -EINVAL; 3942 3943 dev->mtu = new_mtu; 3944 if (!netif_running(dev) || !netif_device_present(dev)) 3945 return 0; 3946 3947 /* let the reset task handle it */ 3948#if 1 3949 atomic_inc(&cp->reset_task_pending); 3950 if ((cp->phy_type & CAS_PHY_SERDES)) { 3951 atomic_inc(&cp->reset_task_pending_all); 3952 } else { 3953 atomic_inc(&cp->reset_task_pending_mtu); 3954 } 3955 schedule_work(&cp->reset_task); 3956#else 3957 atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ? 3958 CAS_RESET_ALL : CAS_RESET_MTU); 3959 printk(KERN_ERR "reset called in cas_change_mtu\n"); 3960 schedule_work(&cp->reset_task); 3961#endif 3962 3963 flush_scheduled_work(); 3964 return 0; 3965} 3966 3967static void cas_clean_txd(struct cas *cp, int ring) 3968{ 3969 struct cas_tx_desc *txd = cp->init_txds[ring]; 3970 struct sk_buff *skb, **skbs = cp->tx_skbs[ring]; 3971 u64 daddr, dlen; 3972 int i, size; 3973 3974 size = TX_DESC_RINGN_SIZE(ring); 3975 for (i = 0; i < size; i++) { 3976 int frag; 3977 3978 if (skbs[i] == NULL) 3979 continue; 3980 3981 skb = skbs[i]; 3982 skbs[i] = NULL; 3983 3984 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { 3985 int ent = i & (size - 1); 3986 3987 /* first buffer is never a tiny buffer and so 3988 * needs to be unmapped. 
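 * (tiny buffers live in the coherent tx_tiny_bufs block allocated in cas_tx_tiny_alloc and are never mapped per packet, which is why only the page-mapped buffers get pci_unmap_page'd in this loop.)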
3989 */ 3990 daddr = le64_to_cpu(txd[ent].buffer); 3991 dlen = CAS_VAL(TX_DESC_BUFLEN, 3992 le64_to_cpu(txd[ent].control)); 3993 pci_unmap_page(cp->pdev, daddr, dlen, 3994 PCI_DMA_TODEVICE); 3995 3996 if (frag != skb_shinfo(skb)->nr_frags) { 3997 i++; 3998 3999 /* next buffer might be a tiny buffer. 4000 * skip past it. 4001 */ 4002 ent = i & (size - 1); 4003 if (cp->tx_tiny_use[ring][ent].used) 4004 i++; 4005 } 4006 } 4007 dev_kfree_skb_any(skb); 4008 } 4009 4010 /* zero out tiny buf usage */ 4011 memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring])); 4012} 4013 4014/* freed on close */ 4015static inline void cas_free_rx_desc(struct cas *cp, int ring) 4016{ 4017 cas_page_t **page = cp->rx_pages[ring]; 4018 int i, size; 4019 4020 size = RX_DESC_RINGN_SIZE(ring); 4021 for (i = 0; i < size; i++) { 4022 if (page[i]) { 4023 cas_page_free(cp, page[i]); 4024 page[i] = NULL; 4025 } 4026 } 4027} 4028 4029static void cas_free_rxds(struct cas *cp) 4030{ 4031 int i; 4032 4033 for (i = 0; i < N_RX_DESC_RINGS; i++) 4034 cas_free_rx_desc(cp, i); 4035} 4036 4037/* Must be invoked under cp->lock. */ 4038static void cas_clean_rings(struct cas *cp) 4039{ 4040 int i; 4041 4042 /* need to clean all tx rings */ 4043 memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS); 4044 memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS); 4045 for (i = 0; i < N_TX_RINGS; i++) 4046 cas_clean_txd(cp, i); 4047 4048 /* zero out init block */ 4049 memset(cp->init_block, 0, sizeof(struct cas_init_block)); 4050 cas_clean_rxds(cp); 4051 cas_clean_rxcs(cp); 4052} 4053 4054/* allocated on open */ 4055static inline int cas_alloc_rx_desc(struct cas *cp, int ring) 4056{ 4057 cas_page_t **page = cp->rx_pages[ring]; 4058 int size, i = 0; 4059 4060 size = RX_DESC_RINGN_SIZE(ring); 4061 for (i = 0; i < size; i++) { 4062 if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL) 4063 return -1; 4064 } 4065 return 0; 4066} 4067 4068static int cas_alloc_rxds(struct cas *cp) 4069{ 4070 int i; 4071 4072 for (i = 0; i < N_RX_DESC_RINGS; i++) { 4073 if (cas_alloc_rx_desc(cp, i) < 0) { 4074 cas_free_rxds(cp); 4075 return -1; 4076 } 4077 } 4078 return 0; 4079} 4080 4081static void cas_reset_task(struct work_struct *work) 4082{ 4083 struct cas *cp = container_of(work, struct cas, reset_task); 4084#if 0 4085 int pending = atomic_read(&cp->reset_task_pending); 4086#else 4087 int pending_all = atomic_read(&cp->reset_task_pending_all); 4088 int pending_spare = atomic_read(&cp->reset_task_pending_spare); 4089 int pending_mtu = atomic_read(&cp->reset_task_pending_mtu); 4090 4091 if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) { 4092 /* We can have more tasks scheduled than actually 4093 * needed. 4094 */ 4095 atomic_dec(&cp->reset_task_pending); 4096 return; 4097 } 4098#endif 4099 /* The link went down, we reset the ring, but keep 4100 * DMA stopped. Use this function for reset 4101 * on error as well. 4102 */ 4103 if (cp->hw_running) { 4104 unsigned long flags; 4105 4106 /* Make sure we don't get interrupts or tx packets */ 4107 netif_device_detach(cp->dev); 4108 cas_lock_all_save(cp, flags); 4109 4110 if (cp->opened) { 4111 /* We call cas_spare_recover when we call cas_open, 4112 * but we do not initialize the lists cas_spare_recover 4113 * uses until cas_open is called.
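 * (hence the cp->opened test above: recovering spares before cas_open has run would walk uninitialized spare lists.)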
4114 */ 4115 cas_spare_recover(cp, GFP_ATOMIC); 4116 } 4117#if 1 4118 /* test => only pending_spare set */ 4119 if (!pending_all && !pending_mtu) 4120 goto done; 4121#else 4122 if (pending == CAS_RESET_SPARE) 4123 goto done; 4124#endif 4125 /* when pending == CAS_RESET_ALL, the following 4126 * call to cas_init_hw will restart auto negotiation. 4127 * Setting the second argument of cas_reset to 4128 * !(pending == CAS_RESET_ALL) will set this argument 4129 * to 1 (avoiding reinitializing the PHY for the normal 4130 * PCS case) when auto negotiation is not restarted. 4131 */ 4132#if 1 4133 cas_reset(cp, !(pending_all > 0)); 4134 if (cp->opened) 4135 cas_clean_rings(cp); 4136 cas_init_hw(cp, (pending_all > 0)); 4137#else 4138 cas_reset(cp, !(pending == CAS_RESET_ALL)); 4139 if (cp->opened) 4140 cas_clean_rings(cp); 4141 cas_init_hw(cp, pending == CAS_RESET_ALL); 4142#endif 4143 4144done: 4145 cas_unlock_all_restore(cp, flags); 4146 netif_device_attach(cp->dev); 4147 } 4148#if 1 4149 atomic_sub(pending_all, &cp->reset_task_pending_all); 4150 atomic_sub(pending_spare, &cp->reset_task_pending_spare); 4151 atomic_sub(pending_mtu, &cp->reset_task_pending_mtu); 4152 atomic_dec(&cp->reset_task_pending); 4153#else 4154 atomic_set(&cp->reset_task_pending, 0); 4155#endif 4156} 4157 4158static void cas_link_timer(unsigned long data) 4159{ 4160 struct cas *cp = (struct cas *) data; 4161 int mask, pending = 0, reset = 0; 4162 unsigned long flags; 4163 4164 if (link_transition_timeout != 0 && 4165 cp->link_transition_jiffies_valid && 4166 ((jiffies - cp->link_transition_jiffies) > 4167 (link_transition_timeout))) { 4168 /* One-second counter so link-down workaround doesn't 4169 * cause resets to occur so fast as to fool the switch 4170 * into thinking the link is down. 4171 */ 4172 cp->link_transition_jiffies_valid = 0; 4173 } 4174 4175 if (!cp->hw_running) 4176 return; 4177 4178 spin_lock_irqsave(&cp->lock, flags); 4179 cas_lock_tx(cp); 4180 cas_entropy_gather(cp); 4181 4182 /* If the link task is still pending, we just 4183 * reschedule the link timer 4184 */ 4185#if 1 4186 if (atomic_read(&cp->reset_task_pending_all) || 4187 atomic_read(&cp->reset_task_pending_spare) || 4188 atomic_read(&cp->reset_task_pending_mtu)) 4189 goto done; 4190#else 4191 if (atomic_read(&cp->reset_task_pending)) 4192 goto done; 4193#endif 4194 4195 /* check for rx cleaning */ 4196 if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) { 4197 int i, rmask; 4198 4199 for (i = 0; i < MAX_RX_DESC_RINGS; i++) { 4200 rmask = CAS_FLAG_RXD_POST(i); 4201 if ((mask & rmask) == 0) 4202 continue; 4203 4204 /* post_rxds will do a mod_timer */ 4205 if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) { 4206 pending = 1; 4207 continue; 4208 } 4209 cp->cas_flags &= ~rmask; 4210 } 4211 } 4212 4213 if (CAS_PHY_MII(cp->phy_type)) { 4214 u16 bmsr; 4215 cas_mif_poll(cp, 0); 4216 bmsr = cas_phy_read(cp, MII_BMSR); 4217 /* WTZ: Solaris driver reads this twice, but that 4218 * may be due to the PCS case and the use of a 4219 * common implementation. Read it twice here to be 4220 * safe. 
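 * (a general MII note, not cassini-specific: BMSR latches link-down events, so the first read returns and clears the latched state and the second read reports the current link state.)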
4221 */ 4222 bmsr = cas_phy_read(cp, MII_BMSR); 4223 cas_mif_poll(cp, 1); 4224 readl(cp->regs + REG_MIF_STATUS); /* avoid dups */ 4225 reset = cas_mii_link_check(cp, bmsr); 4226 } else { 4227 reset = cas_pcs_link_check(cp); 4228 } 4229 4230 if (reset) 4231 goto done; 4232 4233 /* check for tx state machine confusion */ 4234 if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) { 4235 u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE); 4236 u32 wptr, rptr; 4237 int tlm = CAS_VAL(MAC_SM_TLM, val); 4238 4239 if (((tlm == 0x5) || (tlm == 0x3)) && 4240 (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) { 4241 if (netif_msg_tx_err(cp)) 4242 printk(KERN_DEBUG "%s: tx err: " 4243 "MAC_STATE[%08x]\n", 4244 cp->dev->name, val); 4245 reset = 1; 4246 goto done; 4247 } 4248 4249 val = readl(cp->regs + REG_TX_FIFO_PKT_CNT); 4250 wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR); 4251 rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR); 4252 if ((val == 0) && (wptr != rptr)) { 4253 if (netif_msg_tx_err(cp)) 4254 printk(KERN_DEBUG "%s: tx err: " 4255 "TX_FIFO[%08x:%08x:%08x]\n", 4256 cp->dev->name, val, wptr, rptr); 4257 reset = 1; 4258 } 4259 4260 if (reset) 4261 cas_hard_reset(cp); 4262 } 4263 4264done: 4265 if (reset) { 4266#if 1 4267 atomic_inc(&cp->reset_task_pending); 4268 atomic_inc(&cp->reset_task_pending_all); 4269 schedule_work(&cp->reset_task); 4270#else 4271 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); 4272 printk(KERN_ERR "reset called in cas_link_timer\n"); 4273 schedule_work(&cp->reset_task); 4274#endif 4275 } 4276 4277 if (!pending) 4278 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT); 4279 cas_unlock_tx(cp); 4280 spin_unlock_irqrestore(&cp->lock, flags); 4281} 4282 4283/* tiny buffers are used to avoid target abort issues with 4284 * older Cassinis 4285 */ 4286static void cas_tx_tiny_free(struct cas *cp) 4287{ 4288 struct pci_dev *pdev = cp->pdev; 4289 int i; 4290 4291 for (i = 0; i < N_TX_RINGS; i++) { 4292 if (!cp->tx_tiny_bufs[i]) 4293 continue; 4294 4295 pci_free_consistent(pdev, TX_TINY_BUF_BLOCK, 4296 cp->tx_tiny_bufs[i], 4297 cp->tx_tiny_dvma[i]); 4298 cp->tx_tiny_bufs[i] = NULL; 4299 } 4300} 4301 4302static int cas_tx_tiny_alloc(struct cas *cp) 4303{ 4304 struct pci_dev *pdev = cp->pdev; 4305 int i; 4306 4307 for (i = 0; i < N_TX_RINGS; i++) { 4308 cp->tx_tiny_bufs[i] = 4309 pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK, 4310 &cp->tx_tiny_dvma[i]); 4311 if (!cp->tx_tiny_bufs[i]) { 4312 cas_tx_tiny_free(cp); 4313 return -1; 4314 } 4315 } 4316 return 0; 4317} 4318 4319 4320static int cas_open(struct net_device *dev) 4321{ 4322 struct cas *cp = netdev_priv(dev); 4323 int hw_was_up, err; 4324 unsigned long flags; 4325 4326 mutex_lock(&cp->pm_mutex); 4327 4328 hw_was_up = cp->hw_running; 4329 4330 /* The power-management mutex protects the hw_running 4331 * etc. state so it is safe to do this bit without cp->lock 4332 */ 4333 if (!cp->hw_running) { 4334 /* Reset the chip */ 4335 cas_lock_all_save(cp, flags); 4336 /* We set the second arg to cas_reset to zero 4337 * because cas_init_hw below will have its second 4338 * argument set to non-zero, which will force 4339 * autonegotiation to start.
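 * (concretely: cas_reset(cp, 0) below skips the PCS slink blocking, and the later cas_init_hw(cp, !hw_was_up) restarts autonegotiation on the first open.)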
4340 */ 4341 cas_reset(cp, 0); 4342 cp->hw_running = 1; 4343 cas_unlock_all_restore(cp, flags); 4344 } 4345 4346 if (cas_tx_tiny_alloc(cp) < 0) 4347 return -ENOMEM; 4348 4349 /* alloc rx descriptors */ 4350 err = -ENOMEM; 4351 if (cas_alloc_rxds(cp) < 0) 4352 goto err_tx_tiny; 4353 4354 /* allocate spares */ 4355 cas_spare_init(cp); 4356 cas_spare_recover(cp, GFP_KERNEL); 4357 4358 /* We can now request the interrupt as we know it's masked 4359 * on the controller. cassini+ has up to 4 interrupts 4360 * that can be used, but you need to do explicit pci interrupt 4361 * mapping to expose them 4362 */ 4363 if (request_irq(cp->pdev->irq, cas_interrupt, 4364 IRQF_SHARED, dev->name, (void *) dev)) { 4365 printk(KERN_ERR "%s: failed to request irq !\n", 4366 cp->dev->name); 4367 err = -EAGAIN; 4368 goto err_spare; 4369 } 4370 4371#ifdef USE_NAPI 4372 napi_enable(&cp->napi); 4373#endif 4374 /* init hw */ 4375 cas_lock_all_save(cp, flags); 4376 cas_clean_rings(cp); 4377 cas_init_hw(cp, !hw_was_up); 4378 cp->opened = 1; 4379 cas_unlock_all_restore(cp, flags); 4380 4381 netif_start_queue(dev); 4382 mutex_unlock(&cp->pm_mutex); 4383 return 0; 4384 4385err_spare: 4386 cas_spare_free(cp); 4387 cas_free_rxds(cp); 4388err_tx_tiny: 4389 cas_tx_tiny_free(cp); 4390 mutex_unlock(&cp->pm_mutex); 4391 return err; 4392} 4393 4394static int cas_close(struct net_device *dev) 4395{ 4396 unsigned long flags; 4397 struct cas *cp = netdev_priv(dev); 4398 4399#ifdef USE_NAPI 4400 napi_disable(&cp->napi); 4401#endif 4402 /* Make sure we don't get distracted by suspend/resume */ 4403 mutex_lock(&cp->pm_mutex); 4404 4405 netif_stop_queue(dev); 4406 4407 /* Stop traffic, mark us closed */ 4408 cas_lock_all_save(cp, flags); 4409 cp->opened = 0; 4410 cas_reset(cp, 0); 4411 cas_phy_init(cp); 4412 cas_begin_auto_negotiation(cp, NULL); 4413 cas_clean_rings(cp); 4414 cas_unlock_all_restore(cp, flags); 4415 4416 free_irq(cp->pdev->irq, (void *) dev); 4417 cas_spare_free(cp); 4418 cas_free_rxds(cp); 4419 cas_tx_tiny_free(cp); 4420 mutex_unlock(&cp->pm_mutex); 4421 return 0; 4422} 4423 4424static struct { 4425 const char name[ETH_GSTRING_LEN]; 4426} ethtool_cassini_statnames[] = { 4427 {"collisions"}, 4428 {"rx_bytes"}, 4429 {"rx_crc_errors"}, 4430 {"rx_dropped"}, 4431 {"rx_errors"}, 4432 {"rx_fifo_errors"}, 4433 {"rx_frame_errors"}, 4434 {"rx_length_errors"}, 4435 {"rx_over_errors"}, 4436 {"rx_packets"}, 4437 {"tx_aborted_errors"}, 4438 {"tx_bytes"}, 4439 {"tx_dropped"}, 4440 {"tx_errors"}, 4441 {"tx_fifo_errors"}, 4442 {"tx_packets"} 4443}; 4444#define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames) 4445 4446static struct { 4447 const int offsets; /* neg. 
values for 2nd arg to cas_phy_read */ 4448} ethtool_register_table[] = { 4449 {-MII_BMSR}, 4450 {-MII_BMCR}, 4451 {REG_CAWR}, 4452 {REG_INF_BURST}, 4453 {REG_BIM_CFG}, 4454 {REG_RX_CFG}, 4455 {REG_HP_CFG}, 4456 {REG_MAC_TX_CFG}, 4457 {REG_MAC_RX_CFG}, 4458 {REG_MAC_CTRL_CFG}, 4459 {REG_MAC_XIF_CFG}, 4460 {REG_MIF_CFG}, 4461 {REG_PCS_CFG}, 4462 {REG_SATURN_PCFG}, 4463 {REG_PCS_MII_STATUS}, 4464 {REG_PCS_STATE_MACHINE}, 4465 {REG_MAC_COLL_EXCESS}, 4466 {REG_MAC_COLL_LATE} 4467}; 4468#define CAS_REG_LEN ARRAY_SIZE(ethtool_register_table) 4469#define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN) 4470 4471static void cas_read_regs(struct cas *cp, u8 *ptr, int len) 4472{ 4473 u8 *p; 4474 int i; 4475 unsigned long flags; 4476 4477 spin_lock_irqsave(&cp->lock, flags); 4478 for (i = 0, p = ptr; i < len ; i++, p += sizeof(u32)) { 4479 u16 hval; 4480 u32 val; 4481 if (ethtool_register_table[i].offsets < 0) { 4482 hval = cas_phy_read(cp, 4483 -ethtool_register_table[i].offsets); 4484 val = hval; 4485 } else { 4486 val = readl(cp->regs + ethtool_register_table[i].offsets); 4487 } 4488 memcpy(p, (u8 *)&val, sizeof(u32)); 4489 } 4490 spin_unlock_irqrestore(&cp->lock, flags); 4491} 4492 4493static struct net_device_stats *cas_get_stats(struct net_device *dev) 4494{ 4495 struct cas *cp = netdev_priv(dev); 4496 struct net_device_stats *stats = cp->net_stats; 4497 unsigned long flags; 4498 int i; 4499 unsigned long tmp; 4500 4501 /* we collate all of the stats into net_stats[N_TX_RINGS] */ 4502 if (!cp->hw_running) 4503 return stats + N_TX_RINGS; 4504 4505 /* collect outstanding stats */ 4506 /* WTZ: the Cassini spec gives these as 16 bit counters but 4507 * stored in 32-bit words. Added a mask of 0xffff to be safe, 4508 * in case the chip somehow puts any garbage in the other bits. 4509 * Also, counter usage didn't seem to match what Adrian did 4510 * in the parts of the code that set these quantities. Made 4511 * that consistent.
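 * (so each readl below is masked in that spirit, roughly: tmp = readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff; only the low 16 bits of each 32-bit register carry the counter.)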
4512 */ 4513 spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags); 4514 stats[N_TX_RINGS].rx_crc_errors += 4515 readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff; 4516 stats[N_TX_RINGS].rx_frame_errors += 4517 readl(cp->regs + REG_MAC_ALIGN_ERR) &0xffff; 4518 stats[N_TX_RINGS].rx_length_errors += 4519 readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff; 4520#if 1 4521 tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) + 4522 (readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff); 4523 stats[N_TX_RINGS].tx_aborted_errors += tmp; 4524 stats[N_TX_RINGS].collisions += 4525 tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff); 4526#else 4527 stats[N_TX_RINGS].tx_aborted_errors += 4528 readl(cp->regs + REG_MAC_COLL_EXCESS); 4529 stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) + 4530 readl(cp->regs + REG_MAC_COLL_LATE); 4531#endif 4532 cas_clear_mac_err(cp); 4533 4534 /* saved bits that are unique to ring 0 */ 4535 spin_lock(&cp->stat_lock[0]); 4536 stats[N_TX_RINGS].collisions += stats[0].collisions; 4537 stats[N_TX_RINGS].rx_over_errors += stats[0].rx_over_errors; 4538 stats[N_TX_RINGS].rx_frame_errors += stats[0].rx_frame_errors; 4539 stats[N_TX_RINGS].rx_fifo_errors += stats[0].rx_fifo_errors; 4540 stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors; 4541 stats[N_TX_RINGS].tx_fifo_errors += stats[0].tx_fifo_errors; 4542 spin_unlock(&cp->stat_lock[0]); 4543 4544 for (i = 0; i < N_TX_RINGS; i++) { 4545 spin_lock(&cp->stat_lock[i]); 4546 stats[N_TX_RINGS].rx_length_errors += 4547 stats[i].rx_length_errors; 4548 stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors; 4549 stats[N_TX_RINGS].rx_packets += stats[i].rx_packets; 4550 stats[N_TX_RINGS].tx_packets += stats[i].tx_packets; 4551 stats[N_TX_RINGS].rx_bytes += stats[i].rx_bytes; 4552 stats[N_TX_RINGS].tx_bytes += stats[i].tx_bytes; 4553 stats[N_TX_RINGS].rx_errors += stats[i].rx_errors; 4554 stats[N_TX_RINGS].tx_errors += stats[i].tx_errors; 4555 stats[N_TX_RINGS].rx_dropped += stats[i].rx_dropped; 4556 stats[N_TX_RINGS].tx_dropped += stats[i].tx_dropped; 4557 memset(stats + i, 0, sizeof(struct net_device_stats)); 4558 spin_unlock(&cp->stat_lock[i]); 4559 } 4560 spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags); 4561 return stats + N_TX_RINGS; 4562} 4563 4564 4565static void cas_set_multicast(struct net_device *dev) 4566{ 4567 struct cas *cp = netdev_priv(dev); 4568 u32 rxcfg, rxcfg_new; 4569 unsigned long flags; 4570 int limit = STOP_TRIES; 4571 4572 if (!cp->hw_running) 4573 return; 4574 4575 spin_lock_irqsave(&cp->lock, flags); 4576 rxcfg = readl(cp->regs + REG_MAC_RX_CFG); 4577 4578 /* disable RX MAC and wait for completion */ 4579 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); 4580 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) { 4581 if (!limit--) 4582 break; 4583 udelay(10); 4584 } 4585 4586 /* disable hash filter and wait for completion */ 4587 limit = STOP_TRIES; 4588 rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN); 4589 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); 4590 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) { 4591 if (!limit--) 4592 break; 4593 udelay(10); 4594 } 4595 4596 /* program hash filters */ 4597 cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp); 4598 rxcfg |= rxcfg_new; 4599 writel(rxcfg, cp->regs + REG_MAC_RX_CFG); 4600 spin_unlock_irqrestore(&cp->lock, flags); 4601} 4602 4603static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 4604{ 4605 struct cas *cp = netdev_priv(dev); 4606 
strncpy(info->driver, DRV_MODULE_NAME, ETHTOOL_BUSINFO_LEN); 4607 strncpy(info->version, DRV_MODULE_VERSION, ETHTOOL_BUSINFO_LEN); 4608 info->fw_version[0] = '\0'; 4609 strncpy(info->bus_info, pci_name(cp->pdev), ETHTOOL_BUSINFO_LEN); 4610 info->regdump_len = cp->casreg_len < CAS_MAX_REGS ? 4611 cp->casreg_len : CAS_MAX_REGS; 4612 info->n_stats = CAS_NUM_STAT_KEYS; 4613} 4614 4615static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 4616{ 4617 struct cas *cp = netdev_priv(dev); 4618 u16 bmcr; 4619 int full_duplex, speed, pause; 4620 unsigned long flags; 4621 enum link_state linkstate = link_up; 4622 4623 cmd->advertising = 0; 4624 cmd->supported = SUPPORTED_Autoneg; 4625 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { 4626 cmd->supported |= SUPPORTED_1000baseT_Full; 4627 cmd->advertising |= ADVERTISED_1000baseT_Full; 4628 } 4629 4630 /* Record PHY settings if HW is on. */ 4631 spin_lock_irqsave(&cp->lock, flags); 4632 bmcr = 0; 4633 linkstate = cp->lstate; 4634 if (CAS_PHY_MII(cp->phy_type)) { 4635 cmd->port = PORT_MII; 4636 cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ? 4637 XCVR_INTERNAL : XCVR_EXTERNAL; 4638 cmd->phy_address = cp->phy_addr; 4639 cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII | 4640 ADVERTISED_10baseT_Half | 4641 ADVERTISED_10baseT_Full | 4642 ADVERTISED_100baseT_Half | 4643 ADVERTISED_100baseT_Full; 4644 4645 cmd->supported |= 4646 (SUPPORTED_10baseT_Half | 4647 SUPPORTED_10baseT_Full | 4648 SUPPORTED_100baseT_Half | 4649 SUPPORTED_100baseT_Full | 4650 SUPPORTED_TP | SUPPORTED_MII); 4651 4652 if (cp->hw_running) { 4653 cas_mif_poll(cp, 0); 4654 bmcr = cas_phy_read(cp, MII_BMCR); 4655 cas_read_mii_link_mode(cp, &full_duplex, 4656 &speed, &pause); 4657 cas_mif_poll(cp, 1); 4658 } 4659 4660 } else { 4661 cmd->port = PORT_FIBRE; 4662 cmd->transceiver = XCVR_INTERNAL; 4663 cmd->phy_address = 0; 4664 cmd->supported |= SUPPORTED_FIBRE; 4665 cmd->advertising |= ADVERTISED_FIBRE; 4666 4667 if (cp->hw_running) { 4668 /* pcs uses the same bits as mii */ 4669 bmcr = readl(cp->regs + REG_PCS_MII_CTRL); 4670 cas_read_pcs_link_mode(cp, &full_duplex, 4671 &speed, &pause); 4672 } 4673 } 4674 spin_unlock_irqrestore(&cp->lock, flags); 4675 4676 if (bmcr & BMCR_ANENABLE) { 4677 cmd->advertising |= ADVERTISED_Autoneg; 4678 cmd->autoneg = AUTONEG_ENABLE; 4679 cmd->speed = ((speed == 10) ? 4680 SPEED_10 : 4681 ((speed == 1000) ? 4682 SPEED_1000 : SPEED_100)); 4683 cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF; 4684 } else { 4685 cmd->autoneg = AUTONEG_DISABLE; 4686 cmd->speed = 4687 (bmcr & CAS_BMCR_SPEED1000) ? 4688 SPEED_1000 : 4689 ((bmcr & BMCR_SPEED100) ? SPEED_100: 4690 SPEED_10); 4691 cmd->duplex = 4692 (bmcr & BMCR_FULLDPLX) ? 4693 DUPLEX_FULL : DUPLEX_HALF; 4694 } 4695 if (linkstate != link_up) { 4696 /* Force these to "unknown" if the link is not up and 4697 * autonegotiation is enabled. We can set the link 4698 * speed to 0, but not cmd->duplex, 4699 * because its legal values are 0 and 1. Ethtool will 4700 * print the value reported in parentheses after the 4701 * word "Unknown" for unrecognized values. 4702 * 4703 * If in forced mode, we report the speed and duplex 4704 * settings that we configured.
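 * (with the ethtool(8) userland of this era that renders as something like "Speed: Unknown! (0)" and "Duplex: Unknown! (255)".)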
4705 */ 4706 if (cp->link_cntl & BMCR_ANENABLE) { 4707 cmd->speed = 0; 4708 cmd->duplex = 0xff; 4709 } else { 4710 cmd->speed = SPEED_10; 4711 if (cp->link_cntl & BMCR_SPEED100) { 4712 cmd->speed = SPEED_100; 4713 } else if (cp->link_cntl & CAS_BMCR_SPEED1000) { 4714 cmd->speed = SPEED_1000; 4715 } 4716 cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)? 4717 DUPLEX_FULL : DUPLEX_HALF; 4718 } 4719 } 4720 return 0; 4721} 4722 4723static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 4724{ 4725 struct cas *cp = netdev_priv(dev); 4726 unsigned long flags; 4727 4728 /* Verify the settings we care about. */ 4729 if (cmd->autoneg != AUTONEG_ENABLE && 4730 cmd->autoneg != AUTONEG_DISABLE) 4731 return -EINVAL; 4732 4733 if (cmd->autoneg == AUTONEG_DISABLE && 4734 ((cmd->speed != SPEED_1000 && 4735 cmd->speed != SPEED_100 && 4736 cmd->speed != SPEED_10) || 4737 (cmd->duplex != DUPLEX_HALF && 4738 cmd->duplex != DUPLEX_FULL))) 4739 return -EINVAL; 4740 4741 /* Apply settings and restart link process. */ 4742 spin_lock_irqsave(&cp->lock, flags); 4743 cas_begin_auto_negotiation(cp, cmd); 4744 spin_unlock_irqrestore(&cp->lock, flags); 4745 return 0; 4746} 4747 4748static int cas_nway_reset(struct net_device *dev) 4749{ 4750 struct cas *cp = netdev_priv(dev); 4751 unsigned long flags; 4752 4753 if ((cp->link_cntl & BMCR_ANENABLE) == 0) 4754 return -EINVAL; 4755 4756 /* Restart link process. */ 4757 spin_lock_irqsave(&cp->lock, flags); 4758 cas_begin_auto_negotiation(cp, NULL); 4759 spin_unlock_irqrestore(&cp->lock, flags); 4760 4761 return 0; 4762} 4763 4764static u32 cas_get_link(struct net_device *dev) 4765{ 4766 struct cas *cp = netdev_priv(dev); 4767 return cp->lstate == link_up; 4768} 4769 4770static u32 cas_get_msglevel(struct net_device *dev) 4771{ 4772 struct cas *cp = netdev_priv(dev); 4773 return cp->msg_enable; 4774} 4775 4776static void cas_set_msglevel(struct net_device *dev, u32 value) 4777{ 4778 struct cas *cp = netdev_priv(dev); 4779 cp->msg_enable = value; 4780} 4781 4782static int cas_get_regs_len(struct net_device *dev) 4783{ 4784 struct cas *cp = netdev_priv(dev); 4785 return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len: CAS_MAX_REGS; 4786} 4787 4788static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs, 4789 void *p) 4790{ 4791 struct cas *cp = netdev_priv(dev); 4792 regs->version = 0; 4793 /* cas_read_regs handles locks (cp->lock). 
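 * (the ethtool core also clamps regs->len to the value returned by cas_get_regs_len(), so the loop below cannot run past ethtool_register_table; this is how ethtool_get_regs behaved at this kernel vintage.)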
*/ 4794 cas_read_regs(cp, p, regs->len / sizeof(u32)); 4795} 4796 4797static int cas_get_sset_count(struct net_device *dev, int sset) 4798{ 4799 switch (sset) { 4800 case ETH_SS_STATS: 4801 return CAS_NUM_STAT_KEYS; 4802 default: 4803 return -EOPNOTSUPP; 4804 } 4805} 4806 4807static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data) 4808{ 4809 memcpy(data, &ethtool_cassini_statnames, 4810 CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN); 4811} 4812 4813static void cas_get_ethtool_stats(struct net_device *dev, 4814 struct ethtool_stats *estats, u64 *data) 4815{ 4816 struct cas *cp = netdev_priv(dev); 4817 struct net_device_stats *stats = cas_get_stats(cp->dev); 4818 int i = 0; 4819 data[i++] = stats->collisions; 4820 data[i++] = stats->rx_bytes; 4821 data[i++] = stats->rx_crc_errors; 4822 data[i++] = stats->rx_dropped; 4823 data[i++] = stats->rx_errors; 4824 data[i++] = stats->rx_fifo_errors; 4825 data[i++] = stats->rx_frame_errors; 4826 data[i++] = stats->rx_length_errors; 4827 data[i++] = stats->rx_over_errors; 4828 data[i++] = stats->rx_packets; 4829 data[i++] = stats->tx_aborted_errors; 4830 data[i++] = stats->tx_bytes; 4831 data[i++] = stats->tx_dropped; 4832 data[i++] = stats->tx_errors; 4833 data[i++] = stats->tx_fifo_errors; 4834 data[i++] = stats->tx_packets; 4835 BUG_ON(i != CAS_NUM_STAT_KEYS); 4836} 4837 4838static const struct ethtool_ops cas_ethtool_ops = { 4839 .get_drvinfo = cas_get_drvinfo, 4840 .get_settings = cas_get_settings, 4841 .set_settings = cas_set_settings, 4842 .nway_reset = cas_nway_reset, 4843 .get_link = cas_get_link, 4844 .get_msglevel = cas_get_msglevel, 4845 .set_msglevel = cas_set_msglevel, 4846 .get_regs_len = cas_get_regs_len, 4847 .get_regs = cas_get_regs, 4848 .get_sset_count = cas_get_sset_count, 4849 .get_strings = cas_get_strings, 4850 .get_ethtool_stats = cas_get_ethtool_stats, 4851}; 4852 4853static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 4854{ 4855 struct cas *cp = netdev_priv(dev); 4856 struct mii_ioctl_data *data = if_mii(ifr); 4857 unsigned long flags; 4858 int rc = -EOPNOTSUPP; 4859 4860 /* Hold the PM mutex while doing ioctls or we may collide 4861 * with open/close and power management and oops. 4862 */ 4863 mutex_lock(&cp->pm_mutex); 4864 switch (cmd) { 4865 case SIOCGMIIPHY: /* Get address of MII PHY in use. */ 4866 data->phy_id = cp->phy_addr; 4867 /* Fallthrough... */ 4868 4869 case SIOCGMIIREG: /* Read MII PHY register. */ 4870 spin_lock_irqsave(&cp->lock, flags); 4871 cas_mif_poll(cp, 0); 4872 data->val_out = cas_phy_read(cp, data->reg_num & 0x1f); 4873 cas_mif_poll(cp, 1); 4874 spin_unlock_irqrestore(&cp->lock, flags); 4875 rc = 0; 4876 break; 4877 4878 case SIOCSMIIREG: /* Write MII PHY register. */ 4879 if (!capable(CAP_NET_ADMIN)) { 4880 rc = -EPERM; 4881 break; 4882 } 4883 spin_lock_irqsave(&cp->lock, flags); 4884 cas_mif_poll(cp, 0); 4885 rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in); 4886 cas_mif_poll(cp, 1); 4887 spin_unlock_irqrestore(&cp->lock, flags); 4888 break; 4889 default: 4890 break; 4891 } 4892 4893 mutex_unlock(&cp->pm_mutex); 4894 return rc; 4895} 4896 4897/* When this chip sits underneath an Intel 31154 bridge, it is the 4898 * only subordinate device and we can tweak the bridge settings to 4899 * reflect that fact.
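 * (the vendor/device check below, 0x8086/0x537c, is the 31154's PCI ID; under any other parent bridge this function is a no-op.)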
4900 */ 4901static void __devinit cas_program_bridge(struct pci_dev *cas_pdev) 4902{ 4903 struct pci_dev *pdev = cas_pdev->bus->self; 4904 u32 val; 4905 4906 if (!pdev) 4907 return; 4908 4909 if (pdev->vendor != 0x8086 || pdev->device != 0x537c) 4910 return; 4911 4912 /* Clear bit 10 (Bus Parking Control) in the Secondary 4913 * Arbiter Control/Status Register which lives at offset 4914 * 0x41. Using a 32-bit word read/modify/write at 0x40 4915 * is much simpler so that's how we do this. 4916 */ 4917 pci_read_config_dword(pdev, 0x40, &val); 4918 val &= ~0x00040000; 4919 pci_write_config_dword(pdev, 0x40, val); 4920 4921 /* Max out the Multi-Transaction Timer settings since 4922 * Cassini is the only device present. 4923 * 4924 * The register is 16-bit and lives at 0x50. When the 4925 * settings are enabled, it extends the GRANT# signal 4926 * for a requestor after a transaction is complete. This 4927 * allows the next request to run without first needing 4928 * to negotiate the GRANT# signal back. 4929 * 4930 * Bits 12:10 define the grant duration: 4931 * 4932 * 1 -- 16 clocks 4933 * 2 -- 32 clocks 4934 * 3 -- 64 clocks 4935 * 4 -- 128 clocks 4936 * 5 -- 256 clocks 4937 * 4938 * All other values are illegal. 4939 * 4940 * Bits 09:00 define which REQ/GNT signal pairs get the 4941 * GRANT# signal treatment. We set them all. 4942 */ 4943 pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff); 4944 4945 /* The Read Prefetch Policy register is 16-bit and sits at 4946 * offset 0x52. It enables a "smart" pre-fetch policy. We 4947 * enable it and max out all of the settings since only one 4948 * device is sitting underneath and thus bandwidth sharing is 4949 * not an issue. 4950 * 4951 * The register has several 3 bit fields, which indicate a 4952 * multiplier applied to the base amount of prefetching the 4953 * chip would do. These fields are at: 4954 * 4955 * 15:13 --- ReRead Primary Bus 4956 * 12:10 --- FirstRead Primary Bus 4957 * 09:07 --- ReRead Secondary Bus 4958 * 06:04 --- FirstRead Secondary Bus 4959 * 4960 * Bits 03:00 control which REQ/GNT pairs the prefetch settings 4961 * get enabled on. Bit 3 is a grouped enabler which controls 4962 * all of the REQ/GNT pairs from [8:3]. Bits 2 to 0 control 4963 * the individual REQ/GNT pairs [2:0]. 4964 */ 4965 pci_write_config_word(pdev, 0x52, 4966 (0x7 << 13) | 4967 (0x7 << 10) | 4968 (0x7 << 7) | 4969 (0x7 << 4) | 4970 (0xf << 0)); 4971 4972 /* Force cacheline size to 0x8 */ 4973 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08); 4974 4975 /* Force latency timer to maximum setting so Cassini can 4976 * sit on the bus as long as it likes.
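 * (0xff is the architectural maximum of the 8-bit PCI latency timer, i.e. 255 PCI clocks.)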
4977 */ 4978 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff); 4979} 4980 4981static int __devinit cas_init_one(struct pci_dev *pdev, 4982 const struct pci_device_id *ent) 4983{ 4984 static int cas_version_printed = 0; 4985 unsigned long casreg_len; 4986 struct net_device *dev; 4987 struct cas *cp; 4988 int i, err, pci_using_dac; 4989 u16 pci_cmd; 4990 u8 orig_cacheline_size = 0, cas_cacheline_size = 0; 4991 DECLARE_MAC_BUF(mac); 4992 4993 if (cas_version_printed++ == 0) 4994 printk(KERN_INFO "%s", version); 4995 4996 err = pci_enable_device(pdev); 4997 if (err) { 4998 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n"); 4999 return err; 5000 } 5001 5002 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 5003 dev_err(&pdev->dev, "Cannot find proper PCI device " 5004 "base address, aborting.\n"); 5005 err = -ENODEV; 5006 goto err_out_disable_pdev; 5007 } 5008 5009 dev = alloc_etherdev(sizeof(*cp)); 5010 if (!dev) { 5011 dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n"); 5012 err = -ENOMEM; 5013 goto err_out_disable_pdev; 5014 } 5015 SET_NETDEV_DEV(dev, &pdev->dev); 5016 5017 err = pci_request_regions(pdev, dev->name); 5018 if (err) { 5019 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n"); 5020 goto err_out_free_netdev; 5021 } 5022 pci_set_master(pdev); 5023 5024 /* we must always turn on parity response or else parity 5025 * doesn't get generated properly. disable SERR/PERR as well. 5026 * in addition, we want to turn MWI on. 5027 */ 5028 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); 5029 pci_cmd &= ~PCI_COMMAND_SERR; 5030 pci_cmd |= PCI_COMMAND_PARITY; 5031 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); 5032 if (pci_try_set_mwi(pdev)) 5033 printk(KERN_WARNING PFX "Could not enable MWI for %s\n", 5034 pci_name(pdev)); 5035 5036 cas_program_bridge(pdev); 5037 5038 /* 5039 * On some architectures, the default cache line size set 5040 * by pci_try_set_mwi reduces performance. We have to increase 5041 * it for this case. To start, we'll print some configuration 5042 * data. 5043 */ 5044#if 1 5045 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, 5046 &orig_cacheline_size); 5047 if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) { 5048 cas_cacheline_size = 5049 (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ? 5050 CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES; 5051 if (pci_write_config_byte(pdev, 5052 PCI_CACHE_LINE_SIZE, 5053 cas_cacheline_size)) { 5054 dev_err(&pdev->dev, "Could not set PCI cache " 5055 "line size\n"); 5056 goto err_write_cacheline; 5057 } 5058 } 5059#endif 5060 5061 5062 /* Configure DMA attributes. */ 5063 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 5064 pci_using_dac = 1; 5065 err = pci_set_consistent_dma_mask(pdev, 5066 DMA_64BIT_MASK); 5067 if (err < 0) { 5068 dev_err(&pdev->dev, "Unable to obtain 64-bit DMA " 5069 "for consistent allocations\n"); 5070 goto err_out_free_res; 5071 } 5072 5073 } else { 5074 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 5075 if (err) { 5076 dev_err(&pdev->dev, "No usable DMA configuration, " 5077 "aborting.\n"); 5078 goto err_out_free_res; 5079 } 5080 pci_using_dac = 0; 5081 } 5082 5083 casreg_len = pci_resource_len(pdev, 0); 5084 5085 cp = netdev_priv(dev); 5086 cp->pdev = pdev; 5087#if 1 5088 /* A value of 0 indicates we never explicitly set it */ 5089 cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size: 0; 5090#endif 5091 cp->dev = dev; 5092 cp->msg_enable = (cassini_debug < 0) ?
CAS_DEF_MSG_ENABLE : 5093 cassini_debug; 5094 5095 cp->link_transition = LINK_TRANSITION_UNKNOWN; 5096 cp->link_transition_jiffies_valid = 0; 5097 5098 spin_lock_init(&cp->lock); 5099 spin_lock_init(&cp->rx_inuse_lock); 5100 spin_lock_init(&cp->rx_spare_lock); 5101 for (i = 0; i < N_TX_RINGS; i++) { 5102 spin_lock_init(&cp->stat_lock[i]); 5103 spin_lock_init(&cp->tx_lock[i]); 5104 } 5105 spin_lock_init(&cp->stat_lock[N_TX_RINGS]); 5106 mutex_init(&cp->pm_mutex); 5107 5108 init_timer(&cp->link_timer); 5109 cp->link_timer.function = cas_link_timer; 5110 cp->link_timer.data = (unsigned long) cp; 5111 5112#if 1 5113 /* Just in case the implementation of atomic operations 5114 * changes so that an explicit initialization is necessary. 5115 */ 5116 atomic_set(&cp->reset_task_pending, 0); 5117 atomic_set(&cp->reset_task_pending_all, 0); 5118 atomic_set(&cp->reset_task_pending_spare, 0); 5119 atomic_set(&cp->reset_task_pending_mtu, 0); 5120#endif 5121 INIT_WORK(&cp->reset_task, cas_reset_task); 5122 5123 /* Default link parameters */ 5124 if (link_mode >= 0 && link_mode <= 6) 5125 cp->link_cntl = link_modes[link_mode]; 5126 else 5127 cp->link_cntl = BMCR_ANENABLE; 5128 cp->lstate = link_down; 5129 cp->link_transition = LINK_TRANSITION_LINK_DOWN; 5130 netif_carrier_off(cp->dev); 5131 cp->timer_ticks = 0; 5132 5133 /* give us access to cassini registers */ 5134 cp->regs = pci_iomap(pdev, 0, casreg_len); 5135 if (!cp->regs) { 5136 dev_err(&pdev->dev, "Cannot map device registers, aborting.\n"); 5137 goto err_out_free_res; 5138 } 5139 cp->casreg_len = casreg_len; 5140 5141 pci_save_state(pdev); 5142 cas_check_pci_invariants(cp); 5143 cas_hard_reset(cp); 5144 cas_reset(cp, 0); 5145 if (cas_check_invariants(cp)) 5146 goto err_out_iounmap; 5147 if (cp->cas_flags & CAS_FLAG_SATURN) 5148 if (cas_saturn_firmware_init(cp)) 5149 goto err_out_iounmap; 5150 5151 cp->init_block = (struct cas_init_block *) 5152 pci_alloc_consistent(pdev, sizeof(struct cas_init_block), 5153 &cp->block_dvma); 5154 if (!cp->init_block) { 5155 dev_err(&pdev->dev, "Cannot allocate init block, aborting.\n"); 5156 goto err_out_iounmap; 5157 } 5158 5159 for (i = 0; i < N_TX_RINGS; i++) 5160 cp->init_txds[i] = cp->init_block->txds[i]; 5161 5162 for (i = 0; i < N_RX_DESC_RINGS; i++) 5163 cp->init_rxds[i] = cp->init_block->rxds[i]; 5164 5165 for (i = 0; i < N_RX_COMP_RINGS; i++) 5166 cp->init_rxcs[i] = cp->init_block->rxcs[i]; 5167 5168 for (i = 0; i < N_RX_FLOWS; i++) 5169 skb_queue_head_init(&cp->rx_flows[i]); 5170 5171 dev->open = cas_open; 5172 dev->stop = cas_close; 5173 dev->hard_start_xmit = cas_start_xmit; 5174 dev->get_stats = cas_get_stats; 5175 dev->set_multicast_list = cas_set_multicast; 5176 dev->do_ioctl = cas_ioctl; 5177 dev->ethtool_ops = &cas_ethtool_ops; 5178 dev->tx_timeout = cas_tx_timeout; 5179 dev->watchdog_timeo = CAS_TX_TIMEOUT; 5180 dev->change_mtu = cas_change_mtu; 5181#ifdef USE_NAPI 5182 netif_napi_add(dev, &cp->napi, cas_poll, 64); 5183#endif 5184#ifdef CONFIG_NET_POLL_CONTROLLER 5185 dev->poll_controller = cas_netpoll; 5186#endif 5187 dev->irq = pdev->irq; 5188 dev->dma = 0; 5189 5190 /* Cassini features.
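 * hardware checksumming and scatter-gather are advertised below only when the chip revision is not flagged with the checksum bug (CAS_FLAG_NO_HW_CSUM).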
*/ 5191 if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0) 5192 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; 5193 5194 if (pci_using_dac) 5195 dev->features |= NETIF_F_HIGHDMA; 5196 5197 if (register_netdev(dev)) { 5198 dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); 5199 goto err_out_free_consistent; 5200 } 5201 5202 i = readl(cp->regs + REG_BIM_CFG); 5203 printk(KERN_INFO "%s: Sun Cassini%s (%sbit/%sMHz PCI/%s) " 5204 "Ethernet[%d] %s\n", dev->name, 5205 (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "", 5206 (i & BIM_CFG_32BIT) ? "32" : "64", 5207 (i & BIM_CFG_66MHZ) ? "66" : "33", 5208 (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq, 5209 print_mac(mac, dev->dev_addr)); 5210 5211 pci_set_drvdata(pdev, dev); 5212 cp->hw_running = 1; 5213 cas_entropy_reset(cp); 5214 cas_phy_init(cp); 5215 cas_begin_auto_negotiation(cp, NULL); 5216 return 0; 5217 5218err_out_free_consistent: 5219 pci_free_consistent(pdev, sizeof(struct cas_init_block), 5220 cp->init_block, cp->block_dvma); 5221 5222err_out_iounmap: 5223 mutex_lock(&cp->pm_mutex); 5224 if (cp->hw_running) 5225 cas_shutdown(cp); 5226 mutex_unlock(&cp->pm_mutex); 5227 5228 pci_iounmap(pdev, cp->regs); 5229 5230 5231err_out_free_res: 5232 pci_release_regions(pdev); 5233 5234err_write_cacheline: 5235 /* Try to restore it in case the error occurred after we 5236 * set it. 5237 */ 5238 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size); 5239 5240err_out_free_netdev: 5241 free_netdev(dev); 5242 5243err_out_disable_pdev: 5244 pci_disable_device(pdev); 5245 pci_set_drvdata(pdev, NULL); 5246 return -ENODEV; 5247} 5248 5249static void __devexit cas_remove_one(struct pci_dev *pdev) 5250{ 5251 struct net_device *dev = pci_get_drvdata(pdev); 5252 struct cas *cp; 5253 if (!dev) 5254 return; 5255 5256 cp = netdev_priv(dev); 5257 unregister_netdev(dev); 5258 5259 if (cp->fw_data) 5260 vfree(cp->fw_data); 5261 5262 mutex_lock(&cp->pm_mutex); 5263 flush_scheduled_work(); 5264 if (cp->hw_running) 5265 cas_shutdown(cp); 5266 mutex_unlock(&cp->pm_mutex); 5267 5268#if 1 5269 if (cp->orig_cacheline_size) { 5270 /* Restore the cache line size if we had modified 5271 * it. 5272 */ 5273 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 5274 cp->orig_cacheline_size); 5275 } 5276#endif 5277 pci_free_consistent(pdev, sizeof(struct cas_init_block), 5278 cp->init_block, cp->block_dvma); 5279 pci_iounmap(pdev, cp->regs); 5280 free_netdev(dev); 5281 pci_release_regions(pdev); 5282 pci_disable_device(pdev); 5283 pci_set_drvdata(pdev, NULL); 5284} 5285 5286#ifdef CONFIG_PM 5287static int cas_suspend(struct pci_dev *pdev, pm_message_t state) 5288{ 5289 struct net_device *dev = pci_get_drvdata(pdev); 5290 struct cas *cp = netdev_priv(dev); 5291 unsigned long flags; 5292 5293 mutex_lock(&cp->pm_mutex); 5294 5295 /* If the driver is opened, we stop the DMA */ 5296 if (cp->opened) { 5297 netif_device_detach(dev); 5298 5299 cas_lock_all_save(cp, flags); 5300 5301 /* We can set the second arg of cas_reset to 0 5302 * because on resume, we'll call cas_init_hw with 5303 * its second arg set so that autonegotiation is 5304 * restarted.
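 * (see cas_resume below: it calls cas_init_hw(cp, 1) after rebuilding the rings, which kicks off autonegotiation again.)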
5305 */ 5306 cas_reset(cp, 0); 5307 cas_clean_rings(cp); 5308 cas_unlock_all_restore(cp, flags); 5309 } 5310 5311 if (cp->hw_running) 5312 cas_shutdown(cp); 5313 mutex_unlock(&cp->pm_mutex); 5314 5315 return 0; 5316} 5317 5318static int cas_resume(struct pci_dev *pdev) 5319{ 5320 struct net_device *dev = pci_get_drvdata(pdev); 5321 struct cas *cp = netdev_priv(dev); 5322 5323 printk(KERN_INFO "%s: resuming\n", dev->name); 5324 5325 mutex_lock(&cp->pm_mutex); 5326 cas_hard_reset(cp); 5327 if (cp->opened) { 5328 unsigned long flags; 5329 cas_lock_all_save(cp, flags); 5330 cas_reset(cp, 0); 5331 cp->hw_running = 1; 5332 cas_clean_rings(cp); 5333 cas_init_hw(cp, 1); 5334 cas_unlock_all_restore(cp, flags); 5335 5336 netif_device_attach(dev); 5337 } 5338 mutex_unlock(&cp->pm_mutex); 5339 return 0; 5340} 5341#endif /* CONFIG_PM */ 5342 5343static struct pci_driver cas_driver = { 5344 .name = DRV_MODULE_NAME, 5345 .id_table = cas_pci_tbl, 5346 .probe = cas_init_one, 5347 .remove = __devexit_p(cas_remove_one), 5348#ifdef CONFIG_PM 5349 .suspend = cas_suspend, 5350 .resume = cas_resume 5351#endif 5352}; 5353 5354static int __init cas_init(void) 5355{ 5356 if (linkdown_timeout > 0) 5357 link_transition_timeout = linkdown_timeout * HZ; 5358 else 5359 link_transition_timeout = 0; 5360 5361 return pci_register_driver(&cas_driver); 5362} 5363 5364static void __exit cas_cleanup(void) 5365{ 5366 pci_unregister_driver(&cas_driver); 5367} 5368 5369module_init(cas_init); 5370module_exit(cas_cleanup);