cassini.c (Linux kernel mirror, git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, at v2.6.19-rc3)
/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
 *
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 *
 * This driver uses the sungem driver (c) David Miller
 * (davem@redhat.com) as its basis.
 *
 * The cassini chip has a number of features that distinguish it from
 * the gem chip:
 *  4 transmit descriptor rings that are used for either QoS (VLAN) or
 *      load balancing (non-VLAN mode)
 *  batching of multiple packets
 *  multiple CPU dispatching
 *  page-based RX descriptor engine with separate completion rings
 *  Gigabit support (GMII and PCS interface)
 *  MIF link up/down detection works
 *
 * RX is handled by page sized buffers that are attached as fragments to
 * the skb. here's what's done:
 *  -- driver allocates pages at a time and keeps reference counts
 *     on them.
 *  -- the upper protocol layers assume that the header is in the skb
 *     itself. as a result, cassini will copy a small amount (64 bytes)
 *     to make them happy.
 *  -- driver appends the rest of the data pages as frags to skbuffs
 *     and increments the reference count
 *  -- on page reclamation, the driver swaps the page with a spare page.
 *     if that page is still in use, it frees its reference to that page,
 *     and allocates a new page for use. otherwise, it just recycles the
 *     page.
 *
 * NOTE: cassini can parse the header. however, it's not worth it
 *       as long as the network stack requires a header copy.
 *
 * TX has 4 queues. currently these queues are used in a round-robin
 * fashion for load balancing. They can also be used for QoS. for that
 * to work, however, QoS information needs to be exposed down to the driver
 * level so that subqueues get targeted to particular transmit rings.
 * alternatively, the queues can be configured via use of the all-purpose
 * ioctl.
 *
 * RX DATA: the rx completion ring has all the info, but the rx desc
 * ring has all of the data. RX can conceivably come in under multiple
 * interrupts, but the INT# assignment needs to be set up properly by
 * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
 * that. also, the two descriptor rings are designed to distinguish between
 * encrypted and non-encrypted packets, but we use them for buffering
 * instead.
 *
 * by default, the selective clear mask is set up to process rx packets.
 */


#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/mii.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mutex.h>

#include <net/checksum.h>

#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#define cas_page_map(x)      kmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
#define cas_page_unmap(x)    kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
#define CAS_NCPUS            num_online_cpus()

#if defined(CONFIG_CASSINI_NAPI) && defined(HAVE_NETDEV_POLL)
#define USE_NAPI
#define cas_skb_release(x)   netif_receive_skb(x)
#else
#define cas_skb_release(x)   netif_rx(x)
#endif

/* select which firmware to use */
#define USE_HP_WORKAROUND
#define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */
#define CAS_HP_ALT_FIRMWARE   cas_prog_null /* alternate firmware */

#include "cassini.h"

#define USE_TX_COMPWB      /* use completion writeback registers */
#define USE_CSMA_CD_PROTO  /* standard CSMA/CD */
#define USE_RX_BLANK       /* hw interrupt mitigation */
#undef USE_ENTROPY_DEV     /* don't test for entropy device */

/* NOTE: these aren't usable unless PCI interrupts can be assigned.
 * also, we need to make cp->lock finer-grained.
 */
#undef  USE_PCI_INTB
#undef  USE_PCI_INTC
#undef  USE_PCI_INTD
#undef  USE_QOS

#undef  USE_VPD_DEBUG       /* debug vpd information if defined */

/* rx processing options */
#define USE_PAGE_ORDER      /* specify to allocate large rx pages */
#define RX_DONT_BATCH  0    /* if 1, don't batch flows */
#define RX_COPY_ALWAYS 0    /* if 0, use frags */
#define RX_COPY_MIN    64   /* copy a little to make upper layers happy */
#undef  RX_COUNT_BUFFERS    /* define to calculate RX buffer stats */

#define DRV_MODULE_NAME         "cassini"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "1.4"
#define DRV_MODULE_RELDATE      "1 July 2004"

#define CAS_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define CAS_TX_TIMEOUT                  (HZ)
#define CAS_LINK_TIMEOUT                (22*HZ/10)
#define CAS_LINK_FAST_TIMEOUT           (1)

/* timeout values for state changing. these specify the number
 * of 10us delays to be used before giving up.
 */
#define STOP_TRIES_PHY 1000
#define STOP_TRIES     5000
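/* worked out: at 10us per poll, STOP_TRIES_PHY bounds a phy operation
 * at roughly 10ms and STOP_TRIES bounds a state change at roughly 50ms.
 */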

/* specify a minimum frame size to deal with some fifo issues
 * max mtu == 2 * page size - ethernet header - 64 - swivel =
 *            2 * page_size - 0x50
 */
#define CAS_MIN_FRAME                   97
#define CAS_1000MB_MIN_FRAME            255
#define CAS_MIN_MTU                     60
#define CAS_MAX_MTU                     min(((cp->page_size << 1) - 0x50), 9000)

#if 1
/*
 * Eliminate these and use separate atomic counters for each, to
 * avoid a race condition.
 */
#else
#define CAS_RESET_MTU                   1
#define CAS_RESET_ALL                   2
#define CAS_RESET_SPARE                 3
#endif

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

static int cassini_debug = -1;  /* -1 == use CAS_DEF_MSG_ENABLE as value */
static int link_mode;

MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
MODULE_LICENSE("GPL");
module_param(cassini_debug, int, 0);
MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
module_param(link_mode, int, 0);
MODULE_PARM_DESC(link_mode, "default link mode");

/*
 * Work around for a PCS bug in which the link goes down due to the chip
 * being confused and never showing a link status of "up."
 */
#define DEFAULT_LINKDOWN_TIMEOUT 5
/*
 * Value in seconds, for user input.
 */
static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
module_param(linkdown_timeout, int, 0);
MODULE_PARM_DESC(linkdown_timeout,
"min reset interval in sec. for PCS linkdown issue; disabled if not positive");

/*
 * value in 'ticks' (units used by jiffies). Set when we init the
 * module because 'HZ' is actually a function call on some flavors of
 * Linux. This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ.
 */
static int link_transition_timeout;


static u16 link_modes[] __devinitdata = {
        BMCR_ANENABLE,                   /* 0 : autoneg */
        0,                               /* 1 : 10bt half duplex */
        BMCR_SPEED100,                   /* 2 : 100bt half duplex */
        BMCR_FULLDPLX,                   /* 3 : 10bt full duplex */
        BMCR_SPEED100|BMCR_FULLDPLX,     /* 4 : 100bt full duplex */
        CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
};

static struct pci_device_id cas_pci_tbl[] __devinitdata = {
        { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { 0, }
};

MODULE_DEVICE_TABLE(pci, cas_pci_tbl);

static void cas_set_link_modes(struct cas *cp);

static inline void cas_lock_tx(struct cas *cp)
{
        int i;

        for (i = 0; i < N_TX_RINGS; i++)
                spin_lock(&cp->tx_lock[i]);
}

static inline void cas_lock_all(struct cas *cp)
{
        spin_lock_irq(&cp->lock);
        cas_lock_tx(cp);
}
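/* lock ordering note: cp->lock is always taken before the per-ring tx
 * locks, and cas_lock_tx() acquires tx_lock[0..N_TX_RINGS-1] in
 * ascending order while cas_unlock_tx() releases in descending order,
 * so every path sees a consistent ordering.
 */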

/* WTZ: QA was finding deadlock problems with the previous
 * versions after long test runs with multiple cards per machine.
 * See if replacing cas_lock_all with safer versions helps. The
 * symptoms QA is reporting match those we'd expect if interrupts
 * aren't being properly restored, and we fixed a previous deadlock
 * with similar symptoms by using save/restore versions in other
 * places.
 */
#define cas_lock_all_save(cp, flags) \
do { \
        struct cas *xxxcp = (cp); \
        spin_lock_irqsave(&xxxcp->lock, flags); \
        cas_lock_tx(xxxcp); \
} while (0)

static inline void cas_unlock_tx(struct cas *cp)
{
        int i;

        for (i = N_TX_RINGS; i > 0; i--)
                spin_unlock(&cp->tx_lock[i - 1]);
}

static inline void cas_unlock_all(struct cas *cp)
{
        cas_unlock_tx(cp);
        spin_unlock_irq(&cp->lock);
}

#define cas_unlock_all_restore(cp, flags) \
do { \
        struct cas *xxxcp = (cp); \
        cas_unlock_tx(xxxcp); \
        spin_unlock_irqrestore(&xxxcp->lock, flags); \
} while (0)

static void cas_disable_irq(struct cas *cp, const int ring)
{
        /* Make sure we won't get any more interrupts */
        if (ring == 0) {
                writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
                return;
        }

        /* disable completion interrupts and selectively mask */
        if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
                switch (ring) {
#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
                case 1:
#endif
#ifdef USE_PCI_INTC
                case 2:
#endif
#ifdef USE_PCI_INTD
                case 3:
#endif
                        writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
                               cp->regs + REG_PLUS_INTRN_MASK(ring));
                        break;
#endif
                default:
                        writel(INTRN_MASK_CLEAR_ALL, cp->regs +
                               REG_PLUS_INTRN_MASK(ring));
                        break;
                }
        }
}

static inline void cas_mask_intr(struct cas *cp)
{
        int i;

        for (i = 0; i < N_RX_COMP_RINGS; i++)
                cas_disable_irq(cp, i);
}

static inline void cas_buffer_init(cas_page_t *cp)
{
        struct page *page = cp->buffer;
        atomic_set((atomic_t *)&page->lru.next, 1);
}

static inline int cas_buffer_count(cas_page_t *cp)
{
        struct page *page = cp->buffer;
        return atomic_read((atomic_t *)&page->lru.next);
}

static inline void cas_buffer_inc(cas_page_t *cp)
{
        struct page *page = cp->buffer;
        atomic_inc((atomic_t *)&page->lru.next);
}

static inline void cas_buffer_dec(cas_page_t *cp)
{
        struct page *page = cp->buffer;
        atomic_dec((atomic_t *)&page->lru.next);
}
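/* the cas_buffer_* helpers above overload page->lru.next as an atomic
 * reference count. this works only because these pages are private to
 * the driver and never placed on an LRU list; the count starts at 1
 * (driver ownership) and gains one reference per skb fragment that
 * still points into the page.
 */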

static void cas_enable_irq(struct cas *cp, const int ring)
{
        if (ring == 0) { /* all but TX_DONE */
                writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
                return;
        }

        if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
                switch (ring) {
#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
                case 1:
#endif
#ifdef USE_PCI_INTC
                case 2:
#endif
#ifdef USE_PCI_INTD
                case 3:
#endif
                        writel(INTRN_MASK_RX_EN, cp->regs +
                               REG_PLUS_INTRN_MASK(ring));
                        break;
#endif
                default:
                        break;
                }
        }
}

static inline void cas_unmask_intr(struct cas *cp)
{
        int i;

        for (i = 0; i < N_RX_COMP_RINGS; i++)
                cas_enable_irq(cp, i);
}

static inline void cas_entropy_gather(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
        if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
                return;

        batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
                            readl(cp->regs + REG_ENTROPY_IV),
                            sizeof(uint64_t)*8);
#endif
}

static inline void cas_entropy_reset(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
        if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
                return;

        writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
               cp->regs + REG_BIM_LOCAL_DEV_EN);
        writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
        writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);

        /* if we read back 0x0, we don't have an entropy device */
        if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
                cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
#endif
}

/* access to the phy. the following assumes that we've initialized the MIF to
 * be in frame rather than bit-bang mode
 */
static u16 cas_phy_read(struct cas *cp, int reg)
{
        u32 cmd;
        int limit = STOP_TRIES_PHY;

        cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
        cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
        cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
        cmd |= MIF_FRAME_TURN_AROUND_MSB;
        writel(cmd, cp->regs + REG_MIF_FRAME);

        /* poll for completion */
        while (limit-- > 0) {
                udelay(10);
                cmd = readl(cp->regs + REG_MIF_FRAME);
                if (cmd & MIF_FRAME_TURN_AROUND_LSB)
                        return (cmd & MIF_FRAME_DATA_MASK);
        }
        return 0xFFFF; /* -1 */
}

static int cas_phy_write(struct cas *cp, int reg, u16 val)
{
        int limit = STOP_TRIES_PHY;
        u32 cmd;

        cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
        cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
        cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
        cmd |= MIF_FRAME_TURN_AROUND_MSB;
        cmd |= val & MIF_FRAME_DATA_MASK;
        writel(cmd, cp->regs + REG_MIF_FRAME);

        /* poll for completion */
        while (limit-- > 0) {
                udelay(10);
                cmd = readl(cp->regs + REG_MIF_FRAME);
                if (cmd & MIF_FRAME_TURN_AROUND_LSB)
                        return 0;
        }
        return -1;
}

static void cas_phy_powerup(struct cas *cp)
{
        u16 ctl = cas_phy_read(cp, MII_BMCR);

        if ((ctl & BMCR_PDOWN) == 0)
                return;
        ctl &= ~BMCR_PDOWN;
        cas_phy_write(cp, MII_BMCR, ctl);
}

static void cas_phy_powerdown(struct cas *cp)
{
        u16 ctl = cas_phy_read(cp, MII_BMCR);

        if (ctl & BMCR_PDOWN)
                return;
        ctl |= BMCR_PDOWN;
        cas_phy_write(cp, MII_BMCR, ctl);
}

/* cp->lock held. note: the last put_page will free the buffer */
static int cas_page_free(struct cas *cp, cas_page_t *page)
{
        pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
                       PCI_DMA_FROMDEVICE);
        cas_buffer_dec(page);
        __free_pages(page->buffer, cp->page_order);
        kfree(page);
        return 0;
}

#ifdef RX_COUNT_BUFFERS
#define RX_USED_ADD(x, y)       ((x)->used += (y))
#define RX_USED_SET(x, y)       ((x)->used  = (y))
#else
#define RX_USED_ADD(x, y)
#define RX_USED_SET(x, y)
#endif

/* local page allocation routines for the receive buffers. jumbo pages
 * require at least 8K contiguous and 8K aligned buffers.
 */
static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
{
        cas_page_t *page;

        page = kmalloc(sizeof(cas_page_t), flags);
        if (!page)
                return NULL;

        INIT_LIST_HEAD(&page->list);
        RX_USED_SET(page, 0);
        page->buffer = alloc_pages(flags, cp->page_order);
        if (!page->buffer)
                goto page_err;
        cas_buffer_init(page);
        page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
                                      cp->page_size, PCI_DMA_FROMDEVICE);
        return page;

page_err:
        kfree(page);
        return NULL;
}

/* initialize spare pool of rx buffers, but allocate during the open */
static void cas_spare_init(struct cas *cp)
{
        spin_lock(&cp->rx_inuse_lock);
        INIT_LIST_HEAD(&cp->rx_inuse_list);
        spin_unlock(&cp->rx_inuse_lock);

        spin_lock(&cp->rx_spare_lock);
        INIT_LIST_HEAD(&cp->rx_spare_list);
        cp->rx_spares_needed = RX_SPARE_COUNT;
        spin_unlock(&cp->rx_spare_lock);
}

/* used on close. free all the spare buffers. */
static void cas_spare_free(struct cas *cp)
{
        struct list_head list, *elem, *tmp;

        /* free spare buffers */
        INIT_LIST_HEAD(&list);
        spin_lock(&cp->rx_spare_lock);
        list_splice(&cp->rx_spare_list, &list);
        INIT_LIST_HEAD(&cp->rx_spare_list);
        spin_unlock(&cp->rx_spare_lock);
        list_for_each_safe(elem, tmp, &list) {
                cas_page_free(cp, list_entry(elem, cas_page_t, list));
        }

        INIT_LIST_HEAD(&list);
#if 1
        /*
         * Looks like Adrian had protected this with a different
         * lock than used everywhere else to manipulate this list.
         */
        spin_lock(&cp->rx_inuse_lock);
        list_splice(&cp->rx_inuse_list, &list);
        INIT_LIST_HEAD(&cp->rx_inuse_list);
        spin_unlock(&cp->rx_inuse_lock);
#else
        spin_lock(&cp->rx_spare_lock);
        list_splice(&cp->rx_inuse_list, &list);
        INIT_LIST_HEAD(&cp->rx_inuse_list);
        spin_unlock(&cp->rx_spare_lock);
#endif
        list_for_each_safe(elem, tmp, &list) {
                cas_page_free(cp, list_entry(elem, cas_page_t, list));
        }
}
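/* page lifecycle: buffers begin on rx_spare_list, are posted to the
 * rx descriptor ring, and are parked on rx_inuse_list once the chip
 * returns them while an skb fragment still holds a reference.
 * cas_spare_recover() below walks rx_inuse_list and reclaims any page
 * whose reference count has fallen back to 1.
 */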

/* replenish spares if needed */
static void cas_spare_recover(struct cas *cp, const gfp_t flags)
{
        struct list_head list, *elem, *tmp;
        int needed, i;

        /* check inuse list. if we don't need any more free buffers,
         * just free it
         */

        /* make a local copy of the list */
        INIT_LIST_HEAD(&list);
        spin_lock(&cp->rx_inuse_lock);
        list_splice(&cp->rx_inuse_list, &list);
        INIT_LIST_HEAD(&cp->rx_inuse_list);
        spin_unlock(&cp->rx_inuse_lock);

        list_for_each_safe(elem, tmp, &list) {
                cas_page_t *page = list_entry(elem, cas_page_t, list);

                if (cas_buffer_count(page) > 1)
                        continue;

                list_del(elem);
                spin_lock(&cp->rx_spare_lock);
                if (cp->rx_spares_needed > 0) {
                        list_add(elem, &cp->rx_spare_list);
                        cp->rx_spares_needed--;
                        spin_unlock(&cp->rx_spare_lock);
                } else {
                        spin_unlock(&cp->rx_spare_lock);
                        cas_page_free(cp, page);
                }
        }

        /* put any inuse buffers back on the list */
        if (!list_empty(&list)) {
                spin_lock(&cp->rx_inuse_lock);
                list_splice(&list, &cp->rx_inuse_list);
                spin_unlock(&cp->rx_inuse_lock);
        }

        spin_lock(&cp->rx_spare_lock);
        needed = cp->rx_spares_needed;
        spin_unlock(&cp->rx_spare_lock);
        if (!needed)
                return;

        /* we still need spares, so try to allocate some */
        INIT_LIST_HEAD(&list);
        i = 0;
        while (i < needed) {
                cas_page_t *spare = cas_page_alloc(cp, flags);
                if (!spare)
                        break;
                list_add(&spare->list, &list);
                i++;
        }

        spin_lock(&cp->rx_spare_lock);
        list_splice(&list, &cp->rx_spare_list);
        cp->rx_spares_needed -= i;
        spin_unlock(&cp->rx_spare_lock);
}

/* pull a page from the list. */
static cas_page_t *cas_page_dequeue(struct cas *cp)
{
        struct list_head *entry;
        int recover;

        spin_lock(&cp->rx_spare_lock);
        if (list_empty(&cp->rx_spare_list)) {
                /* try to do a quick recovery */
                spin_unlock(&cp->rx_spare_lock);
                cas_spare_recover(cp, GFP_ATOMIC);
                spin_lock(&cp->rx_spare_lock);
                if (list_empty(&cp->rx_spare_list)) {
                        if (netif_msg_rx_err(cp))
                                printk(KERN_ERR "%s: no spare buffers "
                                       "available.\n", cp->dev->name);
                        spin_unlock(&cp->rx_spare_lock);
                        return NULL;
                }
        }

        entry = cp->rx_spare_list.next;
        list_del(entry);
        recover = ++cp->rx_spares_needed;
        spin_unlock(&cp->rx_spare_lock);

        /* trigger the timer to do the recovery */
        if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
#if 1
                atomic_inc(&cp->reset_task_pending);
                atomic_inc(&cp->reset_task_pending_spare);
                schedule_work(&cp->reset_task);
#else
                atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
                schedule_work(&cp->reset_task);
#endif
        }
        return list_entry(entry, cas_page_t, list);
}
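/* note on cas_mif_poll() below: when polling is enabled the MIF mask
 * register is written with ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE), so
 * only link-status and autoneg-complete transitions raise the MIF
 * interrupt.
 */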

static void cas_mif_poll(struct cas *cp, const int enable)
{
        u32 cfg;

        cfg  = readl(cp->regs + REG_MIF_CFG);
        cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);

        if (cp->phy_type & CAS_PHY_MII_MDIO1)
                cfg |= MIF_CFG_PHY_SELECT;

        /* poll and interrupt on link status change. */
        if (enable) {
                cfg |= MIF_CFG_POLL_EN;
                cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
                cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
        }
        writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
               cp->regs + REG_MIF_MASK);
        writel(cfg, cp->regs + REG_MIF_CFG);
}

/* Must be invoked under cp->lock */
static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
{
        u16 ctl;
#if 1
        int lcntl;
        int changed = 0;
        int oldstate = cp->lstate;
        int link_was_not_down = !(oldstate == link_down);
#endif
        /* Setup link parameters */
        if (!ep)
                goto start_aneg;
        lcntl = cp->link_cntl;
        if (ep->autoneg == AUTONEG_ENABLE)
                cp->link_cntl = BMCR_ANENABLE;
        else {
                cp->link_cntl = 0;
                if (ep->speed == SPEED_100)
                        cp->link_cntl |= BMCR_SPEED100;
                else if (ep->speed == SPEED_1000)
                        cp->link_cntl |= CAS_BMCR_SPEED1000;
                if (ep->duplex == DUPLEX_FULL)
                        cp->link_cntl |= BMCR_FULLDPLX;
        }
#if 1
        changed = (lcntl != cp->link_cntl);
#endif
start_aneg:
        if (cp->lstate == link_up) {
                printk(KERN_INFO "%s: PCS link down.\n",
                       cp->dev->name);
        } else {
                if (changed) {
                        printk(KERN_INFO "%s: link configuration changed\n",
                               cp->dev->name);
                }
        }
        cp->lstate = link_down;
        cp->link_transition = LINK_TRANSITION_LINK_DOWN;
        if (!cp->hw_running)
                return;
#if 1
        /*
         * WTZ: If the old state was link_up, we turn off the carrier
         * to replicate everything we do elsewhere on a link-down
         * event when we were already in a link-up state.
         */
        if (oldstate == link_up)
                netif_carrier_off(cp->dev);
        if (changed && link_was_not_down) {
                /*
                 * WTZ: This branch will simply schedule a full reset after
                 * we explicitly changed link modes in an ioctl. See if this
                 * fixes the link-problems we were having for forced mode.
                 */
                atomic_inc(&cp->reset_task_pending);
                atomic_inc(&cp->reset_task_pending_all);
                schedule_work(&cp->reset_task);
                cp->timer_ticks = 0;
                mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
                return;
        }
#endif
        if (cp->phy_type & CAS_PHY_SERDES) {
                u32 val = readl(cp->regs + REG_PCS_MII_CTRL);

                if (cp->link_cntl & BMCR_ANENABLE) {
                        val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
                        cp->lstate = link_aneg;
                } else {
                        if (cp->link_cntl & BMCR_FULLDPLX)
                                val |= PCS_MII_CTRL_DUPLEX;
                        val &= ~PCS_MII_AUTONEG_EN;
                        cp->lstate = link_force_ok;
                }
                cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
                writel(val, cp->regs + REG_PCS_MII_CTRL);

        } else {
                cas_mif_poll(cp, 0);
                ctl = cas_phy_read(cp, MII_BMCR);
                ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
                         CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
                ctl |= cp->link_cntl;
                if (ctl & BMCR_ANENABLE) {
                        ctl |= BMCR_ANRESTART;
                        cp->lstate = link_aneg;
                } else {
                        cp->lstate = link_force_ok;
                }
                cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
                cas_phy_write(cp, MII_BMCR, ctl);
                cas_mif_poll(cp, 1);
        }

        cp->timer_ticks = 0;
        mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
}

/* Must be invoked under cp->lock. */
static int cas_reset_mii_phy(struct cas *cp)
{
        int limit = STOP_TRIES_PHY;
        u16 val;

        cas_phy_write(cp, MII_BMCR, BMCR_RESET);
        udelay(100);
        while (limit--) {
                val = cas_phy_read(cp, MII_BMCR);
                if ((val & BMCR_RESET) == 0)
                        break;
                udelay(10);
        }
        return (limit <= 0);
}

static void cas_saturn_firmware_load(struct cas *cp)
{
        cas_saturn_patch_t *patch = cas_saturn_patch;

        cas_phy_powerdown(cp);

        /* expanded memory access mode */
        cas_phy_write(cp, DP83065_MII_MEM, 0x0);

        /* pointer configuration for new firmware */
        cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
        cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
        cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
        cas_phy_write(cp, DP83065_MII_REGD, 0x82);
        cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
        cas_phy_write(cp, DP83065_MII_REGD, 0x0);
        cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
        cas_phy_write(cp, DP83065_MII_REGD, 0x39);

        /* download new firmware */
        cas_phy_write(cp, DP83065_MII_MEM, 0x1);
        cas_phy_write(cp, DP83065_MII_REGE, patch->addr);
        while (patch->addr) {
                cas_phy_write(cp, DP83065_MII_REGD, patch->val);
                patch++;
        }

        /* enable firmware */
        cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
        cas_phy_write(cp, DP83065_MII_REGD, 0x1);
}
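/* the download above uses the DP83065 expanded memory interface:
 * REGE selects the address and REGD carries the data, with the patch
 * table terminated by an entry whose addr field is zero. the address
 * is written once up front and evidently auto-increments as data
 * bytes are streamed through REGD.
 */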

/* phy initialization */
static void cas_phy_init(struct cas *cp)
{
        u16 val;

        /* if we're in MII/GMII mode, set up phy */
        if (CAS_PHY_MII(cp->phy_type)) {
                writel(PCS_DATAPATH_MODE_MII,
                       cp->regs + REG_PCS_DATAPATH_MODE);

                cas_mif_poll(cp, 0);
                cas_reset_mii_phy(cp); /* take out of isolate mode */

                if (PHY_LUCENT_B0 == cp->phy_id) {
                        /* workaround link up/down issue with lucent */
                        cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
                        cas_phy_write(cp, MII_BMCR, 0x00f1);
                        cas_phy_write(cp, LUCENT_MII_REG, 0x0);

                } else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
                        /* workarounds for broadcom phy */
                        cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
                        cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
                        cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
                        cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
                        cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
                        cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
                        cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
                        cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
                        cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
                        cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
                        cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);

                } else if (PHY_BROADCOM_5411 == cp->phy_id) {
                        val = cas_phy_read(cp, BROADCOM_MII_REG4);
                        val = cas_phy_read(cp, BROADCOM_MII_REG4);
                        if (val & 0x0080) {
                                /* link workaround */
                                cas_phy_write(cp, BROADCOM_MII_REG4,
                                              val & ~0x0080);
                        }

                } else if (cp->cas_flags & CAS_FLAG_SATURN) {
                        writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
                               SATURN_PCFG_FSI : 0x0,
                               cp->regs + REG_SATURN_PCFG);

                        /* load firmware to address 10Mbps auto-negotiation
                         * issue. NOTE: this will need to be changed if the
                         * default firmware gets fixed.
                         */
                        if (PHY_NS_DP83065 == cp->phy_id) {
                                cas_saturn_firmware_load(cp);
                        }
                        cas_phy_powerup(cp);
                }

                /* advertise capabilities */
                val = cas_phy_read(cp, MII_BMCR);
                val &= ~BMCR_ANENABLE;
                cas_phy_write(cp, MII_BMCR, val);
                udelay(10);

                cas_phy_write(cp, MII_ADVERTISE,
                              cas_phy_read(cp, MII_ADVERTISE) |
                              (ADVERTISE_10HALF | ADVERTISE_10FULL |
                               ADVERTISE_100HALF | ADVERTISE_100FULL |
                               CAS_ADVERTISE_PAUSE |
                               CAS_ADVERTISE_ASYM_PAUSE));

                if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
                        /* make sure that we don't advertise half
                         * duplex to avoid a chip issue
                         */
                        val  = cas_phy_read(cp, CAS_MII_1000_CTRL);
                        val &= ~CAS_ADVERTISE_1000HALF;
                        val |= CAS_ADVERTISE_1000FULL;
                        cas_phy_write(cp, CAS_MII_1000_CTRL, val);
                }

        } else {
                /* reset pcs for serdes */
                u32 val;
                int limit;

                writel(PCS_DATAPATH_MODE_SERDES,
                       cp->regs + REG_PCS_DATAPATH_MODE);

                /* enable serdes pins on saturn */
                if (cp->cas_flags & CAS_FLAG_SATURN)
                        writel(0, cp->regs + REG_SATURN_PCFG);

                /* Reset PCS unit. */
                val = readl(cp->regs + REG_PCS_MII_CTRL);
                val |= PCS_MII_RESET;
                writel(val, cp->regs + REG_PCS_MII_CTRL);

                limit = STOP_TRIES;
                while (limit-- > 0) {
                        udelay(10);
                        if ((readl(cp->regs + REG_PCS_MII_CTRL) &
                             PCS_MII_RESET) == 0)
                                break;
                }
                if (limit <= 0)
                        printk(KERN_WARNING "%s: PCS reset bit would not "
                               "clear [%08x].\n", cp->dev->name,
                               readl(cp->regs + REG_PCS_STATE_MACHINE));

                /* Make sure PCS is disabled while changing advertisement
                 * configuration.
                 */
                writel(0x0, cp->regs + REG_PCS_CFG);

                /* Advertise all capabilities except half-duplex. */
                val  = readl(cp->regs + REG_PCS_MII_ADVERT);
                val &= ~PCS_MII_ADVERT_HD;
                val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
                        PCS_MII_ADVERT_ASYM_PAUSE);
                writel(val, cp->regs + REG_PCS_MII_ADVERT);

                /* enable PCS */
                writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);

                /* pcs workaround: enable sync detect */
                writel(PCS_SERDES_CTRL_SYNCD_EN,
                       cp->regs + REG_PCS_SERDES_CTRL);
        }
}


static int cas_pcs_link_check(struct cas *cp)
{
        u32 stat, state_machine;
        int retval = 0;

        /* The link status bit latches on zero, so you must
         * read it twice in such a case to see a transition
         * to the link being up.
         */
        stat = readl(cp->regs + REG_PCS_MII_STATUS);
        if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
                stat = readl(cp->regs + REG_PCS_MII_STATUS);

        /* The remote-fault indication is only valid
         * when autoneg has completed.
         */
        if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
                     PCS_MII_STATUS_REMOTE_FAULT)) ==
            (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT)) {
                if (netif_msg_link(cp))
                        printk(KERN_INFO "%s: PCS RemoteFault\n",
                               cp->dev->name);
        }

        /* work around link detection issue by querying the PCS state
         * machine directly.
         */
        state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
        if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
                stat &= ~PCS_MII_STATUS_LINK_STATUS;
        } else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
                stat |= PCS_MII_STATUS_LINK_STATUS;
        }

        if (stat & PCS_MII_STATUS_LINK_STATUS) {
                if (cp->lstate != link_up) {
                        if (cp->opened) {
                                cp->lstate = link_up;
                                cp->link_transition = LINK_TRANSITION_LINK_UP;

                                cas_set_link_modes(cp);
                                netif_carrier_on(cp->dev);
                        }
                }
        } else if (cp->lstate == link_up) {
                cp->lstate = link_down;
                if (link_transition_timeout != 0 &&
                    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
                    !cp->link_transition_jiffies_valid) {
                        /*
                         * force a reset, as a workaround for the
                         * link-failure problem. May want to move this to a
                         * point a bit earlier in the sequence. If we had
                         * generated a reset a short time ago, we'll wait for
                         * the link timer to check the status until a
                         * timer expires (link_transition_jiffies_valid is
                         * true when the timer is running.) Instead of using
                         * a system timer, we just do a check whenever the
                         * link timer is running - this clears the flag after
                         * a suitable delay.
                         */
                        retval = 1;
                        cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
                        cp->link_transition_jiffies = jiffies;
                        cp->link_transition_jiffies_valid = 1;
                } else {
                        cp->link_transition = LINK_TRANSITION_ON_FAILURE;
                }
                netif_carrier_off(cp->dev);
                if (cp->opened && netif_msg_link(cp)) {
                        printk(KERN_INFO "%s: PCS link down.\n",
                               cp->dev->name);
                }

                /* Cassini only: if you force a mode, there can be
                 * sync problems on link down. to fix that, the following
                 * things need to be checked:
                 * 1) read serialink state register
                 * 2) read pcs status register to verify link down.
                 * 3) if link down and serial link == 0x03, then you need
                 *    to global reset the chip.
                 */
                if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
                        /* should check to see if we're in a forced mode */
                        stat = readl(cp->regs + REG_PCS_SERDES_STATE);
                        if (stat == 0x03)
                                return 1;
                }
        } else if (cp->lstate == link_down) {
                if (link_transition_timeout != 0 &&
                    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
                    !cp->link_transition_jiffies_valid) {
                        /* force a reset, as a workaround for the
                         * link-failure problem. May want to move
                         * this to a point a bit earlier in the
                         * sequence.
                         */
                        retval = 1;
                        cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
                        cp->link_transition_jiffies = jiffies;
                        cp->link_transition_jiffies_valid = 1;
                } else {
                        cp->link_transition = LINK_TRANSITION_STILL_FAILED;
                }
        }

        return retval;
}

static int cas_pcs_interrupt(struct net_device *dev,
                             struct cas *cp, u32 status)
{
        u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);

        if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
                return 0;
        return cas_pcs_link_check(cp);
}

static int cas_txmac_interrupt(struct net_device *dev,
                               struct cas *cp, u32 status)
{
        u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);

        if (!txmac_stat)
                return 0;

        if (netif_msg_intr(cp))
                printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
                        cp->dev->name, txmac_stat);

        /* Defer timer expiration is quite normal,
         * don't even log the event.
         */
        if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
            !(txmac_stat & ~MAC_TX_DEFER_TIMER))
                return 0;

        spin_lock(&cp->stat_lock[0]);
        if (txmac_stat & MAC_TX_UNDERRUN) {
                printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
                       dev->name);
                cp->net_stats[0].tx_fifo_errors++;
        }

        if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
                printk(KERN_ERR "%s: TX MAC max packet size error.\n",
                       dev->name);
                cp->net_stats[0].tx_errors++;
        }

        /* The rest are all cases of one of the 16-bit TX
         * counters expiring.
         */
        if (txmac_stat & MAC_TX_COLL_NORMAL)
                cp->net_stats[0].collisions += 0x10000;

        if (txmac_stat & MAC_TX_COLL_EXCESS) {
                cp->net_stats[0].tx_aborted_errors += 0x10000;
                cp->net_stats[0].collisions += 0x10000;
        }

        if (txmac_stat & MAC_TX_COLL_LATE) {
                cp->net_stats[0].tx_aborted_errors += 0x10000;
                cp->net_stats[0].collisions += 0x10000;
        }
        spin_unlock(&cp->stat_lock[0]);

        /* We do not keep track of MAC_TX_COLL_FIRST and
         * MAC_TX_PEAK_ATTEMPTS events.
         */
        return 0;
}

static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
{
        cas_hp_inst_t *inst;
        u32 val;
        int i;

        i = 0;
        while ((inst = firmware) && inst->note) {
                writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);

                val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
                val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
                writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);

                val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
                val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
                val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
                val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
                val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
                val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
                val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
                writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);

                val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
                val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
                val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
                val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
                writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
                ++firmware;
                ++i;
        }
}

static void cas_init_rx_dma(struct cas *cp)
{
        u64 desc_dma = cp->block_dvma;
        u32 val;
        int i, size;

        /* rx free descriptors */
        val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
        val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
        val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
        if ((N_RX_DESC_RINGS > 1) &&
            (cp->cas_flags & CAS_FLAG_REG_PLUS))  /* do desc 2 */
                val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
        writel(val, cp->regs + REG_RX_CFG);

        val = (unsigned long) cp->init_rxds[0] -
                (unsigned long) cp->init_block;
        writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
        writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
        writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);

        if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
                /* rx desc 2 is for IPSEC packets. however,
                 * we don't use it for that purpose.
                 */
                val = (unsigned long) cp->init_rxds[1] -
                        (unsigned long) cp->init_block;
                writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
                writel((desc_dma + val) & 0xffffffff, cp->regs +
                       REG_PLUS_RX_DB1_LOW);
                writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
                       REG_PLUS_RX_KICK1);
        }

        /* rx completion registers */
        val = (unsigned long) cp->init_rxcs[0] -
                (unsigned long) cp->init_block;
        writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
        writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);

        if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
                /* rx comp 2-4 */
                for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
                        val = (unsigned long) cp->init_rxcs[i] -
                                (unsigned long) cp->init_block;
                        writel((desc_dma + val) >> 32, cp->regs +
                               REG_PLUS_RX_CBN_HI(i));
                        writel((desc_dma + val) & 0xffffffff, cp->regs +
                               REG_PLUS_RX_CBN_LOW(i));
                }
        }

        /* read selective clear regs to prevent spurious interrupts
         * on reset because complete == kick.
         * selective clear set up to prevent interrupts on resets
         */
        readl(cp->regs + REG_INTR_STATUS_ALIAS);
        writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
        if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
                for (i = 1; i < N_RX_COMP_RINGS; i++)
                        readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));

                /* 2 is different from 3 and 4 */
                if (N_RX_COMP_RINGS > 1)
                        writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
                               cp->regs + REG_PLUS_ALIASN_CLEAR(1));

                for (i = 2; i < N_RX_COMP_RINGS; i++)
                        writel(INTR_RX_DONE_ALT,
                               cp->regs + REG_PLUS_ALIASN_CLEAR(i));
        }

        /* set up pause thresholds */
        val  = CAS_BASE(RX_PAUSE_THRESH_OFF,
                        cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
        val |= CAS_BASE(RX_PAUSE_THRESH_ON,
                        cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
        writel(val, cp->regs + REG_RX_PAUSE_THRESH);

        /* zero out dma reassembly buffers */
        for (i = 0; i < 64; i++) {
                writel(i, cp->regs + REG_RX_TABLE_ADDR);
                writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
                writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
                writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
        }

        /* make sure address register is 0 for normal operation */
        writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
        writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);

        /* interrupt mitigation */
#ifdef USE_RX_BLANK
        val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
        val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
        writel(val, cp->regs + REG_RX_BLANK);
#else
        writel(0x0, cp->regs + REG_RX_BLANK);
#endif

        /* interrupt generation as a function of low water marks for
         * free desc and completion entries. these are used to trigger
         * housekeeping for rx descs. we don't use the free interrupt
         * as it's not very useful
         */
        /* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */
        val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
        writel(val, cp->regs + REG_RX_AE_THRESH);
        if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
                val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
                writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
        }

        /* Random early detect registers. useful for congestion avoidance.
         * this should be tunable.
         */
        writel(0x0, cp->regs + REG_RX_RED);

        /* receive page sizes. default == 2K (0x800) */
        val = 0;
        if (cp->page_size == 0x1000)
                val = 0x1;
        else if (cp->page_size == 0x2000)
                val = 0x2;
        else if (cp->page_size == 0x4000)
                val = 0x3;

        /* round mtu + offset. constrain to page size. */
        size = cp->dev->mtu + 64;
        if (size > cp->page_size)
                size = cp->page_size;

        if (size <= 0x400)
                i = 0x0;
        else if (size <= 0x800)
                i = 0x1;
        else if (size <= 0x1000)
                i = 0x2;
        else
                i = 0x3;

        cp->mtu_stride = 1 << (i + 10);
        val  = CAS_BASE(RX_PAGE_SIZE, val);
        val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
        val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
        val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
        writel(val, cp->regs + REG_RX_PAGE_SIZE);
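        /* worked example of the stride math above: with 8K pages and a
         * 1500 byte mtu, size = 1564, so i = 0x1, mtu_stride = 2K, and
         * MTU_COUNT is programmed with page_size >> 11 = 4 buffers per
         * page.
         */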

        /* enable the header parser if desired */
        if (CAS_HP_FIRMWARE == cas_prog_null)
                return;

        val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
        val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
        val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
        writel(val, cp->regs + REG_HP_CFG);
}

static inline void cas_rxc_init(struct cas_rx_comp *rxc)
{
        memset(rxc, 0, sizeof(*rxc));
        rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
}

/* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
 * flipping is protected by the fact that the chip will not
 * hand back the same page index while it's being processed.
 */
static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
{
        cas_page_t *page = cp->rx_pages[1][index];
        cas_page_t *new;

        if (cas_buffer_count(page) == 1)
                return page;

        new = cas_page_dequeue(cp);
        if (new) {
                spin_lock(&cp->rx_inuse_lock);
                list_add(&page->list, &cp->rx_inuse_list);
                spin_unlock(&cp->rx_inuse_lock);
        }
        return new;
}

/* this needs to be changed if we actually use the ENC RX DESC ring */
static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
                                 const int index)
{
        cas_page_t **page0 = cp->rx_pages[0];
        cas_page_t **page1 = cp->rx_pages[1];

        /* swap if buffer is in use */
        if (cas_buffer_count(page0[index]) > 1) {
                cas_page_t *new = cas_page_spare(cp, index);
                if (new) {
                        page1[index] = page0[index];
                        page0[index] = new;
                }
        }
        RX_USED_SET(page0[index], 0);
        return page0[index];
}

static void cas_clean_rxds(struct cas *cp)
{
        /* only clean ring 0 as ring 1 is used for spare buffers */
        struct cas_rx_desc *rxd = cp->init_rxds[0];
        int i, size;

        /* release all rx flows */
        for (i = 0; i < N_RX_FLOWS; i++) {
                struct sk_buff *skb;
                while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
                        cas_skb_release(skb);
                }
        }

        /* initialize descriptors */
        size = RX_DESC_RINGN_SIZE(0);
        for (i = 0; i < size; i++) {
                cas_page_t *page = cas_page_swap(cp, 0, i);
                rxd[i].buffer = cpu_to_le64(page->dma_addr);
                rxd[i].index  = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
                                            CAS_BASE(RX_INDEX_RING, 0));
        }

        cp->rx_old[0]  = RX_DESC_RINGN_SIZE(0) - 4;
        cp->rx_last[0] = 0;
        cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
}

static void cas_clean_rxcs(struct cas *cp)
{
        int i, j;

        /* take ownership of rx comp descriptors */
        memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
        memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
        for (i = 0; i < N_RX_COMP_RINGS; i++) {
                struct cas_rx_comp *rxc = cp->init_rxcs[i];
                for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
                        cas_rxc_init(rxc + j);
                }
        }
}

#if 0
/* When we get a RX fifo overflow, the RX unit is probably hung
 * so we do the following.
 *
 * If any part of the reset goes wrong, we return 1 and that causes the
 * whole chip to be reset.
 */
static int cas_rxmac_reset(struct cas *cp)
{
        struct net_device *dev = cp->dev;
        int limit;
        u32 val;

        /* First, reset MAC RX. */
        writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
        for (limit = 0; limit < STOP_TRIES; limit++) {
                if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
                        break;
                udelay(10);
        }
        if (limit == STOP_TRIES) {
                printk(KERN_ERR "%s: RX MAC will not disable, resetting whole "
                       "chip.\n", dev->name);
                return 1;
        }

        /* Second, disable RX DMA. */
        writel(0, cp->regs + REG_RX_CFG);
        for (limit = 0; limit < STOP_TRIES; limit++) {
                if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
                        break;
                udelay(10);
        }
        if (limit == STOP_TRIES) {
                printk(KERN_ERR "%s: RX DMA will not disable, resetting whole "
                       "chip.\n", dev->name);
                return 1;
        }

        mdelay(5);

        /* Execute RX reset command. */
        writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
        for (limit = 0; limit < STOP_TRIES; limit++) {
                if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
                        break;
                udelay(10);
        }
        if (limit == STOP_TRIES) {
                printk(KERN_ERR "%s: RX reset command will not execute, "
                       "resetting whole chip.\n", dev->name);
                return 1;
        }

        /* reset driver rx state */
        cas_clean_rxds(cp);
        cas_clean_rxcs(cp);

        /* Now, reprogram the rest of RX unit. */
        cas_init_rx_dma(cp);

        /* re-enable */
        val = readl(cp->regs + REG_RX_CFG);
        writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
        writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
        val = readl(cp->regs + REG_MAC_RX_CFG);
        writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
        return 0;
}
#endif

static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
                               u32 status)
{
        u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);

        if (!stat)
                return 0;

        if (netif_msg_intr(cp))
                printk(KERN_DEBUG "%s: rxmac interrupt, stat: 0x%x\n",
                        cp->dev->name, stat);

        /* these are all rollovers */
        spin_lock(&cp->stat_lock[0]);
        if (stat & MAC_RX_ALIGN_ERR)
                cp->net_stats[0].rx_frame_errors += 0x10000;

        if (stat & MAC_RX_CRC_ERR)
                cp->net_stats[0].rx_crc_errors += 0x10000;

        if (stat & MAC_RX_LEN_ERR)
                cp->net_stats[0].rx_length_errors += 0x10000;

        if (stat & MAC_RX_OVERFLOW) {
                cp->net_stats[0].rx_over_errors++;
                cp->net_stats[0].rx_fifo_errors++;
        }

        /* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR
         * events.
         */
        spin_unlock(&cp->stat_lock[0]);
        return 0;
}

static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
                             u32 status)
{
        u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);

        if (!stat)
                return 0;

        if (netif_msg_intr(cp))
                printk(KERN_DEBUG "%s: mac interrupt, stat: 0x%x\n",
                        cp->dev->name, stat);

        /* This interrupt is just for pause frame and pause
         * tracking. It is useful for diagnostics and debug
         * but probably by default we will mask these events.
         */
        if (stat & MAC_CTRL_PAUSE_STATE)
                cp->pause_entered++;

        if (stat & MAC_CTRL_PAUSE_RECEIVED)
                cp->pause_last_time_recvd = (stat >> 16);

        return 0;
}


/* Must be invoked under cp->lock. */
static inline int cas_mdio_link_not_up(struct cas *cp)
{
        u16 val;

        switch (cp->lstate) {
        case link_force_ret:
                if (netif_msg_link(cp))
                        printk(KERN_INFO "%s: Autoneg failed again, keeping"
                                " forced mode\n", cp->dev->name);
                cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
                cp->timer_ticks = 5;
                cp->lstate = link_force_ok;
                cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
                break;

        case link_aneg:
                val = cas_phy_read(cp, MII_BMCR);

                /* Try forced modes. we try things in the following order:
                 * 1000 full -> 100 full/half -> 10 half
                 */
                val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
                val |= BMCR_FULLDPLX;
                val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
                        CAS_BMCR_SPEED1000 : BMCR_SPEED100;
                cas_phy_write(cp, MII_BMCR, val);
                cp->timer_ticks = 5;
                cp->lstate = link_force_try;
                cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
                break;

        case link_force_try:
                /* Downgrade from 1000 to 100 to 10 Mbps if necessary. */
                val = cas_phy_read(cp, MII_BMCR);
                cp->timer_ticks = 5;
                if (val & CAS_BMCR_SPEED1000) { /* gigabit */
                        val &= ~CAS_BMCR_SPEED1000;
                        val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
                        cas_phy_write(cp, MII_BMCR, val);
                        break;
                }

                if (val & BMCR_SPEED100) {
                        if (val & BMCR_FULLDPLX) /* fd failed */
                                val &= ~BMCR_FULLDPLX;
                        else { /* 100Mbps failed */
                                val &= ~BMCR_SPEED100;
                        }
                        cas_phy_write(cp, MII_BMCR, val);
                        break;
                }
        default:
                break;
        }
        return 0;
}


/* must be invoked with cp->lock held */
static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
{
        int restart;

        if (bmsr & BMSR_LSTATUS) {
                /* Ok, here we got a link. If we had it due to a forced
                 * fallback, and we were configured for autoneg, we
                 * retry a short autoneg pass. If you know your hub is
                 * broken, use ethtool ;)
                 */
                if ((cp->lstate == link_force_try) &&
                    (cp->link_cntl & BMCR_ANENABLE)) {
                        cp->lstate = link_force_ret;
                        cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
                        cas_mif_poll(cp, 0);
                        cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
                        cp->timer_ticks = 5;
                        if (cp->opened && netif_msg_link(cp))
                                printk(KERN_INFO "%s: Got link after fallback, retrying"
                                       " autoneg once...\n", cp->dev->name);
                        cas_phy_write(cp, MII_BMCR,
                                      cp->link_fcntl | BMCR_ANENABLE |
                                      BMCR_ANRESTART);
                        cas_mif_poll(cp, 1);

                } else if (cp->lstate != link_up) {
                        cp->lstate = link_up;
                        cp->link_transition = LINK_TRANSITION_LINK_UP;

                        if (cp->opened) {
                                cas_set_link_modes(cp);
                                netif_carrier_on(cp->dev);
                        }
                }
                return 0;
        }

        /* link not up. if the link was previously up, we restart the
         * whole process
         */
        restart = 0;
        if (cp->lstate == link_up) {
                cp->lstate = link_down;
                cp->link_transition = LINK_TRANSITION_LINK_DOWN;

                netif_carrier_off(cp->dev);
                if (cp->opened && netif_msg_link(cp))
                        printk(KERN_INFO "%s: Link down\n",
                               cp->dev->name);
                restart = 1;

        } else if (++cp->timer_ticks > 10)
                cas_mdio_link_not_up(cp);

        return restart;
}

static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
                             u32 status)
{
        u32 stat = readl(cp->regs + REG_MIF_STATUS);
        u16 bmsr;

        /* check for a link change */
        if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
                return 0;

        bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
        return cas_mii_link_check(cp, bmsr);
}

static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
                             u32 status)
{
        u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);

        if (!stat)
                return 0;

        printk(KERN_ERR "%s: PCI error [%04x:%04x] ", dev->name, stat,
               readl(cp->regs + REG_BIM_DIAG));

        /* cassini+ has this reserved */
        if ((stat & PCI_ERR_BADACK) &&
            ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
                printk("<No ACK64# during ABS64 cycle> ");

        if (stat & PCI_ERR_DTRTO)
                printk("<Delayed transaction timeout> ");
        if (stat & PCI_ERR_OTHER)
                printk("<other> ");
        if (stat & PCI_ERR_BIM_DMA_WRITE)
                printk("<BIM DMA 0 write req> ");
        if (stat & PCI_ERR_BIM_DMA_READ)
                printk("<BIM DMA 0 read req> ");
        printk("\n");

        if (stat & PCI_ERR_OTHER) {
                u16 cfg;

                /* Interrogate PCI config space for the
                 * true cause.
                 */
                pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
                printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n",
                       dev->name, cfg);
                if (cfg & PCI_STATUS_PARITY)
                        printk(KERN_ERR "%s: PCI parity error detected.\n",
                               dev->name);
                if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
                        printk(KERN_ERR "%s: PCI target abort.\n",
                               dev->name);
                if (cfg & PCI_STATUS_REC_TARGET_ABORT)
                        printk(KERN_ERR "%s: PCI master acks target abort.\n",
                               dev->name);
                if (cfg & PCI_STATUS_REC_MASTER_ABORT)
                        printk(KERN_ERR "%s: PCI master abort.\n", dev->name);
                if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
                        printk(KERN_ERR "%s: PCI system error SERR#.\n",
                               dev->name);
                if (cfg & PCI_STATUS_DETECTED_PARITY)
                        printk(KERN_ERR "%s: PCI parity error.\n",
                               dev->name);

                /* Write the error bits back to clear them. */
                cfg &= (PCI_STATUS_PARITY |
                        PCI_STATUS_SIG_TARGET_ABORT |
                        PCI_STATUS_REC_TARGET_ABORT |
                        PCI_STATUS_REC_MASTER_ABORT |
                        PCI_STATUS_SIG_SYSTEM_ERROR |
                        PCI_STATUS_DETECTED_PARITY);
                pci_write_config_word(cp->pdev, PCI_STATUS, cfg);
        }

        /* For all PCI errors, we should reset the chip. */
        return 1;
}

/* All non-normal interrupt conditions get serviced here.
 * Returns non-zero if we should just exit the interrupt
 * handler right now (ie. if we reset the card which invalidates
 * all of the other original irq status bits).
 */
static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
                            u32 status)
{
        if (status & INTR_RX_TAG_ERROR) {
                /* corrupt RX tag framing */
                if (netif_msg_rx_err(cp))
                        printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
                                cp->dev->name);
                spin_lock(&cp->stat_lock[0]);
                cp->net_stats[0].rx_errors++;
                spin_unlock(&cp->stat_lock[0]);
                goto do_reset;
        }

        if (status & INTR_RX_LEN_MISMATCH) {
                /* length mismatch. */
                if (netif_msg_rx_err(cp))
                        printk(KERN_DEBUG "%s: length mismatch for rx frame\n",
                                cp->dev->name);
                spin_lock(&cp->stat_lock[0]);
                cp->net_stats[0].rx_errors++;
                spin_unlock(&cp->stat_lock[0]);
                goto do_reset;
        }

        if (status & INTR_PCS_STATUS) {
                if (cas_pcs_interrupt(dev, cp, status))
                        goto do_reset;
        }

        if (status & INTR_TX_MAC_STATUS) {
                if (cas_txmac_interrupt(dev, cp, status))
                        goto do_reset;
        }

        if (status & INTR_RX_MAC_STATUS) {
                if (cas_rxmac_interrupt(dev, cp, status))
                        goto do_reset;
        }

        if (status & INTR_MAC_CTRL_STATUS) {
                if (cas_mac_interrupt(dev, cp, status))
                        goto do_reset;
        }

        if (status & INTR_MIF_STATUS) {
                if (cas_mif_interrupt(dev, cp, status))
                        goto do_reset;
        }

        if (status & INTR_PCI_ERROR_STATUS) {
                if (cas_pci_interrupt(dev, cp, status))
                        goto do_reset;
        }
        return 0;

do_reset:
#if 1
        atomic_inc(&cp->reset_task_pending);
        atomic_inc(&cp->reset_task_pending_all);
        printk(KERN_ERR "%s:reset called in cas_abnormal_irq [0x%x]\n",
               dev->name, status);
        schedule_work(&cp->reset_task);
#else
        atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
        printk(KERN_ERR "reset called in cas_abnormal_irq\n");
        schedule_work(&cp->reset_task);
#endif
        return 1;
}

/* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when
 * determining whether to do a netif_stop/wakeup
 */
#define CAS_TABORT(x)      (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
#define CAS_ROUND_PAGE(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)
static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
                                  const int len)
{
        unsigned long off = addr + len;

        if (CAS_TABORT(cp) == 1)
                return 0;
        if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
                return 0;
        return TX_TARGET_ABORT_LEN;
}
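/* cas_calc_tabort() returns nonzero only on chips flagged with
 * CAS_FLAG_TARGET_ABORT and only when a tx buffer would end within
 * TX_TARGET_ABORT_LEN bytes of a page boundary; the result feeds the
 * tx_tiny bounce-buffer bookkeeping visible in cas_tx_ringN() below.
 */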
*/ 1900 count -= skb_shinfo(skb)->nr_frags + 1901 + cp->tx_tiny_use[ring][entry].nbufs + 1; 1902 if (count < 0) 1903 break; 1904 1905 if (netif_msg_tx_done(cp)) 1906 printk(KERN_DEBUG "%s: tx[%d] done, slot %d\n", 1907 cp->dev->name, ring, entry); 1908 1909 skbs[entry] = NULL; 1910 cp->tx_tiny_use[ring][entry].nbufs = 0; 1911 1912 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { 1913 struct cas_tx_desc *txd = txds + entry; 1914 1915 daddr = le64_to_cpu(txd->buffer); 1916 dlen = CAS_VAL(TX_DESC_BUFLEN, 1917 le64_to_cpu(txd->control)); 1918 pci_unmap_page(cp->pdev, daddr, dlen, 1919 PCI_DMA_TODEVICE); 1920 entry = TX_DESC_NEXT(ring, entry); 1921 1922 /* tiny buffer may follow */ 1923 if (cp->tx_tiny_use[ring][entry].used) { 1924 cp->tx_tiny_use[ring][entry].used = 0; 1925 entry = TX_DESC_NEXT(ring, entry); 1926 } 1927 } 1928 1929 spin_lock(&cp->stat_lock[ring]); 1930 cp->net_stats[ring].tx_packets++; 1931 cp->net_stats[ring].tx_bytes += skb->len; 1932 spin_unlock(&cp->stat_lock[ring]); 1933 dev_kfree_skb_irq(skb); 1934 } 1935 cp->tx_old[ring] = entry; 1936 1937 /* this is wrong for multiple tx rings. the net device needs 1938 * multiple queues for this to do the right thing. we wait 1939 * for 2*packets to be available when using tiny buffers 1940 */ 1941 if (netif_queue_stopped(dev) && 1942 (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))) 1943 netif_wake_queue(dev); 1944 spin_unlock(&cp->tx_lock[ring]); 1945} 1946 1947static void cas_tx(struct net_device *dev, struct cas *cp, 1948 u32 status) 1949{ 1950 int limit, ring; 1951#ifdef USE_TX_COMPWB 1952 u64 compwb = le64_to_cpu(cp->init_block->tx_compwb); 1953#endif 1954 if (netif_msg_intr(cp)) 1955 printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %llx\n", 1956 cp->dev->name, status, (unsigned long long)compwb); 1957 /* process all the rings */ 1958 for (ring = 0; ring < N_TX_RINGS; ring++) { 1959#ifdef USE_TX_COMPWB 1960 /* use the completion writeback registers */ 1961 limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) | 1962 CAS_VAL(TX_COMPWB_LSB, compwb); 1963 compwb = TX_COMPWB_NEXT(compwb); 1964#else 1965 limit = readl(cp->regs + REG_TX_COMPN(ring)); 1966#endif 1967 if (cp->tx_old[ring] != limit) 1968 cas_tx_ringN(cp, ring, limit); 1969 } 1970} 1971 1972 1973static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, 1974 int entry, const u64 *words, 1975 struct sk_buff **skbref) 1976{ 1977 int dlen, hlen, len, i, alloclen; 1978 int off, swivel = RX_SWIVEL_OFF_VAL; 1979 struct cas_page *page; 1980 struct sk_buff *skb; 1981 void *addr, *crcaddr; 1982 char *p; 1983 1984 hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]); 1985 dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]); 1986 len = hlen + dlen; 1987 1988 if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT)) 1989 alloclen = len; 1990 else 1991 alloclen = max(hlen, RX_COPY_MIN); 1992 1993 skb = dev_alloc_skb(alloclen + swivel + cp->crc_size); 1994 if (skb == NULL) 1995 return -1; 1996 1997 *skbref = skb; 1998 skb->dev = cp->dev; 1999 skb_reserve(skb, swivel); 2000 2001 p = skb->data; 2002 addr = crcaddr = NULL; 2003 if (hlen) { /* always copy header pages */ 2004 i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]); 2005 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; 2006 off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 + 2007 swivel; 2008 2009 i = hlen; 2010 if (!dlen) /* attach FCS */ 2011 i += cp->crc_size; 2012 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i, 2013 PCI_DMA_FROMDEVICE); 2014 addr = cas_page_map(page->buffer); 2015 
memcpy(p, addr + off, i); 2016 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i, 2017 PCI_DMA_FROMDEVICE); 2018 cas_page_unmap(addr); 2019 RX_USED_ADD(page, 0x100); 2020 p += hlen; 2021 swivel = 0; 2022 } 2023 2024 2025 if (alloclen < (hlen + dlen)) { 2026 skb_frag_t *frag = skb_shinfo(skb)->frags; 2027 2028 /* normal or jumbo packets. we use frags */ 2029 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]); 2030 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; 2031 off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel; 2032 2033 hlen = min(cp->page_size - off, dlen); 2034 if (hlen < 0) { 2035 if (netif_msg_rx_err(cp)) { 2036 printk(KERN_DEBUG "%s: rx page overflow: " 2037 "%d\n", cp->dev->name, hlen); 2038 } 2039 dev_kfree_skb_irq(skb); 2040 return -1; 2041 } 2042 i = hlen; 2043 if (i == dlen) /* attach FCS */ 2044 i += cp->crc_size; 2045 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i, 2046 PCI_DMA_FROMDEVICE); 2047 2048 /* make sure we always copy a header */ 2049 swivel = 0; 2050 if (p == (char *) skb->data) { /* not split */ 2051 addr = cas_page_map(page->buffer); 2052 memcpy(p, addr + off, RX_COPY_MIN); 2053 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i, 2054 PCI_DMA_FROMDEVICE); 2055 cas_page_unmap(addr); 2056 off += RX_COPY_MIN; 2057 swivel = RX_COPY_MIN; 2058 RX_USED_ADD(page, cp->mtu_stride); 2059 } else { 2060 RX_USED_ADD(page, hlen); 2061 } 2062 skb_put(skb, alloclen); 2063 2064 skb_shinfo(skb)->nr_frags++; 2065 skb->data_len += hlen - swivel; 2066 skb->len += hlen - swivel; 2067 2068 get_page(page->buffer); 2069 cas_buffer_inc(page); 2070 frag->page = page->buffer; 2071 frag->page_offset = off; 2072 frag->size = hlen - swivel; 2073 2074 /* any more data? */ 2075 if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) { 2076 hlen = dlen; 2077 off = 0; 2078 2079 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]); 2080 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; 2081 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr, 2082 hlen + cp->crc_size, 2083 PCI_DMA_FROMDEVICE); 2084 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr, 2085 hlen + cp->crc_size, 2086 PCI_DMA_FROMDEVICE); 2087 2088 skb_shinfo(skb)->nr_frags++; 2089 skb->data_len += hlen; 2090 skb->len += hlen; 2091 frag++; 2092 2093 get_page(page->buffer); 2094 cas_buffer_inc(page); 2095 frag->page = page->buffer; 2096 frag->page_offset = 0; 2097 frag->size = hlen; 2098 RX_USED_ADD(page, hlen + cp->crc_size); 2099 } 2100 2101 if (cp->crc_size) { 2102 addr = cas_page_map(page->buffer); 2103 crcaddr = addr + off + hlen; 2104 } 2105 2106 } else { 2107 /* copying packet */ 2108 if (!dlen) 2109 goto end_copy_pkt; 2110 2111 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]); 2112 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; 2113 off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel; 2114 hlen = min(cp->page_size - off, dlen); 2115 if (hlen < 0) { 2116 if (netif_msg_rx_err(cp)) { 2117 printk(KERN_DEBUG "%s: rx page overflow: " 2118 "%d\n", cp->dev->name, hlen); 2119 } 2120 dev_kfree_skb_irq(skb); 2121 return -1; 2122 } 2123 i = hlen; 2124 if (i == dlen) /* attach FCS */ 2125 i += cp->crc_size; 2126 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i, 2127 PCI_DMA_FROMDEVICE); 2128 addr = cas_page_map(page->buffer); 2129 memcpy(p, addr + off, i); 2130 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i, 2131 PCI_DMA_FROMDEVICE); 2132 cas_page_unmap(addr); 2133 if (p == (char *) skb->data) /* not 
split */ 2134 RX_USED_ADD(page, cp->mtu_stride); 2135 else 2136 RX_USED_ADD(page, i); 2137 2138 /* any more data? */ 2139 if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) { 2140 p += hlen; 2141 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]); 2142 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; 2143 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr, 2144 dlen + cp->crc_size, 2145 PCI_DMA_FROMDEVICE); 2146 addr = cas_page_map(page->buffer); 2147 memcpy(p, addr, dlen + cp->crc_size); 2148 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr, 2149 dlen + cp->crc_size, 2150 PCI_DMA_FROMDEVICE); 2151 cas_page_unmap(addr); 2152 RX_USED_ADD(page, dlen + cp->crc_size); 2153 } 2154end_copy_pkt: 2155 if (cp->crc_size) { 2156 addr = NULL; 2157 crcaddr = skb->data + alloclen; 2158 } 2159 skb_put(skb, alloclen); 2160 } 2161 2162 i = CAS_VAL(RX_COMP4_TCP_CSUM, words[3]); 2163 if (cp->crc_size) { 2164 /* checksum includes FCS. strip it out. */ 2165 i = csum_fold(csum_partial(crcaddr, cp->crc_size, i)); 2166 if (addr) 2167 cas_page_unmap(addr); 2168 } 2169 skb->csum = ntohs(i ^ 0xffff); 2170 skb->ip_summed = CHECKSUM_COMPLETE; 2171 skb->protocol = eth_type_trans(skb, cp->dev); 2172 return len; 2173} 2174 2175 2176/* we can handle up to 64 rx flows at a time. we do the same thing 2177 * as nonreassm except that we batch up the buffers. 2178 * NOTE: we currently just treat each flow as a bunch of packets that 2179 * we pass up. a better way would be to coalesce the packets 2180 * into a jumbo packet. to do that, we need to do the following: 2181 * 1) the first packet will have a clean split between header and 2182 * data. save both. 2183 * 2) each time the next flow packet comes in, extend the 2184 * data length and merge the checksums. 2185 * 3) on flow release, fix up the header. 2186 * 4) make sure the higher layer doesn't care. 2187 * because packets get coalesced, we shouldn't run into fragment count 2188 * issues. 2189 */ 2190static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words, 2191 struct sk_buff *skb) 2192{ 2193 int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1); 2194 struct sk_buff_head *flow = &cp->rx_flows[flowid]; 2195 2196 /* this is protected at a higher layer, so no need to 2197 * do any additional locking here. stick the buffer 2198 * at the end. 2199 */ 2200 __skb_insert(skb, flow->prev, (struct sk_buff *) flow, flow); 2201 if (words[0] & RX_COMP1_RELEASE_FLOW) { 2202 while ((skb = __skb_dequeue(flow))) { 2203 cas_skb_release(skb); 2204 } 2205 } 2206} 2207 2208/* put rx descriptor back on ring. if a buffer is in use by a higher 2209 * layer, this will need to put in a replacement. 
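 * cas_page_swap() provides the replacement: the spare page, or the
 * original one if nothing else still holds a reference to it. note
 * that the kick register is only written once four new entries have
 * accumulated (the entry % 4 check below), so descriptors are handed
 * to the chip in groups of four.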
2210 */ 2211static void cas_post_page(struct cas *cp, const int ring, const int index) 2212{ 2213 cas_page_t *new; 2214 int entry; 2215 2216 entry = cp->rx_old[ring]; 2217 2218 new = cas_page_swap(cp, ring, index); 2219 cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr); 2220 cp->init_rxds[ring][entry].index = 2221 cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) | 2222 CAS_BASE(RX_INDEX_RING, ring)); 2223 2224 entry = RX_DESC_ENTRY(ring, entry + 1); 2225 cp->rx_old[ring] = entry; 2226 2227 if (entry % 4) 2228 return; 2229 2230 if (ring == 0) 2231 writel(entry, cp->regs + REG_RX_KICK); 2232 else if ((N_RX_DESC_RINGS > 1) && 2233 (cp->cas_flags & CAS_FLAG_REG_PLUS)) 2234 writel(entry, cp->regs + REG_PLUS_RX_KICK1); 2235} 2236 2237 2238/* only when things are bad */ 2239static int cas_post_rxds_ringN(struct cas *cp, int ring, int num) 2240{ 2241 unsigned int entry, last, count, released; 2242 int cluster; 2243 cas_page_t **page = cp->rx_pages[ring]; 2244 2245 entry = cp->rx_old[ring]; 2246 2247 if (netif_msg_intr(cp)) 2248 printk(KERN_DEBUG "%s: rxd[%d] interrupt, done: %d\n", 2249 cp->dev->name, ring, entry); 2250 2251 cluster = -1; 2252 count = entry & 0x3; 2253 last = RX_DESC_ENTRY(ring, num ? entry + num - 4: entry - 4); 2254 released = 0; 2255 while (entry != last) { 2256 /* make a new buffer if it's still in use */ 2257 if (cas_buffer_count(page[entry]) > 1) { 2258 cas_page_t *new = cas_page_dequeue(cp); 2259 if (!new) { 2260 /* let the timer know that we need to 2261 * do this again 2262 */ 2263 cp->cas_flags |= CAS_FLAG_RXD_POST(ring); 2264 if (!timer_pending(&cp->link_timer)) 2265 mod_timer(&cp->link_timer, jiffies + 2266 CAS_LINK_FAST_TIMEOUT); 2267 cp->rx_old[ring] = entry; 2268 cp->rx_last[ring] = num ? num - released : 0; 2269 return -ENOMEM; 2270 } 2271 spin_lock(&cp->rx_inuse_lock); 2272 list_add(&page[entry]->list, &cp->rx_inuse_list); 2273 spin_unlock(&cp->rx_inuse_lock); 2274 cp->init_rxds[ring][entry].buffer = 2275 cpu_to_le64(new->dma_addr); 2276 page[entry] = new; 2277 2278 } 2279 2280 if (++count == 4) { 2281 cluster = entry; 2282 count = 0; 2283 } 2284 released++; 2285 entry = RX_DESC_ENTRY(ring, entry + 1); 2286 } 2287 cp->rx_old[ring] = entry; 2288 2289 if (cluster < 0) 2290 return 0; 2291 2292 if (ring == 0) 2293 writel(cluster, cp->regs + REG_RX_KICK); 2294 else if ((N_RX_DESC_RINGS > 1) && 2295 (cp->cas_flags & CAS_FLAG_REG_PLUS)) 2296 writel(cluster, cp->regs + REG_PLUS_RX_KICK1); 2297 return 0; 2298} 2299 2300 2301/* process a completion ring. packets are set up in three basic ways: 2302 * small packets: should be copied header + data in single buffer. 2303 * large packets: header and data in a single buffer. 2304 * split packets: header in a separate buffer from data. 2305 * data may be in multiple pages. data may be > 256 2306 * bytes but in a single page. 2307 * 2308 * NOTE: RX page posting is done in this routine as well. while there's 2309 * the capability of using multiple RX completion rings, it isn't 2310 * really worthwhile due to the fact that the page posting will 2311 * force serialization on the single descriptor ring. 
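 * the budget argument only matters when USE_NAPI is defined: it
 * bounds the number of completions consumed per call. a budget of 0
 * (the plain interrupt path) means drain the ring completely.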
2312 */ 2313static int cas_rx_ringN(struct cas *cp, int ring, int budget) 2314{ 2315 struct cas_rx_comp *rxcs = cp->init_rxcs[ring]; 2316 int entry, drops; 2317 int npackets = 0; 2318 2319 if (netif_msg_intr(cp)) 2320 printk(KERN_DEBUG "%s: rx[%d] interrupt, done: %d/%d\n", 2321 cp->dev->name, ring, 2322 readl(cp->regs + REG_RX_COMP_HEAD), 2323 cp->rx_new[ring]); 2324 2325 entry = cp->rx_new[ring]; 2326 drops = 0; 2327 while (1) { 2328 struct cas_rx_comp *rxc = rxcs + entry; 2329 struct sk_buff *skb; 2330 int type, len; 2331 u64 words[4]; 2332 int i, dring; 2333 2334 words[0] = le64_to_cpu(rxc->word1); 2335 words[1] = le64_to_cpu(rxc->word2); 2336 words[2] = le64_to_cpu(rxc->word3); 2337 words[3] = le64_to_cpu(rxc->word4); 2338 2339 /* don't touch if still owned by hw */ 2340 type = CAS_VAL(RX_COMP1_TYPE, words[0]); 2341 if (type == 0) 2342 break; 2343 2344 /* hw hasn't cleared the zero bit yet */ 2345 if (words[3] & RX_COMP4_ZERO) { 2346 break; 2347 } 2348 2349 /* get info on the packet */ 2350 if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) { 2351 spin_lock(&cp->stat_lock[ring]); 2352 cp->net_stats[ring].rx_errors++; 2353 if (words[3] & RX_COMP4_LEN_MISMATCH) 2354 cp->net_stats[ring].rx_length_errors++; 2355 if (words[3] & RX_COMP4_BAD) 2356 cp->net_stats[ring].rx_crc_errors++; 2357 spin_unlock(&cp->stat_lock[ring]); 2358 2359 /* We'll just return it to Cassini. */ 2360 drop_it: 2361 spin_lock(&cp->stat_lock[ring]); 2362 ++cp->net_stats[ring].rx_dropped; 2363 spin_unlock(&cp->stat_lock[ring]); 2364 goto next; 2365 } 2366 2367 len = cas_rx_process_pkt(cp, rxc, entry, words, &skb); 2368 if (len < 0) { 2369 ++drops; 2370 goto drop_it; 2371 } 2372 2373 /* see if it's a flow re-assembly or not. the driver 2374 * itself handles release back up. 2375 */ 2376 if (RX_DONT_BATCH || (type == 0x2)) { 2377 /* non-reassm: these always get released */ 2378 cas_skb_release(skb); 2379 } else { 2380 cas_rx_flow_pkt(cp, words, skb); 2381 } 2382 2383 spin_lock(&cp->stat_lock[ring]); 2384 cp->net_stats[ring].rx_packets++; 2385 cp->net_stats[ring].rx_bytes += len; 2386 spin_unlock(&cp->stat_lock[ring]); 2387 cp->dev->last_rx = jiffies; 2388 2389 next: 2390 npackets++; 2391 2392 /* should it be released? 
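		 * a single completion can name up to three pages for
		 * recycling: the header page, the data page, and the
		 * page of a split continuation (the RELEASE_HDR,
		 * RELEASE_DATA and RELEASE_NEXT bits below).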
		 */
		if (words[0] & RX_COMP1_RELEASE_HDR) {
			i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
			dring = CAS_VAL(RX_INDEX_RING, i);
			i = CAS_VAL(RX_INDEX_NUM, i);
			cas_post_page(cp, dring, i);
		}

		if (words[0] & RX_COMP1_RELEASE_DATA) {
			i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
			dring = CAS_VAL(RX_INDEX_RING, i);
			i = CAS_VAL(RX_INDEX_NUM, i);
			cas_post_page(cp, dring, i);
		}

		if (words[0] & RX_COMP1_RELEASE_NEXT) {
			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
			dring = CAS_VAL(RX_INDEX_RING, i);
			i = CAS_VAL(RX_INDEX_NUM, i);
			cas_post_page(cp, dring, i);
		}

		/* skip to the next entry */
		entry = RX_COMP_ENTRY(ring, entry + 1 +
				      CAS_VAL(RX_COMP1_SKIP, words[0]));
#ifdef USE_NAPI
		if (budget && (npackets >= budget))
			break;
#endif
	}
	cp->rx_new[ring] = entry;

	if (drops)
		printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
		       cp->dev->name);
	return npackets;
}


/* put completion entries back on the ring */
static void cas_post_rxcs_ringN(struct net_device *dev,
				struct cas *cp, int ring)
{
	struct cas_rx_comp *rxc = cp->init_rxcs[ring];
	int last, entry;

	last = cp->rx_cur[ring];
	entry = cp->rx_new[ring];
	if (netif_msg_intr(cp))
		printk(KERN_DEBUG "%s: rxc[%d] interrupt, done: %d/%d\n",
		       dev->name, ring, readl(cp->regs + REG_RX_COMP_HEAD),
		       entry);

	/* zero and re-mark descriptors */
	while (last != entry) {
		cas_rxc_init(rxc + last);
		last = RX_COMP_ENTRY(ring, last + 1);
	}
	cp->rx_cur[ring] = last;

	if (ring == 0)
		writel(last, cp->regs + REG_RX_COMP_TAIL);
	else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
		writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
}


/* cassini can use all four PCI interrupts for the completion ring.
 * rings 3 and 4 are identical
 */
#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
static inline void cas_handle_irqN(struct net_device *dev,
				   struct cas *cp, const u32 status,
				   const int ring)
{
	if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
		cas_post_rxcs_ringN(dev, cp, ring);
}

static irqreturn_t cas_interruptN(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct cas *cp = netdev_priv(dev);
	unsigned long flags;
	/* the ring must be known before its status register can be read */
	int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
	u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));

	/* check for shared irq */
	if (status == 0)
		return IRQ_NONE;

	spin_lock_irqsave(&cp->lock, flags);
	if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
#ifdef USE_NAPI
		cas_mask_intr(cp);
		netif_rx_schedule(dev);
#else
		cas_rx_ringN(cp, ring, 0);
#endif
		status &= ~INTR_RX_DONE_ALT;
	}

	if (status)
		cas_handle_irqN(dev, cp, status, ring);
	spin_unlock_irqrestore(&cp->lock, flags);
	return IRQ_HANDLED;
}
#endif

#ifdef USE_PCI_INTB
/* everything but rx packets */
static inline void cas_handle_irq1(struct cas *cp, const u32 status)
{
	if (status & INTR_RX_BUF_UNAVAIL_1) {
		/* Frame arrived, no free RX buffers available.
		 * NOTE: we can get this on a link transition.
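		 * repost whatever descriptors we can and account the
		 * frame as dropped. the almost-empty (AE) case below
		 * normally refills the ring before it empties out
		 * completely.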
*/ 2510 cas_post_rxds_ringN(cp, 1, 0); 2511 spin_lock(&cp->stat_lock[1]); 2512 cp->net_stats[1].rx_dropped++; 2513 spin_unlock(&cp->stat_lock[1]); 2514 } 2515 2516 if (status & INTR_RX_BUF_AE_1) 2517 cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) - 2518 RX_AE_FREEN_VAL(1)); 2519 2520 if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL)) 2521 cas_post_rxcs_ringN(cp, 1); 2522} 2523 2524/* ring 2 handles a few more events than 3 and 4 */ 2525static irqreturn_t cas_interrupt1(int irq, void *dev_id) 2526{ 2527 struct net_device *dev = dev_id; 2528 struct cas *cp = netdev_priv(dev); 2529 unsigned long flags; 2530 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1)); 2531 2532 /* check for shared interrupt */ 2533 if (status == 0) 2534 return IRQ_NONE; 2535 2536 spin_lock_irqsave(&cp->lock, flags); 2537 if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ 2538#ifdef USE_NAPI 2539 cas_mask_intr(cp); 2540 netif_rx_schedule(dev); 2541#else 2542 cas_rx_ringN(cp, 1, 0); 2543#endif 2544 status &= ~INTR_RX_DONE_ALT; 2545 } 2546 if (status) 2547 cas_handle_irq1(cp, status); 2548 spin_unlock_irqrestore(&cp->lock, flags); 2549 return IRQ_HANDLED; 2550} 2551#endif 2552 2553static inline void cas_handle_irq(struct net_device *dev, 2554 struct cas *cp, const u32 status) 2555{ 2556 /* housekeeping interrupts */ 2557 if (status & INTR_ERROR_MASK) 2558 cas_abnormal_irq(dev, cp, status); 2559 2560 if (status & INTR_RX_BUF_UNAVAIL) { 2561 /* Frame arrived, no free RX buffers available. 2562 * NOTE: we can get this on a link transition. 2563 */ 2564 cas_post_rxds_ringN(cp, 0, 0); 2565 spin_lock(&cp->stat_lock[0]); 2566 cp->net_stats[0].rx_dropped++; 2567 spin_unlock(&cp->stat_lock[0]); 2568 } else if (status & INTR_RX_BUF_AE) { 2569 cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) - 2570 RX_AE_FREEN_VAL(0)); 2571 } 2572 2573 if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL)) 2574 cas_post_rxcs_ringN(dev, cp, 0); 2575} 2576 2577static irqreturn_t cas_interrupt(int irq, void *dev_id) 2578{ 2579 struct net_device *dev = dev_id; 2580 struct cas *cp = netdev_priv(dev); 2581 unsigned long flags; 2582 u32 status = readl(cp->regs + REG_INTR_STATUS); 2583 2584 if (status == 0) 2585 return IRQ_NONE; 2586 2587 spin_lock_irqsave(&cp->lock, flags); 2588 if (status & (INTR_TX_ALL | INTR_TX_INTME)) { 2589 cas_tx(dev, cp, status); 2590 status &= ~(INTR_TX_ALL | INTR_TX_INTME); 2591 } 2592 2593 if (status & INTR_RX_DONE) { 2594#ifdef USE_NAPI 2595 cas_mask_intr(cp); 2596 netif_rx_schedule(dev); 2597#else 2598 cas_rx_ringN(cp, 0, 0); 2599#endif 2600 status &= ~INTR_RX_DONE; 2601 } 2602 2603 if (status) 2604 cas_handle_irq(dev, cp, status); 2605 spin_unlock_irqrestore(&cp->lock, flags); 2606 return IRQ_HANDLED; 2607} 2608 2609 2610#ifdef USE_NAPI 2611static int cas_poll(struct net_device *dev, int *budget) 2612{ 2613 struct cas *cp = netdev_priv(dev); 2614 int i, enable_intr, todo, credits; 2615 u32 status = readl(cp->regs + REG_INTR_STATUS); 2616 unsigned long flags; 2617 2618 spin_lock_irqsave(&cp->lock, flags); 2619 cas_tx(dev, cp, status); 2620 spin_unlock_irqrestore(&cp->lock, flags); 2621 2622 /* NAPI rx packets. 
we spread the credits across all of the
	 * rxc rings
	 */
	todo = min(*budget, dev->quota);

	/* to make sure we're fair with the work we loop through each
	 * ring N_RX_COMP_RINGS times with a request of
	 * todo / N_RX_COMP_RINGS
	 */
	enable_intr = 1;
	credits = 0;
	for (i = 0; i < N_RX_COMP_RINGS; i++) {
		int j;
		for (j = 0; j < N_RX_COMP_RINGS; j++) {
			credits += cas_rx_ringN(cp, j, todo / N_RX_COMP_RINGS);
			if (credits >= todo) {
				enable_intr = 0;
				goto rx_comp;
			}
		}
	}

rx_comp:
	*budget -= credits;
	dev->quota -= credits;

	/* final rx completion */
	spin_lock_irqsave(&cp->lock, flags);
	if (status)
		cas_handle_irq(dev, cp, status);

#ifdef USE_PCI_INTB
	if (N_RX_COMP_RINGS > 1) {
		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
		if (status)
			cas_handle_irq1(cp, status);
	}
#endif

#ifdef USE_PCI_INTC
	if (N_RX_COMP_RINGS > 2) {
		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
		if (status)
			cas_handle_irqN(dev, cp, status, 2);
	}
#endif

#ifdef USE_PCI_INTD
	if (N_RX_COMP_RINGS > 3) {
		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
		if (status)
			cas_handle_irqN(dev, cp, status, 3);
	}
#endif
	spin_unlock_irqrestore(&cp->lock, flags);
	if (enable_intr) {
		netif_rx_complete(dev);
		cas_unmask_intr(cp);
		return 0;
	}
	return 1;
}
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
static void cas_netpoll(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);

	cas_disable_irq(cp, 0);
	cas_interrupt(cp->pdev->irq, dev);
	cas_enable_irq(cp, 0);

#ifdef USE_PCI_INTB
	if (N_RX_COMP_RINGS > 1) {
		/* cas_interrupt1(); */
	}
#endif
#ifdef USE_PCI_INTC
	if (N_RX_COMP_RINGS > 2) {
		/* cas_interruptN(); */
	}
#endif
#ifdef USE_PCI_INTD
	if (N_RX_COMP_RINGS > 3) {
		/* cas_interruptN(); */
	}
#endif
}
#endif

static void cas_tx_timeout(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
	if (!cp->hw_running) {
		printk(KERN_ERR "%s: hrm..
hw not running!\n", dev->name); 2720 return; 2721 } 2722 2723 printk(KERN_ERR "%s: MIF_STATE[%08x]\n", 2724 dev->name, readl(cp->regs + REG_MIF_STATE_MACHINE)); 2725 2726 printk(KERN_ERR "%s: MAC_STATE[%08x]\n", 2727 dev->name, readl(cp->regs + REG_MAC_STATE_MACHINE)); 2728 2729 printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x] " 2730 "FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n", 2731 dev->name, 2732 readl(cp->regs + REG_TX_CFG), 2733 readl(cp->regs + REG_MAC_TX_STATUS), 2734 readl(cp->regs + REG_MAC_TX_CFG), 2735 readl(cp->regs + REG_TX_FIFO_PKT_CNT), 2736 readl(cp->regs + REG_TX_FIFO_WRITE_PTR), 2737 readl(cp->regs + REG_TX_FIFO_READ_PTR), 2738 readl(cp->regs + REG_TX_SM_1), 2739 readl(cp->regs + REG_TX_SM_2)); 2740 2741 printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n", 2742 dev->name, 2743 readl(cp->regs + REG_RX_CFG), 2744 readl(cp->regs + REG_MAC_RX_STATUS), 2745 readl(cp->regs + REG_MAC_RX_CFG)); 2746 2747 printk(KERN_ERR "%s: HP_STATE[%08x:%08x:%08x:%08x]\n", 2748 dev->name, 2749 readl(cp->regs + REG_HP_STATE_MACHINE), 2750 readl(cp->regs + REG_HP_STATUS0), 2751 readl(cp->regs + REG_HP_STATUS1), 2752 readl(cp->regs + REG_HP_STATUS2)); 2753 2754#if 1 2755 atomic_inc(&cp->reset_task_pending); 2756 atomic_inc(&cp->reset_task_pending_all); 2757 schedule_work(&cp->reset_task); 2758#else 2759 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); 2760 schedule_work(&cp->reset_task); 2761#endif 2762} 2763 2764static inline int cas_intme(int ring, int entry) 2765{ 2766 /* Algorithm: IRQ every 1/2 of descriptors. */ 2767 if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1))) 2768 return 1; 2769 return 0; 2770} 2771 2772 2773static void cas_write_txd(struct cas *cp, int ring, int entry, 2774 dma_addr_t mapping, int len, u64 ctrl, int last) 2775{ 2776 struct cas_tx_desc *txd = cp->init_txds[ring] + entry; 2777 2778 ctrl |= CAS_BASE(TX_DESC_BUFLEN, len); 2779 if (cas_intme(ring, entry)) 2780 ctrl |= TX_DESC_INTME; 2781 if (last) 2782 ctrl |= TX_DESC_EOF; 2783 txd->control = cpu_to_le64(ctrl); 2784 txd->buffer = cpu_to_le64(mapping); 2785} 2786 2787static inline void *tx_tiny_buf(struct cas *cp, const int ring, 2788 const int entry) 2789{ 2790 return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry; 2791} 2792 2793static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring, 2794 const int entry, const int tentry) 2795{ 2796 cp->tx_tiny_use[ring][tentry].nbufs++; 2797 cp->tx_tiny_use[ring][entry].used = 1; 2798 return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry; 2799} 2800 2801static inline int cas_xmit_tx_ringN(struct cas *cp, int ring, 2802 struct sk_buff *skb) 2803{ 2804 struct net_device *dev = cp->dev; 2805 int entry, nr_frags, frag, tabort, tentry; 2806 dma_addr_t mapping; 2807 unsigned long flags; 2808 u64 ctrl; 2809 u32 len; 2810 2811 spin_lock_irqsave(&cp->tx_lock[ring], flags); 2812 2813 /* This is a hard error, log it. */ 2814 if (TX_BUFFS_AVAIL(cp, ring) <= 2815 CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) { 2816 netif_stop_queue(dev); 2817 spin_unlock_irqrestore(&cp->tx_lock[ring], flags); 2818 printk(KERN_ERR PFX "%s: BUG! 
Tx Ring full when " 2819 "queue awake!\n", dev->name); 2820 return 1; 2821 } 2822 2823 ctrl = 0; 2824 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2825 u64 csum_start_off, csum_stuff_off; 2826 2827 csum_start_off = (u64) (skb->h.raw - skb->data); 2828 csum_stuff_off = (u64) ((skb->h.raw + skb->csum) - skb->data); 2829 2830 ctrl = TX_DESC_CSUM_EN | 2831 CAS_BASE(TX_DESC_CSUM_START, csum_start_off) | 2832 CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off); 2833 } 2834 2835 entry = cp->tx_new[ring]; 2836 cp->tx_skbs[ring][entry] = skb; 2837 2838 nr_frags = skb_shinfo(skb)->nr_frags; 2839 len = skb_headlen(skb); 2840 mapping = pci_map_page(cp->pdev, virt_to_page(skb->data), 2841 offset_in_page(skb->data), len, 2842 PCI_DMA_TODEVICE); 2843 2844 tentry = entry; 2845 tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len); 2846 if (unlikely(tabort)) { 2847 /* NOTE: len is always > tabort */ 2848 cas_write_txd(cp, ring, entry, mapping, len - tabort, 2849 ctrl | TX_DESC_SOF, 0); 2850 entry = TX_DESC_NEXT(ring, entry); 2851 2852 memcpy(tx_tiny_buf(cp, ring, entry), skb->data + 2853 len - tabort, tabort); 2854 mapping = tx_tiny_map(cp, ring, entry, tentry); 2855 cas_write_txd(cp, ring, entry, mapping, tabort, ctrl, 2856 (nr_frags == 0)); 2857 } else { 2858 cas_write_txd(cp, ring, entry, mapping, len, ctrl | 2859 TX_DESC_SOF, (nr_frags == 0)); 2860 } 2861 entry = TX_DESC_NEXT(ring, entry); 2862 2863 for (frag = 0; frag < nr_frags; frag++) { 2864 skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag]; 2865 2866 len = fragp->size; 2867 mapping = pci_map_page(cp->pdev, fragp->page, 2868 fragp->page_offset, len, 2869 PCI_DMA_TODEVICE); 2870 2871 tabort = cas_calc_tabort(cp, fragp->page_offset, len); 2872 if (unlikely(tabort)) { 2873 void *addr; 2874 2875 /* NOTE: len is always > tabort */ 2876 cas_write_txd(cp, ring, entry, mapping, len - tabort, 2877 ctrl, 0); 2878 entry = TX_DESC_NEXT(ring, entry); 2879 2880 addr = cas_page_map(fragp->page); 2881 memcpy(tx_tiny_buf(cp, ring, entry), 2882 addr + fragp->page_offset + len - tabort, 2883 tabort); 2884 cas_page_unmap(addr); 2885 mapping = tx_tiny_map(cp, ring, entry, tentry); 2886 len = tabort; 2887 } 2888 2889 cas_write_txd(cp, ring, entry, mapping, len, ctrl, 2890 (frag + 1 == nr_frags)); 2891 entry = TX_DESC_NEXT(ring, entry); 2892 } 2893 2894 cp->tx_new[ring] = entry; 2895 if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)) 2896 netif_stop_queue(dev); 2897 2898 if (netif_msg_tx_queued(cp)) 2899 printk(KERN_DEBUG "%s: tx[%d] queued, slot %d, skblen %d, " 2900 "avail %d\n", 2901 dev->name, ring, entry, skb->len, 2902 TX_BUFFS_AVAIL(cp, ring)); 2903 writel(entry, cp->regs + REG_TX_KICKN(ring)); 2904 spin_unlock_irqrestore(&cp->tx_lock[ring], flags); 2905 return 0; 2906} 2907 2908static int cas_start_xmit(struct sk_buff *skb, struct net_device *dev) 2909{ 2910 struct cas *cp = netdev_priv(dev); 2911 2912 /* this is only used as a load-balancing hint, so it doesn't 2913 * need to be SMP safe 2914 */ 2915 static int ring; 2916 2917 if (skb_padto(skb, cp->min_frame_size)) 2918 return 0; 2919 2920 /* XXX: we need some higher-level QoS hooks to steer packets to 2921 * individual queues. 2922 */ 2923 if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb)) 2924 return 1; 2925 dev->trans_start = jiffies; 2926 return 0; 2927} 2928 2929static void cas_init_tx_dma(struct cas *cp) 2930{ 2931 u64 desc_dma = cp->block_dvma; 2932 unsigned long off; 2933 u32 val; 2934 int i; 2935 2936 /* set up tx completion writeback registers. 
must be 8-byte aligned */ 2937#ifdef USE_TX_COMPWB 2938 off = offsetof(struct cas_init_block, tx_compwb); 2939 writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI); 2940 writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW); 2941#endif 2942 2943 /* enable completion writebacks, enable paced mode, 2944 * disable read pipe, and disable pre-interrupt compwbs 2945 */ 2946 val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 | 2947 TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 | 2948 TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE | 2949 TX_CFG_INTR_COMPWB_DIS; 2950 2951 /* write out tx ring info and tx desc bases */ 2952 for (i = 0; i < MAX_TX_RINGS; i++) { 2953 off = (unsigned long) cp->init_txds[i] - 2954 (unsigned long) cp->init_block; 2955 2956 val |= CAS_TX_RINGN_BASE(i); 2957 writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i)); 2958 writel((desc_dma + off) & 0xffffffff, cp->regs + 2959 REG_TX_DBN_LOW(i)); 2960 /* don't zero out the kick register here as the system 2961 * will wedge 2962 */ 2963 } 2964 writel(val, cp->regs + REG_TX_CFG); 2965 2966 /* program max burst sizes. these numbers should be different 2967 * if doing QoS. 2968 */ 2969#ifdef USE_QOS 2970 writel(0x800, cp->regs + REG_TX_MAXBURST_0); 2971 writel(0x1600, cp->regs + REG_TX_MAXBURST_1); 2972 writel(0x2400, cp->regs + REG_TX_MAXBURST_2); 2973 writel(0x4800, cp->regs + REG_TX_MAXBURST_3); 2974#else 2975 writel(0x800, cp->regs + REG_TX_MAXBURST_0); 2976 writel(0x800, cp->regs + REG_TX_MAXBURST_1); 2977 writel(0x800, cp->regs + REG_TX_MAXBURST_2); 2978 writel(0x800, cp->regs + REG_TX_MAXBURST_3); 2979#endif 2980} 2981 2982/* Must be invoked under cp->lock. */ 2983static inline void cas_init_dma(struct cas *cp) 2984{ 2985 cas_init_tx_dma(cp); 2986 cas_init_rx_dma(cp); 2987} 2988 2989/* Must be invoked under cp->lock. 
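 *
 * the first 15 multicast addresses go into the exact-match
 * registers; everything past that is folded into a 256-bit hash
 * filter indexed by the top byte of the little-endian CRC. as an
 * illustrative example (the address value is made up, the
 * arithmetic matches the code below):
 *
 *	crc = ether_crc_le(ETH_ALEN, addr) >> 24;	 // 0..255
 *	hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
 *
 * so crc 0x25 sets bit 10 (== 15 - 5) of hash_table[2].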
*/ 2990static u32 cas_setup_multicast(struct cas *cp) 2991{ 2992 u32 rxcfg = 0; 2993 int i; 2994 2995 if (cp->dev->flags & IFF_PROMISC) { 2996 rxcfg |= MAC_RX_CFG_PROMISC_EN; 2997 2998 } else if (cp->dev->flags & IFF_ALLMULTI) { 2999 for (i=0; i < 16; i++) 3000 writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i)); 3001 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN; 3002 3003 } else { 3004 u16 hash_table[16]; 3005 u32 crc; 3006 struct dev_mc_list *dmi = cp->dev->mc_list; 3007 int i; 3008 3009 /* use the alternate mac address registers for the 3010 * first 15 multicast addresses 3011 */ 3012 for (i = 1; i <= CAS_MC_EXACT_MATCH_SIZE; i++) { 3013 if (!dmi) { 3014 writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 0)); 3015 writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 1)); 3016 writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 2)); 3017 continue; 3018 } 3019 writel((dmi->dmi_addr[4] << 8) | dmi->dmi_addr[5], 3020 cp->regs + REG_MAC_ADDRN(i*3 + 0)); 3021 writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3], 3022 cp->regs + REG_MAC_ADDRN(i*3 + 1)); 3023 writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1], 3024 cp->regs + REG_MAC_ADDRN(i*3 + 2)); 3025 dmi = dmi->next; 3026 } 3027 3028 /* use hw hash table for the next series of 3029 * multicast addresses 3030 */ 3031 memset(hash_table, 0, sizeof(hash_table)); 3032 while (dmi) { 3033 crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr); 3034 crc >>= 24; 3035 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); 3036 dmi = dmi->next; 3037 } 3038 for (i=0; i < 16; i++) 3039 writel(hash_table[i], cp->regs + 3040 REG_MAC_HASH_TABLEN(i)); 3041 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN; 3042 } 3043 3044 return rxcfg; 3045} 3046 3047/* must be invoked under cp->stat_lock[N_TX_RINGS] */ 3048static void cas_clear_mac_err(struct cas *cp) 3049{ 3050 writel(0, cp->regs + REG_MAC_COLL_NORMAL); 3051 writel(0, cp->regs + REG_MAC_COLL_FIRST); 3052 writel(0, cp->regs + REG_MAC_COLL_EXCESS); 3053 writel(0, cp->regs + REG_MAC_COLL_LATE); 3054 writel(0, cp->regs + REG_MAC_TIMER_DEFER); 3055 writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK); 3056 writel(0, cp->regs + REG_MAC_RECV_FRAME); 3057 writel(0, cp->regs + REG_MAC_LEN_ERR); 3058 writel(0, cp->regs + REG_MAC_ALIGN_ERR); 3059 writel(0, cp->regs + REG_MAC_FCS_ERR); 3060 writel(0, cp->regs + REG_MAC_RX_CODE_ERR); 3061} 3062 3063 3064static void cas_mac_reset(struct cas *cp) 3065{ 3066 int i; 3067 3068 /* do both TX and RX reset */ 3069 writel(0x1, cp->regs + REG_MAC_TX_RESET); 3070 writel(0x1, cp->regs + REG_MAC_RX_RESET); 3071 3072 /* wait for TX */ 3073 i = STOP_TRIES; 3074 while (i-- > 0) { 3075 if (readl(cp->regs + REG_MAC_TX_RESET) == 0) 3076 break; 3077 udelay(10); 3078 } 3079 3080 /* wait for RX */ 3081 i = STOP_TRIES; 3082 while (i-- > 0) { 3083 if (readl(cp->regs + REG_MAC_RX_RESET) == 0) 3084 break; 3085 udelay(10); 3086 } 3087 3088 if (readl(cp->regs + REG_MAC_TX_RESET) | 3089 readl(cp->regs + REG_MAC_RX_RESET)) 3090 printk(KERN_ERR "%s: mac tx[%d]/rx[%d] reset failed [%08x]\n", 3091 cp->dev->name, readl(cp->regs + REG_MAC_TX_RESET), 3092 readl(cp->regs + REG_MAC_RX_RESET), 3093 readl(cp->regs + REG_MAC_STATE_MACHINE)); 3094} 3095 3096 3097/* Must be invoked under cp->lock. 
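 *
 * mac addresses are packed into the ADDRN registers 16 bits at a
 * time, low octets first. e.g. for the (made-up) address
 * 08:00:20:ab:cd:ef, ADDRN(0) = 0xcdef, ADDRN(1) = 0x20ab and
 * ADDRN(2) = 0x0800. registers 42-44 get 01:80:c2:00:00:01, the
 * 802.3x PAUSE multicast address, in the same packing.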
*/ 3098static void cas_init_mac(struct cas *cp) 3099{ 3100 unsigned char *e = &cp->dev->dev_addr[0]; 3101 int i; 3102#ifdef CONFIG_CASSINI_MULTICAST_REG_WRITE 3103 u32 rxcfg; 3104#endif 3105 cas_mac_reset(cp); 3106 3107 /* setup core arbitration weight register */ 3108 writel(CAWR_RR_DIS, cp->regs + REG_CAWR); 3109 3110 /* XXX Use pci_dma_burst_advice() */ 3111#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA) 3112 /* set the infinite burst register for chips that don't have 3113 * pci issues. 3114 */ 3115 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0) 3116 writel(INF_BURST_EN, cp->regs + REG_INF_BURST); 3117#endif 3118 3119 writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE); 3120 3121 writel(0x00, cp->regs + REG_MAC_IPG0); 3122 writel(0x08, cp->regs + REG_MAC_IPG1); 3123 writel(0x04, cp->regs + REG_MAC_IPG2); 3124 3125 /* change later for 802.3z */ 3126 writel(0x40, cp->regs + REG_MAC_SLOT_TIME); 3127 3128 /* min frame + FCS */ 3129 writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN); 3130 3131 /* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we 3132 * specify the maximum frame size to prevent RX tag errors on 3133 * oversized frames. 3134 */ 3135 writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) | 3136 CAS_BASE(MAC_FRAMESIZE_MAX_FRAME, 3137 (CAS_MAX_MTU + ETH_HLEN + 4 + 4)), 3138 cp->regs + REG_MAC_FRAMESIZE_MAX); 3139 3140 /* NOTE: crc_size is used as a surrogate for half-duplex. 3141 * workaround saturn half-duplex issue by increasing preamble 3142 * size to 65 bytes. 3143 */ 3144 if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size) 3145 writel(0x41, cp->regs + REG_MAC_PA_SIZE); 3146 else 3147 writel(0x07, cp->regs + REG_MAC_PA_SIZE); 3148 writel(0x04, cp->regs + REG_MAC_JAM_SIZE); 3149 writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT); 3150 writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE); 3151 3152 writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED); 3153 3154 writel(0, cp->regs + REG_MAC_ADDR_FILTER0); 3155 writel(0, cp->regs + REG_MAC_ADDR_FILTER1); 3156 writel(0, cp->regs + REG_MAC_ADDR_FILTER2); 3157 writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK); 3158 writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK); 3159 3160 /* setup mac address in perfect filter array */ 3161 for (i = 0; i < 45; i++) 3162 writel(0x0, cp->regs + REG_MAC_ADDRN(i)); 3163 3164 writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0)); 3165 writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1)); 3166 writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2)); 3167 3168 writel(0x0001, cp->regs + REG_MAC_ADDRN(42)); 3169 writel(0xc200, cp->regs + REG_MAC_ADDRN(43)); 3170 writel(0x0180, cp->regs + REG_MAC_ADDRN(44)); 3171 3172#ifndef CONFIG_CASSINI_MULTICAST_REG_WRITE 3173 cp->mac_rx_cfg = cas_setup_multicast(cp); 3174#else 3175 /* WTZ: Do what Adrian did in cas_set_multicast. Doing 3176 * a writel does not seem to be necessary because Cassini 3177 * seems to preserve the configuration when we do the reset. 3178 * If the chip is in trouble, though, it is not clear if we 3179 * can really count on this behavior. cas_set_multicast uses 3180 * spin_lock_irqsave, but we are called only in cas_init_hw and 3181 * cas_init_hw is protected by cas_lock_all, which calls 3182 * spin_lock_irq (so it doesn't need to save the flags, and 3183 * we should be OK for the writel, as that is the only 3184 * difference). 
3185 */ 3186 cp->mac_rx_cfg = rxcfg = cas_setup_multicast(cp); 3187 writel(rxcfg, cp->regs + REG_MAC_RX_CFG); 3188#endif 3189 spin_lock(&cp->stat_lock[N_TX_RINGS]); 3190 cas_clear_mac_err(cp); 3191 spin_unlock(&cp->stat_lock[N_TX_RINGS]); 3192 3193 /* Setup MAC interrupts. We want to get all of the interesting 3194 * counter expiration events, but we do not want to hear about 3195 * normal rx/tx as the DMA engine tells us that. 3196 */ 3197 writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK); 3198 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK); 3199 3200 /* Don't enable even the PAUSE interrupts for now, we 3201 * make no use of those events other than to record them. 3202 */ 3203 writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK); 3204} 3205 3206/* Must be invoked under cp->lock. */ 3207static void cas_init_pause_thresholds(struct cas *cp) 3208{ 3209 /* Calculate pause thresholds. Setting the OFF threshold to the 3210 * full RX fifo size effectively disables PAUSE generation 3211 */ 3212 if (cp->rx_fifo_size <= (2 * 1024)) { 3213 cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size; 3214 } else { 3215 int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63; 3216 if (max_frame * 3 > cp->rx_fifo_size) { 3217 cp->rx_pause_off = 7104; 3218 cp->rx_pause_on = 960; 3219 } else { 3220 int off = (cp->rx_fifo_size - (max_frame * 2)); 3221 int on = off - max_frame; 3222 cp->rx_pause_off = off; 3223 cp->rx_pause_on = on; 3224 } 3225 } 3226} 3227 3228static int cas_vpd_match(const void __iomem *p, const char *str) 3229{ 3230 int len = strlen(str) + 1; 3231 int i; 3232 3233 for (i = 0; i < len; i++) { 3234 if (readb(p + i) != str[i]) 3235 return 0; 3236 } 3237 return 1; 3238} 3239 3240 3241/* get the mac address by reading the vpd information in the rom. 3242 * also get the phy type and determine if there's an entropy generator. 3243 * NOTE: this is a bit convoluted for the following reasons: 3244 * 1) vpd info has order-dependent mac addresses for multinic cards 3245 * 2) the only way to determine the nic order is to use the slot 3246 * number. 3247 * 3) fiber cards don't have bridges, so their slot numbers don't 3248 * mean anything. 3249 * 4) we don't actually know we have a fiber card until after 3250 * the mac addresses are parsed. 
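 *
 * for reference, the layout of one 'I' (instance) keyword as it is
 * scanned below, using local-mac-address as the example (offsets are
 * from the keyword payload pointer p):
 *	p + 0	'I'
 *	p + 3	type byte ('B' = bytes, 'S' = string)
 *	p + 4	data length (6 for a mac address)
 *	p + 5	property string, nul-terminated
 *		("local-mac-address" is 18 bytes with the nul)
 *	p + 23	the six mac address bytes themselves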
3251 */ 3252static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr, 3253 const int offset) 3254{ 3255 void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START; 3256 void __iomem *base, *kstart; 3257 int i, len; 3258 int found = 0; 3259#define VPD_FOUND_MAC 0x01 3260#define VPD_FOUND_PHY 0x02 3261 3262 int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */ 3263 int mac_off = 0; 3264 3265 /* give us access to the PROM */ 3266 writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD, 3267 cp->regs + REG_BIM_LOCAL_DEV_EN); 3268 3269 /* check for an expansion rom */ 3270 if (readb(p) != 0x55 || readb(p + 1) != 0xaa) 3271 goto use_random_mac_addr; 3272 3273 /* search for beginning of vpd */ 3274 base = NULL; 3275 for (i = 2; i < EXPANSION_ROM_SIZE; i++) { 3276 /* check for PCIR */ 3277 if ((readb(p + i + 0) == 0x50) && 3278 (readb(p + i + 1) == 0x43) && 3279 (readb(p + i + 2) == 0x49) && 3280 (readb(p + i + 3) == 0x52)) { 3281 base = p + (readb(p + i + 8) | 3282 (readb(p + i + 9) << 8)); 3283 break; 3284 } 3285 } 3286 3287 if (!base || (readb(base) != 0x82)) 3288 goto use_random_mac_addr; 3289 3290 i = (readb(base + 1) | (readb(base + 2) << 8)) + 3; 3291 while (i < EXPANSION_ROM_SIZE) { 3292 if (readb(base + i) != 0x90) /* no vpd found */ 3293 goto use_random_mac_addr; 3294 3295 /* found a vpd field */ 3296 len = readb(base + i + 1) | (readb(base + i + 2) << 8); 3297 3298 /* extract keywords */ 3299 kstart = base + i + 3; 3300 p = kstart; 3301 while ((p - kstart) < len) { 3302 int klen = readb(p + 2); 3303 int j; 3304 char type; 3305 3306 p += 3; 3307 3308 /* look for the following things: 3309 * -- correct length == 29 3310 * 3 (type) + 2 (size) + 3311 * 18 (strlen("local-mac-address") + 1) + 3312 * 6 (mac addr) 3313 * -- VPD Instance 'I' 3314 * -- VPD Type Bytes 'B' 3315 * -- VPD data length == 6 3316 * -- property string == local-mac-address 3317 * 3318 * -- correct length == 24 3319 * 3 (type) + 2 (size) + 3320 * 12 (strlen("entropy-dev") + 1) + 3321 * 7 (strlen("vms110") + 1) 3322 * -- VPD Instance 'I' 3323 * -- VPD Type String 'B' 3324 * -- VPD data length == 7 3325 * -- property string == entropy-dev 3326 * 3327 * -- correct length == 18 3328 * 3 (type) + 2 (size) + 3329 * 9 (strlen("phy-type") + 1) + 3330 * 4 (strlen("pcs") + 1) 3331 * -- VPD Instance 'I' 3332 * -- VPD Type String 'S' 3333 * -- VPD data length == 4 3334 * -- property string == phy-type 3335 * 3336 * -- correct length == 23 3337 * 3 (type) + 2 (size) + 3338 * 14 (strlen("phy-interface") + 1) + 3339 * 4 (strlen("pcs") + 1) 3340 * -- VPD Instance 'I' 3341 * -- VPD Type String 'S' 3342 * -- VPD data length == 4 3343 * -- property string == phy-interface 3344 */ 3345 if (readb(p) != 'I') 3346 goto next; 3347 3348 /* finally, check string and length */ 3349 type = readb(p + 3); 3350 if (type == 'B') { 3351 if ((klen == 29) && readb(p + 4) == 6 && 3352 cas_vpd_match(p + 5, 3353 "local-mac-address")) { 3354 if (mac_off++ > offset) 3355 goto next; 3356 3357 /* set mac address */ 3358 for (j = 0; j < 6; j++) 3359 dev_addr[j] = 3360 readb(p + 23 + j); 3361 goto found_mac; 3362 } 3363 } 3364 3365 if (type != 'S') 3366 goto next; 3367 3368#ifdef USE_ENTROPY_DEV 3369 if ((klen == 24) && 3370 cas_vpd_match(p + 5, "entropy-dev") && 3371 cas_vpd_match(p + 17, "vms110")) { 3372 cp->cas_flags |= CAS_FLAG_ENTROPY_DEV; 3373 goto next; 3374 } 3375#endif 3376 3377 if (found & VPD_FOUND_PHY) 3378 goto next; 3379 3380 if ((klen == 18) && readb(p + 4) == 4 && 3381 cas_vpd_match(p + 5, "phy-type")) { 3382 if (cas_vpd_match(p + 14, "pcs")) { 
3383 phy_type = CAS_PHY_SERDES; 3384 goto found_phy; 3385 } 3386 } 3387 3388 if ((klen == 23) && readb(p + 4) == 4 && 3389 cas_vpd_match(p + 5, "phy-interface")) { 3390 if (cas_vpd_match(p + 19, "pcs")) { 3391 phy_type = CAS_PHY_SERDES; 3392 goto found_phy; 3393 } 3394 } 3395found_mac: 3396 found |= VPD_FOUND_MAC; 3397 goto next; 3398 3399found_phy: 3400 found |= VPD_FOUND_PHY; 3401 3402next: 3403 p += klen; 3404 } 3405 i += len + 3; 3406 } 3407 3408use_random_mac_addr: 3409 if (found & VPD_FOUND_MAC) 3410 goto done; 3411 3412 /* Sun MAC prefix then 3 random bytes. */ 3413 printk(PFX "MAC address not found in ROM VPD\n"); 3414 dev_addr[0] = 0x08; 3415 dev_addr[1] = 0x00; 3416 dev_addr[2] = 0x20; 3417 get_random_bytes(dev_addr + 3, 3); 3418 3419done: 3420 writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN); 3421 return phy_type; 3422} 3423 3424/* check pci invariants */ 3425static void cas_check_pci_invariants(struct cas *cp) 3426{ 3427 struct pci_dev *pdev = cp->pdev; 3428 u8 rev; 3429 3430 cp->cas_flags = 0; 3431 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); 3432 if ((pdev->vendor == PCI_VENDOR_ID_SUN) && 3433 (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) { 3434 if (rev >= CAS_ID_REVPLUS) 3435 cp->cas_flags |= CAS_FLAG_REG_PLUS; 3436 if (rev < CAS_ID_REVPLUS02u) 3437 cp->cas_flags |= CAS_FLAG_TARGET_ABORT; 3438 3439 /* Original Cassini supports HW CSUM, but it's not 3440 * enabled by default as it can trigger TX hangs. 3441 */ 3442 if (rev < CAS_ID_REV2) 3443 cp->cas_flags |= CAS_FLAG_NO_HW_CSUM; 3444 } else { 3445 /* Only sun has original cassini chips. */ 3446 cp->cas_flags |= CAS_FLAG_REG_PLUS; 3447 3448 /* We use a flag because the same phy might be externally 3449 * connected. 3450 */ 3451 if ((pdev->vendor == PCI_VENDOR_ID_NS) && 3452 (pdev->device == PCI_DEVICE_ID_NS_SATURN)) 3453 cp->cas_flags |= CAS_FLAG_SATURN; 3454 } 3455} 3456 3457 3458static int cas_check_invariants(struct cas *cp) 3459{ 3460 struct pci_dev *pdev = cp->pdev; 3461 u32 cfg; 3462 int i; 3463 3464 /* get page size for rx buffers. */ 3465 cp->page_order = 0; 3466#ifdef USE_PAGE_ORDER 3467 if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) { 3468 /* see if we can allocate larger pages */ 3469 struct page *page = alloc_pages(GFP_ATOMIC, 3470 CAS_JUMBO_PAGE_SHIFT - 3471 PAGE_SHIFT); 3472 if (page) { 3473 __free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT); 3474 cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT; 3475 } else { 3476 printk(PFX "MTU limited to %d bytes\n", CAS_MAX_MTU); 3477 } 3478 } 3479#endif 3480 cp->page_size = (PAGE_SIZE << cp->page_order); 3481 3482 /* Fetch the FIFO configurations. */ 3483 cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64; 3484 cp->rx_fifo_size = RX_FIFO_SIZE; 3485 3486 /* finish phy determination. MDIO1 takes precedence over MDIO0 if 3487 * they're both connected. 
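 * if the vpd named no SERDES phy, the code below probes the MII bus:
 * each of the 32 phy addresses is tried up to three times, reading
 * PHYSID1/PHYSID2; an id of 0 or 0xffffffff means nothing answered
 * at that address.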
3488 */ 3489 cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr, 3490 PCI_SLOT(pdev->devfn)); 3491 if (cp->phy_type & CAS_PHY_SERDES) { 3492 cp->cas_flags |= CAS_FLAG_1000MB_CAP; 3493 return 0; /* no more checking needed */ 3494 } 3495 3496 /* MII */ 3497 cfg = readl(cp->regs + REG_MIF_CFG); 3498 if (cfg & MIF_CFG_MDIO_1) { 3499 cp->phy_type = CAS_PHY_MII_MDIO1; 3500 } else if (cfg & MIF_CFG_MDIO_0) { 3501 cp->phy_type = CAS_PHY_MII_MDIO0; 3502 } 3503 3504 cas_mif_poll(cp, 0); 3505 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE); 3506 3507 for (i = 0; i < 32; i++) { 3508 u32 phy_id; 3509 int j; 3510 3511 for (j = 0; j < 3; j++) { 3512 cp->phy_addr = i; 3513 phy_id = cas_phy_read(cp, MII_PHYSID1) << 16; 3514 phy_id |= cas_phy_read(cp, MII_PHYSID2); 3515 if (phy_id && (phy_id != 0xFFFFFFFF)) { 3516 cp->phy_id = phy_id; 3517 goto done; 3518 } 3519 } 3520 } 3521 printk(KERN_ERR PFX "MII phy did not respond [%08x]\n", 3522 readl(cp->regs + REG_MIF_STATE_MACHINE)); 3523 return -1; 3524 3525done: 3526 /* see if we can do gigabit */ 3527 cfg = cas_phy_read(cp, MII_BMSR); 3528 if ((cfg & CAS_BMSR_1000_EXTEND) && 3529 cas_phy_read(cp, CAS_MII_1000_EXTEND)) 3530 cp->cas_flags |= CAS_FLAG_1000MB_CAP; 3531 return 0; 3532} 3533 3534/* Must be invoked under cp->lock. */ 3535static inline void cas_start_dma(struct cas *cp) 3536{ 3537 int i; 3538 u32 val; 3539 int txfailed = 0; 3540 3541 /* enable dma */ 3542 val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN; 3543 writel(val, cp->regs + REG_TX_CFG); 3544 val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN; 3545 writel(val, cp->regs + REG_RX_CFG); 3546 3547 /* enable the mac */ 3548 val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN; 3549 writel(val, cp->regs + REG_MAC_TX_CFG); 3550 val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN; 3551 writel(val, cp->regs + REG_MAC_RX_CFG); 3552 3553 i = STOP_TRIES; 3554 while (i-- > 0) { 3555 val = readl(cp->regs + REG_MAC_TX_CFG); 3556 if ((val & MAC_TX_CFG_EN)) 3557 break; 3558 udelay(10); 3559 } 3560 if (i < 0) txfailed = 1; 3561 i = STOP_TRIES; 3562 while (i-- > 0) { 3563 val = readl(cp->regs + REG_MAC_RX_CFG); 3564 if ((val & MAC_RX_CFG_EN)) { 3565 if (txfailed) { 3566 printk(KERN_ERR 3567 "%s: enabling mac failed [tx:%08x:%08x].\n", 3568 cp->dev->name, 3569 readl(cp->regs + REG_MIF_STATE_MACHINE), 3570 readl(cp->regs + REG_MAC_STATE_MACHINE)); 3571 } 3572 goto enable_rx_done; 3573 } 3574 udelay(10); 3575 } 3576 printk(KERN_ERR "%s: enabling mac failed [%s:%08x:%08x].\n", 3577 cp->dev->name, 3578 (txfailed? "tx,rx":"rx"), 3579 readl(cp->regs + REG_MIF_STATE_MACHINE), 3580 readl(cp->regs + REG_MAC_STATE_MACHINE)); 3581 3582enable_rx_done: 3583 cas_unmask_intr(cp); /* enable interrupts */ 3584 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK); 3585 writel(0, cp->regs + REG_RX_COMP_TAIL); 3586 3587 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { 3588 if (N_RX_DESC_RINGS > 1) 3589 writel(RX_DESC_RINGN_SIZE(1) - 4, 3590 cp->regs + REG_PLUS_RX_KICK1); 3591 3592 for (i = 1; i < N_RX_COMP_RINGS; i++) 3593 writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i)); 3594 } 3595} 3596 3597/* Must be invoked under cp->lock. */ 3598static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd, 3599 int *pause) 3600{ 3601 u32 val = readl(cp->regs + REG_PCS_MII_LPA); 3602 *fd = (val & PCS_MII_LPA_FD) ? 1 : 0; 3603 *pause = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00; 3604 if (val & PCS_MII_LPA_ASYM_PAUSE) 3605 *pause |= 0x10; 3606 *spd = 1000; 3607} 3608 3609/* Must be invoked under cp->lock. 
*/ 3610static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd, 3611 int *pause) 3612{ 3613 u32 val; 3614 3615 *fd = 0; 3616 *spd = 10; 3617 *pause = 0; 3618 3619 /* use GMII registers */ 3620 val = cas_phy_read(cp, MII_LPA); 3621 if (val & CAS_LPA_PAUSE) 3622 *pause = 0x01; 3623 3624 if (val & CAS_LPA_ASYM_PAUSE) 3625 *pause |= 0x10; 3626 3627 if (val & LPA_DUPLEX) 3628 *fd = 1; 3629 if (val & LPA_100) 3630 *spd = 100; 3631 3632 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { 3633 val = cas_phy_read(cp, CAS_MII_1000_STATUS); 3634 if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF)) 3635 *spd = 1000; 3636 if (val & CAS_LPA_1000FULL) 3637 *fd = 1; 3638 } 3639} 3640 3641/* A link-up condition has occurred, initialize and enable the 3642 * rest of the chip. 3643 * 3644 * Must be invoked under cp->lock. 3645 */ 3646static void cas_set_link_modes(struct cas *cp) 3647{ 3648 u32 val; 3649 int full_duplex, speed, pause; 3650 3651 full_duplex = 0; 3652 speed = 10; 3653 pause = 0; 3654 3655 if (CAS_PHY_MII(cp->phy_type)) { 3656 cas_mif_poll(cp, 0); 3657 val = cas_phy_read(cp, MII_BMCR); 3658 if (val & BMCR_ANENABLE) { 3659 cas_read_mii_link_mode(cp, &full_duplex, &speed, 3660 &pause); 3661 } else { 3662 if (val & BMCR_FULLDPLX) 3663 full_duplex = 1; 3664 3665 if (val & BMCR_SPEED100) 3666 speed = 100; 3667 else if (val & CAS_BMCR_SPEED1000) 3668 speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ? 3669 1000 : 100; 3670 } 3671 cas_mif_poll(cp, 1); 3672 3673 } else { 3674 val = readl(cp->regs + REG_PCS_MII_CTRL); 3675 cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause); 3676 if ((val & PCS_MII_AUTONEG_EN) == 0) { 3677 if (val & PCS_MII_CTRL_DUPLEX) 3678 full_duplex = 1; 3679 } 3680 } 3681 3682 if (netif_msg_link(cp)) 3683 printk(KERN_INFO "%s: Link up at %d Mbps, %s-duplex.\n", 3684 cp->dev->name, speed, (full_duplex ? "full" : "half")); 3685 3686 val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED; 3687 if (CAS_PHY_MII(cp->phy_type)) { 3688 val |= MAC_XIF_MII_BUFFER_OUTPUT_EN; 3689 if (!full_duplex) 3690 val |= MAC_XIF_DISABLE_ECHO; 3691 } 3692 if (full_duplex) 3693 val |= MAC_XIF_FDPLX_LED; 3694 if (speed == 1000) 3695 val |= MAC_XIF_GMII_MODE; 3696 writel(val, cp->regs + REG_MAC_XIF_CFG); 3697 3698 /* deal with carrier and collision detect. */ 3699 val = MAC_TX_CFG_IPG_EN; 3700 if (full_duplex) { 3701 val |= MAC_TX_CFG_IGNORE_CARRIER; 3702 val |= MAC_TX_CFG_IGNORE_COLL; 3703 } else { 3704#ifndef USE_CSMA_CD_PROTO 3705 val |= MAC_TX_CFG_NEVER_GIVE_UP_EN; 3706 val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM; 3707#endif 3708 } 3709 /* val now set up for REG_MAC_TX_CFG */ 3710 3711 /* If gigabit and half-duplex, enable carrier extension 3712 * mode. increase slot time to 512 bytes as well. 3713 * else, disable it and make sure slot time is 64 bytes. 3714 * also activate checksum bug workaround 3715 */ 3716 if ((speed == 1000) && !full_duplex) { 3717 writel(val | MAC_TX_CFG_CARRIER_EXTEND, 3718 cp->regs + REG_MAC_TX_CFG); 3719 3720 val = readl(cp->regs + REG_MAC_RX_CFG); 3721 val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */ 3722 writel(val | MAC_RX_CFG_CARRIER_EXTEND, 3723 cp->regs + REG_MAC_RX_CFG); 3724 3725 writel(0x200, cp->regs + REG_MAC_SLOT_TIME); 3726 3727 cp->crc_size = 4; 3728 /* minimum size gigabit frame at half duplex */ 3729 cp->min_frame_size = CAS_1000MB_MIN_FRAME; 3730 3731 } else { 3732 writel(val, cp->regs + REG_MAC_TX_CFG); 3733 3734 /* checksum bug workaround. 
don't strip FCS when in 3735 * half-duplex mode 3736 */ 3737 val = readl(cp->regs + REG_MAC_RX_CFG); 3738 if (full_duplex) { 3739 val |= MAC_RX_CFG_STRIP_FCS; 3740 cp->crc_size = 0; 3741 cp->min_frame_size = CAS_MIN_MTU; 3742 } else { 3743 val &= ~MAC_RX_CFG_STRIP_FCS; 3744 cp->crc_size = 4; 3745 cp->min_frame_size = CAS_MIN_FRAME; 3746 } 3747 writel(val & ~MAC_RX_CFG_CARRIER_EXTEND, 3748 cp->regs + REG_MAC_RX_CFG); 3749 writel(0x40, cp->regs + REG_MAC_SLOT_TIME); 3750 } 3751 3752 if (netif_msg_link(cp)) { 3753 if (pause & 0x01) { 3754 printk(KERN_INFO "%s: Pause is enabled " 3755 "(rxfifo: %d off: %d on: %d)\n", 3756 cp->dev->name, 3757 cp->rx_fifo_size, 3758 cp->rx_pause_off, 3759 cp->rx_pause_on); 3760 } else if (pause & 0x10) { 3761 printk(KERN_INFO "%s: TX pause enabled\n", 3762 cp->dev->name); 3763 } else { 3764 printk(KERN_INFO "%s: Pause is disabled\n", 3765 cp->dev->name); 3766 } 3767 } 3768 3769 val = readl(cp->regs + REG_MAC_CTRL_CFG); 3770 val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN); 3771 if (pause) { /* symmetric or asymmetric pause */ 3772 val |= MAC_CTRL_CFG_SEND_PAUSE_EN; 3773 if (pause & 0x01) { /* symmetric pause */ 3774 val |= MAC_CTRL_CFG_RECV_PAUSE_EN; 3775 } 3776 } 3777 writel(val, cp->regs + REG_MAC_CTRL_CFG); 3778 cas_start_dma(cp); 3779} 3780 3781/* Must be invoked under cp->lock. */ 3782static void cas_init_hw(struct cas *cp, int restart_link) 3783{ 3784 if (restart_link) 3785 cas_phy_init(cp); 3786 3787 cas_init_pause_thresholds(cp); 3788 cas_init_mac(cp); 3789 cas_init_dma(cp); 3790 3791 if (restart_link) { 3792 /* Default aneg parameters */ 3793 cp->timer_ticks = 0; 3794 cas_begin_auto_negotiation(cp, NULL); 3795 } else if (cp->lstate == link_up) { 3796 cas_set_link_modes(cp); 3797 netif_carrier_on(cp->dev); 3798 } 3799} 3800 3801/* Must be invoked under cp->lock. on earlier cassini boards, 3802 * SOFT_0 is tied to PCI reset. we use this to force a pci reset, 3803 * let it settle out, and then restore pci state. 3804 */ 3805static void cas_hard_reset(struct cas *cp) 3806{ 3807 writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN); 3808 udelay(20); 3809 pci_restore_state(cp->pdev); 3810} 3811 3812 3813static void cas_global_reset(struct cas *cp, int blkflag) 3814{ 3815 int limit; 3816 3817 /* issue a global reset. don't use RSTOUT. */ 3818 if (blkflag && !CAS_PHY_MII(cp->phy_type)) { 3819 /* For PCS, when the blkflag is set, we should set the 3820 * SW_REST_BLOCK_PCS_SLINK bit to prevent the results of 3821 * the last autonegotiation from being cleared. We'll 3822 * need some special handling if the chip is set into a 3823 * loopback mode. 3824 */ 3825 writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK), 3826 cp->regs + REG_SW_RESET); 3827 } else { 3828 writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET); 3829 } 3830 3831 /* need to wait at least 3ms before polling register */ 3832 mdelay(3); 3833 3834 limit = STOP_TRIES; 3835 while (limit-- > 0) { 3836 u32 val = readl(cp->regs + REG_SW_RESET); 3837 if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0) 3838 goto done; 3839 udelay(10); 3840 } 3841 printk(KERN_ERR "%s: sw reset failed.\n", cp->dev->name); 3842 3843done: 3844 /* enable various BIM interrupts */ 3845 writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE | 3846 BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG); 3847 3848 /* clear out pci error status mask for handled errors. 3849 * we don't deal with DMA counter overflows as they happen 3850 * all the time. 
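 * (everything is masked except the bits that cas_pci_interrupt()
 * actually decodes: BADACK, DTRTO, OTHER and the two BIM DMA
 * request errors.)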
3851 */ 3852 writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO | 3853 PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE | 3854 PCI_ERR_BIM_DMA_READ), cp->regs + 3855 REG_PCI_ERR_STATUS_MASK); 3856 3857 /* set up for MII by default to address mac rx reset timeout 3858 * issue 3859 */ 3860 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE); 3861} 3862 3863static void cas_reset(struct cas *cp, int blkflag) 3864{ 3865 u32 val; 3866 3867 cas_mask_intr(cp); 3868 cas_global_reset(cp, blkflag); 3869 cas_mac_reset(cp); 3870 cas_entropy_reset(cp); 3871 3872 /* disable dma engines. */ 3873 val = readl(cp->regs + REG_TX_CFG); 3874 val &= ~TX_CFG_DMA_EN; 3875 writel(val, cp->regs + REG_TX_CFG); 3876 3877 val = readl(cp->regs + REG_RX_CFG); 3878 val &= ~RX_CFG_DMA_EN; 3879 writel(val, cp->regs + REG_RX_CFG); 3880 3881 /* program header parser */ 3882 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) || 3883 (CAS_HP_ALT_FIRMWARE == cas_prog_null)) { 3884 cas_load_firmware(cp, CAS_HP_FIRMWARE); 3885 } else { 3886 cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE); 3887 } 3888 3889 /* clear out error registers */ 3890 spin_lock(&cp->stat_lock[N_TX_RINGS]); 3891 cas_clear_mac_err(cp); 3892 spin_unlock(&cp->stat_lock[N_TX_RINGS]); 3893} 3894 3895/* Shut down the chip, must be called with pm_mutex held. */ 3896static void cas_shutdown(struct cas *cp) 3897{ 3898 unsigned long flags; 3899 3900 /* Make us not-running to avoid timers respawning */ 3901 cp->hw_running = 0; 3902 3903 del_timer_sync(&cp->link_timer); 3904 3905 /* Stop the reset task */ 3906#if 0 3907 while (atomic_read(&cp->reset_task_pending_mtu) || 3908 atomic_read(&cp->reset_task_pending_spare) || 3909 atomic_read(&cp->reset_task_pending_all)) 3910 schedule(); 3911 3912#else 3913 while (atomic_read(&cp->reset_task_pending)) 3914 schedule(); 3915#endif 3916 /* Actually stop the chip */ 3917 cas_lock_all_save(cp, flags); 3918 cas_reset(cp, 0); 3919 if (cp->cas_flags & CAS_FLAG_SATURN) 3920 cas_phy_powerdown(cp); 3921 cas_unlock_all_restore(cp, flags); 3922} 3923 3924static int cas_change_mtu(struct net_device *dev, int new_mtu) 3925{ 3926 struct cas *cp = netdev_priv(dev); 3927 3928 if (new_mtu < CAS_MIN_MTU || new_mtu > CAS_MAX_MTU) 3929 return -EINVAL; 3930 3931 dev->mtu = new_mtu; 3932 if (!netif_running(dev) || !netif_device_present(dev)) 3933 return 0; 3934 3935 /* let the reset task handle it */ 3936#if 1 3937 atomic_inc(&cp->reset_task_pending); 3938 if ((cp->phy_type & CAS_PHY_SERDES)) { 3939 atomic_inc(&cp->reset_task_pending_all); 3940 } else { 3941 atomic_inc(&cp->reset_task_pending_mtu); 3942 } 3943 schedule_work(&cp->reset_task); 3944#else 3945 atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ? 3946 CAS_RESET_ALL : CAS_RESET_MTU); 3947 printk(KERN_ERR "reset called in cas_change_mtu\n"); 3948 schedule_work(&cp->reset_task); 3949#endif 3950 3951 flush_scheduled_work(); 3952 return 0; 3953} 3954 3955static void cas_clean_txd(struct cas *cp, int ring) 3956{ 3957 struct cas_tx_desc *txd = cp->init_txds[ring]; 3958 struct sk_buff *skb, **skbs = cp->tx_skbs[ring]; 3959 u64 daddr, dlen; 3960 int i, size; 3961 3962 size = TX_DESC_RINGN_SIZE(ring); 3963 for (i = 0; i < size; i++) { 3964 int frag; 3965 3966 if (skbs[i] == NULL) 3967 continue; 3968 3969 skb = skbs[i]; 3970 skbs[i] = NULL; 3971 3972 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { 3973 int ent = i & (size - 1); 3974 3975 /* first buffer is never a tiny buffer and so 3976 * needs to be unmapped. 
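/* Editor's sketch (not driver code): cas_clean_txd() above steps its
 * descriptor ring with `ent = i & (size - 1)`.  That is the usual
 * power-of-two ring trick: it equals i % size only when size is a power
 * of two, which the descriptor ring sizes here are.
 */
#if 0   /* compile separately as a plain userspace program */
#include <stdio.h>

int main(void)
{
        const int size = 64;    /* must be a power of two */
        int i;

        for (i = 60; i < 68; i++)
                printf("index %d -> ring entry %d\n", i, i & (size - 1));
        /* wraps 60..63 back to 0..3, exactly like i % size */
        return 0;
}
#endif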
3977 */ 3978 daddr = le64_to_cpu(txd[ent].buffer); 3979 dlen = CAS_VAL(TX_DESC_BUFLEN, 3980 le64_to_cpu(txd[ent].control)); 3981 pci_unmap_page(cp->pdev, daddr, dlen, 3982 PCI_DMA_TODEVICE); 3983 3984 if (frag != skb_shinfo(skb)->nr_frags) { 3985 i++; 3986 3987 /* next buffer might by a tiny buffer. 3988 * skip past it. 3989 */ 3990 ent = i & (size - 1); 3991 if (cp->tx_tiny_use[ring][ent].used) 3992 i++; 3993 } 3994 } 3995 dev_kfree_skb_any(skb); 3996 } 3997 3998 /* zero out tiny buf usage */ 3999 memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring])); 4000} 4001 4002/* freed on close */ 4003static inline void cas_free_rx_desc(struct cas *cp, int ring) 4004{ 4005 cas_page_t **page = cp->rx_pages[ring]; 4006 int i, size; 4007 4008 size = RX_DESC_RINGN_SIZE(ring); 4009 for (i = 0; i < size; i++) { 4010 if (page[i]) { 4011 cas_page_free(cp, page[i]); 4012 page[i] = NULL; 4013 } 4014 } 4015} 4016 4017static void cas_free_rxds(struct cas *cp) 4018{ 4019 int i; 4020 4021 for (i = 0; i < N_RX_DESC_RINGS; i++) 4022 cas_free_rx_desc(cp, i); 4023} 4024 4025/* Must be invoked under cp->lock. */ 4026static void cas_clean_rings(struct cas *cp) 4027{ 4028 int i; 4029 4030 /* need to clean all tx rings */ 4031 memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS); 4032 memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS); 4033 for (i = 0; i < N_TX_RINGS; i++) 4034 cas_clean_txd(cp, i); 4035 4036 /* zero out init block */ 4037 memset(cp->init_block, 0, sizeof(struct cas_init_block)); 4038 cas_clean_rxds(cp); 4039 cas_clean_rxcs(cp); 4040} 4041 4042/* allocated on open */ 4043static inline int cas_alloc_rx_desc(struct cas *cp, int ring) 4044{ 4045 cas_page_t **page = cp->rx_pages[ring]; 4046 int size, i = 0; 4047 4048 size = RX_DESC_RINGN_SIZE(ring); 4049 for (i = 0; i < size; i++) { 4050 if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL) 4051 return -1; 4052 } 4053 return 0; 4054} 4055 4056static int cas_alloc_rxds(struct cas *cp) 4057{ 4058 int i; 4059 4060 for (i = 0; i < N_RX_DESC_RINGS; i++) { 4061 if (cas_alloc_rx_desc(cp, i) < 0) { 4062 cas_free_rxds(cp); 4063 return -1; 4064 } 4065 } 4066 return 0; 4067} 4068 4069static void cas_reset_task(void *data) 4070{ 4071 struct cas *cp = (struct cas *) data; 4072#if 0 4073 int pending = atomic_read(&cp->reset_task_pending); 4074#else 4075 int pending_all = atomic_read(&cp->reset_task_pending_all); 4076 int pending_spare = atomic_read(&cp->reset_task_pending_spare); 4077 int pending_mtu = atomic_read(&cp->reset_task_pending_mtu); 4078 4079 if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) { 4080 /* We can have more tasks scheduled than actually 4081 * needed. 4082 */ 4083 atomic_dec(&cp->reset_task_pending); 4084 return; 4085 } 4086#endif 4087 /* The link went down, we reset the ring, but keep 4088 * DMA stopped. Use this function for reset 4089 * on error as well. 4090 */ 4091 if (cp->hw_running) { 4092 unsigned long flags; 4093 4094 /* Make sure we don't get interrupts or tx packets */ 4095 netif_device_detach(cp->dev); 4096 cas_lock_all_save(cp, flags); 4097 4098 if (cp->opened) { 4099 /* We call cas_spare_recover when we call cas_open. 4100 * but we do not initialize the lists cas_spare_recover 4101 * uses until cas_open is called. 
4102 */ 4103 cas_spare_recover(cp, GFP_ATOMIC); 4104 } 4105#if 1 4106 /* test => only pending_spare set */ 4107 if (!pending_all && !pending_mtu) 4108 goto done; 4109#else 4110 if (pending == CAS_RESET_SPARE) 4111 goto done; 4112#endif 4113 /* when pending == CAS_RESET_ALL, the following 4114 * call to cas_init_hw will restart auto negotiation. 4115 * Setting the second argument of cas_reset to 4116 * !(pending == CAS_RESET_ALL) will set this argument 4117 * to 1 (avoiding reinitializing the PHY for the normal 4118 * PCS case) when auto negotiation is not restarted. 4119 */ 4120#if 1 4121 cas_reset(cp, !(pending_all > 0)); 4122 if (cp->opened) 4123 cas_clean_rings(cp); 4124 cas_init_hw(cp, (pending_all > 0)); 4125#else 4126 cas_reset(cp, !(pending == CAS_RESET_ALL)); 4127 if (cp->opened) 4128 cas_clean_rings(cp); 4129 cas_init_hw(cp, pending == CAS_RESET_ALL); 4130#endif 4131 4132done: 4133 cas_unlock_all_restore(cp, flags); 4134 netif_device_attach(cp->dev); 4135 } 4136#if 1 4137 atomic_sub(pending_all, &cp->reset_task_pending_all); 4138 atomic_sub(pending_spare, &cp->reset_task_pending_spare); 4139 atomic_sub(pending_mtu, &cp->reset_task_pending_mtu); 4140 atomic_dec(&cp->reset_task_pending); 4141#else 4142 atomic_set(&cp->reset_task_pending, 0); 4143#endif 4144} 4145 4146static void cas_link_timer(unsigned long data) 4147{ 4148 struct cas *cp = (struct cas *) data; 4149 int mask, pending = 0, reset = 0; 4150 unsigned long flags; 4151 4152 if (link_transition_timeout != 0 && 4153 cp->link_transition_jiffies_valid && 4154 ((jiffies - cp->link_transition_jiffies) > 4155 (link_transition_timeout))) { 4156 /* One-second counter so link-down workaround doesn't 4157 * cause resets to occur so fast as to fool the switch 4158 * into thinking the link is down. 4159 */ 4160 cp->link_transition_jiffies_valid = 0; 4161 } 4162 4163 if (!cp->hw_running) 4164 return; 4165 4166 spin_lock_irqsave(&cp->lock, flags); 4167 cas_lock_tx(cp); 4168 cas_entropy_gather(cp); 4169 4170 /* If the link task is still pending, we just 4171 * reschedule the link timer 4172 */ 4173#if 1 4174 if (atomic_read(&cp->reset_task_pending_all) || 4175 atomic_read(&cp->reset_task_pending_spare) || 4176 atomic_read(&cp->reset_task_pending_mtu)) 4177 goto done; 4178#else 4179 if (atomic_read(&cp->reset_task_pending)) 4180 goto done; 4181#endif 4182 4183 /* check for rx cleaning */ 4184 if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) { 4185 int i, rmask; 4186 4187 for (i = 0; i < MAX_RX_DESC_RINGS; i++) { 4188 rmask = CAS_FLAG_RXD_POST(i); 4189 if ((mask & rmask) == 0) 4190 continue; 4191 4192 /* post_rxds will do a mod_timer */ 4193 if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) { 4194 pending = 1; 4195 continue; 4196 } 4197 cp->cas_flags &= ~rmask; 4198 } 4199 } 4200 4201 if (CAS_PHY_MII(cp->phy_type)) { 4202 u16 bmsr; 4203 cas_mif_poll(cp, 0); 4204 bmsr = cas_phy_read(cp, MII_BMSR); 4205 /* WTZ: Solaris driver reads this twice, but that 4206 * may be due to the PCS case and the use of a 4207 * common implementation. Read it twice here to be 4208 * safe. 
4209 */ 4210 bmsr = cas_phy_read(cp, MII_BMSR); 4211 cas_mif_poll(cp, 1); 4212 readl(cp->regs + REG_MIF_STATUS); /* avoid dups */ 4213 reset = cas_mii_link_check(cp, bmsr); 4214 } else { 4215 reset = cas_pcs_link_check(cp); 4216 } 4217 4218 if (reset) 4219 goto done; 4220 4221 /* check for tx state machine confusion */ 4222 if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) { 4223 u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE); 4224 u32 wptr, rptr; 4225 int tlm = CAS_VAL(MAC_SM_TLM, val); 4226 4227 if (((tlm == 0x5) || (tlm == 0x3)) && 4228 (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) { 4229 if (netif_msg_tx_err(cp)) 4230 printk(KERN_DEBUG "%s: tx err: " 4231 "MAC_STATE[%08x]\n", 4232 cp->dev->name, val); 4233 reset = 1; 4234 goto done; 4235 } 4236 4237 val = readl(cp->regs + REG_TX_FIFO_PKT_CNT); 4238 wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR); 4239 rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR); 4240 if ((val == 0) && (wptr != rptr)) { 4241 if (netif_msg_tx_err(cp)) 4242 printk(KERN_DEBUG "%s: tx err: " 4243 "TX_FIFO[%08x:%08x:%08x]\n", 4244 cp->dev->name, val, wptr, rptr); 4245 reset = 1; 4246 } 4247 4248 if (reset) 4249 cas_hard_reset(cp); 4250 } 4251 4252done: 4253 if (reset) { 4254#if 1 4255 atomic_inc(&cp->reset_task_pending); 4256 atomic_inc(&cp->reset_task_pending_all); 4257 schedule_work(&cp->reset_task); 4258#else 4259 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); 4260 printk(KERN_ERR "reset called in cas_link_timer\n"); 4261 schedule_work(&cp->reset_task); 4262#endif 4263 } 4264 4265 if (!pending) 4266 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT); 4267 cas_unlock_tx(cp); 4268 spin_unlock_irqrestore(&cp->lock, flags); 4269} 4270 4271/* tiny buffers are used to avoid target abort issues with 4272 * older cassini's 4273 */ 4274static void cas_tx_tiny_free(struct cas *cp) 4275{ 4276 struct pci_dev *pdev = cp->pdev; 4277 int i; 4278 4279 for (i = 0; i < N_TX_RINGS; i++) { 4280 if (!cp->tx_tiny_bufs[i]) 4281 continue; 4282 4283 pci_free_consistent(pdev, TX_TINY_BUF_BLOCK, 4284 cp->tx_tiny_bufs[i], 4285 cp->tx_tiny_dvma[i]); 4286 cp->tx_tiny_bufs[i] = NULL; 4287 } 4288} 4289 4290static int cas_tx_tiny_alloc(struct cas *cp) 4291{ 4292 struct pci_dev *pdev = cp->pdev; 4293 int i; 4294 4295 for (i = 0; i < N_TX_RINGS; i++) { 4296 cp->tx_tiny_bufs[i] = 4297 pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK, 4298 &cp->tx_tiny_dvma[i]); 4299 if (!cp->tx_tiny_bufs[i]) { 4300 cas_tx_tiny_free(cp); 4301 return -1; 4302 } 4303 } 4304 return 0; 4305} 4306 4307 4308static int cas_open(struct net_device *dev) 4309{ 4310 struct cas *cp = netdev_priv(dev); 4311 int hw_was_up, err; 4312 unsigned long flags; 4313 4314 mutex_lock(&cp->pm_mutex); 4315 4316 hw_was_up = cp->hw_running; 4317 4318 /* The power-management mutex protects the hw_running 4319 * etc. state so it is safe to do this bit without cp->lock 4320 */ 4321 if (!cp->hw_running) { 4322 /* Reset the chip */ 4323 cas_lock_all_save(cp, flags); 4324 /* We set the second arg to cas_reset to zero 4325 * because cas_init_hw below will have its second 4326 * argument set to non-zero, which will force 4327 * autonegotiation to start. 
4328 */ 4329 cas_reset(cp, 0); 4330 cp->hw_running = 1; 4331 cas_unlock_all_restore(cp, flags); 4332 } 4333 4334 if (cas_tx_tiny_alloc(cp) < 0) 4335 return -ENOMEM; 4336 4337 /* alloc rx descriptors */ 4338 err = -ENOMEM; 4339 if (cas_alloc_rxds(cp) < 0) 4340 goto err_tx_tiny; 4341 4342 /* allocate spares */ 4343 cas_spare_init(cp); 4344 cas_spare_recover(cp, GFP_KERNEL); 4345 4346 /* We can now request the interrupt as we know it's masked 4347 * on the controller. cassini+ has up to 4 interrupts 4348 * that can be used, but you need to do explicit pci interrupt 4349 * mapping to expose them 4350 */ 4351 if (request_irq(cp->pdev->irq, cas_interrupt, 4352 IRQF_SHARED, dev->name, (void *) dev)) { 4353 printk(KERN_ERR "%s: failed to request irq !\n", 4354 cp->dev->name); 4355 err = -EAGAIN; 4356 goto err_spare; 4357 } 4358 4359 /* init hw */ 4360 cas_lock_all_save(cp, flags); 4361 cas_clean_rings(cp); 4362 cas_init_hw(cp, !hw_was_up); 4363 cp->opened = 1; 4364 cas_unlock_all_restore(cp, flags); 4365 4366 netif_start_queue(dev); 4367 mutex_unlock(&cp->pm_mutex); 4368 return 0; 4369 4370err_spare: 4371 cas_spare_free(cp); 4372 cas_free_rxds(cp); 4373err_tx_tiny: 4374 cas_tx_tiny_free(cp); 4375 mutex_unlock(&cp->pm_mutex); 4376 return err; 4377} 4378 4379static int cas_close(struct net_device *dev) 4380{ 4381 unsigned long flags; 4382 struct cas *cp = netdev_priv(dev); 4383 4384 /* Make sure we don't get distracted by suspend/resume */ 4385 mutex_lock(&cp->pm_mutex); 4386 4387 netif_stop_queue(dev); 4388 4389 /* Stop traffic, mark us closed */ 4390 cas_lock_all_save(cp, flags); 4391 cp->opened = 0; 4392 cas_reset(cp, 0); 4393 cas_phy_init(cp); 4394 cas_begin_auto_negotiation(cp, NULL); 4395 cas_clean_rings(cp); 4396 cas_unlock_all_restore(cp, flags); 4397 4398 free_irq(cp->pdev->irq, (void *) dev); 4399 cas_spare_free(cp); 4400 cas_free_rxds(cp); 4401 cas_tx_tiny_free(cp); 4402 mutex_unlock(&cp->pm_mutex); 4403 return 0; 4404} 4405 4406static struct { 4407 const char name[ETH_GSTRING_LEN]; 4408} ethtool_cassini_statnames[] = { 4409 {"collisions"}, 4410 {"rx_bytes"}, 4411 {"rx_crc_errors"}, 4412 {"rx_dropped"}, 4413 {"rx_errors"}, 4414 {"rx_fifo_errors"}, 4415 {"rx_frame_errors"}, 4416 {"rx_length_errors"}, 4417 {"rx_over_errors"}, 4418 {"rx_packets"}, 4419 {"tx_aborted_errors"}, 4420 {"tx_bytes"}, 4421 {"tx_dropped"}, 4422 {"tx_errors"}, 4423 {"tx_fifo_errors"}, 4424 {"tx_packets"} 4425}; 4426#define CAS_NUM_STAT_KEYS (sizeof(ethtool_cassini_statnames)/ETH_GSTRING_LEN) 4427 4428static struct { 4429 const int offsets; /* neg. 
values for 2nd arg to cas_read_phy */
} ethtool_register_table[] = {
        {-MII_BMSR},
        {-MII_BMCR},
        {REG_CAWR},
        {REG_INF_BURST},
        {REG_BIM_CFG},
        {REG_RX_CFG},
        {REG_HP_CFG},
        {REG_MAC_TX_CFG},
        {REG_MAC_RX_CFG},
        {REG_MAC_CTRL_CFG},
        {REG_MAC_XIF_CFG},
        {REG_MIF_CFG},
        {REG_PCS_CFG},
        {REG_SATURN_PCFG},
        {REG_PCS_MII_STATUS},
        {REG_PCS_STATE_MACHINE},
        {REG_MAC_COLL_EXCESS},
        {REG_MAC_COLL_LATE}
};
#define CAS_REG_LEN     (sizeof(ethtool_register_table)/sizeof(int))
#define CAS_MAX_REGS    (sizeof(u32)*CAS_REG_LEN)

static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
{
        u8 *p;
        int i;
        unsigned long flags;

        spin_lock_irqsave(&cp->lock, flags);
        for (i = 0, p = ptr; i < len; i++, p += sizeof(u32)) {
                u16 hval;
                u32 val;
                if (ethtool_register_table[i].offsets < 0) {
                        hval = cas_phy_read(cp,
                                    -ethtool_register_table[i].offsets);
                        val = hval;
                } else {
                        val = readl(cp->regs +
                                    ethtool_register_table[i].offsets);
                }
                memcpy(p, (u8 *)&val, sizeof(u32));
        }
        spin_unlock_irqrestore(&cp->lock, flags);
}

static struct net_device_stats *cas_get_stats(struct net_device *dev)
{
        struct cas *cp = netdev_priv(dev);
        struct net_device_stats *stats = cp->net_stats;
        unsigned long flags;
        int i;
        unsigned long tmp;

        /* we collate all of the stats into net_stats[N_TX_RINGS] */
        if (!cp->hw_running)
                return stats + N_TX_RINGS;

        /* collect outstanding stats */
        /* WTZ: the Cassini spec gives these as 16 bit counters but
         * stored in 32-bit words.  Added a mask of 0xffff to be safe,
         * in case the chip somehow puts any garbage in the other bits.
         * Also, counter usage didn't seem to match what Adrian did
         * in the parts of the code that set these quantities.  Made
         * that consistent.
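/* Editor's sketch (not driver code): ethtool_register_table above folds
 * two address spaces into one list by sign -- negative entries name MII
 * PHY registers, non-negative ones are MMIO offsets -- and cas_read_regs()
 * dispatches on the sign.  One subtlety: register 0 cannot be encoded as
 * a PHY register this way, since -0 == 0 (and MII_BMCR is register 0).
 * Standalone model with toy readers:
 */
#if 0   /* compile separately as a plain userspace program */
#include <stdio.h>

static unsigned phy_read(int reg)  { return 0x1000u + (unsigned)reg; }
static unsigned mmio_read(int off) { return 0x2000u + (unsigned)off; }

int main(void)
{
        static const int table[] = { -5, -1, 0x10, 0x34 };
        unsigned i;

        for (i = 0; i < sizeof(table)/sizeof(table[0]); i++) {
                int off = table[i];
                unsigned val = (off < 0) ? phy_read(-off) : mmio_read(off);

                printf("entry %u = %#x\n", i, val);
        }
        return 0;
}
#endif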
4494 */ 4495 spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags); 4496 stats[N_TX_RINGS].rx_crc_errors += 4497 readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff; 4498 stats[N_TX_RINGS].rx_frame_errors += 4499 readl(cp->regs + REG_MAC_ALIGN_ERR) &0xffff; 4500 stats[N_TX_RINGS].rx_length_errors += 4501 readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff; 4502#if 1 4503 tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) + 4504 (readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff); 4505 stats[N_TX_RINGS].tx_aborted_errors += tmp; 4506 stats[N_TX_RINGS].collisions += 4507 tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff); 4508#else 4509 stats[N_TX_RINGS].tx_aborted_errors += 4510 readl(cp->regs + REG_MAC_COLL_EXCESS); 4511 stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) + 4512 readl(cp->regs + REG_MAC_COLL_LATE); 4513#endif 4514 cas_clear_mac_err(cp); 4515 4516 /* saved bits that are unique to ring 0 */ 4517 spin_lock(&cp->stat_lock[0]); 4518 stats[N_TX_RINGS].collisions += stats[0].collisions; 4519 stats[N_TX_RINGS].rx_over_errors += stats[0].rx_over_errors; 4520 stats[N_TX_RINGS].rx_frame_errors += stats[0].rx_frame_errors; 4521 stats[N_TX_RINGS].rx_fifo_errors += stats[0].rx_fifo_errors; 4522 stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors; 4523 stats[N_TX_RINGS].tx_fifo_errors += stats[0].tx_fifo_errors; 4524 spin_unlock(&cp->stat_lock[0]); 4525 4526 for (i = 0; i < N_TX_RINGS; i++) { 4527 spin_lock(&cp->stat_lock[i]); 4528 stats[N_TX_RINGS].rx_length_errors += 4529 stats[i].rx_length_errors; 4530 stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors; 4531 stats[N_TX_RINGS].rx_packets += stats[i].rx_packets; 4532 stats[N_TX_RINGS].tx_packets += stats[i].tx_packets; 4533 stats[N_TX_RINGS].rx_bytes += stats[i].rx_bytes; 4534 stats[N_TX_RINGS].tx_bytes += stats[i].tx_bytes; 4535 stats[N_TX_RINGS].rx_errors += stats[i].rx_errors; 4536 stats[N_TX_RINGS].tx_errors += stats[i].tx_errors; 4537 stats[N_TX_RINGS].rx_dropped += stats[i].rx_dropped; 4538 stats[N_TX_RINGS].tx_dropped += stats[i].tx_dropped; 4539 memset(stats + i, 0, sizeof(struct net_device_stats)); 4540 spin_unlock(&cp->stat_lock[i]); 4541 } 4542 spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags); 4543 return stats + N_TX_RINGS; 4544} 4545 4546 4547static void cas_set_multicast(struct net_device *dev) 4548{ 4549 struct cas *cp = netdev_priv(dev); 4550 u32 rxcfg, rxcfg_new; 4551 unsigned long flags; 4552 int limit = STOP_TRIES; 4553 4554 if (!cp->hw_running) 4555 return; 4556 4557 spin_lock_irqsave(&cp->lock, flags); 4558 rxcfg = readl(cp->regs + REG_MAC_RX_CFG); 4559 4560 /* disable RX MAC and wait for completion */ 4561 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); 4562 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) { 4563 if (!limit--) 4564 break; 4565 udelay(10); 4566 } 4567 4568 /* disable hash filter and wait for completion */ 4569 limit = STOP_TRIES; 4570 rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN); 4571 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); 4572 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) { 4573 if (!limit--) 4574 break; 4575 udelay(10); 4576 } 4577 4578 /* program hash filters */ 4579 cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp); 4580 rxcfg |= rxcfg_new; 4581 writel(rxcfg, cp->regs + REG_MAC_RX_CFG); 4582 spin_unlock_irqrestore(&cp->lock, flags); 4583} 4584 4585static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 4586{ 4587 struct cas *cp = netdev_priv(dev); 4588 
strncpy(info->driver, DRV_MODULE_NAME, ETHTOOL_BUSINFO_LEN);
        strncpy(info->version, DRV_MODULE_VERSION, ETHTOOL_BUSINFO_LEN);
        info->fw_version[0] = '\0';
        strncpy(info->bus_info, pci_name(cp->pdev), ETHTOOL_BUSINFO_LEN);
        info->regdump_len = cp->casreg_len < CAS_MAX_REGS ?
                cp->casreg_len : CAS_MAX_REGS;
        info->n_stats = CAS_NUM_STAT_KEYS;
}

static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct cas *cp = netdev_priv(dev);
        u16 bmcr;
        int full_duplex, speed, pause;
        unsigned long flags;
        enum link_state linkstate = link_up;

        cmd->advertising = 0;
        cmd->supported = SUPPORTED_Autoneg;
        if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
                cmd->supported |= SUPPORTED_1000baseT_Full;
                cmd->advertising |= ADVERTISED_1000baseT_Full;
        }

        /* Record PHY settings if HW is on. */
        spin_lock_irqsave(&cp->lock, flags);
        bmcr = 0;
        linkstate = cp->lstate;
        if (CAS_PHY_MII(cp->phy_type)) {
                cmd->port = PORT_MII;
                cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ?
                        XCVR_INTERNAL : XCVR_EXTERNAL;
                cmd->phy_address = cp->phy_addr;
                cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII |
                        ADVERTISED_10baseT_Half |
                        ADVERTISED_10baseT_Full |
                        ADVERTISED_100baseT_Half |
                        ADVERTISED_100baseT_Full;

                cmd->supported |=
                        (SUPPORTED_10baseT_Half |
                         SUPPORTED_10baseT_Full |
                         SUPPORTED_100baseT_Half |
                         SUPPORTED_100baseT_Full |
                         SUPPORTED_TP | SUPPORTED_MII);

                if (cp->hw_running) {
                        cas_mif_poll(cp, 0);
                        bmcr = cas_phy_read(cp, MII_BMCR);
                        cas_read_mii_link_mode(cp, &full_duplex,
                                               &speed, &pause);
                        cas_mif_poll(cp, 1);
                }

        } else {
                cmd->port = PORT_FIBRE;
                cmd->transceiver = XCVR_INTERNAL;
                cmd->phy_address = 0;
                cmd->supported |= SUPPORTED_FIBRE;
                cmd->advertising |= ADVERTISED_FIBRE;

                if (cp->hw_running) {
                        /* pcs uses the same bits as mii */
                        bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
                        cas_read_pcs_link_mode(cp, &full_duplex,
                                               &speed, &pause);
                }
        }
        spin_unlock_irqrestore(&cp->lock, flags);

        if (bmcr & BMCR_ANENABLE) {
                cmd->advertising |= ADVERTISED_Autoneg;
                cmd->autoneg = AUTONEG_ENABLE;
                cmd->speed = ((speed == 10) ?
                              SPEED_10 :
                              ((speed == 1000) ?
                               SPEED_1000 : SPEED_100));
                cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
        } else {
                cmd->autoneg = AUTONEG_DISABLE;
                cmd->speed =
                        (bmcr & CAS_BMCR_SPEED1000) ?
                        SPEED_1000 :
                        ((bmcr & BMCR_SPEED100) ? SPEED_100 :
                         SPEED_10);
                cmd->duplex =
                        (bmcr & BMCR_FULLDPLX) ?
                        DUPLEX_FULL : DUPLEX_HALF;
        }
        if (linkstate != link_up) {
                /* Force these to "unknown" if the link is not up and
                 * autonegotiation is enabled.  We can set the link
                 * speed to 0, but not cmd->duplex,
                 * because its legal values are 0 and 1.  Ethtool will
                 * print the value reported in parentheses after the
                 * word "Unknown" for unrecognized values.
                 *
                 * If in forced mode, we report the speed and duplex
                 * settings that we configured.
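/* Editor's sketch (not driver code): the forced-mode decode used above
 * maps control-register bits straight to a speed/duplex pair.  Standalone
 * model; the FAKE_ bit values are illustrative stand-ins for
 * BMCR_SPEED100, CAS_BMCR_SPEED1000 and BMCR_FULLDPLX.
 */
#if 0   /* compile separately as a plain userspace program */
#include <stdio.h>

#define FAKE_SPEED100  0x2000
#define FAKE_SPEED1000 0x0040
#define FAKE_FULLDPLX  0x0100

static void decode_forced(unsigned bmcr, int *speed, int *duplex)
{
        *speed = (bmcr & FAKE_SPEED1000) ? 1000 :
                 ((bmcr & FAKE_SPEED100) ? 100 : 10);
        *duplex = (bmcr & FAKE_FULLDPLX) ? 1 : 0;
}

int main(void)
{
        int speed, duplex;

        decode_forced(FAKE_SPEED100 | FAKE_FULLDPLX, &speed, &duplex);
        printf("%d Mbps, %s duplex\n", speed, duplex ? "full" : "half");
        return 0;
}
#endif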
4687 */ 4688 if (cp->link_cntl & BMCR_ANENABLE) { 4689 cmd->speed = 0; 4690 cmd->duplex = 0xff; 4691 } else { 4692 cmd->speed = SPEED_10; 4693 if (cp->link_cntl & BMCR_SPEED100) { 4694 cmd->speed = SPEED_100; 4695 } else if (cp->link_cntl & CAS_BMCR_SPEED1000) { 4696 cmd->speed = SPEED_1000; 4697 } 4698 cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)? 4699 DUPLEX_FULL : DUPLEX_HALF; 4700 } 4701 } 4702 return 0; 4703} 4704 4705static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 4706{ 4707 struct cas *cp = netdev_priv(dev); 4708 unsigned long flags; 4709 4710 /* Verify the settings we care about. */ 4711 if (cmd->autoneg != AUTONEG_ENABLE && 4712 cmd->autoneg != AUTONEG_DISABLE) 4713 return -EINVAL; 4714 4715 if (cmd->autoneg == AUTONEG_DISABLE && 4716 ((cmd->speed != SPEED_1000 && 4717 cmd->speed != SPEED_100 && 4718 cmd->speed != SPEED_10) || 4719 (cmd->duplex != DUPLEX_HALF && 4720 cmd->duplex != DUPLEX_FULL))) 4721 return -EINVAL; 4722 4723 /* Apply settings and restart link process. */ 4724 spin_lock_irqsave(&cp->lock, flags); 4725 cas_begin_auto_negotiation(cp, cmd); 4726 spin_unlock_irqrestore(&cp->lock, flags); 4727 return 0; 4728} 4729 4730static int cas_nway_reset(struct net_device *dev) 4731{ 4732 struct cas *cp = netdev_priv(dev); 4733 unsigned long flags; 4734 4735 if ((cp->link_cntl & BMCR_ANENABLE) == 0) 4736 return -EINVAL; 4737 4738 /* Restart link process. */ 4739 spin_lock_irqsave(&cp->lock, flags); 4740 cas_begin_auto_negotiation(cp, NULL); 4741 spin_unlock_irqrestore(&cp->lock, flags); 4742 4743 return 0; 4744} 4745 4746static u32 cas_get_link(struct net_device *dev) 4747{ 4748 struct cas *cp = netdev_priv(dev); 4749 return cp->lstate == link_up; 4750} 4751 4752static u32 cas_get_msglevel(struct net_device *dev) 4753{ 4754 struct cas *cp = netdev_priv(dev); 4755 return cp->msg_enable; 4756} 4757 4758static void cas_set_msglevel(struct net_device *dev, u32 value) 4759{ 4760 struct cas *cp = netdev_priv(dev); 4761 cp->msg_enable = value; 4762} 4763 4764static int cas_get_regs_len(struct net_device *dev) 4765{ 4766 struct cas *cp = netdev_priv(dev); 4767 return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len: CAS_MAX_REGS; 4768} 4769 4770static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs, 4771 void *p) 4772{ 4773 struct cas *cp = netdev_priv(dev); 4774 regs->version = 0; 4775 /* cas_read_regs handles locks (cp->lock). 
*/ 4776 cas_read_regs(cp, p, regs->len / sizeof(u32)); 4777} 4778 4779static int cas_get_stats_count(struct net_device *dev) 4780{ 4781 return CAS_NUM_STAT_KEYS; 4782} 4783 4784static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data) 4785{ 4786 memcpy(data, &ethtool_cassini_statnames, 4787 CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN); 4788} 4789 4790static void cas_get_ethtool_stats(struct net_device *dev, 4791 struct ethtool_stats *estats, u64 *data) 4792{ 4793 struct cas *cp = netdev_priv(dev); 4794 struct net_device_stats *stats = cas_get_stats(cp->dev); 4795 int i = 0; 4796 data[i++] = stats->collisions; 4797 data[i++] = stats->rx_bytes; 4798 data[i++] = stats->rx_crc_errors; 4799 data[i++] = stats->rx_dropped; 4800 data[i++] = stats->rx_errors; 4801 data[i++] = stats->rx_fifo_errors; 4802 data[i++] = stats->rx_frame_errors; 4803 data[i++] = stats->rx_length_errors; 4804 data[i++] = stats->rx_over_errors; 4805 data[i++] = stats->rx_packets; 4806 data[i++] = stats->tx_aborted_errors; 4807 data[i++] = stats->tx_bytes; 4808 data[i++] = stats->tx_dropped; 4809 data[i++] = stats->tx_errors; 4810 data[i++] = stats->tx_fifo_errors; 4811 data[i++] = stats->tx_packets; 4812 BUG_ON(i != CAS_NUM_STAT_KEYS); 4813} 4814 4815static const struct ethtool_ops cas_ethtool_ops = { 4816 .get_drvinfo = cas_get_drvinfo, 4817 .get_settings = cas_get_settings, 4818 .set_settings = cas_set_settings, 4819 .nway_reset = cas_nway_reset, 4820 .get_link = cas_get_link, 4821 .get_msglevel = cas_get_msglevel, 4822 .set_msglevel = cas_set_msglevel, 4823 .get_regs_len = cas_get_regs_len, 4824 .get_regs = cas_get_regs, 4825 .get_stats_count = cas_get_stats_count, 4826 .get_strings = cas_get_strings, 4827 .get_ethtool_stats = cas_get_ethtool_stats, 4828}; 4829 4830static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 4831{ 4832 struct cas *cp = netdev_priv(dev); 4833 struct mii_ioctl_data *data = if_mii(ifr); 4834 unsigned long flags; 4835 int rc = -EOPNOTSUPP; 4836 4837 /* Hold the PM mutex while doing ioctl's or we may collide 4838 * with open/close and power management and oops. 4839 */ 4840 mutex_lock(&cp->pm_mutex); 4841 switch (cmd) { 4842 case SIOCGMIIPHY: /* Get address of MII PHY in use. */ 4843 data->phy_id = cp->phy_addr; 4844 /* Fallthrough... */ 4845 4846 case SIOCGMIIREG: /* Read MII PHY register. */ 4847 spin_lock_irqsave(&cp->lock, flags); 4848 cas_mif_poll(cp, 0); 4849 data->val_out = cas_phy_read(cp, data->reg_num & 0x1f); 4850 cas_mif_poll(cp, 1); 4851 spin_unlock_irqrestore(&cp->lock, flags); 4852 rc = 0; 4853 break; 4854 4855 case SIOCSMIIREG: /* Write MII PHY register. 
 */
                if (!capable(CAP_NET_ADMIN)) {
                        rc = -EPERM;
                        break;
                }
                spin_lock_irqsave(&cp->lock, flags);
                cas_mif_poll(cp, 0);
                rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
                cas_mif_poll(cp, 1);
                spin_unlock_irqrestore(&cp->lock, flags);
                break;
        default:
                break;
        }

        mutex_unlock(&cp->pm_mutex);
        return rc;
}

static int __devinit cas_init_one(struct pci_dev *pdev,
                                  const struct pci_device_id *ent)
{
        static int cas_version_printed = 0;
        unsigned long casreg_len;
        struct net_device *dev;
        struct cas *cp;
        int i, err, pci_using_dac;
        u16 pci_cmd;
        u8 orig_cacheline_size = 0, cas_cacheline_size = 0;

        if (cas_version_printed++ == 0)
                printk(KERN_INFO "%s", version);

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
                return err;
        }

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                dev_err(&pdev->dev, "Cannot find proper PCI device "
                        "base address, aborting.\n");
                err = -ENODEV;
                goto err_out_disable_pdev;
        }

        dev = alloc_etherdev(sizeof(*cp));
        if (!dev) {
                dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
                err = -ENOMEM;
                goto err_out_disable_pdev;
        }
        SET_MODULE_OWNER(dev);
        SET_NETDEV_DEV(dev, &pdev->dev);

        err = pci_request_regions(pdev, dev->name);
        if (err) {
                dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
                goto err_out_free_netdev;
        }
        pci_set_master(pdev);

        /* we must always turn on parity response or else parity
         * doesn't get generated properly. disable SERR/PERR as well.
         * in addition, we want to turn MWI on.
         */
        pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
        pci_cmd &= ~PCI_COMMAND_SERR;
        pci_cmd |= PCI_COMMAND_PARITY;
        pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
        pci_set_mwi(pdev);
        /*
         * On some architectures, the default cache line size set
         * by pci_set_mwi reduces performance.  We have to increase
         * it for this case.  To start, we'll print some configuration
         * data.
         */
#if 1
        pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
                             &orig_cacheline_size);
        if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
                cas_cacheline_size =
                        (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
                        CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
                if (pci_write_config_byte(pdev,
                                          PCI_CACHE_LINE_SIZE,
                                          cas_cacheline_size)) {
                        dev_err(&pdev->dev, "Could not set PCI cache "
                                "line size\n");
                        goto err_write_cacheline;
                }
        }
#endif


        /* Configure DMA attributes. */
        if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
                pci_using_dac = 1;
                err = pci_set_consistent_dma_mask(pdev,
                                                  DMA_64BIT_MASK);
                if (err < 0) {
                        dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
                                "for consistent allocations\n");
                        goto err_out_free_res;
                }

        } else {
                err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
                if (err) {
                        dev_err(&pdev->dev, "No usable DMA configuration, "
                                "aborting.\n");
                        goto err_out_free_res;
                }
                pci_using_dac = 0;
        }

        casreg_len = pci_resource_len(pdev, 0);

        cp = netdev_priv(dev);
        cp->pdev = pdev;
#if 1
        /* A value of 0 indicates we never explicitly set it */
        cp->orig_cacheline_size = cas_cacheline_size ?
orig_cacheline_size: 0; 4978#endif 4979 cp->dev = dev; 4980 cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE : 4981 cassini_debug; 4982 4983 cp->link_transition = LINK_TRANSITION_UNKNOWN; 4984 cp->link_transition_jiffies_valid = 0; 4985 4986 spin_lock_init(&cp->lock); 4987 spin_lock_init(&cp->rx_inuse_lock); 4988 spin_lock_init(&cp->rx_spare_lock); 4989 for (i = 0; i < N_TX_RINGS; i++) { 4990 spin_lock_init(&cp->stat_lock[i]); 4991 spin_lock_init(&cp->tx_lock[i]); 4992 } 4993 spin_lock_init(&cp->stat_lock[N_TX_RINGS]); 4994 mutex_init(&cp->pm_mutex); 4995 4996 init_timer(&cp->link_timer); 4997 cp->link_timer.function = cas_link_timer; 4998 cp->link_timer.data = (unsigned long) cp; 4999 5000#if 1 5001 /* Just in case the implementation of atomic operations 5002 * change so that an explicit initialization is necessary. 5003 */ 5004 atomic_set(&cp->reset_task_pending, 0); 5005 atomic_set(&cp->reset_task_pending_all, 0); 5006 atomic_set(&cp->reset_task_pending_spare, 0); 5007 atomic_set(&cp->reset_task_pending_mtu, 0); 5008#endif 5009 INIT_WORK(&cp->reset_task, cas_reset_task, cp); 5010 5011 /* Default link parameters */ 5012 if (link_mode >= 0 && link_mode <= 6) 5013 cp->link_cntl = link_modes[link_mode]; 5014 else 5015 cp->link_cntl = BMCR_ANENABLE; 5016 cp->lstate = link_down; 5017 cp->link_transition = LINK_TRANSITION_LINK_DOWN; 5018 netif_carrier_off(cp->dev); 5019 cp->timer_ticks = 0; 5020 5021 /* give us access to cassini registers */ 5022 cp->regs = pci_iomap(pdev, 0, casreg_len); 5023 if (cp->regs == 0UL) { 5024 dev_err(&pdev->dev, "Cannot map device registers, aborting.\n"); 5025 goto err_out_free_res; 5026 } 5027 cp->casreg_len = casreg_len; 5028 5029 pci_save_state(pdev); 5030 cas_check_pci_invariants(cp); 5031 cas_hard_reset(cp); 5032 cas_reset(cp, 0); 5033 if (cas_check_invariants(cp)) 5034 goto err_out_iounmap; 5035 5036 cp->init_block = (struct cas_init_block *) 5037 pci_alloc_consistent(pdev, sizeof(struct cas_init_block), 5038 &cp->block_dvma); 5039 if (!cp->init_block) { 5040 dev_err(&pdev->dev, "Cannot allocate init block, aborting.\n"); 5041 goto err_out_iounmap; 5042 } 5043 5044 for (i = 0; i < N_TX_RINGS; i++) 5045 cp->init_txds[i] = cp->init_block->txds[i]; 5046 5047 for (i = 0; i < N_RX_DESC_RINGS; i++) 5048 cp->init_rxds[i] = cp->init_block->rxds[i]; 5049 5050 for (i = 0; i < N_RX_COMP_RINGS; i++) 5051 cp->init_rxcs[i] = cp->init_block->rxcs[i]; 5052 5053 for (i = 0; i < N_RX_FLOWS; i++) 5054 skb_queue_head_init(&cp->rx_flows[i]); 5055 5056 dev->open = cas_open; 5057 dev->stop = cas_close; 5058 dev->hard_start_xmit = cas_start_xmit; 5059 dev->get_stats = cas_get_stats; 5060 dev->set_multicast_list = cas_set_multicast; 5061 dev->do_ioctl = cas_ioctl; 5062 dev->ethtool_ops = &cas_ethtool_ops; 5063 dev->tx_timeout = cas_tx_timeout; 5064 dev->watchdog_timeo = CAS_TX_TIMEOUT; 5065 dev->change_mtu = cas_change_mtu; 5066#ifdef USE_NAPI 5067 dev->poll = cas_poll; 5068 dev->weight = 64; 5069#endif 5070#ifdef CONFIG_NET_POLL_CONTROLLER 5071 dev->poll_controller = cas_netpoll; 5072#endif 5073 dev->irq = pdev->irq; 5074 dev->dma = 0; 5075 5076 /* Cassini features. 
 */
        if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
                dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;

        if (pci_using_dac)
                dev->features |= NETIF_F_HIGHDMA;

        if (register_netdev(dev)) {
                dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
                goto err_out_free_consistent;
        }

        i = readl(cp->regs + REG_BIM_CFG);
        printk(KERN_INFO "%s: Sun Cassini%s (%sbit/%sMHz PCI/%s) "
               "Ethernet[%d] ", dev->name,
               (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
               (i & BIM_CFG_32BIT) ? "32" : "64",
               (i & BIM_CFG_66MHZ) ? "66" : "33",
               (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq);

        for (i = 0; i < 6; i++)
                printk("%2.2x%c", dev->dev_addr[i],
                       i == 5 ? ' ' : ':');
        printk("\n");

        pci_set_drvdata(pdev, dev);
        cp->hw_running = 1;
        cas_entropy_reset(cp);
        cas_phy_init(cp);
        cas_begin_auto_negotiation(cp, NULL);
        return 0;

err_out_free_consistent:
        pci_free_consistent(pdev, sizeof(struct cas_init_block),
                            cp->init_block, cp->block_dvma);

err_out_iounmap:
        mutex_lock(&cp->pm_mutex);
        if (cp->hw_running)
                cas_shutdown(cp);
        mutex_unlock(&cp->pm_mutex);

        pci_iounmap(pdev, cp->regs);


err_out_free_res:
        pci_release_regions(pdev);

err_write_cacheline:
        /* Try to restore it in case the error occurred after we
         * set it.
         */
        pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);

err_out_free_netdev:
        free_netdev(dev);

err_out_disable_pdev:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return -ENODEV;
}

static void __devexit cas_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct cas *cp;
        if (!dev)
                return;

        cp = netdev_priv(dev);
        unregister_netdev(dev);

        mutex_lock(&cp->pm_mutex);
        flush_scheduled_work();
        if (cp->hw_running)
                cas_shutdown(cp);
        mutex_unlock(&cp->pm_mutex);

#if 1
        if (cp->orig_cacheline_size) {
                /* Restore the cache line size if we had modified
                 * it.
                 */
                pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
                                      cp->orig_cacheline_size);
        }
#endif
        pci_free_consistent(pdev, sizeof(struct cas_init_block),
                            cp->init_block, cp->block_dvma);
        pci_iounmap(pdev, cp->regs);
        free_netdev(dev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}

#ifdef CONFIG_PM
static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct cas *cp = netdev_priv(dev);
        unsigned long flags;

        mutex_lock(&cp->pm_mutex);

        /* If the driver is opened, we stop the DMA */
        if (cp->opened) {
                netif_device_detach(dev);

                cas_lock_all_save(cp, flags);

                /* We can set the second arg of cas_reset to 0
                 * because on resume, we'll call cas_init_hw with
                 * its second arg set so that autonegotiation is
                 * restarted.
5192 */ 5193 cas_reset(cp, 0); 5194 cas_clean_rings(cp); 5195 cas_unlock_all_restore(cp, flags); 5196 } 5197 5198 if (cp->hw_running) 5199 cas_shutdown(cp); 5200 mutex_unlock(&cp->pm_mutex); 5201 5202 return 0; 5203} 5204 5205static int cas_resume(struct pci_dev *pdev) 5206{ 5207 struct net_device *dev = pci_get_drvdata(pdev); 5208 struct cas *cp = netdev_priv(dev); 5209 5210 printk(KERN_INFO "%s: resuming\n", dev->name); 5211 5212 mutex_lock(&cp->pm_mutex); 5213 cas_hard_reset(cp); 5214 if (cp->opened) { 5215 unsigned long flags; 5216 cas_lock_all_save(cp, flags); 5217 cas_reset(cp, 0); 5218 cp->hw_running = 1; 5219 cas_clean_rings(cp); 5220 cas_init_hw(cp, 1); 5221 cas_unlock_all_restore(cp, flags); 5222 5223 netif_device_attach(dev); 5224 } 5225 mutex_unlock(&cp->pm_mutex); 5226 return 0; 5227} 5228#endif /* CONFIG_PM */ 5229 5230static struct pci_driver cas_driver = { 5231 .name = DRV_MODULE_NAME, 5232 .id_table = cas_pci_tbl, 5233 .probe = cas_init_one, 5234 .remove = __devexit_p(cas_remove_one), 5235#ifdef CONFIG_PM 5236 .suspend = cas_suspend, 5237 .resume = cas_resume 5238#endif 5239}; 5240 5241static int __init cas_init(void) 5242{ 5243 if (linkdown_timeout > 0) 5244 link_transition_timeout = linkdown_timeout * HZ; 5245 else 5246 link_transition_timeout = 0; 5247 5248 return pci_register_driver(&cas_driver); 5249} 5250 5251static void __exit cas_cleanup(void) 5252{ 5253 pci_unregister_driver(&cas_driver); 5254} 5255 5256module_init(cas_init); 5257module_exit(cas_cleanup);