Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.22-rc2 5254 lines 142 kB view raw
1/* cassini.c: Sun Microsystems Cassini(+) ethernet driver. 2 * 3 * Copyright (C) 2004 Sun Microsystems Inc. 4 * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com) 5 * 6 * This program is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU General Public License as 8 * published by the Free Software Foundation; either version 2 of the 9 * License, or (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, write to the Free Software 18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 19 * 02111-1307, USA. 20 * 21 * This driver uses the sungem driver (c) David Miller 22 * (davem@redhat.com) as its basis. 23 * 24 * The cassini chip has a number of features that distinguish it from 25 * the gem chip: 26 * 4 transmit descriptor rings that are used for either QoS (VLAN) or 27 * load balancing (non-VLAN mode) 28 * batching of multiple packets 29 * multiple CPU dispatching 30 * page-based RX descriptor engine with separate completion rings 31 * Gigabit support (GMII and PCS interface) 32 * MIF link up/down detection works 33 * 34 * RX is handled by page sized buffers that are attached as fragments to 35 * the skb. here's what's done: 36 * -- driver allocates pages at a time and keeps reference counts 37 * on them. 38 * -- the upper protocol layers assume that the header is in the skb 39 * itself. as a result, cassini will copy a small amount (64 bytes) 40 * to make them happy. 41 * -- driver appends the rest of the data pages as frags to skbuffs 42 * and increments the reference count 43 * -- on page reclamation, the driver swaps the page with a spare page. 44 * if that page is still in use, it frees its reference to that page, 45 * and allocates a new page for use. otherwise, it just recycles the 46 * the page. 47 * 48 * NOTE: cassini can parse the header. however, it's not worth it 49 * as long as the network stack requires a header copy. 50 * 51 * TX has 4 queues. currently these queues are used in a round-robin 52 * fashion for load balancing. They can also be used for QoS. for that 53 * to work, however, QoS information needs to be exposed down to the driver 54 * level so that subqueues get targetted to particular transmit rings. 55 * alternatively, the queues can be configured via use of the all-purpose 56 * ioctl. 57 * 58 * RX DATA: the rx completion ring has all the info, but the rx desc 59 * ring has all of the data. RX can conceivably come in under multiple 60 * interrupts, but the INT# assignment needs to be set up properly by 61 * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do 62 * that. also, the two descriptor rings are designed to distinguish between 63 * encrypted and non-encrypted packets, but we use them for buffering 64 * instead. 65 * 66 * by default, the selective clear mask is set up to process rx packets. 67 */ 68 69 70#include <linux/module.h> 71#include <linux/kernel.h> 72#include <linux/types.h> 73#include <linux/compiler.h> 74#include <linux/slab.h> 75#include <linux/delay.h> 76#include <linux/init.h> 77#include <linux/ioport.h> 78#include <linux/pci.h> 79#include <linux/mm.h> 80#include <linux/highmem.h> 81#include <linux/list.h> 82#include <linux/dma-mapping.h> 83 84#include <linux/netdevice.h> 85#include <linux/etherdevice.h> 86#include <linux/skbuff.h> 87#include <linux/ethtool.h> 88#include <linux/crc32.h> 89#include <linux/random.h> 90#include <linux/mii.h> 91#include <linux/ip.h> 92#include <linux/tcp.h> 93#include <linux/mutex.h> 94 95#include <net/checksum.h> 96 97#include <asm/atomic.h> 98#include <asm/system.h> 99#include <asm/io.h> 100#include <asm/byteorder.h> 101#include <asm/uaccess.h> 102 103#define cas_page_map(x) kmap_atomic((x), KM_SKB_DATA_SOFTIRQ) 104#define cas_page_unmap(x) kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ) 105#define CAS_NCPUS num_online_cpus() 106 107#if defined(CONFIG_CASSINI_NAPI) && defined(HAVE_NETDEV_POLL) 108#define USE_NAPI 109#define cas_skb_release(x) netif_receive_skb(x) 110#else 111#define cas_skb_release(x) netif_rx(x) 112#endif 113 114/* select which firmware to use */ 115#define USE_HP_WORKAROUND 116#define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */ 117#define CAS_HP_ALT_FIRMWARE cas_prog_null /* alternate firmware */ 118 119#include "cassini.h" 120 121#define USE_TX_COMPWB /* use completion writeback registers */ 122#define USE_CSMA_CD_PROTO /* standard CSMA/CD */ 123#define USE_RX_BLANK /* hw interrupt mitigation */ 124#undef USE_ENTROPY_DEV /* don't test for entropy device */ 125 126/* NOTE: these aren't useable unless PCI interrupts can be assigned. 127 * also, we need to make cp->lock finer-grained. 128 */ 129#undef USE_PCI_INTB 130#undef USE_PCI_INTC 131#undef USE_PCI_INTD 132#undef USE_QOS 133 134#undef USE_VPD_DEBUG /* debug vpd information if defined */ 135 136/* rx processing options */ 137#define USE_PAGE_ORDER /* specify to allocate large rx pages */ 138#define RX_DONT_BATCH 0 /* if 1, don't batch flows */ 139#define RX_COPY_ALWAYS 0 /* if 0, use frags */ 140#define RX_COPY_MIN 64 /* copy a little to make upper layers happy */ 141#undef RX_COUNT_BUFFERS /* define to calculate RX buffer stats */ 142 143#define DRV_MODULE_NAME "cassini" 144#define PFX DRV_MODULE_NAME ": " 145#define DRV_MODULE_VERSION "1.4" 146#define DRV_MODULE_RELDATE "1 July 2004" 147 148#define CAS_DEF_MSG_ENABLE \ 149 (NETIF_MSG_DRV | \ 150 NETIF_MSG_PROBE | \ 151 NETIF_MSG_LINK | \ 152 NETIF_MSG_TIMER | \ 153 NETIF_MSG_IFDOWN | \ 154 NETIF_MSG_IFUP | \ 155 NETIF_MSG_RX_ERR | \ 156 NETIF_MSG_TX_ERR) 157 158/* length of time before we decide the hardware is borked, 159 * and dev->tx_timeout() should be called to fix the problem 160 */ 161#define CAS_TX_TIMEOUT (HZ) 162#define CAS_LINK_TIMEOUT (22*HZ/10) 163#define CAS_LINK_FAST_TIMEOUT (1) 164 165/* timeout values for state changing. these specify the number 166 * of 10us delays to be used before giving up. 167 */ 168#define STOP_TRIES_PHY 1000 169#define STOP_TRIES 5000 170 171/* specify a minimum frame size to deal with some fifo issues 172 * max mtu == 2 * page size - ethernet header - 64 - swivel = 173 * 2 * page_size - 0x50 174 */ 175#define CAS_MIN_FRAME 97 176#define CAS_1000MB_MIN_FRAME 255 177#define CAS_MIN_MTU 60 178#define CAS_MAX_MTU min(((cp->page_size << 1) - 0x50), 9000) 179 180#if 1 181/* 182 * Eliminate these and use separate atomic counters for each, to 183 * avoid a race condition. 184 */ 185#else 186#define CAS_RESET_MTU 1 187#define CAS_RESET_ALL 2 188#define CAS_RESET_SPARE 3 189#endif 190 191static char version[] __devinitdata = 192 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 193 194static int cassini_debug = -1; /* -1 == use CAS_DEF_MSG_ENABLE as value */ 195static int link_mode; 196 197MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)"); 198MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver"); 199MODULE_LICENSE("GPL"); 200module_param(cassini_debug, int, 0); 201MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value"); 202module_param(link_mode, int, 0); 203MODULE_PARM_DESC(link_mode, "default link mode"); 204 205/* 206 * Work around for a PCS bug in which the link goes down due to the chip 207 * being confused and never showing a link status of "up." 208 */ 209#define DEFAULT_LINKDOWN_TIMEOUT 5 210/* 211 * Value in seconds, for user input. 212 */ 213static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT; 214module_param(linkdown_timeout, int, 0); 215MODULE_PARM_DESC(linkdown_timeout, 216"min reset interval in sec. for PCS linkdown issue; disabled if not positive"); 217 218/* 219 * value in 'ticks' (units used by jiffies). Set when we init the 220 * module because 'HZ' in actually a function call on some flavors of 221 * Linux. This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ. 222 */ 223static int link_transition_timeout; 224 225 226 227static u16 link_modes[] __devinitdata = { 228 BMCR_ANENABLE, /* 0 : autoneg */ 229 0, /* 1 : 10bt half duplex */ 230 BMCR_SPEED100, /* 2 : 100bt half duplex */ 231 BMCR_FULLDPLX, /* 3 : 10bt full duplex */ 232 BMCR_SPEED100|BMCR_FULLDPLX, /* 4 : 100bt full duplex */ 233 CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */ 234}; 235 236static struct pci_device_id cas_pci_tbl[] __devinitdata = { 237 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI, 238 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 239 { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN, 240 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 241 { 0, } 242}; 243 244MODULE_DEVICE_TABLE(pci, cas_pci_tbl); 245 246static void cas_set_link_modes(struct cas *cp); 247 248static inline void cas_lock_tx(struct cas *cp) 249{ 250 int i; 251 252 for (i = 0; i < N_TX_RINGS; i++) 253 spin_lock(&cp->tx_lock[i]); 254} 255 256static inline void cas_lock_all(struct cas *cp) 257{ 258 spin_lock_irq(&cp->lock); 259 cas_lock_tx(cp); 260} 261 262/* WTZ: QA was finding deadlock problems with the previous 263 * versions after long test runs with multiple cards per machine. 264 * See if replacing cas_lock_all with safer versions helps. The 265 * symptoms QA is reporting match those we'd expect if interrupts 266 * aren't being properly restored, and we fixed a previous deadlock 267 * with similar symptoms by using save/restore versions in other 268 * places. 269 */ 270#define cas_lock_all_save(cp, flags) \ 271do { \ 272 struct cas *xxxcp = (cp); \ 273 spin_lock_irqsave(&xxxcp->lock, flags); \ 274 cas_lock_tx(xxxcp); \ 275} while (0) 276 277static inline void cas_unlock_tx(struct cas *cp) 278{ 279 int i; 280 281 for (i = N_TX_RINGS; i > 0; i--) 282 spin_unlock(&cp->tx_lock[i - 1]); 283} 284 285static inline void cas_unlock_all(struct cas *cp) 286{ 287 cas_unlock_tx(cp); 288 spin_unlock_irq(&cp->lock); 289} 290 291#define cas_unlock_all_restore(cp, flags) \ 292do { \ 293 struct cas *xxxcp = (cp); \ 294 cas_unlock_tx(xxxcp); \ 295 spin_unlock_irqrestore(&xxxcp->lock, flags); \ 296} while (0) 297 298static void cas_disable_irq(struct cas *cp, const int ring) 299{ 300 /* Make sure we won't get any more interrupts */ 301 if (ring == 0) { 302 writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK); 303 return; 304 } 305 306 /* disable completion interrupts and selectively mask */ 307 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { 308 switch (ring) { 309#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD) 310#ifdef USE_PCI_INTB 311 case 1: 312#endif 313#ifdef USE_PCI_INTC 314 case 2: 315#endif 316#ifdef USE_PCI_INTD 317 case 3: 318#endif 319 writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN, 320 cp->regs + REG_PLUS_INTRN_MASK(ring)); 321 break; 322#endif 323 default: 324 writel(INTRN_MASK_CLEAR_ALL, cp->regs + 325 REG_PLUS_INTRN_MASK(ring)); 326 break; 327 } 328 } 329} 330 331static inline void cas_mask_intr(struct cas *cp) 332{ 333 int i; 334 335 for (i = 0; i < N_RX_COMP_RINGS; i++) 336 cas_disable_irq(cp, i); 337} 338 339static inline void cas_buffer_init(cas_page_t *cp) 340{ 341 struct page *page = cp->buffer; 342 atomic_set((atomic_t *)&page->lru.next, 1); 343} 344 345static inline int cas_buffer_count(cas_page_t *cp) 346{ 347 struct page *page = cp->buffer; 348 return atomic_read((atomic_t *)&page->lru.next); 349} 350 351static inline void cas_buffer_inc(cas_page_t *cp) 352{ 353 struct page *page = cp->buffer; 354 atomic_inc((atomic_t *)&page->lru.next); 355} 356 357static inline void cas_buffer_dec(cas_page_t *cp) 358{ 359 struct page *page = cp->buffer; 360 atomic_dec((atomic_t *)&page->lru.next); 361} 362 363static void cas_enable_irq(struct cas *cp, const int ring) 364{ 365 if (ring == 0) { /* all but TX_DONE */ 366 writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK); 367 return; 368 } 369 370 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { 371 switch (ring) { 372#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD) 373#ifdef USE_PCI_INTB 374 case 1: 375#endif 376#ifdef USE_PCI_INTC 377 case 2: 378#endif 379#ifdef USE_PCI_INTD 380 case 3: 381#endif 382 writel(INTRN_MASK_RX_EN, cp->regs + 383 REG_PLUS_INTRN_MASK(ring)); 384 break; 385#endif 386 default: 387 break; 388 } 389 } 390} 391 392static inline void cas_unmask_intr(struct cas *cp) 393{ 394 int i; 395 396 for (i = 0; i < N_RX_COMP_RINGS; i++) 397 cas_enable_irq(cp, i); 398} 399 400static inline void cas_entropy_gather(struct cas *cp) 401{ 402#ifdef USE_ENTROPY_DEV 403 if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0) 404 return; 405 406 batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV), 407 readl(cp->regs + REG_ENTROPY_IV), 408 sizeof(uint64_t)*8); 409#endif 410} 411 412static inline void cas_entropy_reset(struct cas *cp) 413{ 414#ifdef USE_ENTROPY_DEV 415 if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0) 416 return; 417 418 writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT, 419 cp->regs + REG_BIM_LOCAL_DEV_EN); 420 writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET); 421 writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG); 422 423 /* if we read back 0x0, we don't have an entropy device */ 424 if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0) 425 cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV; 426#endif 427} 428 429/* access to the phy. the following assumes that we've initialized the MIF to 430 * be in frame rather than bit-bang mode 431 */ 432static u16 cas_phy_read(struct cas *cp, int reg) 433{ 434 u32 cmd; 435 int limit = STOP_TRIES_PHY; 436 437 cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ; 438 cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr); 439 cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg); 440 cmd |= MIF_FRAME_TURN_AROUND_MSB; 441 writel(cmd, cp->regs + REG_MIF_FRAME); 442 443 /* poll for completion */ 444 while (limit-- > 0) { 445 udelay(10); 446 cmd = readl(cp->regs + REG_MIF_FRAME); 447 if (cmd & MIF_FRAME_TURN_AROUND_LSB) 448 return (cmd & MIF_FRAME_DATA_MASK); 449 } 450 return 0xFFFF; /* -1 */ 451} 452 453static int cas_phy_write(struct cas *cp, int reg, u16 val) 454{ 455 int limit = STOP_TRIES_PHY; 456 u32 cmd; 457 458 cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE; 459 cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr); 460 cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg); 461 cmd |= MIF_FRAME_TURN_AROUND_MSB; 462 cmd |= val & MIF_FRAME_DATA_MASK; 463 writel(cmd, cp->regs + REG_MIF_FRAME); 464 465 /* poll for completion */ 466 while (limit-- > 0) { 467 udelay(10); 468 cmd = readl(cp->regs + REG_MIF_FRAME); 469 if (cmd & MIF_FRAME_TURN_AROUND_LSB) 470 return 0; 471 } 472 return -1; 473} 474 475static void cas_phy_powerup(struct cas *cp) 476{ 477 u16 ctl = cas_phy_read(cp, MII_BMCR); 478 479 if ((ctl & BMCR_PDOWN) == 0) 480 return; 481 ctl &= ~BMCR_PDOWN; 482 cas_phy_write(cp, MII_BMCR, ctl); 483} 484 485static void cas_phy_powerdown(struct cas *cp) 486{ 487 u16 ctl = cas_phy_read(cp, MII_BMCR); 488 489 if (ctl & BMCR_PDOWN) 490 return; 491 ctl |= BMCR_PDOWN; 492 cas_phy_write(cp, MII_BMCR, ctl); 493} 494 495/* cp->lock held. note: the last put_page will free the buffer */ 496static int cas_page_free(struct cas *cp, cas_page_t *page) 497{ 498 pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size, 499 PCI_DMA_FROMDEVICE); 500 cas_buffer_dec(page); 501 __free_pages(page->buffer, cp->page_order); 502 kfree(page); 503 return 0; 504} 505 506#ifdef RX_COUNT_BUFFERS 507#define RX_USED_ADD(x, y) ((x)->used += (y)) 508#define RX_USED_SET(x, y) ((x)->used = (y)) 509#else 510#define RX_USED_ADD(x, y) 511#define RX_USED_SET(x, y) 512#endif 513 514/* local page allocation routines for the receive buffers. jumbo pages 515 * require at least 8K contiguous and 8K aligned buffers. 516 */ 517static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags) 518{ 519 cas_page_t *page; 520 521 page = kmalloc(sizeof(cas_page_t), flags); 522 if (!page) 523 return NULL; 524 525 INIT_LIST_HEAD(&page->list); 526 RX_USED_SET(page, 0); 527 page->buffer = alloc_pages(flags, cp->page_order); 528 if (!page->buffer) 529 goto page_err; 530 cas_buffer_init(page); 531 page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0, 532 cp->page_size, PCI_DMA_FROMDEVICE); 533 return page; 534 535page_err: 536 kfree(page); 537 return NULL; 538} 539 540/* initialize spare pool of rx buffers, but allocate during the open */ 541static void cas_spare_init(struct cas *cp) 542{ 543 spin_lock(&cp->rx_inuse_lock); 544 INIT_LIST_HEAD(&cp->rx_inuse_list); 545 spin_unlock(&cp->rx_inuse_lock); 546 547 spin_lock(&cp->rx_spare_lock); 548 INIT_LIST_HEAD(&cp->rx_spare_list); 549 cp->rx_spares_needed = RX_SPARE_COUNT; 550 spin_unlock(&cp->rx_spare_lock); 551} 552 553/* used on close. free all the spare buffers. */ 554static void cas_spare_free(struct cas *cp) 555{ 556 struct list_head list, *elem, *tmp; 557 558 /* free spare buffers */ 559 INIT_LIST_HEAD(&list); 560 spin_lock(&cp->rx_spare_lock); 561 list_splice(&cp->rx_spare_list, &list); 562 INIT_LIST_HEAD(&cp->rx_spare_list); 563 spin_unlock(&cp->rx_spare_lock); 564 list_for_each_safe(elem, tmp, &list) { 565 cas_page_free(cp, list_entry(elem, cas_page_t, list)); 566 } 567 568 INIT_LIST_HEAD(&list); 569#if 1 570 /* 571 * Looks like Adrian had protected this with a different 572 * lock than used everywhere else to manipulate this list. 573 */ 574 spin_lock(&cp->rx_inuse_lock); 575 list_splice(&cp->rx_inuse_list, &list); 576 INIT_LIST_HEAD(&cp->rx_inuse_list); 577 spin_unlock(&cp->rx_inuse_lock); 578#else 579 spin_lock(&cp->rx_spare_lock); 580 list_splice(&cp->rx_inuse_list, &list); 581 INIT_LIST_HEAD(&cp->rx_inuse_list); 582 spin_unlock(&cp->rx_spare_lock); 583#endif 584 list_for_each_safe(elem, tmp, &list) { 585 cas_page_free(cp, list_entry(elem, cas_page_t, list)); 586 } 587} 588 589/* replenish spares if needed */ 590static void cas_spare_recover(struct cas *cp, const gfp_t flags) 591{ 592 struct list_head list, *elem, *tmp; 593 int needed, i; 594 595 /* check inuse list. if we don't need any more free buffers, 596 * just free it 597 */ 598 599 /* make a local copy of the list */ 600 INIT_LIST_HEAD(&list); 601 spin_lock(&cp->rx_inuse_lock); 602 list_splice(&cp->rx_inuse_list, &list); 603 INIT_LIST_HEAD(&cp->rx_inuse_list); 604 spin_unlock(&cp->rx_inuse_lock); 605 606 list_for_each_safe(elem, tmp, &list) { 607 cas_page_t *page = list_entry(elem, cas_page_t, list); 608 609 if (cas_buffer_count(page) > 1) 610 continue; 611 612 list_del(elem); 613 spin_lock(&cp->rx_spare_lock); 614 if (cp->rx_spares_needed > 0) { 615 list_add(elem, &cp->rx_spare_list); 616 cp->rx_spares_needed--; 617 spin_unlock(&cp->rx_spare_lock); 618 } else { 619 spin_unlock(&cp->rx_spare_lock); 620 cas_page_free(cp, page); 621 } 622 } 623 624 /* put any inuse buffers back on the list */ 625 if (!list_empty(&list)) { 626 spin_lock(&cp->rx_inuse_lock); 627 list_splice(&list, &cp->rx_inuse_list); 628 spin_unlock(&cp->rx_inuse_lock); 629 } 630 631 spin_lock(&cp->rx_spare_lock); 632 needed = cp->rx_spares_needed; 633 spin_unlock(&cp->rx_spare_lock); 634 if (!needed) 635 return; 636 637 /* we still need spares, so try to allocate some */ 638 INIT_LIST_HEAD(&list); 639 i = 0; 640 while (i < needed) { 641 cas_page_t *spare = cas_page_alloc(cp, flags); 642 if (!spare) 643 break; 644 list_add(&spare->list, &list); 645 i++; 646 } 647 648 spin_lock(&cp->rx_spare_lock); 649 list_splice(&list, &cp->rx_spare_list); 650 cp->rx_spares_needed -= i; 651 spin_unlock(&cp->rx_spare_lock); 652} 653 654/* pull a page from the list. */ 655static cas_page_t *cas_page_dequeue(struct cas *cp) 656{ 657 struct list_head *entry; 658 int recover; 659 660 spin_lock(&cp->rx_spare_lock); 661 if (list_empty(&cp->rx_spare_list)) { 662 /* try to do a quick recovery */ 663 spin_unlock(&cp->rx_spare_lock); 664 cas_spare_recover(cp, GFP_ATOMIC); 665 spin_lock(&cp->rx_spare_lock); 666 if (list_empty(&cp->rx_spare_list)) { 667 if (netif_msg_rx_err(cp)) 668 printk(KERN_ERR "%s: no spare buffers " 669 "available.\n", cp->dev->name); 670 spin_unlock(&cp->rx_spare_lock); 671 return NULL; 672 } 673 } 674 675 entry = cp->rx_spare_list.next; 676 list_del(entry); 677 recover = ++cp->rx_spares_needed; 678 spin_unlock(&cp->rx_spare_lock); 679 680 /* trigger the timer to do the recovery */ 681 if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) { 682#if 1 683 atomic_inc(&cp->reset_task_pending); 684 atomic_inc(&cp->reset_task_pending_spare); 685 schedule_work(&cp->reset_task); 686#else 687 atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE); 688 schedule_work(&cp->reset_task); 689#endif 690 } 691 return list_entry(entry, cas_page_t, list); 692} 693 694 695static void cas_mif_poll(struct cas *cp, const int enable) 696{ 697 u32 cfg; 698 699 cfg = readl(cp->regs + REG_MIF_CFG); 700 cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1); 701 702 if (cp->phy_type & CAS_PHY_MII_MDIO1) 703 cfg |= MIF_CFG_PHY_SELECT; 704 705 /* poll and interrupt on link status change. */ 706 if (enable) { 707 cfg |= MIF_CFG_POLL_EN; 708 cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR); 709 cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr); 710 } 711 writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF, 712 cp->regs + REG_MIF_MASK); 713 writel(cfg, cp->regs + REG_MIF_CFG); 714} 715 716/* Must be invoked under cp->lock */ 717static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep) 718{ 719 u16 ctl; 720#if 1 721 int lcntl; 722 int changed = 0; 723 int oldstate = cp->lstate; 724 int link_was_not_down = !(oldstate == link_down); 725#endif 726 /* Setup link parameters */ 727 if (!ep) 728 goto start_aneg; 729 lcntl = cp->link_cntl; 730 if (ep->autoneg == AUTONEG_ENABLE) 731 cp->link_cntl = BMCR_ANENABLE; 732 else { 733 cp->link_cntl = 0; 734 if (ep->speed == SPEED_100) 735 cp->link_cntl |= BMCR_SPEED100; 736 else if (ep->speed == SPEED_1000) 737 cp->link_cntl |= CAS_BMCR_SPEED1000; 738 if (ep->duplex == DUPLEX_FULL) 739 cp->link_cntl |= BMCR_FULLDPLX; 740 } 741#if 1 742 changed = (lcntl != cp->link_cntl); 743#endif 744start_aneg: 745 if (cp->lstate == link_up) { 746 printk(KERN_INFO "%s: PCS link down.\n", 747 cp->dev->name); 748 } else { 749 if (changed) { 750 printk(KERN_INFO "%s: link configuration changed\n", 751 cp->dev->name); 752 } 753 } 754 cp->lstate = link_down; 755 cp->link_transition = LINK_TRANSITION_LINK_DOWN; 756 if (!cp->hw_running) 757 return; 758#if 1 759 /* 760 * WTZ: If the old state was link_up, we turn off the carrier 761 * to replicate everything we do elsewhere on a link-down 762 * event when we were already in a link-up state.. 763 */ 764 if (oldstate == link_up) 765 netif_carrier_off(cp->dev); 766 if (changed && link_was_not_down) { 767 /* 768 * WTZ: This branch will simply schedule a full reset after 769 * we explicitly changed link modes in an ioctl. See if this 770 * fixes the link-problems we were having for forced mode. 771 */ 772 atomic_inc(&cp->reset_task_pending); 773 atomic_inc(&cp->reset_task_pending_all); 774 schedule_work(&cp->reset_task); 775 cp->timer_ticks = 0; 776 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT); 777 return; 778 } 779#endif 780 if (cp->phy_type & CAS_PHY_SERDES) { 781 u32 val = readl(cp->regs + REG_PCS_MII_CTRL); 782 783 if (cp->link_cntl & BMCR_ANENABLE) { 784 val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN); 785 cp->lstate = link_aneg; 786 } else { 787 if (cp->link_cntl & BMCR_FULLDPLX) 788 val |= PCS_MII_CTRL_DUPLEX; 789 val &= ~PCS_MII_AUTONEG_EN; 790 cp->lstate = link_force_ok; 791 } 792 cp->link_transition = LINK_TRANSITION_LINK_CONFIG; 793 writel(val, cp->regs + REG_PCS_MII_CTRL); 794 795 } else { 796 cas_mif_poll(cp, 0); 797 ctl = cas_phy_read(cp, MII_BMCR); 798 ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | 799 CAS_BMCR_SPEED1000 | BMCR_ANENABLE); 800 ctl |= cp->link_cntl; 801 if (ctl & BMCR_ANENABLE) { 802 ctl |= BMCR_ANRESTART; 803 cp->lstate = link_aneg; 804 } else { 805 cp->lstate = link_force_ok; 806 } 807 cp->link_transition = LINK_TRANSITION_LINK_CONFIG; 808 cas_phy_write(cp, MII_BMCR, ctl); 809 cas_mif_poll(cp, 1); 810 } 811 812 cp->timer_ticks = 0; 813 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT); 814} 815 816/* Must be invoked under cp->lock. */ 817static int cas_reset_mii_phy(struct cas *cp) 818{ 819 int limit = STOP_TRIES_PHY; 820 u16 val; 821 822 cas_phy_write(cp, MII_BMCR, BMCR_RESET); 823 udelay(100); 824 while (limit--) { 825 val = cas_phy_read(cp, MII_BMCR); 826 if ((val & BMCR_RESET) == 0) 827 break; 828 udelay(10); 829 } 830 return (limit <= 0); 831} 832 833static void cas_saturn_firmware_load(struct cas *cp) 834{ 835 cas_saturn_patch_t *patch = cas_saturn_patch; 836 837 cas_phy_powerdown(cp); 838 839 /* expanded memory access mode */ 840 cas_phy_write(cp, DP83065_MII_MEM, 0x0); 841 842 /* pointer configuration for new firmware */ 843 cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9); 844 cas_phy_write(cp, DP83065_MII_REGD, 0xbd); 845 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa); 846 cas_phy_write(cp, DP83065_MII_REGD, 0x82); 847 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb); 848 cas_phy_write(cp, DP83065_MII_REGD, 0x0); 849 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc); 850 cas_phy_write(cp, DP83065_MII_REGD, 0x39); 851 852 /* download new firmware */ 853 cas_phy_write(cp, DP83065_MII_MEM, 0x1); 854 cas_phy_write(cp, DP83065_MII_REGE, patch->addr); 855 while (patch->addr) { 856 cas_phy_write(cp, DP83065_MII_REGD, patch->val); 857 patch++; 858 } 859 860 /* enable firmware */ 861 cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8); 862 cas_phy_write(cp, DP83065_MII_REGD, 0x1); 863} 864 865 866/* phy initialization */ 867static void cas_phy_init(struct cas *cp) 868{ 869 u16 val; 870 871 /* if we're in MII/GMII mode, set up phy */ 872 if (CAS_PHY_MII(cp->phy_type)) { 873 writel(PCS_DATAPATH_MODE_MII, 874 cp->regs + REG_PCS_DATAPATH_MODE); 875 876 cas_mif_poll(cp, 0); 877 cas_reset_mii_phy(cp); /* take out of isolate mode */ 878 879 if (PHY_LUCENT_B0 == cp->phy_id) { 880 /* workaround link up/down issue with lucent */ 881 cas_phy_write(cp, LUCENT_MII_REG, 0x8000); 882 cas_phy_write(cp, MII_BMCR, 0x00f1); 883 cas_phy_write(cp, LUCENT_MII_REG, 0x0); 884 885 } else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) { 886 /* workarounds for broadcom phy */ 887 cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20); 888 cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012); 889 cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804); 890 cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013); 891 cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204); 892 cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006); 893 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132); 894 cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006); 895 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232); 896 cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F); 897 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20); 898 899 } else if (PHY_BROADCOM_5411 == cp->phy_id) { 900 val = cas_phy_read(cp, BROADCOM_MII_REG4); 901 val = cas_phy_read(cp, BROADCOM_MII_REG4); 902 if (val & 0x0080) { 903 /* link workaround */ 904 cas_phy_write(cp, BROADCOM_MII_REG4, 905 val & ~0x0080); 906 } 907 908 } else if (cp->cas_flags & CAS_FLAG_SATURN) { 909 writel((cp->phy_type & CAS_PHY_MII_MDIO0) ? 910 SATURN_PCFG_FSI : 0x0, 911 cp->regs + REG_SATURN_PCFG); 912 913 /* load firmware to address 10Mbps auto-negotiation 914 * issue. NOTE: this will need to be changed if the 915 * default firmware gets fixed. 916 */ 917 if (PHY_NS_DP83065 == cp->phy_id) { 918 cas_saturn_firmware_load(cp); 919 } 920 cas_phy_powerup(cp); 921 } 922 923 /* advertise capabilities */ 924 val = cas_phy_read(cp, MII_BMCR); 925 val &= ~BMCR_ANENABLE; 926 cas_phy_write(cp, MII_BMCR, val); 927 udelay(10); 928 929 cas_phy_write(cp, MII_ADVERTISE, 930 cas_phy_read(cp, MII_ADVERTISE) | 931 (ADVERTISE_10HALF | ADVERTISE_10FULL | 932 ADVERTISE_100HALF | ADVERTISE_100FULL | 933 CAS_ADVERTISE_PAUSE | 934 CAS_ADVERTISE_ASYM_PAUSE)); 935 936 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { 937 /* make sure that we don't advertise half 938 * duplex to avoid a chip issue 939 */ 940 val = cas_phy_read(cp, CAS_MII_1000_CTRL); 941 val &= ~CAS_ADVERTISE_1000HALF; 942 val |= CAS_ADVERTISE_1000FULL; 943 cas_phy_write(cp, CAS_MII_1000_CTRL, val); 944 } 945 946 } else { 947 /* reset pcs for serdes */ 948 u32 val; 949 int limit; 950 951 writel(PCS_DATAPATH_MODE_SERDES, 952 cp->regs + REG_PCS_DATAPATH_MODE); 953 954 /* enable serdes pins on saturn */ 955 if (cp->cas_flags & CAS_FLAG_SATURN) 956 writel(0, cp->regs + REG_SATURN_PCFG); 957 958 /* Reset PCS unit. */ 959 val = readl(cp->regs + REG_PCS_MII_CTRL); 960 val |= PCS_MII_RESET; 961 writel(val, cp->regs + REG_PCS_MII_CTRL); 962 963 limit = STOP_TRIES; 964 while (limit-- > 0) { 965 udelay(10); 966 if ((readl(cp->regs + REG_PCS_MII_CTRL) & 967 PCS_MII_RESET) == 0) 968 break; 969 } 970 if (limit <= 0) 971 printk(KERN_WARNING "%s: PCS reset bit would not " 972 "clear [%08x].\n", cp->dev->name, 973 readl(cp->regs + REG_PCS_STATE_MACHINE)); 974 975 /* Make sure PCS is disabled while changing advertisement 976 * configuration. 977 */ 978 writel(0x0, cp->regs + REG_PCS_CFG); 979 980 /* Advertise all capabilities except half-duplex. */ 981 val = readl(cp->regs + REG_PCS_MII_ADVERT); 982 val &= ~PCS_MII_ADVERT_HD; 983 val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE | 984 PCS_MII_ADVERT_ASYM_PAUSE); 985 writel(val, cp->regs + REG_PCS_MII_ADVERT); 986 987 /* enable PCS */ 988 writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG); 989 990 /* pcs workaround: enable sync detect */ 991 writel(PCS_SERDES_CTRL_SYNCD_EN, 992 cp->regs + REG_PCS_SERDES_CTRL); 993 } 994} 995 996 997static int cas_pcs_link_check(struct cas *cp) 998{ 999 u32 stat, state_machine; 1000 int retval = 0; 1001 1002 /* The link status bit latches on zero, so you must 1003 * read it twice in such a case to see a transition 1004 * to the link being up. 1005 */ 1006 stat = readl(cp->regs + REG_PCS_MII_STATUS); 1007 if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0) 1008 stat = readl(cp->regs + REG_PCS_MII_STATUS); 1009 1010 /* The remote-fault indication is only valid 1011 * when autoneg has completed. 1012 */ 1013 if ((stat & (PCS_MII_STATUS_AUTONEG_COMP | 1014 PCS_MII_STATUS_REMOTE_FAULT)) == 1015 (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT)) { 1016 if (netif_msg_link(cp)) 1017 printk(KERN_INFO "%s: PCS RemoteFault\n", 1018 cp->dev->name); 1019 } 1020 1021 /* work around link detection issue by querying the PCS state 1022 * machine directly. 1023 */ 1024 state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE); 1025 if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) { 1026 stat &= ~PCS_MII_STATUS_LINK_STATUS; 1027 } else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) { 1028 stat |= PCS_MII_STATUS_LINK_STATUS; 1029 } 1030 1031 if (stat & PCS_MII_STATUS_LINK_STATUS) { 1032 if (cp->lstate != link_up) { 1033 if (cp->opened) { 1034 cp->lstate = link_up; 1035 cp->link_transition = LINK_TRANSITION_LINK_UP; 1036 1037 cas_set_link_modes(cp); 1038 netif_carrier_on(cp->dev); 1039 } 1040 } 1041 } else if (cp->lstate == link_up) { 1042 cp->lstate = link_down; 1043 if (link_transition_timeout != 0 && 1044 cp->link_transition != LINK_TRANSITION_REQUESTED_RESET && 1045 !cp->link_transition_jiffies_valid) { 1046 /* 1047 * force a reset, as a workaround for the 1048 * link-failure problem. May want to move this to a 1049 * point a bit earlier in the sequence. If we had 1050 * generated a reset a short time ago, we'll wait for 1051 * the link timer to check the status until a 1052 * timer expires (link_transistion_jiffies_valid is 1053 * true when the timer is running.) Instead of using 1054 * a system timer, we just do a check whenever the 1055 * link timer is running - this clears the flag after 1056 * a suitable delay. 1057 */ 1058 retval = 1; 1059 cp->link_transition = LINK_TRANSITION_REQUESTED_RESET; 1060 cp->link_transition_jiffies = jiffies; 1061 cp->link_transition_jiffies_valid = 1; 1062 } else { 1063 cp->link_transition = LINK_TRANSITION_ON_FAILURE; 1064 } 1065 netif_carrier_off(cp->dev); 1066 if (cp->opened && netif_msg_link(cp)) { 1067 printk(KERN_INFO "%s: PCS link down.\n", 1068 cp->dev->name); 1069 } 1070 1071 /* Cassini only: if you force a mode, there can be 1072 * sync problems on link down. to fix that, the following 1073 * things need to be checked: 1074 * 1) read serialink state register 1075 * 2) read pcs status register to verify link down. 1076 * 3) if link down and serial link == 0x03, then you need 1077 * to global reset the chip. 1078 */ 1079 if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) { 1080 /* should check to see if we're in a forced mode */ 1081 stat = readl(cp->regs + REG_PCS_SERDES_STATE); 1082 if (stat == 0x03) 1083 return 1; 1084 } 1085 } else if (cp->lstate == link_down) { 1086 if (link_transition_timeout != 0 && 1087 cp->link_transition != LINK_TRANSITION_REQUESTED_RESET && 1088 !cp->link_transition_jiffies_valid) { 1089 /* force a reset, as a workaround for the 1090 * link-failure problem. May want to move 1091 * this to a point a bit earlier in the 1092 * sequence. 1093 */ 1094 retval = 1; 1095 cp->link_transition = LINK_TRANSITION_REQUESTED_RESET; 1096 cp->link_transition_jiffies = jiffies; 1097 cp->link_transition_jiffies_valid = 1; 1098 } else { 1099 cp->link_transition = LINK_TRANSITION_STILL_FAILED; 1100 } 1101 } 1102 1103 return retval; 1104} 1105 1106static int cas_pcs_interrupt(struct net_device *dev, 1107 struct cas *cp, u32 status) 1108{ 1109 u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS); 1110 1111 if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0) 1112 return 0; 1113 return cas_pcs_link_check(cp); 1114} 1115 1116static int cas_txmac_interrupt(struct net_device *dev, 1117 struct cas *cp, u32 status) 1118{ 1119 u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS); 1120 1121 if (!txmac_stat) 1122 return 0; 1123 1124 if (netif_msg_intr(cp)) 1125 printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n", 1126 cp->dev->name, txmac_stat); 1127 1128 /* Defer timer expiration is quite normal, 1129 * don't even log the event. 1130 */ 1131 if ((txmac_stat & MAC_TX_DEFER_TIMER) && 1132 !(txmac_stat & ~MAC_TX_DEFER_TIMER)) 1133 return 0; 1134 1135 spin_lock(&cp->stat_lock[0]); 1136 if (txmac_stat & MAC_TX_UNDERRUN) { 1137 printk(KERN_ERR "%s: TX MAC xmit underrun.\n", 1138 dev->name); 1139 cp->net_stats[0].tx_fifo_errors++; 1140 } 1141 1142 if (txmac_stat & MAC_TX_MAX_PACKET_ERR) { 1143 printk(KERN_ERR "%s: TX MAC max packet size error.\n", 1144 dev->name); 1145 cp->net_stats[0].tx_errors++; 1146 } 1147 1148 /* The rest are all cases of one of the 16-bit TX 1149 * counters expiring. 1150 */ 1151 if (txmac_stat & MAC_TX_COLL_NORMAL) 1152 cp->net_stats[0].collisions += 0x10000; 1153 1154 if (txmac_stat & MAC_TX_COLL_EXCESS) { 1155 cp->net_stats[0].tx_aborted_errors += 0x10000; 1156 cp->net_stats[0].collisions += 0x10000; 1157 } 1158 1159 if (txmac_stat & MAC_TX_COLL_LATE) { 1160 cp->net_stats[0].tx_aborted_errors += 0x10000; 1161 cp->net_stats[0].collisions += 0x10000; 1162 } 1163 spin_unlock(&cp->stat_lock[0]); 1164 1165 /* We do not keep track of MAC_TX_COLL_FIRST and 1166 * MAC_TX_PEAK_ATTEMPTS events. 1167 */ 1168 return 0; 1169} 1170 1171static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware) 1172{ 1173 cas_hp_inst_t *inst; 1174 u32 val; 1175 int i; 1176 1177 i = 0; 1178 while ((inst = firmware) && inst->note) { 1179 writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR); 1180 1181 val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val); 1182 val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask); 1183 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI); 1184 1185 val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10); 1186 val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop); 1187 val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext); 1188 val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff); 1189 val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext); 1190 val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff); 1191 val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op); 1192 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID); 1193 1194 val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask); 1195 val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift); 1196 val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab); 1197 val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg); 1198 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW); 1199 ++firmware; 1200 ++i; 1201 } 1202} 1203 1204static void cas_init_rx_dma(struct cas *cp) 1205{ 1206 u64 desc_dma = cp->block_dvma; 1207 u32 val; 1208 int i, size; 1209 1210 /* rx free descriptors */ 1211 val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL); 1212 val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0)); 1213 val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0)); 1214 if ((N_RX_DESC_RINGS > 1) && 1215 (cp->cas_flags & CAS_FLAG_REG_PLUS)) /* do desc 2 */ 1216 val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1)); 1217 writel(val, cp->regs + REG_RX_CFG); 1218 1219 val = (unsigned long) cp->init_rxds[0] - 1220 (unsigned long) cp->init_block; 1221 writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI); 1222 writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW); 1223 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK); 1224 1225 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { 1226 /* rx desc 2 is for IPSEC packets. however, 1227 * we don't it that for that purpose. 1228 */ 1229 val = (unsigned long) cp->init_rxds[1] - 1230 (unsigned long) cp->init_block; 1231 writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI); 1232 writel((desc_dma + val) & 0xffffffff, cp->regs + 1233 REG_PLUS_RX_DB1_LOW); 1234 writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs + 1235 REG_PLUS_RX_KICK1); 1236 } 1237 1238 /* rx completion registers */ 1239 val = (unsigned long) cp->init_rxcs[0] - 1240 (unsigned long) cp->init_block; 1241 writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI); 1242 writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW); 1243 1244 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { 1245 /* rx comp 2-4 */ 1246 for (i = 1; i < MAX_RX_COMP_RINGS; i++) { 1247 val = (unsigned long) cp->init_rxcs[i] - 1248 (unsigned long) cp->init_block; 1249 writel((desc_dma + val) >> 32, cp->regs + 1250 REG_PLUS_RX_CBN_HI(i)); 1251 writel((desc_dma + val) & 0xffffffff, cp->regs + 1252 REG_PLUS_RX_CBN_LOW(i)); 1253 } 1254 } 1255 1256 /* read selective clear regs to prevent spurious interrupts 1257 * on reset because complete == kick. 1258 * selective clear set up to prevent interrupts on resets 1259 */ 1260 readl(cp->regs + REG_INTR_STATUS_ALIAS); 1261 writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR); 1262 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { 1263 for (i = 1; i < N_RX_COMP_RINGS; i++) 1264 readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i)); 1265 1266 /* 2 is different from 3 and 4 */ 1267 if (N_RX_COMP_RINGS > 1) 1268 writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1, 1269 cp->regs + REG_PLUS_ALIASN_CLEAR(1)); 1270 1271 for (i = 2; i < N_RX_COMP_RINGS; i++) 1272 writel(INTR_RX_DONE_ALT, 1273 cp->regs + REG_PLUS_ALIASN_CLEAR(i)); 1274 } 1275 1276 /* set up pause thresholds */ 1277 val = CAS_BASE(RX_PAUSE_THRESH_OFF, 1278 cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM); 1279 val |= CAS_BASE(RX_PAUSE_THRESH_ON, 1280 cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM); 1281 writel(val, cp->regs + REG_RX_PAUSE_THRESH); 1282 1283 /* zero out dma reassembly buffers */ 1284 for (i = 0; i < 64; i++) { 1285 writel(i, cp->regs + REG_RX_TABLE_ADDR); 1286 writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW); 1287 writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID); 1288 writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI); 1289 } 1290 1291 /* make sure address register is 0 for normal operation */ 1292 writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR); 1293 writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR); 1294 1295 /* interrupt mitigation */ 1296#ifdef USE_RX_BLANK 1297 val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL); 1298 val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL); 1299 writel(val, cp->regs + REG_RX_BLANK); 1300#else 1301 writel(0x0, cp->regs + REG_RX_BLANK); 1302#endif 1303 1304 /* interrupt generation as a function of low water marks for 1305 * free desc and completion entries. these are used to trigger 1306 * housekeeping for rx descs. we don't use the free interrupt 1307 * as it's not very useful 1308 */ 1309 /* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */ 1310 val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL); 1311 writel(val, cp->regs + REG_RX_AE_THRESH); 1312 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { 1313 val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1)); 1314 writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH); 1315 } 1316 1317 /* Random early detect registers. useful for congestion avoidance. 1318 * this should be tunable. 1319 */ 1320 writel(0x0, cp->regs + REG_RX_RED); 1321 1322 /* receive page sizes. default == 2K (0x800) */ 1323 val = 0; 1324 if (cp->page_size == 0x1000) 1325 val = 0x1; 1326 else if (cp->page_size == 0x2000) 1327 val = 0x2; 1328 else if (cp->page_size == 0x4000) 1329 val = 0x3; 1330 1331 /* round mtu + offset. constrain to page size. */ 1332 size = cp->dev->mtu + 64; 1333 if (size > cp->page_size) 1334 size = cp->page_size; 1335 1336 if (size <= 0x400) 1337 i = 0x0; 1338 else if (size <= 0x800) 1339 i = 0x1; 1340 else if (size <= 0x1000) 1341 i = 0x2; 1342 else 1343 i = 0x3; 1344 1345 cp->mtu_stride = 1 << (i + 10); 1346 val = CAS_BASE(RX_PAGE_SIZE, val); 1347 val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i); 1348 val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10)); 1349 val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1); 1350 writel(val, cp->regs + REG_RX_PAGE_SIZE); 1351 1352 /* enable the header parser if desired */ 1353 if (CAS_HP_FIRMWARE == cas_prog_null) 1354 return; 1355 1356 val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS); 1357 val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK; 1358 val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL); 1359 writel(val, cp->regs + REG_HP_CFG); 1360} 1361 1362static inline void cas_rxc_init(struct cas_rx_comp *rxc) 1363{ 1364 memset(rxc, 0, sizeof(*rxc)); 1365 rxc->word4 = cpu_to_le64(RX_COMP4_ZERO); 1366} 1367 1368/* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1] 1369 * flipping is protected by the fact that the chip will not 1370 * hand back the same page index while it's being processed. 1371 */ 1372static inline cas_page_t *cas_page_spare(struct cas *cp, const int index) 1373{ 1374 cas_page_t *page = cp->rx_pages[1][index]; 1375 cas_page_t *new; 1376 1377 if (cas_buffer_count(page) == 1) 1378 return page; 1379 1380 new = cas_page_dequeue(cp); 1381 if (new) { 1382 spin_lock(&cp->rx_inuse_lock); 1383 list_add(&page->list, &cp->rx_inuse_list); 1384 spin_unlock(&cp->rx_inuse_lock); 1385 } 1386 return new; 1387} 1388 1389/* this needs to be changed if we actually use the ENC RX DESC ring */ 1390static cas_page_t *cas_page_swap(struct cas *cp, const int ring, 1391 const int index) 1392{ 1393 cas_page_t **page0 = cp->rx_pages[0]; 1394 cas_page_t **page1 = cp->rx_pages[1]; 1395 1396 /* swap if buffer is in use */ 1397 if (cas_buffer_count(page0[index]) > 1) { 1398 cas_page_t *new = cas_page_spare(cp, index); 1399 if (new) { 1400 page1[index] = page0[index]; 1401 page0[index] = new; 1402 } 1403 } 1404 RX_USED_SET(page0[index], 0); 1405 return page0[index]; 1406} 1407 1408static void cas_clean_rxds(struct cas *cp) 1409{ 1410 /* only clean ring 0 as ring 1 is used for spare buffers */ 1411 struct cas_rx_desc *rxd = cp->init_rxds[0]; 1412 int i, size; 1413 1414 /* release all rx flows */ 1415 for (i = 0; i < N_RX_FLOWS; i++) { 1416 struct sk_buff *skb; 1417 while ((skb = __skb_dequeue(&cp->rx_flows[i]))) { 1418 cas_skb_release(skb); 1419 } 1420 } 1421 1422 /* initialize descriptors */ 1423 size = RX_DESC_RINGN_SIZE(0); 1424 for (i = 0; i < size; i++) { 1425 cas_page_t *page = cas_page_swap(cp, 0, i); 1426 rxd[i].buffer = cpu_to_le64(page->dma_addr); 1427 rxd[i].index = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) | 1428 CAS_BASE(RX_INDEX_RING, 0)); 1429 } 1430 1431 cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4; 1432 cp->rx_last[0] = 0; 1433 cp->cas_flags &= ~CAS_FLAG_RXD_POST(0); 1434} 1435 1436static void cas_clean_rxcs(struct cas *cp) 1437{ 1438 int i, j; 1439 1440 /* take ownership of rx comp descriptors */ 1441 memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS); 1442 memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS); 1443 for (i = 0; i < N_RX_COMP_RINGS; i++) { 1444 struct cas_rx_comp *rxc = cp->init_rxcs[i]; 1445 for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) { 1446 cas_rxc_init(rxc + j); 1447 } 1448 } 1449} 1450 1451#if 0 1452/* When we get a RX fifo overflow, the RX unit is probably hung 1453 * so we do the following. 1454 * 1455 * If any part of the reset goes wrong, we return 1 and that causes the 1456 * whole chip to be reset. 1457 */ 1458static int cas_rxmac_reset(struct cas *cp) 1459{ 1460 struct net_device *dev = cp->dev; 1461 int limit; 1462 u32 val; 1463 1464 /* First, reset MAC RX. */ 1465 writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); 1466 for (limit = 0; limit < STOP_TRIES; limit++) { 1467 if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN)) 1468 break; 1469 udelay(10); 1470 } 1471 if (limit == STOP_TRIES) { 1472 printk(KERN_ERR "%s: RX MAC will not disable, resetting whole " 1473 "chip.\n", dev->name); 1474 return 1; 1475 } 1476 1477 /* Second, disable RX DMA. */ 1478 writel(0, cp->regs + REG_RX_CFG); 1479 for (limit = 0; limit < STOP_TRIES; limit++) { 1480 if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN)) 1481 break; 1482 udelay(10); 1483 } 1484 if (limit == STOP_TRIES) { 1485 printk(KERN_ERR "%s: RX DMA will not disable, resetting whole " 1486 "chip.\n", dev->name); 1487 return 1; 1488 } 1489 1490 mdelay(5); 1491 1492 /* Execute RX reset command. */ 1493 writel(SW_RESET_RX, cp->regs + REG_SW_RESET); 1494 for (limit = 0; limit < STOP_TRIES; limit++) { 1495 if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX)) 1496 break; 1497 udelay(10); 1498 } 1499 if (limit == STOP_TRIES) { 1500 printk(KERN_ERR "%s: RX reset command will not execute, " 1501 "resetting whole chip.\n", dev->name); 1502 return 1; 1503 } 1504 1505 /* reset driver rx state */ 1506 cas_clean_rxds(cp); 1507 cas_clean_rxcs(cp); 1508 1509 /* Now, reprogram the rest of RX unit. */ 1510 cas_init_rx_dma(cp); 1511 1512 /* re-enable */ 1513 val = readl(cp->regs + REG_RX_CFG); 1514 writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG); 1515 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK); 1516 val = readl(cp->regs + REG_MAC_RX_CFG); 1517 writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); 1518 return 0; 1519} 1520#endif 1521 1522static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp, 1523 u32 status) 1524{ 1525 u32 stat = readl(cp->regs + REG_MAC_RX_STATUS); 1526 1527 if (!stat) 1528 return 0; 1529 1530 if (netif_msg_intr(cp)) 1531 printk(KERN_DEBUG "%s: rxmac interrupt, stat: 0x%x\n", 1532 cp->dev->name, stat); 1533 1534 /* these are all rollovers */ 1535 spin_lock(&cp->stat_lock[0]); 1536 if (stat & MAC_RX_ALIGN_ERR) 1537 cp->net_stats[0].rx_frame_errors += 0x10000; 1538 1539 if (stat & MAC_RX_CRC_ERR) 1540 cp->net_stats[0].rx_crc_errors += 0x10000; 1541 1542 if (stat & MAC_RX_LEN_ERR) 1543 cp->net_stats[0].rx_length_errors += 0x10000; 1544 1545 if (stat & MAC_RX_OVERFLOW) { 1546 cp->net_stats[0].rx_over_errors++; 1547 cp->net_stats[0].rx_fifo_errors++; 1548 } 1549 1550 /* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR 1551 * events. 1552 */ 1553 spin_unlock(&cp->stat_lock[0]); 1554 return 0; 1555} 1556 1557static int cas_mac_interrupt(struct net_device *dev, struct cas *cp, 1558 u32 status) 1559{ 1560 u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS); 1561 1562 if (!stat) 1563 return 0; 1564 1565 if (netif_msg_intr(cp)) 1566 printk(KERN_DEBUG "%s: mac interrupt, stat: 0x%x\n", 1567 cp->dev->name, stat); 1568 1569 /* This interrupt is just for pause frame and pause 1570 * tracking. It is useful for diagnostics and debug 1571 * but probably by default we will mask these events. 1572 */ 1573 if (stat & MAC_CTRL_PAUSE_STATE) 1574 cp->pause_entered++; 1575 1576 if (stat & MAC_CTRL_PAUSE_RECEIVED) 1577 cp->pause_last_time_recvd = (stat >> 16); 1578 1579 return 0; 1580} 1581 1582 1583/* Must be invoked under cp->lock. */ 1584static inline int cas_mdio_link_not_up(struct cas *cp) 1585{ 1586 u16 val; 1587 1588 switch (cp->lstate) { 1589 case link_force_ret: 1590 if (netif_msg_link(cp)) 1591 printk(KERN_INFO "%s: Autoneg failed again, keeping" 1592 " forced mode\n", cp->dev->name); 1593 cas_phy_write(cp, MII_BMCR, cp->link_fcntl); 1594 cp->timer_ticks = 5; 1595 cp->lstate = link_force_ok; 1596 cp->link_transition = LINK_TRANSITION_LINK_CONFIG; 1597 break; 1598 1599 case link_aneg: 1600 val = cas_phy_read(cp, MII_BMCR); 1601 1602 /* Try forced modes. we try things in the following order: 1603 * 1000 full -> 100 full/half -> 10 half 1604 */ 1605 val &= ~(BMCR_ANRESTART | BMCR_ANENABLE); 1606 val |= BMCR_FULLDPLX; 1607 val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ? 1608 CAS_BMCR_SPEED1000 : BMCR_SPEED100; 1609 cas_phy_write(cp, MII_BMCR, val); 1610 cp->timer_ticks = 5; 1611 cp->lstate = link_force_try; 1612 cp->link_transition = LINK_TRANSITION_LINK_CONFIG; 1613 break; 1614 1615 case link_force_try: 1616 /* Downgrade from 1000 to 100 to 10 Mbps if necessary. */ 1617 val = cas_phy_read(cp, MII_BMCR); 1618 cp->timer_ticks = 5; 1619 if (val & CAS_BMCR_SPEED1000) { /* gigabit */ 1620 val &= ~CAS_BMCR_SPEED1000; 1621 val |= (BMCR_SPEED100 | BMCR_FULLDPLX); 1622 cas_phy_write(cp, MII_BMCR, val); 1623 break; 1624 } 1625 1626 if (val & BMCR_SPEED100) { 1627 if (val & BMCR_FULLDPLX) /* fd failed */ 1628 val &= ~BMCR_FULLDPLX; 1629 else { /* 100Mbps failed */ 1630 val &= ~BMCR_SPEED100; 1631 } 1632 cas_phy_write(cp, MII_BMCR, val); 1633 break; 1634 } 1635 default: 1636 break; 1637 } 1638 return 0; 1639} 1640 1641 1642/* must be invoked with cp->lock held */ 1643static int cas_mii_link_check(struct cas *cp, const u16 bmsr) 1644{ 1645 int restart; 1646 1647 if (bmsr & BMSR_LSTATUS) { 1648 /* Ok, here we got a link. If we had it due to a forced 1649 * fallback, and we were configured for autoneg, we 1650 * retry a short autoneg pass. If you know your hub is 1651 * broken, use ethtool ;) 1652 */ 1653 if ((cp->lstate == link_force_try) && 1654 (cp->link_cntl & BMCR_ANENABLE)) { 1655 cp->lstate = link_force_ret; 1656 cp->link_transition = LINK_TRANSITION_LINK_CONFIG; 1657 cas_mif_poll(cp, 0); 1658 cp->link_fcntl = cas_phy_read(cp, MII_BMCR); 1659 cp->timer_ticks = 5; 1660 if (cp->opened && netif_msg_link(cp)) 1661 printk(KERN_INFO "%s: Got link after fallback, retrying" 1662 " autoneg once...\n", cp->dev->name); 1663 cas_phy_write(cp, MII_BMCR, 1664 cp->link_fcntl | BMCR_ANENABLE | 1665 BMCR_ANRESTART); 1666 cas_mif_poll(cp, 1); 1667 1668 } else if (cp->lstate != link_up) { 1669 cp->lstate = link_up; 1670 cp->link_transition = LINK_TRANSITION_LINK_UP; 1671 1672 if (cp->opened) { 1673 cas_set_link_modes(cp); 1674 netif_carrier_on(cp->dev); 1675 } 1676 } 1677 return 0; 1678 } 1679 1680 /* link not up. if the link was previously up, we restart the 1681 * whole process 1682 */ 1683 restart = 0; 1684 if (cp->lstate == link_up) { 1685 cp->lstate = link_down; 1686 cp->link_transition = LINK_TRANSITION_LINK_DOWN; 1687 1688 netif_carrier_off(cp->dev); 1689 if (cp->opened && netif_msg_link(cp)) 1690 printk(KERN_INFO "%s: Link down\n", 1691 cp->dev->name); 1692 restart = 1; 1693 1694 } else if (++cp->timer_ticks > 10) 1695 cas_mdio_link_not_up(cp); 1696 1697 return restart; 1698} 1699 1700static int cas_mif_interrupt(struct net_device *dev, struct cas *cp, 1701 u32 status) 1702{ 1703 u32 stat = readl(cp->regs + REG_MIF_STATUS); 1704 u16 bmsr; 1705 1706 /* check for a link change */ 1707 if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0) 1708 return 0; 1709 1710 bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat); 1711 return cas_mii_link_check(cp, bmsr); 1712} 1713 1714static int cas_pci_interrupt(struct net_device *dev, struct cas *cp, 1715 u32 status) 1716{ 1717 u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS); 1718 1719 if (!stat) 1720 return 0; 1721 1722 printk(KERN_ERR "%s: PCI error [%04x:%04x] ", dev->name, stat, 1723 readl(cp->regs + REG_BIM_DIAG)); 1724 1725 /* cassini+ has this reserved */ 1726 if ((stat & PCI_ERR_BADACK) && 1727 ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0)) 1728 printk("<No ACK64# during ABS64 cycle> "); 1729 1730 if (stat & PCI_ERR_DTRTO) 1731 printk("<Delayed transaction timeout> "); 1732 if (stat & PCI_ERR_OTHER) 1733 printk("<other> "); 1734 if (stat & PCI_ERR_BIM_DMA_WRITE) 1735 printk("<BIM DMA 0 write req> "); 1736 if (stat & PCI_ERR_BIM_DMA_READ) 1737 printk("<BIM DMA 0 read req> "); 1738 printk("\n"); 1739 1740 if (stat & PCI_ERR_OTHER) { 1741 u16 cfg; 1742 1743 /* Interrogate PCI config space for the 1744 * true cause. 1745 */ 1746 pci_read_config_word(cp->pdev, PCI_STATUS, &cfg); 1747 printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n", 1748 dev->name, cfg); 1749 if (cfg & PCI_STATUS_PARITY) 1750 printk(KERN_ERR "%s: PCI parity error detected.\n", 1751 dev->name); 1752 if (cfg & PCI_STATUS_SIG_TARGET_ABORT) 1753 printk(KERN_ERR "%s: PCI target abort.\n", 1754 dev->name); 1755 if (cfg & PCI_STATUS_REC_TARGET_ABORT) 1756 printk(KERN_ERR "%s: PCI master acks target abort.\n", 1757 dev->name); 1758 if (cfg & PCI_STATUS_REC_MASTER_ABORT) 1759 printk(KERN_ERR "%s: PCI master abort.\n", dev->name); 1760 if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR) 1761 printk(KERN_ERR "%s: PCI system error SERR#.\n", 1762 dev->name); 1763 if (cfg & PCI_STATUS_DETECTED_PARITY) 1764 printk(KERN_ERR "%s: PCI parity error.\n", 1765 dev->name); 1766 1767 /* Write the error bits back to clear them. */ 1768 cfg &= (PCI_STATUS_PARITY | 1769 PCI_STATUS_SIG_TARGET_ABORT | 1770 PCI_STATUS_REC_TARGET_ABORT | 1771 PCI_STATUS_REC_MASTER_ABORT | 1772 PCI_STATUS_SIG_SYSTEM_ERROR | 1773 PCI_STATUS_DETECTED_PARITY); 1774 pci_write_config_word(cp->pdev, PCI_STATUS, cfg); 1775 } 1776 1777 /* For all PCI errors, we should reset the chip. */ 1778 return 1; 1779} 1780 1781/* All non-normal interrupt conditions get serviced here. 1782 * Returns non-zero if we should just exit the interrupt 1783 * handler right now (ie. if we reset the card which invalidates 1784 * all of the other original irq status bits). 1785 */ 1786static int cas_abnormal_irq(struct net_device *dev, struct cas *cp, 1787 u32 status) 1788{ 1789 if (status & INTR_RX_TAG_ERROR) { 1790 /* corrupt RX tag framing */ 1791 if (netif_msg_rx_err(cp)) 1792 printk(KERN_DEBUG "%s: corrupt rx tag framing\n", 1793 cp->dev->name); 1794 spin_lock(&cp->stat_lock[0]); 1795 cp->net_stats[0].rx_errors++; 1796 spin_unlock(&cp->stat_lock[0]); 1797 goto do_reset; 1798 } 1799 1800 if (status & INTR_RX_LEN_MISMATCH) { 1801 /* length mismatch. */ 1802 if (netif_msg_rx_err(cp)) 1803 printk(KERN_DEBUG "%s: length mismatch for rx frame\n", 1804 cp->dev->name); 1805 spin_lock(&cp->stat_lock[0]); 1806 cp->net_stats[0].rx_errors++; 1807 spin_unlock(&cp->stat_lock[0]); 1808 goto do_reset; 1809 } 1810 1811 if (status & INTR_PCS_STATUS) { 1812 if (cas_pcs_interrupt(dev, cp, status)) 1813 goto do_reset; 1814 } 1815 1816 if (status & INTR_TX_MAC_STATUS) { 1817 if (cas_txmac_interrupt(dev, cp, status)) 1818 goto do_reset; 1819 } 1820 1821 if (status & INTR_RX_MAC_STATUS) { 1822 if (cas_rxmac_interrupt(dev, cp, status)) 1823 goto do_reset; 1824 } 1825 1826 if (status & INTR_MAC_CTRL_STATUS) { 1827 if (cas_mac_interrupt(dev, cp, status)) 1828 goto do_reset; 1829 } 1830 1831 if (status & INTR_MIF_STATUS) { 1832 if (cas_mif_interrupt(dev, cp, status)) 1833 goto do_reset; 1834 } 1835 1836 if (status & INTR_PCI_ERROR_STATUS) { 1837 if (cas_pci_interrupt(dev, cp, status)) 1838 goto do_reset; 1839 } 1840 return 0; 1841 1842do_reset: 1843#if 1 1844 atomic_inc(&cp->reset_task_pending); 1845 atomic_inc(&cp->reset_task_pending_all); 1846 printk(KERN_ERR "%s:reset called in cas_abnormal_irq [0x%x]\n", 1847 dev->name, status); 1848 schedule_work(&cp->reset_task); 1849#else 1850 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); 1851 printk(KERN_ERR "reset called in cas_abnormal_irq\n"); 1852 schedule_work(&cp->reset_task); 1853#endif 1854 return 1; 1855} 1856 1857/* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when 1858 * determining whether to do a netif_stop/wakeup 1859 */ 1860#define CAS_TABORT(x) (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1) 1861#define CAS_ROUND_PAGE(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK) 1862static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr, 1863 const int len) 1864{ 1865 unsigned long off = addr + len; 1866 1867 if (CAS_TABORT(cp) == 1) 1868 return 0; 1869 if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN) 1870 return 0; 1871 return TX_TARGET_ABORT_LEN; 1872} 1873 1874static inline void cas_tx_ringN(struct cas *cp, int ring, int limit) 1875{ 1876 struct cas_tx_desc *txds; 1877 struct sk_buff **skbs; 1878 struct net_device *dev = cp->dev; 1879 int entry, count; 1880 1881 spin_lock(&cp->tx_lock[ring]); 1882 txds = cp->init_txds[ring]; 1883 skbs = cp->tx_skbs[ring]; 1884 entry = cp->tx_old[ring]; 1885 1886 count = TX_BUFF_COUNT(ring, entry, limit); 1887 while (entry != limit) { 1888 struct sk_buff *skb = skbs[entry]; 1889 dma_addr_t daddr; 1890 u32 dlen; 1891 int frag; 1892 1893 if (!skb) { 1894 /* this should never occur */ 1895 entry = TX_DESC_NEXT(ring, entry); 1896 continue; 1897 } 1898 1899 /* however, we might get only a partial skb release. */ 1900 count -= skb_shinfo(skb)->nr_frags + 1901 + cp->tx_tiny_use[ring][entry].nbufs + 1; 1902 if (count < 0) 1903 break; 1904 1905 if (netif_msg_tx_done(cp)) 1906 printk(KERN_DEBUG "%s: tx[%d] done, slot %d\n", 1907 cp->dev->name, ring, entry); 1908 1909 skbs[entry] = NULL; 1910 cp->tx_tiny_use[ring][entry].nbufs = 0; 1911 1912 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { 1913 struct cas_tx_desc *txd = txds + entry; 1914 1915 daddr = le64_to_cpu(txd->buffer); 1916 dlen = CAS_VAL(TX_DESC_BUFLEN, 1917 le64_to_cpu(txd->control)); 1918 pci_unmap_page(cp->pdev, daddr, dlen, 1919 PCI_DMA_TODEVICE); 1920 entry = TX_DESC_NEXT(ring, entry); 1921 1922 /* tiny buffer may follow */ 1923 if (cp->tx_tiny_use[ring][entry].used) { 1924 cp->tx_tiny_use[ring][entry].used = 0; 1925 entry = TX_DESC_NEXT(ring, entry); 1926 } 1927 } 1928 1929 spin_lock(&cp->stat_lock[ring]); 1930 cp->net_stats[ring].tx_packets++; 1931 cp->net_stats[ring].tx_bytes += skb->len; 1932 spin_unlock(&cp->stat_lock[ring]); 1933 dev_kfree_skb_irq(skb); 1934 } 1935 cp->tx_old[ring] = entry; 1936 1937 /* this is wrong for multiple tx rings. the net device needs 1938 * multiple queues for this to do the right thing. we wait 1939 * for 2*packets to be available when using tiny buffers 1940 */ 1941 if (netif_queue_stopped(dev) && 1942 (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))) 1943 netif_wake_queue(dev); 1944 spin_unlock(&cp->tx_lock[ring]); 1945} 1946 1947static void cas_tx(struct net_device *dev, struct cas *cp, 1948 u32 status) 1949{ 1950 int limit, ring; 1951#ifdef USE_TX_COMPWB 1952 u64 compwb = le64_to_cpu(cp->init_block->tx_compwb); 1953#endif 1954 if (netif_msg_intr(cp)) 1955 printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %llx\n", 1956 cp->dev->name, status, (unsigned long long)compwb); 1957 /* process all the rings */ 1958 for (ring = 0; ring < N_TX_RINGS; ring++) { 1959#ifdef USE_TX_COMPWB 1960 /* use the completion writeback registers */ 1961 limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) | 1962 CAS_VAL(TX_COMPWB_LSB, compwb); 1963 compwb = TX_COMPWB_NEXT(compwb); 1964#else 1965 limit = readl(cp->regs + REG_TX_COMPN(ring)); 1966#endif 1967 if (cp->tx_old[ring] != limit) 1968 cas_tx_ringN(cp, ring, limit); 1969 } 1970} 1971 1972 1973static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, 1974 int entry, const u64 *words, 1975 struct sk_buff **skbref) 1976{ 1977 int dlen, hlen, len, i, alloclen; 1978 int off, swivel = RX_SWIVEL_OFF_VAL; 1979 struct cas_page *page; 1980 struct sk_buff *skb; 1981 void *addr, *crcaddr; 1982 char *p; 1983 1984 hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]); 1985 dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]); 1986 len = hlen + dlen; 1987 1988 if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT)) 1989 alloclen = len; 1990 else 1991 alloclen = max(hlen, RX_COPY_MIN); 1992 1993 skb = dev_alloc_skb(alloclen + swivel + cp->crc_size); 1994 if (skb == NULL) 1995 return -1; 1996 1997 *skbref = skb; 1998 skb_reserve(skb, swivel); 1999 2000 p = skb->data; 2001 addr = crcaddr = NULL; 2002 if (hlen) { /* always copy header pages */ 2003 i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]); 2004 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; 2005 off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 + 2006 swivel; 2007 2008 i = hlen; 2009 if (!dlen) /* attach FCS */ 2010 i += cp->crc_size; 2011 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i, 2012 PCI_DMA_FROMDEVICE); 2013 addr = cas_page_map(page->buffer); 2014 memcpy(p, addr + off, i); 2015 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i, 2016 PCI_DMA_FROMDEVICE); 2017 cas_page_unmap(addr); 2018 RX_USED_ADD(page, 0x100); 2019 p += hlen; 2020 swivel = 0; 2021 } 2022 2023 2024 if (alloclen < (hlen + dlen)) { 2025 skb_frag_t *frag = skb_shinfo(skb)->frags; 2026 2027 /* normal or jumbo packets. we use frags */ 2028 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]); 2029 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; 2030 off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel; 2031 2032 hlen = min(cp->page_size - off, dlen); 2033 if (hlen < 0) { 2034 if (netif_msg_rx_err(cp)) { 2035 printk(KERN_DEBUG "%s: rx page overflow: " 2036 "%d\n", cp->dev->name, hlen); 2037 } 2038 dev_kfree_skb_irq(skb); 2039 return -1; 2040 } 2041 i = hlen; 2042 if (i == dlen) /* attach FCS */ 2043 i += cp->crc_size; 2044 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i, 2045 PCI_DMA_FROMDEVICE); 2046 2047 /* make sure we always copy a header */ 2048 swivel = 0; 2049 if (p == (char *) skb->data) { /* not split */ 2050 addr = cas_page_map(page->buffer); 2051 memcpy(p, addr + off, RX_COPY_MIN); 2052 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i, 2053 PCI_DMA_FROMDEVICE); 2054 cas_page_unmap(addr); 2055 off += RX_COPY_MIN; 2056 swivel = RX_COPY_MIN; 2057 RX_USED_ADD(page, cp->mtu_stride); 2058 } else { 2059 RX_USED_ADD(page, hlen); 2060 } 2061 skb_put(skb, alloclen); 2062 2063 skb_shinfo(skb)->nr_frags++; 2064 skb->data_len += hlen - swivel; 2065 skb->len += hlen - swivel; 2066 2067 get_page(page->buffer); 2068 cas_buffer_inc(page); 2069 frag->page = page->buffer; 2070 frag->page_offset = off; 2071 frag->size = hlen - swivel; 2072 2073 /* any more data? */ 2074 if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) { 2075 hlen = dlen; 2076 off = 0; 2077 2078 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]); 2079 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; 2080 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr, 2081 hlen + cp->crc_size, 2082 PCI_DMA_FROMDEVICE); 2083 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr, 2084 hlen + cp->crc_size, 2085 PCI_DMA_FROMDEVICE); 2086 2087 skb_shinfo(skb)->nr_frags++; 2088 skb->data_len += hlen; 2089 skb->len += hlen; 2090 frag++; 2091 2092 get_page(page->buffer); 2093 cas_buffer_inc(page); 2094 frag->page = page->buffer; 2095 frag->page_offset = 0; 2096 frag->size = hlen; 2097 RX_USED_ADD(page, hlen + cp->crc_size); 2098 } 2099 2100 if (cp->crc_size) { 2101 addr = cas_page_map(page->buffer); 2102 crcaddr = addr + off + hlen; 2103 } 2104 2105 } else { 2106 /* copying packet */ 2107 if (!dlen) 2108 goto end_copy_pkt; 2109 2110 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]); 2111 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; 2112 off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel; 2113 hlen = min(cp->page_size - off, dlen); 2114 if (hlen < 0) { 2115 if (netif_msg_rx_err(cp)) { 2116 printk(KERN_DEBUG "%s: rx page overflow: " 2117 "%d\n", cp->dev->name, hlen); 2118 } 2119 dev_kfree_skb_irq(skb); 2120 return -1; 2121 } 2122 i = hlen; 2123 if (i == dlen) /* attach FCS */ 2124 i += cp->crc_size; 2125 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i, 2126 PCI_DMA_FROMDEVICE); 2127 addr = cas_page_map(page->buffer); 2128 memcpy(p, addr + off, i); 2129 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i, 2130 PCI_DMA_FROMDEVICE); 2131 cas_page_unmap(addr); 2132 if (p == (char *) skb->data) /* not split */ 2133 RX_USED_ADD(page, cp->mtu_stride); 2134 else 2135 RX_USED_ADD(page, i); 2136 2137 /* any more data? */ 2138 if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) { 2139 p += hlen; 2140 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]); 2141 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; 2142 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr, 2143 dlen + cp->crc_size, 2144 PCI_DMA_FROMDEVICE); 2145 addr = cas_page_map(page->buffer); 2146 memcpy(p, addr, dlen + cp->crc_size); 2147 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr, 2148 dlen + cp->crc_size, 2149 PCI_DMA_FROMDEVICE); 2150 cas_page_unmap(addr); 2151 RX_USED_ADD(page, dlen + cp->crc_size); 2152 } 2153end_copy_pkt: 2154 if (cp->crc_size) { 2155 addr = NULL; 2156 crcaddr = skb->data + alloclen; 2157 } 2158 skb_put(skb, alloclen); 2159 } 2160 2161 i = CAS_VAL(RX_COMP4_TCP_CSUM, words[3]); 2162 if (cp->crc_size) { 2163 /* checksum includes FCS. strip it out. */ 2164 i = csum_fold(csum_partial(crcaddr, cp->crc_size, i)); 2165 if (addr) 2166 cas_page_unmap(addr); 2167 } 2168 skb->csum = ntohs(i ^ 0xffff); 2169 skb->ip_summed = CHECKSUM_COMPLETE; 2170 skb->protocol = eth_type_trans(skb, cp->dev); 2171 return len; 2172} 2173 2174 2175/* we can handle up to 64 rx flows at a time. we do the same thing 2176 * as nonreassm except that we batch up the buffers. 2177 * NOTE: we currently just treat each flow as a bunch of packets that 2178 * we pass up. a better way would be to coalesce the packets 2179 * into a jumbo packet. to do that, we need to do the following: 2180 * 1) the first packet will have a clean split between header and 2181 * data. save both. 2182 * 2) each time the next flow packet comes in, extend the 2183 * data length and merge the checksums. 2184 * 3) on flow release, fix up the header. 2185 * 4) make sure the higher layer doesn't care. 2186 * because packets get coalesced, we shouldn't run into fragment count 2187 * issues. 2188 */ 2189static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words, 2190 struct sk_buff *skb) 2191{ 2192 int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1); 2193 struct sk_buff_head *flow = &cp->rx_flows[flowid]; 2194 2195 /* this is protected at a higher layer, so no need to 2196 * do any additional locking here. stick the buffer 2197 * at the end. 2198 */ 2199 __skb_insert(skb, flow->prev, (struct sk_buff *) flow, flow); 2200 if (words[0] & RX_COMP1_RELEASE_FLOW) { 2201 while ((skb = __skb_dequeue(flow))) { 2202 cas_skb_release(skb); 2203 } 2204 } 2205} 2206 2207/* put rx descriptor back on ring. if a buffer is in use by a higher 2208 * layer, this will need to put in a replacement. 2209 */ 2210static void cas_post_page(struct cas *cp, const int ring, const int index) 2211{ 2212 cas_page_t *new; 2213 int entry; 2214 2215 entry = cp->rx_old[ring]; 2216 2217 new = cas_page_swap(cp, ring, index); 2218 cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr); 2219 cp->init_rxds[ring][entry].index = 2220 cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) | 2221 CAS_BASE(RX_INDEX_RING, ring)); 2222 2223 entry = RX_DESC_ENTRY(ring, entry + 1); 2224 cp->rx_old[ring] = entry; 2225 2226 if (entry % 4) 2227 return; 2228 2229 if (ring == 0) 2230 writel(entry, cp->regs + REG_RX_KICK); 2231 else if ((N_RX_DESC_RINGS > 1) && 2232 (cp->cas_flags & CAS_FLAG_REG_PLUS)) 2233 writel(entry, cp->regs + REG_PLUS_RX_KICK1); 2234} 2235 2236 2237/* only when things are bad */ 2238static int cas_post_rxds_ringN(struct cas *cp, int ring, int num) 2239{ 2240 unsigned int entry, last, count, released; 2241 int cluster; 2242 cas_page_t **page = cp->rx_pages[ring]; 2243 2244 entry = cp->rx_old[ring]; 2245 2246 if (netif_msg_intr(cp)) 2247 printk(KERN_DEBUG "%s: rxd[%d] interrupt, done: %d\n", 2248 cp->dev->name, ring, entry); 2249 2250 cluster = -1; 2251 count = entry & 0x3; 2252 last = RX_DESC_ENTRY(ring, num ? entry + num - 4: entry - 4); 2253 released = 0; 2254 while (entry != last) { 2255 /* make a new buffer if it's still in use */ 2256 if (cas_buffer_count(page[entry]) > 1) { 2257 cas_page_t *new = cas_page_dequeue(cp); 2258 if (!new) { 2259 /* let the timer know that we need to 2260 * do this again 2261 */ 2262 cp->cas_flags |= CAS_FLAG_RXD_POST(ring); 2263 if (!timer_pending(&cp->link_timer)) 2264 mod_timer(&cp->link_timer, jiffies + 2265 CAS_LINK_FAST_TIMEOUT); 2266 cp->rx_old[ring] = entry; 2267 cp->rx_last[ring] = num ? num - released : 0; 2268 return -ENOMEM; 2269 } 2270 spin_lock(&cp->rx_inuse_lock); 2271 list_add(&page[entry]->list, &cp->rx_inuse_list); 2272 spin_unlock(&cp->rx_inuse_lock); 2273 cp->init_rxds[ring][entry].buffer = 2274 cpu_to_le64(new->dma_addr); 2275 page[entry] = new; 2276 2277 } 2278 2279 if (++count == 4) { 2280 cluster = entry; 2281 count = 0; 2282 } 2283 released++; 2284 entry = RX_DESC_ENTRY(ring, entry + 1); 2285 } 2286 cp->rx_old[ring] = entry; 2287 2288 if (cluster < 0) 2289 return 0; 2290 2291 if (ring == 0) 2292 writel(cluster, cp->regs + REG_RX_KICK); 2293 else if ((N_RX_DESC_RINGS > 1) && 2294 (cp->cas_flags & CAS_FLAG_REG_PLUS)) 2295 writel(cluster, cp->regs + REG_PLUS_RX_KICK1); 2296 return 0; 2297} 2298 2299 2300/* process a completion ring. packets are set up in three basic ways: 2301 * small packets: should be copied header + data in single buffer. 2302 * large packets: header and data in a single buffer. 2303 * split packets: header in a separate buffer from data. 2304 * data may be in multiple pages. data may be > 256 2305 * bytes but in a single page. 2306 * 2307 * NOTE: RX page posting is done in this routine as well. while there's 2308 * the capability of using multiple RX completion rings, it isn't 2309 * really worthwhile due to the fact that the page posting will 2310 * force serialization on the single descriptor ring. 2311 */ 2312static int cas_rx_ringN(struct cas *cp, int ring, int budget) 2313{ 2314 struct cas_rx_comp *rxcs = cp->init_rxcs[ring]; 2315 int entry, drops; 2316 int npackets = 0; 2317 2318 if (netif_msg_intr(cp)) 2319 printk(KERN_DEBUG "%s: rx[%d] interrupt, done: %d/%d\n", 2320 cp->dev->name, ring, 2321 readl(cp->regs + REG_RX_COMP_HEAD), 2322 cp->rx_new[ring]); 2323 2324 entry = cp->rx_new[ring]; 2325 drops = 0; 2326 while (1) { 2327 struct cas_rx_comp *rxc = rxcs + entry; 2328 struct sk_buff *skb; 2329 int type, len; 2330 u64 words[4]; 2331 int i, dring; 2332 2333 words[0] = le64_to_cpu(rxc->word1); 2334 words[1] = le64_to_cpu(rxc->word2); 2335 words[2] = le64_to_cpu(rxc->word3); 2336 words[3] = le64_to_cpu(rxc->word4); 2337 2338 /* don't touch if still owned by hw */ 2339 type = CAS_VAL(RX_COMP1_TYPE, words[0]); 2340 if (type == 0) 2341 break; 2342 2343 /* hw hasn't cleared the zero bit yet */ 2344 if (words[3] & RX_COMP4_ZERO) { 2345 break; 2346 } 2347 2348 /* get info on the packet */ 2349 if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) { 2350 spin_lock(&cp->stat_lock[ring]); 2351 cp->net_stats[ring].rx_errors++; 2352 if (words[3] & RX_COMP4_LEN_MISMATCH) 2353 cp->net_stats[ring].rx_length_errors++; 2354 if (words[3] & RX_COMP4_BAD) 2355 cp->net_stats[ring].rx_crc_errors++; 2356 spin_unlock(&cp->stat_lock[ring]); 2357 2358 /* We'll just return it to Cassini. */ 2359 drop_it: 2360 spin_lock(&cp->stat_lock[ring]); 2361 ++cp->net_stats[ring].rx_dropped; 2362 spin_unlock(&cp->stat_lock[ring]); 2363 goto next; 2364 } 2365 2366 len = cas_rx_process_pkt(cp, rxc, entry, words, &skb); 2367 if (len < 0) { 2368 ++drops; 2369 goto drop_it; 2370 } 2371 2372 /* see if it's a flow re-assembly or not. the driver 2373 * itself handles release back up. 2374 */ 2375 if (RX_DONT_BATCH || (type == 0x2)) { 2376 /* non-reassm: these always get released */ 2377 cas_skb_release(skb); 2378 } else { 2379 cas_rx_flow_pkt(cp, words, skb); 2380 } 2381 2382 spin_lock(&cp->stat_lock[ring]); 2383 cp->net_stats[ring].rx_packets++; 2384 cp->net_stats[ring].rx_bytes += len; 2385 spin_unlock(&cp->stat_lock[ring]); 2386 cp->dev->last_rx = jiffies; 2387 2388 next: 2389 npackets++; 2390 2391 /* should it be released? */ 2392 if (words[0] & RX_COMP1_RELEASE_HDR) { 2393 i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]); 2394 dring = CAS_VAL(RX_INDEX_RING, i); 2395 i = CAS_VAL(RX_INDEX_NUM, i); 2396 cas_post_page(cp, dring, i); 2397 } 2398 2399 if (words[0] & RX_COMP1_RELEASE_DATA) { 2400 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]); 2401 dring = CAS_VAL(RX_INDEX_RING, i); 2402 i = CAS_VAL(RX_INDEX_NUM, i); 2403 cas_post_page(cp, dring, i); 2404 } 2405 2406 if (words[0] & RX_COMP1_RELEASE_NEXT) { 2407 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]); 2408 dring = CAS_VAL(RX_INDEX_RING, i); 2409 i = CAS_VAL(RX_INDEX_NUM, i); 2410 cas_post_page(cp, dring, i); 2411 } 2412 2413 /* skip to the next entry */ 2414 entry = RX_COMP_ENTRY(ring, entry + 1 + 2415 CAS_VAL(RX_COMP1_SKIP, words[0])); 2416#ifdef USE_NAPI 2417 if (budget && (npackets >= budget)) 2418 break; 2419#endif 2420 } 2421 cp->rx_new[ring] = entry; 2422 2423 if (drops) 2424 printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", 2425 cp->dev->name); 2426 return npackets; 2427} 2428 2429 2430/* put completion entries back on the ring */ 2431static void cas_post_rxcs_ringN(struct net_device *dev, 2432 struct cas *cp, int ring) 2433{ 2434 struct cas_rx_comp *rxc = cp->init_rxcs[ring]; 2435 int last, entry; 2436 2437 last = cp->rx_cur[ring]; 2438 entry = cp->rx_new[ring]; 2439 if (netif_msg_intr(cp)) 2440 printk(KERN_DEBUG "%s: rxc[%d] interrupt, done: %d/%d\n", 2441 dev->name, ring, readl(cp->regs + REG_RX_COMP_HEAD), 2442 entry); 2443 2444 /* zero and re-mark descriptors */ 2445 while (last != entry) { 2446 cas_rxc_init(rxc + last); 2447 last = RX_COMP_ENTRY(ring, last + 1); 2448 } 2449 cp->rx_cur[ring] = last; 2450 2451 if (ring == 0) 2452 writel(last, cp->regs + REG_RX_COMP_TAIL); 2453 else if (cp->cas_flags & CAS_FLAG_REG_PLUS) 2454 writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring)); 2455} 2456 2457 2458 2459/* cassini can use all four PCI interrupts for the completion ring. 2460 * rings 3 and 4 are identical 2461 */ 2462#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD) 2463static inline void cas_handle_irqN(struct net_device *dev, 2464 struct cas *cp, const u32 status, 2465 const int ring) 2466{ 2467 if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT)) 2468 cas_post_rxcs_ringN(dev, cp, ring); 2469} 2470 2471static irqreturn_t cas_interruptN(int irq, void *dev_id) 2472{ 2473 struct net_device *dev = dev_id; 2474 struct cas *cp = netdev_priv(dev); 2475 unsigned long flags; 2476 int ring; 2477 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring)); 2478 2479 /* check for shared irq */ 2480 if (status == 0) 2481 return IRQ_NONE; 2482 2483 ring = (irq == cp->pci_irq_INTC) ? 2 : 3; 2484 spin_lock_irqsave(&cp->lock, flags); 2485 if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ 2486#ifdef USE_NAPI 2487 cas_mask_intr(cp); 2488 netif_rx_schedule(dev); 2489#else 2490 cas_rx_ringN(cp, ring, 0); 2491#endif 2492 status &= ~INTR_RX_DONE_ALT; 2493 } 2494 2495 if (status) 2496 cas_handle_irqN(dev, cp, status, ring); 2497 spin_unlock_irqrestore(&cp->lock, flags); 2498 return IRQ_HANDLED; 2499} 2500#endif 2501 2502#ifdef USE_PCI_INTB 2503/* everything but rx packets */ 2504static inline void cas_handle_irq1(struct cas *cp, const u32 status) 2505{ 2506 if (status & INTR_RX_BUF_UNAVAIL_1) { 2507 /* Frame arrived, no free RX buffers available. 2508 * NOTE: we can get this on a link transition. */ 2509 cas_post_rxds_ringN(cp, 1, 0); 2510 spin_lock(&cp->stat_lock[1]); 2511 cp->net_stats[1].rx_dropped++; 2512 spin_unlock(&cp->stat_lock[1]); 2513 } 2514 2515 if (status & INTR_RX_BUF_AE_1) 2516 cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) - 2517 RX_AE_FREEN_VAL(1)); 2518 2519 if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL)) 2520 cas_post_rxcs_ringN(cp, 1); 2521} 2522 2523/* ring 2 handles a few more events than 3 and 4 */ 2524static irqreturn_t cas_interrupt1(int irq, void *dev_id) 2525{ 2526 struct net_device *dev = dev_id; 2527 struct cas *cp = netdev_priv(dev); 2528 unsigned long flags; 2529 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1)); 2530 2531 /* check for shared interrupt */ 2532 if (status == 0) 2533 return IRQ_NONE; 2534 2535 spin_lock_irqsave(&cp->lock, flags); 2536 if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ 2537#ifdef USE_NAPI 2538 cas_mask_intr(cp); 2539 netif_rx_schedule(dev); 2540#else 2541 cas_rx_ringN(cp, 1, 0); 2542#endif 2543 status &= ~INTR_RX_DONE_ALT; 2544 } 2545 if (status) 2546 cas_handle_irq1(cp, status); 2547 spin_unlock_irqrestore(&cp->lock, flags); 2548 return IRQ_HANDLED; 2549} 2550#endif 2551 2552static inline void cas_handle_irq(struct net_device *dev, 2553 struct cas *cp, const u32 status) 2554{ 2555 /* housekeeping interrupts */ 2556 if (status & INTR_ERROR_MASK) 2557 cas_abnormal_irq(dev, cp, status); 2558 2559 if (status & INTR_RX_BUF_UNAVAIL) { 2560 /* Frame arrived, no free RX buffers available. 2561 * NOTE: we can get this on a link transition. 2562 */ 2563 cas_post_rxds_ringN(cp, 0, 0); 2564 spin_lock(&cp->stat_lock[0]); 2565 cp->net_stats[0].rx_dropped++; 2566 spin_unlock(&cp->stat_lock[0]); 2567 } else if (status & INTR_RX_BUF_AE) { 2568 cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) - 2569 RX_AE_FREEN_VAL(0)); 2570 } 2571 2572 if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL)) 2573 cas_post_rxcs_ringN(dev, cp, 0); 2574} 2575 2576static irqreturn_t cas_interrupt(int irq, void *dev_id) 2577{ 2578 struct net_device *dev = dev_id; 2579 struct cas *cp = netdev_priv(dev); 2580 unsigned long flags; 2581 u32 status = readl(cp->regs + REG_INTR_STATUS); 2582 2583 if (status == 0) 2584 return IRQ_NONE; 2585 2586 spin_lock_irqsave(&cp->lock, flags); 2587 if (status & (INTR_TX_ALL | INTR_TX_INTME)) { 2588 cas_tx(dev, cp, status); 2589 status &= ~(INTR_TX_ALL | INTR_TX_INTME); 2590 } 2591 2592 if (status & INTR_RX_DONE) { 2593#ifdef USE_NAPI 2594 cas_mask_intr(cp); 2595 netif_rx_schedule(dev); 2596#else 2597 cas_rx_ringN(cp, 0, 0); 2598#endif 2599 status &= ~INTR_RX_DONE; 2600 } 2601 2602 if (status) 2603 cas_handle_irq(dev, cp, status); 2604 spin_unlock_irqrestore(&cp->lock, flags); 2605 return IRQ_HANDLED; 2606} 2607 2608 2609#ifdef USE_NAPI 2610static int cas_poll(struct net_device *dev, int *budget) 2611{ 2612 struct cas *cp = netdev_priv(dev); 2613 int i, enable_intr, todo, credits; 2614 u32 status = readl(cp->regs + REG_INTR_STATUS); 2615 unsigned long flags; 2616 2617 spin_lock_irqsave(&cp->lock, flags); 2618 cas_tx(dev, cp, status); 2619 spin_unlock_irqrestore(&cp->lock, flags); 2620 2621 /* NAPI rx packets. we spread the credits across all of the 2622 * rxc rings 2623 */ 2624 todo = min(*budget, dev->quota); 2625 2626 /* to make sure we're fair with the work we loop through each 2627 * ring N_RX_COMP_RING times with a request of 2628 * todo / N_RX_COMP_RINGS 2629 */ 2630 enable_intr = 1; 2631 credits = 0; 2632 for (i = 0; i < N_RX_COMP_RINGS; i++) { 2633 int j; 2634 for (j = 0; j < N_RX_COMP_RINGS; j++) { 2635 credits += cas_rx_ringN(cp, j, todo / N_RX_COMP_RINGS); 2636 if (credits >= todo) { 2637 enable_intr = 0; 2638 goto rx_comp; 2639 } 2640 } 2641 } 2642 2643rx_comp: 2644 *budget -= credits; 2645 dev->quota -= credits; 2646 2647 /* final rx completion */ 2648 spin_lock_irqsave(&cp->lock, flags); 2649 if (status) 2650 cas_handle_irq(dev, cp, status); 2651 2652#ifdef USE_PCI_INTB 2653 if (N_RX_COMP_RINGS > 1) { 2654 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1)); 2655 if (status) 2656 cas_handle_irq1(dev, cp, status); 2657 } 2658#endif 2659 2660#ifdef USE_PCI_INTC 2661 if (N_RX_COMP_RINGS > 2) { 2662 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2)); 2663 if (status) 2664 cas_handle_irqN(dev, cp, status, 2); 2665 } 2666#endif 2667 2668#ifdef USE_PCI_INTD 2669 if (N_RX_COMP_RINGS > 3) { 2670 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3)); 2671 if (status) 2672 cas_handle_irqN(dev, cp, status, 3); 2673 } 2674#endif 2675 spin_unlock_irqrestore(&cp->lock, flags); 2676 if (enable_intr) { 2677 netif_rx_complete(dev); 2678 cas_unmask_intr(cp); 2679 return 0; 2680 } 2681 return 1; 2682} 2683#endif 2684 2685#ifdef CONFIG_NET_POLL_CONTROLLER 2686static void cas_netpoll(struct net_device *dev) 2687{ 2688 struct cas *cp = netdev_priv(dev); 2689 2690 cas_disable_irq(cp, 0); 2691 cas_interrupt(cp->pdev->irq, dev); 2692 cas_enable_irq(cp, 0); 2693 2694#ifdef USE_PCI_INTB 2695 if (N_RX_COMP_RINGS > 1) { 2696 /* cas_interrupt1(); */ 2697 } 2698#endif 2699#ifdef USE_PCI_INTC 2700 if (N_RX_COMP_RINGS > 2) { 2701 /* cas_interruptN(); */ 2702 } 2703#endif 2704#ifdef USE_PCI_INTD 2705 if (N_RX_COMP_RINGS > 3) { 2706 /* cas_interruptN(); */ 2707 } 2708#endif 2709} 2710#endif 2711 2712static void cas_tx_timeout(struct net_device *dev) 2713{ 2714 struct cas *cp = netdev_priv(dev); 2715 2716 printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name); 2717 if (!cp->hw_running) { 2718 printk("%s: hrm.. hw not running!\n", dev->name); 2719 return; 2720 } 2721 2722 printk(KERN_ERR "%s: MIF_STATE[%08x]\n", 2723 dev->name, readl(cp->regs + REG_MIF_STATE_MACHINE)); 2724 2725 printk(KERN_ERR "%s: MAC_STATE[%08x]\n", 2726 dev->name, readl(cp->regs + REG_MAC_STATE_MACHINE)); 2727 2728 printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x] " 2729 "FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n", 2730 dev->name, 2731 readl(cp->regs + REG_TX_CFG), 2732 readl(cp->regs + REG_MAC_TX_STATUS), 2733 readl(cp->regs + REG_MAC_TX_CFG), 2734 readl(cp->regs + REG_TX_FIFO_PKT_CNT), 2735 readl(cp->regs + REG_TX_FIFO_WRITE_PTR), 2736 readl(cp->regs + REG_TX_FIFO_READ_PTR), 2737 readl(cp->regs + REG_TX_SM_1), 2738 readl(cp->regs + REG_TX_SM_2)); 2739 2740 printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n", 2741 dev->name, 2742 readl(cp->regs + REG_RX_CFG), 2743 readl(cp->regs + REG_MAC_RX_STATUS), 2744 readl(cp->regs + REG_MAC_RX_CFG)); 2745 2746 printk(KERN_ERR "%s: HP_STATE[%08x:%08x:%08x:%08x]\n", 2747 dev->name, 2748 readl(cp->regs + REG_HP_STATE_MACHINE), 2749 readl(cp->regs + REG_HP_STATUS0), 2750 readl(cp->regs + REG_HP_STATUS1), 2751 readl(cp->regs + REG_HP_STATUS2)); 2752 2753#if 1 2754 atomic_inc(&cp->reset_task_pending); 2755 atomic_inc(&cp->reset_task_pending_all); 2756 schedule_work(&cp->reset_task); 2757#else 2758 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); 2759 schedule_work(&cp->reset_task); 2760#endif 2761} 2762 2763static inline int cas_intme(int ring, int entry) 2764{ 2765 /* Algorithm: IRQ every 1/2 of descriptors. */ 2766 if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1))) 2767 return 1; 2768 return 0; 2769} 2770 2771 2772static void cas_write_txd(struct cas *cp, int ring, int entry, 2773 dma_addr_t mapping, int len, u64 ctrl, int last) 2774{ 2775 struct cas_tx_desc *txd = cp->init_txds[ring] + entry; 2776 2777 ctrl |= CAS_BASE(TX_DESC_BUFLEN, len); 2778 if (cas_intme(ring, entry)) 2779 ctrl |= TX_DESC_INTME; 2780 if (last) 2781 ctrl |= TX_DESC_EOF; 2782 txd->control = cpu_to_le64(ctrl); 2783 txd->buffer = cpu_to_le64(mapping); 2784} 2785 2786static inline void *tx_tiny_buf(struct cas *cp, const int ring, 2787 const int entry) 2788{ 2789 return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry; 2790} 2791 2792static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring, 2793 const int entry, const int tentry) 2794{ 2795 cp->tx_tiny_use[ring][tentry].nbufs++; 2796 cp->tx_tiny_use[ring][entry].used = 1; 2797 return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry; 2798} 2799 2800static inline int cas_xmit_tx_ringN(struct cas *cp, int ring, 2801 struct sk_buff *skb) 2802{ 2803 struct net_device *dev = cp->dev; 2804 int entry, nr_frags, frag, tabort, tentry; 2805 dma_addr_t mapping; 2806 unsigned long flags; 2807 u64 ctrl; 2808 u32 len; 2809 2810 spin_lock_irqsave(&cp->tx_lock[ring], flags); 2811 2812 /* This is a hard error, log it. */ 2813 if (TX_BUFFS_AVAIL(cp, ring) <= 2814 CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) { 2815 netif_stop_queue(dev); 2816 spin_unlock_irqrestore(&cp->tx_lock[ring], flags); 2817 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when " 2818 "queue awake!\n", dev->name); 2819 return 1; 2820 } 2821 2822 ctrl = 0; 2823 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2824 const u64 csum_start_off = skb_transport_offset(skb); 2825 const u64 csum_stuff_off = csum_start_off + skb->csum_offset; 2826 2827 ctrl = TX_DESC_CSUM_EN | 2828 CAS_BASE(TX_DESC_CSUM_START, csum_start_off) | 2829 CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off); 2830 } 2831 2832 entry = cp->tx_new[ring]; 2833 cp->tx_skbs[ring][entry] = skb; 2834 2835 nr_frags = skb_shinfo(skb)->nr_frags; 2836 len = skb_headlen(skb); 2837 mapping = pci_map_page(cp->pdev, virt_to_page(skb->data), 2838 offset_in_page(skb->data), len, 2839 PCI_DMA_TODEVICE); 2840 2841 tentry = entry; 2842 tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len); 2843 if (unlikely(tabort)) { 2844 /* NOTE: len is always > tabort */ 2845 cas_write_txd(cp, ring, entry, mapping, len - tabort, 2846 ctrl | TX_DESC_SOF, 0); 2847 entry = TX_DESC_NEXT(ring, entry); 2848 2849 skb_copy_from_linear_data_offset(skb, len - tabort, 2850 tx_tiny_buf(cp, ring, entry), tabort); 2851 mapping = tx_tiny_map(cp, ring, entry, tentry); 2852 cas_write_txd(cp, ring, entry, mapping, tabort, ctrl, 2853 (nr_frags == 0)); 2854 } else { 2855 cas_write_txd(cp, ring, entry, mapping, len, ctrl | 2856 TX_DESC_SOF, (nr_frags == 0)); 2857 } 2858 entry = TX_DESC_NEXT(ring, entry); 2859 2860 for (frag = 0; frag < nr_frags; frag++) { 2861 skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag]; 2862 2863 len = fragp->size; 2864 mapping = pci_map_page(cp->pdev, fragp->page, 2865 fragp->page_offset, len, 2866 PCI_DMA_TODEVICE); 2867 2868 tabort = cas_calc_tabort(cp, fragp->page_offset, len); 2869 if (unlikely(tabort)) { 2870 void *addr; 2871 2872 /* NOTE: len is always > tabort */ 2873 cas_write_txd(cp, ring, entry, mapping, len - tabort, 2874 ctrl, 0); 2875 entry = TX_DESC_NEXT(ring, entry); 2876 2877 addr = cas_page_map(fragp->page); 2878 memcpy(tx_tiny_buf(cp, ring, entry), 2879 addr + fragp->page_offset + len - tabort, 2880 tabort); 2881 cas_page_unmap(addr); 2882 mapping = tx_tiny_map(cp, ring, entry, tentry); 2883 len = tabort; 2884 } 2885 2886 cas_write_txd(cp, ring, entry, mapping, len, ctrl, 2887 (frag + 1 == nr_frags)); 2888 entry = TX_DESC_NEXT(ring, entry); 2889 } 2890 2891 cp->tx_new[ring] = entry; 2892 if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)) 2893 netif_stop_queue(dev); 2894 2895 if (netif_msg_tx_queued(cp)) 2896 printk(KERN_DEBUG "%s: tx[%d] queued, slot %d, skblen %d, " 2897 "avail %d\n", 2898 dev->name, ring, entry, skb->len, 2899 TX_BUFFS_AVAIL(cp, ring)); 2900 writel(entry, cp->regs + REG_TX_KICKN(ring)); 2901 spin_unlock_irqrestore(&cp->tx_lock[ring], flags); 2902 return 0; 2903} 2904 2905static int cas_start_xmit(struct sk_buff *skb, struct net_device *dev) 2906{ 2907 struct cas *cp = netdev_priv(dev); 2908 2909 /* this is only used as a load-balancing hint, so it doesn't 2910 * need to be SMP safe 2911 */ 2912 static int ring; 2913 2914 if (skb_padto(skb, cp->min_frame_size)) 2915 return 0; 2916 2917 /* XXX: we need some higher-level QoS hooks to steer packets to 2918 * individual queues. 2919 */ 2920 if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb)) 2921 return 1; 2922 dev->trans_start = jiffies; 2923 return 0; 2924} 2925 2926static void cas_init_tx_dma(struct cas *cp) 2927{ 2928 u64 desc_dma = cp->block_dvma; 2929 unsigned long off; 2930 u32 val; 2931 int i; 2932 2933 /* set up tx completion writeback registers. must be 8-byte aligned */ 2934#ifdef USE_TX_COMPWB 2935 off = offsetof(struct cas_init_block, tx_compwb); 2936 writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI); 2937 writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW); 2938#endif 2939 2940 /* enable completion writebacks, enable paced mode, 2941 * disable read pipe, and disable pre-interrupt compwbs 2942 */ 2943 val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 | 2944 TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 | 2945 TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE | 2946 TX_CFG_INTR_COMPWB_DIS; 2947 2948 /* write out tx ring info and tx desc bases */ 2949 for (i = 0; i < MAX_TX_RINGS; i++) { 2950 off = (unsigned long) cp->init_txds[i] - 2951 (unsigned long) cp->init_block; 2952 2953 val |= CAS_TX_RINGN_BASE(i); 2954 writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i)); 2955 writel((desc_dma + off) & 0xffffffff, cp->regs + 2956 REG_TX_DBN_LOW(i)); 2957 /* don't zero out the kick register here as the system 2958 * will wedge 2959 */ 2960 } 2961 writel(val, cp->regs + REG_TX_CFG); 2962 2963 /* program max burst sizes. these numbers should be different 2964 * if doing QoS. 2965 */ 2966#ifdef USE_QOS 2967 writel(0x800, cp->regs + REG_TX_MAXBURST_0); 2968 writel(0x1600, cp->regs + REG_TX_MAXBURST_1); 2969 writel(0x2400, cp->regs + REG_TX_MAXBURST_2); 2970 writel(0x4800, cp->regs + REG_TX_MAXBURST_3); 2971#else 2972 writel(0x800, cp->regs + REG_TX_MAXBURST_0); 2973 writel(0x800, cp->regs + REG_TX_MAXBURST_1); 2974 writel(0x800, cp->regs + REG_TX_MAXBURST_2); 2975 writel(0x800, cp->regs + REG_TX_MAXBURST_3); 2976#endif 2977} 2978 2979/* Must be invoked under cp->lock. */ 2980static inline void cas_init_dma(struct cas *cp) 2981{ 2982 cas_init_tx_dma(cp); 2983 cas_init_rx_dma(cp); 2984} 2985 2986/* Must be invoked under cp->lock. */ 2987static u32 cas_setup_multicast(struct cas *cp) 2988{ 2989 u32 rxcfg = 0; 2990 int i; 2991 2992 if (cp->dev->flags & IFF_PROMISC) { 2993 rxcfg |= MAC_RX_CFG_PROMISC_EN; 2994 2995 } else if (cp->dev->flags & IFF_ALLMULTI) { 2996 for (i=0; i < 16; i++) 2997 writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i)); 2998 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN; 2999 3000 } else { 3001 u16 hash_table[16]; 3002 u32 crc; 3003 struct dev_mc_list *dmi = cp->dev->mc_list; 3004 int i; 3005 3006 /* use the alternate mac address registers for the 3007 * first 15 multicast addresses 3008 */ 3009 for (i = 1; i <= CAS_MC_EXACT_MATCH_SIZE; i++) { 3010 if (!dmi) { 3011 writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 0)); 3012 writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 1)); 3013 writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 2)); 3014 continue; 3015 } 3016 writel((dmi->dmi_addr[4] << 8) | dmi->dmi_addr[5], 3017 cp->regs + REG_MAC_ADDRN(i*3 + 0)); 3018 writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3], 3019 cp->regs + REG_MAC_ADDRN(i*3 + 1)); 3020 writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1], 3021 cp->regs + REG_MAC_ADDRN(i*3 + 2)); 3022 dmi = dmi->next; 3023 } 3024 3025 /* use hw hash table for the next series of 3026 * multicast addresses 3027 */ 3028 memset(hash_table, 0, sizeof(hash_table)); 3029 while (dmi) { 3030 crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr); 3031 crc >>= 24; 3032 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); 3033 dmi = dmi->next; 3034 } 3035 for (i=0; i < 16; i++) 3036 writel(hash_table[i], cp->regs + 3037 REG_MAC_HASH_TABLEN(i)); 3038 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN; 3039 } 3040 3041 return rxcfg; 3042} 3043 3044/* must be invoked under cp->stat_lock[N_TX_RINGS] */ 3045static void cas_clear_mac_err(struct cas *cp) 3046{ 3047 writel(0, cp->regs + REG_MAC_COLL_NORMAL); 3048 writel(0, cp->regs + REG_MAC_COLL_FIRST); 3049 writel(0, cp->regs + REG_MAC_COLL_EXCESS); 3050 writel(0, cp->regs + REG_MAC_COLL_LATE); 3051 writel(0, cp->regs + REG_MAC_TIMER_DEFER); 3052 writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK); 3053 writel(0, cp->regs + REG_MAC_RECV_FRAME); 3054 writel(0, cp->regs + REG_MAC_LEN_ERR); 3055 writel(0, cp->regs + REG_MAC_ALIGN_ERR); 3056 writel(0, cp->regs + REG_MAC_FCS_ERR); 3057 writel(0, cp->regs + REG_MAC_RX_CODE_ERR); 3058} 3059 3060 3061static void cas_mac_reset(struct cas *cp) 3062{ 3063 int i; 3064 3065 /* do both TX and RX reset */ 3066 writel(0x1, cp->regs + REG_MAC_TX_RESET); 3067 writel(0x1, cp->regs + REG_MAC_RX_RESET); 3068 3069 /* wait for TX */ 3070 i = STOP_TRIES; 3071 while (i-- > 0) { 3072 if (readl(cp->regs + REG_MAC_TX_RESET) == 0) 3073 break; 3074 udelay(10); 3075 } 3076 3077 /* wait for RX */ 3078 i = STOP_TRIES; 3079 while (i-- > 0) { 3080 if (readl(cp->regs + REG_MAC_RX_RESET) == 0) 3081 break; 3082 udelay(10); 3083 } 3084 3085 if (readl(cp->regs + REG_MAC_TX_RESET) | 3086 readl(cp->regs + REG_MAC_RX_RESET)) 3087 printk(KERN_ERR "%s: mac tx[%d]/rx[%d] reset failed [%08x]\n", 3088 cp->dev->name, readl(cp->regs + REG_MAC_TX_RESET), 3089 readl(cp->regs + REG_MAC_RX_RESET), 3090 readl(cp->regs + REG_MAC_STATE_MACHINE)); 3091} 3092 3093 3094/* Must be invoked under cp->lock. */ 3095static void cas_init_mac(struct cas *cp) 3096{ 3097 unsigned char *e = &cp->dev->dev_addr[0]; 3098 int i; 3099#ifdef CONFIG_CASSINI_MULTICAST_REG_WRITE 3100 u32 rxcfg; 3101#endif 3102 cas_mac_reset(cp); 3103 3104 /* setup core arbitration weight register */ 3105 writel(CAWR_RR_DIS, cp->regs + REG_CAWR); 3106 3107 /* XXX Use pci_dma_burst_advice() */ 3108#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA) 3109 /* set the infinite burst register for chips that don't have 3110 * pci issues. 3111 */ 3112 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0) 3113 writel(INF_BURST_EN, cp->regs + REG_INF_BURST); 3114#endif 3115 3116 writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE); 3117 3118 writel(0x00, cp->regs + REG_MAC_IPG0); 3119 writel(0x08, cp->regs + REG_MAC_IPG1); 3120 writel(0x04, cp->regs + REG_MAC_IPG2); 3121 3122 /* change later for 802.3z */ 3123 writel(0x40, cp->regs + REG_MAC_SLOT_TIME); 3124 3125 /* min frame + FCS */ 3126 writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN); 3127 3128 /* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we 3129 * specify the maximum frame size to prevent RX tag errors on 3130 * oversized frames. 3131 */ 3132 writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) | 3133 CAS_BASE(MAC_FRAMESIZE_MAX_FRAME, 3134 (CAS_MAX_MTU + ETH_HLEN + 4 + 4)), 3135 cp->regs + REG_MAC_FRAMESIZE_MAX); 3136 3137 /* NOTE: crc_size is used as a surrogate for half-duplex. 3138 * workaround saturn half-duplex issue by increasing preamble 3139 * size to 65 bytes. 3140 */ 3141 if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size) 3142 writel(0x41, cp->regs + REG_MAC_PA_SIZE); 3143 else 3144 writel(0x07, cp->regs + REG_MAC_PA_SIZE); 3145 writel(0x04, cp->regs + REG_MAC_JAM_SIZE); 3146 writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT); 3147 writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE); 3148 3149 writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED); 3150 3151 writel(0, cp->regs + REG_MAC_ADDR_FILTER0); 3152 writel(0, cp->regs + REG_MAC_ADDR_FILTER1); 3153 writel(0, cp->regs + REG_MAC_ADDR_FILTER2); 3154 writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK); 3155 writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK); 3156 3157 /* setup mac address in perfect filter array */ 3158 for (i = 0; i < 45; i++) 3159 writel(0x0, cp->regs + REG_MAC_ADDRN(i)); 3160 3161 writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0)); 3162 writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1)); 3163 writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2)); 3164 3165 writel(0x0001, cp->regs + REG_MAC_ADDRN(42)); 3166 writel(0xc200, cp->regs + REG_MAC_ADDRN(43)); 3167 writel(0x0180, cp->regs + REG_MAC_ADDRN(44)); 3168 3169#ifndef CONFIG_CASSINI_MULTICAST_REG_WRITE 3170 cp->mac_rx_cfg = cas_setup_multicast(cp); 3171#else 3172 /* WTZ: Do what Adrian did in cas_set_multicast. Doing 3173 * a writel does not seem to be necessary because Cassini 3174 * seems to preserve the configuration when we do the reset. 3175 * If the chip is in trouble, though, it is not clear if we 3176 * can really count on this behavior. cas_set_multicast uses 3177 * spin_lock_irqsave, but we are called only in cas_init_hw and 3178 * cas_init_hw is protected by cas_lock_all, which calls 3179 * spin_lock_irq (so it doesn't need to save the flags, and 3180 * we should be OK for the writel, as that is the only 3181 * difference). 3182 */ 3183 cp->mac_rx_cfg = rxcfg = cas_setup_multicast(cp); 3184 writel(rxcfg, cp->regs + REG_MAC_RX_CFG); 3185#endif 3186 spin_lock(&cp->stat_lock[N_TX_RINGS]); 3187 cas_clear_mac_err(cp); 3188 spin_unlock(&cp->stat_lock[N_TX_RINGS]); 3189 3190 /* Setup MAC interrupts. We want to get all of the interesting 3191 * counter expiration events, but we do not want to hear about 3192 * normal rx/tx as the DMA engine tells us that. 3193 */ 3194 writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK); 3195 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK); 3196 3197 /* Don't enable even the PAUSE interrupts for now, we 3198 * make no use of those events other than to record them. 3199 */ 3200 writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK); 3201} 3202 3203/* Must be invoked under cp->lock. */ 3204static void cas_init_pause_thresholds(struct cas *cp) 3205{ 3206 /* Calculate pause thresholds. Setting the OFF threshold to the 3207 * full RX fifo size effectively disables PAUSE generation 3208 */ 3209 if (cp->rx_fifo_size <= (2 * 1024)) { 3210 cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size; 3211 } else { 3212 int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63; 3213 if (max_frame * 3 > cp->rx_fifo_size) { 3214 cp->rx_pause_off = 7104; 3215 cp->rx_pause_on = 960; 3216 } else { 3217 int off = (cp->rx_fifo_size - (max_frame * 2)); 3218 int on = off - max_frame; 3219 cp->rx_pause_off = off; 3220 cp->rx_pause_on = on; 3221 } 3222 } 3223} 3224 3225static int cas_vpd_match(const void __iomem *p, const char *str) 3226{ 3227 int len = strlen(str) + 1; 3228 int i; 3229 3230 for (i = 0; i < len; i++) { 3231 if (readb(p + i) != str[i]) 3232 return 0; 3233 } 3234 return 1; 3235} 3236 3237 3238/* get the mac address by reading the vpd information in the rom. 3239 * also get the phy type and determine if there's an entropy generator. 3240 * NOTE: this is a bit convoluted for the following reasons: 3241 * 1) vpd info has order-dependent mac addresses for multinic cards 3242 * 2) the only way to determine the nic order is to use the slot 3243 * number. 3244 * 3) fiber cards don't have bridges, so their slot numbers don't 3245 * mean anything. 3246 * 4) we don't actually know we have a fiber card until after 3247 * the mac addresses are parsed. 3248 */ 3249static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr, 3250 const int offset) 3251{ 3252 void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START; 3253 void __iomem *base, *kstart; 3254 int i, len; 3255 int found = 0; 3256#define VPD_FOUND_MAC 0x01 3257#define VPD_FOUND_PHY 0x02 3258 3259 int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */ 3260 int mac_off = 0; 3261 3262 /* give us access to the PROM */ 3263 writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD, 3264 cp->regs + REG_BIM_LOCAL_DEV_EN); 3265 3266 /* check for an expansion rom */ 3267 if (readb(p) != 0x55 || readb(p + 1) != 0xaa) 3268 goto use_random_mac_addr; 3269 3270 /* search for beginning of vpd */ 3271 base = NULL; 3272 for (i = 2; i < EXPANSION_ROM_SIZE; i++) { 3273 /* check for PCIR */ 3274 if ((readb(p + i + 0) == 0x50) && 3275 (readb(p + i + 1) == 0x43) && 3276 (readb(p + i + 2) == 0x49) && 3277 (readb(p + i + 3) == 0x52)) { 3278 base = p + (readb(p + i + 8) | 3279 (readb(p + i + 9) << 8)); 3280 break; 3281 } 3282 } 3283 3284 if (!base || (readb(base) != 0x82)) 3285 goto use_random_mac_addr; 3286 3287 i = (readb(base + 1) | (readb(base + 2) << 8)) + 3; 3288 while (i < EXPANSION_ROM_SIZE) { 3289 if (readb(base + i) != 0x90) /* no vpd found */ 3290 goto use_random_mac_addr; 3291 3292 /* found a vpd field */ 3293 len = readb(base + i + 1) | (readb(base + i + 2) << 8); 3294 3295 /* extract keywords */ 3296 kstart = base + i + 3; 3297 p = kstart; 3298 while ((p - kstart) < len) { 3299 int klen = readb(p + 2); 3300 int j; 3301 char type; 3302 3303 p += 3; 3304 3305 /* look for the following things: 3306 * -- correct length == 29 3307 * 3 (type) + 2 (size) + 3308 * 18 (strlen("local-mac-address") + 1) + 3309 * 6 (mac addr) 3310 * -- VPD Instance 'I' 3311 * -- VPD Type Bytes 'B' 3312 * -- VPD data length == 6 3313 * -- property string == local-mac-address 3314 * 3315 * -- correct length == 24 3316 * 3 (type) + 2 (size) + 3317 * 12 (strlen("entropy-dev") + 1) + 3318 * 7 (strlen("vms110") + 1) 3319 * -- VPD Instance 'I' 3320 * -- VPD Type String 'B' 3321 * -- VPD data length == 7 3322 * -- property string == entropy-dev 3323 * 3324 * -- correct length == 18 3325 * 3 (type) + 2 (size) + 3326 * 9 (strlen("phy-type") + 1) + 3327 * 4 (strlen("pcs") + 1) 3328 * -- VPD Instance 'I' 3329 * -- VPD Type String 'S' 3330 * -- VPD data length == 4 3331 * -- property string == phy-type 3332 * 3333 * -- correct length == 23 3334 * 3 (type) + 2 (size) + 3335 * 14 (strlen("phy-interface") + 1) + 3336 * 4 (strlen("pcs") + 1) 3337 * -- VPD Instance 'I' 3338 * -- VPD Type String 'S' 3339 * -- VPD data length == 4 3340 * -- property string == phy-interface 3341 */ 3342 if (readb(p) != 'I') 3343 goto next; 3344 3345 /* finally, check string and length */ 3346 type = readb(p + 3); 3347 if (type == 'B') { 3348 if ((klen == 29) && readb(p + 4) == 6 && 3349 cas_vpd_match(p + 5, 3350 "local-mac-address")) { 3351 if (mac_off++ > offset) 3352 goto next; 3353 3354 /* set mac address */ 3355 for (j = 0; j < 6; j++) 3356 dev_addr[j] = 3357 readb(p + 23 + j); 3358 goto found_mac; 3359 } 3360 } 3361 3362 if (type != 'S') 3363 goto next; 3364 3365#ifdef USE_ENTROPY_DEV 3366 if ((klen == 24) && 3367 cas_vpd_match(p + 5, "entropy-dev") && 3368 cas_vpd_match(p + 17, "vms110")) { 3369 cp->cas_flags |= CAS_FLAG_ENTROPY_DEV; 3370 goto next; 3371 } 3372#endif 3373 3374 if (found & VPD_FOUND_PHY) 3375 goto next; 3376 3377 if ((klen == 18) && readb(p + 4) == 4 && 3378 cas_vpd_match(p + 5, "phy-type")) { 3379 if (cas_vpd_match(p + 14, "pcs")) { 3380 phy_type = CAS_PHY_SERDES; 3381 goto found_phy; 3382 } 3383 } 3384 3385 if ((klen == 23) && readb(p + 4) == 4 && 3386 cas_vpd_match(p + 5, "phy-interface")) { 3387 if (cas_vpd_match(p + 19, "pcs")) { 3388 phy_type = CAS_PHY_SERDES; 3389 goto found_phy; 3390 } 3391 } 3392found_mac: 3393 found |= VPD_FOUND_MAC; 3394 goto next; 3395 3396found_phy: 3397 found |= VPD_FOUND_PHY; 3398 3399next: 3400 p += klen; 3401 } 3402 i += len + 3; 3403 } 3404 3405use_random_mac_addr: 3406 if (found & VPD_FOUND_MAC) 3407 goto done; 3408 3409 /* Sun MAC prefix then 3 random bytes. */ 3410 printk(PFX "MAC address not found in ROM VPD\n"); 3411 dev_addr[0] = 0x08; 3412 dev_addr[1] = 0x00; 3413 dev_addr[2] = 0x20; 3414 get_random_bytes(dev_addr + 3, 3); 3415 3416done: 3417 writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN); 3418 return phy_type; 3419} 3420 3421/* check pci invariants */ 3422static void cas_check_pci_invariants(struct cas *cp) 3423{ 3424 struct pci_dev *pdev = cp->pdev; 3425 u8 rev; 3426 3427 cp->cas_flags = 0; 3428 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); 3429 if ((pdev->vendor == PCI_VENDOR_ID_SUN) && 3430 (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) { 3431 if (rev >= CAS_ID_REVPLUS) 3432 cp->cas_flags |= CAS_FLAG_REG_PLUS; 3433 if (rev < CAS_ID_REVPLUS02u) 3434 cp->cas_flags |= CAS_FLAG_TARGET_ABORT; 3435 3436 /* Original Cassini supports HW CSUM, but it's not 3437 * enabled by default as it can trigger TX hangs. 3438 */ 3439 if (rev < CAS_ID_REV2) 3440 cp->cas_flags |= CAS_FLAG_NO_HW_CSUM; 3441 } else { 3442 /* Only sun has original cassini chips. */ 3443 cp->cas_flags |= CAS_FLAG_REG_PLUS; 3444 3445 /* We use a flag because the same phy might be externally 3446 * connected. 3447 */ 3448 if ((pdev->vendor == PCI_VENDOR_ID_NS) && 3449 (pdev->device == PCI_DEVICE_ID_NS_SATURN)) 3450 cp->cas_flags |= CAS_FLAG_SATURN; 3451 } 3452} 3453 3454 3455static int cas_check_invariants(struct cas *cp) 3456{ 3457 struct pci_dev *pdev = cp->pdev; 3458 u32 cfg; 3459 int i; 3460 3461 /* get page size for rx buffers. */ 3462 cp->page_order = 0; 3463#ifdef USE_PAGE_ORDER 3464 if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) { 3465 /* see if we can allocate larger pages */ 3466 struct page *page = alloc_pages(GFP_ATOMIC, 3467 CAS_JUMBO_PAGE_SHIFT - 3468 PAGE_SHIFT); 3469 if (page) { 3470 __free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT); 3471 cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT; 3472 } else { 3473 printk(PFX "MTU limited to %d bytes\n", CAS_MAX_MTU); 3474 } 3475 } 3476#endif 3477 cp->page_size = (PAGE_SIZE << cp->page_order); 3478 3479 /* Fetch the FIFO configurations. */ 3480 cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64; 3481 cp->rx_fifo_size = RX_FIFO_SIZE; 3482 3483 /* finish phy determination. MDIO1 takes precedence over MDIO0 if 3484 * they're both connected. 3485 */ 3486 cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr, 3487 PCI_SLOT(pdev->devfn)); 3488 if (cp->phy_type & CAS_PHY_SERDES) { 3489 cp->cas_flags |= CAS_FLAG_1000MB_CAP; 3490 return 0; /* no more checking needed */ 3491 } 3492 3493 /* MII */ 3494 cfg = readl(cp->regs + REG_MIF_CFG); 3495 if (cfg & MIF_CFG_MDIO_1) { 3496 cp->phy_type = CAS_PHY_MII_MDIO1; 3497 } else if (cfg & MIF_CFG_MDIO_0) { 3498 cp->phy_type = CAS_PHY_MII_MDIO0; 3499 } 3500 3501 cas_mif_poll(cp, 0); 3502 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE); 3503 3504 for (i = 0; i < 32; i++) { 3505 u32 phy_id; 3506 int j; 3507 3508 for (j = 0; j < 3; j++) { 3509 cp->phy_addr = i; 3510 phy_id = cas_phy_read(cp, MII_PHYSID1) << 16; 3511 phy_id |= cas_phy_read(cp, MII_PHYSID2); 3512 if (phy_id && (phy_id != 0xFFFFFFFF)) { 3513 cp->phy_id = phy_id; 3514 goto done; 3515 } 3516 } 3517 } 3518 printk(KERN_ERR PFX "MII phy did not respond [%08x]\n", 3519 readl(cp->regs + REG_MIF_STATE_MACHINE)); 3520 return -1; 3521 3522done: 3523 /* see if we can do gigabit */ 3524 cfg = cas_phy_read(cp, MII_BMSR); 3525 if ((cfg & CAS_BMSR_1000_EXTEND) && 3526 cas_phy_read(cp, CAS_MII_1000_EXTEND)) 3527 cp->cas_flags |= CAS_FLAG_1000MB_CAP; 3528 return 0; 3529} 3530 3531/* Must be invoked under cp->lock. */ 3532static inline void cas_start_dma(struct cas *cp) 3533{ 3534 int i; 3535 u32 val; 3536 int txfailed = 0; 3537 3538 /* enable dma */ 3539 val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN; 3540 writel(val, cp->regs + REG_TX_CFG); 3541 val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN; 3542 writel(val, cp->regs + REG_RX_CFG); 3543 3544 /* enable the mac */ 3545 val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN; 3546 writel(val, cp->regs + REG_MAC_TX_CFG); 3547 val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN; 3548 writel(val, cp->regs + REG_MAC_RX_CFG); 3549 3550 i = STOP_TRIES; 3551 while (i-- > 0) { 3552 val = readl(cp->regs + REG_MAC_TX_CFG); 3553 if ((val & MAC_TX_CFG_EN)) 3554 break; 3555 udelay(10); 3556 } 3557 if (i < 0) txfailed = 1; 3558 i = STOP_TRIES; 3559 while (i-- > 0) { 3560 val = readl(cp->regs + REG_MAC_RX_CFG); 3561 if ((val & MAC_RX_CFG_EN)) { 3562 if (txfailed) { 3563 printk(KERN_ERR 3564 "%s: enabling mac failed [tx:%08x:%08x].\n", 3565 cp->dev->name, 3566 readl(cp->regs + REG_MIF_STATE_MACHINE), 3567 readl(cp->regs + REG_MAC_STATE_MACHINE)); 3568 } 3569 goto enable_rx_done; 3570 } 3571 udelay(10); 3572 } 3573 printk(KERN_ERR "%s: enabling mac failed [%s:%08x:%08x].\n", 3574 cp->dev->name, 3575 (txfailed? "tx,rx":"rx"), 3576 readl(cp->regs + REG_MIF_STATE_MACHINE), 3577 readl(cp->regs + REG_MAC_STATE_MACHINE)); 3578 3579enable_rx_done: 3580 cas_unmask_intr(cp); /* enable interrupts */ 3581 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK); 3582 writel(0, cp->regs + REG_RX_COMP_TAIL); 3583 3584 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { 3585 if (N_RX_DESC_RINGS > 1) 3586 writel(RX_DESC_RINGN_SIZE(1) - 4, 3587 cp->regs + REG_PLUS_RX_KICK1); 3588 3589 for (i = 1; i < N_RX_COMP_RINGS; i++) 3590 writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i)); 3591 } 3592} 3593 3594/* Must be invoked under cp->lock. */ 3595static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd, 3596 int *pause) 3597{ 3598 u32 val = readl(cp->regs + REG_PCS_MII_LPA); 3599 *fd = (val & PCS_MII_LPA_FD) ? 1 : 0; 3600 *pause = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00; 3601 if (val & PCS_MII_LPA_ASYM_PAUSE) 3602 *pause |= 0x10; 3603 *spd = 1000; 3604} 3605 3606/* Must be invoked under cp->lock. */ 3607static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd, 3608 int *pause) 3609{ 3610 u32 val; 3611 3612 *fd = 0; 3613 *spd = 10; 3614 *pause = 0; 3615 3616 /* use GMII registers */ 3617 val = cas_phy_read(cp, MII_LPA); 3618 if (val & CAS_LPA_PAUSE) 3619 *pause = 0x01; 3620 3621 if (val & CAS_LPA_ASYM_PAUSE) 3622 *pause |= 0x10; 3623 3624 if (val & LPA_DUPLEX) 3625 *fd = 1; 3626 if (val & LPA_100) 3627 *spd = 100; 3628 3629 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { 3630 val = cas_phy_read(cp, CAS_MII_1000_STATUS); 3631 if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF)) 3632 *spd = 1000; 3633 if (val & CAS_LPA_1000FULL) 3634 *fd = 1; 3635 } 3636} 3637 3638/* A link-up condition has occurred, initialize and enable the 3639 * rest of the chip. 3640 * 3641 * Must be invoked under cp->lock. 3642 */ 3643static void cas_set_link_modes(struct cas *cp) 3644{ 3645 u32 val; 3646 int full_duplex, speed, pause; 3647 3648 full_duplex = 0; 3649 speed = 10; 3650 pause = 0; 3651 3652 if (CAS_PHY_MII(cp->phy_type)) { 3653 cas_mif_poll(cp, 0); 3654 val = cas_phy_read(cp, MII_BMCR); 3655 if (val & BMCR_ANENABLE) { 3656 cas_read_mii_link_mode(cp, &full_duplex, &speed, 3657 &pause); 3658 } else { 3659 if (val & BMCR_FULLDPLX) 3660 full_duplex = 1; 3661 3662 if (val & BMCR_SPEED100) 3663 speed = 100; 3664 else if (val & CAS_BMCR_SPEED1000) 3665 speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ? 3666 1000 : 100; 3667 } 3668 cas_mif_poll(cp, 1); 3669 3670 } else { 3671 val = readl(cp->regs + REG_PCS_MII_CTRL); 3672 cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause); 3673 if ((val & PCS_MII_AUTONEG_EN) == 0) { 3674 if (val & PCS_MII_CTRL_DUPLEX) 3675 full_duplex = 1; 3676 } 3677 } 3678 3679 if (netif_msg_link(cp)) 3680 printk(KERN_INFO "%s: Link up at %d Mbps, %s-duplex.\n", 3681 cp->dev->name, speed, (full_duplex ? "full" : "half")); 3682 3683 val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED; 3684 if (CAS_PHY_MII(cp->phy_type)) { 3685 val |= MAC_XIF_MII_BUFFER_OUTPUT_EN; 3686 if (!full_duplex) 3687 val |= MAC_XIF_DISABLE_ECHO; 3688 } 3689 if (full_duplex) 3690 val |= MAC_XIF_FDPLX_LED; 3691 if (speed == 1000) 3692 val |= MAC_XIF_GMII_MODE; 3693 writel(val, cp->regs + REG_MAC_XIF_CFG); 3694 3695 /* deal with carrier and collision detect. */ 3696 val = MAC_TX_CFG_IPG_EN; 3697 if (full_duplex) { 3698 val |= MAC_TX_CFG_IGNORE_CARRIER; 3699 val |= MAC_TX_CFG_IGNORE_COLL; 3700 } else { 3701#ifndef USE_CSMA_CD_PROTO 3702 val |= MAC_TX_CFG_NEVER_GIVE_UP_EN; 3703 val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM; 3704#endif 3705 } 3706 /* val now set up for REG_MAC_TX_CFG */ 3707 3708 /* If gigabit and half-duplex, enable carrier extension 3709 * mode. increase slot time to 512 bytes as well. 3710 * else, disable it and make sure slot time is 64 bytes. 3711 * also activate checksum bug workaround 3712 */ 3713 if ((speed == 1000) && !full_duplex) { 3714 writel(val | MAC_TX_CFG_CARRIER_EXTEND, 3715 cp->regs + REG_MAC_TX_CFG); 3716 3717 val = readl(cp->regs + REG_MAC_RX_CFG); 3718 val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */ 3719 writel(val | MAC_RX_CFG_CARRIER_EXTEND, 3720 cp->regs + REG_MAC_RX_CFG); 3721 3722 writel(0x200, cp->regs + REG_MAC_SLOT_TIME); 3723 3724 cp->crc_size = 4; 3725 /* minimum size gigabit frame at half duplex */ 3726 cp->min_frame_size = CAS_1000MB_MIN_FRAME; 3727 3728 } else { 3729 writel(val, cp->regs + REG_MAC_TX_CFG); 3730 3731 /* checksum bug workaround. don't strip FCS when in 3732 * half-duplex mode 3733 */ 3734 val = readl(cp->regs + REG_MAC_RX_CFG); 3735 if (full_duplex) { 3736 val |= MAC_RX_CFG_STRIP_FCS; 3737 cp->crc_size = 0; 3738 cp->min_frame_size = CAS_MIN_MTU; 3739 } else { 3740 val &= ~MAC_RX_CFG_STRIP_FCS; 3741 cp->crc_size = 4; 3742 cp->min_frame_size = CAS_MIN_FRAME; 3743 } 3744 writel(val & ~MAC_RX_CFG_CARRIER_EXTEND, 3745 cp->regs + REG_MAC_RX_CFG); 3746 writel(0x40, cp->regs + REG_MAC_SLOT_TIME); 3747 } 3748 3749 if (netif_msg_link(cp)) { 3750 if (pause & 0x01) { 3751 printk(KERN_INFO "%s: Pause is enabled " 3752 "(rxfifo: %d off: %d on: %d)\n", 3753 cp->dev->name, 3754 cp->rx_fifo_size, 3755 cp->rx_pause_off, 3756 cp->rx_pause_on); 3757 } else if (pause & 0x10) { 3758 printk(KERN_INFO "%s: TX pause enabled\n", 3759 cp->dev->name); 3760 } else { 3761 printk(KERN_INFO "%s: Pause is disabled\n", 3762 cp->dev->name); 3763 } 3764 } 3765 3766 val = readl(cp->regs + REG_MAC_CTRL_CFG); 3767 val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN); 3768 if (pause) { /* symmetric or asymmetric pause */ 3769 val |= MAC_CTRL_CFG_SEND_PAUSE_EN; 3770 if (pause & 0x01) { /* symmetric pause */ 3771 val |= MAC_CTRL_CFG_RECV_PAUSE_EN; 3772 } 3773 } 3774 writel(val, cp->regs + REG_MAC_CTRL_CFG); 3775 cas_start_dma(cp); 3776} 3777 3778/* Must be invoked under cp->lock. */ 3779static void cas_init_hw(struct cas *cp, int restart_link) 3780{ 3781 if (restart_link) 3782 cas_phy_init(cp); 3783 3784 cas_init_pause_thresholds(cp); 3785 cas_init_mac(cp); 3786 cas_init_dma(cp); 3787 3788 if (restart_link) { 3789 /* Default aneg parameters */ 3790 cp->timer_ticks = 0; 3791 cas_begin_auto_negotiation(cp, NULL); 3792 } else if (cp->lstate == link_up) { 3793 cas_set_link_modes(cp); 3794 netif_carrier_on(cp->dev); 3795 } 3796} 3797 3798/* Must be invoked under cp->lock. on earlier cassini boards, 3799 * SOFT_0 is tied to PCI reset. we use this to force a pci reset, 3800 * let it settle out, and then restore pci state. 3801 */ 3802static void cas_hard_reset(struct cas *cp) 3803{ 3804 writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN); 3805 udelay(20); 3806 pci_restore_state(cp->pdev); 3807} 3808 3809 3810static void cas_global_reset(struct cas *cp, int blkflag) 3811{ 3812 int limit; 3813 3814 /* issue a global reset. don't use RSTOUT. */ 3815 if (blkflag && !CAS_PHY_MII(cp->phy_type)) { 3816 /* For PCS, when the blkflag is set, we should set the 3817 * SW_REST_BLOCK_PCS_SLINK bit to prevent the results of 3818 * the last autonegotiation from being cleared. We'll 3819 * need some special handling if the chip is set into a 3820 * loopback mode. 3821 */ 3822 writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK), 3823 cp->regs + REG_SW_RESET); 3824 } else { 3825 writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET); 3826 } 3827 3828 /* need to wait at least 3ms before polling register */ 3829 mdelay(3); 3830 3831 limit = STOP_TRIES; 3832 while (limit-- > 0) { 3833 u32 val = readl(cp->regs + REG_SW_RESET); 3834 if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0) 3835 goto done; 3836 udelay(10); 3837 } 3838 printk(KERN_ERR "%s: sw reset failed.\n", cp->dev->name); 3839 3840done: 3841 /* enable various BIM interrupts */ 3842 writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE | 3843 BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG); 3844 3845 /* clear out pci error status mask for handled errors. 3846 * we don't deal with DMA counter overflows as they happen 3847 * all the time. 3848 */ 3849 writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO | 3850 PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE | 3851 PCI_ERR_BIM_DMA_READ), cp->regs + 3852 REG_PCI_ERR_STATUS_MASK); 3853 3854 /* set up for MII by default to address mac rx reset timeout 3855 * issue 3856 */ 3857 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE); 3858} 3859 3860static void cas_reset(struct cas *cp, int blkflag) 3861{ 3862 u32 val; 3863 3864 cas_mask_intr(cp); 3865 cas_global_reset(cp, blkflag); 3866 cas_mac_reset(cp); 3867 cas_entropy_reset(cp); 3868 3869 /* disable dma engines. */ 3870 val = readl(cp->regs + REG_TX_CFG); 3871 val &= ~TX_CFG_DMA_EN; 3872 writel(val, cp->regs + REG_TX_CFG); 3873 3874 val = readl(cp->regs + REG_RX_CFG); 3875 val &= ~RX_CFG_DMA_EN; 3876 writel(val, cp->regs + REG_RX_CFG); 3877 3878 /* program header parser */ 3879 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) || 3880 (CAS_HP_ALT_FIRMWARE == cas_prog_null)) { 3881 cas_load_firmware(cp, CAS_HP_FIRMWARE); 3882 } else { 3883 cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE); 3884 } 3885 3886 /* clear out error registers */ 3887 spin_lock(&cp->stat_lock[N_TX_RINGS]); 3888 cas_clear_mac_err(cp); 3889 spin_unlock(&cp->stat_lock[N_TX_RINGS]); 3890} 3891 3892/* Shut down the chip, must be called with pm_mutex held. */ 3893static void cas_shutdown(struct cas *cp) 3894{ 3895 unsigned long flags; 3896 3897 /* Make us not-running to avoid timers respawning */ 3898 cp->hw_running = 0; 3899 3900 del_timer_sync(&cp->link_timer); 3901 3902 /* Stop the reset task */ 3903#if 0 3904 while (atomic_read(&cp->reset_task_pending_mtu) || 3905 atomic_read(&cp->reset_task_pending_spare) || 3906 atomic_read(&cp->reset_task_pending_all)) 3907 schedule(); 3908 3909#else 3910 while (atomic_read(&cp->reset_task_pending)) 3911 schedule(); 3912#endif 3913 /* Actually stop the chip */ 3914 cas_lock_all_save(cp, flags); 3915 cas_reset(cp, 0); 3916 if (cp->cas_flags & CAS_FLAG_SATURN) 3917 cas_phy_powerdown(cp); 3918 cas_unlock_all_restore(cp, flags); 3919} 3920 3921static int cas_change_mtu(struct net_device *dev, int new_mtu) 3922{ 3923 struct cas *cp = netdev_priv(dev); 3924 3925 if (new_mtu < CAS_MIN_MTU || new_mtu > CAS_MAX_MTU) 3926 return -EINVAL; 3927 3928 dev->mtu = new_mtu; 3929 if (!netif_running(dev) || !netif_device_present(dev)) 3930 return 0; 3931 3932 /* let the reset task handle it */ 3933#if 1 3934 atomic_inc(&cp->reset_task_pending); 3935 if ((cp->phy_type & CAS_PHY_SERDES)) { 3936 atomic_inc(&cp->reset_task_pending_all); 3937 } else { 3938 atomic_inc(&cp->reset_task_pending_mtu); 3939 } 3940 schedule_work(&cp->reset_task); 3941#else 3942 atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ? 3943 CAS_RESET_ALL : CAS_RESET_MTU); 3944 printk(KERN_ERR "reset called in cas_change_mtu\n"); 3945 schedule_work(&cp->reset_task); 3946#endif 3947 3948 flush_scheduled_work(); 3949 return 0; 3950} 3951 3952static void cas_clean_txd(struct cas *cp, int ring) 3953{ 3954 struct cas_tx_desc *txd = cp->init_txds[ring]; 3955 struct sk_buff *skb, **skbs = cp->tx_skbs[ring]; 3956 u64 daddr, dlen; 3957 int i, size; 3958 3959 size = TX_DESC_RINGN_SIZE(ring); 3960 for (i = 0; i < size; i++) { 3961 int frag; 3962 3963 if (skbs[i] == NULL) 3964 continue; 3965 3966 skb = skbs[i]; 3967 skbs[i] = NULL; 3968 3969 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { 3970 int ent = i & (size - 1); 3971 3972 /* first buffer is never a tiny buffer and so 3973 * needs to be unmapped. 3974 */ 3975 daddr = le64_to_cpu(txd[ent].buffer); 3976 dlen = CAS_VAL(TX_DESC_BUFLEN, 3977 le64_to_cpu(txd[ent].control)); 3978 pci_unmap_page(cp->pdev, daddr, dlen, 3979 PCI_DMA_TODEVICE); 3980 3981 if (frag != skb_shinfo(skb)->nr_frags) { 3982 i++; 3983 3984 /* next buffer might by a tiny buffer. 3985 * skip past it. 3986 */ 3987 ent = i & (size - 1); 3988 if (cp->tx_tiny_use[ring][ent].used) 3989 i++; 3990 } 3991 } 3992 dev_kfree_skb_any(skb); 3993 } 3994 3995 /* zero out tiny buf usage */ 3996 memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring])); 3997} 3998 3999/* freed on close */ 4000static inline void cas_free_rx_desc(struct cas *cp, int ring) 4001{ 4002 cas_page_t **page = cp->rx_pages[ring]; 4003 int i, size; 4004 4005 size = RX_DESC_RINGN_SIZE(ring); 4006 for (i = 0; i < size; i++) { 4007 if (page[i]) { 4008 cas_page_free(cp, page[i]); 4009 page[i] = NULL; 4010 } 4011 } 4012} 4013 4014static void cas_free_rxds(struct cas *cp) 4015{ 4016 int i; 4017 4018 for (i = 0; i < N_RX_DESC_RINGS; i++) 4019 cas_free_rx_desc(cp, i); 4020} 4021 4022/* Must be invoked under cp->lock. */ 4023static void cas_clean_rings(struct cas *cp) 4024{ 4025 int i; 4026 4027 /* need to clean all tx rings */ 4028 memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS); 4029 memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS); 4030 for (i = 0; i < N_TX_RINGS; i++) 4031 cas_clean_txd(cp, i); 4032 4033 /* zero out init block */ 4034 memset(cp->init_block, 0, sizeof(struct cas_init_block)); 4035 cas_clean_rxds(cp); 4036 cas_clean_rxcs(cp); 4037} 4038 4039/* allocated on open */ 4040static inline int cas_alloc_rx_desc(struct cas *cp, int ring) 4041{ 4042 cas_page_t **page = cp->rx_pages[ring]; 4043 int size, i = 0; 4044 4045 size = RX_DESC_RINGN_SIZE(ring); 4046 for (i = 0; i < size; i++) { 4047 if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL) 4048 return -1; 4049 } 4050 return 0; 4051} 4052 4053static int cas_alloc_rxds(struct cas *cp) 4054{ 4055 int i; 4056 4057 for (i = 0; i < N_RX_DESC_RINGS; i++) { 4058 if (cas_alloc_rx_desc(cp, i) < 0) { 4059 cas_free_rxds(cp); 4060 return -1; 4061 } 4062 } 4063 return 0; 4064} 4065 4066static void cas_reset_task(struct work_struct *work) 4067{ 4068 struct cas *cp = container_of(work, struct cas, reset_task); 4069#if 0 4070 int pending = atomic_read(&cp->reset_task_pending); 4071#else 4072 int pending_all = atomic_read(&cp->reset_task_pending_all); 4073 int pending_spare = atomic_read(&cp->reset_task_pending_spare); 4074 int pending_mtu = atomic_read(&cp->reset_task_pending_mtu); 4075 4076 if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) { 4077 /* We can have more tasks scheduled than actually 4078 * needed. 4079 */ 4080 atomic_dec(&cp->reset_task_pending); 4081 return; 4082 } 4083#endif 4084 /* The link went down, we reset the ring, but keep 4085 * DMA stopped. Use this function for reset 4086 * on error as well. 4087 */ 4088 if (cp->hw_running) { 4089 unsigned long flags; 4090 4091 /* Make sure we don't get interrupts or tx packets */ 4092 netif_device_detach(cp->dev); 4093 cas_lock_all_save(cp, flags); 4094 4095 if (cp->opened) { 4096 /* We call cas_spare_recover when we call cas_open. 4097 * but we do not initialize the lists cas_spare_recover 4098 * uses until cas_open is called. 4099 */ 4100 cas_spare_recover(cp, GFP_ATOMIC); 4101 } 4102#if 1 4103 /* test => only pending_spare set */ 4104 if (!pending_all && !pending_mtu) 4105 goto done; 4106#else 4107 if (pending == CAS_RESET_SPARE) 4108 goto done; 4109#endif 4110 /* when pending == CAS_RESET_ALL, the following 4111 * call to cas_init_hw will restart auto negotiation. 4112 * Setting the second argument of cas_reset to 4113 * !(pending == CAS_RESET_ALL) will set this argument 4114 * to 1 (avoiding reinitializing the PHY for the normal 4115 * PCS case) when auto negotiation is not restarted. 4116 */ 4117#if 1 4118 cas_reset(cp, !(pending_all > 0)); 4119 if (cp->opened) 4120 cas_clean_rings(cp); 4121 cas_init_hw(cp, (pending_all > 0)); 4122#else 4123 cas_reset(cp, !(pending == CAS_RESET_ALL)); 4124 if (cp->opened) 4125 cas_clean_rings(cp); 4126 cas_init_hw(cp, pending == CAS_RESET_ALL); 4127#endif 4128 4129done: 4130 cas_unlock_all_restore(cp, flags); 4131 netif_device_attach(cp->dev); 4132 } 4133#if 1 4134 atomic_sub(pending_all, &cp->reset_task_pending_all); 4135 atomic_sub(pending_spare, &cp->reset_task_pending_spare); 4136 atomic_sub(pending_mtu, &cp->reset_task_pending_mtu); 4137 atomic_dec(&cp->reset_task_pending); 4138#else 4139 atomic_set(&cp->reset_task_pending, 0); 4140#endif 4141} 4142 4143static void cas_link_timer(unsigned long data) 4144{ 4145 struct cas *cp = (struct cas *) data; 4146 int mask, pending = 0, reset = 0; 4147 unsigned long flags; 4148 4149 if (link_transition_timeout != 0 && 4150 cp->link_transition_jiffies_valid && 4151 ((jiffies - cp->link_transition_jiffies) > 4152 (link_transition_timeout))) { 4153 /* One-second counter so link-down workaround doesn't 4154 * cause resets to occur so fast as to fool the switch 4155 * into thinking the link is down. 4156 */ 4157 cp->link_transition_jiffies_valid = 0; 4158 } 4159 4160 if (!cp->hw_running) 4161 return; 4162 4163 spin_lock_irqsave(&cp->lock, flags); 4164 cas_lock_tx(cp); 4165 cas_entropy_gather(cp); 4166 4167 /* If the link task is still pending, we just 4168 * reschedule the link timer 4169 */ 4170#if 1 4171 if (atomic_read(&cp->reset_task_pending_all) || 4172 atomic_read(&cp->reset_task_pending_spare) || 4173 atomic_read(&cp->reset_task_pending_mtu)) 4174 goto done; 4175#else 4176 if (atomic_read(&cp->reset_task_pending)) 4177 goto done; 4178#endif 4179 4180 /* check for rx cleaning */ 4181 if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) { 4182 int i, rmask; 4183 4184 for (i = 0; i < MAX_RX_DESC_RINGS; i++) { 4185 rmask = CAS_FLAG_RXD_POST(i); 4186 if ((mask & rmask) == 0) 4187 continue; 4188 4189 /* post_rxds will do a mod_timer */ 4190 if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) { 4191 pending = 1; 4192 continue; 4193 } 4194 cp->cas_flags &= ~rmask; 4195 } 4196 } 4197 4198 if (CAS_PHY_MII(cp->phy_type)) { 4199 u16 bmsr; 4200 cas_mif_poll(cp, 0); 4201 bmsr = cas_phy_read(cp, MII_BMSR); 4202 /* WTZ: Solaris driver reads this twice, but that 4203 * may be due to the PCS case and the use of a 4204 * common implementation. Read it twice here to be 4205 * safe. 4206 */ 4207 bmsr = cas_phy_read(cp, MII_BMSR); 4208 cas_mif_poll(cp, 1); 4209 readl(cp->regs + REG_MIF_STATUS); /* avoid dups */ 4210 reset = cas_mii_link_check(cp, bmsr); 4211 } else { 4212 reset = cas_pcs_link_check(cp); 4213 } 4214 4215 if (reset) 4216 goto done; 4217 4218 /* check for tx state machine confusion */ 4219 if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) { 4220 u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE); 4221 u32 wptr, rptr; 4222 int tlm = CAS_VAL(MAC_SM_TLM, val); 4223 4224 if (((tlm == 0x5) || (tlm == 0x3)) && 4225 (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) { 4226 if (netif_msg_tx_err(cp)) 4227 printk(KERN_DEBUG "%s: tx err: " 4228 "MAC_STATE[%08x]\n", 4229 cp->dev->name, val); 4230 reset = 1; 4231 goto done; 4232 } 4233 4234 val = readl(cp->regs + REG_TX_FIFO_PKT_CNT); 4235 wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR); 4236 rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR); 4237 if ((val == 0) && (wptr != rptr)) { 4238 if (netif_msg_tx_err(cp)) 4239 printk(KERN_DEBUG "%s: tx err: " 4240 "TX_FIFO[%08x:%08x:%08x]\n", 4241 cp->dev->name, val, wptr, rptr); 4242 reset = 1; 4243 } 4244 4245 if (reset) 4246 cas_hard_reset(cp); 4247 } 4248 4249done: 4250 if (reset) { 4251#if 1 4252 atomic_inc(&cp->reset_task_pending); 4253 atomic_inc(&cp->reset_task_pending_all); 4254 schedule_work(&cp->reset_task); 4255#else 4256 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); 4257 printk(KERN_ERR "reset called in cas_link_timer\n"); 4258 schedule_work(&cp->reset_task); 4259#endif 4260 } 4261 4262 if (!pending) 4263 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT); 4264 cas_unlock_tx(cp); 4265 spin_unlock_irqrestore(&cp->lock, flags); 4266} 4267 4268/* tiny buffers are used to avoid target abort issues with 4269 * older cassini's 4270 */ 4271static void cas_tx_tiny_free(struct cas *cp) 4272{ 4273 struct pci_dev *pdev = cp->pdev; 4274 int i; 4275 4276 for (i = 0; i < N_TX_RINGS; i++) { 4277 if (!cp->tx_tiny_bufs[i]) 4278 continue; 4279 4280 pci_free_consistent(pdev, TX_TINY_BUF_BLOCK, 4281 cp->tx_tiny_bufs[i], 4282 cp->tx_tiny_dvma[i]); 4283 cp->tx_tiny_bufs[i] = NULL; 4284 } 4285} 4286 4287static int cas_tx_tiny_alloc(struct cas *cp) 4288{ 4289 struct pci_dev *pdev = cp->pdev; 4290 int i; 4291 4292 for (i = 0; i < N_TX_RINGS; i++) { 4293 cp->tx_tiny_bufs[i] = 4294 pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK, 4295 &cp->tx_tiny_dvma[i]); 4296 if (!cp->tx_tiny_bufs[i]) { 4297 cas_tx_tiny_free(cp); 4298 return -1; 4299 } 4300 } 4301 return 0; 4302} 4303 4304 4305static int cas_open(struct net_device *dev) 4306{ 4307 struct cas *cp = netdev_priv(dev); 4308 int hw_was_up, err; 4309 unsigned long flags; 4310 4311 mutex_lock(&cp->pm_mutex); 4312 4313 hw_was_up = cp->hw_running; 4314 4315 /* The power-management mutex protects the hw_running 4316 * etc. state so it is safe to do this bit without cp->lock 4317 */ 4318 if (!cp->hw_running) { 4319 /* Reset the chip */ 4320 cas_lock_all_save(cp, flags); 4321 /* We set the second arg to cas_reset to zero 4322 * because cas_init_hw below will have its second 4323 * argument set to non-zero, which will force 4324 * autonegotiation to start. 4325 */ 4326 cas_reset(cp, 0); 4327 cp->hw_running = 1; 4328 cas_unlock_all_restore(cp, flags); 4329 } 4330 4331 if (cas_tx_tiny_alloc(cp) < 0) 4332 return -ENOMEM; 4333 4334 /* alloc rx descriptors */ 4335 err = -ENOMEM; 4336 if (cas_alloc_rxds(cp) < 0) 4337 goto err_tx_tiny; 4338 4339 /* allocate spares */ 4340 cas_spare_init(cp); 4341 cas_spare_recover(cp, GFP_KERNEL); 4342 4343 /* We can now request the interrupt as we know it's masked 4344 * on the controller. cassini+ has up to 4 interrupts 4345 * that can be used, but you need to do explicit pci interrupt 4346 * mapping to expose them 4347 */ 4348 if (request_irq(cp->pdev->irq, cas_interrupt, 4349 IRQF_SHARED, dev->name, (void *) dev)) { 4350 printk(KERN_ERR "%s: failed to request irq !\n", 4351 cp->dev->name); 4352 err = -EAGAIN; 4353 goto err_spare; 4354 } 4355 4356 /* init hw */ 4357 cas_lock_all_save(cp, flags); 4358 cas_clean_rings(cp); 4359 cas_init_hw(cp, !hw_was_up); 4360 cp->opened = 1; 4361 cas_unlock_all_restore(cp, flags); 4362 4363 netif_start_queue(dev); 4364 mutex_unlock(&cp->pm_mutex); 4365 return 0; 4366 4367err_spare: 4368 cas_spare_free(cp); 4369 cas_free_rxds(cp); 4370err_tx_tiny: 4371 cas_tx_tiny_free(cp); 4372 mutex_unlock(&cp->pm_mutex); 4373 return err; 4374} 4375 4376static int cas_close(struct net_device *dev) 4377{ 4378 unsigned long flags; 4379 struct cas *cp = netdev_priv(dev); 4380 4381 /* Make sure we don't get distracted by suspend/resume */ 4382 mutex_lock(&cp->pm_mutex); 4383 4384 netif_stop_queue(dev); 4385 4386 /* Stop traffic, mark us closed */ 4387 cas_lock_all_save(cp, flags); 4388 cp->opened = 0; 4389 cas_reset(cp, 0); 4390 cas_phy_init(cp); 4391 cas_begin_auto_negotiation(cp, NULL); 4392 cas_clean_rings(cp); 4393 cas_unlock_all_restore(cp, flags); 4394 4395 free_irq(cp->pdev->irq, (void *) dev); 4396 cas_spare_free(cp); 4397 cas_free_rxds(cp); 4398 cas_tx_tiny_free(cp); 4399 mutex_unlock(&cp->pm_mutex); 4400 return 0; 4401} 4402 4403static struct { 4404 const char name[ETH_GSTRING_LEN]; 4405} ethtool_cassini_statnames[] = { 4406 {"collisions"}, 4407 {"rx_bytes"}, 4408 {"rx_crc_errors"}, 4409 {"rx_dropped"}, 4410 {"rx_errors"}, 4411 {"rx_fifo_errors"}, 4412 {"rx_frame_errors"}, 4413 {"rx_length_errors"}, 4414 {"rx_over_errors"}, 4415 {"rx_packets"}, 4416 {"tx_aborted_errors"}, 4417 {"tx_bytes"}, 4418 {"tx_dropped"}, 4419 {"tx_errors"}, 4420 {"tx_fifo_errors"}, 4421 {"tx_packets"} 4422}; 4423#define CAS_NUM_STAT_KEYS (sizeof(ethtool_cassini_statnames)/ETH_GSTRING_LEN) 4424 4425static struct { 4426 const int offsets; /* neg. values for 2nd arg to cas_read_phy */ 4427} ethtool_register_table[] = { 4428 {-MII_BMSR}, 4429 {-MII_BMCR}, 4430 {REG_CAWR}, 4431 {REG_INF_BURST}, 4432 {REG_BIM_CFG}, 4433 {REG_RX_CFG}, 4434 {REG_HP_CFG}, 4435 {REG_MAC_TX_CFG}, 4436 {REG_MAC_RX_CFG}, 4437 {REG_MAC_CTRL_CFG}, 4438 {REG_MAC_XIF_CFG}, 4439 {REG_MIF_CFG}, 4440 {REG_PCS_CFG}, 4441 {REG_SATURN_PCFG}, 4442 {REG_PCS_MII_STATUS}, 4443 {REG_PCS_STATE_MACHINE}, 4444 {REG_MAC_COLL_EXCESS}, 4445 {REG_MAC_COLL_LATE} 4446}; 4447#define CAS_REG_LEN (sizeof(ethtool_register_table)/sizeof(int)) 4448#define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN) 4449 4450static void cas_read_regs(struct cas *cp, u8 *ptr, int len) 4451{ 4452 u8 *p; 4453 int i; 4454 unsigned long flags; 4455 4456 spin_lock_irqsave(&cp->lock, flags); 4457 for (i = 0, p = ptr; i < len ; i ++, p += sizeof(u32)) { 4458 u16 hval; 4459 u32 val; 4460 if (ethtool_register_table[i].offsets < 0) { 4461 hval = cas_phy_read(cp, 4462 -ethtool_register_table[i].offsets); 4463 val = hval; 4464 } else { 4465 val= readl(cp->regs+ethtool_register_table[i].offsets); 4466 } 4467 memcpy(p, (u8 *)&val, sizeof(u32)); 4468 } 4469 spin_unlock_irqrestore(&cp->lock, flags); 4470} 4471 4472static struct net_device_stats *cas_get_stats(struct net_device *dev) 4473{ 4474 struct cas *cp = netdev_priv(dev); 4475 struct net_device_stats *stats = cp->net_stats; 4476 unsigned long flags; 4477 int i; 4478 unsigned long tmp; 4479 4480 /* we collate all of the stats into net_stats[N_TX_RING] */ 4481 if (!cp->hw_running) 4482 return stats + N_TX_RINGS; 4483 4484 /* collect outstanding stats */ 4485 /* WTZ: the Cassini spec gives these as 16 bit counters but 4486 * stored in 32-bit words. Added a mask of 0xffff to be safe, 4487 * in case the chip somehow puts any garbage in the other bits. 4488 * Also, counter usage didn't seem to mach what Adrian did 4489 * in the parts of the code that set these quantities. Made 4490 * that consistent. 4491 */ 4492 spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags); 4493 stats[N_TX_RINGS].rx_crc_errors += 4494 readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff; 4495 stats[N_TX_RINGS].rx_frame_errors += 4496 readl(cp->regs + REG_MAC_ALIGN_ERR) &0xffff; 4497 stats[N_TX_RINGS].rx_length_errors += 4498 readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff; 4499#if 1 4500 tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) + 4501 (readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff); 4502 stats[N_TX_RINGS].tx_aborted_errors += tmp; 4503 stats[N_TX_RINGS].collisions += 4504 tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff); 4505#else 4506 stats[N_TX_RINGS].tx_aborted_errors += 4507 readl(cp->regs + REG_MAC_COLL_EXCESS); 4508 stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) + 4509 readl(cp->regs + REG_MAC_COLL_LATE); 4510#endif 4511 cas_clear_mac_err(cp); 4512 4513 /* saved bits that are unique to ring 0 */ 4514 spin_lock(&cp->stat_lock[0]); 4515 stats[N_TX_RINGS].collisions += stats[0].collisions; 4516 stats[N_TX_RINGS].rx_over_errors += stats[0].rx_over_errors; 4517 stats[N_TX_RINGS].rx_frame_errors += stats[0].rx_frame_errors; 4518 stats[N_TX_RINGS].rx_fifo_errors += stats[0].rx_fifo_errors; 4519 stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors; 4520 stats[N_TX_RINGS].tx_fifo_errors += stats[0].tx_fifo_errors; 4521 spin_unlock(&cp->stat_lock[0]); 4522 4523 for (i = 0; i < N_TX_RINGS; i++) { 4524 spin_lock(&cp->stat_lock[i]); 4525 stats[N_TX_RINGS].rx_length_errors += 4526 stats[i].rx_length_errors; 4527 stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors; 4528 stats[N_TX_RINGS].rx_packets += stats[i].rx_packets; 4529 stats[N_TX_RINGS].tx_packets += stats[i].tx_packets; 4530 stats[N_TX_RINGS].rx_bytes += stats[i].rx_bytes; 4531 stats[N_TX_RINGS].tx_bytes += stats[i].tx_bytes; 4532 stats[N_TX_RINGS].rx_errors += stats[i].rx_errors; 4533 stats[N_TX_RINGS].tx_errors += stats[i].tx_errors; 4534 stats[N_TX_RINGS].rx_dropped += stats[i].rx_dropped; 4535 stats[N_TX_RINGS].tx_dropped += stats[i].tx_dropped; 4536 memset(stats + i, 0, sizeof(struct net_device_stats)); 4537 spin_unlock(&cp->stat_lock[i]); 4538 } 4539 spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags); 4540 return stats + N_TX_RINGS; 4541} 4542 4543 4544static void cas_set_multicast(struct net_device *dev) 4545{ 4546 struct cas *cp = netdev_priv(dev); 4547 u32 rxcfg, rxcfg_new; 4548 unsigned long flags; 4549 int limit = STOP_TRIES; 4550 4551 if (!cp->hw_running) 4552 return; 4553 4554 spin_lock_irqsave(&cp->lock, flags); 4555 rxcfg = readl(cp->regs + REG_MAC_RX_CFG); 4556 4557 /* disable RX MAC and wait for completion */ 4558 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); 4559 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) { 4560 if (!limit--) 4561 break; 4562 udelay(10); 4563 } 4564 4565 /* disable hash filter and wait for completion */ 4566 limit = STOP_TRIES; 4567 rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN); 4568 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); 4569 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) { 4570 if (!limit--) 4571 break; 4572 udelay(10); 4573 } 4574 4575 /* program hash filters */ 4576 cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp); 4577 rxcfg |= rxcfg_new; 4578 writel(rxcfg, cp->regs + REG_MAC_RX_CFG); 4579 spin_unlock_irqrestore(&cp->lock, flags); 4580} 4581 4582static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 4583{ 4584 struct cas *cp = netdev_priv(dev); 4585 strncpy(info->driver, DRV_MODULE_NAME, ETHTOOL_BUSINFO_LEN); 4586 strncpy(info->version, DRV_MODULE_VERSION, ETHTOOL_BUSINFO_LEN); 4587 info->fw_version[0] = '\0'; 4588 strncpy(info->bus_info, pci_name(cp->pdev), ETHTOOL_BUSINFO_LEN); 4589 info->regdump_len = cp->casreg_len < CAS_MAX_REGS ? 4590 cp->casreg_len : CAS_MAX_REGS; 4591 info->n_stats = CAS_NUM_STAT_KEYS; 4592} 4593 4594static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 4595{ 4596 struct cas *cp = netdev_priv(dev); 4597 u16 bmcr; 4598 int full_duplex, speed, pause; 4599 unsigned long flags; 4600 enum link_state linkstate = link_up; 4601 4602 cmd->advertising = 0; 4603 cmd->supported = SUPPORTED_Autoneg; 4604 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { 4605 cmd->supported |= SUPPORTED_1000baseT_Full; 4606 cmd->advertising |= ADVERTISED_1000baseT_Full; 4607 } 4608 4609 /* Record PHY settings if HW is on. */ 4610 spin_lock_irqsave(&cp->lock, flags); 4611 bmcr = 0; 4612 linkstate = cp->lstate; 4613 if (CAS_PHY_MII(cp->phy_type)) { 4614 cmd->port = PORT_MII; 4615 cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ? 4616 XCVR_INTERNAL : XCVR_EXTERNAL; 4617 cmd->phy_address = cp->phy_addr; 4618 cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII | 4619 ADVERTISED_10baseT_Half | 4620 ADVERTISED_10baseT_Full | 4621 ADVERTISED_100baseT_Half | 4622 ADVERTISED_100baseT_Full; 4623 4624 cmd->supported |= 4625 (SUPPORTED_10baseT_Half | 4626 SUPPORTED_10baseT_Full | 4627 SUPPORTED_100baseT_Half | 4628 SUPPORTED_100baseT_Full | 4629 SUPPORTED_TP | SUPPORTED_MII); 4630 4631 if (cp->hw_running) { 4632 cas_mif_poll(cp, 0); 4633 bmcr = cas_phy_read(cp, MII_BMCR); 4634 cas_read_mii_link_mode(cp, &full_duplex, 4635 &speed, &pause); 4636 cas_mif_poll(cp, 1); 4637 } 4638 4639 } else { 4640 cmd->port = PORT_FIBRE; 4641 cmd->transceiver = XCVR_INTERNAL; 4642 cmd->phy_address = 0; 4643 cmd->supported |= SUPPORTED_FIBRE; 4644 cmd->advertising |= ADVERTISED_FIBRE; 4645 4646 if (cp->hw_running) { 4647 /* pcs uses the same bits as mii */ 4648 bmcr = readl(cp->regs + REG_PCS_MII_CTRL); 4649 cas_read_pcs_link_mode(cp, &full_duplex, 4650 &speed, &pause); 4651 } 4652 } 4653 spin_unlock_irqrestore(&cp->lock, flags); 4654 4655 if (bmcr & BMCR_ANENABLE) { 4656 cmd->advertising |= ADVERTISED_Autoneg; 4657 cmd->autoneg = AUTONEG_ENABLE; 4658 cmd->speed = ((speed == 10) ? 4659 SPEED_10 : 4660 ((speed == 1000) ? 4661 SPEED_1000 : SPEED_100)); 4662 cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF; 4663 } else { 4664 cmd->autoneg = AUTONEG_DISABLE; 4665 cmd->speed = 4666 (bmcr & CAS_BMCR_SPEED1000) ? 4667 SPEED_1000 : 4668 ((bmcr & BMCR_SPEED100) ? SPEED_100: 4669 SPEED_10); 4670 cmd->duplex = 4671 (bmcr & BMCR_FULLDPLX) ? 4672 DUPLEX_FULL : DUPLEX_HALF; 4673 } 4674 if (linkstate != link_up) { 4675 /* Force these to "unknown" if the link is not up and 4676 * autonogotiation in enabled. We can set the link 4677 * speed to 0, but not cmd->duplex, 4678 * because its legal values are 0 and 1. Ethtool will 4679 * print the value reported in parentheses after the 4680 * word "Unknown" for unrecognized values. 4681 * 4682 * If in forced mode, we report the speed and duplex 4683 * settings that we configured. 4684 */ 4685 if (cp->link_cntl & BMCR_ANENABLE) { 4686 cmd->speed = 0; 4687 cmd->duplex = 0xff; 4688 } else { 4689 cmd->speed = SPEED_10; 4690 if (cp->link_cntl & BMCR_SPEED100) { 4691 cmd->speed = SPEED_100; 4692 } else if (cp->link_cntl & CAS_BMCR_SPEED1000) { 4693 cmd->speed = SPEED_1000; 4694 } 4695 cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)? 4696 DUPLEX_FULL : DUPLEX_HALF; 4697 } 4698 } 4699 return 0; 4700} 4701 4702static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 4703{ 4704 struct cas *cp = netdev_priv(dev); 4705 unsigned long flags; 4706 4707 /* Verify the settings we care about. */ 4708 if (cmd->autoneg != AUTONEG_ENABLE && 4709 cmd->autoneg != AUTONEG_DISABLE) 4710 return -EINVAL; 4711 4712 if (cmd->autoneg == AUTONEG_DISABLE && 4713 ((cmd->speed != SPEED_1000 && 4714 cmd->speed != SPEED_100 && 4715 cmd->speed != SPEED_10) || 4716 (cmd->duplex != DUPLEX_HALF && 4717 cmd->duplex != DUPLEX_FULL))) 4718 return -EINVAL; 4719 4720 /* Apply settings and restart link process. */ 4721 spin_lock_irqsave(&cp->lock, flags); 4722 cas_begin_auto_negotiation(cp, cmd); 4723 spin_unlock_irqrestore(&cp->lock, flags); 4724 return 0; 4725} 4726 4727static int cas_nway_reset(struct net_device *dev) 4728{ 4729 struct cas *cp = netdev_priv(dev); 4730 unsigned long flags; 4731 4732 if ((cp->link_cntl & BMCR_ANENABLE) == 0) 4733 return -EINVAL; 4734 4735 /* Restart link process. */ 4736 spin_lock_irqsave(&cp->lock, flags); 4737 cas_begin_auto_negotiation(cp, NULL); 4738 spin_unlock_irqrestore(&cp->lock, flags); 4739 4740 return 0; 4741} 4742 4743static u32 cas_get_link(struct net_device *dev) 4744{ 4745 struct cas *cp = netdev_priv(dev); 4746 return cp->lstate == link_up; 4747} 4748 4749static u32 cas_get_msglevel(struct net_device *dev) 4750{ 4751 struct cas *cp = netdev_priv(dev); 4752 return cp->msg_enable; 4753} 4754 4755static void cas_set_msglevel(struct net_device *dev, u32 value) 4756{ 4757 struct cas *cp = netdev_priv(dev); 4758 cp->msg_enable = value; 4759} 4760 4761static int cas_get_regs_len(struct net_device *dev) 4762{ 4763 struct cas *cp = netdev_priv(dev); 4764 return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len: CAS_MAX_REGS; 4765} 4766 4767static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs, 4768 void *p) 4769{ 4770 struct cas *cp = netdev_priv(dev); 4771 regs->version = 0; 4772 /* cas_read_regs handles locks (cp->lock). */ 4773 cas_read_regs(cp, p, regs->len / sizeof(u32)); 4774} 4775 4776static int cas_get_stats_count(struct net_device *dev) 4777{ 4778 return CAS_NUM_STAT_KEYS; 4779} 4780 4781static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data) 4782{ 4783 memcpy(data, &ethtool_cassini_statnames, 4784 CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN); 4785} 4786 4787static void cas_get_ethtool_stats(struct net_device *dev, 4788 struct ethtool_stats *estats, u64 *data) 4789{ 4790 struct cas *cp = netdev_priv(dev); 4791 struct net_device_stats *stats = cas_get_stats(cp->dev); 4792 int i = 0; 4793 data[i++] = stats->collisions; 4794 data[i++] = stats->rx_bytes; 4795 data[i++] = stats->rx_crc_errors; 4796 data[i++] = stats->rx_dropped; 4797 data[i++] = stats->rx_errors; 4798 data[i++] = stats->rx_fifo_errors; 4799 data[i++] = stats->rx_frame_errors; 4800 data[i++] = stats->rx_length_errors; 4801 data[i++] = stats->rx_over_errors; 4802 data[i++] = stats->rx_packets; 4803 data[i++] = stats->tx_aborted_errors; 4804 data[i++] = stats->tx_bytes; 4805 data[i++] = stats->tx_dropped; 4806 data[i++] = stats->tx_errors; 4807 data[i++] = stats->tx_fifo_errors; 4808 data[i++] = stats->tx_packets; 4809 BUG_ON(i != CAS_NUM_STAT_KEYS); 4810} 4811 4812static const struct ethtool_ops cas_ethtool_ops = { 4813 .get_drvinfo = cas_get_drvinfo, 4814 .get_settings = cas_get_settings, 4815 .set_settings = cas_set_settings, 4816 .nway_reset = cas_nway_reset, 4817 .get_link = cas_get_link, 4818 .get_msglevel = cas_get_msglevel, 4819 .set_msglevel = cas_set_msglevel, 4820 .get_regs_len = cas_get_regs_len, 4821 .get_regs = cas_get_regs, 4822 .get_stats_count = cas_get_stats_count, 4823 .get_strings = cas_get_strings, 4824 .get_ethtool_stats = cas_get_ethtool_stats, 4825}; 4826 4827static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 4828{ 4829 struct cas *cp = netdev_priv(dev); 4830 struct mii_ioctl_data *data = if_mii(ifr); 4831 unsigned long flags; 4832 int rc = -EOPNOTSUPP; 4833 4834 /* Hold the PM mutex while doing ioctl's or we may collide 4835 * with open/close and power management and oops. 4836 */ 4837 mutex_lock(&cp->pm_mutex); 4838 switch (cmd) { 4839 case SIOCGMIIPHY: /* Get address of MII PHY in use. */ 4840 data->phy_id = cp->phy_addr; 4841 /* Fallthrough... */ 4842 4843 case SIOCGMIIREG: /* Read MII PHY register. */ 4844 spin_lock_irqsave(&cp->lock, flags); 4845 cas_mif_poll(cp, 0); 4846 data->val_out = cas_phy_read(cp, data->reg_num & 0x1f); 4847 cas_mif_poll(cp, 1); 4848 spin_unlock_irqrestore(&cp->lock, flags); 4849 rc = 0; 4850 break; 4851 4852 case SIOCSMIIREG: /* Write MII PHY register. */ 4853 if (!capable(CAP_NET_ADMIN)) { 4854 rc = -EPERM; 4855 break; 4856 } 4857 spin_lock_irqsave(&cp->lock, flags); 4858 cas_mif_poll(cp, 0); 4859 rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in); 4860 cas_mif_poll(cp, 1); 4861 spin_unlock_irqrestore(&cp->lock, flags); 4862 break; 4863 default: 4864 break; 4865 }; 4866 4867 mutex_unlock(&cp->pm_mutex); 4868 return rc; 4869} 4870 4871static int __devinit cas_init_one(struct pci_dev *pdev, 4872 const struct pci_device_id *ent) 4873{ 4874 static int cas_version_printed = 0; 4875 unsigned long casreg_len; 4876 struct net_device *dev; 4877 struct cas *cp; 4878 int i, err, pci_using_dac; 4879 u16 pci_cmd; 4880 u8 orig_cacheline_size = 0, cas_cacheline_size = 0; 4881 4882 if (cas_version_printed++ == 0) 4883 printk(KERN_INFO "%s", version); 4884 4885 err = pci_enable_device(pdev); 4886 if (err) { 4887 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n"); 4888 return err; 4889 } 4890 4891 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 4892 dev_err(&pdev->dev, "Cannot find proper PCI device " 4893 "base address, aborting.\n"); 4894 err = -ENODEV; 4895 goto err_out_disable_pdev; 4896 } 4897 4898 dev = alloc_etherdev(sizeof(*cp)); 4899 if (!dev) { 4900 dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n"); 4901 err = -ENOMEM; 4902 goto err_out_disable_pdev; 4903 } 4904 SET_MODULE_OWNER(dev); 4905 SET_NETDEV_DEV(dev, &pdev->dev); 4906 4907 err = pci_request_regions(pdev, dev->name); 4908 if (err) { 4909 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n"); 4910 goto err_out_free_netdev; 4911 } 4912 pci_set_master(pdev); 4913 4914 /* we must always turn on parity response or else parity 4915 * doesn't get generated properly. disable SERR/PERR as well. 4916 * in addition, we want to turn MWI on. 4917 */ 4918 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); 4919 pci_cmd &= ~PCI_COMMAND_SERR; 4920 pci_cmd |= PCI_COMMAND_PARITY; 4921 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); 4922 pci_set_mwi(pdev); 4923 /* 4924 * On some architectures, the default cache line size set 4925 * by pci_set_mwi reduces perforamnce. We have to increase 4926 * it for this case. To start, we'll print some configuration 4927 * data. 4928 */ 4929#if 1 4930 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, 4931 &orig_cacheline_size); 4932 if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) { 4933 cas_cacheline_size = 4934 (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ? 4935 CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES; 4936 if (pci_write_config_byte(pdev, 4937 PCI_CACHE_LINE_SIZE, 4938 cas_cacheline_size)) { 4939 dev_err(&pdev->dev, "Could not set PCI cache " 4940 "line size\n"); 4941 goto err_write_cacheline; 4942 } 4943 } 4944#endif 4945 4946 4947 /* Configure DMA attributes. */ 4948 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 4949 pci_using_dac = 1; 4950 err = pci_set_consistent_dma_mask(pdev, 4951 DMA_64BIT_MASK); 4952 if (err < 0) { 4953 dev_err(&pdev->dev, "Unable to obtain 64-bit DMA " 4954 "for consistent allocations\n"); 4955 goto err_out_free_res; 4956 } 4957 4958 } else { 4959 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 4960 if (err) { 4961 dev_err(&pdev->dev, "No usable DMA configuration, " 4962 "aborting.\n"); 4963 goto err_out_free_res; 4964 } 4965 pci_using_dac = 0; 4966 } 4967 4968 casreg_len = pci_resource_len(pdev, 0); 4969 4970 cp = netdev_priv(dev); 4971 cp->pdev = pdev; 4972#if 1 4973 /* A value of 0 indicates we never explicitly set it */ 4974 cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size: 0; 4975#endif 4976 cp->dev = dev; 4977 cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE : 4978 cassini_debug; 4979 4980 cp->link_transition = LINK_TRANSITION_UNKNOWN; 4981 cp->link_transition_jiffies_valid = 0; 4982 4983 spin_lock_init(&cp->lock); 4984 spin_lock_init(&cp->rx_inuse_lock); 4985 spin_lock_init(&cp->rx_spare_lock); 4986 for (i = 0; i < N_TX_RINGS; i++) { 4987 spin_lock_init(&cp->stat_lock[i]); 4988 spin_lock_init(&cp->tx_lock[i]); 4989 } 4990 spin_lock_init(&cp->stat_lock[N_TX_RINGS]); 4991 mutex_init(&cp->pm_mutex); 4992 4993 init_timer(&cp->link_timer); 4994 cp->link_timer.function = cas_link_timer; 4995 cp->link_timer.data = (unsigned long) cp; 4996 4997#if 1 4998 /* Just in case the implementation of atomic operations 4999 * change so that an explicit initialization is necessary. 5000 */ 5001 atomic_set(&cp->reset_task_pending, 0); 5002 atomic_set(&cp->reset_task_pending_all, 0); 5003 atomic_set(&cp->reset_task_pending_spare, 0); 5004 atomic_set(&cp->reset_task_pending_mtu, 0); 5005#endif 5006 INIT_WORK(&cp->reset_task, cas_reset_task); 5007 5008 /* Default link parameters */ 5009 if (link_mode >= 0 && link_mode <= 6) 5010 cp->link_cntl = link_modes[link_mode]; 5011 else 5012 cp->link_cntl = BMCR_ANENABLE; 5013 cp->lstate = link_down; 5014 cp->link_transition = LINK_TRANSITION_LINK_DOWN; 5015 netif_carrier_off(cp->dev); 5016 cp->timer_ticks = 0; 5017 5018 /* give us access to cassini registers */ 5019 cp->regs = pci_iomap(pdev, 0, casreg_len); 5020 if (cp->regs == 0UL) { 5021 dev_err(&pdev->dev, "Cannot map device registers, aborting.\n"); 5022 goto err_out_free_res; 5023 } 5024 cp->casreg_len = casreg_len; 5025 5026 pci_save_state(pdev); 5027 cas_check_pci_invariants(cp); 5028 cas_hard_reset(cp); 5029 cas_reset(cp, 0); 5030 if (cas_check_invariants(cp)) 5031 goto err_out_iounmap; 5032 5033 cp->init_block = (struct cas_init_block *) 5034 pci_alloc_consistent(pdev, sizeof(struct cas_init_block), 5035 &cp->block_dvma); 5036 if (!cp->init_block) { 5037 dev_err(&pdev->dev, "Cannot allocate init block, aborting.\n"); 5038 goto err_out_iounmap; 5039 } 5040 5041 for (i = 0; i < N_TX_RINGS; i++) 5042 cp->init_txds[i] = cp->init_block->txds[i]; 5043 5044 for (i = 0; i < N_RX_DESC_RINGS; i++) 5045 cp->init_rxds[i] = cp->init_block->rxds[i]; 5046 5047 for (i = 0; i < N_RX_COMP_RINGS; i++) 5048 cp->init_rxcs[i] = cp->init_block->rxcs[i]; 5049 5050 for (i = 0; i < N_RX_FLOWS; i++) 5051 skb_queue_head_init(&cp->rx_flows[i]); 5052 5053 dev->open = cas_open; 5054 dev->stop = cas_close; 5055 dev->hard_start_xmit = cas_start_xmit; 5056 dev->get_stats = cas_get_stats; 5057 dev->set_multicast_list = cas_set_multicast; 5058 dev->do_ioctl = cas_ioctl; 5059 dev->ethtool_ops = &cas_ethtool_ops; 5060 dev->tx_timeout = cas_tx_timeout; 5061 dev->watchdog_timeo = CAS_TX_TIMEOUT; 5062 dev->change_mtu = cas_change_mtu; 5063#ifdef USE_NAPI 5064 dev->poll = cas_poll; 5065 dev->weight = 64; 5066#endif 5067#ifdef CONFIG_NET_POLL_CONTROLLER 5068 dev->poll_controller = cas_netpoll; 5069#endif 5070 dev->irq = pdev->irq; 5071 dev->dma = 0; 5072 5073 /* Cassini features. */ 5074 if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0) 5075 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; 5076 5077 if (pci_using_dac) 5078 dev->features |= NETIF_F_HIGHDMA; 5079 5080 if (register_netdev(dev)) { 5081 dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); 5082 goto err_out_free_consistent; 5083 } 5084 5085 i = readl(cp->regs + REG_BIM_CFG); 5086 printk(KERN_INFO "%s: Sun Cassini%s (%sbit/%sMHz PCI/%s) " 5087 "Ethernet[%d] ", dev->name, 5088 (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "", 5089 (i & BIM_CFG_32BIT) ? "32" : "64", 5090 (i & BIM_CFG_66MHZ) ? "66" : "33", 5091 (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq); 5092 5093 for (i = 0; i < 6; i++) 5094 printk("%2.2x%c", dev->dev_addr[i], 5095 i == 5 ? ' ' : ':'); 5096 printk("\n"); 5097 5098 pci_set_drvdata(pdev, dev); 5099 cp->hw_running = 1; 5100 cas_entropy_reset(cp); 5101 cas_phy_init(cp); 5102 cas_begin_auto_negotiation(cp, NULL); 5103 return 0; 5104 5105err_out_free_consistent: 5106 pci_free_consistent(pdev, sizeof(struct cas_init_block), 5107 cp->init_block, cp->block_dvma); 5108 5109err_out_iounmap: 5110 mutex_lock(&cp->pm_mutex); 5111 if (cp->hw_running) 5112 cas_shutdown(cp); 5113 mutex_unlock(&cp->pm_mutex); 5114 5115 pci_iounmap(pdev, cp->regs); 5116 5117 5118err_out_free_res: 5119 pci_release_regions(pdev); 5120 5121err_write_cacheline: 5122 /* Try to restore it in case the error occured after we 5123 * set it. 5124 */ 5125 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size); 5126 5127err_out_free_netdev: 5128 free_netdev(dev); 5129 5130err_out_disable_pdev: 5131 pci_disable_device(pdev); 5132 pci_set_drvdata(pdev, NULL); 5133 return -ENODEV; 5134} 5135 5136static void __devexit cas_remove_one(struct pci_dev *pdev) 5137{ 5138 struct net_device *dev = pci_get_drvdata(pdev); 5139 struct cas *cp; 5140 if (!dev) 5141 return; 5142 5143 cp = netdev_priv(dev); 5144 unregister_netdev(dev); 5145 5146 mutex_lock(&cp->pm_mutex); 5147 flush_scheduled_work(); 5148 if (cp->hw_running) 5149 cas_shutdown(cp); 5150 mutex_unlock(&cp->pm_mutex); 5151 5152#if 1 5153 if (cp->orig_cacheline_size) { 5154 /* Restore the cache line size if we had modified 5155 * it. 5156 */ 5157 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 5158 cp->orig_cacheline_size); 5159 } 5160#endif 5161 pci_free_consistent(pdev, sizeof(struct cas_init_block), 5162 cp->init_block, cp->block_dvma); 5163 pci_iounmap(pdev, cp->regs); 5164 free_netdev(dev); 5165 pci_release_regions(pdev); 5166 pci_disable_device(pdev); 5167 pci_set_drvdata(pdev, NULL); 5168} 5169 5170#ifdef CONFIG_PM 5171static int cas_suspend(struct pci_dev *pdev, pm_message_t state) 5172{ 5173 struct net_device *dev = pci_get_drvdata(pdev); 5174 struct cas *cp = netdev_priv(dev); 5175 unsigned long flags; 5176 5177 mutex_lock(&cp->pm_mutex); 5178 5179 /* If the driver is opened, we stop the DMA */ 5180 if (cp->opened) { 5181 netif_device_detach(dev); 5182 5183 cas_lock_all_save(cp, flags); 5184 5185 /* We can set the second arg of cas_reset to 0 5186 * because on resume, we'll call cas_init_hw with 5187 * its second arg set so that autonegotiation is 5188 * restarted. 5189 */ 5190 cas_reset(cp, 0); 5191 cas_clean_rings(cp); 5192 cas_unlock_all_restore(cp, flags); 5193 } 5194 5195 if (cp->hw_running) 5196 cas_shutdown(cp); 5197 mutex_unlock(&cp->pm_mutex); 5198 5199 return 0; 5200} 5201 5202static int cas_resume(struct pci_dev *pdev) 5203{ 5204 struct net_device *dev = pci_get_drvdata(pdev); 5205 struct cas *cp = netdev_priv(dev); 5206 5207 printk(KERN_INFO "%s: resuming\n", dev->name); 5208 5209 mutex_lock(&cp->pm_mutex); 5210 cas_hard_reset(cp); 5211 if (cp->opened) { 5212 unsigned long flags; 5213 cas_lock_all_save(cp, flags); 5214 cas_reset(cp, 0); 5215 cp->hw_running = 1; 5216 cas_clean_rings(cp); 5217 cas_init_hw(cp, 1); 5218 cas_unlock_all_restore(cp, flags); 5219 5220 netif_device_attach(dev); 5221 } 5222 mutex_unlock(&cp->pm_mutex); 5223 return 0; 5224} 5225#endif /* CONFIG_PM */ 5226 5227static struct pci_driver cas_driver = { 5228 .name = DRV_MODULE_NAME, 5229 .id_table = cas_pci_tbl, 5230 .probe = cas_init_one, 5231 .remove = __devexit_p(cas_remove_one), 5232#ifdef CONFIG_PM 5233 .suspend = cas_suspend, 5234 .resume = cas_resume 5235#endif 5236}; 5237 5238static int __init cas_init(void) 5239{ 5240 if (linkdown_timeout > 0) 5241 link_transition_timeout = linkdown_timeout * HZ; 5242 else 5243 link_transition_timeout = 0; 5244 5245 return pci_register_driver(&cas_driver); 5246} 5247 5248static void __exit cas_cleanup(void) 5249{ 5250 pci_unregister_driver(&cas_driver); 5251} 5252 5253module_init(cas_init); 5254module_exit(cas_cleanup);