/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
 *
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 *
 * This driver uses the sungem driver (c) David Miller
 * (davem@redhat.com) as its basis.
 *
 * The cassini chip has a number of features that distinguish it from
 * the gem chip:
 *  4 transmit descriptor rings that are used for either QoS (VLAN) or
 *      load balancing (non-VLAN mode)
 *  batching of multiple packets
 *  multiple CPU dispatching
 *  page-based RX descriptor engine with separate completion rings
 *  Gigabit support (GMII and PCS interface)
 *  MIF link up/down detection works
 *
 * RX is handled by page sized buffers that are attached as fragments to
 * the skb. here's what's done:
 *  -- driver allocates pages at a time and keeps reference counts
 *     on them.
 *  -- the upper protocol layers assume that the header is in the skb
 *     itself. as a result, cassini will copy a small amount (64 bytes)
 *     to make them happy.
 *  -- driver appends the rest of the data pages as frags to skbuffs
 *     and increments the reference count
 *  -- on page reclamation, the driver swaps the page with a spare page.
 *     if that page is still in use, it frees its reference to that page,
 *     and allocates a new page for use. otherwise, it just recycles the
 *     page.
 *
 * NOTE: cassini can parse the header. however, it's not worth it
 *       as long as the network stack requires a header copy.
 *
 * TX has 4 queues. currently these queues are used in a round-robin
 * fashion for load balancing. They can also be used for QoS. for that
 * to work, however, QoS information needs to be exposed down to the driver
 * level so that subqueues get targeted to particular transmit rings.
 * alternatively, the queues can be configured via use of the all-purpose
 * ioctl.
 *
 * RX DATA: the rx completion ring has all the info, but the rx desc
 * ring has all of the data. RX can conceivably come in under multiple
 * interrupts, but the INT# assignment needs to be set up properly by
 * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
 * that. also, the two descriptor rings are designed to distinguish between
 * encrypted and non-encrypted packets, but we use them for buffering
 * instead.
 *
 * by default, the selective clear mask is set up to process rx packets.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/mii.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/firmware.h>

#include <net/checksum.h>

#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#define cas_page_map(x)      kmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
#define cas_page_unmap(x)    kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
#define CAS_NCPUS            num_online_cpus()

#ifdef CONFIG_CASSINI_NAPI
#define USE_NAPI
#define cas_skb_release(x)  netif_receive_skb(x)
#else
#define cas_skb_release(x)  netif_rx(x)
#endif

/* select which firmware to use */
#define USE_HP_WORKAROUND
#define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */
#define CAS_HP_ALT_FIRMWARE   cas_prog_null /* alternate firmware */

#include "cassini.h"

#define USE_TX_COMPWB      /* use completion writeback registers */
#define USE_CSMA_CD_PROTO  /* standard CSMA/CD */
#define USE_RX_BLANK       /* hw interrupt mitigation */
#undef USE_ENTROPY_DEV     /* don't test for entropy device */

/* NOTE: these aren't usable unless PCI interrupts can be assigned.
 * also, we need to make cp->lock finer-grained.
 */
#undef  USE_PCI_INTB
#undef  USE_PCI_INTC
#undef  USE_PCI_INTD
#undef  USE_QOS

#undef  USE_VPD_DEBUG       /* debug vpd information if defined */

/* rx processing options */
#define USE_PAGE_ORDER      /* specify to allocate large rx pages */
#define RX_DONT_BATCH  0    /* if 1, don't batch flows */
#define RX_COPY_ALWAYS 0    /* if 0, use frags */
#define RX_COPY_MIN    64   /* copy a little to make upper layers happy */
#undef  RX_COUNT_BUFFERS    /* define to calculate RX buffer stats */

#define DRV_MODULE_NAME		"cassini"
#define DRV_MODULE_VERSION	"1.6"
#define DRV_MODULE_RELDATE	"21 May 2008"

#define CAS_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define CAS_TX_TIMEOUT			(HZ)
#define CAS_LINK_TIMEOUT		(22*HZ/10)
#define CAS_LINK_FAST_TIMEOUT		(1)

/* timeout values for state changing. these specify the number
 * of 10us delays to be used before giving up.
 */
#define STOP_TRIES_PHY 1000
#define STOP_TRIES     5000

/* specify a minimum frame size to deal with some fifo issues
 * max mtu == 2 * page size - ethernet header - 64 - swivel =
 *            2 * page_size - 0x50
 */
#define CAS_MIN_FRAME			97
#define CAS_1000MB_MIN_FRAME		255
#define CAS_MIN_MTU			60
#define CAS_MAX_MTU			min(((cp->page_size << 1) - 0x50), 9000)

#if 1
/*
 * Eliminate these and use separate atomic counters for each, to
 * avoid a race condition.
 */
#else
#define CAS_RESET_MTU			1
#define CAS_RESET_ALL			2
#define CAS_RESET_SPARE			3
#endif

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

static int cassini_debug = -1;	/* -1 == use CAS_DEF_MSG_ENABLE as value */
static int link_mode;

MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("sun/cassini.bin");
module_param(cassini_debug, int, 0);
MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
module_param(link_mode, int, 0);
MODULE_PARM_DESC(link_mode, "default link mode");

/*
 * Work around for a PCS bug in which the link goes down due to the chip
 * being confused and never showing a link status of "up."
 */
#define DEFAULT_LINKDOWN_TIMEOUT 5
/*
 * Value in seconds, for user input.
 */
static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
module_param(linkdown_timeout, int, 0);
MODULE_PARM_DESC(linkdown_timeout,
"min reset interval in sec. for PCS linkdown issue; disabled if not positive");

/*
 * value in 'ticks' (units used by jiffies). Set when we init the
 * module because 'HZ' is actually a function call on some flavors of
 * Linux. This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ.
 */
static int link_transition_timeout;



static u16 link_modes[] __devinitdata = {
	BMCR_ANENABLE,			 /* 0 : autoneg */
	0,				 /* 1 : 10bt half duplex */
	BMCR_SPEED100,			 /* 2 : 100bt half duplex */
	BMCR_FULLDPLX,			 /* 3 : 10bt full duplex */
	BMCR_SPEED100|BMCR_FULLDPLX,	 /* 4 : 100bt full duplex */
	CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
};

static DEFINE_PCI_DEVICE_TABLE(cas_pci_tbl) = {
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, cas_pci_tbl);

static void cas_set_link_modes(struct cas *cp);

static inline void cas_lock_tx(struct cas *cp)
{
	int i;

	for (i = 0; i < N_TX_RINGS; i++)
		spin_lock(&cp->tx_lock[i]);
}

static inline void cas_lock_all(struct cas *cp)
{
	spin_lock_irq(&cp->lock);
	cas_lock_tx(cp);
}

/* WTZ: QA was finding deadlock problems with the previous
 * versions after long test runs with multiple cards per machine.
 * See if replacing cas_lock_all with safer versions helps. The
 * symptoms QA is reporting match those we'd expect if interrupts
 * aren't being properly restored, and we fixed a previous deadlock
 * with similar symptoms by using save/restore versions in other
 * places.
 */
#define cas_lock_all_save(cp, flags) \
do { \
	struct cas *xxxcp = (cp); \
	spin_lock_irqsave(&xxxcp->lock, flags); \
	cas_lock_tx(xxxcp); \
} while (0)
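
/*
 * Locking note, summarizing the helpers above and below: cp->lock is
 * always taken before the per-ring tx_lock[]s, and the tx_lock[]s are
 * taken in ascending ring order. The unlock helpers release the
 * tx_lock[]s in reverse order before dropping cp->lock.
 */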

static inline void cas_unlock_tx(struct cas *cp)
{
	int i;

	for (i = N_TX_RINGS; i > 0; i--)
		spin_unlock(&cp->tx_lock[i - 1]);
}

static inline void cas_unlock_all(struct cas *cp)
{
	cas_unlock_tx(cp);
	spin_unlock_irq(&cp->lock);
}

#define cas_unlock_all_restore(cp, flags) \
do { \
	struct cas *xxxcp = (cp); \
	cas_unlock_tx(xxxcp); \
	spin_unlock_irqrestore(&xxxcp->lock, flags); \
} while (0)

static void cas_disable_irq(struct cas *cp, const int ring)
{
	/* Make sure we won't get any more interrupts */
	if (ring == 0) {
		writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
		return;
	}

	/* disable completion interrupts and selectively mask */
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		switch (ring) {
#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
		case 1:
#endif
#ifdef USE_PCI_INTC
		case 2:
#endif
#ifdef USE_PCI_INTD
		case 3:
#endif
			writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
			       cp->regs + REG_PLUS_INTRN_MASK(ring));
			break;
#endif
		default:
			writel(INTRN_MASK_CLEAR_ALL, cp->regs +
			       REG_PLUS_INTRN_MASK(ring));
			break;
		}
	}
}

static inline void cas_mask_intr(struct cas *cp)
{
	int i;

	for (i = 0; i < N_RX_COMP_RINGS; i++)
		cas_disable_irq(cp, i);
}

static void cas_enable_irq(struct cas *cp, const int ring)
{
	if (ring == 0) { /* all but TX_DONE */
		writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
		return;
	}

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		switch (ring) {
#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
		case 1:
#endif
#ifdef USE_PCI_INTC
		case 2:
#endif
#ifdef USE_PCI_INTD
		case 3:
#endif
			writel(INTRN_MASK_RX_EN, cp->regs +
			       REG_PLUS_INTRN_MASK(ring));
			break;
#endif
		default:
			break;
		}
	}
}

static inline void cas_unmask_intr(struct cas *cp)
{
	int i;

	for (i = 0; i < N_RX_COMP_RINGS; i++)
		cas_enable_irq(cp, i);
}

static inline void cas_entropy_gather(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
		return;

	batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
			    readl(cp->regs + REG_ENTROPY_IV),
			    sizeof(uint64_t)*8);
#endif
}

static inline void cas_entropy_reset(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
		return;

	writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
	       cp->regs + REG_BIM_LOCAL_DEV_EN);
	writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
	writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);

	/* if we read back 0x0, we don't have an entropy device */
	if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
		cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
#endif
}

/* access to the phy. the following assumes that we've initialized the MIF to
 * be in frame rather than bit-bang mode
 */
static u16 cas_phy_read(struct cas *cp, int reg)
{
	u32 cmd;
	int limit = STOP_TRIES_PHY;

	cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
	cmd |= MIF_FRAME_TURN_AROUND_MSB;
	writel(cmd, cp->regs + REG_MIF_FRAME);

	/* poll for completion */
	while (limit-- > 0) {
		udelay(10);
		cmd = readl(cp->regs + REG_MIF_FRAME);
		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
			return (cmd & MIF_FRAME_DATA_MASK);
	}
	return 0xFFFF; /* -1 */
}

static int cas_phy_write(struct cas *cp, int reg, u16 val)
{
	int limit = STOP_TRIES_PHY;
	u32 cmd;

	cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
	cmd |= MIF_FRAME_TURN_AROUND_MSB;
	cmd |= val & MIF_FRAME_DATA_MASK;
	writel(cmd, cp->regs + REG_MIF_FRAME);

	/* poll for completion */
	while (limit-- > 0) {
		udelay(10);
		cmd = readl(cp->regs + REG_MIF_FRAME);
		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
			return 0;
	}
	return -1;
}

static void cas_phy_powerup(struct cas *cp)
{
	u16 ctl = cas_phy_read(cp, MII_BMCR);

	if ((ctl & BMCR_PDOWN) == 0)
		return;
	ctl &= ~BMCR_PDOWN;
	cas_phy_write(cp, MII_BMCR, ctl);
}

static void cas_phy_powerdown(struct cas *cp)
{
	u16 ctl = cas_phy_read(cp, MII_BMCR);

	if (ctl & BMCR_PDOWN)
		return;
	ctl |= BMCR_PDOWN;
	cas_phy_write(cp, MII_BMCR, ctl);
}

/* cp->lock held. note: the last put_page will free the buffer */
static int cas_page_free(struct cas *cp, cas_page_t *page)
{
	pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
		       PCI_DMA_FROMDEVICE);
	__free_pages(page->buffer, cp->page_order);
	kfree(page);
	return 0;
}

#ifdef RX_COUNT_BUFFERS
#define RX_USED_ADD(x, y)       ((x)->used += (y))
#define RX_USED_SET(x, y)       ((x)->used  = (y))
#else
#define RX_USED_ADD(x, y)
#define RX_USED_SET(x, y)
#endif

/* local page allocation routines for the receive buffers. jumbo pages
 * require at least 8K contiguous and 8K aligned buffers.
 */
static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
{
	cas_page_t *page;

	page = kmalloc(sizeof(cas_page_t), flags);
	if (!page)
		return NULL;

	INIT_LIST_HEAD(&page->list);
	RX_USED_SET(page, 0);
	page->buffer = alloc_pages(flags, cp->page_order);
	if (!page->buffer)
		goto page_err;
	page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
				      cp->page_size, PCI_DMA_FROMDEVICE);
	return page;

page_err:
	kfree(page);
	return NULL;
}

/* initialize spare pool of rx buffers, but allocate during the open */
static void cas_spare_init(struct cas *cp)
{
	spin_lock(&cp->rx_inuse_lock);
	INIT_LIST_HEAD(&cp->rx_inuse_list);
	spin_unlock(&cp->rx_inuse_lock);

	spin_lock(&cp->rx_spare_lock);
	INIT_LIST_HEAD(&cp->rx_spare_list);
	cp->rx_spares_needed = RX_SPARE_COUNT;
	spin_unlock(&cp->rx_spare_lock);
}
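
/*
 * Spare-pool lifecycle, for reference: cas_spare_init() above only primes
 * the bookkeeping (the pages themselves are allocated on open),
 * cas_spare_free() below releases everything on close, and
 * cas_spare_recover() replenishes the pool back toward RX_SPARE_COUNT.
 */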

/* used on close. free all the spare buffers. */
static void cas_spare_free(struct cas *cp)
{
	struct list_head list, *elem, *tmp;

	/* free spare buffers */
	INIT_LIST_HEAD(&list);
	spin_lock(&cp->rx_spare_lock);
	list_splice_init(&cp->rx_spare_list, &list);
	spin_unlock(&cp->rx_spare_lock);
	list_for_each_safe(elem, tmp, &list) {
		cas_page_free(cp, list_entry(elem, cas_page_t, list));
	}

	INIT_LIST_HEAD(&list);
#if 1
	/*
	 * Looks like Adrian had protected this with a different
	 * lock than used everywhere else to manipulate this list.
	 */
	spin_lock(&cp->rx_inuse_lock);
	list_splice_init(&cp->rx_inuse_list, &list);
	spin_unlock(&cp->rx_inuse_lock);
#else
	spin_lock(&cp->rx_spare_lock);
	list_splice_init(&cp->rx_inuse_list, &list);
	spin_unlock(&cp->rx_spare_lock);
#endif
	list_for_each_safe(elem, tmp, &list) {
		cas_page_free(cp, list_entry(elem, cas_page_t, list));
	}
}

/* replenish spares if needed */
static void cas_spare_recover(struct cas *cp, const gfp_t flags)
{
	struct list_head list, *elem, *tmp;
	int needed, i;

	/* check inuse list. if we don't need any more free buffers,
	 * just free it
	 */

	/* make a local copy of the list */
	INIT_LIST_HEAD(&list);
	spin_lock(&cp->rx_inuse_lock);
	list_splice_init(&cp->rx_inuse_list, &list);
	spin_unlock(&cp->rx_inuse_lock);

	list_for_each_safe(elem, tmp, &list) {
		cas_page_t *page = list_entry(elem, cas_page_t, list);

		/*
		 * With the lockless pagecache, cassini buffering scheme gets
		 * slightly less accurate: we might find that a page has an
		 * elevated reference count here, due to a speculative ref,
		 * and skip it as in-use. Ideally we would be able to reclaim
		 * it. However this would be such a rare case, it doesn't
		 * matter too much as we should pick it up the next time round.
		 *
		 * Importantly, if we find that the page has a refcount of 1
		 * here (our refcount), then we know it is definitely not inuse
		 * so we can reuse it.
		 */
		if (page_count(page->buffer) > 1)
			continue;

		list_del(elem);
		spin_lock(&cp->rx_spare_lock);
		if (cp->rx_spares_needed > 0) {
			list_add(elem, &cp->rx_spare_list);
			cp->rx_spares_needed--;
			spin_unlock(&cp->rx_spare_lock);
		} else {
			spin_unlock(&cp->rx_spare_lock);
			cas_page_free(cp, page);
		}
	}

	/* put any inuse buffers back on the list */
	if (!list_empty(&list)) {
		spin_lock(&cp->rx_inuse_lock);
		list_splice(&list, &cp->rx_inuse_list);
		spin_unlock(&cp->rx_inuse_lock);
	}

	spin_lock(&cp->rx_spare_lock);
	needed = cp->rx_spares_needed;
	spin_unlock(&cp->rx_spare_lock);
	if (!needed)
		return;

	/* we still need spares, so try to allocate some */
	INIT_LIST_HEAD(&list);
	i = 0;
	while (i < needed) {
		cas_page_t *spare = cas_page_alloc(cp, flags);
		if (!spare)
			break;
		list_add(&spare->list, &list);
		i++;
	}

	spin_lock(&cp->rx_spare_lock);
	list_splice(&list, &cp->rx_spare_list);
	cp->rx_spares_needed -= i;
	spin_unlock(&cp->rx_spare_lock);
}
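
/*
 * Note on the dequeue path below: when the spare list runs dry it first
 * tries an inline GFP_ATOMIC recovery; independently, whenever
 * rx_spares_needed reaches a multiple of RX_SPARE_RECOVER_VAL it also
 * schedules the reset work (via reset_task_pending_spare) so that
 * replenishment can happen outside the hot path.
 */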
/* pull a page from the list. */
static cas_page_t *cas_page_dequeue(struct cas *cp)
{
	struct list_head *entry;
	int recover;

	spin_lock(&cp->rx_spare_lock);
	if (list_empty(&cp->rx_spare_list)) {
		/* try to do a quick recovery */
		spin_unlock(&cp->rx_spare_lock);
		cas_spare_recover(cp, GFP_ATOMIC);
		spin_lock(&cp->rx_spare_lock);
		if (list_empty(&cp->rx_spare_list)) {
			netif_err(cp, rx_err, cp->dev,
				  "no spare buffers available\n");
			spin_unlock(&cp->rx_spare_lock);
			return NULL;
		}
	}

	entry = cp->rx_spare_list.next;
	list_del(entry);
	recover = ++cp->rx_spares_needed;
	spin_unlock(&cp->rx_spare_lock);

	/* trigger the timer to do the recovery */
	if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
#if 1
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_spare);
		schedule_work(&cp->reset_task);
#else
		atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
		schedule_work(&cp->reset_task);
#endif
	}
	return list_entry(entry, cas_page_t, list);
}


static void cas_mif_poll(struct cas *cp, const int enable)
{
	u32 cfg;

	cfg  = readl(cp->regs + REG_MIF_CFG);
	cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);

	if (cp->phy_type & CAS_PHY_MII_MDIO1)
		cfg |= MIF_CFG_PHY_SELECT;

	/* poll and interrupt on link status change. */
	if (enable) {
		cfg |= MIF_CFG_POLL_EN;
		cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
		cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
	}
	writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
	       cp->regs + REG_MIF_MASK);
	writel(cfg, cp->regs + REG_MIF_CFG);
}

/* Must be invoked under cp->lock */
static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
{
	u16 ctl;
#if 1
	int lcntl;
	int changed = 0;
	int oldstate = cp->lstate;
	int link_was_not_down = !(oldstate == link_down);
#endif
	/* Setup link parameters */
	if (!ep)
		goto start_aneg;
	lcntl = cp->link_cntl;
	if (ep->autoneg == AUTONEG_ENABLE)
		cp->link_cntl = BMCR_ANENABLE;
	else {
		cp->link_cntl = 0;
		if (ep->speed == SPEED_100)
			cp->link_cntl |= BMCR_SPEED100;
		else if (ep->speed == SPEED_1000)
			cp->link_cntl |= CAS_BMCR_SPEED1000;
		if (ep->duplex == DUPLEX_FULL)
			cp->link_cntl |= BMCR_FULLDPLX;
	}
#if 1
	changed = (lcntl != cp->link_cntl);
#endif
start_aneg:
	if (cp->lstate == link_up) {
		netdev_info(cp->dev, "PCS link down\n");
	} else {
		if (changed) {
			netdev_info(cp->dev, "link configuration changed\n");
		}
	}
	cp->lstate = link_down;
	cp->link_transition = LINK_TRANSITION_LINK_DOWN;
	if (!cp->hw_running)
		return;
#if 1
	/*
	 * WTZ: If the old state was link_up, we turn off the carrier
	 * to replicate everything we do elsewhere on a link-down
	 * event when we were already in a link-up state.
	 */
	if (oldstate == link_up)
		netif_carrier_off(cp->dev);
	if (changed && link_was_not_down) {
		/*
		 * WTZ: This branch will simply schedule a full reset after
		 * we explicitly changed link modes in an ioctl. See if this
		 * fixes the link-problems we were having for forced mode.
		 */
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_all);
		schedule_work(&cp->reset_task);
		cp->timer_ticks = 0;
		mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
		return;
	}
#endif
	if (cp->phy_type & CAS_PHY_SERDES) {
		u32 val = readl(cp->regs + REG_PCS_MII_CTRL);

		if (cp->link_cntl & BMCR_ANENABLE) {
			val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
			cp->lstate = link_aneg;
		} else {
			if (cp->link_cntl & BMCR_FULLDPLX)
				val |= PCS_MII_CTRL_DUPLEX;
			val &= ~PCS_MII_AUTONEG_EN;
			cp->lstate = link_force_ok;
		}
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		writel(val, cp->regs + REG_PCS_MII_CTRL);

	} else {
		cas_mif_poll(cp, 0);
		ctl = cas_phy_read(cp, MII_BMCR);
		ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
			 CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
		ctl |= cp->link_cntl;
		if (ctl & BMCR_ANENABLE) {
			ctl |= BMCR_ANRESTART;
			cp->lstate = link_aneg;
		} else {
			cp->lstate = link_force_ok;
		}
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		cas_phy_write(cp, MII_BMCR, ctl);
		cas_mif_poll(cp, 1);
	}

	cp->timer_ticks = 0;
	mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
}
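
/*
 * Reset-request convention used throughout: callers bump
 * reset_task_pending plus one reason counter (reset_task_pending_all,
 * reset_task_pending_spare, ...) before scheduling cp->reset_task. Per
 * the note next to the CAS_RESET_* definitions, the separate atomic
 * counters replace the old single-value scheme to avoid losing
 * concurrent requests.
 */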

/* Must be invoked under cp->lock. */
static int cas_reset_mii_phy(struct cas *cp)
{
	int limit = STOP_TRIES_PHY;
	u16 val;

	cas_phy_write(cp, MII_BMCR, BMCR_RESET);
	udelay(100);
	while (--limit) {
		val = cas_phy_read(cp, MII_BMCR);
		if ((val & BMCR_RESET) == 0)
			break;
		udelay(10);
	}
	return (limit <= 0);
}

static int cas_saturn_firmware_init(struct cas *cp)
{
	const struct firmware *fw;
	const char fw_name[] = "sun/cassini.bin";
	int err;

	if (PHY_NS_DP83065 != cp->phy_id)
		return 0;

	err = request_firmware(&fw, fw_name, &cp->pdev->dev);
	if (err) {
		pr_err("Failed to load firmware \"%s\"\n",
		       fw_name);
		return err;
	}
	if (fw->size < 2) {
		pr_err("bogus length %zu in \"%s\"\n",
		       fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}
	cp->fw_load_addr = fw->data[1] << 8 | fw->data[0];
	cp->fw_size = fw->size - 2;
	cp->fw_data = vmalloc(cp->fw_size);
	if (!cp->fw_data) {
		err = -ENOMEM;
		pr_err("\"%s\" Failed %d\n", fw_name, err);
		goto out;
	}
	memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
out:
	release_firmware(fw);
	return err;
}

static void cas_saturn_firmware_load(struct cas *cp)
{
	int i;

	cas_phy_powerdown(cp);

	/* expanded memory access mode */
	cas_phy_write(cp, DP83065_MII_MEM, 0x0);

	/* pointer configuration for new firmware */
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
	cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
	cas_phy_write(cp, DP83065_MII_REGD, 0x82);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
	cas_phy_write(cp, DP83065_MII_REGD, 0x0);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
	cas_phy_write(cp, DP83065_MII_REGD, 0x39);

	/* download new firmware */
	cas_phy_write(cp, DP83065_MII_MEM, 0x1);
	cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
	for (i = 0; i < cp->fw_size; i++)
		cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);

	/* enable firmware */
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
	cas_phy_write(cp, DP83065_MII_REGD, 0x1);
}
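
/*
 * Layout of the "sun/cassini.bin" image as consumed above: bytes 0-1
 * hold the little-endian load address, and the remainder is the raw
 * byte stream pushed to the DP83065 one byte at a time through the
 * REGE/REGD expanded-memory window.
 */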

/* phy initialization */
static void cas_phy_init(struct cas *cp)
{
	u16 val;

	/* if we're in MII/GMII mode, set up phy */
	if (CAS_PHY_MII(cp->phy_type)) {
		writel(PCS_DATAPATH_MODE_MII,
		       cp->regs + REG_PCS_DATAPATH_MODE);

		cas_mif_poll(cp, 0);
		cas_reset_mii_phy(cp); /* take out of isolate mode */

		if (PHY_LUCENT_B0 == cp->phy_id) {
			/* workaround link up/down issue with lucent */
			cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
			cas_phy_write(cp, MII_BMCR, 0x00f1);
			cas_phy_write(cp, LUCENT_MII_REG, 0x0);

		} else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
			/* workarounds for broadcom phy */
			cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);

		} else if (PHY_BROADCOM_5411 == cp->phy_id) {
			val = cas_phy_read(cp, BROADCOM_MII_REG4);
			val = cas_phy_read(cp, BROADCOM_MII_REG4);
			if (val & 0x0080) {
				/* link workaround */
				cas_phy_write(cp, BROADCOM_MII_REG4,
					      val & ~0x0080);
			}

		} else if (cp->cas_flags & CAS_FLAG_SATURN) {
			writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
			       SATURN_PCFG_FSI : 0x0,
			       cp->regs + REG_SATURN_PCFG);

			/* load firmware to address 10Mbps auto-negotiation
			 * issue. NOTE: this will need to be changed if the
			 * default firmware gets fixed.
			 */
			if (PHY_NS_DP83065 == cp->phy_id) {
				cas_saturn_firmware_load(cp);
			}
			cas_phy_powerup(cp);
		}

		/* advertise capabilities */
		val = cas_phy_read(cp, MII_BMCR);
		val &= ~BMCR_ANENABLE;
		cas_phy_write(cp, MII_BMCR, val);
		udelay(10);

		cas_phy_write(cp, MII_ADVERTISE,
			      cas_phy_read(cp, MII_ADVERTISE) |
			      (ADVERTISE_10HALF | ADVERTISE_10FULL |
			       ADVERTISE_100HALF | ADVERTISE_100FULL |
			       CAS_ADVERTISE_PAUSE |
			       CAS_ADVERTISE_ASYM_PAUSE));

		if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
			/* make sure that we don't advertise half
			 * duplex to avoid a chip issue
			 */
			val  = cas_phy_read(cp, CAS_MII_1000_CTRL);
			val &= ~CAS_ADVERTISE_1000HALF;
			val |= CAS_ADVERTISE_1000FULL;
			cas_phy_write(cp, CAS_MII_1000_CTRL, val);
		}

	} else {
		/* reset pcs for serdes */
		u32 val;
		int limit;

		writel(PCS_DATAPATH_MODE_SERDES,
		       cp->regs + REG_PCS_DATAPATH_MODE);

		/* enable serdes pins on saturn */
		if (cp->cas_flags & CAS_FLAG_SATURN)
			writel(0, cp->regs + REG_SATURN_PCFG);

		/* Reset PCS unit. */
		val = readl(cp->regs + REG_PCS_MII_CTRL);
		val |= PCS_MII_RESET;
		writel(val, cp->regs + REG_PCS_MII_CTRL);

		limit = STOP_TRIES;
		while (--limit > 0) {
			udelay(10);
			if ((readl(cp->regs + REG_PCS_MII_CTRL) &
			     PCS_MII_RESET) == 0)
				break;
		}
		if (limit <= 0)
			netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
				    readl(cp->regs + REG_PCS_STATE_MACHINE));

		/* Make sure PCS is disabled while changing advertisement
		 * configuration.
		 */
		writel(0x0, cp->regs + REG_PCS_CFG);

		/* Advertise all capabilities except half-duplex. */
		val = readl(cp->regs + REG_PCS_MII_ADVERT);
		val &= ~PCS_MII_ADVERT_HD;
		val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
			PCS_MII_ADVERT_ASYM_PAUSE);
		writel(val, cp->regs + REG_PCS_MII_ADVERT);

		/* enable PCS */
		writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);

		/* pcs workaround: enable sync detect */
		writel(PCS_SERDES_CTRL_SYNCD_EN,
		       cp->regs + REG_PCS_SERDES_CTRL);
	}
}

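/*
 * Check the PCS link state. A nonzero return asks the caller (e.g. the
 * interrupt path via cas_pcs_interrupt()) to schedule a chip reset as a
 * workaround for the link-failure problem described in the comments
 * below.
 */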
static int cas_pcs_link_check(struct cas *cp)
{
	u32 stat, state_machine;
	int retval = 0;

	/* The link status bit latches on zero, so you must
	 * read it twice in such a case to see a transition
	 * to the link being up.
	 */
	stat = readl(cp->regs + REG_PCS_MII_STATUS);
	if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
		stat = readl(cp->regs + REG_PCS_MII_STATUS);

	/* The remote-fault indication is only valid
	 * when autoneg has completed.
	 */
	if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
		     PCS_MII_STATUS_REMOTE_FAULT)) ==
	    (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
		netif_info(cp, link, cp->dev, "PCS RemoteFault\n");

	/* work around link detection issue by querying the PCS state
	 * machine directly.
	 */
	state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
	if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
		stat &= ~PCS_MII_STATUS_LINK_STATUS;
	} else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
		stat |= PCS_MII_STATUS_LINK_STATUS;
	}

	if (stat & PCS_MII_STATUS_LINK_STATUS) {
		if (cp->lstate != link_up) {
			if (cp->opened) {
				cp->lstate = link_up;
				cp->link_transition = LINK_TRANSITION_LINK_UP;

				cas_set_link_modes(cp);
				netif_carrier_on(cp->dev);
			}
		}
	} else if (cp->lstate == link_up) {
		cp->lstate = link_down;
		if (link_transition_timeout != 0 &&
		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
		    !cp->link_transition_jiffies_valid) {
			/*
			 * force a reset, as a workaround for the
			 * link-failure problem. May want to move this to a
			 * point a bit earlier in the sequence. If we had
			 * generated a reset a short time ago, we'll wait for
			 * the link timer to check the status until a
			 * timer expires (link_transition_jiffies_valid is
			 * true when the timer is running.) Instead of using
			 * a system timer, we just do a check whenever the
			 * link timer is running - this clears the flag after
			 * a suitable delay.
			 */
			retval = 1;
			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
			cp->link_transition_jiffies = jiffies;
			cp->link_transition_jiffies_valid = 1;
		} else {
			cp->link_transition = LINK_TRANSITION_ON_FAILURE;
		}
		netif_carrier_off(cp->dev);
		if (cp->opened)
			netif_info(cp, link, cp->dev, "PCS link down\n");

		/* Cassini only: if you force a mode, there can be
		 * sync problems on link down. to fix that, the following
		 * things need to be checked:
		 * 1) read serialink state register
		 * 2) read pcs status register to verify link down.
		 * 3) if link down and serial link == 0x03, then you need
		 *    to global reset the chip.
		 */
		if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
			/* should check to see if we're in a forced mode */
			stat = readl(cp->regs + REG_PCS_SERDES_STATE);
			if (stat == 0x03)
				return 1;
		}
	} else if (cp->lstate == link_down) {
		if (link_transition_timeout != 0 &&
		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
		    !cp->link_transition_jiffies_valid) {
			/* force a reset, as a workaround for the
			 * link-failure problem. May want to move
			 * this to a point a bit earlier in the
			 * sequence.
			 */
			retval = 1;
			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
			cp->link_transition_jiffies = jiffies;
			cp->link_transition_jiffies_valid = 1;
		} else {
			cp->link_transition = LINK_TRANSITION_STILL_FAILED;
		}
	}

	return retval;
}

static int cas_pcs_interrupt(struct net_device *dev,
			     struct cas *cp, u32 status)
{
	u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);

	if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
		return 0;
	return cas_pcs_link_check(cp);
}

static int cas_txmac_interrupt(struct net_device *dev,
			       struct cas *cp, u32 status)
{
	u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);

	if (!txmac_stat)
		return 0;

	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
		     "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);

	/* Defer timer expiration is quite normal,
	 * don't even log the event.
	 */
	if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
	    !(txmac_stat & ~MAC_TX_DEFER_TIMER))
		return 0;

	spin_lock(&cp->stat_lock[0]);
	if (txmac_stat & MAC_TX_UNDERRUN) {
		netdev_err(dev, "TX MAC xmit underrun\n");
		cp->net_stats[0].tx_fifo_errors++;
	}

	if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
		netdev_err(dev, "TX MAC max packet size error\n");
		cp->net_stats[0].tx_errors++;
	}

	/* The rest are all cases of one of the 16-bit TX
	 * counters expiring.
	 */
	if (txmac_stat & MAC_TX_COLL_NORMAL)
		cp->net_stats[0].collisions += 0x10000;

	if (txmac_stat & MAC_TX_COLL_EXCESS) {
		cp->net_stats[0].tx_aborted_errors += 0x10000;
		cp->net_stats[0].collisions += 0x10000;
	}

	if (txmac_stat & MAC_TX_COLL_LATE) {
		cp->net_stats[0].tx_aborted_errors += 0x10000;
		cp->net_stats[0].collisions += 0x10000;
	}
	spin_unlock(&cp->stat_lock[0]);

	/* We do not keep track of MAC_TX_COLL_FIRST and
	 * MAC_TX_PEAK_ATTEMPTS events.
	 */
	return 0;
}

static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
{
	cas_hp_inst_t *inst;
	u32 val;
	int i;

	i = 0;
	while ((inst = firmware) && inst->note) {
		writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);

		val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
		val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);

		val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
		val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
		val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
		val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
		val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
		val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
		val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);

		val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
		++firmware;
		++i;
	}
}

static void cas_init_rx_dma(struct cas *cp)
{
	u64 desc_dma = cp->block_dvma;
	u32 val;
	int i, size;

	/* rx free descriptors */
	val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
	val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
	val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
	if ((N_RX_DESC_RINGS > 1) &&
	    (cp->cas_flags & CAS_FLAG_REG_PLUS))  /* do desc 2 */
		val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
	writel(val, cp->regs + REG_RX_CFG);

	val = (unsigned long) cp->init_rxds[0] -
		(unsigned long) cp->init_block;
	writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
	writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		/* rx desc 2 is for IPSEC packets. however,
		 * we don't use it for that purpose.
		 */
		val = (unsigned long) cp->init_rxds[1] -
			(unsigned long) cp->init_block;
		writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
		writel((desc_dma + val) & 0xffffffff, cp->regs +
		       REG_PLUS_RX_DB1_LOW);
		writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
		       REG_PLUS_RX_KICK1);
	}

	/* rx completion registers */
	val = (unsigned long) cp->init_rxcs[0] -
		(unsigned long) cp->init_block;
	writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		/* rx comp 2-4 */
		for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
			val = (unsigned long) cp->init_rxcs[i] -
				(unsigned long) cp->init_block;
			writel((desc_dma + val) >> 32, cp->regs +
			       REG_PLUS_RX_CBN_HI(i));
			writel((desc_dma + val) & 0xffffffff, cp->regs +
			       REG_PLUS_RX_CBN_LOW(i));
		}
	}

	/* read selective clear regs to prevent spurious interrupts
	 * on reset because complete == kick.
	 * selective clear set up to prevent interrupts on resets
	 */
	readl(cp->regs + REG_INTR_STATUS_ALIAS);
	writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		for (i = 1; i < N_RX_COMP_RINGS; i++)
			readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));

		/* 2 is different from 3 and 4 */
		if (N_RX_COMP_RINGS > 1)
			writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
			       cp->regs + REG_PLUS_ALIASN_CLEAR(1));

		for (i = 2; i < N_RX_COMP_RINGS; i++)
			writel(INTR_RX_DONE_ALT,
			       cp->regs + REG_PLUS_ALIASN_CLEAR(i));
	}

	/* set up pause thresholds */
	val = CAS_BASE(RX_PAUSE_THRESH_OFF,
		       cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
	val |= CAS_BASE(RX_PAUSE_THRESH_ON,
			cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
	writel(val, cp->regs + REG_RX_PAUSE_THRESH);

	/* zero out dma reassembly buffers */
	for (i = 0; i < 64; i++) {
		writel(i, cp->regs + REG_RX_TABLE_ADDR);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
	}

	/* make sure address register is 0 for normal operation */
	writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
	writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);

	/* interrupt mitigation */
#ifdef USE_RX_BLANK
	val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
	val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
	writel(val, cp->regs + REG_RX_BLANK);
#else
	writel(0x0, cp->regs + REG_RX_BLANK);
#endif

	/* interrupt generation as a function of low water marks for
	 * free desc and completion entries. these are used to trigger
	 * housekeeping for rx descs. we don't use the free interrupt
	 * as it's not very useful
	 */
	/* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */
	val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
	writel(val, cp->regs + REG_RX_AE_THRESH);
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
		writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
	}

	/* Random early detect registers. useful for congestion avoidance.
	 * this should be tunable.
	 */
	writel(0x0, cp->regs + REG_RX_RED);
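
	/*
	 * Page-size encoding used just below: 0x800 (the default) -> 0x0,
	 * 0x1000 -> 0x1, 0x2000 -> 0x2, 0x4000 -> 0x3. The MTU stride
	 * index i selects the smallest power-of-two bucket (0x400 << i)
	 * that fits mtu + 64, so mtu_stride works out to 1 << (i + 10).
	 */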
	/* receive page sizes. default == 2K (0x800) */
	val = 0;
	if (cp->page_size == 0x1000)
		val = 0x1;
	else if (cp->page_size == 0x2000)
		val = 0x2;
	else if (cp->page_size == 0x4000)
		val = 0x3;

	/* round mtu + offset. constrain to page size. */
	size = cp->dev->mtu + 64;
	if (size > cp->page_size)
		size = cp->page_size;

	if (size <= 0x400)
		i = 0x0;
	else if (size <= 0x800)
		i = 0x1;
	else if (size <= 0x1000)
		i = 0x2;
	else
		i = 0x3;

	cp->mtu_stride = 1 << (i + 10);
	val  = CAS_BASE(RX_PAGE_SIZE, val);
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
	writel(val, cp->regs + REG_RX_PAGE_SIZE);

	/* enable the header parser if desired */
	if (CAS_HP_FIRMWARE == cas_prog_null)
		return;

	val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
	val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
	val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
	writel(val, cp->regs + REG_HP_CFG);
}

static inline void cas_rxc_init(struct cas_rx_comp *rxc)
{
	memset(rxc, 0, sizeof(*rxc));
	rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
}

/* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
 * flipping is protected by the fact that the chip will not
 * hand back the same page index while it's being processed.
 */
static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
{
	cas_page_t *page = cp->rx_pages[1][index];
	cas_page_t *new;

	if (page_count(page->buffer) == 1)
		return page;

	new = cas_page_dequeue(cp);
	if (new) {
		spin_lock(&cp->rx_inuse_lock);
		list_add(&page->list, &cp->rx_inuse_list);
		spin_unlock(&cp->rx_inuse_lock);
	}
	return new;
}

/* this needs to be changed if we actually use the ENC RX DESC ring */
static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
				 const int index)
{
	cas_page_t **page0 = cp->rx_pages[0];
	cas_page_t **page1 = cp->rx_pages[1];

	/* swap if buffer is in use */
	if (page_count(page0[index]->buffer) > 1) {
		cas_page_t *new = cas_page_spare(cp, index);
		if (new) {
			page1[index] = page0[index];
			page0[index] = new;
		}
	}
	RX_USED_SET(page0[index], 0);
	return page0[index];
}

static void cas_clean_rxds(struct cas *cp)
{
	/* only clean ring 0 as ring 1 is used for spare buffers */
	struct cas_rx_desc *rxd = cp->init_rxds[0];
	int i, size;

	/* release all rx flows */
	for (i = 0; i < N_RX_FLOWS; i++) {
		struct sk_buff *skb;
		while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
			cas_skb_release(skb);
		}
	}

	/* initialize descriptors */
	size = RX_DESC_RINGN_SIZE(0);
	for (i = 0; i < size; i++) {
		cas_page_t *page = cas_page_swap(cp, 0, i);
		rxd[i].buffer = cpu_to_le64(page->dma_addr);
		rxd[i].index  = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
					    CAS_BASE(RX_INDEX_RING, 0));
	}

	cp->rx_old[0]  = RX_DESC_RINGN_SIZE(0) - 4;
	cp->rx_last[0] = 0;
	cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
}

static void cas_clean_rxcs(struct cas *cp)
{
	int i, j;

	/* take ownership of rx comp descriptors */
	memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
	memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
	for (i = 0; i < N_RX_COMP_RINGS; i++) {
		struct cas_rx_comp *rxc = cp->init_rxcs[i];
		for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
			cas_rxc_init(rxc + j);
		}
	}
}

#if 0
/* When we get a RX fifo overflow, the RX unit is probably hung
 * so we do the following.
 *
 * If any part of the reset goes wrong, we return 1 and that causes the
 * whole chip to be reset.
 */
static int cas_rxmac_reset(struct cas *cp)
{
	struct net_device *dev = cp->dev;
	int limit;
	u32 val;

	/* First, reset MAC RX. */
	writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	for (limit = 0; limit < STOP_TRIES; limit++) {
		if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
			break;
		udelay(10);
	}
	if (limit == STOP_TRIES) {
		netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
		return 1;
	}

	/* Second, disable RX DMA. */
	writel(0, cp->regs + REG_RX_CFG);
	for (limit = 0; limit < STOP_TRIES; limit++) {
		if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
			break;
		udelay(10);
	}
	if (limit == STOP_TRIES) {
		netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
		return 1;
	}

	mdelay(5);

	/* Execute RX reset command. */
	writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
	for (limit = 0; limit < STOP_TRIES; limit++) {
		if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
			break;
		udelay(10);
	}
	if (limit == STOP_TRIES) {
		netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
		return 1;
	}

	/* reset driver rx state */
	cas_clean_rxds(cp);
	cas_clean_rxcs(cp);

	/* Now, reprogram the rest of RX unit. */
	cas_init_rx_dma(cp);

	/* re-enable */
	val = readl(cp->regs + REG_RX_CFG);
	writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
	writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
	val = readl(cp->regs + REG_MAC_RX_CFG);
	writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	return 0;
}
#endif

static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
			       u32 status)
{
	u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);

	if (!stat)
		return 0;

	netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);

	/* these are all rollovers */
	spin_lock(&cp->stat_lock[0]);
	if (stat & MAC_RX_ALIGN_ERR)
		cp->net_stats[0].rx_frame_errors += 0x10000;

	if (stat & MAC_RX_CRC_ERR)
		cp->net_stats[0].rx_crc_errors += 0x10000;

	if (stat & MAC_RX_LEN_ERR)
		cp->net_stats[0].rx_length_errors += 0x10000;

	if (stat & MAC_RX_OVERFLOW) {
		cp->net_stats[0].rx_over_errors++;
		cp->net_stats[0].rx_fifo_errors++;
	}

	/* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR
	 * events.
	 */
	spin_unlock(&cp->stat_lock[0]);
	return 0;
}

static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
			     u32 status)
{
	u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);

	if (!stat)
		return 0;

	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
		     "mac interrupt, stat: 0x%x\n", stat);

	/* This interrupt is just for pause frame and pause
	 * tracking. It is useful for diagnostics and debug
	 * but probably by default we will mask these events.
	 */
	if (stat & MAC_CTRL_PAUSE_STATE)
		cp->pause_entered++;

	if (stat & MAC_CTRL_PAUSE_RECEIVED)
		cp->pause_last_time_recvd = (stat >> 16);

	return 0;
}

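/*
 * Forced-mode fallback used below when autoneg fails: from link_aneg we
 * first force full duplex at the highest speed the chip supports (1000
 * or 100), then on subsequent timer passes step down
 * 1000FD -> 100FD -> 100HD -> 10HD until something sticks.
 */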
/* Must be invoked under cp->lock. */
static inline int cas_mdio_link_not_up(struct cas *cp)
{
	u16 val;

	switch (cp->lstate) {
	case link_force_ret:
		netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
		cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
		cp->timer_ticks = 5;
		cp->lstate = link_force_ok;
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		break;

	case link_aneg:
		val = cas_phy_read(cp, MII_BMCR);

		/* Try forced modes. we try things in the following order:
		 * 1000 full -> 100 full/half -> 10 half
		 */
		val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
		val |= BMCR_FULLDPLX;
		val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
			CAS_BMCR_SPEED1000 : BMCR_SPEED100;
		cas_phy_write(cp, MII_BMCR, val);
		cp->timer_ticks = 5;
		cp->lstate = link_force_try;
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		break;

	case link_force_try:
		/* Downgrade from 1000 to 100 to 10 Mbps if necessary. */
		val = cas_phy_read(cp, MII_BMCR);
		cp->timer_ticks = 5;
		if (val & CAS_BMCR_SPEED1000) { /* gigabit */
			val &= ~CAS_BMCR_SPEED1000;
			val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
			cas_phy_write(cp, MII_BMCR, val);
			break;
		}

		if (val & BMCR_SPEED100) {
			if (val & BMCR_FULLDPLX) /* fd failed */
				val &= ~BMCR_FULLDPLX;
			else { /* 100Mbps failed */
				val &= ~BMCR_SPEED100;
			}
			cas_phy_write(cp, MII_BMCR, val);
			break;
		}
	default:
		break;
	}
	return 0;
}


/* must be invoked with cp->lock held */
static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
{
	int restart;

	if (bmsr & BMSR_LSTATUS) {
		/* Ok, here we got a link. If we had it due to a forced
		 * fallback, and we were configured for autoneg, we
		 * retry a short autoneg pass. If you know your hub is
		 * broken, use ethtool ;)
		 */
		if ((cp->lstate == link_force_try) &&
		    (cp->link_cntl & BMCR_ANENABLE)) {
			cp->lstate = link_force_ret;
			cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
			cas_mif_poll(cp, 0);
			cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
			cp->timer_ticks = 5;
			if (cp->opened)
				netif_info(cp, link, cp->dev,
					   "Got link after fallback, retrying autoneg once...\n");
			cas_phy_write(cp, MII_BMCR,
				      cp->link_fcntl | BMCR_ANENABLE |
				      BMCR_ANRESTART);
			cas_mif_poll(cp, 1);

		} else if (cp->lstate != link_up) {
			cp->lstate = link_up;
			cp->link_transition = LINK_TRANSITION_LINK_UP;

			if (cp->opened) {
				cas_set_link_modes(cp);
				netif_carrier_on(cp->dev);
			}
		}
		return 0;
	}

	/* link not up. if the link was previously up, we restart the
	 * whole process
	 */
	restart = 0;
	if (cp->lstate == link_up) {
		cp->lstate = link_down;
		cp->link_transition = LINK_TRANSITION_LINK_DOWN;

		netif_carrier_off(cp->dev);
		if (cp->opened)
			netif_info(cp, link, cp->dev, "Link down\n");
		restart = 1;

	} else if (++cp->timer_ticks > 10)
		cas_mdio_link_not_up(cp);

	return restart;
}

static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
			     u32 status)
{
	u32 stat = readl(cp->regs + REG_MIF_STATUS);
	u16 bmsr;

	/* check for a link change */
	if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
		return 0;

	bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
	return cas_mii_link_check(cp, bmsr);
}

static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
			     u32 status)
{
	u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);

	if (!stat)
		return 0;

	netdev_err(dev, "PCI error [%04x:%04x]",
		   stat, readl(cp->regs + REG_BIM_DIAG));

	/* cassini+ has this reserved */
	if ((stat & PCI_ERR_BADACK) &&
	    ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
		pr_cont(" <No ACK64# during ABS64 cycle>");

	if (stat & PCI_ERR_DTRTO)
		pr_cont(" <Delayed transaction timeout>");
	if (stat & PCI_ERR_OTHER)
		pr_cont(" <other>");
	if (stat & PCI_ERR_BIM_DMA_WRITE)
		pr_cont(" <BIM DMA 0 write req>");
	if (stat & PCI_ERR_BIM_DMA_READ)
		pr_cont(" <BIM DMA 0 read req>");
	pr_cont("\n");

	if (stat & PCI_ERR_OTHER) {
		u16 cfg;

		/* Interrogate PCI config space for the
		 * true cause.
		 */
		pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
		netdev_err(dev, "Read PCI cfg space status [%04x]\n", cfg);
		if (cfg & PCI_STATUS_PARITY)
			netdev_err(dev, "PCI parity error detected\n");
		if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
			netdev_err(dev, "PCI target abort\n");
		if (cfg & PCI_STATUS_REC_TARGET_ABORT)
			netdev_err(dev, "PCI master acks target abort\n");
		if (cfg & PCI_STATUS_REC_MASTER_ABORT)
			netdev_err(dev, "PCI master abort\n");
		if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
			netdev_err(dev, "PCI system error SERR#\n");
		if (cfg & PCI_STATUS_DETECTED_PARITY)
			netdev_err(dev, "PCI parity error\n");

		/* Write the error bits back to clear them. */
		cfg &= (PCI_STATUS_PARITY |
			PCI_STATUS_SIG_TARGET_ABORT |
			PCI_STATUS_REC_TARGET_ABORT |
			PCI_STATUS_REC_MASTER_ABORT |
			PCI_STATUS_SIG_SYSTEM_ERROR |
			PCI_STATUS_DETECTED_PARITY);
		pci_write_config_word(cp->pdev, PCI_STATUS, cfg);
	}

	/* For all PCI errors, we should reset the chip. */
	return 1;
}

/* All non-normal interrupt conditions get serviced here.
 * Returns non-zero if we should just exit the interrupt
 * handler right now (ie. if we reset the card which invalidates
 * all of the other original irq status bits).
 */
static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
			    u32 status)
{
	if (status & INTR_RX_TAG_ERROR) {
		/* corrupt RX tag framing */
		netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
			     "corrupt rx tag framing\n");
		spin_lock(&cp->stat_lock[0]);
		cp->net_stats[0].rx_errors++;
		spin_unlock(&cp->stat_lock[0]);
		goto do_reset;
	}

	if (status & INTR_RX_LEN_MISMATCH) {
		/* length mismatch. */
		netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
			     "length mismatch for rx frame\n");
		spin_lock(&cp->stat_lock[0]);
		cp->net_stats[0].rx_errors++;
		spin_unlock(&cp->stat_lock[0]);
		goto do_reset;
	}

	if (status & INTR_PCS_STATUS) {
		if (cas_pcs_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_TX_MAC_STATUS) {
		if (cas_txmac_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_RX_MAC_STATUS) {
		if (cas_rxmac_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_MAC_CTRL_STATUS) {
		if (cas_mac_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_MIF_STATUS) {
		if (cas_mif_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_PCI_ERROR_STATUS) {
		if (cas_pci_interrupt(dev, cp, status))
			goto do_reset;
	}
	return 0;

do_reset:
#if 1
	atomic_inc(&cp->reset_task_pending);
	atomic_inc(&cp->reset_task_pending_all);
	netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status);
	schedule_work(&cp->reset_task);
#else
	atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
	netdev_err(dev, "reset called in cas_abnormal_irq\n");
	schedule_work(&cp->reset_task);
#endif
	return 1;
}

/* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when
 * determining whether to do a netif_stop/wakeup
 */
#define CAS_TABORT(x)      (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
#define CAS_ROUND_PAGE(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)
static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
				  const int len)
{
	unsigned long off = addr + len;

	if (CAS_TABORT(cp) == 1)
		return 0;
	if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
		return 0;
	return TX_TARGET_ABORT_LEN;
}

static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
{
	struct cas_tx_desc *txds;
	struct sk_buff **skbs;
	struct net_device *dev = cp->dev;
	int entry, count;

	spin_lock(&cp->tx_lock[ring]);
	txds = cp->init_txds[ring];
	skbs = cp->tx_skbs[ring];
	entry = cp->tx_old[ring];

	count = TX_BUFF_COUNT(ring, entry, limit);
	while (entry != limit) {
		struct sk_buff *skb = skbs[entry];
		dma_addr_t daddr;
		u32 dlen;
		int frag;

		if (!skb) {
			/* this should never occur */
			entry = TX_DESC_NEXT(ring, entry);
			continue;
		}

		/* however, we might get only a partial skb release. */
*/ 1888 count -= skb_shinfo(skb)->nr_frags + 1889 + cp->tx_tiny_use[ring][entry].nbufs + 1; 1890 if (count < 0) 1891 break; 1892 1893 netif_printk(cp, tx_done, KERN_DEBUG, cp->dev, 1894 "tx[%d] done, slot %d\n", ring, entry); 1895 1896 skbs[entry] = NULL; 1897 cp->tx_tiny_use[ring][entry].nbufs = 0; 1898 1899 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { 1900 struct cas_tx_desc *txd = txds + entry; 1901 1902 daddr = le64_to_cpu(txd->buffer); 1903 dlen = CAS_VAL(TX_DESC_BUFLEN, 1904 le64_to_cpu(txd->control)); 1905 pci_unmap_page(cp->pdev, daddr, dlen, 1906 PCI_DMA_TODEVICE); 1907 entry = TX_DESC_NEXT(ring, entry); 1908 1909 /* tiny buffer may follow */ 1910 if (cp->tx_tiny_use[ring][entry].used) { 1911 cp->tx_tiny_use[ring][entry].used = 0; 1912 entry = TX_DESC_NEXT(ring, entry); 1913 } 1914 } 1915 1916 spin_lock(&cp->stat_lock[ring]); 1917 cp->net_stats[ring].tx_packets++; 1918 cp->net_stats[ring].tx_bytes += skb->len; 1919 spin_unlock(&cp->stat_lock[ring]); 1920 dev_kfree_skb_irq(skb); 1921 } 1922 cp->tx_old[ring] = entry; 1923 1924 /* this is wrong for multiple tx rings. the net device needs 1925 * multiple queues for this to do the right thing. we wait 1926 * for 2*packets to be available when using tiny buffers 1927 */ 1928 if (netif_queue_stopped(dev) && 1929 (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))) 1930 netif_wake_queue(dev); 1931 spin_unlock(&cp->tx_lock[ring]); 1932} 1933 1934static void cas_tx(struct net_device *dev, struct cas *cp, 1935 u32 status) 1936{ 1937 int limit, ring; 1938#ifdef USE_TX_COMPWB 1939 u64 compwb = le64_to_cpu(cp->init_block->tx_compwb); 1940#endif 1941 netif_printk(cp, intr, KERN_DEBUG, cp->dev, 1942 "tx interrupt, status: 0x%x, %llx\n", 1943 status, (unsigned long long)compwb); 1944 /* process all the rings */ 1945 for (ring = 0; ring < N_TX_RINGS; ring++) { 1946#ifdef USE_TX_COMPWB 1947 /* use the completion writeback registers */ 1948 limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) | 1949 CAS_VAL(TX_COMPWB_LSB, compwb); 1950 compwb = TX_COMPWB_NEXT(compwb); 1951#else 1952 limit = readl(cp->regs + REG_TX_COMPN(ring)); 1953#endif 1954 if (cp->tx_old[ring] != limit) 1955 cas_tx_ringN(cp, ring, limit); 1956 } 1957} 1958 1959 1960static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, 1961 int entry, const u64 *words, 1962 struct sk_buff **skbref) 1963{ 1964 int dlen, hlen, len, i, alloclen; 1965 int off, swivel = RX_SWIVEL_OFF_VAL; 1966 struct cas_page *page; 1967 struct sk_buff *skb; 1968 void *addr, *crcaddr; 1969 __sum16 csum; 1970 char *p; 1971 1972 hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]); 1973 dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]); 1974 len = hlen + dlen; 1975 1976 if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT)) 1977 alloclen = len; 1978 else 1979 alloclen = max(hlen, RX_COPY_MIN); 1980 1981 skb = dev_alloc_skb(alloclen + swivel + cp->crc_size); 1982 if (skb == NULL) 1983 return -1; 1984 1985 *skbref = skb; 1986 skb_reserve(skb, swivel); 1987 1988 p = skb->data; 1989 addr = crcaddr = NULL; 1990 if (hlen) { /* always copy header pages */ 1991 i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]); 1992 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; 1993 off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 + 1994 swivel; 1995 1996 i = hlen; 1997 if (!dlen) /* attach FCS */ 1998 i += cp->crc_size; 1999 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i, 2000 PCI_DMA_FROMDEVICE); 2001 addr = cas_page_map(page->buffer); 2002 memcpy(p, addr + off, i); 2003 
pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i, 2004 PCI_DMA_FROMDEVICE); 2005 cas_page_unmap(addr); 2006 RX_USED_ADD(page, 0x100); 2007 p += hlen; 2008 swivel = 0; 2009 } 2010 2011 2012 if (alloclen < (hlen + dlen)) { 2013 skb_frag_t *frag = skb_shinfo(skb)->frags; 2014 2015 /* normal or jumbo packets. we use frags */ 2016 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]); 2017 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; 2018 off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel; 2019 2020 hlen = min(cp->page_size - off, dlen); 2021 if (hlen < 0) { 2022 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev, 2023 "rx page overflow: %d\n", hlen); 2024 dev_kfree_skb_irq(skb); 2025 return -1; 2026 } 2027 i = hlen; 2028 if (i == dlen) /* attach FCS */ 2029 i += cp->crc_size; 2030 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i, 2031 PCI_DMA_FROMDEVICE); 2032 2033 /* make sure we always copy a header */ 2034 swivel = 0; 2035 if (p == (char *) skb->data) { /* not split */ 2036 addr = cas_page_map(page->buffer); 2037 memcpy(p, addr + off, RX_COPY_MIN); 2038 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i, 2039 PCI_DMA_FROMDEVICE); 2040 cas_page_unmap(addr); 2041 off += RX_COPY_MIN; 2042 swivel = RX_COPY_MIN; 2043 RX_USED_ADD(page, cp->mtu_stride); 2044 } else { 2045 RX_USED_ADD(page, hlen); 2046 } 2047 skb_put(skb, alloclen); 2048 2049 skb_shinfo(skb)->nr_frags++; 2050 skb->data_len += hlen - swivel; 2051 skb->truesize += hlen - swivel; 2052 skb->len += hlen - swivel; 2053 2054 get_page(page->buffer); 2055 frag->page = page->buffer; 2056 frag->page_offset = off; 2057 frag->size = hlen - swivel; 2058 2059 /* any more data? */ 2060 if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) { 2061 hlen = dlen; 2062 off = 0; 2063 2064 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]); 2065 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; 2066 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr, 2067 hlen + cp->crc_size, 2068 PCI_DMA_FROMDEVICE); 2069 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr, 2070 hlen + cp->crc_size, 2071 PCI_DMA_FROMDEVICE); 2072 2073 skb_shinfo(skb)->nr_frags++; 2074 skb->data_len += hlen; 2075 skb->len += hlen; 2076 frag++; 2077 2078 get_page(page->buffer); 2079 frag->page = page->buffer; 2080 frag->page_offset = 0; 2081 frag->size = hlen; 2082 RX_USED_ADD(page, hlen + cp->crc_size); 2083 } 2084 2085 if (cp->crc_size) { 2086 addr = cas_page_map(page->buffer); 2087 crcaddr = addr + off + hlen; 2088 } 2089 2090 } else { 2091 /* copying packet */ 2092 if (!dlen) 2093 goto end_copy_pkt; 2094 2095 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]); 2096 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; 2097 off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel; 2098 hlen = min(cp->page_size - off, dlen); 2099 if (hlen < 0) { 2100 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev, 2101 "rx page overflow: %d\n", hlen); 2102 dev_kfree_skb_irq(skb); 2103 return -1; 2104 } 2105 i = hlen; 2106 if (i == dlen) /* attach FCS */ 2107 i += cp->crc_size; 2108 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i, 2109 PCI_DMA_FROMDEVICE); 2110 addr = cas_page_map(page->buffer); 2111 memcpy(p, addr + off, i); 2112 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i, 2113 PCI_DMA_FROMDEVICE); 2114 cas_page_unmap(addr); 2115 if (p == (char *) skb->data) /* not split */ 2116 RX_USED_ADD(page, cp->mtu_stride); 2117 else 2118 RX_USED_ADD(page, i); 2119 2120 /* any more data? 
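 * in the copy path a split packet's remainder lives in the next
 * page, so memcpy it (plus the FCS, if kept) directly behind the
 * first chunk.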
*/ 2121 if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) { 2122 p += hlen; 2123 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]); 2124 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; 2125 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr, 2126 dlen + cp->crc_size, 2127 PCI_DMA_FROMDEVICE); 2128 addr = cas_page_map(page->buffer); 2129 memcpy(p, addr, dlen + cp->crc_size); 2130 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr, 2131 dlen + cp->crc_size, 2132 PCI_DMA_FROMDEVICE); 2133 cas_page_unmap(addr); 2134 RX_USED_ADD(page, dlen + cp->crc_size); 2135 } 2136end_copy_pkt: 2137 if (cp->crc_size) { 2138 addr = NULL; 2139 crcaddr = skb->data + alloclen; 2140 } 2141 skb_put(skb, alloclen); 2142 } 2143 2144 csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3])); 2145 if (cp->crc_size) { 2146 /* checksum includes FCS. strip it out. */ 2147 csum = csum_fold(csum_partial(crcaddr, cp->crc_size, 2148 csum_unfold(csum))); 2149 if (addr) 2150 cas_page_unmap(addr); 2151 } 2152 skb->protocol = eth_type_trans(skb, cp->dev); 2153 if (skb->protocol == htons(ETH_P_IP)) { 2154 skb->csum = csum_unfold(~csum); 2155 skb->ip_summed = CHECKSUM_COMPLETE; 2156 } else 2157 skb->ip_summed = CHECKSUM_NONE; 2158 return len; 2159} 2160 2161 2162/* we can handle up to 64 rx flows at a time. we do the same thing 2163 * as nonreassm except that we batch up the buffers. 2164 * NOTE: we currently just treat each flow as a bunch of packets that 2165 * we pass up. a better way would be to coalesce the packets 2166 * into a jumbo packet. to do that, we need to do the following: 2167 * 1) the first packet will have a clean split between header and 2168 * data. save both. 2169 * 2) each time the next flow packet comes in, extend the 2170 * data length and merge the checksums. 2171 * 3) on flow release, fix up the header. 2172 * 4) make sure the higher layer doesn't care. 2173 * because packets get coalesced, we shouldn't run into fragment count 2174 * issues. 2175 */ 2176static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words, 2177 struct sk_buff *skb) 2178{ 2179 int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1); 2180 struct sk_buff_head *flow = &cp->rx_flows[flowid]; 2181 2182 /* this is protected at a higher layer, so no need to 2183 * do any additional locking here. stick the buffer 2184 * at the end. 2185 */ 2186 __skb_queue_tail(flow, skb); 2187 if (words[0] & RX_COMP1_RELEASE_FLOW) { 2188 while ((skb = __skb_dequeue(flow))) { 2189 cas_skb_release(skb); 2190 } 2191 } 2192} 2193 2194/* put rx descriptor back on ring. if a buffer is in use by a higher 2195 * layer, this will need to put in a replacement. 
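 * cas_page_swap() hands the ring a spare page, so the hardware
 * always owns a fresh buffer even while the old page is still
 * referenced by an skb up the stack.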
2196 */ 2197static void cas_post_page(struct cas *cp, const int ring, const int index) 2198{ 2199 cas_page_t *new; 2200 int entry; 2201 2202 entry = cp->rx_old[ring]; 2203 2204 new = cas_page_swap(cp, ring, index); 2205 cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr); 2206 cp->init_rxds[ring][entry].index = 2207 cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) | 2208 CAS_BASE(RX_INDEX_RING, ring)); 2209 2210 entry = RX_DESC_ENTRY(ring, entry + 1); 2211 cp->rx_old[ring] = entry; 2212 2213 if (entry % 4) 2214 return; 2215 2216 if (ring == 0) 2217 writel(entry, cp->regs + REG_RX_KICK); 2218 else if ((N_RX_DESC_RINGS > 1) && 2219 (cp->cas_flags & CAS_FLAG_REG_PLUS)) 2220 writel(entry, cp->regs + REG_PLUS_RX_KICK1); 2221} 2222 2223 2224/* only when things are bad */ 2225static int cas_post_rxds_ringN(struct cas *cp, int ring, int num) 2226{ 2227 unsigned int entry, last, count, released; 2228 int cluster; 2229 cas_page_t **page = cp->rx_pages[ring]; 2230 2231 entry = cp->rx_old[ring]; 2232 2233 netif_printk(cp, intr, KERN_DEBUG, cp->dev, 2234 "rxd[%d] interrupt, done: %d\n", ring, entry); 2235 2236 cluster = -1; 2237 count = entry & 0x3; 2238 last = RX_DESC_ENTRY(ring, num ? entry + num - 4: entry - 4); 2239 released = 0; 2240 while (entry != last) { 2241 /* make a new buffer if it's still in use */ 2242 if (page_count(page[entry]->buffer) > 1) { 2243 cas_page_t *new = cas_page_dequeue(cp); 2244 if (!new) { 2245 /* let the timer know that we need to 2246 * do this again 2247 */ 2248 cp->cas_flags |= CAS_FLAG_RXD_POST(ring); 2249 if (!timer_pending(&cp->link_timer)) 2250 mod_timer(&cp->link_timer, jiffies + 2251 CAS_LINK_FAST_TIMEOUT); 2252 cp->rx_old[ring] = entry; 2253 cp->rx_last[ring] = num ? num - released : 0; 2254 return -ENOMEM; 2255 } 2256 spin_lock(&cp->rx_inuse_lock); 2257 list_add(&page[entry]->list, &cp->rx_inuse_list); 2258 spin_unlock(&cp->rx_inuse_lock); 2259 cp->init_rxds[ring][entry].buffer = 2260 cpu_to_le64(new->dma_addr); 2261 page[entry] = new; 2262 2263 } 2264 2265 if (++count == 4) { 2266 cluster = entry; 2267 count = 0; 2268 } 2269 released++; 2270 entry = RX_DESC_ENTRY(ring, entry + 1); 2271 } 2272 cp->rx_old[ring] = entry; 2273 2274 if (cluster < 0) 2275 return 0; 2276 2277 if (ring == 0) 2278 writel(cluster, cp->regs + REG_RX_KICK); 2279 else if ((N_RX_DESC_RINGS > 1) && 2280 (cp->cas_flags & CAS_FLAG_REG_PLUS)) 2281 writel(cluster, cp->regs + REG_PLUS_RX_KICK1); 2282 return 0; 2283} 2284 2285 2286/* process a completion ring. packets are set up in three basic ways: 2287 * small packets: should be copied header + data in single buffer. 2288 * large packets: header and data in a single buffer. 2289 * split packets: header in a separate buffer from data. 2290 * data may be in multiple pages. data may be > 256 2291 * bytes but in a single page. 2292 * 2293 * NOTE: RX page posting is done in this routine as well. while there's 2294 * the capability of using multiple RX completion rings, it isn't 2295 * really worthwhile due to the fact that the page posting will 2296 * force serialization on the single descriptor ring. 
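 * note that posting is batched: cas_post_page() only writes the
 * kick register once four descriptors have accumulated.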
2297 */ 2298static int cas_rx_ringN(struct cas *cp, int ring, int budget) 2299{ 2300 struct cas_rx_comp *rxcs = cp->init_rxcs[ring]; 2301 int entry, drops; 2302 int npackets = 0; 2303 2304 netif_printk(cp, intr, KERN_DEBUG, cp->dev, 2305 "rx[%d] interrupt, done: %d/%d\n", 2306 ring, 2307 readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]); 2308 2309 entry = cp->rx_new[ring]; 2310 drops = 0; 2311 while (1) { 2312 struct cas_rx_comp *rxc = rxcs + entry; 2313 struct sk_buff *uninitialized_var(skb); 2314 int type, len; 2315 u64 words[4]; 2316 int i, dring; 2317 2318 words[0] = le64_to_cpu(rxc->word1); 2319 words[1] = le64_to_cpu(rxc->word2); 2320 words[2] = le64_to_cpu(rxc->word3); 2321 words[3] = le64_to_cpu(rxc->word4); 2322 2323 /* don't touch if still owned by hw */ 2324 type = CAS_VAL(RX_COMP1_TYPE, words[0]); 2325 if (type == 0) 2326 break; 2327 2328 /* hw hasn't cleared the zero bit yet */ 2329 if (words[3] & RX_COMP4_ZERO) { 2330 break; 2331 } 2332 2333 /* get info on the packet */ 2334 if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) { 2335 spin_lock(&cp->stat_lock[ring]); 2336 cp->net_stats[ring].rx_errors++; 2337 if (words[3] & RX_COMP4_LEN_MISMATCH) 2338 cp->net_stats[ring].rx_length_errors++; 2339 if (words[3] & RX_COMP4_BAD) 2340 cp->net_stats[ring].rx_crc_errors++; 2341 spin_unlock(&cp->stat_lock[ring]); 2342 2343 /* We'll just return it to Cassini. */ 2344 drop_it: 2345 spin_lock(&cp->stat_lock[ring]); 2346 ++cp->net_stats[ring].rx_dropped; 2347 spin_unlock(&cp->stat_lock[ring]); 2348 goto next; 2349 } 2350 2351 len = cas_rx_process_pkt(cp, rxc, entry, words, &skb); 2352 if (len < 0) { 2353 ++drops; 2354 goto drop_it; 2355 } 2356 2357 /* see if it's a flow re-assembly or not. the driver 2358 * itself handles release back up. 2359 */ 2360 if (RX_DONT_BATCH || (type == 0x2)) { 2361 /* non-reassm: these always get released */ 2362 cas_skb_release(skb); 2363 } else { 2364 cas_rx_flow_pkt(cp, words, skb); 2365 } 2366 2367 spin_lock(&cp->stat_lock[ring]); 2368 cp->net_stats[ring].rx_packets++; 2369 cp->net_stats[ring].rx_bytes += len; 2370 spin_unlock(&cp->stat_lock[ring]); 2371 2372 next: 2373 npackets++; 2374 2375 /* should it be released? 
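 * a completion entry can name up to three pages (header, data,
 * next); each packed index decodes into a descriptor ring via
 * CAS_VAL(RX_INDEX_RING, i) and an entry via CAS_VAL(RX_INDEX_NUM, i),
 * which is exactly what the three release blocks below do.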
*/ 2376 if (words[0] & RX_COMP1_RELEASE_HDR) { 2377 i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]); 2378 dring = CAS_VAL(RX_INDEX_RING, i); 2379 i = CAS_VAL(RX_INDEX_NUM, i); 2380 cas_post_page(cp, dring, i); 2381 } 2382 2383 if (words[0] & RX_COMP1_RELEASE_DATA) { 2384 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]); 2385 dring = CAS_VAL(RX_INDEX_RING, i); 2386 i = CAS_VAL(RX_INDEX_NUM, i); 2387 cas_post_page(cp, dring, i); 2388 } 2389 2390 if (words[0] & RX_COMP1_RELEASE_NEXT) { 2391 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]); 2392 dring = CAS_VAL(RX_INDEX_RING, i); 2393 i = CAS_VAL(RX_INDEX_NUM, i); 2394 cas_post_page(cp, dring, i); 2395 } 2396 2397 /* skip to the next entry */ 2398 entry = RX_COMP_ENTRY(ring, entry + 1 + 2399 CAS_VAL(RX_COMP1_SKIP, words[0])); 2400#ifdef USE_NAPI 2401 if (budget && (npackets >= budget)) 2402 break; 2403#endif 2404 } 2405 cp->rx_new[ring] = entry; 2406 2407 if (drops) 2408 netdev_info(cp->dev, "Memory squeeze, deferring packet\n"); 2409 return npackets; 2410} 2411 2412 2413/* put completion entries back on the ring */ 2414static void cas_post_rxcs_ringN(struct net_device *dev, 2415 struct cas *cp, int ring) 2416{ 2417 struct cas_rx_comp *rxc = cp->init_rxcs[ring]; 2418 int last, entry; 2419 2420 last = cp->rx_cur[ring]; 2421 entry = cp->rx_new[ring]; 2422 netif_printk(cp, intr, KERN_DEBUG, dev, 2423 "rxc[%d] interrupt, done: %d/%d\n", 2424 ring, readl(cp->regs + REG_RX_COMP_HEAD), entry); 2425 2426 /* zero and re-mark descriptors */ 2427 while (last != entry) { 2428 cas_rxc_init(rxc + last); 2429 last = RX_COMP_ENTRY(ring, last + 1); 2430 } 2431 cp->rx_cur[ring] = last; 2432 2433 if (ring == 0) 2434 writel(last, cp->regs + REG_RX_COMP_TAIL); 2435 else if (cp->cas_flags & CAS_FLAG_REG_PLUS) 2436 writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring)); 2437} 2438 2439 2440 2441/* cassini can use all four PCI interrupts for the completion ring. 2442 * rings 3 and 4 are identical 2443 */ 2444#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD) 2445static inline void cas_handle_irqN(struct net_device *dev, 2446 struct cas *cp, const u32 status, 2447 const int ring) 2448{ 2449 if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT)) 2450 cas_post_rxcs_ringN(dev, cp, ring); 2451} 2452 2453static irqreturn_t cas_interruptN(int irq, void *dev_id) 2454{ 2455 struct net_device *dev = dev_id; 2456 struct cas *cp = netdev_priv(dev); 2457 unsigned long flags; 2458 int ring; 2459 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring)); 2460 2461 /* check for shared irq */ 2462 if (status == 0) 2463 return IRQ_NONE; 2464 2465 ring = (irq == cp->pci_irq_INTC) ? 2 : 3; 2466 spin_lock_irqsave(&cp->lock, flags); 2467 if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ 2468#ifdef USE_NAPI 2469 cas_mask_intr(cp); 2470 napi_schedule(&cp->napi); 2471#else 2472 cas_rx_ringN(cp, ring, 0); 2473#endif 2474 status &= ~INTR_RX_DONE_ALT; 2475 } 2476 2477 if (status) 2478 cas_handle_irqN(dev, cp, status, ring); 2479 spin_unlock_irqrestore(&cp->lock, flags); 2480 return IRQ_HANDLED; 2481} 2482#endif 2483 2484#ifdef USE_PCI_INTB 2485/* everything but rx packets */ 2486static inline void cas_handle_irq1(struct cas *cp, const u32 status) 2487{ 2488 if (status & INTR_RX_BUF_UNAVAIL_1) { 2489 /* Frame arrived, no free RX buffers available. 2490 * NOTE: we can get this on a link transition. 
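 * the frame itself is already lost; all we can do is replenish
 * the ring and account for the drop.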
*/ 2491 cas_post_rxds_ringN(cp, 1, 0); 2492 spin_lock(&cp->stat_lock[1]); 2493 cp->net_stats[1].rx_dropped++; 2494 spin_unlock(&cp->stat_lock[1]); 2495 } 2496 2497 if (status & INTR_RX_BUF_AE_1) 2498 cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) - 2499 RX_AE_FREEN_VAL(1)); 2500 2501 if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL)) 2502 cas_post_rxcs_ringN(cp, 1); 2503} 2504 2505/* ring 2 handles a few more events than 3 and 4 */ 2506static irqreturn_t cas_interrupt1(int irq, void *dev_id) 2507{ 2508 struct net_device *dev = dev_id; 2509 struct cas *cp = netdev_priv(dev); 2510 unsigned long flags; 2511 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1)); 2512 2513 /* check for shared interrupt */ 2514 if (status == 0) 2515 return IRQ_NONE; 2516 2517 spin_lock_irqsave(&cp->lock, flags); 2518 if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ 2519#ifdef USE_NAPI 2520 cas_mask_intr(cp); 2521 napi_schedule(&cp->napi); 2522#else 2523 cas_rx_ringN(cp, 1, 0); 2524#endif 2525 status &= ~INTR_RX_DONE_ALT; 2526 } 2527 if (status) 2528 cas_handle_irq1(cp, status); 2529 spin_unlock_irqrestore(&cp->lock, flags); 2530 return IRQ_HANDLED; 2531} 2532#endif 2533 2534static inline void cas_handle_irq(struct net_device *dev, 2535 struct cas *cp, const u32 status) 2536{ 2537 /* housekeeping interrupts */ 2538 if (status & INTR_ERROR_MASK) 2539 cas_abnormal_irq(dev, cp, status); 2540 2541 if (status & INTR_RX_BUF_UNAVAIL) { 2542 /* Frame arrived, no free RX buffers available. 2543 * NOTE: we can get this on a link transition. 2544 */ 2545 cas_post_rxds_ringN(cp, 0, 0); 2546 spin_lock(&cp->stat_lock[0]); 2547 cp->net_stats[0].rx_dropped++; 2548 spin_unlock(&cp->stat_lock[0]); 2549 } else if (status & INTR_RX_BUF_AE) { 2550 cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) - 2551 RX_AE_FREEN_VAL(0)); 2552 } 2553 2554 if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL)) 2555 cas_post_rxcs_ringN(dev, cp, 0); 2556} 2557 2558static irqreturn_t cas_interrupt(int irq, void *dev_id) 2559{ 2560 struct net_device *dev = dev_id; 2561 struct cas *cp = netdev_priv(dev); 2562 unsigned long flags; 2563 u32 status = readl(cp->regs + REG_INTR_STATUS); 2564 2565 if (status == 0) 2566 return IRQ_NONE; 2567 2568 spin_lock_irqsave(&cp->lock, flags); 2569 if (status & (INTR_TX_ALL | INTR_TX_INTME)) { 2570 cas_tx(dev, cp, status); 2571 status &= ~(INTR_TX_ALL | INTR_TX_INTME); 2572 } 2573 2574 if (status & INTR_RX_DONE) { 2575#ifdef USE_NAPI 2576 cas_mask_intr(cp); 2577 napi_schedule(&cp->napi); 2578#else 2579 cas_rx_ringN(cp, 0, 0); 2580#endif 2581 status &= ~INTR_RX_DONE; 2582 } 2583 2584 if (status) 2585 cas_handle_irq(dev, cp, status); 2586 spin_unlock_irqrestore(&cp->lock, flags); 2587 return IRQ_HANDLED; 2588} 2589 2590 2591#ifdef USE_NAPI 2592static int cas_poll(struct napi_struct *napi, int budget) 2593{ 2594 struct cas *cp = container_of(napi, struct cas, napi); 2595 struct net_device *dev = cp->dev; 2596 int i, enable_intr, credits; 2597 u32 status = readl(cp->regs + REG_INTR_STATUS); 2598 unsigned long flags; 2599 2600 spin_lock_irqsave(&cp->lock, flags); 2601 cas_tx(dev, cp, status); 2602 spin_unlock_irqrestore(&cp->lock, flags); 2603 2604 /* NAPI rx packets. 
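 * a single poll call services every completion ring under one
 * budget; as a worked example, a budget of 64 over four rings
 * caps each cas_rx_ringN() call below at 64 / 4 = 16 packets.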
we spread the credits across all of the 2605 * rxc rings 2606 * 2607 * to make sure we're fair with the work we loop through each 2608 * ring N_RX_COMP_RING times with a request of 2609 * budget / N_RX_COMP_RINGS 2610 */ 2611 enable_intr = 1; 2612 credits = 0; 2613 for (i = 0; i < N_RX_COMP_RINGS; i++) { 2614 int j; 2615 for (j = 0; j < N_RX_COMP_RINGS; j++) { 2616 credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS); 2617 if (credits >= budget) { 2618 enable_intr = 0; 2619 goto rx_comp; 2620 } 2621 } 2622 } 2623 2624rx_comp: 2625 /* final rx completion */ 2626 spin_lock_irqsave(&cp->lock, flags); 2627 if (status) 2628 cas_handle_irq(dev, cp, status); 2629 2630#ifdef USE_PCI_INTB 2631 if (N_RX_COMP_RINGS > 1) { 2632 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1)); 2633 if (status) 2634 cas_handle_irq1(dev, cp, status); 2635 } 2636#endif 2637 2638#ifdef USE_PCI_INTC 2639 if (N_RX_COMP_RINGS > 2) { 2640 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2)); 2641 if (status) 2642 cas_handle_irqN(dev, cp, status, 2); 2643 } 2644#endif 2645 2646#ifdef USE_PCI_INTD 2647 if (N_RX_COMP_RINGS > 3) { 2648 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3)); 2649 if (status) 2650 cas_handle_irqN(dev, cp, status, 3); 2651 } 2652#endif 2653 spin_unlock_irqrestore(&cp->lock, flags); 2654 if (enable_intr) { 2655 napi_complete(napi); 2656 cas_unmask_intr(cp); 2657 } 2658 return credits; 2659} 2660#endif 2661 2662#ifdef CONFIG_NET_POLL_CONTROLLER 2663static void cas_netpoll(struct net_device *dev) 2664{ 2665 struct cas *cp = netdev_priv(dev); 2666 2667 cas_disable_irq(cp, 0); 2668 cas_interrupt(cp->pdev->irq, dev); 2669 cas_enable_irq(cp, 0); 2670 2671#ifdef USE_PCI_INTB 2672 if (N_RX_COMP_RINGS > 1) { 2673 /* cas_interrupt1(); */ 2674 } 2675#endif 2676#ifdef USE_PCI_INTC 2677 if (N_RX_COMP_RINGS > 2) { 2678 /* cas_interruptN(); */ 2679 } 2680#endif 2681#ifdef USE_PCI_INTD 2682 if (N_RX_COMP_RINGS > 3) { 2683 /* cas_interruptN(); */ 2684 } 2685#endif 2686} 2687#endif 2688 2689static void cas_tx_timeout(struct net_device *dev) 2690{ 2691 struct cas *cp = netdev_priv(dev); 2692 2693 netdev_err(dev, "transmit timed out, resetting\n"); 2694 if (!cp->hw_running) { 2695 netdev_err(dev, "hrm.. 
hw not running!\n"); 2696 return; 2697 } 2698 2699 netdev_err(dev, "MIF_STATE[%08x]\n", 2700 readl(cp->regs + REG_MIF_STATE_MACHINE)); 2701 2702 netdev_err(dev, "MAC_STATE[%08x]\n", 2703 readl(cp->regs + REG_MAC_STATE_MACHINE)); 2704 2705 netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n", 2706 readl(cp->regs + REG_TX_CFG), 2707 readl(cp->regs + REG_MAC_TX_STATUS), 2708 readl(cp->regs + REG_MAC_TX_CFG), 2709 readl(cp->regs + REG_TX_FIFO_PKT_CNT), 2710 readl(cp->regs + REG_TX_FIFO_WRITE_PTR), 2711 readl(cp->regs + REG_TX_FIFO_READ_PTR), 2712 readl(cp->regs + REG_TX_SM_1), 2713 readl(cp->regs + REG_TX_SM_2)); 2714 2715 netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n", 2716 readl(cp->regs + REG_RX_CFG), 2717 readl(cp->regs + REG_MAC_RX_STATUS), 2718 readl(cp->regs + REG_MAC_RX_CFG)); 2719 2720 netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n", 2721 readl(cp->regs + REG_HP_STATE_MACHINE), 2722 readl(cp->regs + REG_HP_STATUS0), 2723 readl(cp->regs + REG_HP_STATUS1), 2724 readl(cp->regs + REG_HP_STATUS2)); 2725 2726#if 1 2727 atomic_inc(&cp->reset_task_pending); 2728 atomic_inc(&cp->reset_task_pending_all); 2729 schedule_work(&cp->reset_task); 2730#else 2731 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); 2732 schedule_work(&cp->reset_task); 2733#endif 2734} 2735 2736static inline int cas_intme(int ring, int entry) 2737{ 2738 /* Algorithm: IRQ every 1/2 of descriptors. */ 2739 if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1))) 2740 return 1; 2741 return 0; 2742} 2743 2744 2745static void cas_write_txd(struct cas *cp, int ring, int entry, 2746 dma_addr_t mapping, int len, u64 ctrl, int last) 2747{ 2748 struct cas_tx_desc *txd = cp->init_txds[ring] + entry; 2749 2750 ctrl |= CAS_BASE(TX_DESC_BUFLEN, len); 2751 if (cas_intme(ring, entry)) 2752 ctrl |= TX_DESC_INTME; 2753 if (last) 2754 ctrl |= TX_DESC_EOF; 2755 txd->control = cpu_to_le64(ctrl); 2756 txd->buffer = cpu_to_le64(mapping); 2757} 2758 2759static inline void *tx_tiny_buf(struct cas *cp, const int ring, 2760 const int entry) 2761{ 2762 return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry; 2763} 2764 2765static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring, 2766 const int entry, const int tentry) 2767{ 2768 cp->tx_tiny_use[ring][tentry].nbufs++; 2769 cp->tx_tiny_use[ring][entry].used = 1; 2770 return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry; 2771} 2772 2773static inline int cas_xmit_tx_ringN(struct cas *cp, int ring, 2774 struct sk_buff *skb) 2775{ 2776 struct net_device *dev = cp->dev; 2777 int entry, nr_frags, frag, tabort, tentry; 2778 dma_addr_t mapping; 2779 unsigned long flags; 2780 u64 ctrl; 2781 u32 len; 2782 2783 spin_lock_irqsave(&cp->tx_lock[ring], flags); 2784 2785 /* This is a hard error, log it. */ 2786 if (TX_BUFFS_AVAIL(cp, ring) <= 2787 CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) { 2788 netif_stop_queue(dev); 2789 spin_unlock_irqrestore(&cp->tx_lock[ring], flags); 2790 netdev_err(dev, "BUG! 
Tx Ring full when queue awake!\n"); 2791 return 1; 2792 } 2793 2794 ctrl = 0; 2795 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2796 const u64 csum_start_off = skb_transport_offset(skb); 2797 const u64 csum_stuff_off = csum_start_off + skb->csum_offset; 2798 2799 ctrl = TX_DESC_CSUM_EN | 2800 CAS_BASE(TX_DESC_CSUM_START, csum_start_off) | 2801 CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off); 2802 } 2803 2804 entry = cp->tx_new[ring]; 2805 cp->tx_skbs[ring][entry] = skb; 2806 2807 nr_frags = skb_shinfo(skb)->nr_frags; 2808 len = skb_headlen(skb); 2809 mapping = pci_map_page(cp->pdev, virt_to_page(skb->data), 2810 offset_in_page(skb->data), len, 2811 PCI_DMA_TODEVICE); 2812 2813 tentry = entry; 2814 tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len); 2815 if (unlikely(tabort)) { 2816 /* NOTE: len is always > tabort */ 2817 cas_write_txd(cp, ring, entry, mapping, len - tabort, 2818 ctrl | TX_DESC_SOF, 0); 2819 entry = TX_DESC_NEXT(ring, entry); 2820 2821 skb_copy_from_linear_data_offset(skb, len - tabort, 2822 tx_tiny_buf(cp, ring, entry), tabort); 2823 mapping = tx_tiny_map(cp, ring, entry, tentry); 2824 cas_write_txd(cp, ring, entry, mapping, tabort, ctrl, 2825 (nr_frags == 0)); 2826 } else { 2827 cas_write_txd(cp, ring, entry, mapping, len, ctrl | 2828 TX_DESC_SOF, (nr_frags == 0)); 2829 } 2830 entry = TX_DESC_NEXT(ring, entry); 2831 2832 for (frag = 0; frag < nr_frags; frag++) { 2833 skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag]; 2834 2835 len = fragp->size; 2836 mapping = pci_map_page(cp->pdev, fragp->page, 2837 fragp->page_offset, len, 2838 PCI_DMA_TODEVICE); 2839 2840 tabort = cas_calc_tabort(cp, fragp->page_offset, len); 2841 if (unlikely(tabort)) { 2842 void *addr; 2843 2844 /* NOTE: len is always > tabort */ 2845 cas_write_txd(cp, ring, entry, mapping, len - tabort, 2846 ctrl, 0); 2847 entry = TX_DESC_NEXT(ring, entry); 2848 2849 addr = cas_page_map(fragp->page); 2850 memcpy(tx_tiny_buf(cp, ring, entry), 2851 addr + fragp->page_offset + len - tabort, 2852 tabort); 2853 cas_page_unmap(addr); 2854 mapping = tx_tiny_map(cp, ring, entry, tentry); 2855 len = tabort; 2856 } 2857 2858 cas_write_txd(cp, ring, entry, mapping, len, ctrl, 2859 (frag + 1 == nr_frags)); 2860 entry = TX_DESC_NEXT(ring, entry); 2861 } 2862 2863 cp->tx_new[ring] = entry; 2864 if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)) 2865 netif_stop_queue(dev); 2866 2867 netif_printk(cp, tx_queued, KERN_DEBUG, dev, 2868 "tx[%d] queued, slot %d, skblen %d, avail %d\n", 2869 ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring)); 2870 writel(entry, cp->regs + REG_TX_KICKN(ring)); 2871 spin_unlock_irqrestore(&cp->tx_lock[ring], flags); 2872 return 0; 2873} 2874 2875static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev) 2876{ 2877 struct cas *cp = netdev_priv(dev); 2878 2879 /* this is only used as a load-balancing hint, so it doesn't 2880 * need to be SMP safe 2881 */ 2882 static int ring; 2883 2884 if (skb_padto(skb, cp->min_frame_size)) 2885 return NETDEV_TX_OK; 2886 2887 /* XXX: we need some higher-level QoS hooks to steer packets to 2888 * individual queues. 2889 */ 2890 if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb)) 2891 return NETDEV_TX_BUSY; 2892 return NETDEV_TX_OK; 2893} 2894 2895static void cas_init_tx_dma(struct cas *cp) 2896{ 2897 u64 desc_dma = cp->block_dvma; 2898 unsigned long off; 2899 u32 val; 2900 int i; 2901 2902 /* set up tx completion writeback registers. 
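 * the chip DMAs the consumed-descriptor index straight into the
 * init block, letting cas_tx() read completions without a
 * register access; the writeback address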
must be 8-byte aligned */ 2903#ifdef USE_TX_COMPWB 2904 off = offsetof(struct cas_init_block, tx_compwb); 2905 writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI); 2906 writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW); 2907#endif 2908 2909 /* enable completion writebacks, enable paced mode, 2910 * disable read pipe, and disable pre-interrupt compwbs 2911 */ 2912 val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 | 2913 TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 | 2914 TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE | 2915 TX_CFG_INTR_COMPWB_DIS; 2916 2917 /* write out tx ring info and tx desc bases */ 2918 for (i = 0; i < MAX_TX_RINGS; i++) { 2919 off = (unsigned long) cp->init_txds[i] - 2920 (unsigned long) cp->init_block; 2921 2922 val |= CAS_TX_RINGN_BASE(i); 2923 writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i)); 2924 writel((desc_dma + off) & 0xffffffff, cp->regs + 2925 REG_TX_DBN_LOW(i)); 2926 /* don't zero out the kick register here as the system 2927 * will wedge 2928 */ 2929 } 2930 writel(val, cp->regs + REG_TX_CFG); 2931 2932 /* program max burst sizes. these numbers should be different 2933 * if doing QoS. 2934 */ 2935#ifdef USE_QOS 2936 writel(0x800, cp->regs + REG_TX_MAXBURST_0); 2937 writel(0x1600, cp->regs + REG_TX_MAXBURST_1); 2938 writel(0x2400, cp->regs + REG_TX_MAXBURST_2); 2939 writel(0x4800, cp->regs + REG_TX_MAXBURST_3); 2940#else 2941 writel(0x800, cp->regs + REG_TX_MAXBURST_0); 2942 writel(0x800, cp->regs + REG_TX_MAXBURST_1); 2943 writel(0x800, cp->regs + REG_TX_MAXBURST_2); 2944 writel(0x800, cp->regs + REG_TX_MAXBURST_3); 2945#endif 2946} 2947 2948/* Must be invoked under cp->lock. */ 2949static inline void cas_init_dma(struct cas *cp) 2950{ 2951 cas_init_tx_dma(cp); 2952 cas_init_rx_dma(cp); 2953} 2954 2955static void cas_process_mc_list(struct cas *cp) 2956{ 2957 u16 hash_table[16]; 2958 u32 crc; 2959 struct netdev_hw_addr *ha; 2960 int i = 1; 2961 2962 memset(hash_table, 0, sizeof(hash_table)); 2963 netdev_for_each_mc_addr(ha, cp->dev) { 2964 if (i <= CAS_MC_EXACT_MATCH_SIZE) { 2965 /* use the alternate mac address registers for the 2966 * first 15 multicast addresses 2967 */ 2968 writel((ha->addr[4] << 8) | ha->addr[5], 2969 cp->regs + REG_MAC_ADDRN(i*3 + 0)); 2970 writel((ha->addr[2] << 8) | ha->addr[3], 2971 cp->regs + REG_MAC_ADDRN(i*3 + 1)); 2972 writel((ha->addr[0] << 8) | ha->addr[1], 2973 cp->regs + REG_MAC_ADDRN(i*3 + 2)); 2974 i++; 2975 } 2976 else { 2977 /* use hw hash table for the next series of 2978 * multicast addresses 2979 */ 2980 crc = ether_crc_le(ETH_ALEN, ha->addr); 2981 crc >>= 24; 2982 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); 2983 } 2984 } 2985 for (i = 0; i < 16; i++) 2986 writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i)); 2987} 2988 2989/* Must be invoked under cp->lock. 
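 * (cas_set_multicast writes the same hash-table and
 * address-filter registers, so callers serialize on the main
 * lock)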
*/ 2990static u32 cas_setup_multicast(struct cas *cp) 2991{ 2992 u32 rxcfg = 0; 2993 int i; 2994 2995 if (cp->dev->flags & IFF_PROMISC) { 2996 rxcfg |= MAC_RX_CFG_PROMISC_EN; 2997 2998 } else if (cp->dev->flags & IFF_ALLMULTI) { 2999 for (i=0; i < 16; i++) 3000 writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i)); 3001 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN; 3002 3003 } else { 3004 cas_process_mc_list(cp); 3005 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN; 3006 } 3007 3008 return rxcfg; 3009} 3010 3011/* must be invoked under cp->stat_lock[N_TX_RINGS] */ 3012static void cas_clear_mac_err(struct cas *cp) 3013{ 3014 writel(0, cp->regs + REG_MAC_COLL_NORMAL); 3015 writel(0, cp->regs + REG_MAC_COLL_FIRST); 3016 writel(0, cp->regs + REG_MAC_COLL_EXCESS); 3017 writel(0, cp->regs + REG_MAC_COLL_LATE); 3018 writel(0, cp->regs + REG_MAC_TIMER_DEFER); 3019 writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK); 3020 writel(0, cp->regs + REG_MAC_RECV_FRAME); 3021 writel(0, cp->regs + REG_MAC_LEN_ERR); 3022 writel(0, cp->regs + REG_MAC_ALIGN_ERR); 3023 writel(0, cp->regs + REG_MAC_FCS_ERR); 3024 writel(0, cp->regs + REG_MAC_RX_CODE_ERR); 3025} 3026 3027 3028static void cas_mac_reset(struct cas *cp) 3029{ 3030 int i; 3031 3032 /* do both TX and RX reset */ 3033 writel(0x1, cp->regs + REG_MAC_TX_RESET); 3034 writel(0x1, cp->regs + REG_MAC_RX_RESET); 3035 3036 /* wait for TX */ 3037 i = STOP_TRIES; 3038 while (i-- > 0) { 3039 if (readl(cp->regs + REG_MAC_TX_RESET) == 0) 3040 break; 3041 udelay(10); 3042 } 3043 3044 /* wait for RX */ 3045 i = STOP_TRIES; 3046 while (i-- > 0) { 3047 if (readl(cp->regs + REG_MAC_RX_RESET) == 0) 3048 break; 3049 udelay(10); 3050 } 3051 3052 if (readl(cp->regs + REG_MAC_TX_RESET) | 3053 readl(cp->regs + REG_MAC_RX_RESET)) 3054 netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n", 3055 readl(cp->regs + REG_MAC_TX_RESET), 3056 readl(cp->regs + REG_MAC_RX_RESET), 3057 readl(cp->regs + REG_MAC_STATE_MACHINE)); 3058} 3059 3060 3061/* Must be invoked under cp->lock. */ 3062static void cas_init_mac(struct cas *cp) 3063{ 3064 unsigned char *e = &cp->dev->dev_addr[0]; 3065 int i; 3066#ifdef CONFIG_CASSINI_MULTICAST_REG_WRITE 3067 u32 rxcfg; 3068#endif 3069 cas_mac_reset(cp); 3070 3071 /* setup core arbitration weight register */ 3072 writel(CAWR_RR_DIS, cp->regs + REG_CAWR); 3073 3074 /* XXX Use pci_dma_burst_advice() */ 3075#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA) 3076 /* set the infinite burst register for chips that don't have 3077 * pci issues. 3078 */ 3079 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0) 3080 writel(INF_BURST_EN, cp->regs + REG_INF_BURST); 3081#endif 3082 3083 writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE); 3084 3085 writel(0x00, cp->regs + REG_MAC_IPG0); 3086 writel(0x08, cp->regs + REG_MAC_IPG1); 3087 writel(0x04, cp->regs + REG_MAC_IPG2); 3088 3089 /* change later for 802.3z */ 3090 writel(0x40, cp->regs + REG_MAC_SLOT_TIME); 3091 3092 /* min frame + FCS */ 3093 writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN); 3094 3095 /* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we 3096 * specify the maximum frame size to prevent RX tag errors on 3097 * oversized frames. 3098 */ 3099 writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) | 3100 CAS_BASE(MAC_FRAMESIZE_MAX_FRAME, 3101 (CAS_MAX_MTU + ETH_HLEN + 4 + 4)), 3102 cp->regs + REG_MAC_FRAMESIZE_MAX); 3103 3104 /* NOTE: crc_size is used as a surrogate for half-duplex. 3105 * workaround saturn half-duplex issue by increasing preamble 3106 * size to 65 bytes. 
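 * (hence 0x41 == 65 below, instead of the normal 0x07 == 7)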
3107 */ 3108 if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size) 3109 writel(0x41, cp->regs + REG_MAC_PA_SIZE); 3110 else 3111 writel(0x07, cp->regs + REG_MAC_PA_SIZE); 3112 writel(0x04, cp->regs + REG_MAC_JAM_SIZE); 3113 writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT); 3114 writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE); 3115 3116 writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED); 3117 3118 writel(0, cp->regs + REG_MAC_ADDR_FILTER0); 3119 writel(0, cp->regs + REG_MAC_ADDR_FILTER1); 3120 writel(0, cp->regs + REG_MAC_ADDR_FILTER2); 3121 writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK); 3122 writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK); 3123 3124 /* setup mac address in perfect filter array */ 3125 for (i = 0; i < 45; i++) 3126 writel(0x0, cp->regs + REG_MAC_ADDRN(i)); 3127 3128 writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0)); 3129 writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1)); 3130 writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2)); 3131 3132 writel(0x0001, cp->regs + REG_MAC_ADDRN(42)); 3133 writel(0xc200, cp->regs + REG_MAC_ADDRN(43)); 3134 writel(0x0180, cp->regs + REG_MAC_ADDRN(44)); 3135 3136#ifndef CONFIG_CASSINI_MULTICAST_REG_WRITE 3137 cp->mac_rx_cfg = cas_setup_multicast(cp); 3138#else 3139 /* WTZ: Do what Adrian did in cas_set_multicast. Doing 3140 * a writel does not seem to be necessary because Cassini 3141 * seems to preserve the configuration when we do the reset. 3142 * If the chip is in trouble, though, it is not clear if we 3143 * can really count on this behavior. cas_set_multicast uses 3144 * spin_lock_irqsave, but we are called only in cas_init_hw and 3145 * cas_init_hw is protected by cas_lock_all, which calls 3146 * spin_lock_irq (so it doesn't need to save the flags, and 3147 * we should be OK for the writel, as that is the only 3148 * difference). 3149 */ 3150 cp->mac_rx_cfg = rxcfg = cas_setup_multicast(cp); 3151 writel(rxcfg, cp->regs + REG_MAC_RX_CFG); 3152#endif 3153 spin_lock(&cp->stat_lock[N_TX_RINGS]); 3154 cas_clear_mac_err(cp); 3155 spin_unlock(&cp->stat_lock[N_TX_RINGS]); 3156 3157 /* Setup MAC interrupts. We want to get all of the interesting 3158 * counter expiration events, but we do not want to hear about 3159 * normal rx/tx as the DMA engine tells us that. 3160 */ 3161 writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK); 3162 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK); 3163 3164 /* Don't enable even the PAUSE interrupts for now, we 3165 * make no use of those events other than to record them. 3166 */ 3167 writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK); 3168} 3169 3170/* Must be invoked under cp->lock. */ 3171static void cas_init_pause_thresholds(struct cas *cp) 3172{ 3173 /* Calculate pause thresholds. 
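 * rx_pause_off is the FIFO fill level that triggers an XOFF pause
 * frame and rx_pause_on the level at which traffic is resumed.
 * as a worked example (assuming the usual 16 kB RX FIFO and a
 * 1500-byte MTU): max_frame = (1500 + 14 + 4 + 4 + 64) & ~63 = 1536,
 * so off = 16384 - 2*1536 = 13312 and on = 13312 - 1536 = 11776.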
Setting the OFF threshold to the 3174 * full RX fifo size effectively disables PAUSE generation 3175 */ 3176 if (cp->rx_fifo_size <= (2 * 1024)) { 3177 cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size; 3178 } else { 3179 int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63; 3180 if (max_frame * 3 > cp->rx_fifo_size) { 3181 cp->rx_pause_off = 7104; 3182 cp->rx_pause_on = 960; 3183 } else { 3184 int off = (cp->rx_fifo_size - (max_frame * 2)); 3185 int on = off - max_frame; 3186 cp->rx_pause_off = off; 3187 cp->rx_pause_on = on; 3188 } 3189 } 3190} 3191 3192static int cas_vpd_match(const void __iomem *p, const char *str) 3193{ 3194 int len = strlen(str) + 1; 3195 int i; 3196 3197 for (i = 0; i < len; i++) { 3198 if (readb(p + i) != str[i]) 3199 return 0; 3200 } 3201 return 1; 3202} 3203 3204 3205/* get the mac address by reading the vpd information in the rom. 3206 * also get the phy type and determine if there's an entropy generator. 3207 * NOTE: this is a bit convoluted for the following reasons: 3208 * 1) vpd info has order-dependent mac addresses for multinic cards 3209 * 2) the only way to determine the nic order is to use the slot 3210 * number. 3211 * 3) fiber cards don't have bridges, so their slot numbers don't 3212 * mean anything. 3213 * 4) we don't actually know we have a fiber card until after 3214 * the mac addresses are parsed. 3215 */ 3216static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr, 3217 const int offset) 3218{ 3219 void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START; 3220 void __iomem *base, *kstart; 3221 int i, len; 3222 int found = 0; 3223#define VPD_FOUND_MAC 0x01 3224#define VPD_FOUND_PHY 0x02 3225 3226 int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */ 3227 int mac_off = 0; 3228 3229 /* give us access to the PROM */ 3230 writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD, 3231 cp->regs + REG_BIM_LOCAL_DEV_EN); 3232 3233 /* check for an expansion rom */ 3234 if (readb(p) != 0x55 || readb(p + 1) != 0xaa) 3235 goto use_random_mac_addr; 3236 3237 /* search for beginning of vpd */ 3238 base = NULL; 3239 for (i = 2; i < EXPANSION_ROM_SIZE; i++) { 3240 /* check for PCIR */ 3241 if ((readb(p + i + 0) == 0x50) && 3242 (readb(p + i + 1) == 0x43) && 3243 (readb(p + i + 2) == 0x49) && 3244 (readb(p + i + 3) == 0x52)) { 3245 base = p + (readb(p + i + 8) | 3246 (readb(p + i + 9) << 8)); 3247 break; 3248 } 3249 } 3250 3251 if (!base || (readb(base) != 0x82)) 3252 goto use_random_mac_addr; 3253 3254 i = (readb(base + 1) | (readb(base + 2) << 8)) + 3; 3255 while (i < EXPANSION_ROM_SIZE) { 3256 if (readb(base + i) != 0x90) /* no vpd found */ 3257 goto use_random_mac_addr; 3258 3259 /* found a vpd field */ 3260 len = readb(base + i + 1) | (readb(base + i + 2) << 8); 3261 3262 /* extract keywords */ 3263 kstart = base + i + 3; 3264 p = kstart; 3265 while ((p - kstart) < len) { 3266 int klen = readb(p + 2); 3267 int j; 3268 char type; 3269 3270 p += 3; 3271 3272 /* look for the following things: 3273 * -- correct length == 29 3274 * 3 (type) + 2 (size) + 3275 * 18 (strlen("local-mac-address") + 1) + 3276 * 6 (mac addr) 3277 * -- VPD Instance 'I' 3278 * -- VPD Type Bytes 'B' 3279 * -- VPD data length == 6 3280 * -- property string == local-mac-address 3281 * 3282 * -- correct length == 24 3283 * 3 (type) + 2 (size) + 3284 * 12 (strlen("entropy-dev") + 1) + 3285 * 7 (strlen("vms110") + 1) 3286 * -- VPD Instance 'I' 3287 * -- VPD Type String 'B' 3288 * -- VPD data length == 7 3289 * -- property string == entropy-dev 3290 * 3291 * -- correct length == 
18 3292 * 3 (type) + 2 (size) + 3293 * 9 (strlen("phy-type") + 1) + 3294 * 4 (strlen("pcs") + 1) 3295 * -- VPD Instance 'I' 3296 * -- VPD Type String 'S' 3297 * -- VPD data length == 4 3298 * -- property string == phy-type 3299 * 3300 * -- correct length == 23 3301 * 3 (type) + 2 (size) + 3302 * 14 (strlen("phy-interface") + 1) + 3303 * 4 (strlen("pcs") + 1) 3304 * -- VPD Instance 'I' 3305 * -- VPD Type String 'S' 3306 * -- VPD data length == 4 3307 * -- property string == phy-interface 3308 */ 3309 if (readb(p) != 'I') 3310 goto next; 3311 3312 /* finally, check string and length */ 3313 type = readb(p + 3); 3314 if (type == 'B') { 3315 if ((klen == 29) && readb(p + 4) == 6 && 3316 cas_vpd_match(p + 5, 3317 "local-mac-address")) { 3318 if (mac_off++ > offset) 3319 goto next; 3320 3321 /* set mac address */ 3322 for (j = 0; j < 6; j++) 3323 dev_addr[j] = 3324 readb(p + 23 + j); 3325 goto found_mac; 3326 } 3327 } 3328 3329 if (type != 'S') 3330 goto next; 3331 3332#ifdef USE_ENTROPY_DEV 3333 if ((klen == 24) && 3334 cas_vpd_match(p + 5, "entropy-dev") && 3335 cas_vpd_match(p + 17, "vms110")) { 3336 cp->cas_flags |= CAS_FLAG_ENTROPY_DEV; 3337 goto next; 3338 } 3339#endif 3340 3341 if (found & VPD_FOUND_PHY) 3342 goto next; 3343 3344 if ((klen == 18) && readb(p + 4) == 4 && 3345 cas_vpd_match(p + 5, "phy-type")) { 3346 if (cas_vpd_match(p + 14, "pcs")) { 3347 phy_type = CAS_PHY_SERDES; 3348 goto found_phy; 3349 } 3350 } 3351 3352 if ((klen == 23) && readb(p + 4) == 4 && 3353 cas_vpd_match(p + 5, "phy-interface")) { 3354 if (cas_vpd_match(p + 19, "pcs")) { 3355 phy_type = CAS_PHY_SERDES; 3356 goto found_phy; 3357 } 3358 } 3359found_mac: 3360 found |= VPD_FOUND_MAC; 3361 goto next; 3362 3363found_phy: 3364 found |= VPD_FOUND_PHY; 3365 3366next: 3367 p += klen; 3368 } 3369 i += len + 3; 3370 } 3371 3372use_random_mac_addr: 3373 if (found & VPD_FOUND_MAC) 3374 goto done; 3375 3376 /* Sun MAC prefix then 3 random bytes. */ 3377 pr_info("MAC address not found in ROM VPD\n"); 3378 dev_addr[0] = 0x08; 3379 dev_addr[1] = 0x00; 3380 dev_addr[2] = 0x20; 3381 get_random_bytes(dev_addr + 3, 3); 3382 3383done: 3384 writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN); 3385 return phy_type; 3386} 3387 3388/* check pci invariants */ 3389static void cas_check_pci_invariants(struct cas *cp) 3390{ 3391 struct pci_dev *pdev = cp->pdev; 3392 3393 cp->cas_flags = 0; 3394 if ((pdev->vendor == PCI_VENDOR_ID_SUN) && 3395 (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) { 3396 if (pdev->revision >= CAS_ID_REVPLUS) 3397 cp->cas_flags |= CAS_FLAG_REG_PLUS; 3398 if (pdev->revision < CAS_ID_REVPLUS02u) 3399 cp->cas_flags |= CAS_FLAG_TARGET_ABORT; 3400 3401 /* Original Cassini supports HW CSUM, but it's not 3402 * enabled by default as it can trigger TX hangs. 3403 */ 3404 if (pdev->revision < CAS_ID_REV2) 3405 cp->cas_flags |= CAS_FLAG_NO_HW_CSUM; 3406 } else { 3407 /* Only sun has original cassini chips. */ 3408 cp->cas_flags |= CAS_FLAG_REG_PLUS; 3409 3410 /* We use a flag because the same phy might be externally 3411 * connected. 3412 */ 3413 if ((pdev->vendor == PCI_VENDOR_ID_NS) && 3414 (pdev->device == PCI_DEVICE_ID_NS_SATURN)) 3415 cp->cas_flags |= CAS_FLAG_SATURN; 3416 } 3417} 3418 3419 3420static int cas_check_invariants(struct cas *cp) 3421{ 3422 struct pci_dev *pdev = cp->pdev; 3423 u32 cfg; 3424 int i; 3425 3426 /* get page size for rx buffers. 
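 * probe once for a jumbo-order allocation; if it succeeds, commit
 * to the larger page order, otherwise fall back to PAGE_SIZE and
 * a correspondingly limited MTU.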
*/ 3427 cp->page_order = 0; 3428#ifdef USE_PAGE_ORDER 3429 if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) { 3430 /* see if we can allocate larger pages */ 3431 struct page *page = alloc_pages(GFP_ATOMIC, 3432 CAS_JUMBO_PAGE_SHIFT - 3433 PAGE_SHIFT); 3434 if (page) { 3435 __free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT); 3436 cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT; 3437 } else { 3438 printk("MTU limited to %d bytes\n", CAS_MAX_MTU); 3439 } 3440 } 3441#endif 3442 cp->page_size = (PAGE_SIZE << cp->page_order); 3443 3444 /* Fetch the FIFO configurations. */ 3445 cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64; 3446 cp->rx_fifo_size = RX_FIFO_SIZE; 3447 3448 /* finish phy determination. MDIO1 takes precedence over MDIO0 if 3449 * they're both connected. 3450 */ 3451 cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr, 3452 PCI_SLOT(pdev->devfn)); 3453 if (cp->phy_type & CAS_PHY_SERDES) { 3454 cp->cas_flags |= CAS_FLAG_1000MB_CAP; 3455 return 0; /* no more checking needed */ 3456 } 3457 3458 /* MII */ 3459 cfg = readl(cp->regs + REG_MIF_CFG); 3460 if (cfg & MIF_CFG_MDIO_1) { 3461 cp->phy_type = CAS_PHY_MII_MDIO1; 3462 } else if (cfg & MIF_CFG_MDIO_0) { 3463 cp->phy_type = CAS_PHY_MII_MDIO0; 3464 } 3465 3466 cas_mif_poll(cp, 0); 3467 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE); 3468 3469 for (i = 0; i < 32; i++) { 3470 u32 phy_id; 3471 int j; 3472 3473 for (j = 0; j < 3; j++) { 3474 cp->phy_addr = i; 3475 phy_id = cas_phy_read(cp, MII_PHYSID1) << 16; 3476 phy_id |= cas_phy_read(cp, MII_PHYSID2); 3477 if (phy_id && (phy_id != 0xFFFFFFFF)) { 3478 cp->phy_id = phy_id; 3479 goto done; 3480 } 3481 } 3482 } 3483 pr_err("MII phy did not respond [%08x]\n", 3484 readl(cp->regs + REG_MIF_STATE_MACHINE)); 3485 return -1; 3486 3487done: 3488 /* see if we can do gigabit */ 3489 cfg = cas_phy_read(cp, MII_BMSR); 3490 if ((cfg & CAS_BMSR_1000_EXTEND) && 3491 cas_phy_read(cp, CAS_MII_1000_EXTEND)) 3492 cp->cas_flags |= CAS_FLAG_1000MB_CAP; 3493 return 0; 3494} 3495 3496/* Must be invoked under cp->lock. */ 3497static inline void cas_start_dma(struct cas *cp) 3498{ 3499 int i; 3500 u32 val; 3501 int txfailed = 0; 3502 3503 /* enable dma */ 3504 val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN; 3505 writel(val, cp->regs + REG_TX_CFG); 3506 val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN; 3507 writel(val, cp->regs + REG_RX_CFG); 3508 3509 /* enable the mac */ 3510 val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN; 3511 writel(val, cp->regs + REG_MAC_TX_CFG); 3512 val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN; 3513 writel(val, cp->regs + REG_MAC_RX_CFG); 3514 3515 i = STOP_TRIES; 3516 while (i-- > 0) { 3517 val = readl(cp->regs + REG_MAC_TX_CFG); 3518 if ((val & MAC_TX_CFG_EN)) 3519 break; 3520 udelay(10); 3521 } 3522 if (i < 0) txfailed = 1; 3523 i = STOP_TRIES; 3524 while (i-- > 0) { 3525 val = readl(cp->regs + REG_MAC_RX_CFG); 3526 if ((val & MAC_RX_CFG_EN)) { 3527 if (txfailed) { 3528 netdev_err(cp->dev, 3529 "enabling mac failed [tx:%08x:%08x]\n", 3530 readl(cp->regs + REG_MIF_STATE_MACHINE), 3531 readl(cp->regs + REG_MAC_STATE_MACHINE)); 3532 } 3533 goto enable_rx_done; 3534 } 3535 udelay(10); 3536 } 3537 netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n", 3538 (txfailed ? 
"tx,rx" : "rx"), 3539 readl(cp->regs + REG_MIF_STATE_MACHINE), 3540 readl(cp->regs + REG_MAC_STATE_MACHINE)); 3541 3542enable_rx_done: 3543 cas_unmask_intr(cp); /* enable interrupts */ 3544 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK); 3545 writel(0, cp->regs + REG_RX_COMP_TAIL); 3546 3547 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { 3548 if (N_RX_DESC_RINGS > 1) 3549 writel(RX_DESC_RINGN_SIZE(1) - 4, 3550 cp->regs + REG_PLUS_RX_KICK1); 3551 3552 for (i = 1; i < N_RX_COMP_RINGS; i++) 3553 writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i)); 3554 } 3555} 3556 3557/* Must be invoked under cp->lock. */ 3558static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd, 3559 int *pause) 3560{ 3561 u32 val = readl(cp->regs + REG_PCS_MII_LPA); 3562 *fd = (val & PCS_MII_LPA_FD) ? 1 : 0; 3563 *pause = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00; 3564 if (val & PCS_MII_LPA_ASYM_PAUSE) 3565 *pause |= 0x10; 3566 *spd = 1000; 3567} 3568 3569/* Must be invoked under cp->lock. */ 3570static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd, 3571 int *pause) 3572{ 3573 u32 val; 3574 3575 *fd = 0; 3576 *spd = 10; 3577 *pause = 0; 3578 3579 /* use GMII registers */ 3580 val = cas_phy_read(cp, MII_LPA); 3581 if (val & CAS_LPA_PAUSE) 3582 *pause = 0x01; 3583 3584 if (val & CAS_LPA_ASYM_PAUSE) 3585 *pause |= 0x10; 3586 3587 if (val & LPA_DUPLEX) 3588 *fd = 1; 3589 if (val & LPA_100) 3590 *spd = 100; 3591 3592 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { 3593 val = cas_phy_read(cp, CAS_MII_1000_STATUS); 3594 if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF)) 3595 *spd = 1000; 3596 if (val & CAS_LPA_1000FULL) 3597 *fd = 1; 3598 } 3599} 3600 3601/* A link-up condition has occurred, initialize and enable the 3602 * rest of the chip. 3603 * 3604 * Must be invoked under cp->lock. 3605 */ 3606static void cas_set_link_modes(struct cas *cp) 3607{ 3608 u32 val; 3609 int full_duplex, speed, pause; 3610 3611 full_duplex = 0; 3612 speed = 10; 3613 pause = 0; 3614 3615 if (CAS_PHY_MII(cp->phy_type)) { 3616 cas_mif_poll(cp, 0); 3617 val = cas_phy_read(cp, MII_BMCR); 3618 if (val & BMCR_ANENABLE) { 3619 cas_read_mii_link_mode(cp, &full_duplex, &speed, 3620 &pause); 3621 } else { 3622 if (val & BMCR_FULLDPLX) 3623 full_duplex = 1; 3624 3625 if (val & BMCR_SPEED100) 3626 speed = 100; 3627 else if (val & CAS_BMCR_SPEED1000) 3628 speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ? 3629 1000 : 100; 3630 } 3631 cas_mif_poll(cp, 1); 3632 3633 } else { 3634 val = readl(cp->regs + REG_PCS_MII_CTRL); 3635 cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause); 3636 if ((val & PCS_MII_AUTONEG_EN) == 0) { 3637 if (val & PCS_MII_CTRL_DUPLEX) 3638 full_duplex = 1; 3639 } 3640 } 3641 3642 netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n", 3643 speed, full_duplex ? "full" : "half"); 3644 3645 val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED; 3646 if (CAS_PHY_MII(cp->phy_type)) { 3647 val |= MAC_XIF_MII_BUFFER_OUTPUT_EN; 3648 if (!full_duplex) 3649 val |= MAC_XIF_DISABLE_ECHO; 3650 } 3651 if (full_duplex) 3652 val |= MAC_XIF_FDPLX_LED; 3653 if (speed == 1000) 3654 val |= MAC_XIF_GMII_MODE; 3655 writel(val, cp->regs + REG_MAC_XIF_CFG); 3656 3657 /* deal with carrier and collision detect. 
*/ 3658 val = MAC_TX_CFG_IPG_EN; 3659 if (full_duplex) { 3660 val |= MAC_TX_CFG_IGNORE_CARRIER; 3661 val |= MAC_TX_CFG_IGNORE_COLL; 3662 } else { 3663#ifndef USE_CSMA_CD_PROTO 3664 val |= MAC_TX_CFG_NEVER_GIVE_UP_EN; 3665 val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM; 3666#endif 3667 } 3668 /* val now set up for REG_MAC_TX_CFG */ 3669 3670 /* If gigabit and half-duplex, enable carrier extension 3671 * mode. increase slot time to 512 bytes as well. 3672 * else, disable it and make sure slot time is 64 bytes. 3673 * also activate checksum bug workaround 3674 */ 3675 if ((speed == 1000) && !full_duplex) { 3676 writel(val | MAC_TX_CFG_CARRIER_EXTEND, 3677 cp->regs + REG_MAC_TX_CFG); 3678 3679 val = readl(cp->regs + REG_MAC_RX_CFG); 3680 val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */ 3681 writel(val | MAC_RX_CFG_CARRIER_EXTEND, 3682 cp->regs + REG_MAC_RX_CFG); 3683 3684 writel(0x200, cp->regs + REG_MAC_SLOT_TIME); 3685 3686 cp->crc_size = 4; 3687 /* minimum size gigabit frame at half duplex */ 3688 cp->min_frame_size = CAS_1000MB_MIN_FRAME; 3689 3690 } else { 3691 writel(val, cp->regs + REG_MAC_TX_CFG); 3692 3693 /* checksum bug workaround. don't strip FCS when in 3694 * half-duplex mode 3695 */ 3696 val = readl(cp->regs + REG_MAC_RX_CFG); 3697 if (full_duplex) { 3698 val |= MAC_RX_CFG_STRIP_FCS; 3699 cp->crc_size = 0; 3700 cp->min_frame_size = CAS_MIN_MTU; 3701 } else { 3702 val &= ~MAC_RX_CFG_STRIP_FCS; 3703 cp->crc_size = 4; 3704 cp->min_frame_size = CAS_MIN_FRAME; 3705 } 3706 writel(val & ~MAC_RX_CFG_CARRIER_EXTEND, 3707 cp->regs + REG_MAC_RX_CFG); 3708 writel(0x40, cp->regs + REG_MAC_SLOT_TIME); 3709 } 3710 3711 if (netif_msg_link(cp)) { 3712 if (pause & 0x01) { 3713 netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n", 3714 cp->rx_fifo_size, 3715 cp->rx_pause_off, 3716 cp->rx_pause_on); 3717 } else if (pause & 0x10) { 3718 netdev_info(cp->dev, "TX pause enabled\n"); 3719 } else { 3720 netdev_info(cp->dev, "Pause is disabled\n"); 3721 } 3722 } 3723 3724 val = readl(cp->regs + REG_MAC_CTRL_CFG); 3725 val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN); 3726 if (pause) { /* symmetric or asymmetric pause */ 3727 val |= MAC_CTRL_CFG_SEND_PAUSE_EN; 3728 if (pause & 0x01) { /* symmetric pause */ 3729 val |= MAC_CTRL_CFG_RECV_PAUSE_EN; 3730 } 3731 } 3732 writel(val, cp->regs + REG_MAC_CTRL_CFG); 3733 cas_start_dma(cp); 3734} 3735 3736/* Must be invoked under cp->lock. */ 3737static void cas_init_hw(struct cas *cp, int restart_link) 3738{ 3739 if (restart_link) 3740 cas_phy_init(cp); 3741 3742 cas_init_pause_thresholds(cp); 3743 cas_init_mac(cp); 3744 cas_init_dma(cp); 3745 3746 if (restart_link) { 3747 /* Default aneg parameters */ 3748 cp->timer_ticks = 0; 3749 cas_begin_auto_negotiation(cp, NULL); 3750 } else if (cp->lstate == link_up) { 3751 cas_set_link_modes(cp); 3752 netif_carrier_on(cp->dev); 3753 } 3754} 3755 3756/* Must be invoked under cp->lock. on earlier cassini boards, 3757 * SOFT_0 is tied to PCI reset. we use this to force a pci reset, 3758 * let it settle out, and then restore pci state. 3759 */ 3760static void cas_hard_reset(struct cas *cp) 3761{ 3762 writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN); 3763 udelay(20); 3764 pci_restore_state(cp->pdev); 3765} 3766 3767 3768static void cas_global_reset(struct cas *cp, int blkflag) 3769{ 3770 int limit; 3771 3772 /* issue a global reset. don't use RSTOUT. 
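 * the SW_RESET_TX/RX bits below reset only the tx/rx datapaths;
 * SW_RESET_BLOCK_PCS_SLINK additionally preserves the negotiated
 * PCS link state across the reset.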
*/ 3773 if (blkflag && !CAS_PHY_MII(cp->phy_type)) { 3774 /* For PCS, when the blkflag is set, we should set the 3775 * SW_RESET_BLOCK_PCS_SLINK bit to prevent the results of 3776 * the last autonegotiation from being cleared. We'll 3777 * need some special handling if the chip is set into a 3778 * loopback mode. 3779 */ 3780 writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK), 3781 cp->regs + REG_SW_RESET); 3782 } else { 3783 writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET); 3784 } 3785 3786 /* need to wait at least 3ms before polling register */ 3787 mdelay(3); 3788 3789 limit = STOP_TRIES; 3790 while (limit-- > 0) { 3791 u32 val = readl(cp->regs + REG_SW_RESET); 3792 if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0) 3793 goto done; 3794 udelay(10); 3795 } 3796 netdev_err(cp->dev, "sw reset failed\n"); 3797 3798done: 3799 /* enable various BIM interrupts */ 3800 writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE | 3801 BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG); 3802 3803 /* clear out pci error status mask for handled errors. 3804 * we don't deal with DMA counter overflows as they happen 3805 * all the time. 3806 */ 3807 writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO | 3808 PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE | 3809 PCI_ERR_BIM_DMA_READ), cp->regs + 3810 REG_PCI_ERR_STATUS_MASK); 3811 3812 /* set up for MII by default to address mac rx reset timeout 3813 * issue 3814 */ 3815 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE); 3816} 3817 3818static void cas_reset(struct cas *cp, int blkflag) 3819{ 3820 u32 val; 3821 3822 cas_mask_intr(cp); 3823 cas_global_reset(cp, blkflag); 3824 cas_mac_reset(cp); 3825 cas_entropy_reset(cp); 3826 3827 /* disable dma engines. */ 3828 val = readl(cp->regs + REG_TX_CFG); 3829 val &= ~TX_CFG_DMA_EN; 3830 writel(val, cp->regs + REG_TX_CFG); 3831 3832 val = readl(cp->regs + REG_RX_CFG); 3833 val &= ~RX_CFG_DMA_EN; 3834 writel(val, cp->regs + REG_RX_CFG); 3835 3836 /* program header parser */ 3837 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) || 3838 (CAS_HP_ALT_FIRMWARE == cas_prog_null)) { 3839 cas_load_firmware(cp, CAS_HP_FIRMWARE); 3840 } else { 3841 cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE); 3842 } 3843 3844 /* clear out error registers */ 3845 spin_lock(&cp->stat_lock[N_TX_RINGS]); 3846 cas_clear_mac_err(cp); 3847 spin_unlock(&cp->stat_lock[N_TX_RINGS]); 3848} 3849 3850/* Shut down the chip, must be called with pm_mutex held.
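 * (the link timer and any queued reset task are quiesced first,
 * so nothing respawns while the hardware is being torn down)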
*/ 3851static void cas_shutdown(struct cas *cp) 3852{ 3853 unsigned long flags; 3854 3855 /* Make us not-running to avoid timers respawning */ 3856 cp->hw_running = 0; 3857 3858 del_timer_sync(&cp->link_timer); 3859 3860 /* Stop the reset task */ 3861#if 0 3862 while (atomic_read(&cp->reset_task_pending_mtu) || 3863 atomic_read(&cp->reset_task_pending_spare) || 3864 atomic_read(&cp->reset_task_pending_all)) 3865 schedule(); 3866 3867#else 3868 while (atomic_read(&cp->reset_task_pending)) 3869 schedule(); 3870#endif 3871 /* Actually stop the chip */ 3872 cas_lock_all_save(cp, flags); 3873 cas_reset(cp, 0); 3874 if (cp->cas_flags & CAS_FLAG_SATURN) 3875 cas_phy_powerdown(cp); 3876 cas_unlock_all_restore(cp, flags); 3877} 3878 3879static int cas_change_mtu(struct net_device *dev, int new_mtu) 3880{ 3881 struct cas *cp = netdev_priv(dev); 3882 3883 if (new_mtu < CAS_MIN_MTU || new_mtu > CAS_MAX_MTU) 3884 return -EINVAL; 3885 3886 dev->mtu = new_mtu; 3887 if (!netif_running(dev) || !netif_device_present(dev)) 3888 return 0; 3889 3890 /* let the reset task handle it */ 3891#if 1 3892 atomic_inc(&cp->reset_task_pending); 3893 if ((cp->phy_type & CAS_PHY_SERDES)) { 3894 atomic_inc(&cp->reset_task_pending_all); 3895 } else { 3896 atomic_inc(&cp->reset_task_pending_mtu); 3897 } 3898 schedule_work(&cp->reset_task); 3899#else 3900 atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ? 3901 CAS_RESET_ALL : CAS_RESET_MTU); 3902 pr_err("reset called in cas_change_mtu\n"); 3903 schedule_work(&cp->reset_task); 3904#endif 3905 3906 flush_scheduled_work(); 3907 return 0; 3908} 3909 3910static void cas_clean_txd(struct cas *cp, int ring) 3911{ 3912 struct cas_tx_desc *txd = cp->init_txds[ring]; 3913 struct sk_buff *skb, **skbs = cp->tx_skbs[ring]; 3914 u64 daddr, dlen; 3915 int i, size; 3916 3917 size = TX_DESC_RINGN_SIZE(ring); 3918 for (i = 0; i < size; i++) { 3919 int frag; 3920 3921 if (skbs[i] == NULL) 3922 continue; 3923 3924 skb = skbs[i]; 3925 skbs[i] = NULL; 3926 3927 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { 3928 int ent = i & (size - 1); 3929 3930 /* first buffer is never a tiny buffer and so 3931 * needs to be unmapped. 3932 */ 3933 daddr = le64_to_cpu(txd[ent].buffer); 3934 dlen = CAS_VAL(TX_DESC_BUFLEN, 3935 le64_to_cpu(txd[ent].control)); 3936 pci_unmap_page(cp->pdev, daddr, dlen, 3937 PCI_DMA_TODEVICE); 3938 3939 if (frag != skb_shinfo(skb)->nr_frags) { 3940 i++; 3941 3942 /* next buffer might be a tiny buffer. 3943 * skip past it. 3944 */ 3945 ent = i & (size - 1); 3946 if (cp->tx_tiny_use[ring][ent].used) 3947 i++; 3948 } 3949 } 3950 dev_kfree_skb_any(skb); 3951 } 3952 3953 /* zero out tiny buf usage */ 3954 memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring])); 3955} 3956 3957/* freed on close */ 3958static inline void cas_free_rx_desc(struct cas *cp, int ring) 3959{ 3960 cas_page_t **page = cp->rx_pages[ring]; 3961 int i, size; 3962 3963 size = RX_DESC_RINGN_SIZE(ring); 3964 for (i = 0; i < size; i++) { 3965 if (page[i]) { 3966 cas_page_free(cp, page[i]); 3967 page[i] = NULL; 3968 } 3969 } 3970} 3971 3972static void cas_free_rxds(struct cas *cp) 3973{ 3974 int i; 3975 3976 for (i = 0; i < N_RX_DESC_RINGS; i++) 3977 cas_free_rx_desc(cp, i); 3978} 3979 3980/* Must be invoked under cp->lock.
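 * Cleans both directions: cas_clean_txd() unmaps every tx fragment
 * (stepping over the interleaved tiny buffers) and frees the skbs, then
 * the init block and the rx descriptor/completion rings are zeroed.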
*/ 3981static void cas_clean_rings(struct cas *cp) 3982{ 3983 int i; 3984 3985 /* need to clean all tx rings */ 3986 memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS); 3987 memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS); 3988 for (i = 0; i < N_TX_RINGS; i++) 3989 cas_clean_txd(cp, i); 3990 3991 /* zero out init block */ 3992 memset(cp->init_block, 0, sizeof(struct cas_init_block)); 3993 cas_clean_rxds(cp); 3994 cas_clean_rxcs(cp); 3995} 3996 3997/* allocated on open */ 3998static inline int cas_alloc_rx_desc(struct cas *cp, int ring) 3999{ 4000 cas_page_t **page = cp->rx_pages[ring]; 4001 int size, i = 0; 4002 4003 size = RX_DESC_RINGN_SIZE(ring); 4004 for (i = 0; i < size; i++) { 4005 if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL) 4006 return -1; 4007 } 4008 return 0; 4009} 4010 4011static int cas_alloc_rxds(struct cas *cp) 4012{ 4013 int i; 4014 4015 for (i = 0; i < N_RX_DESC_RINGS; i++) { 4016 if (cas_alloc_rx_desc(cp, i) < 0) { 4017 cas_free_rxds(cp); 4018 return -1; 4019 } 4020 } 4021 return 0; 4022} 4023 4024static void cas_reset_task(struct work_struct *work) 4025{ 4026 struct cas *cp = container_of(work, struct cas, reset_task); 4027#if 0 4028 int pending = atomic_read(&cp->reset_task_pending); 4029#else 4030 int pending_all = atomic_read(&cp->reset_task_pending_all); 4031 int pending_spare = atomic_read(&cp->reset_task_pending_spare); 4032 int pending_mtu = atomic_read(&cp->reset_task_pending_mtu); 4033 4034 if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) { 4035 /* We can have more tasks scheduled than actually 4036 * needed. 4037 */ 4038 atomic_dec(&cp->reset_task_pending); 4039 return; 4040 } 4041#endif 4042 /* The link went down; we reset the ring, but keep 4043 * DMA stopped. Use this function for reset 4044 * on error as well. 4045 */ 4046 if (cp->hw_running) { 4047 unsigned long flags; 4048 4049 /* Make sure we don't get interrupts or tx packets */ 4050 netif_device_detach(cp->dev); 4051 cas_lock_all_save(cp, flags); 4052 4053 if (cp->opened) { 4054 /* We call cas_spare_recover when we call cas_open, 4055 * but we do not initialize the lists cas_spare_recover 4056 * uses until cas_open is called. 4057 */ 4058 cas_spare_recover(cp, GFP_ATOMIC); 4059 } 4060#if 1 4061 /* test => only pending_spare set */ 4062 if (!pending_all && !pending_mtu) 4063 goto done; 4064#else 4065 if (pending == CAS_RESET_SPARE) 4066 goto done; 4067#endif 4068 /* when pending == CAS_RESET_ALL, the following 4069 * call to cas_init_hw will restart auto negotiation. 4070 * Setting the second argument of cas_reset to 4071 * !(pending == CAS_RESET_ALL) will set this argument 4072 * to 1 (avoiding reinitializing the PHY for the normal 4073 * PCS case) when auto negotiation is not restarted.
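 *
 * Concretely (an illustrative trace, not part of the original comment):
 *
 *   pending_all > 0:   cas_reset(cp, 0); cas_init_hw(cp, 1);
 *                      full reset, autonegotiation restarted
 *   only pending_mtu:  cas_reset(cp, 1); cas_init_hw(cp, 0);
 *                      PCS autonegotiation results preserved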
4074 */ 4075#if 1 4076 cas_reset(cp, !(pending_all > 0)); 4077 if (cp->opened) 4078 cas_clean_rings(cp); 4079 cas_init_hw(cp, (pending_all > 0)); 4080#else 4081 cas_reset(cp, !(pending == CAS_RESET_ALL)); 4082 if (cp->opened) 4083 cas_clean_rings(cp); 4084 cas_init_hw(cp, pending == CAS_RESET_ALL); 4085#endif 4086 4087done: 4088 cas_unlock_all_restore(cp, flags); 4089 netif_device_attach(cp->dev); 4090 } 4091#if 1 4092 atomic_sub(pending_all, &cp->reset_task_pending_all); 4093 atomic_sub(pending_spare, &cp->reset_task_pending_spare); 4094 atomic_sub(pending_mtu, &cp->reset_task_pending_mtu); 4095 atomic_dec(&cp->reset_task_pending); 4096#else 4097 atomic_set(&cp->reset_task_pending, 0); 4098#endif 4099} 4100 4101static void cas_link_timer(unsigned long data) 4102{ 4103 struct cas *cp = (struct cas *) data; 4104 int mask, pending = 0, reset = 0; 4105 unsigned long flags; 4106 4107 if (link_transition_timeout != 0 && 4108 cp->link_transition_jiffies_valid && 4109 ((jiffies - cp->link_transition_jiffies) > 4110 (link_transition_timeout))) { 4111 /* One-second counter so link-down workaround doesn't 4112 * cause resets to occur so fast as to fool the switch 4113 * into thinking the link is down. 4114 */ 4115 cp->link_transition_jiffies_valid = 0; 4116 } 4117 4118 if (!cp->hw_running) 4119 return; 4120 4121 spin_lock_irqsave(&cp->lock, flags); 4122 cas_lock_tx(cp); 4123 cas_entropy_gather(cp); 4124 4125 /* If the link task is still pending, we just 4126 * reschedule the link timer 4127 */ 4128#if 1 4129 if (atomic_read(&cp->reset_task_pending_all) || 4130 atomic_read(&cp->reset_task_pending_spare) || 4131 atomic_read(&cp->reset_task_pending_mtu)) 4132 goto done; 4133#else 4134 if (atomic_read(&cp->reset_task_pending)) 4135 goto done; 4136#endif 4137 4138 /* check for rx cleaning */ 4139 if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) { 4140 int i, rmask; 4141 4142 for (i = 0; i < MAX_RX_DESC_RINGS; i++) { 4143 rmask = CAS_FLAG_RXD_POST(i); 4144 if ((mask & rmask) == 0) 4145 continue; 4146 4147 /* post_rxds will do a mod_timer */ 4148 if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) { 4149 pending = 1; 4150 continue; 4151 } 4152 cp->cas_flags &= ~rmask; 4153 } 4154 } 4155 4156 if (CAS_PHY_MII(cp->phy_type)) { 4157 u16 bmsr; 4158 cas_mif_poll(cp, 0); 4159 bmsr = cas_phy_read(cp, MII_BMSR); 4160 /* WTZ: Solaris driver reads this twice, but that 4161 * may be due to the PCS case and the use of a 4162 * common implementation. Read it twice here to be 4163 * safe. 
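 * (MII_BMSR's link-status bit is latched-low per IEEE 802.3, so a first
 * read can return a stale link-fail indication; the second read reflects
 * the current state.)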
4164 */ 4165 bmsr = cas_phy_read(cp, MII_BMSR); 4166 cas_mif_poll(cp, 1); 4167 readl(cp->regs + REG_MIF_STATUS); /* avoid dups */ 4168 reset = cas_mii_link_check(cp, bmsr); 4169 } else { 4170 reset = cas_pcs_link_check(cp); 4171 } 4172 4173 if (reset) 4174 goto done; 4175 4176 /* check for tx state machine confusion */ 4177 if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) { 4178 u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE); 4179 u32 wptr, rptr; 4180 int tlm = CAS_VAL(MAC_SM_TLM, val); 4181 4182 if (((tlm == 0x5) || (tlm == 0x3)) && 4183 (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) { 4184 netif_printk(cp, tx_err, KERN_DEBUG, cp->dev, 4185 "tx err: MAC_STATE[%08x]\n", val); 4186 reset = 1; 4187 goto done; 4188 } 4189 4190 val = readl(cp->regs + REG_TX_FIFO_PKT_CNT); 4191 wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR); 4192 rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR); 4193 if ((val == 0) && (wptr != rptr)) { 4194 netif_printk(cp, tx_err, KERN_DEBUG, cp->dev, 4195 "tx err: TX_FIFO[%08x:%08x:%08x]\n", 4196 val, wptr, rptr); 4197 reset = 1; 4198 } 4199 4200 if (reset) 4201 cas_hard_reset(cp); 4202 } 4203 4204done: 4205 if (reset) { 4206#if 1 4207 atomic_inc(&cp->reset_task_pending); 4208 atomic_inc(&cp->reset_task_pending_all); 4209 schedule_work(&cp->reset_task); 4210#else 4211 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); 4212 pr_err("reset called in cas_link_timer\n"); 4213 schedule_work(&cp->reset_task); 4214#endif 4215 } 4216 4217 if (!pending) 4218 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT); 4219 cas_unlock_tx(cp); 4220 spin_unlock_irqrestore(&cp->lock, flags); 4221} 4222 4223/* tiny buffers are used to avoid target abort issues with 4224 * older cassini's 4225 */ 4226static void cas_tx_tiny_free(struct cas *cp) 4227{ 4228 struct pci_dev *pdev = cp->pdev; 4229 int i; 4230 4231 for (i = 0; i < N_TX_RINGS; i++) { 4232 if (!cp->tx_tiny_bufs[i]) 4233 continue; 4234 4235 pci_free_consistent(pdev, TX_TINY_BUF_BLOCK, 4236 cp->tx_tiny_bufs[i], 4237 cp->tx_tiny_dvma[i]); 4238 cp->tx_tiny_bufs[i] = NULL; 4239 } 4240} 4241 4242static int cas_tx_tiny_alloc(struct cas *cp) 4243{ 4244 struct pci_dev *pdev = cp->pdev; 4245 int i; 4246 4247 for (i = 0; i < N_TX_RINGS; i++) { 4248 cp->tx_tiny_bufs[i] = 4249 pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK, 4250 &cp->tx_tiny_dvma[i]); 4251 if (!cp->tx_tiny_bufs[i]) { 4252 cas_tx_tiny_free(cp); 4253 return -1; 4254 } 4255 } 4256 return 0; 4257} 4258 4259 4260static int cas_open(struct net_device *dev) 4261{ 4262 struct cas *cp = netdev_priv(dev); 4263 int hw_was_up, err; 4264 unsigned long flags; 4265 4266 mutex_lock(&cp->pm_mutex); 4267 4268 hw_was_up = cp->hw_running; 4269 4270 /* The power-management mutex protects the hw_running 4271 * etc. state so it is safe to do this bit without cp->lock 4272 */ 4273 if (!cp->hw_running) { 4274 /* Reset the chip */ 4275 cas_lock_all_save(cp, flags); 4276 /* We set the second arg to cas_reset to zero 4277 * because cas_init_hw below will have its second 4278 * argument set to non-zero, which will force 4279 * autonegotiation to start. 
4280 */ 4281 cas_reset(cp, 0); 4282 cp->hw_running = 1; 4283 cas_unlock_all_restore(cp, flags); 4284 } 4285 4286 err = -ENOMEM; 4287 if (cas_tx_tiny_alloc(cp) < 0) 4288 goto err_unlock; 4289 4290 /* alloc rx descriptors */ 4291 if (cas_alloc_rxds(cp) < 0) 4292 goto err_tx_tiny; 4293 4294 /* allocate spares */ 4295 cas_spare_init(cp); 4296 cas_spare_recover(cp, GFP_KERNEL); 4297 4298 /* We can now request the interrupt as we know it's masked 4299 * on the controller. cassini+ has up to 4 interrupts 4300 * that can be used, but you need to do explicit pci interrupt 4301 * mapping to expose them 4302 */ 4303 if (request_irq(cp->pdev->irq, cas_interrupt, 4304 IRQF_SHARED, dev->name, (void *) dev)) { 4305 netdev_err(cp->dev, "failed to request irq !\n"); 4306 err = -EAGAIN; 4307 goto err_spare; 4308 } 4309 4310#ifdef USE_NAPI 4311 napi_enable(&cp->napi); 4312#endif 4313 /* init hw */ 4314 cas_lock_all_save(cp, flags); 4315 cas_clean_rings(cp); 4316 cas_init_hw(cp, !hw_was_up); 4317 cp->opened = 1; 4318 cas_unlock_all_restore(cp, flags); 4319 4320 netif_start_queue(dev); 4321 mutex_unlock(&cp->pm_mutex); 4322 return 0; 4323 4324err_spare: 4325 cas_spare_free(cp); 4326 cas_free_rxds(cp); 4327err_tx_tiny: 4328 cas_tx_tiny_free(cp); 4329err_unlock: 4330 mutex_unlock(&cp->pm_mutex); 4331 return err; 4332} 4333 4334static int cas_close(struct net_device *dev) 4335{ 4336 unsigned long flags; 4337 struct cas *cp = netdev_priv(dev); 4338 4339#ifdef USE_NAPI 4340 napi_disable(&cp->napi); 4341#endif 4342 /* Make sure we don't get distracted by suspend/resume */ 4343 mutex_lock(&cp->pm_mutex); 4344 4345 netif_stop_queue(dev); 4346 4347 /* Stop traffic, mark us closed */ 4348 cas_lock_all_save(cp, flags); 4349 cp->opened = 0; 4350 cas_reset(cp, 0); 4351 cas_phy_init(cp); 4352 cas_begin_auto_negotiation(cp, NULL); 4353 cas_clean_rings(cp); 4354 cas_unlock_all_restore(cp, flags); 4355 4356 free_irq(cp->pdev->irq, (void *) dev); 4357 cas_spare_free(cp); 4358 cas_free_rxds(cp); 4359 cas_tx_tiny_free(cp); 4360 mutex_unlock(&cp->pm_mutex); 4361 return 0; 4362} 4363 4364static struct { 4365 const char name[ETH_GSTRING_LEN]; 4366} ethtool_cassini_statnames[] = { 4367 {"collisions"}, 4368 {"rx_bytes"}, 4369 {"rx_crc_errors"}, 4370 {"rx_dropped"}, 4371 {"rx_errors"}, 4372 {"rx_fifo_errors"}, 4373 {"rx_frame_errors"}, 4374 {"rx_length_errors"}, 4375 {"rx_over_errors"}, 4376 {"rx_packets"}, 4377 {"tx_aborted_errors"}, 4378 {"tx_bytes"}, 4379 {"tx_dropped"}, 4380 {"tx_errors"}, 4381 {"tx_fifo_errors"}, 4382 {"tx_packets"} 4383}; 4384#define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames) 4385 4386static struct { 4387 const int offsets; /* neg. 
values for 2nd arg to cas_read_phy */ 4388} ethtool_register_table[] = { 4389 {-MII_BMSR}, 4390 {-MII_BMCR}, 4391 {REG_CAWR}, 4392 {REG_INF_BURST}, 4393 {REG_BIM_CFG}, 4394 {REG_RX_CFG}, 4395 {REG_HP_CFG}, 4396 {REG_MAC_TX_CFG}, 4397 {REG_MAC_RX_CFG}, 4398 {REG_MAC_CTRL_CFG}, 4399 {REG_MAC_XIF_CFG}, 4400 {REG_MIF_CFG}, 4401 {REG_PCS_CFG}, 4402 {REG_SATURN_PCFG}, 4403 {REG_PCS_MII_STATUS}, 4404 {REG_PCS_STATE_MACHINE}, 4405 {REG_MAC_COLL_EXCESS}, 4406 {REG_MAC_COLL_LATE} 4407}; 4408#define CAS_REG_LEN ARRAY_SIZE(ethtool_register_table) 4409#define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN) 4410 4411static void cas_read_regs(struct cas *cp, u8 *ptr, int len) 4412{ 4413 u8 *p; 4414 int i; 4415 unsigned long flags; 4416 4417 spin_lock_irqsave(&cp->lock, flags); 4418 for (i = 0, p = ptr; i < len; i++, p += sizeof(u32)) { 4419 u16 hval; 4420 u32 val; 4421 if (ethtool_register_table[i].offsets < 0) { 4422 hval = cas_phy_read(cp, 4423 -ethtool_register_table[i].offsets); 4424 val = hval; 4425 } else { 4426 val = readl(cp->regs + ethtool_register_table[i].offsets); 4427 } 4428 memcpy(p, (u8 *)&val, sizeof(u32)); 4429 } 4430 spin_unlock_irqrestore(&cp->lock, flags); 4431} 4432 4433static struct net_device_stats *cas_get_stats(struct net_device *dev) 4434{ 4435 struct cas *cp = netdev_priv(dev); 4436 struct net_device_stats *stats = cp->net_stats; 4437 unsigned long flags; 4438 int i; 4439 unsigned long tmp; 4440 4441 /* we collate all of the stats into net_stats[N_TX_RINGS] */ 4442 if (!cp->hw_running) 4443 return stats + N_TX_RINGS; 4444 4445 /* collect outstanding stats */ 4446 /* WTZ: the Cassini spec gives these as 16 bit counters but 4447 * stored in 32-bit words. Added a mask of 0xffff to be safe, 4448 * in case the chip somehow puts any garbage in the other bits. 4449 * Also, counter usage didn't seem to match what Adrian did 4450 * in the parts of the code that set these quantities. Made 4451 * that consistent.
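 *
 * So each hardware counter is consumed as, e.g. (illustrative only):
 *
 *   tmp = readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff;
 *
 * and cas_clear_mac_err() below zeroes the registers once their values
 * have been folded into net_stats[N_TX_RINGS].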
4452 */ 4453 spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags); 4454 stats[N_TX_RINGS].rx_crc_errors += 4455 readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff; 4456 stats[N_TX_RINGS].rx_frame_errors += 4457 readl(cp->regs + REG_MAC_ALIGN_ERR) &0xffff; 4458 stats[N_TX_RINGS].rx_length_errors += 4459 readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff; 4460#if 1 4461 tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) + 4462 (readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff); 4463 stats[N_TX_RINGS].tx_aborted_errors += tmp; 4464 stats[N_TX_RINGS].collisions += 4465 tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff); 4466#else 4467 stats[N_TX_RINGS].tx_aborted_errors += 4468 readl(cp->regs + REG_MAC_COLL_EXCESS); 4469 stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) + 4470 readl(cp->regs + REG_MAC_COLL_LATE); 4471#endif 4472 cas_clear_mac_err(cp); 4473 4474 /* saved bits that are unique to ring 0 */ 4475 spin_lock(&cp->stat_lock[0]); 4476 stats[N_TX_RINGS].collisions += stats[0].collisions; 4477 stats[N_TX_RINGS].rx_over_errors += stats[0].rx_over_errors; 4478 stats[N_TX_RINGS].rx_frame_errors += stats[0].rx_frame_errors; 4479 stats[N_TX_RINGS].rx_fifo_errors += stats[0].rx_fifo_errors; 4480 stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors; 4481 stats[N_TX_RINGS].tx_fifo_errors += stats[0].tx_fifo_errors; 4482 spin_unlock(&cp->stat_lock[0]); 4483 4484 for (i = 0; i < N_TX_RINGS; i++) { 4485 spin_lock(&cp->stat_lock[i]); 4486 stats[N_TX_RINGS].rx_length_errors += 4487 stats[i].rx_length_errors; 4488 stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors; 4489 stats[N_TX_RINGS].rx_packets += stats[i].rx_packets; 4490 stats[N_TX_RINGS].tx_packets += stats[i].tx_packets; 4491 stats[N_TX_RINGS].rx_bytes += stats[i].rx_bytes; 4492 stats[N_TX_RINGS].tx_bytes += stats[i].tx_bytes; 4493 stats[N_TX_RINGS].rx_errors += stats[i].rx_errors; 4494 stats[N_TX_RINGS].tx_errors += stats[i].tx_errors; 4495 stats[N_TX_RINGS].rx_dropped += stats[i].rx_dropped; 4496 stats[N_TX_RINGS].tx_dropped += stats[i].tx_dropped; 4497 memset(stats + i, 0, sizeof(struct net_device_stats)); 4498 spin_unlock(&cp->stat_lock[i]); 4499 } 4500 spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags); 4501 return stats + N_TX_RINGS; 4502} 4503 4504 4505static void cas_set_multicast(struct net_device *dev) 4506{ 4507 struct cas *cp = netdev_priv(dev); 4508 u32 rxcfg, rxcfg_new; 4509 unsigned long flags; 4510 int limit = STOP_TRIES; 4511 4512 if (!cp->hw_running) 4513 return; 4514 4515 spin_lock_irqsave(&cp->lock, flags); 4516 rxcfg = readl(cp->regs + REG_MAC_RX_CFG); 4517 4518 /* disable RX MAC and wait for completion */ 4519 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); 4520 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) { 4521 if (!limit--) 4522 break; 4523 udelay(10); 4524 } 4525 4526 /* disable hash filter and wait for completion */ 4527 limit = STOP_TRIES; 4528 rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN); 4529 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); 4530 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) { 4531 if (!limit--) 4532 break; 4533 udelay(10); 4534 } 4535 4536 /* program hash filters */ 4537 cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp); 4538 rxcfg |= rxcfg_new; 4539 writel(rxcfg, cp->regs + REG_MAC_RX_CFG); 4540 spin_unlock_irqrestore(&cp->lock, flags); 4541} 4542 4543static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 4544{ 4545 struct cas *cp = netdev_priv(dev); 4546 
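	/* note: the three strncpy calls below are all bounded by
	 * ETHTOOL_BUSINFO_LEN; sizeof() of each destination field would be
	 * the more defensive bound, though the constant comfortably fits
	 * these strings.
	 */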
strncpy(info->driver, DRV_MODULE_NAME, ETHTOOL_BUSINFO_LEN); 4547 strncpy(info->version, DRV_MODULE_VERSION, ETHTOOL_BUSINFO_LEN); 4548 info->fw_version[0] = '\0'; 4549 strncpy(info->bus_info, pci_name(cp->pdev), ETHTOOL_BUSINFO_LEN); 4550 info->regdump_len = cp->casreg_len < CAS_MAX_REGS ? 4551 cp->casreg_len : CAS_MAX_REGS; 4552 info->n_stats = CAS_NUM_STAT_KEYS; 4553} 4554 4555static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 4556{ 4557 struct cas *cp = netdev_priv(dev); 4558 u16 bmcr; 4559 int full_duplex, speed, pause; 4560 unsigned long flags; 4561 enum link_state linkstate = link_up; 4562 4563 cmd->advertising = 0; 4564 cmd->supported = SUPPORTED_Autoneg; 4565 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { 4566 cmd->supported |= SUPPORTED_1000baseT_Full; 4567 cmd->advertising |= ADVERTISED_1000baseT_Full; 4568 } 4569 4570 /* Record PHY settings if HW is on. */ 4571 spin_lock_irqsave(&cp->lock, flags); 4572 bmcr = 0; 4573 linkstate = cp->lstate; 4574 if (CAS_PHY_MII(cp->phy_type)) { 4575 cmd->port = PORT_MII; 4576 cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ? 4577 XCVR_INTERNAL : XCVR_EXTERNAL; 4578 cmd->phy_address = cp->phy_addr; 4579 cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII | 4580 ADVERTISED_10baseT_Half | 4581 ADVERTISED_10baseT_Full | 4582 ADVERTISED_100baseT_Half | 4583 ADVERTISED_100baseT_Full; 4584 4585 cmd->supported |= 4586 (SUPPORTED_10baseT_Half | 4587 SUPPORTED_10baseT_Full | 4588 SUPPORTED_100baseT_Half | 4589 SUPPORTED_100baseT_Full | 4590 SUPPORTED_TP | SUPPORTED_MII); 4591 4592 if (cp->hw_running) { 4593 cas_mif_poll(cp, 0); 4594 bmcr = cas_phy_read(cp, MII_BMCR); 4595 cas_read_mii_link_mode(cp, &full_duplex, 4596 &speed, &pause); 4597 cas_mif_poll(cp, 1); 4598 } 4599 4600 } else { 4601 cmd->port = PORT_FIBRE; 4602 cmd->transceiver = XCVR_INTERNAL; 4603 cmd->phy_address = 0; 4604 cmd->supported |= SUPPORTED_FIBRE; 4605 cmd->advertising |= ADVERTISED_FIBRE; 4606 4607 if (cp->hw_running) { 4608 /* pcs uses the same bits as mii */ 4609 bmcr = readl(cp->regs + REG_PCS_MII_CTRL); 4610 cas_read_pcs_link_mode(cp, &full_duplex, 4611 &speed, &pause); 4612 } 4613 } 4614 spin_unlock_irqrestore(&cp->lock, flags); 4615 4616 if (bmcr & BMCR_ANENABLE) { 4617 cmd->advertising |= ADVERTISED_Autoneg; 4618 cmd->autoneg = AUTONEG_ENABLE; 4619 cmd->speed = ((speed == 10) ? 4620 SPEED_10 : 4621 ((speed == 1000) ? 4622 SPEED_1000 : SPEED_100)); 4623 cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF; 4624 } else { 4625 cmd->autoneg = AUTONEG_DISABLE; 4626 cmd->speed = 4627 (bmcr & CAS_BMCR_SPEED1000) ? 4628 SPEED_1000 : 4629 ((bmcr & BMCR_SPEED100) ? SPEED_100: 4630 SPEED_10); 4631 cmd->duplex = 4632 (bmcr & BMCR_FULLDPLX) ? 4633 DUPLEX_FULL : DUPLEX_HALF; 4634 } 4635 if (linkstate != link_up) { 4636 /* Force these to "unknown" if the link is not up and 4637 * autonegotiation is enabled. We can set the link 4638 * speed to 0, but not cmd->duplex, 4639 * because its legal values are 0 and 1. Ethtool will 4640 * print the value reported in parentheses after the 4641 * word "Unknown" for unrecognized values. 4642 * 4643 * If in forced mode, we report the speed and duplex 4644 * settings that we configured.
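 * For example, with the link down and a forced 100/full configuration
 * (cp->link_cntl = BMCR_SPEED100 | BMCR_FULLDPLX), the code below still
 * reports SPEED_100/DUPLEX_FULL rather than "unknown".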
4645 */ 4646 if (cp->link_cntl & BMCR_ANENABLE) { 4647 cmd->speed = 0; 4648 cmd->duplex = 0xff; 4649 } else { 4650 cmd->speed = SPEED_10; 4651 if (cp->link_cntl & BMCR_SPEED100) { 4652 cmd->speed = SPEED_100; 4653 } else if (cp->link_cntl & CAS_BMCR_SPEED1000) { 4654 cmd->speed = SPEED_1000; 4655 } 4656 cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)? 4657 DUPLEX_FULL : DUPLEX_HALF; 4658 } 4659 } 4660 return 0; 4661} 4662 4663static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 4664{ 4665 struct cas *cp = netdev_priv(dev); 4666 unsigned long flags; 4667 4668 /* Verify the settings we care about. */ 4669 if (cmd->autoneg != AUTONEG_ENABLE && 4670 cmd->autoneg != AUTONEG_DISABLE) 4671 return -EINVAL; 4672 4673 if (cmd->autoneg == AUTONEG_DISABLE && 4674 ((cmd->speed != SPEED_1000 && 4675 cmd->speed != SPEED_100 && 4676 cmd->speed != SPEED_10) || 4677 (cmd->duplex != DUPLEX_HALF && 4678 cmd->duplex != DUPLEX_FULL))) 4679 return -EINVAL; 4680 4681 /* Apply settings and restart link process. */ 4682 spin_lock_irqsave(&cp->lock, flags); 4683 cas_begin_auto_negotiation(cp, cmd); 4684 spin_unlock_irqrestore(&cp->lock, flags); 4685 return 0; 4686} 4687 4688static int cas_nway_reset(struct net_device *dev) 4689{ 4690 struct cas *cp = netdev_priv(dev); 4691 unsigned long flags; 4692 4693 if ((cp->link_cntl & BMCR_ANENABLE) == 0) 4694 return -EINVAL; 4695 4696 /* Restart link process. */ 4697 spin_lock_irqsave(&cp->lock, flags); 4698 cas_begin_auto_negotiation(cp, NULL); 4699 spin_unlock_irqrestore(&cp->lock, flags); 4700 4701 return 0; 4702} 4703 4704static u32 cas_get_link(struct net_device *dev) 4705{ 4706 struct cas *cp = netdev_priv(dev); 4707 return cp->lstate == link_up; 4708} 4709 4710static u32 cas_get_msglevel(struct net_device *dev) 4711{ 4712 struct cas *cp = netdev_priv(dev); 4713 return cp->msg_enable; 4714} 4715 4716static void cas_set_msglevel(struct net_device *dev, u32 value) 4717{ 4718 struct cas *cp = netdev_priv(dev); 4719 cp->msg_enable = value; 4720} 4721 4722static int cas_get_regs_len(struct net_device *dev) 4723{ 4724 struct cas *cp = netdev_priv(dev); 4725 return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len: CAS_MAX_REGS; 4726} 4727 4728static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs, 4729 void *p) 4730{ 4731 struct cas *cp = netdev_priv(dev); 4732 regs->version = 0; 4733 /* cas_read_regs handles locks (cp->lock). 
*/ 4734 cas_read_regs(cp, p, regs->len / sizeof(u32)); 4735} 4736 4737static int cas_get_sset_count(struct net_device *dev, int sset) 4738{ 4739 switch (sset) { 4740 case ETH_SS_STATS: 4741 return CAS_NUM_STAT_KEYS; 4742 default: 4743 return -EOPNOTSUPP; 4744 } 4745} 4746 4747static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data) 4748{ 4749 memcpy(data, &ethtool_cassini_statnames, 4750 CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN); 4751} 4752 4753static void cas_get_ethtool_stats(struct net_device *dev, 4754 struct ethtool_stats *estats, u64 *data) 4755{ 4756 struct cas *cp = netdev_priv(dev); 4757 struct net_device_stats *stats = cas_get_stats(cp->dev); 4758 int i = 0; 4759 data[i++] = stats->collisions; 4760 data[i++] = stats->rx_bytes; 4761 data[i++] = stats->rx_crc_errors; 4762 data[i++] = stats->rx_dropped; 4763 data[i++] = stats->rx_errors; 4764 data[i++] = stats->rx_fifo_errors; 4765 data[i++] = stats->rx_frame_errors; 4766 data[i++] = stats->rx_length_errors; 4767 data[i++] = stats->rx_over_errors; 4768 data[i++] = stats->rx_packets; 4769 data[i++] = stats->tx_aborted_errors; 4770 data[i++] = stats->tx_bytes; 4771 data[i++] = stats->tx_dropped; 4772 data[i++] = stats->tx_errors; 4773 data[i++] = stats->tx_fifo_errors; 4774 data[i++] = stats->tx_packets; 4775 BUG_ON(i != CAS_NUM_STAT_KEYS); 4776} 4777 4778static const struct ethtool_ops cas_ethtool_ops = { 4779 .get_drvinfo = cas_get_drvinfo, 4780 .get_settings = cas_get_settings, 4781 .set_settings = cas_set_settings, 4782 .nway_reset = cas_nway_reset, 4783 .get_link = cas_get_link, 4784 .get_msglevel = cas_get_msglevel, 4785 .set_msglevel = cas_set_msglevel, 4786 .get_regs_len = cas_get_regs_len, 4787 .get_regs = cas_get_regs, 4788 .get_sset_count = cas_get_sset_count, 4789 .get_strings = cas_get_strings, 4790 .get_ethtool_stats = cas_get_ethtool_stats, 4791}; 4792 4793static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 4794{ 4795 struct cas *cp = netdev_priv(dev); 4796 struct mii_ioctl_data *data = if_mii(ifr); 4797 unsigned long flags; 4798 int rc = -EOPNOTSUPP; 4799 4800 /* Hold the PM mutex while doing ioctls or we may collide 4801 * with open/close and power management and oops. 4802 */ 4803 mutex_lock(&cp->pm_mutex); 4804 switch (cmd) { 4805 case SIOCGMIIPHY: /* Get address of MII PHY in use. */ 4806 data->phy_id = cp->phy_addr; 4807 /* Fallthrough... */ 4808 4809 case SIOCGMIIREG: /* Read MII PHY register. */ 4810 spin_lock_irqsave(&cp->lock, flags); 4811 cas_mif_poll(cp, 0); 4812 data->val_out = cas_phy_read(cp, data->reg_num & 0x1f); 4813 cas_mif_poll(cp, 1); 4814 spin_unlock_irqrestore(&cp->lock, flags); 4815 rc = 0; 4816 break; 4817 4818 case SIOCSMIIREG: /* Write MII PHY register. */ 4819 spin_lock_irqsave(&cp->lock, flags); 4820 cas_mif_poll(cp, 0); 4821 rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in); 4822 cas_mif_poll(cp, 1); 4823 spin_unlock_irqrestore(&cp->lock, flags); 4824 break; 4825 default: 4826 break; 4827 } 4828 4829 mutex_unlock(&cp->pm_mutex); 4830 return rc; 4831} 4832 4833/* When this chip sits underneath an Intel 31154 bridge, it is the 4834 * only subordinate device and we can tweak the bridge settings to 4835 * reflect that fact.
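 * The helper below is a no-op unless the parent bridge really is the
 * 31154 (vendor/device ID 0x8086/0x537c); behind any other bridge the
 * PCI defaults are left untouched.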
4836 */ 4837static void __devinit cas_program_bridge(struct pci_dev *cas_pdev) 4838{ 4839 struct pci_dev *pdev = cas_pdev->bus->self; 4840 u32 val; 4841 4842 if (!pdev) 4843 return; 4844 4845 if (pdev->vendor != 0x8086 || pdev->device != 0x537c) 4846 return; 4847 4848 /* Clear bit 10 (Bus Parking Control) in the Secondary 4849 * Arbiter Control/Status Register which lives at offset 4850 * 0x41. Using a 32-bit word read/modify/write at 0x40 4851 * is much simpler so that's how we do this. 4852 */ 4853 pci_read_config_dword(pdev, 0x40, &val); 4854 val &= ~0x00040000; 4855 pci_write_config_dword(pdev, 0x40, val); 4856 4857 /* Max out the Multi-Transaction Timer settings since 4858 * Cassini is the only device present. 4859 * 4860 * The register is 16-bit and lives at 0x50. When the 4861 * settings are enabled, it extends the GRANT# signal 4862 * for a requestor after a transaction is complete. This 4863 * allows the next request to run without first needing 4864 * to negotiate the GRANT# signal back. 4865 * 4866 * Bits 12:10 define the grant duration: 4867 * 4868 * 1 -- 16 clocks 4869 * 2 -- 32 clocks 4870 * 3 -- 64 clocks 4871 * 4 -- 128 clocks 4872 * 5 -- 256 clocks 4873 * 4874 * All other values are illegal. 4875 * 4876 * Bits 09:00 define which REQ/GNT signal pairs get the 4877 * GRANT# signal treatment. We set them all. 4878 */ 4879 pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff); 4880 4881 /* The Read Prefetch Policy register is 16-bit and sits at 4882 * offset 0x52. It enables a "smart" pre-fetch policy. We 4883 * enable it and max out all of the settings since only one 4884 * device is sitting underneath and thus bandwidth sharing is 4885 * not an issue. 4886 * 4887 * The register has several 3 bit fields, which indicate a 4888 * multiplier applied to the base amount of prefetching the 4889 * chip would do. These fields are at: 4890 * 4891 * 15:13 --- ReRead Primary Bus 4892 * 12:10 --- FirstRead Primary Bus 4893 * 09:07 --- ReRead Secondary Bus 4894 * 06:04 --- FirstRead Secondary Bus 4895 * 4896 * Bits 03:00 control which REQ/GNT pairs the prefetch settings 4897 * get enabled on. Bit 3 is a grouped enabler which controls 4898 * all of the REQ/GNT pairs from [8:3]. Bits 2 to 0 control 4899 * the individual REQ/GNT pairs [2:0]. 4900 */ 4901 pci_write_config_word(pdev, 0x52, 4902 (0x7 << 13) | 4903 (0x7 << 10) | 4904 (0x7 << 7) | 4905 (0x7 << 4) | 4906 (0xf << 0)); 4907 4908 /* Force cacheline size to 0x8 */ 4909 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08); 4910 4911 /* Force latency timer to maximum setting so Cassini can 4912 * sit on the bus as long as it likes.
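 * (0xff is simply the largest value the 8-bit PCI latency timer register
 * accepts.)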
4913 */ 4914 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff); 4915} 4916 4917static const struct net_device_ops cas_netdev_ops = { 4918 .ndo_open = cas_open, 4919 .ndo_stop = cas_close, 4920 .ndo_start_xmit = cas_start_xmit, 4921 .ndo_get_stats = cas_get_stats, 4922 .ndo_set_multicast_list = cas_set_multicast, 4923 .ndo_do_ioctl = cas_ioctl, 4924 .ndo_tx_timeout = cas_tx_timeout, 4925 .ndo_change_mtu = cas_change_mtu, 4926 .ndo_set_mac_address = eth_mac_addr, 4927 .ndo_validate_addr = eth_validate_addr, 4928#ifdef CONFIG_NET_POLL_CONTROLLER 4929 .ndo_poll_controller = cas_netpoll, 4930#endif 4931}; 4932 4933static int __devinit cas_init_one(struct pci_dev *pdev, 4934 const struct pci_device_id *ent) 4935{ 4936 static int cas_version_printed = 0; 4937 unsigned long casreg_len; 4938 struct net_device *dev; 4939 struct cas *cp; 4940 int i, err, pci_using_dac; 4941 u16 pci_cmd; 4942 u8 orig_cacheline_size = 0, cas_cacheline_size = 0; 4943 4944 if (cas_version_printed++ == 0) 4945 pr_info("%s", version); 4946 4947 err = pci_enable_device(pdev); 4948 if (err) { 4949 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 4950 return err; 4951 } 4952 4953 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 4954 dev_err(&pdev->dev, "Cannot find proper PCI device " 4955 "base address, aborting\n"); 4956 err = -ENODEV; 4957 goto err_out_disable_pdev; 4958 } 4959 4960 dev = alloc_etherdev(sizeof(*cp)); 4961 if (!dev) { 4962 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n"); 4963 err = -ENOMEM; 4964 goto err_out_disable_pdev; 4965 } 4966 SET_NETDEV_DEV(dev, &pdev->dev); 4967 4968 err = pci_request_regions(pdev, dev->name); 4969 if (err) { 4970 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); 4971 goto err_out_free_netdev; 4972 } 4973 pci_set_master(pdev); 4974 4975 /* we must always turn on parity response or else parity 4976 * doesn't get generated properly. disable SERR/PERR as well. 4977 * in addition, we want to turn MWI on. 4978 */ 4979 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); 4980 pci_cmd &= ~PCI_COMMAND_SERR; 4981 pci_cmd |= PCI_COMMAND_PARITY; 4982 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); 4983 if (pci_try_set_mwi(pdev)) 4984 pr_warning("Could not enable MWI for %s\n", pci_name(pdev)); 4985 4986 cas_program_bridge(pdev); 4987 4988 /* 4989 * On some architectures, the default cache line size set 4990 * by pci_try_set_mwi reduces perforamnce. We have to increase 4991 * it for this case. To start, we'll print some configuration 4992 * data. 4993 */ 4994#if 1 4995 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, 4996 &orig_cacheline_size); 4997 if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) { 4998 cas_cacheline_size = 4999 (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ? 5000 CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES; 5001 if (pci_write_config_byte(pdev, 5002 PCI_CACHE_LINE_SIZE, 5003 cas_cacheline_size)) { 5004 dev_err(&pdev->dev, "Could not set PCI cache " 5005 "line size\n"); 5006 goto err_write_cacheline; 5007 } 5008 } 5009#endif 5010 5011 5012 /* Configure DMA attributes. 
*/ 5013 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 5014 pci_using_dac = 1; 5015 err = pci_set_consistent_dma_mask(pdev, 5016 DMA_BIT_MASK(64)); 5017 if (err < 0) { 5018 dev_err(&pdev->dev, "Unable to obtain 64-bit DMA " 5019 "for consistent allocations\n"); 5020 goto err_out_free_res; 5021 } 5022 5023 } else { 5024 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 5025 if (err) { 5026 dev_err(&pdev->dev, "No usable DMA configuration, " 5027 "aborting\n"); 5028 goto err_out_free_res; 5029 } 5030 pci_using_dac = 0; 5031 } 5032 5033 casreg_len = pci_resource_len(pdev, 0); 5034 5035 cp = netdev_priv(dev); 5036 cp->pdev = pdev; 5037#if 1 5038 /* A value of 0 indicates we never explicitly set it */ 5039 cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size: 0; 5040#endif 5041 cp->dev = dev; 5042 cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE : 5043 cassini_debug; 5044 5045 cp->link_transition = LINK_TRANSITION_UNKNOWN; 5046 cp->link_transition_jiffies_valid = 0; 5047 5048 spin_lock_init(&cp->lock); 5049 spin_lock_init(&cp->rx_inuse_lock); 5050 spin_lock_init(&cp->rx_spare_lock); 5051 for (i = 0; i < N_TX_RINGS; i++) { 5052 spin_lock_init(&cp->stat_lock[i]); 5053 spin_lock_init(&cp->tx_lock[i]); 5054 } 5055 spin_lock_init(&cp->stat_lock[N_TX_RINGS]); 5056 mutex_init(&cp->pm_mutex); 5057 5058 init_timer(&cp->link_timer); 5059 cp->link_timer.function = cas_link_timer; 5060 cp->link_timer.data = (unsigned long) cp; 5061 5062#if 1 5063 /* Just in case the implementation of atomic operations 5064 * changes so that an explicit initialization is necessary. 5065 */ 5066 atomic_set(&cp->reset_task_pending, 0); 5067 atomic_set(&cp->reset_task_pending_all, 0); 5068 atomic_set(&cp->reset_task_pending_spare, 0); 5069 atomic_set(&cp->reset_task_pending_mtu, 0); 5070#endif 5071 INIT_WORK(&cp->reset_task, cas_reset_task); 5072 5073 /* Default link parameters */ 5074 if (link_mode >= 0 && link_mode < 6) 5075 cp->link_cntl = link_modes[link_mode]; 5076 else 5077 cp->link_cntl = BMCR_ANENABLE; 5078 cp->lstate = link_down; 5079 cp->link_transition = LINK_TRANSITION_LINK_DOWN; 5080 netif_carrier_off(cp->dev); 5081 cp->timer_ticks = 0; 5082 5083 /* give us access to cassini registers */ 5084 cp->regs = pci_iomap(pdev, 0, casreg_len); 5085 if (!cp->regs) { 5086 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 5087 goto err_out_free_res; 5088 } 5089 cp->casreg_len = casreg_len; 5090 5091 pci_save_state(pdev); 5092 cas_check_pci_invariants(cp); 5093 cas_hard_reset(cp); 5094 cas_reset(cp, 0); 5095 if (cas_check_invariants(cp)) 5096 goto err_out_iounmap; 5097 if (cp->cas_flags & CAS_FLAG_SATURN) 5098 if (cas_saturn_firmware_init(cp)) 5099 goto err_out_iounmap; 5100 5101 cp->init_block = (struct cas_init_block *) 5102 pci_alloc_consistent(pdev, sizeof(struct cas_init_block), 5103 &cp->block_dvma); 5104 if (!cp->init_block) { 5105 dev_err(&pdev->dev, "Cannot allocate init block, aborting\n"); 5106 goto err_out_iounmap; 5107 } 5108 5109 for (i = 0; i < N_TX_RINGS; i++) 5110 cp->init_txds[i] = cp->init_block->txds[i]; 5111 5112 for (i = 0; i < N_RX_DESC_RINGS; i++) 5113 cp->init_rxds[i] = cp->init_block->rxds[i]; 5114 5115 for (i = 0; i < N_RX_COMP_RINGS; i++) 5116 cp->init_rxcs[i] = cp->init_block->rxcs[i]; 5117 5118 for (i = 0; i < N_RX_FLOWS; i++) 5119 skb_queue_head_init(&cp->rx_flows[i]); 5120 5121 dev->netdev_ops = &cas_netdev_ops; 5122 dev->ethtool_ops = &cas_ethtool_ops; 5123 dev->watchdog_timeo = CAS_TX_TIMEOUT; 5124 5125#ifdef USE_NAPI 5126 netif_napi_add(dev, &cp->napi,
cas_poll, 64); 5127#endif 5128 dev->irq = pdev->irq; 5129 dev->dma = 0; 5130 5131 /* Cassini features. */ 5132 if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0) 5133 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; 5134 5135 if (pci_using_dac) 5136 dev->features |= NETIF_F_HIGHDMA; 5137 5138 if (register_netdev(dev)) { 5139 dev_err(&pdev->dev, "Cannot register net device, aborting\n"); 5140 goto err_out_free_consistent; 5141 } 5142 5143 i = readl(cp->regs + REG_BIM_CFG); 5144 netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n", 5145 (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "", 5146 (i & BIM_CFG_32BIT) ? "32" : "64", 5147 (i & BIM_CFG_66MHZ) ? "66" : "33", 5148 (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq, 5149 dev->dev_addr); 5150 5151 pci_set_drvdata(pdev, dev); 5152 cp->hw_running = 1; 5153 cas_entropy_reset(cp); 5154 cas_phy_init(cp); 5155 cas_begin_auto_negotiation(cp, NULL); 5156 return 0; 5157 5158err_out_free_consistent: 5159 pci_free_consistent(pdev, sizeof(struct cas_init_block), 5160 cp->init_block, cp->block_dvma); 5161 5162err_out_iounmap: 5163 mutex_lock(&cp->pm_mutex); 5164 if (cp->hw_running) 5165 cas_shutdown(cp); 5166 mutex_unlock(&cp->pm_mutex); 5167 5168 pci_iounmap(pdev, cp->regs); 5169 5170 5171err_out_free_res: 5172 pci_release_regions(pdev); 5173 5174err_write_cacheline: 5175 /* Try to restore it in case the error occurred after we 5176 * set it. 5177 */ 5178 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size); 5179 5180err_out_free_netdev: 5181 free_netdev(dev); 5182 5183err_out_disable_pdev: 5184 pci_disable_device(pdev); 5185 pci_set_drvdata(pdev, NULL); 5186 return -ENODEV; 5187} 5188 5189static void __devexit cas_remove_one(struct pci_dev *pdev) 5190{ 5191 struct net_device *dev = pci_get_drvdata(pdev); 5192 struct cas *cp; 5193 if (!dev) 5194 return; 5195 5196 cp = netdev_priv(dev); 5197 unregister_netdev(dev); 5198 5199 if (cp->fw_data) 5200 vfree(cp->fw_data); 5201 5202 mutex_lock(&cp->pm_mutex); 5203 flush_scheduled_work(); 5204 if (cp->hw_running) 5205 cas_shutdown(cp); 5206 mutex_unlock(&cp->pm_mutex); 5207 5208#if 1 5209 if (cp->orig_cacheline_size) { 5210 /* Restore the cache line size if we had modified 5211 * it. 5212 */ 5213 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 5214 cp->orig_cacheline_size); 5215 } 5216#endif 5217 pci_free_consistent(pdev, sizeof(struct cas_init_block), 5218 cp->init_block, cp->block_dvma); 5219 pci_iounmap(pdev, cp->regs); 5220 free_netdev(dev); 5221 pci_release_regions(pdev); 5222 pci_disable_device(pdev); 5223 pci_set_drvdata(pdev, NULL); 5224} 5225 5226#ifdef CONFIG_PM 5227static int cas_suspend(struct pci_dev *pdev, pm_message_t state) 5228{ 5229 struct net_device *dev = pci_get_drvdata(pdev); 5230 struct cas *cp = netdev_priv(dev); 5231 unsigned long flags; 5232 5233 mutex_lock(&cp->pm_mutex); 5234 5235 /* If the driver is opened, we stop the DMA */ 5236 if (cp->opened) { 5237 netif_device_detach(dev); 5238 5239 cas_lock_all_save(cp, flags); 5240 5241 /* We can set the second arg of cas_reset to 0 5242 * because on resume, we'll call cas_init_hw with 5243 * its second arg set so that autonegotiation is 5244 * restarted.
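 * (cas_resume() below is the matching half: cas_reset(cp, 0) followed by
 * cas_init_hw(cp, 1) to restart autonegotiation.)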
5245 */ 5246 cas_reset(cp, 0); 5247 cas_clean_rings(cp); 5248 cas_unlock_all_restore(cp, flags); 5249 } 5250 5251 if (cp->hw_running) 5252 cas_shutdown(cp); 5253 mutex_unlock(&cp->pm_mutex); 5254 5255 return 0; 5256} 5257 5258static int cas_resume(struct pci_dev *pdev) 5259{ 5260 struct net_device *dev = pci_get_drvdata(pdev); 5261 struct cas *cp = netdev_priv(dev); 5262 5263 netdev_info(dev, "resuming\n"); 5264 5265 mutex_lock(&cp->pm_mutex); 5266 cas_hard_reset(cp); 5267 if (cp->opened) { 5268 unsigned long flags; 5269 cas_lock_all_save(cp, flags); 5270 cas_reset(cp, 0); 5271 cp->hw_running = 1; 5272 cas_clean_rings(cp); 5273 cas_init_hw(cp, 1); 5274 cas_unlock_all_restore(cp, flags); 5275 5276 netif_device_attach(dev); 5277 } 5278 mutex_unlock(&cp->pm_mutex); 5279 return 0; 5280} 5281#endif /* CONFIG_PM */ 5282 5283static struct pci_driver cas_driver = { 5284 .name = DRV_MODULE_NAME, 5285 .id_table = cas_pci_tbl, 5286 .probe = cas_init_one, 5287 .remove = __devexit_p(cas_remove_one), 5288#ifdef CONFIG_PM 5289 .suspend = cas_suspend, 5290 .resume = cas_resume 5291#endif 5292}; 5293 5294static int __init cas_init(void) 5295{ 5296 if (linkdown_timeout > 0) 5297 link_transition_timeout = linkdown_timeout * HZ; 5298 else 5299 link_transition_timeout = 0; 5300 5301 return pci_register_driver(&cas_driver); 5302} 5303 5304static void __exit cas_cleanup(void) 5305{ 5306 pci_unregister_driver(&cas_driver); 5307} 5308 5309module_init(cas_init); 5310module_exit(cas_cleanup);