Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
niu.c at v2.6.30-rc3 (10323 lines, 237 kB)
1/* niu.c: Neptune ethernet driver. 2 * 3 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net) 4 */ 5 6#include <linux/module.h> 7#include <linux/init.h> 8#include <linux/pci.h> 9#include <linux/dma-mapping.h> 10#include <linux/netdevice.h> 11#include <linux/ethtool.h> 12#include <linux/etherdevice.h> 13#include <linux/platform_device.h> 14#include <linux/delay.h> 15#include <linux/bitops.h> 16#include <linux/mii.h> 17#include <linux/if_ether.h> 18#include <linux/if_vlan.h> 19#include <linux/ip.h> 20#include <linux/in.h> 21#include <linux/ipv6.h> 22#include <linux/log2.h> 23#include <linux/jiffies.h> 24#include <linux/crc32.h> 25 26#include <linux/io.h> 27 28#ifdef CONFIG_SPARC64 29#include <linux/of_device.h> 30#endif 31 32#include "niu.h" 33 34#define DRV_MODULE_NAME "niu" 35#define PFX DRV_MODULE_NAME ": " 36#define DRV_MODULE_VERSION "1.0" 37#define DRV_MODULE_RELDATE "Nov 14, 2008" 38 39static char version[] __devinitdata = 40 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 41 42MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); 43MODULE_DESCRIPTION("NIU ethernet driver"); 44MODULE_LICENSE("GPL"); 45MODULE_VERSION(DRV_MODULE_VERSION); 46 47#ifndef DMA_44BIT_MASK 48#define DMA_44BIT_MASK 0x00000fffffffffffULL 49#endif 50 51#ifndef readq 52static u64 readq(void __iomem *reg) 53{ 54 return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32); 55} 56 57static void writeq(u64 val, void __iomem *reg) 58{ 59 writel(val & 0xffffffff, reg); 60 writel(val >> 32, reg + 0x4UL); 61} 62#endif 63 64static struct pci_device_id niu_pci_tbl[] = { 65 {PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)}, 66 {} 67}; 68 69MODULE_DEVICE_TABLE(pci, niu_pci_tbl); 70 71#define NIU_TX_TIMEOUT (5 * HZ) 72 73#define nr64(reg) readq(np->regs + (reg)) 74#define nw64(reg, val) writeq((val), np->regs + (reg)) 75 76#define nr64_mac(reg) readq(np->mac_regs + (reg)) 77#define nw64_mac(reg, val) writeq((val), np->mac_regs + (reg)) 78 79#define nr64_ipp(reg) readq(np->regs + np->ipp_off + (reg)) 80#define nw64_ipp(reg, val) writeq((val), np->regs + np->ipp_off + (reg)) 81 82#define nr64_pcs(reg) readq(np->regs + np->pcs_off + (reg)) 83#define nw64_pcs(reg, val) writeq((val), np->regs + np->pcs_off + (reg)) 84 85#define nr64_xpcs(reg) readq(np->regs + np->xpcs_off + (reg)) 86#define nw64_xpcs(reg, val) writeq((val), np->regs + np->xpcs_off + (reg)) 87 88#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) 89 90static int niu_debug; 91static int debug = -1; 92module_param(debug, int, 0); 93MODULE_PARM_DESC(debug, "NIU debug level"); 94 95#define niudbg(TYPE, f, a...) \ 96do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \ 97 printk(KERN_DEBUG PFX f, ## a); \ 98} while (0) 99 100#define niuinfo(TYPE, f, a...) \ 101do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \ 102 printk(KERN_INFO PFX f, ## a); \ 103} while (0) 104 105#define niuwarn(TYPE, f, a...) 
\ 106do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \ 107 printk(KERN_WARNING PFX f, ## a); \ 108} while (0) 109 110#define niu_lock_parent(np, flags) \ 111 spin_lock_irqsave(&np->parent->lock, flags) 112#define niu_unlock_parent(np, flags) \ 113 spin_unlock_irqrestore(&np->parent->lock, flags) 114 115static int serdes_init_10g_serdes(struct niu *np); 116 117static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg, 118 u64 bits, int limit, int delay) 119{ 120 while (--limit >= 0) { 121 u64 val = nr64_mac(reg); 122 123 if (!(val & bits)) 124 break; 125 udelay(delay); 126 } 127 if (limit < 0) 128 return -ENODEV; 129 return 0; 130} 131 132static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg, 133 u64 bits, int limit, int delay, 134 const char *reg_name) 135{ 136 int err; 137 138 nw64_mac(reg, bits); 139 err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay); 140 if (err) 141 dev_err(np->device, PFX "%s: bits (%llx) of register %s " 142 "would not clear, val[%llx]\n", 143 np->dev->name, (unsigned long long) bits, reg_name, 144 (unsigned long long) nr64_mac(reg)); 145 return err; 146} 147 148#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \ 149({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ 150 __niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \ 151}) 152 153static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg, 154 u64 bits, int limit, int delay) 155{ 156 while (--limit >= 0) { 157 u64 val = nr64_ipp(reg); 158 159 if (!(val & bits)) 160 break; 161 udelay(delay); 162 } 163 if (limit < 0) 164 return -ENODEV; 165 return 0; 166} 167 168static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg, 169 u64 bits, int limit, int delay, 170 const char *reg_name) 171{ 172 int err; 173 u64 val; 174 175 val = nr64_ipp(reg); 176 val |= bits; 177 nw64_ipp(reg, val); 178 179 err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay); 180 if (err) 181 dev_err(np->device, PFX "%s: bits (%llx) of register %s " 182 "would not clear, val[%llx]\n", 183 np->dev->name, (unsigned long long) bits, reg_name, 184 (unsigned long long) nr64_ipp(reg)); 185 return err; 186} 187 188#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \ 189({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ 190 __niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \ 191}) 192 193static int __niu_wait_bits_clear(struct niu *np, unsigned long reg, 194 u64 bits, int limit, int delay) 195{ 196 while (--limit >= 0) { 197 u64 val = nr64(reg); 198 199 if (!(val & bits)) 200 break; 201 udelay(delay); 202 } 203 if (limit < 0) 204 return -ENODEV; 205 return 0; 206} 207 208#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \ 209({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ 210 __niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \ 211}) 212 213static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg, 214 u64 bits, int limit, int delay, 215 const char *reg_name) 216{ 217 int err; 218 219 nw64(reg, bits); 220 err = __niu_wait_bits_clear(np, reg, bits, limit, delay); 221 if (err) 222 dev_err(np->device, PFX "%s: bits (%llx) of register %s " 223 "would not clear, val[%llx]\n", 224 np->dev->name, (unsigned long long) bits, reg_name, 225 (unsigned long long) nr64(reg)); 226 return err; 227} 228 229#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \ 230({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ 231 __niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \ 232}) 233 234static 
void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on) 235{ 236 u64 val = (u64) lp->timer; 237 238 if (on) 239 val |= LDG_IMGMT_ARM; 240 241 nw64(LDG_IMGMT(lp->ldg_num), val); 242} 243 244static int niu_ldn_irq_enable(struct niu *np, int ldn, int on) 245{ 246 unsigned long mask_reg, bits; 247 u64 val; 248 249 if (ldn < 0 || ldn > LDN_MAX) 250 return -EINVAL; 251 252 if (ldn < 64) { 253 mask_reg = LD_IM0(ldn); 254 bits = LD_IM0_MASK; 255 } else { 256 mask_reg = LD_IM1(ldn - 64); 257 bits = LD_IM1_MASK; 258 } 259 260 val = nr64(mask_reg); 261 if (on) 262 val &= ~bits; 263 else 264 val |= bits; 265 nw64(mask_reg, val); 266 267 return 0; 268} 269 270static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on) 271{ 272 struct niu_parent *parent = np->parent; 273 int i; 274 275 for (i = 0; i <= LDN_MAX; i++) { 276 int err; 277 278 if (parent->ldg_map[i] != lp->ldg_num) 279 continue; 280 281 err = niu_ldn_irq_enable(np, i, on); 282 if (err) 283 return err; 284 } 285 return 0; 286} 287 288static int niu_enable_interrupts(struct niu *np, int on) 289{ 290 int i; 291 292 for (i = 0; i < np->num_ldg; i++) { 293 struct niu_ldg *lp = &np->ldg[i]; 294 int err; 295 296 err = niu_enable_ldn_in_ldg(np, lp, on); 297 if (err) 298 return err; 299 } 300 for (i = 0; i < np->num_ldg; i++) 301 niu_ldg_rearm(np, &np->ldg[i], on); 302 303 return 0; 304} 305 306static u32 phy_encode(u32 type, int port) 307{ 308 return (type << (port * 2)); 309} 310 311static u32 phy_decode(u32 val, int port) 312{ 313 return (val >> (port * 2)) & PORT_TYPE_MASK; 314} 315 316static int mdio_wait(struct niu *np) 317{ 318 int limit = 1000; 319 u64 val; 320 321 while (--limit > 0) { 322 val = nr64(MIF_FRAME_OUTPUT); 323 if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1) 324 return val & MIF_FRAME_OUTPUT_DATA; 325 326 udelay(10); 327 } 328 329 return -ENODEV; 330} 331 332static int mdio_read(struct niu *np, int port, int dev, int reg) 333{ 334 int err; 335 336 nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg)); 337 err = mdio_wait(np); 338 if (err < 0) 339 return err; 340 341 nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev)); 342 return mdio_wait(np); 343} 344 345static int mdio_write(struct niu *np, int port, int dev, int reg, int data) 346{ 347 int err; 348 349 nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg)); 350 err = mdio_wait(np); 351 if (err < 0) 352 return err; 353 354 nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data)); 355 err = mdio_wait(np); 356 if (err < 0) 357 return err; 358 359 return 0; 360} 361 362static int mii_read(struct niu *np, int port, int reg) 363{ 364 nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg)); 365 return mdio_wait(np); 366} 367 368static int mii_write(struct niu *np, int port, int reg, int data) 369{ 370 int err; 371 372 nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data)); 373 err = mdio_wait(np); 374 if (err < 0) 375 return err; 376 377 return 0; 378} 379 380static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val) 381{ 382 int err; 383 384 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 385 ESR2_TI_PLL_TX_CFG_L(channel), 386 val & 0xffff); 387 if (!err) 388 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 389 ESR2_TI_PLL_TX_CFG_H(channel), 390 val >> 16); 391 return err; 392} 393 394static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val) 395{ 396 int err; 397 398 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 399 ESR2_TI_PLL_RX_CFG_L(channel), 400 val & 0xffff); 401 if (!err) 402 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 403 
ESR2_TI_PLL_RX_CFG_H(channel), 404 val >> 16); 405 return err; 406} 407 408/* Mode is always 10G fiber. */ 409static int serdes_init_niu_10g_fiber(struct niu *np) 410{ 411 struct niu_link_config *lp = &np->link_config; 412 u32 tx_cfg, rx_cfg; 413 unsigned long i; 414 415 tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV); 416 rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT | 417 PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH | 418 PLL_RX_CFG_EQ_LP_ADAPTIVE); 419 420 if (lp->loopback_mode == LOOPBACK_PHY) { 421 u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS; 422 423 mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 424 ESR2_TI_PLL_TEST_CFG_L, test_cfg); 425 426 tx_cfg |= PLL_TX_CFG_ENTEST; 427 rx_cfg |= PLL_RX_CFG_ENTEST; 428 } 429 430 /* Initialize all 4 lanes of the SERDES. */ 431 for (i = 0; i < 4; i++) { 432 int err = esr2_set_tx_cfg(np, i, tx_cfg); 433 if (err) 434 return err; 435 } 436 437 for (i = 0; i < 4; i++) { 438 int err = esr2_set_rx_cfg(np, i, rx_cfg); 439 if (err) 440 return err; 441 } 442 443 return 0; 444} 445 446static int serdes_init_niu_1g_serdes(struct niu *np) 447{ 448 struct niu_link_config *lp = &np->link_config; 449 u16 pll_cfg, pll_sts; 450 int max_retry = 100; 451 u64 uninitialized_var(sig), mask, val; 452 u32 tx_cfg, rx_cfg; 453 unsigned long i; 454 int err; 455 456 tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV | 457 PLL_TX_CFG_RATE_HALF); 458 rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT | 459 PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH | 460 PLL_RX_CFG_RATE_HALF); 461 462 if (np->port == 0) 463 rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE; 464 465 if (lp->loopback_mode == LOOPBACK_PHY) { 466 u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS; 467 468 mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 469 ESR2_TI_PLL_TEST_CFG_L, test_cfg); 470 471 tx_cfg |= PLL_TX_CFG_ENTEST; 472 rx_cfg |= PLL_RX_CFG_ENTEST; 473 } 474 475 /* Initialize PLL for 1G */ 476 pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X); 477 478 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 479 ESR2_TI_PLL_CFG_L, pll_cfg); 480 if (err) { 481 dev_err(np->device, PFX "NIU Port %d " 482 "serdes_init_niu_1g_serdes: " 483 "mdio write to ESR2_TI_PLL_CFG_L failed", np->port); 484 return err; 485 } 486 487 pll_sts = PLL_CFG_ENPLL; 488 489 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 490 ESR2_TI_PLL_STS_L, pll_sts); 491 if (err) { 492 dev_err(np->device, PFX "NIU Port %d " 493 "serdes_init_niu_1g_serdes: " 494 "mdio write to ESR2_TI_PLL_STS_L failed", np->port); 495 return err; 496 } 497 498 udelay(200); 499 500 /* Initialize all 4 lanes of the SERDES. 
*/ 501 for (i = 0; i < 4; i++) { 502 err = esr2_set_tx_cfg(np, i, tx_cfg); 503 if (err) 504 return err; 505 } 506 507 for (i = 0; i < 4; i++) { 508 err = esr2_set_rx_cfg(np, i, rx_cfg); 509 if (err) 510 return err; 511 } 512 513 switch (np->port) { 514 case 0: 515 val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0); 516 mask = val; 517 break; 518 519 case 1: 520 val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1); 521 mask = val; 522 break; 523 524 default: 525 return -EINVAL; 526 } 527 528 while (max_retry--) { 529 sig = nr64(ESR_INT_SIGNALS); 530 if ((sig & mask) == val) 531 break; 532 533 mdelay(500); 534 } 535 536 if ((sig & mask) != val) { 537 dev_err(np->device, PFX "Port %u signal bits [%08x] are not " 538 "[%08x]\n", np->port, (int) (sig & mask), (int) val); 539 return -ENODEV; 540 } 541 542 return 0; 543} 544 545static int serdes_init_niu_10g_serdes(struct niu *np) 546{ 547 struct niu_link_config *lp = &np->link_config; 548 u32 tx_cfg, rx_cfg, pll_cfg, pll_sts; 549 int max_retry = 100; 550 u64 uninitialized_var(sig), mask, val; 551 unsigned long i; 552 int err; 553 554 tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV); 555 rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT | 556 PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH | 557 PLL_RX_CFG_EQ_LP_ADAPTIVE); 558 559 if (lp->loopback_mode == LOOPBACK_PHY) { 560 u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS; 561 562 mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 563 ESR2_TI_PLL_TEST_CFG_L, test_cfg); 564 565 tx_cfg |= PLL_TX_CFG_ENTEST; 566 rx_cfg |= PLL_RX_CFG_ENTEST; 567 } 568 569 /* Initialize PLL for 10G */ 570 pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X); 571 572 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 573 ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff); 574 if (err) { 575 dev_err(np->device, PFX "NIU Port %d " 576 "serdes_init_niu_10g_serdes: " 577 "mdio write to ESR2_TI_PLL_CFG_L failed", np->port); 578 return err; 579 } 580 581 pll_sts = PLL_CFG_ENPLL; 582 583 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 584 ESR2_TI_PLL_STS_L, pll_sts & 0xffff); 585 if (err) { 586 dev_err(np->device, PFX "NIU Port %d " 587 "serdes_init_niu_10g_serdes: " 588 "mdio write to ESR2_TI_PLL_STS_L failed", np->port); 589 return err; 590 } 591 592 udelay(200); 593 594 /* Initialize all 4 lanes of the SERDES. 
*/ 595 for (i = 0; i < 4; i++) { 596 err = esr2_set_tx_cfg(np, i, tx_cfg); 597 if (err) 598 return err; 599 } 600 601 for (i = 0; i < 4; i++) { 602 err = esr2_set_rx_cfg(np, i, rx_cfg); 603 if (err) 604 return err; 605 } 606 607 /* check if serdes is ready */ 608 609 switch (np->port) { 610 case 0: 611 mask = ESR_INT_SIGNALS_P0_BITS; 612 val = (ESR_INT_SRDY0_P0 | 613 ESR_INT_DET0_P0 | 614 ESR_INT_XSRDY_P0 | 615 ESR_INT_XDP_P0_CH3 | 616 ESR_INT_XDP_P0_CH2 | 617 ESR_INT_XDP_P0_CH1 | 618 ESR_INT_XDP_P0_CH0); 619 break; 620 621 case 1: 622 mask = ESR_INT_SIGNALS_P1_BITS; 623 val = (ESR_INT_SRDY0_P1 | 624 ESR_INT_DET0_P1 | 625 ESR_INT_XSRDY_P1 | 626 ESR_INT_XDP_P1_CH3 | 627 ESR_INT_XDP_P1_CH2 | 628 ESR_INT_XDP_P1_CH1 | 629 ESR_INT_XDP_P1_CH0); 630 break; 631 632 default: 633 return -EINVAL; 634 } 635 636 while (max_retry--) { 637 sig = nr64(ESR_INT_SIGNALS); 638 if ((sig & mask) == val) 639 break; 640 641 mdelay(500); 642 } 643 644 if ((sig & mask) != val) { 645 pr_info(PFX "NIU Port %u signal bits [%08x] are not " 646 "[%08x] for 10G...trying 1G\n", 647 np->port, (int) (sig & mask), (int) val); 648 649 /* 10G failed, try initializing at 1G */ 650 err = serdes_init_niu_1g_serdes(np); 651 if (!err) { 652 np->flags &= ~NIU_FLAGS_10G; 653 np->mac_xcvr = MAC_XCVR_PCS; 654 } else { 655 dev_err(np->device, PFX "Port %u 10G/1G SERDES " 656 "Link Failed \n", np->port); 657 return -ENODEV; 658 } 659 } 660 return 0; 661} 662 663static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val) 664{ 665 int err; 666 667 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan)); 668 if (err >= 0) { 669 *val = (err & 0xffff); 670 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, 671 ESR_RXTX_CTRL_H(chan)); 672 if (err >= 0) 673 *val |= ((err & 0xffff) << 16); 674 err = 0; 675 } 676 return err; 677} 678 679static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val) 680{ 681 int err; 682 683 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, 684 ESR_GLUE_CTRL0_L(chan)); 685 if (err >= 0) { 686 *val = (err & 0xffff); 687 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, 688 ESR_GLUE_CTRL0_H(chan)); 689 if (err >= 0) { 690 *val |= ((err & 0xffff) << 16); 691 err = 0; 692 } 693 } 694 return err; 695} 696 697static int esr_read_reset(struct niu *np, u32 *val) 698{ 699 int err; 700 701 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, 702 ESR_RXTX_RESET_CTRL_L); 703 if (err >= 0) { 704 *val = (err & 0xffff); 705 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, 706 ESR_RXTX_RESET_CTRL_H); 707 if (err >= 0) { 708 *val |= ((err & 0xffff) << 16); 709 err = 0; 710 } 711 } 712 return err; 713} 714 715static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val) 716{ 717 int err; 718 719 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, 720 ESR_RXTX_CTRL_L(chan), val & 0xffff); 721 if (!err) 722 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, 723 ESR_RXTX_CTRL_H(chan), (val >> 16)); 724 return err; 725} 726 727static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val) 728{ 729 int err; 730 731 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, 732 ESR_GLUE_CTRL0_L(chan), val & 0xffff); 733 if (!err) 734 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, 735 ESR_GLUE_CTRL0_H(chan), (val >> 16)); 736 return err; 737} 738 739static int esr_reset(struct niu *np) 740{ 741 u32 uninitialized_var(reset); 742 int err; 743 744 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, 745 ESR_RXTX_RESET_CTRL_L, 0x0000); 746 if (err) 747 return err; 748 err = mdio_write(np, np->port, 
NIU_ESR_DEV_ADDR, 749 ESR_RXTX_RESET_CTRL_H, 0xffff); 750 if (err) 751 return err; 752 udelay(200); 753 754 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, 755 ESR_RXTX_RESET_CTRL_L, 0xffff); 756 if (err) 757 return err; 758 udelay(200); 759 760 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, 761 ESR_RXTX_RESET_CTRL_H, 0x0000); 762 if (err) 763 return err; 764 udelay(200); 765 766 err = esr_read_reset(np, &reset); 767 if (err) 768 return err; 769 if (reset != 0) { 770 dev_err(np->device, PFX "Port %u ESR_RESET " 771 "did not clear [%08x]\n", 772 np->port, reset); 773 return -ENODEV; 774 } 775 776 return 0; 777} 778 779static int serdes_init_10g(struct niu *np) 780{ 781 struct niu_link_config *lp = &np->link_config; 782 unsigned long ctrl_reg, test_cfg_reg, i; 783 u64 ctrl_val, test_cfg_val, sig, mask, val; 784 int err; 785 786 switch (np->port) { 787 case 0: 788 ctrl_reg = ENET_SERDES_0_CTRL_CFG; 789 test_cfg_reg = ENET_SERDES_0_TEST_CFG; 790 break; 791 case 1: 792 ctrl_reg = ENET_SERDES_1_CTRL_CFG; 793 test_cfg_reg = ENET_SERDES_1_TEST_CFG; 794 break; 795 796 default: 797 return -EINVAL; 798 } 799 ctrl_val = (ENET_SERDES_CTRL_SDET_0 | 800 ENET_SERDES_CTRL_SDET_1 | 801 ENET_SERDES_CTRL_SDET_2 | 802 ENET_SERDES_CTRL_SDET_3 | 803 (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) | 804 (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) | 805 (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) | 806 (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) | 807 (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) | 808 (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) | 809 (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) | 810 (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT)); 811 test_cfg_val = 0; 812 813 if (lp->loopback_mode == LOOPBACK_PHY) { 814 test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK << 815 ENET_SERDES_TEST_MD_0_SHIFT) | 816 (ENET_TEST_MD_PAD_LOOPBACK << 817 ENET_SERDES_TEST_MD_1_SHIFT) | 818 (ENET_TEST_MD_PAD_LOOPBACK << 819 ENET_SERDES_TEST_MD_2_SHIFT) | 820 (ENET_TEST_MD_PAD_LOOPBACK << 821 ENET_SERDES_TEST_MD_3_SHIFT)); 822 } 823 824 nw64(ctrl_reg, ctrl_val); 825 nw64(test_cfg_reg, test_cfg_val); 826 827 /* Initialize all 4 lanes of the SERDES. 
*/ 828 for (i = 0; i < 4; i++) { 829 u32 rxtx_ctrl, glue0; 830 831 err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl); 832 if (err) 833 return err; 834 err = esr_read_glue0(np, i, &glue0); 835 if (err) 836 return err; 837 838 rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO); 839 rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH | 840 (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT)); 841 842 glue0 &= ~(ESR_GLUE_CTRL0_SRATE | 843 ESR_GLUE_CTRL0_THCNT | 844 ESR_GLUE_CTRL0_BLTIME); 845 glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB | 846 (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) | 847 (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) | 848 (BLTIME_300_CYCLES << 849 ESR_GLUE_CTRL0_BLTIME_SHIFT)); 850 851 err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl); 852 if (err) 853 return err; 854 err = esr_write_glue0(np, i, glue0); 855 if (err) 856 return err; 857 } 858 859 err = esr_reset(np); 860 if (err) 861 return err; 862 863 sig = nr64(ESR_INT_SIGNALS); 864 switch (np->port) { 865 case 0: 866 mask = ESR_INT_SIGNALS_P0_BITS; 867 val = (ESR_INT_SRDY0_P0 | 868 ESR_INT_DET0_P0 | 869 ESR_INT_XSRDY_P0 | 870 ESR_INT_XDP_P0_CH3 | 871 ESR_INT_XDP_P0_CH2 | 872 ESR_INT_XDP_P0_CH1 | 873 ESR_INT_XDP_P0_CH0); 874 break; 875 876 case 1: 877 mask = ESR_INT_SIGNALS_P1_BITS; 878 val = (ESR_INT_SRDY0_P1 | 879 ESR_INT_DET0_P1 | 880 ESR_INT_XSRDY_P1 | 881 ESR_INT_XDP_P1_CH3 | 882 ESR_INT_XDP_P1_CH2 | 883 ESR_INT_XDP_P1_CH1 | 884 ESR_INT_XDP_P1_CH0); 885 break; 886 887 default: 888 return -EINVAL; 889 } 890 891 if ((sig & mask) != val) { 892 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { 893 np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; 894 return 0; 895 } 896 dev_err(np->device, PFX "Port %u signal bits [%08x] are not " 897 "[%08x]\n", np->port, (int) (sig & mask), (int) val); 898 return -ENODEV; 899 } 900 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) 901 np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT; 902 return 0; 903} 904 905static int serdes_init_1g(struct niu *np) 906{ 907 u64 val; 908 909 val = nr64(ENET_SERDES_1_PLL_CFG); 910 val &= ~ENET_SERDES_PLL_FBDIV2; 911 switch (np->port) { 912 case 0: 913 val |= ENET_SERDES_PLL_HRATE0; 914 break; 915 case 1: 916 val |= ENET_SERDES_PLL_HRATE1; 917 break; 918 case 2: 919 val |= ENET_SERDES_PLL_HRATE2; 920 break; 921 case 3: 922 val |= ENET_SERDES_PLL_HRATE3; 923 break; 924 default: 925 return -EINVAL; 926 } 927 nw64(ENET_SERDES_1_PLL_CFG, val); 928 929 return 0; 930} 931 932static int serdes_init_1g_serdes(struct niu *np) 933{ 934 struct niu_link_config *lp = &np->link_config; 935 unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i; 936 u64 ctrl_val, test_cfg_val, sig, mask, val; 937 int err; 938 u64 reset_val, val_rd; 939 940 val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 | 941 ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 | 942 ENET_SERDES_PLL_FBDIV0; 943 switch (np->port) { 944 case 0: 945 reset_val = ENET_SERDES_RESET_0; 946 ctrl_reg = ENET_SERDES_0_CTRL_CFG; 947 test_cfg_reg = ENET_SERDES_0_TEST_CFG; 948 pll_cfg = ENET_SERDES_0_PLL_CFG; 949 break; 950 case 1: 951 reset_val = ENET_SERDES_RESET_1; 952 ctrl_reg = ENET_SERDES_1_CTRL_CFG; 953 test_cfg_reg = ENET_SERDES_1_TEST_CFG; 954 pll_cfg = ENET_SERDES_1_PLL_CFG; 955 break; 956 957 default: 958 return -EINVAL; 959 } 960 ctrl_val = (ENET_SERDES_CTRL_SDET_0 | 961 ENET_SERDES_CTRL_SDET_1 | 962 ENET_SERDES_CTRL_SDET_2 | 963 ENET_SERDES_CTRL_SDET_3 | 964 (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) | 965 (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) | 966 (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) | 967 (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) | 968 (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) | 969 (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) | 
970 (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) | 971 (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT)); 972 test_cfg_val = 0; 973 974 if (lp->loopback_mode == LOOPBACK_PHY) { 975 test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK << 976 ENET_SERDES_TEST_MD_0_SHIFT) | 977 (ENET_TEST_MD_PAD_LOOPBACK << 978 ENET_SERDES_TEST_MD_1_SHIFT) | 979 (ENET_TEST_MD_PAD_LOOPBACK << 980 ENET_SERDES_TEST_MD_2_SHIFT) | 981 (ENET_TEST_MD_PAD_LOOPBACK << 982 ENET_SERDES_TEST_MD_3_SHIFT)); 983 } 984 985 nw64(ENET_SERDES_RESET, reset_val); 986 mdelay(20); 987 val_rd = nr64(ENET_SERDES_RESET); 988 val_rd &= ~reset_val; 989 nw64(pll_cfg, val); 990 nw64(ctrl_reg, ctrl_val); 991 nw64(test_cfg_reg, test_cfg_val); 992 nw64(ENET_SERDES_RESET, val_rd); 993 mdelay(2000); 994 995 /* Initialize all 4 lanes of the SERDES. */ 996 for (i = 0; i < 4; i++) { 997 u32 rxtx_ctrl, glue0; 998 999 err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl); 1000 if (err) 1001 return err; 1002 err = esr_read_glue0(np, i, &glue0); 1003 if (err) 1004 return err; 1005 1006 rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO); 1007 rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH | 1008 (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT)); 1009 1010 glue0 &= ~(ESR_GLUE_CTRL0_SRATE | 1011 ESR_GLUE_CTRL0_THCNT | 1012 ESR_GLUE_CTRL0_BLTIME); 1013 glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB | 1014 (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) | 1015 (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) | 1016 (BLTIME_300_CYCLES << 1017 ESR_GLUE_CTRL0_BLTIME_SHIFT)); 1018 1019 err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl); 1020 if (err) 1021 return err; 1022 err = esr_write_glue0(np, i, glue0); 1023 if (err) 1024 return err; 1025 } 1026 1027 1028 sig = nr64(ESR_INT_SIGNALS); 1029 switch (np->port) { 1030 case 0: 1031 val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0); 1032 mask = val; 1033 break; 1034 1035 case 1: 1036 val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1); 1037 mask = val; 1038 break; 1039 1040 default: 1041 return -EINVAL; 1042 } 1043 1044 if ((sig & mask) != val) { 1045 dev_err(np->device, PFX "Port %u signal bits [%08x] are not " 1046 "[%08x]\n", np->port, (int) (sig & mask), (int) val); 1047 return -ENODEV; 1048 } 1049 1050 return 0; 1051} 1052 1053static int link_status_1g_serdes(struct niu *np, int *link_up_p) 1054{ 1055 struct niu_link_config *lp = &np->link_config; 1056 int link_up; 1057 u64 val; 1058 u16 current_speed; 1059 unsigned long flags; 1060 u8 current_duplex; 1061 1062 link_up = 0; 1063 current_speed = SPEED_INVALID; 1064 current_duplex = DUPLEX_INVALID; 1065 1066 spin_lock_irqsave(&np->lock, flags); 1067 1068 val = nr64_pcs(PCS_MII_STAT); 1069 1070 if (val & PCS_MII_STAT_LINK_STATUS) { 1071 link_up = 1; 1072 current_speed = SPEED_1000; 1073 current_duplex = DUPLEX_FULL; 1074 } 1075 1076 lp->active_speed = current_speed; 1077 lp->active_duplex = current_duplex; 1078 spin_unlock_irqrestore(&np->lock, flags); 1079 1080 *link_up_p = link_up; 1081 return 0; 1082} 1083 1084static int link_status_10g_serdes(struct niu *np, int *link_up_p) 1085{ 1086 unsigned long flags; 1087 struct niu_link_config *lp = &np->link_config; 1088 int link_up = 0; 1089 int link_ok = 1; 1090 u64 val, val2; 1091 u16 current_speed; 1092 u8 current_duplex; 1093 1094 if (!(np->flags & NIU_FLAGS_10G)) 1095 return link_status_1g_serdes(np, link_up_p); 1096 1097 current_speed = SPEED_INVALID; 1098 current_duplex = DUPLEX_INVALID; 1099 spin_lock_irqsave(&np->lock, flags); 1100 1101 val = nr64_xpcs(XPCS_STATUS(0)); 1102 val2 = nr64_mac(XMAC_INTER2); 1103 if (val2 & 0x01000000) 1104 link_ok = 0; 1105 1106 if ((val & 0x1000ULL) && link_ok) { 1107 link_up = 1; 1108 current_speed = 
SPEED_10000; 1109 current_duplex = DUPLEX_FULL; 1110 } 1111 lp->active_speed = current_speed; 1112 lp->active_duplex = current_duplex; 1113 spin_unlock_irqrestore(&np->lock, flags); 1114 *link_up_p = link_up; 1115 return 0; 1116} 1117 1118static int link_status_mii(struct niu *np, int *link_up_p) 1119{ 1120 struct niu_link_config *lp = &np->link_config; 1121 int err; 1122 int bmsr, advert, ctrl1000, stat1000, lpa, bmcr, estatus; 1123 int supported, advertising, active_speed, active_duplex; 1124 1125 err = mii_read(np, np->phy_addr, MII_BMCR); 1126 if (unlikely(err < 0)) 1127 return err; 1128 bmcr = err; 1129 1130 err = mii_read(np, np->phy_addr, MII_BMSR); 1131 if (unlikely(err < 0)) 1132 return err; 1133 bmsr = err; 1134 1135 err = mii_read(np, np->phy_addr, MII_ADVERTISE); 1136 if (unlikely(err < 0)) 1137 return err; 1138 advert = err; 1139 1140 err = mii_read(np, np->phy_addr, MII_LPA); 1141 if (unlikely(err < 0)) 1142 return err; 1143 lpa = err; 1144 1145 if (likely(bmsr & BMSR_ESTATEN)) { 1146 err = mii_read(np, np->phy_addr, MII_ESTATUS); 1147 if (unlikely(err < 0)) 1148 return err; 1149 estatus = err; 1150 1151 err = mii_read(np, np->phy_addr, MII_CTRL1000); 1152 if (unlikely(err < 0)) 1153 return err; 1154 ctrl1000 = err; 1155 1156 err = mii_read(np, np->phy_addr, MII_STAT1000); 1157 if (unlikely(err < 0)) 1158 return err; 1159 stat1000 = err; 1160 } else 1161 estatus = ctrl1000 = stat1000 = 0; 1162 1163 supported = 0; 1164 if (bmsr & BMSR_ANEGCAPABLE) 1165 supported |= SUPPORTED_Autoneg; 1166 if (bmsr & BMSR_10HALF) 1167 supported |= SUPPORTED_10baseT_Half; 1168 if (bmsr & BMSR_10FULL) 1169 supported |= SUPPORTED_10baseT_Full; 1170 if (bmsr & BMSR_100HALF) 1171 supported |= SUPPORTED_100baseT_Half; 1172 if (bmsr & BMSR_100FULL) 1173 supported |= SUPPORTED_100baseT_Full; 1174 if (estatus & ESTATUS_1000_THALF) 1175 supported |= SUPPORTED_1000baseT_Half; 1176 if (estatus & ESTATUS_1000_TFULL) 1177 supported |= SUPPORTED_1000baseT_Full; 1178 lp->supported = supported; 1179 1180 advertising = 0; 1181 if (advert & ADVERTISE_10HALF) 1182 advertising |= ADVERTISED_10baseT_Half; 1183 if (advert & ADVERTISE_10FULL) 1184 advertising |= ADVERTISED_10baseT_Full; 1185 if (advert & ADVERTISE_100HALF) 1186 advertising |= ADVERTISED_100baseT_Half; 1187 if (advert & ADVERTISE_100FULL) 1188 advertising |= ADVERTISED_100baseT_Full; 1189 if (ctrl1000 & ADVERTISE_1000HALF) 1190 advertising |= ADVERTISED_1000baseT_Half; 1191 if (ctrl1000 & ADVERTISE_1000FULL) 1192 advertising |= ADVERTISED_1000baseT_Full; 1193 1194 if (bmcr & BMCR_ANENABLE) { 1195 int neg, neg1000; 1196 1197 lp->active_autoneg = 1; 1198 advertising |= ADVERTISED_Autoneg; 1199 1200 neg = advert & lpa; 1201 neg1000 = (ctrl1000 << 2) & stat1000; 1202 1203 if (neg1000 & (LPA_1000FULL | LPA_1000HALF)) 1204 active_speed = SPEED_1000; 1205 else if (neg & LPA_100) 1206 active_speed = SPEED_100; 1207 else if (neg & (LPA_10HALF | LPA_10FULL)) 1208 active_speed = SPEED_10; 1209 else 1210 active_speed = SPEED_INVALID; 1211 1212 if ((neg1000 & LPA_1000FULL) || (neg & LPA_DUPLEX)) 1213 active_duplex = DUPLEX_FULL; 1214 else if (active_speed != SPEED_INVALID) 1215 active_duplex = DUPLEX_HALF; 1216 else 1217 active_duplex = DUPLEX_INVALID; 1218 } else { 1219 lp->active_autoneg = 0; 1220 1221 if ((bmcr & BMCR_SPEED1000) && !(bmcr & BMCR_SPEED100)) 1222 active_speed = SPEED_1000; 1223 else if (bmcr & BMCR_SPEED100) 1224 active_speed = SPEED_100; 1225 else 1226 active_speed = SPEED_10; 1227 1228 if (bmcr & BMCR_FULLDPLX) 1229 active_duplex = DUPLEX_FULL; 
1230 else 1231 active_duplex = DUPLEX_HALF; 1232 } 1233 1234 lp->active_advertising = advertising; 1235 lp->active_speed = active_speed; 1236 lp->active_duplex = active_duplex; 1237 *link_up_p = !!(bmsr & BMSR_LSTATUS); 1238 1239 return 0; 1240} 1241 1242static int link_status_1g_rgmii(struct niu *np, int *link_up_p) 1243{ 1244 struct niu_link_config *lp = &np->link_config; 1245 u16 current_speed, bmsr; 1246 unsigned long flags; 1247 u8 current_duplex; 1248 int err, link_up; 1249 1250 link_up = 0; 1251 current_speed = SPEED_INVALID; 1252 current_duplex = DUPLEX_INVALID; 1253 1254 spin_lock_irqsave(&np->lock, flags); 1255 1256 err = -EINVAL; 1257 1258 err = mii_read(np, np->phy_addr, MII_BMSR); 1259 if (err < 0) 1260 goto out; 1261 1262 bmsr = err; 1263 if (bmsr & BMSR_LSTATUS) { 1264 u16 adv, lpa, common, estat; 1265 1266 err = mii_read(np, np->phy_addr, MII_ADVERTISE); 1267 if (err < 0) 1268 goto out; 1269 adv = err; 1270 1271 err = mii_read(np, np->phy_addr, MII_LPA); 1272 if (err < 0) 1273 goto out; 1274 lpa = err; 1275 1276 common = adv & lpa; 1277 1278 err = mii_read(np, np->phy_addr, MII_ESTATUS); 1279 if (err < 0) 1280 goto out; 1281 estat = err; 1282 link_up = 1; 1283 current_speed = SPEED_1000; 1284 current_duplex = DUPLEX_FULL; 1285 1286 } 1287 lp->active_speed = current_speed; 1288 lp->active_duplex = current_duplex; 1289 err = 0; 1290 1291out: 1292 spin_unlock_irqrestore(&np->lock, flags); 1293 1294 *link_up_p = link_up; 1295 return err; 1296} 1297 1298static int link_status_1g(struct niu *np, int *link_up_p) 1299{ 1300 struct niu_link_config *lp = &np->link_config; 1301 unsigned long flags; 1302 int err; 1303 1304 spin_lock_irqsave(&np->lock, flags); 1305 1306 err = link_status_mii(np, link_up_p); 1307 lp->supported |= SUPPORTED_TP; 1308 lp->active_advertising |= ADVERTISED_TP; 1309 1310 spin_unlock_irqrestore(&np->lock, flags); 1311 return err; 1312} 1313 1314static int bcm8704_reset(struct niu *np) 1315{ 1316 int err, limit; 1317 1318 err = mdio_read(np, np->phy_addr, 1319 BCM8704_PHYXS_DEV_ADDR, MII_BMCR); 1320 if (err < 0) 1321 return err; 1322 err |= BMCR_RESET; 1323 err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, 1324 MII_BMCR, err); 1325 if (err) 1326 return err; 1327 1328 limit = 1000; 1329 while (--limit >= 0) { 1330 err = mdio_read(np, np->phy_addr, 1331 BCM8704_PHYXS_DEV_ADDR, MII_BMCR); 1332 if (err < 0) 1333 return err; 1334 if (!(err & BMCR_RESET)) 1335 break; 1336 } 1337 if (limit < 0) { 1338 dev_err(np->device, PFX "Port %u PHY will not reset " 1339 "(bmcr=%04x)\n", np->port, (err & 0xffff)); 1340 return -ENODEV; 1341 } 1342 return 0; 1343} 1344 1345/* When written, certain PHY registers need to be read back twice 1346 * in order for the bits to settle properly. 
1347 */ 1348static int bcm8704_user_dev3_readback(struct niu *np, int reg) 1349{ 1350 int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg); 1351 if (err < 0) 1352 return err; 1353 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg); 1354 if (err < 0) 1355 return err; 1356 return 0; 1357} 1358 1359static int bcm8706_init_user_dev3(struct niu *np) 1360{ 1361 int err; 1362 1363 1364 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 1365 BCM8704_USER_OPT_DIGITAL_CTRL); 1366 if (err < 0) 1367 return err; 1368 err &= ~USER_ODIG_CTRL_GPIOS; 1369 err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT); 1370 err |= USER_ODIG_CTRL_RESV2; 1371 err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 1372 BCM8704_USER_OPT_DIGITAL_CTRL, err); 1373 if (err) 1374 return err; 1375 1376 mdelay(1000); 1377 1378 return 0; 1379} 1380 1381static int bcm8704_init_user_dev3(struct niu *np) 1382{ 1383 int err; 1384 1385 err = mdio_write(np, np->phy_addr, 1386 BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL, 1387 (USER_CONTROL_OPTXRST_LVL | 1388 USER_CONTROL_OPBIASFLT_LVL | 1389 USER_CONTROL_OBTMPFLT_LVL | 1390 USER_CONTROL_OPPRFLT_LVL | 1391 USER_CONTROL_OPTXFLT_LVL | 1392 USER_CONTROL_OPRXLOS_LVL | 1393 USER_CONTROL_OPRXFLT_LVL | 1394 USER_CONTROL_OPTXON_LVL | 1395 (0x3f << USER_CONTROL_RES1_SHIFT))); 1396 if (err) 1397 return err; 1398 1399 err = mdio_write(np, np->phy_addr, 1400 BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL, 1401 (USER_PMD_TX_CTL_XFP_CLKEN | 1402 (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) | 1403 (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) | 1404 USER_PMD_TX_CTL_TSCK_LPWREN)); 1405 if (err) 1406 return err; 1407 1408 err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL); 1409 if (err) 1410 return err; 1411 err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL); 1412 if (err) 1413 return err; 1414 1415 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 1416 BCM8704_USER_OPT_DIGITAL_CTRL); 1417 if (err < 0) 1418 return err; 1419 err &= ~USER_ODIG_CTRL_GPIOS; 1420 err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT); 1421 err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 1422 BCM8704_USER_OPT_DIGITAL_CTRL, err); 1423 if (err) 1424 return err; 1425 1426 mdelay(1000); 1427 1428 return 0; 1429} 1430 1431static int mrvl88x2011_act_led(struct niu *np, int val) 1432{ 1433 int err; 1434 1435 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, 1436 MRVL88X2011_LED_8_TO_11_CTL); 1437 if (err < 0) 1438 return err; 1439 1440 err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT,MRVL88X2011_LED_CTL_MASK); 1441 err |= MRVL88X2011_LED(MRVL88X2011_LED_ACT,val); 1442 1443 return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, 1444 MRVL88X2011_LED_8_TO_11_CTL, err); 1445} 1446 1447static int mrvl88x2011_led_blink_rate(struct niu *np, int rate) 1448{ 1449 int err; 1450 1451 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, 1452 MRVL88X2011_LED_BLINK_CTL); 1453 if (err >= 0) { 1454 err &= ~MRVL88X2011_LED_BLKRATE_MASK; 1455 err |= (rate << 4); 1456 1457 err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, 1458 MRVL88X2011_LED_BLINK_CTL, err); 1459 } 1460 1461 return err; 1462} 1463 1464static int xcvr_init_10g_mrvl88x2011(struct niu *np) 1465{ 1466 int err; 1467 1468 /* Set LED functions */ 1469 err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS); 1470 if (err) 1471 return err; 1472 1473 /* led activity */ 1474 err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF); 1475 if (err) 1476 return err; 1477 1478 err = mdio_read(np, 
np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, 1479 MRVL88X2011_GENERAL_CTL); 1480 if (err < 0) 1481 return err; 1482 1483 err |= MRVL88X2011_ENA_XFPREFCLK; 1484 1485 err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, 1486 MRVL88X2011_GENERAL_CTL, err); 1487 if (err < 0) 1488 return err; 1489 1490 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, 1491 MRVL88X2011_PMA_PMD_CTL_1); 1492 if (err < 0) 1493 return err; 1494 1495 if (np->link_config.loopback_mode == LOOPBACK_MAC) 1496 err |= MRVL88X2011_LOOPBACK; 1497 else 1498 err &= ~MRVL88X2011_LOOPBACK; 1499 1500 err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, 1501 MRVL88X2011_PMA_PMD_CTL_1, err); 1502 if (err < 0) 1503 return err; 1504 1505 /* Enable PMD */ 1506 return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, 1507 MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX); 1508} 1509 1510 1511static int xcvr_diag_bcm870x(struct niu *np) 1512{ 1513 u16 analog_stat0, tx_alarm_status; 1514 int err = 0; 1515 1516#if 1 1517 err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, 1518 MII_STAT1000); 1519 if (err < 0) 1520 return err; 1521 pr_info(PFX "Port %u PMA_PMD(MII_STAT1000) [%04x]\n", 1522 np->port, err); 1523 1524 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20); 1525 if (err < 0) 1526 return err; 1527 pr_info(PFX "Port %u USER_DEV3(0x20) [%04x]\n", 1528 np->port, err); 1529 1530 err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, 1531 MII_NWAYTEST); 1532 if (err < 0) 1533 return err; 1534 pr_info(PFX "Port %u PHYXS(MII_NWAYTEST) [%04x]\n", 1535 np->port, err); 1536#endif 1537 1538 /* XXX dig this out it might not be so useful XXX */ 1539 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 1540 BCM8704_USER_ANALOG_STATUS0); 1541 if (err < 0) 1542 return err; 1543 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 1544 BCM8704_USER_ANALOG_STATUS0); 1545 if (err < 0) 1546 return err; 1547 analog_stat0 = err; 1548 1549 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 1550 BCM8704_USER_TX_ALARM_STATUS); 1551 if (err < 0) 1552 return err; 1553 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 1554 BCM8704_USER_TX_ALARM_STATUS); 1555 if (err < 0) 1556 return err; 1557 tx_alarm_status = err; 1558 1559 if (analog_stat0 != 0x03fc) { 1560 if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) { 1561 pr_info(PFX "Port %u cable not connected " 1562 "or bad cable.\n", np->port); 1563 } else if (analog_stat0 == 0x639c) { 1564 pr_info(PFX "Port %u optical module is bad " 1565 "or missing.\n", np->port); 1566 } 1567 } 1568 1569 return 0; 1570} 1571 1572static int xcvr_10g_set_lb_bcm870x(struct niu *np) 1573{ 1574 struct niu_link_config *lp = &np->link_config; 1575 int err; 1576 1577 err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, 1578 MII_BMCR); 1579 if (err < 0) 1580 return err; 1581 1582 err &= ~BMCR_LOOPBACK; 1583 1584 if (lp->loopback_mode == LOOPBACK_MAC) 1585 err |= BMCR_LOOPBACK; 1586 1587 err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, 1588 MII_BMCR, err); 1589 if (err) 1590 return err; 1591 1592 return 0; 1593} 1594 1595static int xcvr_init_10g_bcm8706(struct niu *np) 1596{ 1597 int err = 0; 1598 u64 val; 1599 1600 if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) && 1601 (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0) 1602 return err; 1603 1604 val = nr64_mac(XMAC_CONFIG); 1605 val &= ~XMAC_CONFIG_LED_POLARITY; 1606 val |= XMAC_CONFIG_FORCE_LED_ON; 1607 nw64_mac(XMAC_CONFIG, val); 1608 1609 val = nr64(MIF_CONFIG); 1610 val |= 
MIF_CONFIG_INDIRECT_MODE; 1611 nw64(MIF_CONFIG, val); 1612 1613 err = bcm8704_reset(np); 1614 if (err) 1615 return err; 1616 1617 err = xcvr_10g_set_lb_bcm870x(np); 1618 if (err) 1619 return err; 1620 1621 err = bcm8706_init_user_dev3(np); 1622 if (err) 1623 return err; 1624 1625 err = xcvr_diag_bcm870x(np); 1626 if (err) 1627 return err; 1628 1629 return 0; 1630} 1631 1632static int xcvr_init_10g_bcm8704(struct niu *np) 1633{ 1634 int err; 1635 1636 err = bcm8704_reset(np); 1637 if (err) 1638 return err; 1639 1640 err = bcm8704_init_user_dev3(np); 1641 if (err) 1642 return err; 1643 1644 err = xcvr_10g_set_lb_bcm870x(np); 1645 if (err) 1646 return err; 1647 1648 err = xcvr_diag_bcm870x(np); 1649 if (err) 1650 return err; 1651 1652 return 0; 1653} 1654 1655static int xcvr_init_10g(struct niu *np) 1656{ 1657 int phy_id, err; 1658 u64 val; 1659 1660 val = nr64_mac(XMAC_CONFIG); 1661 val &= ~XMAC_CONFIG_LED_POLARITY; 1662 val |= XMAC_CONFIG_FORCE_LED_ON; 1663 nw64_mac(XMAC_CONFIG, val); 1664 1665 /* XXX shared resource, lock parent XXX */ 1666 val = nr64(MIF_CONFIG); 1667 val |= MIF_CONFIG_INDIRECT_MODE; 1668 nw64(MIF_CONFIG, val); 1669 1670 phy_id = phy_decode(np->parent->port_phy, np->port); 1671 phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port]; 1672 1673 /* handle different phy types */ 1674 switch (phy_id & NIU_PHY_ID_MASK) { 1675 case NIU_PHY_ID_MRVL88X2011: 1676 err = xcvr_init_10g_mrvl88x2011(np); 1677 break; 1678 1679 default: /* bcom 8704 */ 1680 err = xcvr_init_10g_bcm8704(np); 1681 break; 1682 } 1683 1684 return 0; 1685} 1686 1687static int mii_reset(struct niu *np) 1688{ 1689 int limit, err; 1690 1691 err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET); 1692 if (err) 1693 return err; 1694 1695 limit = 1000; 1696 while (--limit >= 0) { 1697 udelay(500); 1698 err = mii_read(np, np->phy_addr, MII_BMCR); 1699 if (err < 0) 1700 return err; 1701 if (!(err & BMCR_RESET)) 1702 break; 1703 } 1704 if (limit < 0) { 1705 dev_err(np->device, PFX "Port %u MII would not reset, " 1706 "bmcr[%04x]\n", np->port, err); 1707 return -ENODEV; 1708 } 1709 1710 return 0; 1711} 1712 1713static int xcvr_init_1g_rgmii(struct niu *np) 1714{ 1715 int err; 1716 u64 val; 1717 u16 bmcr, bmsr, estat; 1718 1719 val = nr64(MIF_CONFIG); 1720 val &= ~MIF_CONFIG_INDIRECT_MODE; 1721 nw64(MIF_CONFIG, val); 1722 1723 err = mii_reset(np); 1724 if (err) 1725 return err; 1726 1727 err = mii_read(np, np->phy_addr, MII_BMSR); 1728 if (err < 0) 1729 return err; 1730 bmsr = err; 1731 1732 estat = 0; 1733 if (bmsr & BMSR_ESTATEN) { 1734 err = mii_read(np, np->phy_addr, MII_ESTATUS); 1735 if (err < 0) 1736 return err; 1737 estat = err; 1738 } 1739 1740 bmcr = 0; 1741 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); 1742 if (err) 1743 return err; 1744 1745 if (bmsr & BMSR_ESTATEN) { 1746 u16 ctrl1000 = 0; 1747 1748 if (estat & ESTATUS_1000_TFULL) 1749 ctrl1000 |= ADVERTISE_1000FULL; 1750 err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000); 1751 if (err) 1752 return err; 1753 } 1754 1755 bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX); 1756 1757 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); 1758 if (err) 1759 return err; 1760 1761 err = mii_read(np, np->phy_addr, MII_BMCR); 1762 if (err < 0) 1763 return err; 1764 bmcr = mii_read(np, np->phy_addr, MII_BMCR); 1765 1766 err = mii_read(np, np->phy_addr, MII_BMSR); 1767 if (err < 0) 1768 return err; 1769 1770 return 0; 1771} 1772 1773static int mii_init_common(struct niu *np) 1774{ 1775 struct niu_link_config *lp = &np->link_config; 1776 u16 bmcr, bmsr, adv, estat; 
1777 int err; 1778 1779 err = mii_reset(np); 1780 if (err) 1781 return err; 1782 1783 err = mii_read(np, np->phy_addr, MII_BMSR); 1784 if (err < 0) 1785 return err; 1786 bmsr = err; 1787 1788 estat = 0; 1789 if (bmsr & BMSR_ESTATEN) { 1790 err = mii_read(np, np->phy_addr, MII_ESTATUS); 1791 if (err < 0) 1792 return err; 1793 estat = err; 1794 } 1795 1796 bmcr = 0; 1797 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); 1798 if (err) 1799 return err; 1800 1801 if (lp->loopback_mode == LOOPBACK_MAC) { 1802 bmcr |= BMCR_LOOPBACK; 1803 if (lp->active_speed == SPEED_1000) 1804 bmcr |= BMCR_SPEED1000; 1805 if (lp->active_duplex == DUPLEX_FULL) 1806 bmcr |= BMCR_FULLDPLX; 1807 } 1808 1809 if (lp->loopback_mode == LOOPBACK_PHY) { 1810 u16 aux; 1811 1812 aux = (BCM5464R_AUX_CTL_EXT_LB | 1813 BCM5464R_AUX_CTL_WRITE_1); 1814 err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux); 1815 if (err) 1816 return err; 1817 } 1818 1819 if (lp->autoneg) { 1820 u16 ctrl1000; 1821 1822 adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP; 1823 if ((bmsr & BMSR_10HALF) && 1824 (lp->advertising & ADVERTISED_10baseT_Half)) 1825 adv |= ADVERTISE_10HALF; 1826 if ((bmsr & BMSR_10FULL) && 1827 (lp->advertising & ADVERTISED_10baseT_Full)) 1828 adv |= ADVERTISE_10FULL; 1829 if ((bmsr & BMSR_100HALF) && 1830 (lp->advertising & ADVERTISED_100baseT_Half)) 1831 adv |= ADVERTISE_100HALF; 1832 if ((bmsr & BMSR_100FULL) && 1833 (lp->advertising & ADVERTISED_100baseT_Full)) 1834 adv |= ADVERTISE_100FULL; 1835 err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv); 1836 if (err) 1837 return err; 1838 1839 if (likely(bmsr & BMSR_ESTATEN)) { 1840 ctrl1000 = 0; 1841 if ((estat & ESTATUS_1000_THALF) && 1842 (lp->advertising & ADVERTISED_1000baseT_Half)) 1843 ctrl1000 |= ADVERTISE_1000HALF; 1844 if ((estat & ESTATUS_1000_TFULL) && 1845 (lp->advertising & ADVERTISED_1000baseT_Full)) 1846 ctrl1000 |= ADVERTISE_1000FULL; 1847 err = mii_write(np, np->phy_addr, 1848 MII_CTRL1000, ctrl1000); 1849 if (err) 1850 return err; 1851 } 1852 1853 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 1854 } else { 1855 /* !lp->autoneg */ 1856 int fulldpx; 1857 1858 if (lp->duplex == DUPLEX_FULL) { 1859 bmcr |= BMCR_FULLDPLX; 1860 fulldpx = 1; 1861 } else if (lp->duplex == DUPLEX_HALF) 1862 fulldpx = 0; 1863 else 1864 return -EINVAL; 1865 1866 if (lp->speed == SPEED_1000) { 1867 /* if X-full requested while not supported, or 1868 X-half requested while not supported... 
*/ 1869 if ((fulldpx && !(estat & ESTATUS_1000_TFULL)) || 1870 (!fulldpx && !(estat & ESTATUS_1000_THALF))) 1871 return -EINVAL; 1872 bmcr |= BMCR_SPEED1000; 1873 } else if (lp->speed == SPEED_100) { 1874 if ((fulldpx && !(bmsr & BMSR_100FULL)) || 1875 (!fulldpx && !(bmsr & BMSR_100HALF))) 1876 return -EINVAL; 1877 bmcr |= BMCR_SPEED100; 1878 } else if (lp->speed == SPEED_10) { 1879 if ((fulldpx && !(bmsr & BMSR_10FULL)) || 1880 (!fulldpx && !(bmsr & BMSR_10HALF))) 1881 return -EINVAL; 1882 } else 1883 return -EINVAL; 1884 } 1885 1886 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); 1887 if (err) 1888 return err; 1889 1890#if 0 1891 err = mii_read(np, np->phy_addr, MII_BMCR); 1892 if (err < 0) 1893 return err; 1894 bmcr = err; 1895 1896 err = mii_read(np, np->phy_addr, MII_BMSR); 1897 if (err < 0) 1898 return err; 1899 bmsr = err; 1900 1901 pr_info(PFX "Port %u after MII init bmcr[%04x] bmsr[%04x]\n", 1902 np->port, bmcr, bmsr); 1903#endif 1904 1905 return 0; 1906} 1907 1908static int xcvr_init_1g(struct niu *np) 1909{ 1910 u64 val; 1911 1912 /* XXX shared resource, lock parent XXX */ 1913 val = nr64(MIF_CONFIG); 1914 val &= ~MIF_CONFIG_INDIRECT_MODE; 1915 nw64(MIF_CONFIG, val); 1916 1917 return mii_init_common(np); 1918} 1919 1920static int niu_xcvr_init(struct niu *np) 1921{ 1922 const struct niu_phy_ops *ops = np->phy_ops; 1923 int err; 1924 1925 err = 0; 1926 if (ops->xcvr_init) 1927 err = ops->xcvr_init(np); 1928 1929 return err; 1930} 1931 1932static int niu_serdes_init(struct niu *np) 1933{ 1934 const struct niu_phy_ops *ops = np->phy_ops; 1935 int err; 1936 1937 err = 0; 1938 if (ops->serdes_init) 1939 err = ops->serdes_init(np); 1940 1941 return err; 1942} 1943 1944static void niu_init_xif(struct niu *); 1945static void niu_handle_led(struct niu *, int status); 1946 1947static int niu_link_status_common(struct niu *np, int link_up) 1948{ 1949 struct niu_link_config *lp = &np->link_config; 1950 struct net_device *dev = np->dev; 1951 unsigned long flags; 1952 1953 if (!netif_carrier_ok(dev) && link_up) { 1954 niuinfo(LINK, "%s: Link is up at %s, %s duplex\n", 1955 dev->name, 1956 (lp->active_speed == SPEED_10000 ? 1957 "10Gb/sec" : 1958 (lp->active_speed == SPEED_1000 ? 1959 "1Gb/sec" : 1960 (lp->active_speed == SPEED_100 ? 1961 "100Mbit/sec" : "10Mbit/sec"))), 1962 (lp->active_duplex == DUPLEX_FULL ? 1963 "full" : "half")); 1964 1965 spin_lock_irqsave(&np->lock, flags); 1966 niu_init_xif(np); 1967 niu_handle_led(np, 1); 1968 spin_unlock_irqrestore(&np->lock, flags); 1969 1970 netif_carrier_on(dev); 1971 } else if (netif_carrier_ok(dev) && !link_up) { 1972 niuwarn(LINK, "%s: Link is down\n", dev->name); 1973 spin_lock_irqsave(&np->lock, flags); 1974 niu_handle_led(np, 0); 1975 spin_unlock_irqrestore(&np->lock, flags); 1976 netif_carrier_off(dev); 1977 } 1978 1979 return 0; 1980} 1981 1982static int link_status_10g_mrvl(struct niu *np, int *link_up_p) 1983{ 1984 int err, link_up, pma_status, pcs_status; 1985 1986 link_up = 0; 1987 1988 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, 1989 MRVL88X2011_10G_PMD_STATUS_2); 1990 if (err < 0) 1991 goto out; 1992 1993 /* Check PMA/PMD Register: 1.0001.2 == 1 */ 1994 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, 1995 MRVL88X2011_PMA_PMD_STATUS_1); 1996 if (err < 0) 1997 goto out; 1998 1999 pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 
1 : 0); 2000 2001 /* Check PMC Register : 3.0001.2 == 1: read twice */ 2002 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, 2003 MRVL88X2011_PMA_PMD_STATUS_1); 2004 if (err < 0) 2005 goto out; 2006 2007 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, 2008 MRVL88X2011_PMA_PMD_STATUS_1); 2009 if (err < 0) 2010 goto out; 2011 2012 pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0); 2013 2014 /* Check XGXS Register : 4.0018.[0-3,12] */ 2015 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR, 2016 MRVL88X2011_10G_XGXS_LANE_STAT); 2017 if (err < 0) 2018 goto out; 2019 2020 if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 | 2021 PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 | 2022 PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC | 2023 0x800)) 2024 link_up = (pma_status && pcs_status) ? 1 : 0; 2025 2026 np->link_config.active_speed = SPEED_10000; 2027 np->link_config.active_duplex = DUPLEX_FULL; 2028 err = 0; 2029out: 2030 mrvl88x2011_act_led(np, (link_up ? 2031 MRVL88X2011_LED_CTL_PCS_ACT : 2032 MRVL88X2011_LED_CTL_OFF)); 2033 2034 *link_up_p = link_up; 2035 return err; 2036} 2037 2038static int link_status_10g_bcm8706(struct niu *np, int *link_up_p) 2039{ 2040 int err, link_up; 2041 link_up = 0; 2042 2043 err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, 2044 BCM8704_PMD_RCV_SIGDET); 2045 if (err < 0) 2046 goto out; 2047 if (!(err & PMD_RCV_SIGDET_GLOBAL)) { 2048 err = 0; 2049 goto out; 2050 } 2051 2052 err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, 2053 BCM8704_PCS_10G_R_STATUS); 2054 if (err < 0) 2055 goto out; 2056 2057 if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) { 2058 err = 0; 2059 goto out; 2060 } 2061 2062 err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, 2063 BCM8704_PHYXS_XGXS_LANE_STAT); 2064 if (err < 0) 2065 goto out; 2066 if (err != (PHYXS_XGXS_LANE_STAT_ALINGED | 2067 PHYXS_XGXS_LANE_STAT_MAGIC | 2068 PHYXS_XGXS_LANE_STAT_PATTEST | 2069 PHYXS_XGXS_LANE_STAT_LANE3 | 2070 PHYXS_XGXS_LANE_STAT_LANE2 | 2071 PHYXS_XGXS_LANE_STAT_LANE1 | 2072 PHYXS_XGXS_LANE_STAT_LANE0)) { 2073 err = 0; 2074 np->link_config.active_speed = SPEED_INVALID; 2075 np->link_config.active_duplex = DUPLEX_INVALID; 2076 goto out; 2077 } 2078 2079 link_up = 1; 2080 np->link_config.active_speed = SPEED_10000; 2081 np->link_config.active_duplex = DUPLEX_FULL; 2082 err = 0; 2083 2084out: 2085 *link_up_p = link_up; 2086 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) 2087 err = 0; 2088 return err; 2089} 2090 2091static int link_status_10g_bcom(struct niu *np, int *link_up_p) 2092{ 2093 int err, link_up; 2094 2095 link_up = 0; 2096 2097 err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, 2098 BCM8704_PMD_RCV_SIGDET); 2099 if (err < 0) 2100 goto out; 2101 if (!(err & PMD_RCV_SIGDET_GLOBAL)) { 2102 err = 0; 2103 goto out; 2104 } 2105 2106 err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, 2107 BCM8704_PCS_10G_R_STATUS); 2108 if (err < 0) 2109 goto out; 2110 if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) { 2111 err = 0; 2112 goto out; 2113 } 2114 2115 err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, 2116 BCM8704_PHYXS_XGXS_LANE_STAT); 2117 if (err < 0) 2118 goto out; 2119 2120 if (err != (PHYXS_XGXS_LANE_STAT_ALINGED | 2121 PHYXS_XGXS_LANE_STAT_MAGIC | 2122 PHYXS_XGXS_LANE_STAT_LANE3 | 2123 PHYXS_XGXS_LANE_STAT_LANE2 | 2124 PHYXS_XGXS_LANE_STAT_LANE1 | 2125 PHYXS_XGXS_LANE_STAT_LANE0)) { 2126 err = 0; 2127 goto out; 2128 } 2129 2130 link_up = 1; 2131 np->link_config.active_speed = SPEED_10000; 2132 
np->link_config.active_duplex = DUPLEX_FULL; 2133 err = 0; 2134 2135out: 2136 *link_up_p = link_up; 2137 return err; 2138} 2139 2140static int link_status_10g(struct niu *np, int *link_up_p) 2141{ 2142 unsigned long flags; 2143 int err = -EINVAL; 2144 2145 spin_lock_irqsave(&np->lock, flags); 2146 2147 if (np->link_config.loopback_mode == LOOPBACK_DISABLED) { 2148 int phy_id; 2149 2150 phy_id = phy_decode(np->parent->port_phy, np->port); 2151 phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port]; 2152 2153 /* handle different phy types */ 2154 switch (phy_id & NIU_PHY_ID_MASK) { 2155 case NIU_PHY_ID_MRVL88X2011: 2156 err = link_status_10g_mrvl(np, link_up_p); 2157 break; 2158 2159 default: /* bcom 8704 */ 2160 err = link_status_10g_bcom(np, link_up_p); 2161 break; 2162 } 2163 } 2164 2165 spin_unlock_irqrestore(&np->lock, flags); 2166 2167 return err; 2168} 2169 2170static int niu_10g_phy_present(struct niu *np) 2171{ 2172 u64 sig, mask, val; 2173 2174 sig = nr64(ESR_INT_SIGNALS); 2175 switch (np->port) { 2176 case 0: 2177 mask = ESR_INT_SIGNALS_P0_BITS; 2178 val = (ESR_INT_SRDY0_P0 | 2179 ESR_INT_DET0_P0 | 2180 ESR_INT_XSRDY_P0 | 2181 ESR_INT_XDP_P0_CH3 | 2182 ESR_INT_XDP_P0_CH2 | 2183 ESR_INT_XDP_P0_CH1 | 2184 ESR_INT_XDP_P0_CH0); 2185 break; 2186 2187 case 1: 2188 mask = ESR_INT_SIGNALS_P1_BITS; 2189 val = (ESR_INT_SRDY0_P1 | 2190 ESR_INT_DET0_P1 | 2191 ESR_INT_XSRDY_P1 | 2192 ESR_INT_XDP_P1_CH3 | 2193 ESR_INT_XDP_P1_CH2 | 2194 ESR_INT_XDP_P1_CH1 | 2195 ESR_INT_XDP_P1_CH0); 2196 break; 2197 2198 default: 2199 return 0; 2200 } 2201 2202 if ((sig & mask) != val) 2203 return 0; 2204 return 1; 2205} 2206 2207static int link_status_10g_hotplug(struct niu *np, int *link_up_p) 2208{ 2209 unsigned long flags; 2210 int err = 0; 2211 int phy_present; 2212 int phy_present_prev; 2213 2214 spin_lock_irqsave(&np->lock, flags); 2215 2216 if (np->link_config.loopback_mode == LOOPBACK_DISABLED) { 2217 phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ? 
2218 1 : 0; 2219 phy_present = niu_10g_phy_present(np); 2220 if (phy_present != phy_present_prev) { 2221 /* state change */ 2222 if (phy_present) { 2223 np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT; 2224 if (np->phy_ops->xcvr_init) 2225 err = np->phy_ops->xcvr_init(np); 2226 if (err) { 2227 /* debounce */ 2228 np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; 2229 } 2230 } else { 2231 np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; 2232 *link_up_p = 0; 2233 niuwarn(LINK, "%s: Hotplug PHY Removed\n", 2234 np->dev->name); 2235 } 2236 } 2237 if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) 2238 err = link_status_10g_bcm8706(np, link_up_p); 2239 } 2240 2241 spin_unlock_irqrestore(&np->lock, flags); 2242 2243 return err; 2244} 2245 2246static int niu_link_status(struct niu *np, int *link_up_p) 2247{ 2248 const struct niu_phy_ops *ops = np->phy_ops; 2249 int err; 2250 2251 err = 0; 2252 if (ops->link_status) 2253 err = ops->link_status(np, link_up_p); 2254 2255 return err; 2256} 2257 2258static void niu_timer(unsigned long __opaque) 2259{ 2260 struct niu *np = (struct niu *) __opaque; 2261 unsigned long off; 2262 int err, link_up; 2263 2264 err = niu_link_status(np, &link_up); 2265 if (!err) 2266 niu_link_status_common(np, link_up); 2267 2268 if (netif_carrier_ok(np->dev)) 2269 off = 5 * HZ; 2270 else 2271 off = 1 * HZ; 2272 np->timer.expires = jiffies + off; 2273 2274 add_timer(&np->timer); 2275} 2276 2277static const struct niu_phy_ops phy_ops_10g_serdes = { 2278 .serdes_init = serdes_init_10g_serdes, 2279 .link_status = link_status_10g_serdes, 2280}; 2281 2282static const struct niu_phy_ops phy_ops_10g_serdes_niu = { 2283 .serdes_init = serdes_init_niu_10g_serdes, 2284 .link_status = link_status_10g_serdes, 2285}; 2286 2287static const struct niu_phy_ops phy_ops_1g_serdes_niu = { 2288 .serdes_init = serdes_init_niu_1g_serdes, 2289 .link_status = link_status_1g_serdes, 2290}; 2291 2292static const struct niu_phy_ops phy_ops_1g_rgmii = { 2293 .xcvr_init = xcvr_init_1g_rgmii, 2294 .link_status = link_status_1g_rgmii, 2295}; 2296 2297static const struct niu_phy_ops phy_ops_10g_fiber_niu = { 2298 .serdes_init = serdes_init_niu_10g_fiber, 2299 .xcvr_init = xcvr_init_10g, 2300 .link_status = link_status_10g, 2301}; 2302 2303static const struct niu_phy_ops phy_ops_10g_fiber = { 2304 .serdes_init = serdes_init_10g, 2305 .xcvr_init = xcvr_init_10g, 2306 .link_status = link_status_10g, 2307}; 2308 2309static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = { 2310 .serdes_init = serdes_init_10g, 2311 .xcvr_init = xcvr_init_10g_bcm8706, 2312 .link_status = link_status_10g_hotplug, 2313}; 2314 2315static const struct niu_phy_ops phy_ops_10g_copper = { 2316 .serdes_init = serdes_init_10g, 2317 .link_status = link_status_10g, /* XXX */ 2318}; 2319 2320static const struct niu_phy_ops phy_ops_1g_fiber = { 2321 .serdes_init = serdes_init_1g, 2322 .xcvr_init = xcvr_init_1g, 2323 .link_status = link_status_1g, 2324}; 2325 2326static const struct niu_phy_ops phy_ops_1g_copper = { 2327 .xcvr_init = xcvr_init_1g, 2328 .link_status = link_status_1g, 2329}; 2330 2331struct niu_phy_template { 2332 const struct niu_phy_ops *ops; 2333 u32 phy_addr_base; 2334}; 2335 2336static const struct niu_phy_template phy_template_niu_10g_fiber = { 2337 .ops = &phy_ops_10g_fiber_niu, 2338 .phy_addr_base = 16, 2339}; 2340 2341static const struct niu_phy_template phy_template_niu_10g_serdes = { 2342 .ops = &phy_ops_10g_serdes_niu, 2343 .phy_addr_base = 0, 2344}; 2345 2346static const struct niu_phy_template phy_template_niu_1g_serdes = { 2347 
.ops = &phy_ops_1g_serdes_niu, 2348 .phy_addr_base = 0, 2349}; 2350 2351static const struct niu_phy_template phy_template_10g_fiber = { 2352 .ops = &phy_ops_10g_fiber, 2353 .phy_addr_base = 8, 2354}; 2355 2356static const struct niu_phy_template phy_template_10g_fiber_hotplug = { 2357 .ops = &phy_ops_10g_fiber_hotplug, 2358 .phy_addr_base = 8, 2359}; 2360 2361static const struct niu_phy_template phy_template_10g_copper = { 2362 .ops = &phy_ops_10g_copper, 2363 .phy_addr_base = 10, 2364}; 2365 2366static const struct niu_phy_template phy_template_1g_fiber = { 2367 .ops = &phy_ops_1g_fiber, 2368 .phy_addr_base = 0, 2369}; 2370 2371static const struct niu_phy_template phy_template_1g_copper = { 2372 .ops = &phy_ops_1g_copper, 2373 .phy_addr_base = 0, 2374}; 2375 2376static const struct niu_phy_template phy_template_1g_rgmii = { 2377 .ops = &phy_ops_1g_rgmii, 2378 .phy_addr_base = 0, 2379}; 2380 2381static const struct niu_phy_template phy_template_10g_serdes = { 2382 .ops = &phy_ops_10g_serdes, 2383 .phy_addr_base = 0, 2384}; 2385 2386static int niu_atca_port_num[4] = { 2387 0, 0, 11, 10 2388}; 2389 2390static int serdes_init_10g_serdes(struct niu *np) 2391{ 2392 struct niu_link_config *lp = &np->link_config; 2393 unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i; 2394 u64 ctrl_val, test_cfg_val, sig, mask, val; 2395 u64 reset_val; 2396 2397 switch (np->port) { 2398 case 0: 2399 reset_val = ENET_SERDES_RESET_0; 2400 ctrl_reg = ENET_SERDES_0_CTRL_CFG; 2401 test_cfg_reg = ENET_SERDES_0_TEST_CFG; 2402 pll_cfg = ENET_SERDES_0_PLL_CFG; 2403 break; 2404 case 1: 2405 reset_val = ENET_SERDES_RESET_1; 2406 ctrl_reg = ENET_SERDES_1_CTRL_CFG; 2407 test_cfg_reg = ENET_SERDES_1_TEST_CFG; 2408 pll_cfg = ENET_SERDES_1_PLL_CFG; 2409 break; 2410 2411 default: 2412 return -EINVAL; 2413 } 2414 ctrl_val = (ENET_SERDES_CTRL_SDET_0 | 2415 ENET_SERDES_CTRL_SDET_1 | 2416 ENET_SERDES_CTRL_SDET_2 | 2417 ENET_SERDES_CTRL_SDET_3 | 2418 (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) | 2419 (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) | 2420 (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) | 2421 (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) | 2422 (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) | 2423 (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) | 2424 (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) | 2425 (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT)); 2426 test_cfg_val = 0; 2427 2428 if (lp->loopback_mode == LOOPBACK_PHY) { 2429 test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK << 2430 ENET_SERDES_TEST_MD_0_SHIFT) | 2431 (ENET_TEST_MD_PAD_LOOPBACK << 2432 ENET_SERDES_TEST_MD_1_SHIFT) | 2433 (ENET_TEST_MD_PAD_LOOPBACK << 2434 ENET_SERDES_TEST_MD_2_SHIFT) | 2435 (ENET_TEST_MD_PAD_LOOPBACK << 2436 ENET_SERDES_TEST_MD_3_SHIFT)); 2437 } 2438 2439 esr_reset(np); 2440 nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2); 2441 nw64(ctrl_reg, ctrl_val); 2442 nw64(test_cfg_reg, test_cfg_val); 2443 2444 /* Initialize all 4 lanes of the SERDES. 
*/ 2445 for (i = 0; i < 4; i++) { 2446 u32 rxtx_ctrl, glue0; 2447 int err; 2448 2449 err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl); 2450 if (err) 2451 return err; 2452 err = esr_read_glue0(np, i, &glue0); 2453 if (err) 2454 return err; 2455 2456 rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO); 2457 rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH | 2458 (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT)); 2459 2460 glue0 &= ~(ESR_GLUE_CTRL0_SRATE | 2461 ESR_GLUE_CTRL0_THCNT | 2462 ESR_GLUE_CTRL0_BLTIME); 2463 glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB | 2464 (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) | 2465 (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) | 2466 (BLTIME_300_CYCLES << 2467 ESR_GLUE_CTRL0_BLTIME_SHIFT)); 2468 2469 err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl); 2470 if (err) 2471 return err; 2472 err = esr_write_glue0(np, i, glue0); 2473 if (err) 2474 return err; 2475 } 2476 2477 2478 sig = nr64(ESR_INT_SIGNALS); 2479 switch (np->port) { 2480 case 0: 2481 mask = ESR_INT_SIGNALS_P0_BITS; 2482 val = (ESR_INT_SRDY0_P0 | 2483 ESR_INT_DET0_P0 | 2484 ESR_INT_XSRDY_P0 | 2485 ESR_INT_XDP_P0_CH3 | 2486 ESR_INT_XDP_P0_CH2 | 2487 ESR_INT_XDP_P0_CH1 | 2488 ESR_INT_XDP_P0_CH0); 2489 break; 2490 2491 case 1: 2492 mask = ESR_INT_SIGNALS_P1_BITS; 2493 val = (ESR_INT_SRDY0_P1 | 2494 ESR_INT_DET0_P1 | 2495 ESR_INT_XSRDY_P1 | 2496 ESR_INT_XDP_P1_CH3 | 2497 ESR_INT_XDP_P1_CH2 | 2498 ESR_INT_XDP_P1_CH1 | 2499 ESR_INT_XDP_P1_CH0); 2500 break; 2501 2502 default: 2503 return -EINVAL; 2504 } 2505 2506 if ((sig & mask) != val) { 2507 int err; 2508 err = serdes_init_1g_serdes(np); 2509 if (!err) { 2510 np->flags &= ~NIU_FLAGS_10G; 2511 np->mac_xcvr = MAC_XCVR_PCS; 2512 } else { 2513 dev_err(np->device, PFX "Port %u 10G/1G SERDES Link Failed \n", 2514 np->port); 2515 return -ENODEV; 2516 } 2517 } 2518 2519 return 0; 2520} 2521 2522static int niu_determine_phy_disposition(struct niu *np) 2523{ 2524 struct niu_parent *parent = np->parent; 2525 u8 plat_type = parent->plat_type; 2526 const struct niu_phy_template *tp; 2527 u32 phy_addr_off = 0; 2528 2529 if (plat_type == PLAT_TYPE_NIU) { 2530 switch (np->flags & 2531 (NIU_FLAGS_10G | 2532 NIU_FLAGS_FIBER | 2533 NIU_FLAGS_XCVR_SERDES)) { 2534 case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES: 2535 /* 10G Serdes */ 2536 tp = &phy_template_niu_10g_serdes; 2537 break; 2538 case NIU_FLAGS_XCVR_SERDES: 2539 /* 1G Serdes */ 2540 tp = &phy_template_niu_1g_serdes; 2541 break; 2542 case NIU_FLAGS_10G | NIU_FLAGS_FIBER: 2543 /* 10G Fiber */ 2544 default: 2545 tp = &phy_template_niu_10g_fiber; 2546 phy_addr_off += np->port; 2547 break; 2548 } 2549 } else { 2550 switch (np->flags & 2551 (NIU_FLAGS_10G | 2552 NIU_FLAGS_FIBER | 2553 NIU_FLAGS_XCVR_SERDES)) { 2554 case 0: 2555 /* 1G copper */ 2556 tp = &phy_template_1g_copper; 2557 if (plat_type == PLAT_TYPE_VF_P0) 2558 phy_addr_off = 10; 2559 else if (plat_type == PLAT_TYPE_VF_P1) 2560 phy_addr_off = 26; 2561 2562 phy_addr_off += (np->port ^ 0x3); 2563 break; 2564 2565 case NIU_FLAGS_10G: 2566 /* 10G copper */ 2567 tp = &phy_template_10g_copper; 2568 break; 2569 2570 case NIU_FLAGS_FIBER: 2571 /* 1G fiber */ 2572 tp = &phy_template_1g_fiber; 2573 break; 2574 2575 case NIU_FLAGS_10G | NIU_FLAGS_FIBER: 2576 /* 10G fiber */ 2577 tp = &phy_template_10g_fiber; 2578 if (plat_type == PLAT_TYPE_VF_P0 || 2579 plat_type == PLAT_TYPE_VF_P1) 2580 phy_addr_off = 8; 2581 phy_addr_off += np->port; 2582 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { 2583 tp = &phy_template_10g_fiber_hotplug; 2584 if (np->port == 0) 2585 phy_addr_off = 8; 2586 if (np->port == 1) 2587 phy_addr_off = 12; 2588 } 2589 break; 2590 2591 case 
NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES: 2592 case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER: 2593 case NIU_FLAGS_XCVR_SERDES: 2594 switch(np->port) { 2595 case 0: 2596 case 1: 2597 tp = &phy_template_10g_serdes; 2598 break; 2599 case 2: 2600 case 3: 2601 tp = &phy_template_1g_rgmii; 2602 break; 2603 default: 2604 return -EINVAL; 2605 break; 2606 } 2607 phy_addr_off = niu_atca_port_num[np->port]; 2608 break; 2609 2610 default: 2611 return -EINVAL; 2612 } 2613 } 2614 2615 np->phy_ops = tp->ops; 2616 np->phy_addr = tp->phy_addr_base + phy_addr_off; 2617 2618 return 0; 2619} 2620 2621static int niu_init_link(struct niu *np) 2622{ 2623 struct niu_parent *parent = np->parent; 2624 int err, ignore; 2625 2626 if (parent->plat_type == PLAT_TYPE_NIU) { 2627 err = niu_xcvr_init(np); 2628 if (err) 2629 return err; 2630 msleep(200); 2631 } 2632 err = niu_serdes_init(np); 2633 if (err) 2634 return err; 2635 msleep(200); 2636 err = niu_xcvr_init(np); 2637 if (!err) 2638 niu_link_status(np, &ignore); 2639 return 0; 2640} 2641 2642static void niu_set_primary_mac(struct niu *np, unsigned char *addr) 2643{ 2644 u16 reg0 = addr[4] << 8 | addr[5]; 2645 u16 reg1 = addr[2] << 8 | addr[3]; 2646 u16 reg2 = addr[0] << 8 | addr[1]; 2647 2648 if (np->flags & NIU_FLAGS_XMAC) { 2649 nw64_mac(XMAC_ADDR0, reg0); 2650 nw64_mac(XMAC_ADDR1, reg1); 2651 nw64_mac(XMAC_ADDR2, reg2); 2652 } else { 2653 nw64_mac(BMAC_ADDR0, reg0); 2654 nw64_mac(BMAC_ADDR1, reg1); 2655 nw64_mac(BMAC_ADDR2, reg2); 2656 } 2657} 2658 2659static int niu_num_alt_addr(struct niu *np) 2660{ 2661 if (np->flags & NIU_FLAGS_XMAC) 2662 return XMAC_NUM_ALT_ADDR; 2663 else 2664 return BMAC_NUM_ALT_ADDR; 2665} 2666 2667static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr) 2668{ 2669 u16 reg0 = addr[4] << 8 | addr[5]; 2670 u16 reg1 = addr[2] << 8 | addr[3]; 2671 u16 reg2 = addr[0] << 8 | addr[1]; 2672 2673 if (index >= niu_num_alt_addr(np)) 2674 return -EINVAL; 2675 2676 if (np->flags & NIU_FLAGS_XMAC) { 2677 nw64_mac(XMAC_ALT_ADDR0(index), reg0); 2678 nw64_mac(XMAC_ALT_ADDR1(index), reg1); 2679 nw64_mac(XMAC_ALT_ADDR2(index), reg2); 2680 } else { 2681 nw64_mac(BMAC_ALT_ADDR0(index), reg0); 2682 nw64_mac(BMAC_ALT_ADDR1(index), reg1); 2683 nw64_mac(BMAC_ALT_ADDR2(index), reg2); 2684 } 2685 2686 return 0; 2687} 2688 2689static int niu_enable_alt_mac(struct niu *np, int index, int on) 2690{ 2691 unsigned long reg; 2692 u64 val, mask; 2693 2694 if (index >= niu_num_alt_addr(np)) 2695 return -EINVAL; 2696 2697 if (np->flags & NIU_FLAGS_XMAC) { 2698 reg = XMAC_ADDR_CMPEN; 2699 mask = 1 << index; 2700 } else { 2701 reg = BMAC_ADDR_CMPEN; 2702 mask = 1 << (index + 1); 2703 } 2704 2705 val = nr64_mac(reg); 2706 if (on) 2707 val |= mask; 2708 else 2709 val &= ~mask; 2710 nw64_mac(reg, val); 2711 2712 return 0; 2713} 2714 2715static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg, 2716 int num, int mac_pref) 2717{ 2718 u64 val = nr64_mac(reg); 2719 val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR); 2720 val |= num; 2721 if (mac_pref) 2722 val |= HOST_INFO_MPR; 2723 nw64_mac(reg, val); 2724} 2725 2726static int __set_rdc_table_num(struct niu *np, 2727 int xmac_index, int bmac_index, 2728 int rdc_table_num, int mac_pref) 2729{ 2730 unsigned long reg; 2731 2732 if (rdc_table_num & ~HOST_INFO_MACRDCTBLN) 2733 return -EINVAL; 2734 if (np->flags & NIU_FLAGS_XMAC) 2735 reg = XMAC_HOST_INFO(xmac_index); 2736 else 2737 reg = BMAC_HOST_INFO(bmac_index); 2738 __set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref); 2739 return 0; 2740} 2741 2742static 
int niu_set_primary_mac_rdc_table(struct niu *np, int table_num, 2743 int mac_pref) 2744{ 2745 return __set_rdc_table_num(np, 17, 0, table_num, mac_pref); 2746} 2747 2748static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num, 2749 int mac_pref) 2750{ 2751 return __set_rdc_table_num(np, 16, 8, table_num, mac_pref); 2752} 2753 2754static int niu_set_alt_mac_rdc_table(struct niu *np, int idx, 2755 int table_num, int mac_pref) 2756{ 2757 if (idx >= niu_num_alt_addr(np)) 2758 return -EINVAL; 2759 return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref); 2760} 2761 2762static u64 vlan_entry_set_parity(u64 reg_val) 2763{ 2764 u64 port01_mask; 2765 u64 port23_mask; 2766 2767 port01_mask = 0x00ff; 2768 port23_mask = 0xff00; 2769 2770 if (hweight64(reg_val & port01_mask) & 1) 2771 reg_val |= ENET_VLAN_TBL_PARITY0; 2772 else 2773 reg_val &= ~ENET_VLAN_TBL_PARITY0; 2774 2775 if (hweight64(reg_val & port23_mask) & 1) 2776 reg_val |= ENET_VLAN_TBL_PARITY1; 2777 else 2778 reg_val &= ~ENET_VLAN_TBL_PARITY1; 2779 2780 return reg_val; 2781} 2782 2783static void vlan_tbl_write(struct niu *np, unsigned long index, 2784 int port, int vpr, int rdc_table) 2785{ 2786 u64 reg_val = nr64(ENET_VLAN_TBL(index)); 2787 2788 reg_val &= ~((ENET_VLAN_TBL_VPR | 2789 ENET_VLAN_TBL_VLANRDCTBLN) << 2790 ENET_VLAN_TBL_SHIFT(port)); 2791 if (vpr) 2792 reg_val |= (ENET_VLAN_TBL_VPR << 2793 ENET_VLAN_TBL_SHIFT(port)); 2794 reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port)); 2795 2796 reg_val = vlan_entry_set_parity(reg_val); 2797 2798 nw64(ENET_VLAN_TBL(index), reg_val); 2799} 2800 2801static void vlan_tbl_clear(struct niu *np) 2802{ 2803 int i; 2804 2805 for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) 2806 nw64(ENET_VLAN_TBL(i), 0); 2807} 2808 2809static int tcam_wait_bit(struct niu *np, u64 bit) 2810{ 2811 int limit = 1000; 2812 2813 while (--limit > 0) { 2814 if (nr64(TCAM_CTL) & bit) 2815 break; 2816 udelay(1); 2817 } 2818 if (limit < 0) 2819 return -ENODEV; 2820 2821 return 0; 2822} 2823 2824static int tcam_flush(struct niu *np, int index) 2825{ 2826 nw64(TCAM_KEY_0, 0x00); 2827 nw64(TCAM_KEY_MASK_0, 0xff); 2828 nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index)); 2829 2830 return tcam_wait_bit(np, TCAM_CTL_STAT); 2831} 2832 2833#if 0 2834static int tcam_read(struct niu *np, int index, 2835 u64 *key, u64 *mask) 2836{ 2837 int err; 2838 2839 nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index)); 2840 err = tcam_wait_bit(np, TCAM_CTL_STAT); 2841 if (!err) { 2842 key[0] = nr64(TCAM_KEY_0); 2843 key[1] = nr64(TCAM_KEY_1); 2844 key[2] = nr64(TCAM_KEY_2); 2845 key[3] = nr64(TCAM_KEY_3); 2846 mask[0] = nr64(TCAM_KEY_MASK_0); 2847 mask[1] = nr64(TCAM_KEY_MASK_1); 2848 mask[2] = nr64(TCAM_KEY_MASK_2); 2849 mask[3] = nr64(TCAM_KEY_MASK_3); 2850 } 2851 return err; 2852} 2853#endif 2854 2855static int tcam_write(struct niu *np, int index, 2856 u64 *key, u64 *mask) 2857{ 2858 nw64(TCAM_KEY_0, key[0]); 2859 nw64(TCAM_KEY_1, key[1]); 2860 nw64(TCAM_KEY_2, key[2]); 2861 nw64(TCAM_KEY_3, key[3]); 2862 nw64(TCAM_KEY_MASK_0, mask[0]); 2863 nw64(TCAM_KEY_MASK_1, mask[1]); 2864 nw64(TCAM_KEY_MASK_2, mask[2]); 2865 nw64(TCAM_KEY_MASK_3, mask[3]); 2866 nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index)); 2867 2868 return tcam_wait_bit(np, TCAM_CTL_STAT); 2869} 2870 2871#if 0 2872static int tcam_assoc_read(struct niu *np, int index, u64 *data) 2873{ 2874 int err; 2875 2876 nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index)); 2877 err = tcam_wait_bit(np, TCAM_CTL_STAT); 2878 if (!err) 2879 *data = nr64(TCAM_KEY_1); 2880 2881 return 
err; 2882} 2883#endif 2884 2885static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data) 2886{ 2887 nw64(TCAM_KEY_1, assoc_data); 2888 nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index)); 2889 2890 return tcam_wait_bit(np, TCAM_CTL_STAT); 2891} 2892 2893static void tcam_enable(struct niu *np, int on) 2894{ 2895 u64 val = nr64(FFLP_CFG_1); 2896 2897 if (on) 2898 val &= ~FFLP_CFG_1_TCAM_DIS; 2899 else 2900 val |= FFLP_CFG_1_TCAM_DIS; 2901 nw64(FFLP_CFG_1, val); 2902} 2903 2904static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio) 2905{ 2906 u64 val = nr64(FFLP_CFG_1); 2907 2908 val &= ~(FFLP_CFG_1_FFLPINITDONE | 2909 FFLP_CFG_1_CAMLAT | 2910 FFLP_CFG_1_CAMRATIO); 2911 val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT); 2912 val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT); 2913 nw64(FFLP_CFG_1, val); 2914 2915 val = nr64(FFLP_CFG_1); 2916 val |= FFLP_CFG_1_FFLPINITDONE; 2917 nw64(FFLP_CFG_1, val); 2918} 2919 2920static int tcam_user_eth_class_enable(struct niu *np, unsigned long class, 2921 int on) 2922{ 2923 unsigned long reg; 2924 u64 val; 2925 2926 if (class < CLASS_CODE_ETHERTYPE1 || 2927 class > CLASS_CODE_ETHERTYPE2) 2928 return -EINVAL; 2929 2930 reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1); 2931 val = nr64(reg); 2932 if (on) 2933 val |= L2_CLS_VLD; 2934 else 2935 val &= ~L2_CLS_VLD; 2936 nw64(reg, val); 2937 2938 return 0; 2939} 2940 2941#if 0 2942static int tcam_user_eth_class_set(struct niu *np, unsigned long class, 2943 u64 ether_type) 2944{ 2945 unsigned long reg; 2946 u64 val; 2947 2948 if (class < CLASS_CODE_ETHERTYPE1 || 2949 class > CLASS_CODE_ETHERTYPE2 || 2950 (ether_type & ~(u64)0xffff) != 0) 2951 return -EINVAL; 2952 2953 reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1); 2954 val = nr64(reg); 2955 val &= ~L2_CLS_ETYPE; 2956 val |= (ether_type << L2_CLS_ETYPE_SHIFT); 2957 nw64(reg, val); 2958 2959 return 0; 2960} 2961#endif 2962 2963static int tcam_user_ip_class_enable(struct niu *np, unsigned long class, 2964 int on) 2965{ 2966 unsigned long reg; 2967 u64 val; 2968 2969 if (class < CLASS_CODE_USER_PROG1 || 2970 class > CLASS_CODE_USER_PROG4) 2971 return -EINVAL; 2972 2973 reg = L3_CLS(class - CLASS_CODE_USER_PROG1); 2974 val = nr64(reg); 2975 if (on) 2976 val |= L3_CLS_VALID; 2977 else 2978 val &= ~L3_CLS_VALID; 2979 nw64(reg, val); 2980 2981 return 0; 2982} 2983 2984static int tcam_user_ip_class_set(struct niu *np, unsigned long class, 2985 int ipv6, u64 protocol_id, 2986 u64 tos_mask, u64 tos_val) 2987{ 2988 unsigned long reg; 2989 u64 val; 2990 2991 if (class < CLASS_CODE_USER_PROG1 || 2992 class > CLASS_CODE_USER_PROG4 || 2993 (protocol_id & ~(u64)0xff) != 0 || 2994 (tos_mask & ~(u64)0xff) != 0 || 2995 (tos_val & ~(u64)0xff) != 0) 2996 return -EINVAL; 2997 2998 reg = L3_CLS(class - CLASS_CODE_USER_PROG1); 2999 val = nr64(reg); 3000 val &= ~(L3_CLS_IPVER | L3_CLS_PID | 3001 L3_CLS_TOSMASK | L3_CLS_TOS); 3002 if (ipv6) 3003 val |= L3_CLS_IPVER; 3004 val |= (protocol_id << L3_CLS_PID_SHIFT); 3005 val |= (tos_mask << L3_CLS_TOSMASK_SHIFT); 3006 val |= (tos_val << L3_CLS_TOS_SHIFT); 3007 nw64(reg, val); 3008 3009 return 0; 3010} 3011 3012static int tcam_early_init(struct niu *np) 3013{ 3014 unsigned long i; 3015 int err; 3016 3017 tcam_enable(np, 0); 3018 tcam_set_lat_and_ratio(np, 3019 DEFAULT_TCAM_LATENCY, 3020 DEFAULT_TCAM_ACCESS_RATIO); 3021 for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) { 3022 err = tcam_user_eth_class_enable(np, i, 0); 3023 if (err) 3024 return err; 3025 } 3026 for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; 
i++) { 3027 err = tcam_user_ip_class_enable(np, i, 0); 3028 if (err) 3029 return err; 3030 } 3031 3032 return 0; 3033} 3034 3035static int tcam_flush_all(struct niu *np) 3036{ 3037 unsigned long i; 3038 3039 for (i = 0; i < np->parent->tcam_num_entries; i++) { 3040 int err = tcam_flush(np, i); 3041 if (err) 3042 return err; 3043 } 3044 return 0; 3045} 3046 3047static u64 hash_addr_regval(unsigned long index, unsigned long num_entries) 3048{ 3049 return ((u64)index | (num_entries == 1 ? 3050 HASH_TBL_ADDR_AUTOINC : 0)); 3051} 3052 3053#if 0 3054static int hash_read(struct niu *np, unsigned long partition, 3055 unsigned long index, unsigned long num_entries, 3056 u64 *data) 3057{ 3058 u64 val = hash_addr_regval(index, num_entries); 3059 unsigned long i; 3060 3061 if (partition >= FCRAM_NUM_PARTITIONS || 3062 index + num_entries > FCRAM_SIZE) 3063 return -EINVAL; 3064 3065 nw64(HASH_TBL_ADDR(partition), val); 3066 for (i = 0; i < num_entries; i++) 3067 data[i] = nr64(HASH_TBL_DATA(partition)); 3068 3069 return 0; 3070} 3071#endif 3072 3073static int hash_write(struct niu *np, unsigned long partition, 3074 unsigned long index, unsigned long num_entries, 3075 u64 *data) 3076{ 3077 u64 val = hash_addr_regval(index, num_entries); 3078 unsigned long i; 3079 3080 if (partition >= FCRAM_NUM_PARTITIONS || 3081 index + (num_entries * 8) > FCRAM_SIZE) 3082 return -EINVAL; 3083 3084 nw64(HASH_TBL_ADDR(partition), val); 3085 for (i = 0; i < num_entries; i++) 3086 nw64(HASH_TBL_DATA(partition), data[i]); 3087 3088 return 0; 3089} 3090 3091static void fflp_reset(struct niu *np) 3092{ 3093 u64 val; 3094 3095 nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST); 3096 udelay(10); 3097 nw64(FFLP_CFG_1, 0); 3098 3099 val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE; 3100 nw64(FFLP_CFG_1, val); 3101} 3102 3103static void fflp_set_timings(struct niu *np) 3104{ 3105 u64 val = nr64(FFLP_CFG_1); 3106 3107 val &= ~FFLP_CFG_1_FFLPINITDONE; 3108 val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT); 3109 nw64(FFLP_CFG_1, val); 3110 3111 val = nr64(FFLP_CFG_1); 3112 val |= FFLP_CFG_1_FFLPINITDONE; 3113 nw64(FFLP_CFG_1, val); 3114 3115 val = nr64(FCRAM_REF_TMR); 3116 val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN); 3117 val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT); 3118 val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT); 3119 nw64(FCRAM_REF_TMR, val); 3120} 3121 3122static int fflp_set_partition(struct niu *np, u64 partition, 3123 u64 mask, u64 base, int enable) 3124{ 3125 unsigned long reg; 3126 u64 val; 3127 3128 if (partition >= FCRAM_NUM_PARTITIONS || 3129 (mask & ~(u64)0x1f) != 0 || 3130 (base & ~(u64)0x1f) != 0) 3131 return -EINVAL; 3132 3133 reg = FLW_PRT_SEL(partition); 3134 3135 val = nr64(reg); 3136 val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE); 3137 val |= (mask << FLW_PRT_SEL_MASK_SHIFT); 3138 val |= (base << FLW_PRT_SEL_BASE_SHIFT); 3139 if (enable) 3140 val |= FLW_PRT_SEL_EXT; 3141 nw64(reg, val); 3142 3143 return 0; 3144} 3145 3146static int fflp_disable_all_partitions(struct niu *np) 3147{ 3148 unsigned long i; 3149 3150 for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) { 3151 int err = fflp_set_partition(np, 0, 0, 0, 0); 3152 if (err) 3153 return err; 3154 } 3155 return 0; 3156} 3157 3158static void fflp_llcsnap_enable(struct niu *np, int on) 3159{ 3160 u64 val = nr64(FFLP_CFG_1); 3161 3162 if (on) 3163 val |= FFLP_CFG_1_LLCSNAP; 3164 else 3165 val &= ~FFLP_CFG_1_LLCSNAP; 3166 nw64(FFLP_CFG_1, val); 3167} 3168 3169static void fflp_errors_enable(struct niu *np, 
int on) 3170{ 3171 u64 val = nr64(FFLP_CFG_1); 3172 3173 if (on) 3174 val &= ~FFLP_CFG_1_ERRORDIS; 3175 else 3176 val |= FFLP_CFG_1_ERRORDIS; 3177 nw64(FFLP_CFG_1, val); 3178} 3179 3180static int fflp_hash_clear(struct niu *np) 3181{ 3182 struct fcram_hash_ipv4 ent; 3183 unsigned long i; 3184 3185 /* IPV4 hash entry with valid bit clear, rest is don't care. */ 3186 memset(&ent, 0, sizeof(ent)); 3187 ent.header = HASH_HEADER_EXT; 3188 3189 for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) { 3190 int err = hash_write(np, 0, i, 1, (u64 *) &ent); 3191 if (err) 3192 return err; 3193 } 3194 return 0; 3195} 3196 3197static int fflp_early_init(struct niu *np) 3198{ 3199 struct niu_parent *parent; 3200 unsigned long flags; 3201 int err; 3202 3203 niu_lock_parent(np, flags); 3204 3205 parent = np->parent; 3206 err = 0; 3207 if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) { 3208 niudbg(PROBE, "fflp_early_init: Initting hw on port %u\n", 3209 np->port); 3210 if (np->parent->plat_type != PLAT_TYPE_NIU) { 3211 fflp_reset(np); 3212 fflp_set_timings(np); 3213 err = fflp_disable_all_partitions(np); 3214 if (err) { 3215 niudbg(PROBE, "fflp_disable_all_partitions " 3216 "failed, err=%d\n", err); 3217 goto out; 3218 } 3219 } 3220 3221 err = tcam_early_init(np); 3222 if (err) { 3223 niudbg(PROBE, "tcam_early_init failed, err=%d\n", 3224 err); 3225 goto out; 3226 } 3227 fflp_llcsnap_enable(np, 1); 3228 fflp_errors_enable(np, 0); 3229 nw64(H1POLY, 0); 3230 nw64(H2POLY, 0); 3231 3232 err = tcam_flush_all(np); 3233 if (err) { 3234 niudbg(PROBE, "tcam_flush_all failed, err=%d\n", 3235 err); 3236 goto out; 3237 } 3238 if (np->parent->plat_type != PLAT_TYPE_NIU) { 3239 err = fflp_hash_clear(np); 3240 if (err) { 3241 niudbg(PROBE, "fflp_hash_clear failed, " 3242 "err=%d\n", err); 3243 goto out; 3244 } 3245 } 3246 3247 vlan_tbl_clear(np); 3248 3249 niudbg(PROBE, "fflp_early_init: Success\n"); 3250 parent->flags |= PARENT_FLGS_CLS_HWINIT; 3251 } 3252out: 3253 niu_unlock_parent(np, flags); 3254 return err; 3255} 3256 3257static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key) 3258{ 3259 if (class_code < CLASS_CODE_USER_PROG1 || 3260 class_code > CLASS_CODE_SCTP_IPV6) 3261 return -EINVAL; 3262 3263 nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key); 3264 return 0; 3265} 3266 3267static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key) 3268{ 3269 if (class_code < CLASS_CODE_USER_PROG1 || 3270 class_code > CLASS_CODE_SCTP_IPV6) 3271 return -EINVAL; 3272 3273 nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key); 3274 return 0; 3275} 3276 3277/* Entries for the ports are interleaved in the TCAM */ 3278static u16 tcam_get_index(struct niu *np, u16 idx) 3279{ 3280 /* One entry reserved for IP fragment rule */ 3281 if (idx >= (np->clas.tcam_sz - 1)) 3282 idx = 0; 3283 return (np->clas.tcam_top + ((idx+1) * np->parent->num_ports)); 3284} 3285 3286static u16 tcam_get_size(struct niu *np) 3287{ 3288 /* One entry reserved for IP fragment rule */ 3289 return np->clas.tcam_sz - 1; 3290} 3291 3292static u16 tcam_get_valid_entry_cnt(struct niu *np) 3293{ 3294 /* One entry reserved for IP fragment rule */ 3295 return np->clas.tcam_valid_entries - 1; 3296} 3297 3298static void niu_rx_skb_append(struct sk_buff *skb, struct page *page, 3299 u32 offset, u32 size) 3300{ 3301 int i = skb_shinfo(skb)->nr_frags; 3302 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3303 3304 frag->page = page; 3305 frag->page_offset = offset; 3306 frag->size = size; 3307 3308 skb->len += size; 3309 skb->data_len += 
size; 3310 skb->truesize += size; 3311 3312 skb_shinfo(skb)->nr_frags = i + 1; 3313} 3314 3315static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a) 3316{ 3317 a >>= PAGE_SHIFT; 3318 a ^= (a >> ilog2(MAX_RBR_RING_SIZE)); 3319 3320 return (a & (MAX_RBR_RING_SIZE - 1)); 3321} 3322 3323static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr, 3324 struct page ***link) 3325{ 3326 unsigned int h = niu_hash_rxaddr(rp, addr); 3327 struct page *p, **pp; 3328 3329 addr &= PAGE_MASK; 3330 pp = &rp->rxhash[h]; 3331 for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) { 3332 if (p->index == addr) { 3333 *link = pp; 3334 break; 3335 } 3336 } 3337 3338 return p; 3339} 3340 3341static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base) 3342{ 3343 unsigned int h = niu_hash_rxaddr(rp, base); 3344 3345 page->index = base; 3346 page->mapping = (struct address_space *) rp->rxhash[h]; 3347 rp->rxhash[h] = page; 3348} 3349 3350static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp, 3351 gfp_t mask, int start_index) 3352{ 3353 struct page *page; 3354 u64 addr; 3355 int i; 3356 3357 page = alloc_page(mask); 3358 if (!page) 3359 return -ENOMEM; 3360 3361 addr = np->ops->map_page(np->device, page, 0, 3362 PAGE_SIZE, DMA_FROM_DEVICE); 3363 3364 niu_hash_page(rp, page, addr); 3365 if (rp->rbr_blocks_per_page > 1) 3366 atomic_add(rp->rbr_blocks_per_page - 1, 3367 &compound_head(page)->_count); 3368 3369 for (i = 0; i < rp->rbr_blocks_per_page; i++) { 3370 __le32 *rbr = &rp->rbr[start_index + i]; 3371 3372 *rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT); 3373 addr += rp->rbr_block_size; 3374 } 3375 3376 return 0; 3377} 3378 3379static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask) 3380{ 3381 int index = rp->rbr_index; 3382 3383 rp->rbr_pending++; 3384 if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) { 3385 int err = niu_rbr_add_page(np, rp, mask, index); 3386 3387 if (unlikely(err)) { 3388 rp->rbr_pending--; 3389 return; 3390 } 3391 3392 rp->rbr_index += rp->rbr_blocks_per_page; 3393 BUG_ON(rp->rbr_index > rp->rbr_table_size); 3394 if (rp->rbr_index == rp->rbr_table_size) 3395 rp->rbr_index = 0; 3396 3397 if (rp->rbr_pending >= rp->rbr_kick_thresh) { 3398 nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending); 3399 rp->rbr_pending = 0; 3400 } 3401 } 3402} 3403 3404static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp) 3405{ 3406 unsigned int index = rp->rcr_index; 3407 int num_rcr = 0; 3408 3409 rp->rx_dropped++; 3410 while (1) { 3411 struct page *page, **link; 3412 u64 addr, val; 3413 u32 rcr_size; 3414 3415 num_rcr++; 3416 3417 val = le64_to_cpup(&rp->rcr[index]); 3418 addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << 3419 RCR_ENTRY_PKT_BUF_ADDR_SHIFT; 3420 page = niu_find_rxpage(rp, addr, &link); 3421 3422 rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> 3423 RCR_ENTRY_PKTBUFSZ_SHIFT]; 3424 if ((page->index + PAGE_SIZE) - rcr_size == addr) { 3425 *link = (struct page *) page->mapping; 3426 np->ops->unmap_page(np->device, page->index, 3427 PAGE_SIZE, DMA_FROM_DEVICE); 3428 page->index = 0; 3429 page->mapping = NULL; 3430 __free_page(page); 3431 rp->rbr_refill_pending++; 3432 } 3433 3434 index = NEXT_RCR(rp, index); 3435 if (!(val & RCR_ENTRY_MULTI)) 3436 break; 3437 3438 } 3439 rp->rcr_index = index; 3440 3441 return num_rcr; 3442} 3443 3444static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np, 3445 struct rx_ring_info *rp) 3446{ 3447 unsigned int index = rp->rcr_index; 3448 struct sk_buff *skb; 
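/* Note on the RBR page tracking above: niu_hash_page() and
 * niu_find_rxpage() keep every page posted to the hardware in
 * rp->rxhash, a small open-chained table keyed by the page's DMA base
 * address; page->index stores that address and page->mapping is reused
 * as the chain pointer, so no extra bookkeeping structure is needed.
 * The hypothetical helper below (not part of the driver, kept in an
 * #if 0 block like the other unused code in this file) walks the same
 * hashing arithmetic as niu_hash_rxaddr() for one sample address.
 */
#if 0
static unsigned int rxhash_bucket_example(u64 addr)
{
	u64 a = addr;

	/* Drop the in-page offset, fold the high bits down with XOR,
	 * then mask to the table size (MAX_RBR_RING_SIZE, a power of
	 * two), exactly as niu_hash_rxaddr() does.
	 */
	a >>= PAGE_SHIFT;
	a ^= (a >> ilog2(MAX_RBR_RING_SIZE));
	return (unsigned int) (a & (MAX_RBR_RING_SIZE - 1));
}
#endif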
3449 int len, num_rcr; 3450 3451 skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE); 3452 if (unlikely(!skb)) 3453 return niu_rx_pkt_ignore(np, rp); 3454 3455 num_rcr = 0; 3456 while (1) { 3457 struct page *page, **link; 3458 u32 rcr_size, append_size; 3459 u64 addr, val, off; 3460 3461 num_rcr++; 3462 3463 val = le64_to_cpup(&rp->rcr[index]); 3464 3465 len = (val & RCR_ENTRY_L2_LEN) >> 3466 RCR_ENTRY_L2_LEN_SHIFT; 3467 len -= ETH_FCS_LEN; 3468 3469 addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << 3470 RCR_ENTRY_PKT_BUF_ADDR_SHIFT; 3471 page = niu_find_rxpage(rp, addr, &link); 3472 3473 rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> 3474 RCR_ENTRY_PKTBUFSZ_SHIFT]; 3475 3476 off = addr & ~PAGE_MASK; 3477 append_size = rcr_size; 3478 if (num_rcr == 1) { 3479 int ptype; 3480 3481 off += 2; 3482 append_size -= 2; 3483 3484 ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT); 3485 if ((ptype == RCR_PKT_TYPE_TCP || 3486 ptype == RCR_PKT_TYPE_UDP) && 3487 !(val & (RCR_ENTRY_NOPORT | 3488 RCR_ENTRY_ERROR))) 3489 skb->ip_summed = CHECKSUM_UNNECESSARY; 3490 else 3491 skb->ip_summed = CHECKSUM_NONE; 3492 } 3493 if (!(val & RCR_ENTRY_MULTI)) 3494 append_size = len - skb->len; 3495 3496 niu_rx_skb_append(skb, page, off, append_size); 3497 if ((page->index + rp->rbr_block_size) - rcr_size == addr) { 3498 *link = (struct page *) page->mapping; 3499 np->ops->unmap_page(np->device, page->index, 3500 PAGE_SIZE, DMA_FROM_DEVICE); 3501 page->index = 0; 3502 page->mapping = NULL; 3503 rp->rbr_refill_pending++; 3504 } else 3505 get_page(page); 3506 3507 index = NEXT_RCR(rp, index); 3508 if (!(val & RCR_ENTRY_MULTI)) 3509 break; 3510 3511 } 3512 rp->rcr_index = index; 3513 3514 skb_reserve(skb, NET_IP_ALIGN); 3515 __pskb_pull_tail(skb, min(len, NIU_RXPULL_MAX)); 3516 3517 rp->rx_packets++; 3518 rp->rx_bytes += skb->len; 3519 3520 skb->protocol = eth_type_trans(skb, np->dev); 3521 skb_record_rx_queue(skb, rp->rx_channel); 3522 napi_gro_receive(napi, skb); 3523 3524 return num_rcr; 3525} 3526 3527static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask) 3528{ 3529 int blocks_per_page = rp->rbr_blocks_per_page; 3530 int err, index = rp->rbr_index; 3531 3532 err = 0; 3533 while (index < (rp->rbr_table_size - blocks_per_page)) { 3534 err = niu_rbr_add_page(np, rp, mask, index); 3535 if (err) 3536 break; 3537 3538 index += blocks_per_page; 3539 } 3540 3541 rp->rbr_index = index; 3542 return err; 3543} 3544 3545static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp) 3546{ 3547 int i; 3548 3549 for (i = 0; i < MAX_RBR_RING_SIZE; i++) { 3550 struct page *page; 3551 3552 page = rp->rxhash[i]; 3553 while (page) { 3554 struct page *next = (struct page *) page->mapping; 3555 u64 base = page->index; 3556 3557 np->ops->unmap_page(np->device, base, PAGE_SIZE, 3558 DMA_FROM_DEVICE); 3559 page->index = 0; 3560 page->mapping = NULL; 3561 3562 __free_page(page); 3563 3564 page = next; 3565 } 3566 } 3567 3568 for (i = 0; i < rp->rbr_table_size; i++) 3569 rp->rbr[i] = cpu_to_le32(0); 3570 rp->rbr_index = 0; 3571} 3572 3573static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx) 3574{ 3575 struct tx_buff_info *tb = &rp->tx_buffs[idx]; 3576 struct sk_buff *skb = tb->skb; 3577 struct tx_pkt_hdr *tp; 3578 u64 tx_flags; 3579 int i, len; 3580 3581 tp = (struct tx_pkt_hdr *) skb->data; 3582 tx_flags = le64_to_cpup(&tp->flags); 3583 3584 rp->tx_packets++; 3585 rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) - 3586 ((tx_flags & TXHDR_PAD) / 2)); 3587 3588 len = skb_headlen(skb); 3589 
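/* Note on the RCR walk in niu_process_rx_pkt() above: each 64-bit
 * completion entry packs the L2 frame length, a buffer-size selector,
 * the MULTI (more-fragments) flag and the buffer's DMA address.  The
 * hypothetical decoder below (not part of the driver, wrapped in
 * #if 0 like the other unused blocks here) pulls the fields apart
 * with the same masks and shifts the function uses.
 */
#if 0
static void rcr_entry_decode_example(struct rx_ring_info *rp, u64 val)
{
	/* Frame length on the wire, including the FCS the driver strips. */
	u32 l2_len = (val & RCR_ENTRY_L2_LEN) >> RCR_ENTRY_L2_LEN_SHIFT;

	/* Index into rp->rbr_sizes[], i.e. how large this buffer is. */
	u32 bufsz = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
				  RCR_ENTRY_PKTBUFSZ_SHIFT];

	/* The buffer address is stored pre-shifted; shift it back up. */
	u64 addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
		RCR_ENTRY_PKT_BUF_ADDR_SHIFT;

	/* Set when the packet continues in the next RCR entry. */
	int multi = !!(val & RCR_ENTRY_MULTI);

	(void) l2_len; (void) bufsz; (void) addr; (void) multi;
}
#endif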
np->ops->unmap_single(np->device, tb->mapping, 3590 len, DMA_TO_DEVICE); 3591 3592 if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK) 3593 rp->mark_pending--; 3594 3595 tb->skb = NULL; 3596 do { 3597 idx = NEXT_TX(rp, idx); 3598 len -= MAX_TX_DESC_LEN; 3599 } while (len > 0); 3600 3601 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3602 tb = &rp->tx_buffs[idx]; 3603 BUG_ON(tb->skb != NULL); 3604 np->ops->unmap_page(np->device, tb->mapping, 3605 skb_shinfo(skb)->frags[i].size, 3606 DMA_TO_DEVICE); 3607 idx = NEXT_TX(rp, idx); 3608 } 3609 3610 dev_kfree_skb(skb); 3611 3612 return idx; 3613} 3614 3615#define NIU_TX_WAKEUP_THRESH(rp) ((rp)->pending / 4) 3616 3617static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) 3618{ 3619 struct netdev_queue *txq; 3620 u16 pkt_cnt, tmp; 3621 int cons, index; 3622 u64 cs; 3623 3624 index = (rp - np->tx_rings); 3625 txq = netdev_get_tx_queue(np->dev, index); 3626 3627 cs = rp->tx_cs; 3628 if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK)))) 3629 goto out; 3630 3631 tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT; 3632 pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) & 3633 (TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT); 3634 3635 rp->last_pkt_cnt = tmp; 3636 3637 cons = rp->cons; 3638 3639 niudbg(TX_DONE, "%s: niu_tx_work() pkt_cnt[%u] cons[%d]\n", 3640 np->dev->name, pkt_cnt, cons); 3641 3642 while (pkt_cnt--) 3643 cons = release_tx_packet(np, rp, cons); 3644 3645 rp->cons = cons; 3646 smp_mb(); 3647 3648out: 3649 if (unlikely(netif_tx_queue_stopped(txq) && 3650 (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) { 3651 __netif_tx_lock(txq, smp_processor_id()); 3652 if (netif_tx_queue_stopped(txq) && 3653 (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))) 3654 netif_tx_wake_queue(txq); 3655 __netif_tx_unlock(txq); 3656 } 3657} 3658 3659static inline void niu_sync_rx_discard_stats(struct niu *np, 3660 struct rx_ring_info *rp, 3661 const int limit) 3662{ 3663 /* This elaborate scheme is needed for reading the RX discard 3664 * counters, as they are only 16-bit and can overflow quickly, 3665 * and because the overflow indication bit is not usable as 3666 * the counter value does not wrap, but remains at max value 3667 * 0xFFFF. 3668 * 3669 * In theory and in practice counters can be lost in between 3670 * reading nr64() and clearing the counter nw64(). For this 3671 * reason, the number of counter clearings nw64() is 3672 * limited/reduced though the limit parameter. 3673 */ 3674 int rx_channel = rp->rx_channel; 3675 u32 misc, wred; 3676 3677 /* RXMISC (Receive Miscellaneous Discard Count), covers the 3678 * following discard events: IPP (Input Port Process), 3679 * FFLP/TCAM, Full RCR (Receive Completion Ring) RBR (Receive 3680 * Block Ring) prefetch buffer is empty. 
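/* Note on the completion accounting in niu_tx_work() above: the packet
 * counter read out of TX_CS is a narrow hardware field that wraps, so
 * the number of newly completed packets is taken as a modular
 * difference against the previously sampled value.  A minimal sketch
 * of that arithmetic (hypothetical helper, not part of the driver):
 */
#if 0
static u16 tx_pkts_done_example(u16 hw_cnt_now, u16 hw_cnt_last)
{
	/* Masking the difference by the field width keeps the result
	 * correct even when the hardware counter wrapped past zero
	 * since the last read.
	 */
	return (hw_cnt_now - hw_cnt_last) &
		(TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT);
}
#endif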
3681 */ 3682 misc = nr64(RXMISC(rx_channel)); 3683 if (unlikely((misc & RXMISC_COUNT) > limit)) { 3684 nw64(RXMISC(rx_channel), 0); 3685 rp->rx_errors += misc & RXMISC_COUNT; 3686 3687 if (unlikely(misc & RXMISC_OFLOW)) 3688 dev_err(np->device, "rx-%d: Counter overflow " 3689 "RXMISC discard\n", rx_channel); 3690 3691 niudbg(RX_ERR, "%s-rx-%d: MISC drop=%u over=%u\n", 3692 np->dev->name, rx_channel, misc, misc-limit); 3693 } 3694 3695 /* WRED (Weighted Random Early Discard) by hardware */ 3696 wred = nr64(RED_DIS_CNT(rx_channel)); 3697 if (unlikely((wred & RED_DIS_CNT_COUNT) > limit)) { 3698 nw64(RED_DIS_CNT(rx_channel), 0); 3699 rp->rx_dropped += wred & RED_DIS_CNT_COUNT; 3700 3701 if (unlikely(wred & RED_DIS_CNT_OFLOW)) 3702 dev_err(np->device, "rx-%d: Counter overflow " 3703 "WRED discard\n", rx_channel); 3704 3705 niudbg(RX_ERR, "%s-rx-%d: WRED drop=%u over=%u\n", 3706 np->dev->name, rx_channel, wred, wred-limit); 3707 } 3708} 3709 3710static int niu_rx_work(struct napi_struct *napi, struct niu *np, 3711 struct rx_ring_info *rp, int budget) 3712{ 3713 int qlen, rcr_done = 0, work_done = 0; 3714 struct rxdma_mailbox *mbox = rp->mbox; 3715 u64 stat; 3716 3717#if 1 3718 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel)); 3719 qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN; 3720#else 3721 stat = le64_to_cpup(&mbox->rx_dma_ctl_stat); 3722 qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN); 3723#endif 3724 mbox->rx_dma_ctl_stat = 0; 3725 mbox->rcrstat_a = 0; 3726 3727 niudbg(RX_STATUS, "%s: niu_rx_work(chan[%d]), stat[%llx] qlen=%d\n", 3728 np->dev->name, rp->rx_channel, (unsigned long long) stat, qlen); 3729 3730 rcr_done = work_done = 0; 3731 qlen = min(qlen, budget); 3732 while (work_done < qlen) { 3733 rcr_done += niu_process_rx_pkt(napi, np, rp); 3734 work_done++; 3735 } 3736 3737 if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) { 3738 unsigned int i; 3739 3740 for (i = 0; i < rp->rbr_refill_pending; i++) 3741 niu_rbr_refill(np, rp, GFP_ATOMIC); 3742 rp->rbr_refill_pending = 0; 3743 } 3744 3745 stat = (RX_DMA_CTL_STAT_MEX | 3746 ((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) | 3747 ((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT)); 3748 3749 nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat); 3750 3751 /* Only sync discards stats when qlen indicate potential for drops */ 3752 if (qlen > 10) 3753 niu_sync_rx_discard_stats(np, rp, 0x7FFF); 3754 3755 return work_done; 3756} 3757 3758static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget) 3759{ 3760 u64 v0 = lp->v0; 3761 u32 tx_vec = (v0 >> 32); 3762 u32 rx_vec = (v0 & 0xffffffff); 3763 int i, work_done = 0; 3764 3765 niudbg(INTR, "%s: niu_poll_core() v0[%016llx]\n", 3766 np->dev->name, (unsigned long long) v0); 3767 3768 for (i = 0; i < np->num_tx_rings; i++) { 3769 struct tx_ring_info *rp = &np->tx_rings[i]; 3770 if (tx_vec & (1 << rp->tx_channel)) 3771 niu_tx_work(np, rp); 3772 nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0); 3773 } 3774 3775 for (i = 0; i < np->num_rx_rings; i++) { 3776 struct rx_ring_info *rp = &np->rx_rings[i]; 3777 3778 if (rx_vec & (1 << rp->rx_channel)) { 3779 int this_work_done; 3780 3781 this_work_done = niu_rx_work(&lp->napi, np, rp, 3782 budget); 3783 3784 budget -= this_work_done; 3785 work_done += this_work_done; 3786 } 3787 nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0); 3788 } 3789 3790 return work_done; 3791} 3792 3793static int niu_poll(struct napi_struct *napi, int budget) 3794{ 3795 struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi); 3796 struct niu *np = lp->np; 3797 int 
work_done; 3798 3799 work_done = niu_poll_core(np, lp, budget); 3800 3801 if (work_done < budget) { 3802 napi_complete(napi); 3803 niu_ldg_rearm(np, lp, 1); 3804 } 3805 return work_done; 3806} 3807 3808static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp, 3809 u64 stat) 3810{ 3811 dev_err(np->device, PFX "%s: RX channel %u errors ( ", 3812 np->dev->name, rp->rx_channel); 3813 3814 if (stat & RX_DMA_CTL_STAT_RBR_TMOUT) 3815 printk("RBR_TMOUT "); 3816 if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR) 3817 printk("RSP_CNT "); 3818 if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS) 3819 printk("BYTE_EN_BUS "); 3820 if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR) 3821 printk("RSP_DAT "); 3822 if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR) 3823 printk("RCR_ACK "); 3824 if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR) 3825 printk("RCR_SHA_PAR "); 3826 if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR) 3827 printk("RBR_PRE_PAR "); 3828 if (stat & RX_DMA_CTL_STAT_CONFIG_ERR) 3829 printk("CONFIG "); 3830 if (stat & RX_DMA_CTL_STAT_RCRINCON) 3831 printk("RCRINCON "); 3832 if (stat & RX_DMA_CTL_STAT_RCRFULL) 3833 printk("RCRFULL "); 3834 if (stat & RX_DMA_CTL_STAT_RBRFULL) 3835 printk("RBRFULL "); 3836 if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE) 3837 printk("RBRLOGPAGE "); 3838 if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE) 3839 printk("CFIGLOGPAGE "); 3840 if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR) 3841 printk("DC_FIDO "); 3842 3843 printk(")\n"); 3844} 3845 3846static int niu_rx_error(struct niu *np, struct rx_ring_info *rp) 3847{ 3848 u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel)); 3849 int err = 0; 3850 3851 3852 if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL | 3853 RX_DMA_CTL_STAT_PORT_FATAL)) 3854 err = -EINVAL; 3855 3856 if (err) { 3857 dev_err(np->device, PFX "%s: RX channel %u error, stat[%llx]\n", 3858 np->dev->name, rp->rx_channel, 3859 (unsigned long long) stat); 3860 3861 niu_log_rxchan_errors(np, rp, stat); 3862 } 3863 3864 nw64(RX_DMA_CTL_STAT(rp->rx_channel), 3865 stat & RX_DMA_CTL_WRITE_CLEAR_ERRS); 3866 3867 return err; 3868} 3869 3870static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp, 3871 u64 cs) 3872{ 3873 dev_err(np->device, PFX "%s: TX channel %u errors ( ", 3874 np->dev->name, rp->tx_channel); 3875 3876 if (cs & TX_CS_MBOX_ERR) 3877 printk("MBOX "); 3878 if (cs & TX_CS_PKT_SIZE_ERR) 3879 printk("PKT_SIZE "); 3880 if (cs & TX_CS_TX_RING_OFLOW) 3881 printk("TX_RING_OFLOW "); 3882 if (cs & TX_CS_PREF_BUF_PAR_ERR) 3883 printk("PREF_BUF_PAR "); 3884 if (cs & TX_CS_NACK_PREF) 3885 printk("NACK_PREF "); 3886 if (cs & TX_CS_NACK_PKT_RD) 3887 printk("NACK_PKT_RD "); 3888 if (cs & TX_CS_CONF_PART_ERR) 3889 printk("CONF_PART "); 3890 if (cs & TX_CS_PKT_PRT_ERR) 3891 printk("PKT_PTR "); 3892 3893 printk(")\n"); 3894} 3895 3896static int niu_tx_error(struct niu *np, struct tx_ring_info *rp) 3897{ 3898 u64 cs, logh, logl; 3899 3900 cs = nr64(TX_CS(rp->tx_channel)); 3901 logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel)); 3902 logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel)); 3903 3904 dev_err(np->device, PFX "%s: TX channel %u error, " 3905 "cs[%llx] logh[%llx] logl[%llx]\n", 3906 np->dev->name, rp->tx_channel, 3907 (unsigned long long) cs, 3908 (unsigned long long) logh, 3909 (unsigned long long) logl); 3910 3911 niu_log_txchan_errors(np, rp, cs); 3912 3913 return -ENODEV; 3914} 3915 3916static int niu_mif_interrupt(struct niu *np) 3917{ 3918 u64 mif_status = nr64(MIF_STATUS); 3919 int phy_mdint = 0; 3920 3921 if (np->flags & NIU_FLAGS_XMAC) { 3922 u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS); 3923 3924 if (xrxmac_stat & 
XRXMAC_STATUS_PHY_MDINT) 3925 phy_mdint = 1; 3926 } 3927 3928 dev_err(np->device, PFX "%s: MIF interrupt, " 3929 "stat[%llx] phy_mdint(%d)\n", 3930 np->dev->name, (unsigned long long) mif_status, phy_mdint); 3931 3932 return -ENODEV; 3933} 3934 3935static void niu_xmac_interrupt(struct niu *np) 3936{ 3937 struct niu_xmac_stats *mp = &np->mac_stats.xmac; 3938 u64 val; 3939 3940 val = nr64_mac(XTXMAC_STATUS); 3941 if (val & XTXMAC_STATUS_FRAME_CNT_EXP) 3942 mp->tx_frames += TXMAC_FRM_CNT_COUNT; 3943 if (val & XTXMAC_STATUS_BYTE_CNT_EXP) 3944 mp->tx_bytes += TXMAC_BYTE_CNT_COUNT; 3945 if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR) 3946 mp->tx_fifo_errors++; 3947 if (val & XTXMAC_STATUS_TXMAC_OFLOW) 3948 mp->tx_overflow_errors++; 3949 if (val & XTXMAC_STATUS_MAX_PSIZE_ERR) 3950 mp->tx_max_pkt_size_errors++; 3951 if (val & XTXMAC_STATUS_TXMAC_UFLOW) 3952 mp->tx_underflow_errors++; 3953 3954 val = nr64_mac(XRXMAC_STATUS); 3955 if (val & XRXMAC_STATUS_LCL_FLT_STATUS) 3956 mp->rx_local_faults++; 3957 if (val & XRXMAC_STATUS_RFLT_DET) 3958 mp->rx_remote_faults++; 3959 if (val & XRXMAC_STATUS_LFLT_CNT_EXP) 3960 mp->rx_link_faults += LINK_FAULT_CNT_COUNT; 3961 if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP) 3962 mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT; 3963 if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP) 3964 mp->rx_frags += RXMAC_FRAG_CNT_COUNT; 3965 if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP) 3966 mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT; 3967 if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP) 3968 mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; 3969 if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP) 3970 mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; 3971 if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP) 3972 mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT; 3973 if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP) 3974 mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT; 3975 if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP) 3976 mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT; 3977 if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP) 3978 mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT; 3979 if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP) 3980 mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT; 3981 if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP) 3982 mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT; 3983 if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP) 3984 mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT; 3985 if (val & XRXMAC_STAT_MSK_RXOCTET_CNT_EXP) 3986 mp->rx_octets += RXMAC_BT_CNT_COUNT; 3987 if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP) 3988 mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT; 3989 if (val & XRXMAC_STATUS_LENERR_CNT_EXP) 3990 mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT; 3991 if (val & XRXMAC_STATUS_CRCERR_CNT_EXP) 3992 mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT; 3993 if (val & XRXMAC_STATUS_RXUFLOW) 3994 mp->rx_underflows++; 3995 if (val & XRXMAC_STATUS_RXOFLOW) 3996 mp->rx_overflows++; 3997 3998 val = nr64_mac(XMAC_FC_STAT); 3999 if (val & XMAC_FC_STAT_TX_MAC_NPAUSE) 4000 mp->pause_off_state++; 4001 if (val & XMAC_FC_STAT_TX_MAC_PAUSE) 4002 mp->pause_on_state++; 4003 if (val & XMAC_FC_STAT_RX_MAC_RPAUSE) 4004 mp->pause_received++; 4005} 4006 4007static void niu_bmac_interrupt(struct niu *np) 4008{ 4009 struct niu_bmac_stats *mp = &np->mac_stats.bmac; 4010 u64 val; 4011 4012 val = nr64_mac(BTXMAC_STATUS); 4013 if (val & BTXMAC_STATUS_UNDERRUN) 4014 mp->tx_underflow_errors++; 4015 if (val & BTXMAC_STATUS_MAX_PKT_ERR) 4016 mp->tx_max_pkt_size_errors++; 4017 if (val & BTXMAC_STATUS_BYTE_CNT_EXP) 4018 mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT; 4019 if (val & BTXMAC_STATUS_FRAME_CNT_EXP) 4020 mp->tx_frames += BTXMAC_FRM_CNT_COUNT; 4021 4022 val = 
nr64_mac(BRXMAC_STATUS); 4023 if (val & BRXMAC_STATUS_OVERFLOW) 4024 mp->rx_overflows++; 4025 if (val & BRXMAC_STATUS_FRAME_CNT_EXP) 4026 mp->rx_frames += BRXMAC_FRAME_CNT_COUNT; 4027 if (val & BRXMAC_STATUS_ALIGN_ERR_EXP) 4028 mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT; 4029 if (val & BRXMAC_STATUS_CRC_ERR_EXP) 4030 mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT; 4031 if (val & BRXMAC_STATUS_LEN_ERR_EXP) 4032 mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT; 4033 4034 val = nr64_mac(BMAC_CTRL_STATUS); 4035 if (val & BMAC_CTRL_STATUS_NOPAUSE) 4036 mp->pause_off_state++; 4037 if (val & BMAC_CTRL_STATUS_PAUSE) 4038 mp->pause_on_state++; 4039 if (val & BMAC_CTRL_STATUS_PAUSE_RECV) 4040 mp->pause_received++; 4041} 4042 4043static int niu_mac_interrupt(struct niu *np) 4044{ 4045 if (np->flags & NIU_FLAGS_XMAC) 4046 niu_xmac_interrupt(np); 4047 else 4048 niu_bmac_interrupt(np); 4049 4050 return 0; 4051} 4052 4053static void niu_log_device_error(struct niu *np, u64 stat) 4054{ 4055 dev_err(np->device, PFX "%s: Core device errors ( ", 4056 np->dev->name); 4057 4058 if (stat & SYS_ERR_MASK_META2) 4059 printk("META2 "); 4060 if (stat & SYS_ERR_MASK_META1) 4061 printk("META1 "); 4062 if (stat & SYS_ERR_MASK_PEU) 4063 printk("PEU "); 4064 if (stat & SYS_ERR_MASK_TXC) 4065 printk("TXC "); 4066 if (stat & SYS_ERR_MASK_RDMC) 4067 printk("RDMC "); 4068 if (stat & SYS_ERR_MASK_TDMC) 4069 printk("TDMC "); 4070 if (stat & SYS_ERR_MASK_ZCP) 4071 printk("ZCP "); 4072 if (stat & SYS_ERR_MASK_FFLP) 4073 printk("FFLP "); 4074 if (stat & SYS_ERR_MASK_IPP) 4075 printk("IPP "); 4076 if (stat & SYS_ERR_MASK_MAC) 4077 printk("MAC "); 4078 if (stat & SYS_ERR_MASK_SMX) 4079 printk("SMX "); 4080 4081 printk(")\n"); 4082} 4083 4084static int niu_device_error(struct niu *np) 4085{ 4086 u64 stat = nr64(SYS_ERR_STAT); 4087 4088 dev_err(np->device, PFX "%s: Core device error, stat[%llx]\n", 4089 np->dev->name, (unsigned long long) stat); 4090 4091 niu_log_device_error(np, stat); 4092 4093 return -ENODEV; 4094} 4095 4096static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp, 4097 u64 v0, u64 v1, u64 v2) 4098{ 4099 4100 int i, err = 0; 4101 4102 lp->v0 = v0; 4103 lp->v1 = v1; 4104 lp->v2 = v2; 4105 4106 if (v1 & 0x00000000ffffffffULL) { 4107 u32 rx_vec = (v1 & 0xffffffff); 4108 4109 for (i = 0; i < np->num_rx_rings; i++) { 4110 struct rx_ring_info *rp = &np->rx_rings[i]; 4111 4112 if (rx_vec & (1 << rp->rx_channel)) { 4113 int r = niu_rx_error(np, rp); 4114 if (r) { 4115 err = r; 4116 } else { 4117 if (!v0) 4118 nw64(RX_DMA_CTL_STAT(rp->rx_channel), 4119 RX_DMA_CTL_STAT_MEX); 4120 } 4121 } 4122 } 4123 } 4124 if (v1 & 0x7fffffff00000000ULL) { 4125 u32 tx_vec = (v1 >> 32) & 0x7fffffff; 4126 4127 for (i = 0; i < np->num_tx_rings; i++) { 4128 struct tx_ring_info *rp = &np->tx_rings[i]; 4129 4130 if (tx_vec & (1 << rp->tx_channel)) { 4131 int r = niu_tx_error(np, rp); 4132 if (r) 4133 err = r; 4134 } 4135 } 4136 } 4137 if ((v0 | v1) & 0x8000000000000000ULL) { 4138 int r = niu_mif_interrupt(np); 4139 if (r) 4140 err = r; 4141 } 4142 if (v2) { 4143 if (v2 & 0x01ef) { 4144 int r = niu_mac_interrupt(np); 4145 if (r) 4146 err = r; 4147 } 4148 if (v2 & 0x0210) { 4149 int r = niu_device_error(np); 4150 if (r) 4151 err = r; 4152 } 4153 } 4154 4155 if (err) 4156 niu_enable_interrupts(np, 0); 4157 4158 return err; 4159} 4160 4161static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp, 4162 int ldn) 4163{ 4164 struct rxdma_mailbox *mbox = rp->mbox; 4165 u64 stat_write, stat = 
le64_to_cpup(&mbox->rx_dma_ctl_stat); 4166 4167 stat_write = (RX_DMA_CTL_STAT_RCRTHRES | 4168 RX_DMA_CTL_STAT_RCRTO); 4169 nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write); 4170 4171 niudbg(INTR, "%s: rxchan_intr stat[%llx]\n", 4172 np->dev->name, (unsigned long long) stat); 4173} 4174 4175static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp, 4176 int ldn) 4177{ 4178 rp->tx_cs = nr64(TX_CS(rp->tx_channel)); 4179 4180 niudbg(INTR, "%s: txchan_intr cs[%llx]\n", 4181 np->dev->name, (unsigned long long) rp->tx_cs); 4182} 4183 4184static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0) 4185{ 4186 struct niu_parent *parent = np->parent; 4187 u32 rx_vec, tx_vec; 4188 int i; 4189 4190 tx_vec = (v0 >> 32); 4191 rx_vec = (v0 & 0xffffffff); 4192 4193 for (i = 0; i < np->num_rx_rings; i++) { 4194 struct rx_ring_info *rp = &np->rx_rings[i]; 4195 int ldn = LDN_RXDMA(rp->rx_channel); 4196 4197 if (parent->ldg_map[ldn] != ldg) 4198 continue; 4199 4200 nw64(LD_IM0(ldn), LD_IM0_MASK); 4201 if (rx_vec & (1 << rp->rx_channel)) 4202 niu_rxchan_intr(np, rp, ldn); 4203 } 4204 4205 for (i = 0; i < np->num_tx_rings; i++) { 4206 struct tx_ring_info *rp = &np->tx_rings[i]; 4207 int ldn = LDN_TXDMA(rp->tx_channel); 4208 4209 if (parent->ldg_map[ldn] != ldg) 4210 continue; 4211 4212 nw64(LD_IM0(ldn), LD_IM0_MASK); 4213 if (tx_vec & (1 << rp->tx_channel)) 4214 niu_txchan_intr(np, rp, ldn); 4215 } 4216} 4217 4218static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp, 4219 u64 v0, u64 v1, u64 v2) 4220{ 4221 if (likely(napi_schedule_prep(&lp->napi))) { 4222 lp->v0 = v0; 4223 lp->v1 = v1; 4224 lp->v2 = v2; 4225 __niu_fastpath_interrupt(np, lp->ldg_num, v0); 4226 __napi_schedule(&lp->napi); 4227 } 4228} 4229 4230static irqreturn_t niu_interrupt(int irq, void *dev_id) 4231{ 4232 struct niu_ldg *lp = dev_id; 4233 struct niu *np = lp->np; 4234 int ldg = lp->ldg_num; 4235 unsigned long flags; 4236 u64 v0, v1, v2; 4237 4238 if (netif_msg_intr(np)) 4239 printk(KERN_DEBUG PFX "niu_interrupt() ldg[%p](%d) ", 4240 lp, ldg); 4241 4242 spin_lock_irqsave(&np->lock, flags); 4243 4244 v0 = nr64(LDSV0(ldg)); 4245 v1 = nr64(LDSV1(ldg)); 4246 v2 = nr64(LDSV2(ldg)); 4247 4248 if (netif_msg_intr(np)) 4249 printk("v0[%llx] v1[%llx] v2[%llx]\n", 4250 (unsigned long long) v0, 4251 (unsigned long long) v1, 4252 (unsigned long long) v2); 4253 4254 if (unlikely(!v0 && !v1 && !v2)) { 4255 spin_unlock_irqrestore(&np->lock, flags); 4256 return IRQ_NONE; 4257 } 4258 4259 if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) { 4260 int err = niu_slowpath_interrupt(np, lp, v0, v1, v2); 4261 if (err) 4262 goto out; 4263 } 4264 if (likely(v0 & ~((u64)1 << LDN_MIF))) 4265 niu_schedule_napi(np, lp, v0, v1, v2); 4266 else 4267 niu_ldg_rearm(np, lp, 1); 4268out: 4269 spin_unlock_irqrestore(&np->lock, flags); 4270 4271 return IRQ_HANDLED; 4272} 4273 4274static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp) 4275{ 4276 if (rp->mbox) { 4277 np->ops->free_coherent(np->device, 4278 sizeof(struct rxdma_mailbox), 4279 rp->mbox, rp->mbox_dma); 4280 rp->mbox = NULL; 4281 } 4282 if (rp->rcr) { 4283 np->ops->free_coherent(np->device, 4284 MAX_RCR_RING_SIZE * sizeof(__le64), 4285 rp->rcr, rp->rcr_dma); 4286 rp->rcr = NULL; 4287 rp->rcr_table_size = 0; 4288 rp->rcr_index = 0; 4289 } 4290 if (rp->rbr) { 4291 niu_rbr_free(np, rp); 4292 4293 np->ops->free_coherent(np->device, 4294 MAX_RBR_RING_SIZE * sizeof(__le32), 4295 rp->rbr, rp->rbr_dma); 4296 rp->rbr = NULL; 4297 rp->rbr_table_size = 0; 4298 rp->rbr_index = 0; 
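/* Note on the logical-device vectors used by niu_interrupt(),
 * __niu_fastpath_interrupt() and niu_poll_core() above: LDSV0 is read
 * into v0 with the TX DMA channels reported in the upper 32 bits and
 * the RX DMA channels in the lower 32 bits, one bit per channel
 * number.  The hypothetical helper below (not part of the driver,
 * kept in #if 0) shows the same split for a single channel pair.
 */
#if 0
static int ldsv0_has_work_example(u64 v0, int rx_channel, int tx_channel)
{
	u32 tx_vec = (u32) (v0 >> 32);
	u32 rx_vec = (u32) (v0 & 0xffffffff);

	/* Non-zero when either the RX or the TX channel raised its
	 * logical-device bit in this group's LDSV0 register.
	 */
	return (rx_vec & (1 << rx_channel)) ||
	       (tx_vec & (1 << tx_channel));
}
#endif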
4299 } 4300 kfree(rp->rxhash); 4301 rp->rxhash = NULL; 4302} 4303 4304static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp) 4305{ 4306 if (rp->mbox) { 4307 np->ops->free_coherent(np->device, 4308 sizeof(struct txdma_mailbox), 4309 rp->mbox, rp->mbox_dma); 4310 rp->mbox = NULL; 4311 } 4312 if (rp->descr) { 4313 int i; 4314 4315 for (i = 0; i < MAX_TX_RING_SIZE; i++) { 4316 if (rp->tx_buffs[i].skb) 4317 (void) release_tx_packet(np, rp, i); 4318 } 4319 4320 np->ops->free_coherent(np->device, 4321 MAX_TX_RING_SIZE * sizeof(__le64), 4322 rp->descr, rp->descr_dma); 4323 rp->descr = NULL; 4324 rp->pending = 0; 4325 rp->prod = 0; 4326 rp->cons = 0; 4327 rp->wrap_bit = 0; 4328 } 4329} 4330 4331static void niu_free_channels(struct niu *np) 4332{ 4333 int i; 4334 4335 if (np->rx_rings) { 4336 for (i = 0; i < np->num_rx_rings; i++) { 4337 struct rx_ring_info *rp = &np->rx_rings[i]; 4338 4339 niu_free_rx_ring_info(np, rp); 4340 } 4341 kfree(np->rx_rings); 4342 np->rx_rings = NULL; 4343 np->num_rx_rings = 0; 4344 } 4345 4346 if (np->tx_rings) { 4347 for (i = 0; i < np->num_tx_rings; i++) { 4348 struct tx_ring_info *rp = &np->tx_rings[i]; 4349 4350 niu_free_tx_ring_info(np, rp); 4351 } 4352 kfree(np->tx_rings); 4353 np->tx_rings = NULL; 4354 np->num_tx_rings = 0; 4355 } 4356} 4357 4358static int niu_alloc_rx_ring_info(struct niu *np, 4359 struct rx_ring_info *rp) 4360{ 4361 BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64); 4362 4363 rp->rxhash = kzalloc(MAX_RBR_RING_SIZE * sizeof(struct page *), 4364 GFP_KERNEL); 4365 if (!rp->rxhash) 4366 return -ENOMEM; 4367 4368 rp->mbox = np->ops->alloc_coherent(np->device, 4369 sizeof(struct rxdma_mailbox), 4370 &rp->mbox_dma, GFP_KERNEL); 4371 if (!rp->mbox) 4372 return -ENOMEM; 4373 if ((unsigned long)rp->mbox & (64UL - 1)) { 4374 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " 4375 "RXDMA mailbox %p\n", np->dev->name, rp->mbox); 4376 return -EINVAL; 4377 } 4378 4379 rp->rcr = np->ops->alloc_coherent(np->device, 4380 MAX_RCR_RING_SIZE * sizeof(__le64), 4381 &rp->rcr_dma, GFP_KERNEL); 4382 if (!rp->rcr) 4383 return -ENOMEM; 4384 if ((unsigned long)rp->rcr & (64UL - 1)) { 4385 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " 4386 "RXDMA RCR table %p\n", np->dev->name, rp->rcr); 4387 return -EINVAL; 4388 } 4389 rp->rcr_table_size = MAX_RCR_RING_SIZE; 4390 rp->rcr_index = 0; 4391 4392 rp->rbr = np->ops->alloc_coherent(np->device, 4393 MAX_RBR_RING_SIZE * sizeof(__le32), 4394 &rp->rbr_dma, GFP_KERNEL); 4395 if (!rp->rbr) 4396 return -ENOMEM; 4397 if ((unsigned long)rp->rbr & (64UL - 1)) { 4398 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " 4399 "RXDMA RBR table %p\n", np->dev->name, rp->rbr); 4400 return -EINVAL; 4401 } 4402 rp->rbr_table_size = MAX_RBR_RING_SIZE; 4403 rp->rbr_index = 0; 4404 rp->rbr_pending = 0; 4405 4406 return 0; 4407} 4408 4409static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp) 4410{ 4411 int mtu = np->dev->mtu; 4412 4413 /* These values are recommended by the HW designers for fair 4414 * utilization of DRR amongst the rings. 
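/* Note on the coherent allocations in niu_alloc_rx_ring_info() above:
 * the RXDMA mailbox, RCR table and RBR table must each start on a
 * 64-byte boundary, which the function verifies by testing the low
 * six address bits.  A minimal sketch of that check (hypothetical
 * helper, not part of the driver):
 */
#if 0
static int is_64byte_aligned_example(void *p)
{
	/* True when the low six bits of the address are clear. */
	return ((unsigned long) p & (64UL - 1)) == 0;
}
#endif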
4415 */ 4416 rp->max_burst = mtu + 32; 4417 if (rp->max_burst > 4096) 4418 rp->max_burst = 4096; 4419} 4420 4421static int niu_alloc_tx_ring_info(struct niu *np, 4422 struct tx_ring_info *rp) 4423{ 4424 BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64); 4425 4426 rp->mbox = np->ops->alloc_coherent(np->device, 4427 sizeof(struct txdma_mailbox), 4428 &rp->mbox_dma, GFP_KERNEL); 4429 if (!rp->mbox) 4430 return -ENOMEM; 4431 if ((unsigned long)rp->mbox & (64UL - 1)) { 4432 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " 4433 "TXDMA mailbox %p\n", np->dev->name, rp->mbox); 4434 return -EINVAL; 4435 } 4436 4437 rp->descr = np->ops->alloc_coherent(np->device, 4438 MAX_TX_RING_SIZE * sizeof(__le64), 4439 &rp->descr_dma, GFP_KERNEL); 4440 if (!rp->descr) 4441 return -ENOMEM; 4442 if ((unsigned long)rp->descr & (64UL - 1)) { 4443 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " 4444 "TXDMA descr table %p\n", np->dev->name, rp->descr); 4445 return -EINVAL; 4446 } 4447 4448 rp->pending = MAX_TX_RING_SIZE; 4449 rp->prod = 0; 4450 rp->cons = 0; 4451 rp->wrap_bit = 0; 4452 4453 /* XXX make these configurable... XXX */ 4454 rp->mark_freq = rp->pending / 4; 4455 4456 niu_set_max_burst(np, rp); 4457 4458 return 0; 4459} 4460 4461static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp) 4462{ 4463 u16 bss; 4464 4465 bss = min(PAGE_SHIFT, 15); 4466 4467 rp->rbr_block_size = 1 << bss; 4468 rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss); 4469 4470 rp->rbr_sizes[0] = 256; 4471 rp->rbr_sizes[1] = 1024; 4472 if (np->dev->mtu > ETH_DATA_LEN) { 4473 switch (PAGE_SIZE) { 4474 case 4 * 1024: 4475 rp->rbr_sizes[2] = 4096; 4476 break; 4477 4478 default: 4479 rp->rbr_sizes[2] = 8192; 4480 break; 4481 } 4482 } else { 4483 rp->rbr_sizes[2] = 2048; 4484 } 4485 rp->rbr_sizes[3] = rp->rbr_block_size; 4486} 4487 4488static int niu_alloc_channels(struct niu *np) 4489{ 4490 struct niu_parent *parent = np->parent; 4491 int first_rx_channel, first_tx_channel; 4492 int i, port, err; 4493 4494 port = np->port; 4495 first_rx_channel = first_tx_channel = 0; 4496 for (i = 0; i < port; i++) { 4497 first_rx_channel += parent->rxchan_per_port[i]; 4498 first_tx_channel += parent->txchan_per_port[i]; 4499 } 4500 4501 np->num_rx_rings = parent->rxchan_per_port[port]; 4502 np->num_tx_rings = parent->txchan_per_port[port]; 4503 4504 np->dev->real_num_tx_queues = np->num_tx_rings; 4505 4506 np->rx_rings = kzalloc(np->num_rx_rings * sizeof(struct rx_ring_info), 4507 GFP_KERNEL); 4508 err = -ENOMEM; 4509 if (!np->rx_rings) 4510 goto out_err; 4511 4512 for (i = 0; i < np->num_rx_rings; i++) { 4513 struct rx_ring_info *rp = &np->rx_rings[i]; 4514 4515 rp->np = np; 4516 rp->rx_channel = first_rx_channel + i; 4517 4518 err = niu_alloc_rx_ring_info(np, rp); 4519 if (err) 4520 goto out_err; 4521 4522 niu_size_rbr(np, rp); 4523 4524 /* XXX better defaults, configurable, etc... 
XXX */ 4525 rp->nonsyn_window = 64; 4526 rp->nonsyn_threshold = rp->rcr_table_size - 64; 4527 rp->syn_window = 64; 4528 rp->syn_threshold = rp->rcr_table_size - 64; 4529 rp->rcr_pkt_threshold = 16; 4530 rp->rcr_timeout = 8; 4531 rp->rbr_kick_thresh = RBR_REFILL_MIN; 4532 if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page) 4533 rp->rbr_kick_thresh = rp->rbr_blocks_per_page; 4534 4535 err = niu_rbr_fill(np, rp, GFP_KERNEL); 4536 if (err) 4537 return err; 4538 } 4539 4540 np->tx_rings = kzalloc(np->num_tx_rings * sizeof(struct tx_ring_info), 4541 GFP_KERNEL); 4542 err = -ENOMEM; 4543 if (!np->tx_rings) 4544 goto out_err; 4545 4546 for (i = 0; i < np->num_tx_rings; i++) { 4547 struct tx_ring_info *rp = &np->tx_rings[i]; 4548 4549 rp->np = np; 4550 rp->tx_channel = first_tx_channel + i; 4551 4552 err = niu_alloc_tx_ring_info(np, rp); 4553 if (err) 4554 goto out_err; 4555 } 4556 4557 return 0; 4558 4559out_err: 4560 niu_free_channels(np); 4561 return err; 4562} 4563 4564static int niu_tx_cs_sng_poll(struct niu *np, int channel) 4565{ 4566 int limit = 1000; 4567 4568 while (--limit > 0) { 4569 u64 val = nr64(TX_CS(channel)); 4570 if (val & TX_CS_SNG_STATE) 4571 return 0; 4572 } 4573 return -ENODEV; 4574} 4575 4576static int niu_tx_channel_stop(struct niu *np, int channel) 4577{ 4578 u64 val = nr64(TX_CS(channel)); 4579 4580 val |= TX_CS_STOP_N_GO; 4581 nw64(TX_CS(channel), val); 4582 4583 return niu_tx_cs_sng_poll(np, channel); 4584} 4585 4586static int niu_tx_cs_reset_poll(struct niu *np, int channel) 4587{ 4588 int limit = 1000; 4589 4590 while (--limit > 0) { 4591 u64 val = nr64(TX_CS(channel)); 4592 if (!(val & TX_CS_RST)) 4593 return 0; 4594 } 4595 return -ENODEV; 4596} 4597 4598static int niu_tx_channel_reset(struct niu *np, int channel) 4599{ 4600 u64 val = nr64(TX_CS(channel)); 4601 int err; 4602 4603 val |= TX_CS_RST; 4604 nw64(TX_CS(channel), val); 4605 4606 err = niu_tx_cs_reset_poll(np, channel); 4607 if (!err) 4608 nw64(TX_RING_KICK(channel), 0); 4609 4610 return err; 4611} 4612 4613static int niu_tx_channel_lpage_init(struct niu *np, int channel) 4614{ 4615 u64 val; 4616 4617 nw64(TX_LOG_MASK1(channel), 0); 4618 nw64(TX_LOG_VAL1(channel), 0); 4619 nw64(TX_LOG_MASK2(channel), 0); 4620 nw64(TX_LOG_VAL2(channel), 0); 4621 nw64(TX_LOG_PAGE_RELO1(channel), 0); 4622 nw64(TX_LOG_PAGE_RELO2(channel), 0); 4623 nw64(TX_LOG_PAGE_HDL(channel), 0); 4624 4625 val = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT; 4626 val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1); 4627 nw64(TX_LOG_PAGE_VLD(channel), val); 4628 4629 /* XXX TXDMA 32bit mode? 
XXX */ 4630 4631 return 0; 4632} 4633 4634static void niu_txc_enable_port(struct niu *np, int on) 4635{ 4636 unsigned long flags; 4637 u64 val, mask; 4638 4639 niu_lock_parent(np, flags); 4640 val = nr64(TXC_CONTROL); 4641 mask = (u64)1 << np->port; 4642 if (on) { 4643 val |= TXC_CONTROL_ENABLE | mask; 4644 } else { 4645 val &= ~mask; 4646 if ((val & ~TXC_CONTROL_ENABLE) == 0) 4647 val &= ~TXC_CONTROL_ENABLE; 4648 } 4649 nw64(TXC_CONTROL, val); 4650 niu_unlock_parent(np, flags); 4651} 4652 4653static void niu_txc_set_imask(struct niu *np, u64 imask) 4654{ 4655 unsigned long flags; 4656 u64 val; 4657 4658 niu_lock_parent(np, flags); 4659 val = nr64(TXC_INT_MASK); 4660 val &= ~TXC_INT_MASK_VAL(np->port); 4661 val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port)); 4662 niu_unlock_parent(np, flags); 4663} 4664 4665static void niu_txc_port_dma_enable(struct niu *np, int on) 4666{ 4667 u64 val = 0; 4668 4669 if (on) { 4670 int i; 4671 4672 for (i = 0; i < np->num_tx_rings; i++) 4673 val |= (1 << np->tx_rings[i].tx_channel); 4674 } 4675 nw64(TXC_PORT_DMA(np->port), val); 4676} 4677 4678static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp) 4679{ 4680 int err, channel = rp->tx_channel; 4681 u64 val, ring_len; 4682 4683 err = niu_tx_channel_stop(np, channel); 4684 if (err) 4685 return err; 4686 4687 err = niu_tx_channel_reset(np, channel); 4688 if (err) 4689 return err; 4690 4691 err = niu_tx_channel_lpage_init(np, channel); 4692 if (err) 4693 return err; 4694 4695 nw64(TXC_DMA_MAX(channel), rp->max_burst); 4696 nw64(TX_ENT_MSK(channel), 0); 4697 4698 if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE | 4699 TX_RNG_CFIG_STADDR)) { 4700 dev_err(np->device, PFX "%s: TX ring channel %d " 4701 "DMA addr (%llx) is not aligned.\n", 4702 np->dev->name, channel, 4703 (unsigned long long) rp->descr_dma); 4704 return -EINVAL; 4705 } 4706 4707 /* The length field in TX_RNG_CFIG is measured in 64-byte 4708 * blocks. rp->pending is the number of TX descriptors in 4709 * our ring, 8 bytes each, thus we divide by 8 bytes more 4710 * to get the proper value the chip wants. 
4711 */ 4712 ring_len = (rp->pending / 8); 4713 4714 val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) | 4715 rp->descr_dma); 4716 nw64(TX_RNG_CFIG(channel), val); 4717 4718 if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) || 4719 ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) { 4720 dev_err(np->device, PFX "%s: TX ring channel %d " 4721 "MBOX addr (%llx) is has illegal bits.\n", 4722 np->dev->name, channel, 4723 (unsigned long long) rp->mbox_dma); 4724 return -EINVAL; 4725 } 4726 nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32); 4727 nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR); 4728 4729 nw64(TX_CS(channel), 0); 4730 4731 rp->last_pkt_cnt = 0; 4732 4733 return 0; 4734} 4735 4736static void niu_init_rdc_groups(struct niu *np) 4737{ 4738 struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port]; 4739 int i, first_table_num = tp->first_table_num; 4740 4741 for (i = 0; i < tp->num_tables; i++) { 4742 struct rdc_table *tbl = &tp->tables[i]; 4743 int this_table = first_table_num + i; 4744 int slot; 4745 4746 for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) 4747 nw64(RDC_TBL(this_table, slot), 4748 tbl->rxdma_channel[slot]); 4749 } 4750 4751 nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]); 4752} 4753 4754static void niu_init_drr_weight(struct niu *np) 4755{ 4756 int type = phy_decode(np->parent->port_phy, np->port); 4757 u64 val; 4758 4759 switch (type) { 4760 case PORT_TYPE_10G: 4761 val = PT_DRR_WEIGHT_DEFAULT_10G; 4762 break; 4763 4764 case PORT_TYPE_1G: 4765 default: 4766 val = PT_DRR_WEIGHT_DEFAULT_1G; 4767 break; 4768 } 4769 nw64(PT_DRR_WT(np->port), val); 4770} 4771 4772static int niu_init_hostinfo(struct niu *np) 4773{ 4774 struct niu_parent *parent = np->parent; 4775 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; 4776 int i, err, num_alt = niu_num_alt_addr(np); 4777 int first_rdc_table = tp->first_table_num; 4778 4779 err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); 4780 if (err) 4781 return err; 4782 4783 err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); 4784 if (err) 4785 return err; 4786 4787 for (i = 0; i < num_alt; i++) { 4788 err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1); 4789 if (err) 4790 return err; 4791 } 4792 4793 return 0; 4794} 4795 4796static int niu_rx_channel_reset(struct niu *np, int channel) 4797{ 4798 return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel), 4799 RXDMA_CFIG1_RST, 1000, 10, 4800 "RXDMA_CFIG1"); 4801} 4802 4803static int niu_rx_channel_lpage_init(struct niu *np, int channel) 4804{ 4805 u64 val; 4806 4807 nw64(RX_LOG_MASK1(channel), 0); 4808 nw64(RX_LOG_VAL1(channel), 0); 4809 nw64(RX_LOG_MASK2(channel), 0); 4810 nw64(RX_LOG_VAL2(channel), 0); 4811 nw64(RX_LOG_PAGE_RELO1(channel), 0); 4812 nw64(RX_LOG_PAGE_RELO2(channel), 0); 4813 nw64(RX_LOG_PAGE_HDL(channel), 0); 4814 4815 val = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT; 4816 val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1); 4817 nw64(RX_LOG_PAGE_VLD(channel), val); 4818 4819 return 0; 4820} 4821 4822static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp) 4823{ 4824 u64 val; 4825 4826 val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) | 4827 ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) | 4828 ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) | 4829 ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT)); 4830 nw64(RDC_RED_PARA(rp->rx_channel), val); 4831} 4832 4833static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret) 4834{ 4835 u64 val = 0; 4836 4837 *ret = 0; 4838 switch 
(rp->rbr_block_size) { 4839 case 4 * 1024: 4840 val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT); 4841 break; 4842 case 8 * 1024: 4843 val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT); 4844 break; 4845 case 16 * 1024: 4846 val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT); 4847 break; 4848 case 32 * 1024: 4849 val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT); 4850 break; 4851 default: 4852 return -EINVAL; 4853 } 4854 val |= RBR_CFIG_B_VLD2; 4855 switch (rp->rbr_sizes[2]) { 4856 case 2 * 1024: 4857 val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT); 4858 break; 4859 case 4 * 1024: 4860 val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT); 4861 break; 4862 case 8 * 1024: 4863 val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT); 4864 break; 4865 case 16 * 1024: 4866 val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT); 4867 break; 4868 4869 default: 4870 return -EINVAL; 4871 } 4872 val |= RBR_CFIG_B_VLD1; 4873 switch (rp->rbr_sizes[1]) { 4874 case 1 * 1024: 4875 val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT); 4876 break; 4877 case 2 * 1024: 4878 val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT); 4879 break; 4880 case 4 * 1024: 4881 val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT); 4882 break; 4883 case 8 * 1024: 4884 val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT); 4885 break; 4886 4887 default: 4888 return -EINVAL; 4889 } 4890 val |= RBR_CFIG_B_VLD0; 4891 switch (rp->rbr_sizes[0]) { 4892 case 256: 4893 val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT); 4894 break; 4895 case 512: 4896 val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT); 4897 break; 4898 case 1 * 1024: 4899 val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT); 4900 break; 4901 case 2 * 1024: 4902 val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT); 4903 break; 4904 4905 default: 4906 return -EINVAL; 4907 } 4908 4909 *ret = val; 4910 return 0; 4911} 4912 4913static int niu_enable_rx_channel(struct niu *np, int channel, int on) 4914{ 4915 u64 val = nr64(RXDMA_CFIG1(channel)); 4916 int limit; 4917 4918 if (on) 4919 val |= RXDMA_CFIG1_EN; 4920 else 4921 val &= ~RXDMA_CFIG1_EN; 4922 nw64(RXDMA_CFIG1(channel), val); 4923 4924 limit = 1000; 4925 while (--limit > 0) { 4926 if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST) 4927 break; 4928 udelay(10); 4929 } 4930 if (limit <= 0) 4931 return -ENODEV; 4932 return 0; 4933} 4934 4935static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp) 4936{ 4937 int err, channel = rp->rx_channel; 4938 u64 val; 4939 4940 err = niu_rx_channel_reset(np, channel); 4941 if (err) 4942 return err; 4943 4944 err = niu_rx_channel_lpage_init(np, channel); 4945 if (err) 4946 return err; 4947 4948 niu_rx_channel_wred_init(np, rp); 4949 4950 nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY); 4951 nw64(RX_DMA_CTL_STAT(channel), 4952 (RX_DMA_CTL_STAT_MEX | 4953 RX_DMA_CTL_STAT_RCRTHRES | 4954 RX_DMA_CTL_STAT_RCRTO | 4955 RX_DMA_CTL_STAT_RBR_EMPTY)); 4956 nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32); 4957 nw64(RXDMA_CFIG2(channel), (rp->mbox_dma & 0x00000000ffffffc0)); 4958 nw64(RBR_CFIG_A(channel), 4959 ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) | 4960 (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR))); 4961 err = niu_compute_rbr_cfig_b(rp, &val); 4962 if (err) 4963 return err; 4964 nw64(RBR_CFIG_B(channel), val); 4965 nw64(RCRCFIG_A(channel), 4966 ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) | 4967 (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR))); 4968 nw64(RCRCFIG_B(channel), 4969 ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) | 4970 
RCRCFIG_B_ENTOUT | 4971 ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT)); 4972 4973 err = niu_enable_rx_channel(np, channel, 1); 4974 if (err) 4975 return err; 4976 4977 nw64(RBR_KICK(channel), rp->rbr_index); 4978 4979 val = nr64(RX_DMA_CTL_STAT(channel)); 4980 val |= RX_DMA_CTL_STAT_RBR_EMPTY; 4981 nw64(RX_DMA_CTL_STAT(channel), val); 4982 4983 return 0; 4984} 4985 4986static int niu_init_rx_channels(struct niu *np) 4987{ 4988 unsigned long flags; 4989 u64 seed = jiffies_64; 4990 int err, i; 4991 4992 niu_lock_parent(np, flags); 4993 nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider); 4994 nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL)); 4995 niu_unlock_parent(np, flags); 4996 4997 /* XXX RXDMA 32bit mode? XXX */ 4998 4999 niu_init_rdc_groups(np); 5000 niu_init_drr_weight(np); 5001 5002 err = niu_init_hostinfo(np); 5003 if (err) 5004 return err; 5005 5006 for (i = 0; i < np->num_rx_rings; i++) { 5007 struct rx_ring_info *rp = &np->rx_rings[i]; 5008 5009 err = niu_init_one_rx_channel(np, rp); 5010 if (err) 5011 return err; 5012 } 5013 5014 return 0; 5015} 5016 5017static int niu_set_ip_frag_rule(struct niu *np) 5018{ 5019 struct niu_parent *parent = np->parent; 5020 struct niu_classifier *cp = &np->clas; 5021 struct niu_tcam_entry *tp; 5022 int index, err; 5023 5024 index = cp->tcam_top; 5025 tp = &parent->tcam[index]; 5026 5027 /* Note that the noport bit is the same in both ipv4 and 5028 * ipv6 format TCAM entries. 5029 */ 5030 memset(tp, 0, sizeof(*tp)); 5031 tp->key[1] = TCAM_V4KEY1_NOPORT; 5032 tp->key_mask[1] = TCAM_V4KEY1_NOPORT; 5033 tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET | 5034 ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT)); 5035 err = tcam_write(np, index, tp->key, tp->key_mask); 5036 if (err) 5037 return err; 5038 err = tcam_assoc_write(np, index, tp->assoc_data); 5039 if (err) 5040 return err; 5041 tp->valid = 1; 5042 cp->tcam_valid_entries++; 5043 5044 return 0; 5045} 5046 5047static int niu_init_classifier_hw(struct niu *np) 5048{ 5049 struct niu_parent *parent = np->parent; 5050 struct niu_classifier *cp = &np->clas; 5051 int i, err; 5052 5053 nw64(H1POLY, cp->h1_init); 5054 nw64(H2POLY, cp->h2_init); 5055 5056 err = niu_init_hostinfo(np); 5057 if (err) 5058 return err; 5059 5060 for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) { 5061 struct niu_vlan_rdc *vp = &cp->vlan_mappings[i]; 5062 5063 vlan_tbl_write(np, i, np->port, 5064 vp->vlan_pref, vp->rdc_num); 5065 } 5066 5067 for (i = 0; i < cp->num_alt_mac_mappings; i++) { 5068 struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i]; 5069 5070 err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num, 5071 ap->rdc_num, ap->mac_pref); 5072 if (err) 5073 return err; 5074 } 5075 5076 for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) { 5077 int index = i - CLASS_CODE_USER_PROG1; 5078 5079 err = niu_set_tcam_key(np, i, parent->tcam_key[index]); 5080 if (err) 5081 return err; 5082 err = niu_set_flow_key(np, i, parent->flow_key[index]); 5083 if (err) 5084 return err; 5085 } 5086 5087 err = niu_set_ip_frag_rule(np); 5088 if (err) 5089 return err; 5090 5091 tcam_enable(np, 1); 5092 5093 return 0; 5094} 5095 5096static int niu_zcp_write(struct niu *np, int index, u64 *data) 5097{ 5098 nw64(ZCP_RAM_DATA0, data[0]); 5099 nw64(ZCP_RAM_DATA1, data[1]); 5100 nw64(ZCP_RAM_DATA2, data[2]); 5101 nw64(ZCP_RAM_DATA3, data[3]); 5102 nw64(ZCP_RAM_DATA4, data[4]); 5103 nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL); 5104 nw64(ZCP_RAM_ACC, 5105 (ZCP_RAM_ACC_WRITE | 5106 (0 << ZCP_RAM_ACC_ZFCID_SHIFT) | 5107 
(ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT))); 5108 5109 return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, 5110 1000, 100); 5111} 5112 5113static int niu_zcp_read(struct niu *np, int index, u64 *data) 5114{ 5115 int err; 5116 5117 err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, 5118 1000, 100); 5119 if (err) { 5120 dev_err(np->device, PFX "%s: ZCP read busy won't clear, " 5121 "ZCP_RAM_ACC[%llx]\n", np->dev->name, 5122 (unsigned long long) nr64(ZCP_RAM_ACC)); 5123 return err; 5124 } 5125 5126 nw64(ZCP_RAM_ACC, 5127 (ZCP_RAM_ACC_READ | 5128 (0 << ZCP_RAM_ACC_ZFCID_SHIFT) | 5129 (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT))); 5130 5131 err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, 5132 1000, 100); 5133 if (err) { 5134 dev_err(np->device, PFX "%s: ZCP read busy2 won't clear, " 5135 "ZCP_RAM_ACC[%llx]\n", np->dev->name, 5136 (unsigned long long) nr64(ZCP_RAM_ACC)); 5137 return err; 5138 } 5139 5140 data[0] = nr64(ZCP_RAM_DATA0); 5141 data[1] = nr64(ZCP_RAM_DATA1); 5142 data[2] = nr64(ZCP_RAM_DATA2); 5143 data[3] = nr64(ZCP_RAM_DATA3); 5144 data[4] = nr64(ZCP_RAM_DATA4); 5145 5146 return 0; 5147} 5148 5149static void niu_zcp_cfifo_reset(struct niu *np) 5150{ 5151 u64 val = nr64(RESET_CFIFO); 5152 5153 val |= RESET_CFIFO_RST(np->port); 5154 nw64(RESET_CFIFO, val); 5155 udelay(10); 5156 5157 val &= ~RESET_CFIFO_RST(np->port); 5158 nw64(RESET_CFIFO, val); 5159} 5160 5161static int niu_init_zcp(struct niu *np) 5162{ 5163 u64 data[5], rbuf[5]; 5164 int i, max, err; 5165 5166 if (np->parent->plat_type != PLAT_TYPE_NIU) { 5167 if (np->port == 0 || np->port == 1) 5168 max = ATLAS_P0_P1_CFIFO_ENTRIES; 5169 else 5170 max = ATLAS_P2_P3_CFIFO_ENTRIES; 5171 } else 5172 max = NIU_CFIFO_ENTRIES; 5173 5174 data[0] = 0; 5175 data[1] = 0; 5176 data[2] = 0; 5177 data[3] = 0; 5178 data[4] = 0; 5179 5180 for (i = 0; i < max; i++) { 5181 err = niu_zcp_write(np, i, data); 5182 if (err) 5183 return err; 5184 err = niu_zcp_read(np, i, rbuf); 5185 if (err) 5186 return err; 5187 } 5188 5189 niu_zcp_cfifo_reset(np); 5190 nw64(CFIFO_ECC(np->port), 0); 5191 nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL); 5192 (void) nr64(ZCP_INT_STAT); 5193 nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL); 5194 5195 return 0; 5196} 5197 5198static void niu_ipp_write(struct niu *np, int index, u64 *data) 5199{ 5200 u64 val = nr64_ipp(IPP_CFIG); 5201 5202 nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W); 5203 nw64_ipp(IPP_DFIFO_WR_PTR, index); 5204 nw64_ipp(IPP_DFIFO_WR0, data[0]); 5205 nw64_ipp(IPP_DFIFO_WR1, data[1]); 5206 nw64_ipp(IPP_DFIFO_WR2, data[2]); 5207 nw64_ipp(IPP_DFIFO_WR3, data[3]); 5208 nw64_ipp(IPP_DFIFO_WR4, data[4]); 5209 nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W); 5210} 5211 5212static void niu_ipp_read(struct niu *np, int index, u64 *data) 5213{ 5214 nw64_ipp(IPP_DFIFO_RD_PTR, index); 5215 data[0] = nr64_ipp(IPP_DFIFO_RD0); 5216 data[1] = nr64_ipp(IPP_DFIFO_RD1); 5217 data[2] = nr64_ipp(IPP_DFIFO_RD2); 5218 data[3] = nr64_ipp(IPP_DFIFO_RD3); 5219 data[4] = nr64_ipp(IPP_DFIFO_RD4); 5220} 5221 5222static int niu_ipp_reset(struct niu *np) 5223{ 5224 return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST, 5225 1000, 100, "IPP_CFIG"); 5226} 5227 5228static int niu_init_ipp(struct niu *np) 5229{ 5230 u64 data[5], rbuf[5], val; 5231 int i, max, err; 5232 5233 if (np->parent->plat_type != PLAT_TYPE_NIU) { 5234 if (np->port == 0 || np->port == 1) 5235 max = ATLAS_P0_P1_DFIFO_ENTRIES; 5236 else 5237 max = ATLAS_P2_P3_DFIFO_ENTRIES; 5238 } else 5239 max = NIU_DFIFO_ENTRIES; 
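	/*
	 * Descriptive note on the loop below (an inference, not taken from
	 * the original sources): a zeroed five-word pattern is written to
	 * every DFIFO entry through the PIO window (niu_ipp_write) and
	 * immediately read back into a scratch buffer (niu_ipp_read).
	 * The read-back data is discarded; walking the whole FIFO this way
	 * presumably initializes the RAM contents (and any parity/ECC bits)
	 * before the IPP block is reset and enabled further down.
	 */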
5240 5241 data[0] = 0; 5242 data[1] = 0; 5243 data[2] = 0; 5244 data[3] = 0; 5245 data[4] = 0; 5246 5247 for (i = 0; i < max; i++) { 5248 niu_ipp_write(np, i, data); 5249 niu_ipp_read(np, i, rbuf); 5250 } 5251 5252 (void) nr64_ipp(IPP_INT_STAT); 5253 (void) nr64_ipp(IPP_INT_STAT); 5254 5255 err = niu_ipp_reset(np); 5256 if (err) 5257 return err; 5258 5259 (void) nr64_ipp(IPP_PKT_DIS); 5260 (void) nr64_ipp(IPP_BAD_CS_CNT); 5261 (void) nr64_ipp(IPP_ECC); 5262 5263 (void) nr64_ipp(IPP_INT_STAT); 5264 5265 nw64_ipp(IPP_MSK, ~IPP_MSK_ALL); 5266 5267 val = nr64_ipp(IPP_CFIG); 5268 val &= ~IPP_CFIG_IP_MAX_PKT; 5269 val |= (IPP_CFIG_IPP_ENABLE | 5270 IPP_CFIG_DFIFO_ECC_EN | 5271 IPP_CFIG_DROP_BAD_CRC | 5272 IPP_CFIG_CKSUM_EN | 5273 (0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT)); 5274 nw64_ipp(IPP_CFIG, val); 5275 5276 return 0; 5277} 5278 5279static void niu_handle_led(struct niu *np, int status) 5280{ 5281 u64 val; 5282 val = nr64_mac(XMAC_CONFIG); 5283 5284 if ((np->flags & NIU_FLAGS_10G) != 0 && 5285 (np->flags & NIU_FLAGS_FIBER) != 0) { 5286 if (status) { 5287 val |= XMAC_CONFIG_LED_POLARITY; 5288 val &= ~XMAC_CONFIG_FORCE_LED_ON; 5289 } else { 5290 val |= XMAC_CONFIG_FORCE_LED_ON; 5291 val &= ~XMAC_CONFIG_LED_POLARITY; 5292 } 5293 } 5294 5295 nw64_mac(XMAC_CONFIG, val); 5296} 5297 5298static void niu_init_xif_xmac(struct niu *np) 5299{ 5300 struct niu_link_config *lp = &np->link_config; 5301 u64 val; 5302 5303 if (np->flags & NIU_FLAGS_XCVR_SERDES) { 5304 val = nr64(MIF_CONFIG); 5305 val |= MIF_CONFIG_ATCA_GE; 5306 nw64(MIF_CONFIG, val); 5307 } 5308 5309 val = nr64_mac(XMAC_CONFIG); 5310 val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC; 5311 5312 val |= XMAC_CONFIG_TX_OUTPUT_EN; 5313 5314 if (lp->loopback_mode == LOOPBACK_MAC) { 5315 val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC; 5316 val |= XMAC_CONFIG_LOOPBACK; 5317 } else { 5318 val &= ~XMAC_CONFIG_LOOPBACK; 5319 } 5320 5321 if (np->flags & NIU_FLAGS_10G) { 5322 val &= ~XMAC_CONFIG_LFS_DISABLE; 5323 } else { 5324 val |= XMAC_CONFIG_LFS_DISABLE; 5325 if (!(np->flags & NIU_FLAGS_FIBER) && 5326 !(np->flags & NIU_FLAGS_XCVR_SERDES)) 5327 val |= XMAC_CONFIG_1G_PCS_BYPASS; 5328 else 5329 val &= ~XMAC_CONFIG_1G_PCS_BYPASS; 5330 } 5331 5332 val &= ~XMAC_CONFIG_10G_XPCS_BYPASS; 5333 5334 if (lp->active_speed == SPEED_100) 5335 val |= XMAC_CONFIG_SEL_CLK_25MHZ; 5336 else 5337 val &= ~XMAC_CONFIG_SEL_CLK_25MHZ; 5338 5339 nw64_mac(XMAC_CONFIG, val); 5340 5341 val = nr64_mac(XMAC_CONFIG); 5342 val &= ~XMAC_CONFIG_MODE_MASK; 5343 if (np->flags & NIU_FLAGS_10G) { 5344 val |= XMAC_CONFIG_MODE_XGMII; 5345 } else { 5346 if (lp->active_speed == SPEED_1000) 5347 val |= XMAC_CONFIG_MODE_GMII; 5348 else 5349 val |= XMAC_CONFIG_MODE_MII; 5350 } 5351 5352 nw64_mac(XMAC_CONFIG, val); 5353} 5354 5355static void niu_init_xif_bmac(struct niu *np) 5356{ 5357 struct niu_link_config *lp = &np->link_config; 5358 u64 val; 5359 5360 val = BMAC_XIF_CONFIG_TX_OUTPUT_EN; 5361 5362 if (lp->loopback_mode == LOOPBACK_MAC) 5363 val |= BMAC_XIF_CONFIG_MII_LOOPBACK; 5364 else 5365 val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK; 5366 5367 if (lp->active_speed == SPEED_1000) 5368 val |= BMAC_XIF_CONFIG_GMII_MODE; 5369 else 5370 val &= ~BMAC_XIF_CONFIG_GMII_MODE; 5371 5372 val &= ~(BMAC_XIF_CONFIG_LINK_LED | 5373 BMAC_XIF_CONFIG_LED_POLARITY); 5374 5375 if (!(np->flags & NIU_FLAGS_10G) && 5376 !(np->flags & NIU_FLAGS_FIBER) && 5377 lp->active_speed == SPEED_100) 5378 val |= BMAC_XIF_CONFIG_25MHZ_CLOCK; 5379 else 5380 val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK; 5381 5382 nw64_mac(BMAC_XIF_CONFIG, val); 5383} 5384 5385static void 
niu_init_xif(struct niu *np) 5386{ 5387 if (np->flags & NIU_FLAGS_XMAC) 5388 niu_init_xif_xmac(np); 5389 else 5390 niu_init_xif_bmac(np); 5391} 5392 5393static void niu_pcs_mii_reset(struct niu *np) 5394{ 5395 int limit = 1000; 5396 u64 val = nr64_pcs(PCS_MII_CTL); 5397 val |= PCS_MII_CTL_RST; 5398 nw64_pcs(PCS_MII_CTL, val); 5399 while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) { 5400 udelay(100); 5401 val = nr64_pcs(PCS_MII_CTL); 5402 } 5403} 5404 5405static void niu_xpcs_reset(struct niu *np) 5406{ 5407 int limit = 1000; 5408 u64 val = nr64_xpcs(XPCS_CONTROL1); 5409 val |= XPCS_CONTROL1_RESET; 5410 nw64_xpcs(XPCS_CONTROL1, val); 5411 while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) { 5412 udelay(100); 5413 val = nr64_xpcs(XPCS_CONTROL1); 5414 } 5415} 5416 5417static int niu_init_pcs(struct niu *np) 5418{ 5419 struct niu_link_config *lp = &np->link_config; 5420 u64 val; 5421 5422 switch (np->flags & (NIU_FLAGS_10G | 5423 NIU_FLAGS_FIBER | 5424 NIU_FLAGS_XCVR_SERDES)) { 5425 case NIU_FLAGS_FIBER: 5426 /* 1G fiber */ 5427 nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE); 5428 nw64_pcs(PCS_DPATH_MODE, 0); 5429 niu_pcs_mii_reset(np); 5430 break; 5431 5432 case NIU_FLAGS_10G: 5433 case NIU_FLAGS_10G | NIU_FLAGS_FIBER: 5434 case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES: 5435 /* 10G SERDES */ 5436 if (!(np->flags & NIU_FLAGS_XMAC)) 5437 return -EINVAL; 5438 5439 /* 10G copper or fiber */ 5440 val = nr64_mac(XMAC_CONFIG); 5441 val &= ~XMAC_CONFIG_10G_XPCS_BYPASS; 5442 nw64_mac(XMAC_CONFIG, val); 5443 5444 niu_xpcs_reset(np); 5445 5446 val = nr64_xpcs(XPCS_CONTROL1); 5447 if (lp->loopback_mode == LOOPBACK_PHY) 5448 val |= XPCS_CONTROL1_LOOPBACK; 5449 else 5450 val &= ~XPCS_CONTROL1_LOOPBACK; 5451 nw64_xpcs(XPCS_CONTROL1, val); 5452 5453 nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0); 5454 (void) nr64_xpcs(XPCS_SYMERR_CNT01); 5455 (void) nr64_xpcs(XPCS_SYMERR_CNT23); 5456 break; 5457 5458 5459 case NIU_FLAGS_XCVR_SERDES: 5460 /* 1G SERDES */ 5461 niu_pcs_mii_reset(np); 5462 nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE); 5463 nw64_pcs(PCS_DPATH_MODE, 0); 5464 break; 5465 5466 case 0: 5467 /* 1G copper */ 5468 case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER: 5469 /* 1G RGMII FIBER */ 5470 nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII); 5471 niu_pcs_mii_reset(np); 5472 break; 5473 5474 default: 5475 return -EINVAL; 5476 } 5477 5478 return 0; 5479} 5480 5481static int niu_reset_tx_xmac(struct niu *np) 5482{ 5483 return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST, 5484 (XTXMAC_SW_RST_REG_RS | 5485 XTXMAC_SW_RST_SOFT_RST), 5486 1000, 100, "XTXMAC_SW_RST"); 5487} 5488 5489static int niu_reset_tx_bmac(struct niu *np) 5490{ 5491 int limit; 5492 5493 nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET); 5494 limit = 1000; 5495 while (--limit >= 0) { 5496 if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET)) 5497 break; 5498 udelay(100); 5499 } 5500 if (limit < 0) { 5501 dev_err(np->device, PFX "Port %u TX BMAC would not reset, " 5502 "BTXMAC_SW_RST[%llx]\n", 5503 np->port, 5504 (unsigned long long) nr64_mac(BTXMAC_SW_RST)); 5505 return -ENODEV; 5506 } 5507 5508 return 0; 5509} 5510 5511static int niu_reset_tx_mac(struct niu *np) 5512{ 5513 if (np->flags & NIU_FLAGS_XMAC) 5514 return niu_reset_tx_xmac(np); 5515 else 5516 return niu_reset_tx_bmac(np); 5517} 5518 5519static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max) 5520{ 5521 u64 val; 5522 5523 val = nr64_mac(XMAC_MIN); 5524 val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE | 5525 XMAC_MIN_RX_MIN_PKT_SIZE); 5526 val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT); 5527 val |= (min 
<< XMAC_MIN_TX_MIN_PKT_SIZE_SHFT); 5528 nw64_mac(XMAC_MIN, val); 5529 5530 nw64_mac(XMAC_MAX, max); 5531 5532 nw64_mac(XTXMAC_STAT_MSK, ~(u64)0); 5533 5534 val = nr64_mac(XMAC_IPG); 5535 if (np->flags & NIU_FLAGS_10G) { 5536 val &= ~XMAC_IPG_IPG_XGMII; 5537 val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT); 5538 } else { 5539 val &= ~XMAC_IPG_IPG_MII_GMII; 5540 val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT); 5541 } 5542 nw64_mac(XMAC_IPG, val); 5543 5544 val = nr64_mac(XMAC_CONFIG); 5545 val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC | 5546 XMAC_CONFIG_STRETCH_MODE | 5547 XMAC_CONFIG_VAR_MIN_IPG_EN | 5548 XMAC_CONFIG_TX_ENABLE); 5549 nw64_mac(XMAC_CONFIG, val); 5550 5551 nw64_mac(TXMAC_FRM_CNT, 0); 5552 nw64_mac(TXMAC_BYTE_CNT, 0); 5553} 5554 5555static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max) 5556{ 5557 u64 val; 5558 5559 nw64_mac(BMAC_MIN_FRAME, min); 5560 nw64_mac(BMAC_MAX_FRAME, max); 5561 5562 nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0); 5563 nw64_mac(BMAC_CTRL_TYPE, 0x8808); 5564 nw64_mac(BMAC_PREAMBLE_SIZE, 7); 5565 5566 val = nr64_mac(BTXMAC_CONFIG); 5567 val &= ~(BTXMAC_CONFIG_FCS_DISABLE | 5568 BTXMAC_CONFIG_ENABLE); 5569 nw64_mac(BTXMAC_CONFIG, val); 5570} 5571 5572static void niu_init_tx_mac(struct niu *np) 5573{ 5574 u64 min, max; 5575 5576 min = 64; 5577 if (np->dev->mtu > ETH_DATA_LEN) 5578 max = 9216; 5579 else 5580 max = 1522; 5581 5582 /* The XMAC_MIN register only accepts values for TX min which 5583 * have the low 3 bits cleared. 5584 */ 5585 BUILD_BUG_ON(min & 0x7); 5586 5587 if (np->flags & NIU_FLAGS_XMAC) 5588 niu_init_tx_xmac(np, min, max); 5589 else 5590 niu_init_tx_bmac(np, min, max); 5591} 5592 5593static int niu_reset_rx_xmac(struct niu *np) 5594{ 5595 int limit; 5596 5597 nw64_mac(XRXMAC_SW_RST, 5598 XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST); 5599 limit = 1000; 5600 while (--limit >= 0) { 5601 if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS | 5602 XRXMAC_SW_RST_SOFT_RST))) 5603 break; 5604 udelay(100); 5605 } 5606 if (limit < 0) { 5607 dev_err(np->device, PFX "Port %u RX XMAC would not reset, " 5608 "XRXMAC_SW_RST[%llx]\n", 5609 np->port, 5610 (unsigned long long) nr64_mac(XRXMAC_SW_RST)); 5611 return -ENODEV; 5612 } 5613 5614 return 0; 5615} 5616 5617static int niu_reset_rx_bmac(struct niu *np) 5618{ 5619 int limit; 5620 5621 nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET); 5622 limit = 1000; 5623 while (--limit >= 0) { 5624 if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET)) 5625 break; 5626 udelay(100); 5627 } 5628 if (limit < 0) { 5629 dev_err(np->device, PFX "Port %u RX BMAC would not reset, " 5630 "BRXMAC_SW_RST[%llx]\n", 5631 np->port, 5632 (unsigned long long) nr64_mac(BRXMAC_SW_RST)); 5633 return -ENODEV; 5634 } 5635 5636 return 0; 5637} 5638 5639static int niu_reset_rx_mac(struct niu *np) 5640{ 5641 if (np->flags & NIU_FLAGS_XMAC) 5642 return niu_reset_rx_xmac(np); 5643 else 5644 return niu_reset_rx_bmac(np); 5645} 5646 5647static void niu_init_rx_xmac(struct niu *np) 5648{ 5649 struct niu_parent *parent = np->parent; 5650 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; 5651 int first_rdc_table = tp->first_table_num; 5652 unsigned long i; 5653 u64 val; 5654 5655 nw64_mac(XMAC_ADD_FILT0, 0); 5656 nw64_mac(XMAC_ADD_FILT1, 0); 5657 nw64_mac(XMAC_ADD_FILT2, 0); 5658 nw64_mac(XMAC_ADD_FILT12_MASK, 0); 5659 nw64_mac(XMAC_ADD_FILT00_MASK, 0); 5660 for (i = 0; i < MAC_NUM_HASH; i++) 5661 nw64_mac(XMAC_HASH_TBL(i), 0); 5662 nw64_mac(XRXMAC_STAT_MSK, ~(u64)0); 5663 niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); 5664 
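	/*
	 * Note (editorial inference): the primary and multicast MAC entries
	 * are both bound to this port's first RDC table, i.e. the table
	 * programmed in niu_init_rdc_groups(), which is what steers frames
	 * matching those addresses onto this port's receive DMA channels.
	 */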
niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); 5665 5666 val = nr64_mac(XMAC_CONFIG); 5667 val &= ~(XMAC_CONFIG_RX_MAC_ENABLE | 5668 XMAC_CONFIG_PROMISCUOUS | 5669 XMAC_CONFIG_PROMISC_GROUP | 5670 XMAC_CONFIG_ERR_CHK_DIS | 5671 XMAC_CONFIG_RX_CRC_CHK_DIS | 5672 XMAC_CONFIG_RESERVED_MULTICAST | 5673 XMAC_CONFIG_RX_CODEV_CHK_DIS | 5674 XMAC_CONFIG_ADDR_FILTER_EN | 5675 XMAC_CONFIG_RCV_PAUSE_ENABLE | 5676 XMAC_CONFIG_STRIP_CRC | 5677 XMAC_CONFIG_PASS_FLOW_CTRL | 5678 XMAC_CONFIG_MAC2IPP_PKT_CNT_EN); 5679 val |= (XMAC_CONFIG_HASH_FILTER_EN); 5680 nw64_mac(XMAC_CONFIG, val); 5681 5682 nw64_mac(RXMAC_BT_CNT, 0); 5683 nw64_mac(RXMAC_BC_FRM_CNT, 0); 5684 nw64_mac(RXMAC_MC_FRM_CNT, 0); 5685 nw64_mac(RXMAC_FRAG_CNT, 0); 5686 nw64_mac(RXMAC_HIST_CNT1, 0); 5687 nw64_mac(RXMAC_HIST_CNT2, 0); 5688 nw64_mac(RXMAC_HIST_CNT3, 0); 5689 nw64_mac(RXMAC_HIST_CNT4, 0); 5690 nw64_mac(RXMAC_HIST_CNT5, 0); 5691 nw64_mac(RXMAC_HIST_CNT6, 0); 5692 nw64_mac(RXMAC_HIST_CNT7, 0); 5693 nw64_mac(RXMAC_MPSZER_CNT, 0); 5694 nw64_mac(RXMAC_CRC_ER_CNT, 0); 5695 nw64_mac(RXMAC_CD_VIO_CNT, 0); 5696 nw64_mac(LINK_FAULT_CNT, 0); 5697} 5698 5699static void niu_init_rx_bmac(struct niu *np) 5700{ 5701 struct niu_parent *parent = np->parent; 5702 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; 5703 int first_rdc_table = tp->first_table_num; 5704 unsigned long i; 5705 u64 val; 5706 5707 nw64_mac(BMAC_ADD_FILT0, 0); 5708 nw64_mac(BMAC_ADD_FILT1, 0); 5709 nw64_mac(BMAC_ADD_FILT2, 0); 5710 nw64_mac(BMAC_ADD_FILT12_MASK, 0); 5711 nw64_mac(BMAC_ADD_FILT00_MASK, 0); 5712 for (i = 0; i < MAC_NUM_HASH; i++) 5713 nw64_mac(BMAC_HASH_TBL(i), 0); 5714 niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); 5715 niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); 5716 nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0); 5717 5718 val = nr64_mac(BRXMAC_CONFIG); 5719 val &= ~(BRXMAC_CONFIG_ENABLE | 5720 BRXMAC_CONFIG_STRIP_PAD | 5721 BRXMAC_CONFIG_STRIP_FCS | 5722 BRXMAC_CONFIG_PROMISC | 5723 BRXMAC_CONFIG_PROMISC_GRP | 5724 BRXMAC_CONFIG_ADDR_FILT_EN | 5725 BRXMAC_CONFIG_DISCARD_DIS); 5726 val |= (BRXMAC_CONFIG_HASH_FILT_EN); 5727 nw64_mac(BRXMAC_CONFIG, val); 5728 5729 val = nr64_mac(BMAC_ADDR_CMPEN); 5730 val |= BMAC_ADDR_CMPEN_EN0; 5731 nw64_mac(BMAC_ADDR_CMPEN, val); 5732} 5733 5734static void niu_init_rx_mac(struct niu *np) 5735{ 5736 niu_set_primary_mac(np, np->dev->dev_addr); 5737 5738 if (np->flags & NIU_FLAGS_XMAC) 5739 niu_init_rx_xmac(np); 5740 else 5741 niu_init_rx_bmac(np); 5742} 5743 5744static void niu_enable_tx_xmac(struct niu *np, int on) 5745{ 5746 u64 val = nr64_mac(XMAC_CONFIG); 5747 5748 if (on) 5749 val |= XMAC_CONFIG_TX_ENABLE; 5750 else 5751 val &= ~XMAC_CONFIG_TX_ENABLE; 5752 nw64_mac(XMAC_CONFIG, val); 5753} 5754 5755static void niu_enable_tx_bmac(struct niu *np, int on) 5756{ 5757 u64 val = nr64_mac(BTXMAC_CONFIG); 5758 5759 if (on) 5760 val |= BTXMAC_CONFIG_ENABLE; 5761 else 5762 val &= ~BTXMAC_CONFIG_ENABLE; 5763 nw64_mac(BTXMAC_CONFIG, val); 5764} 5765 5766static void niu_enable_tx_mac(struct niu *np, int on) 5767{ 5768 if (np->flags & NIU_FLAGS_XMAC) 5769 niu_enable_tx_xmac(np, on); 5770 else 5771 niu_enable_tx_bmac(np, on); 5772} 5773 5774static void niu_enable_rx_xmac(struct niu *np, int on) 5775{ 5776 u64 val = nr64_mac(XMAC_CONFIG); 5777 5778 val &= ~(XMAC_CONFIG_HASH_FILTER_EN | 5779 XMAC_CONFIG_PROMISCUOUS); 5780 5781 if (np->flags & NIU_FLAGS_MCAST) 5782 val |= XMAC_CONFIG_HASH_FILTER_EN; 5783 if (np->flags & NIU_FLAGS_PROMISC) 5784 val |= XMAC_CONFIG_PROMISCUOUS; 5785 5786 if (on) 5787 val |= 
XMAC_CONFIG_RX_MAC_ENABLE; 5788 else 5789 val &= ~XMAC_CONFIG_RX_MAC_ENABLE; 5790 nw64_mac(XMAC_CONFIG, val); 5791} 5792 5793static void niu_enable_rx_bmac(struct niu *np, int on) 5794{ 5795 u64 val = nr64_mac(BRXMAC_CONFIG); 5796 5797 val &= ~(BRXMAC_CONFIG_HASH_FILT_EN | 5798 BRXMAC_CONFIG_PROMISC); 5799 5800 if (np->flags & NIU_FLAGS_MCAST) 5801 val |= BRXMAC_CONFIG_HASH_FILT_EN; 5802 if (np->flags & NIU_FLAGS_PROMISC) 5803 val |= BRXMAC_CONFIG_PROMISC; 5804 5805 if (on) 5806 val |= BRXMAC_CONFIG_ENABLE; 5807 else 5808 val &= ~BRXMAC_CONFIG_ENABLE; 5809 nw64_mac(BRXMAC_CONFIG, val); 5810} 5811 5812static void niu_enable_rx_mac(struct niu *np, int on) 5813{ 5814 if (np->flags & NIU_FLAGS_XMAC) 5815 niu_enable_rx_xmac(np, on); 5816 else 5817 niu_enable_rx_bmac(np, on); 5818} 5819 5820static int niu_init_mac(struct niu *np) 5821{ 5822 int err; 5823 5824 niu_init_xif(np); 5825 err = niu_init_pcs(np); 5826 if (err) 5827 return err; 5828 5829 err = niu_reset_tx_mac(np); 5830 if (err) 5831 return err; 5832 niu_init_tx_mac(np); 5833 err = niu_reset_rx_mac(np); 5834 if (err) 5835 return err; 5836 niu_init_rx_mac(np); 5837 5838 /* This looks hookey but the RX MAC reset we just did will 5839 * undo some of the state we setup in niu_init_tx_mac() so we 5840 * have to call it again. In particular, the RX MAC reset will 5841 * set the XMAC_MAX register back to it's default value. 5842 */ 5843 niu_init_tx_mac(np); 5844 niu_enable_tx_mac(np, 1); 5845 5846 niu_enable_rx_mac(np, 1); 5847 5848 return 0; 5849} 5850 5851static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp) 5852{ 5853 (void) niu_tx_channel_stop(np, rp->tx_channel); 5854} 5855 5856static void niu_stop_tx_channels(struct niu *np) 5857{ 5858 int i; 5859 5860 for (i = 0; i < np->num_tx_rings; i++) { 5861 struct tx_ring_info *rp = &np->tx_rings[i]; 5862 5863 niu_stop_one_tx_channel(np, rp); 5864 } 5865} 5866 5867static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp) 5868{ 5869 (void) niu_tx_channel_reset(np, rp->tx_channel); 5870} 5871 5872static void niu_reset_tx_channels(struct niu *np) 5873{ 5874 int i; 5875 5876 for (i = 0; i < np->num_tx_rings; i++) { 5877 struct tx_ring_info *rp = &np->tx_rings[i]; 5878 5879 niu_reset_one_tx_channel(np, rp); 5880 } 5881} 5882 5883static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp) 5884{ 5885 (void) niu_enable_rx_channel(np, rp->rx_channel, 0); 5886} 5887 5888static void niu_stop_rx_channels(struct niu *np) 5889{ 5890 int i; 5891 5892 for (i = 0; i < np->num_rx_rings; i++) { 5893 struct rx_ring_info *rp = &np->rx_rings[i]; 5894 5895 niu_stop_one_rx_channel(np, rp); 5896 } 5897} 5898 5899static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp) 5900{ 5901 int channel = rp->rx_channel; 5902 5903 (void) niu_rx_channel_reset(np, channel); 5904 nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL); 5905 nw64(RX_DMA_CTL_STAT(channel), 0); 5906 (void) niu_enable_rx_channel(np, channel, 0); 5907} 5908 5909static void niu_reset_rx_channels(struct niu *np) 5910{ 5911 int i; 5912 5913 for (i = 0; i < np->num_rx_rings; i++) { 5914 struct rx_ring_info *rp = &np->rx_rings[i]; 5915 5916 niu_reset_one_rx_channel(np, rp); 5917 } 5918} 5919 5920static void niu_disable_ipp(struct niu *np) 5921{ 5922 u64 rd, wr, val; 5923 int limit; 5924 5925 rd = nr64_ipp(IPP_DFIFO_RD_PTR); 5926 wr = nr64_ipp(IPP_DFIFO_WR_PTR); 5927 limit = 100; 5928 while (--limit >= 0 && (rd != wr)) { 5929 rd = nr64_ipp(IPP_DFIFO_RD_PTR); 5930 wr = 
nr64_ipp(IPP_DFIFO_WR_PTR); 5931 } 5932 if (limit < 0 && 5933 (rd != 0 && wr != 1)) { 5934 dev_err(np->device, PFX "%s: IPP would not quiesce, " 5935 "rd_ptr[%llx] wr_ptr[%llx]\n", 5936 np->dev->name, 5937 (unsigned long long) nr64_ipp(IPP_DFIFO_RD_PTR), 5938 (unsigned long long) nr64_ipp(IPP_DFIFO_WR_PTR)); 5939 } 5940 5941 val = nr64_ipp(IPP_CFIG); 5942 val &= ~(IPP_CFIG_IPP_ENABLE | 5943 IPP_CFIG_DFIFO_ECC_EN | 5944 IPP_CFIG_DROP_BAD_CRC | 5945 IPP_CFIG_CKSUM_EN); 5946 nw64_ipp(IPP_CFIG, val); 5947 5948 (void) niu_ipp_reset(np); 5949} 5950 5951static int niu_init_hw(struct niu *np) 5952{ 5953 int i, err; 5954 5955 niudbg(IFUP, "%s: Initialize TXC\n", np->dev->name); 5956 niu_txc_enable_port(np, 1); 5957 niu_txc_port_dma_enable(np, 1); 5958 niu_txc_set_imask(np, 0); 5959 5960 niudbg(IFUP, "%s: Initialize TX channels\n", np->dev->name); 5961 for (i = 0; i < np->num_tx_rings; i++) { 5962 struct tx_ring_info *rp = &np->tx_rings[i]; 5963 5964 err = niu_init_one_tx_channel(np, rp); 5965 if (err) 5966 return err; 5967 } 5968 5969 niudbg(IFUP, "%s: Initialize RX channels\n", np->dev->name); 5970 err = niu_init_rx_channels(np); 5971 if (err) 5972 goto out_uninit_tx_channels; 5973 5974 niudbg(IFUP, "%s: Initialize classifier\n", np->dev->name); 5975 err = niu_init_classifier_hw(np); 5976 if (err) 5977 goto out_uninit_rx_channels; 5978 5979 niudbg(IFUP, "%s: Initialize ZCP\n", np->dev->name); 5980 err = niu_init_zcp(np); 5981 if (err) 5982 goto out_uninit_rx_channels; 5983 5984 niudbg(IFUP, "%s: Initialize IPP\n", np->dev->name); 5985 err = niu_init_ipp(np); 5986 if (err) 5987 goto out_uninit_rx_channels; 5988 5989 niudbg(IFUP, "%s: Initialize MAC\n", np->dev->name); 5990 err = niu_init_mac(np); 5991 if (err) 5992 goto out_uninit_ipp; 5993 5994 return 0; 5995 5996out_uninit_ipp: 5997 niudbg(IFUP, "%s: Uninit IPP\n", np->dev->name); 5998 niu_disable_ipp(np); 5999 6000out_uninit_rx_channels: 6001 niudbg(IFUP, "%s: Uninit RX channels\n", np->dev->name); 6002 niu_stop_rx_channels(np); 6003 niu_reset_rx_channels(np); 6004 6005out_uninit_tx_channels: 6006 niudbg(IFUP, "%s: Uninit TX channels\n", np->dev->name); 6007 niu_stop_tx_channels(np); 6008 niu_reset_tx_channels(np); 6009 6010 return err; 6011} 6012 6013static void niu_stop_hw(struct niu *np) 6014{ 6015 niudbg(IFDOWN, "%s: Disable interrupts\n", np->dev->name); 6016 niu_enable_interrupts(np, 0); 6017 6018 niudbg(IFDOWN, "%s: Disable RX MAC\n", np->dev->name); 6019 niu_enable_rx_mac(np, 0); 6020 6021 niudbg(IFDOWN, "%s: Disable IPP\n", np->dev->name); 6022 niu_disable_ipp(np); 6023 6024 niudbg(IFDOWN, "%s: Stop TX channels\n", np->dev->name); 6025 niu_stop_tx_channels(np); 6026 6027 niudbg(IFDOWN, "%s: Stop RX channels\n", np->dev->name); 6028 niu_stop_rx_channels(np); 6029 6030 niudbg(IFDOWN, "%s: Reset TX channels\n", np->dev->name); 6031 niu_reset_tx_channels(np); 6032 6033 niudbg(IFDOWN, "%s: Reset RX channels\n", np->dev->name); 6034 niu_reset_rx_channels(np); 6035} 6036 6037static void niu_set_irq_name(struct niu *np) 6038{ 6039 int port = np->port; 6040 int i, j = 1; 6041 6042 sprintf(np->irq_name[0], "%s:MAC", np->dev->name); 6043 6044 if (port == 0) { 6045 sprintf(np->irq_name[1], "%s:MIF", np->dev->name); 6046 sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name); 6047 j = 3; 6048 } 6049 6050 for (i = 0; i < np->num_ldg - j; i++) { 6051 if (i < np->num_rx_rings) 6052 sprintf(np->irq_name[i+j], "%s-rx-%d", 6053 np->dev->name, i); 6054 else if (i < np->num_tx_rings + np->num_rx_rings) 6055 sprintf(np->irq_name[i+j], "%s-tx-%d", 
np->dev->name, 6056 i - np->num_rx_rings); 6057 } 6058} 6059 6060static int niu_request_irq(struct niu *np) 6061{ 6062 int i, j, err; 6063 6064 niu_set_irq_name(np); 6065 6066 err = 0; 6067 for (i = 0; i < np->num_ldg; i++) { 6068 struct niu_ldg *lp = &np->ldg[i]; 6069 6070 err = request_irq(lp->irq, niu_interrupt, 6071 IRQF_SHARED | IRQF_SAMPLE_RANDOM, 6072 np->irq_name[i], lp); 6073 if (err) 6074 goto out_free_irqs; 6075 6076 } 6077 6078 return 0; 6079 6080out_free_irqs: 6081 for (j = 0; j < i; j++) { 6082 struct niu_ldg *lp = &np->ldg[j]; 6083 6084 free_irq(lp->irq, lp); 6085 } 6086 return err; 6087} 6088 6089static void niu_free_irq(struct niu *np) 6090{ 6091 int i; 6092 6093 for (i = 0; i < np->num_ldg; i++) { 6094 struct niu_ldg *lp = &np->ldg[i]; 6095 6096 free_irq(lp->irq, lp); 6097 } 6098} 6099 6100static void niu_enable_napi(struct niu *np) 6101{ 6102 int i; 6103 6104 for (i = 0; i < np->num_ldg; i++) 6105 napi_enable(&np->ldg[i].napi); 6106} 6107 6108static void niu_disable_napi(struct niu *np) 6109{ 6110 int i; 6111 6112 for (i = 0; i < np->num_ldg; i++) 6113 napi_disable(&np->ldg[i].napi); 6114} 6115 6116static int niu_open(struct net_device *dev) 6117{ 6118 struct niu *np = netdev_priv(dev); 6119 int err; 6120 6121 netif_carrier_off(dev); 6122 6123 err = niu_alloc_channels(np); 6124 if (err) 6125 goto out_err; 6126 6127 err = niu_enable_interrupts(np, 0); 6128 if (err) 6129 goto out_free_channels; 6130 6131 err = niu_request_irq(np); 6132 if (err) 6133 goto out_free_channels; 6134 6135 niu_enable_napi(np); 6136 6137 spin_lock_irq(&np->lock); 6138 6139 err = niu_init_hw(np); 6140 if (!err) { 6141 init_timer(&np->timer); 6142 np->timer.expires = jiffies + HZ; 6143 np->timer.data = (unsigned long) np; 6144 np->timer.function = niu_timer; 6145 6146 err = niu_enable_interrupts(np, 1); 6147 if (err) 6148 niu_stop_hw(np); 6149 } 6150 6151 spin_unlock_irq(&np->lock); 6152 6153 if (err) { 6154 niu_disable_napi(np); 6155 goto out_free_irq; 6156 } 6157 6158 netif_tx_start_all_queues(dev); 6159 6160 if (np->link_config.loopback_mode != LOOPBACK_DISABLED) 6161 netif_carrier_on(dev); 6162 6163 add_timer(&np->timer); 6164 6165 return 0; 6166 6167out_free_irq: 6168 niu_free_irq(np); 6169 6170out_free_channels: 6171 niu_free_channels(np); 6172 6173out_err: 6174 return err; 6175} 6176 6177static void niu_full_shutdown(struct niu *np, struct net_device *dev) 6178{ 6179 cancel_work_sync(&np->reset_task); 6180 6181 niu_disable_napi(np); 6182 netif_tx_stop_all_queues(dev); 6183 6184 del_timer_sync(&np->timer); 6185 6186 spin_lock_irq(&np->lock); 6187 6188 niu_stop_hw(np); 6189 6190 spin_unlock_irq(&np->lock); 6191} 6192 6193static int niu_close(struct net_device *dev) 6194{ 6195 struct niu *np = netdev_priv(dev); 6196 6197 niu_full_shutdown(np, dev); 6198 6199 niu_free_irq(np); 6200 6201 niu_free_channels(np); 6202 6203 niu_handle_led(np, 0); 6204 6205 return 0; 6206} 6207 6208static void niu_sync_xmac_stats(struct niu *np) 6209{ 6210 struct niu_xmac_stats *mp = &np->mac_stats.xmac; 6211 6212 mp->tx_frames += nr64_mac(TXMAC_FRM_CNT); 6213 mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT); 6214 6215 mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT); 6216 mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT); 6217 mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT); 6218 mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT); 6219 mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT); 6220 mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1); 6221 mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2); 6222 mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3); 6223 
mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4); 6224 mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5); 6225 mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6); 6226 mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7); 6227 mp->rx_octets += nr64_mac(RXMAC_BT_CNT); 6228 mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT); 6229 mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT); 6230 mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT); 6231} 6232 6233static void niu_sync_bmac_stats(struct niu *np) 6234{ 6235 struct niu_bmac_stats *mp = &np->mac_stats.bmac; 6236 6237 mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT); 6238 mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT); 6239 6240 mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT); 6241 mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT); 6242 mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT); 6243 mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT); 6244} 6245 6246static void niu_sync_mac_stats(struct niu *np) 6247{ 6248 if (np->flags & NIU_FLAGS_XMAC) 6249 niu_sync_xmac_stats(np); 6250 else 6251 niu_sync_bmac_stats(np); 6252} 6253 6254static void niu_get_rx_stats(struct niu *np) 6255{ 6256 unsigned long pkts, dropped, errors, bytes; 6257 int i; 6258 6259 pkts = dropped = errors = bytes = 0; 6260 for (i = 0; i < np->num_rx_rings; i++) { 6261 struct rx_ring_info *rp = &np->rx_rings[i]; 6262 6263 niu_sync_rx_discard_stats(np, rp, 0); 6264 6265 pkts += rp->rx_packets; 6266 bytes += rp->rx_bytes; 6267 dropped += rp->rx_dropped; 6268 errors += rp->rx_errors; 6269 } 6270 np->dev->stats.rx_packets = pkts; 6271 np->dev->stats.rx_bytes = bytes; 6272 np->dev->stats.rx_dropped = dropped; 6273 np->dev->stats.rx_errors = errors; 6274} 6275 6276static void niu_get_tx_stats(struct niu *np) 6277{ 6278 unsigned long pkts, errors, bytes; 6279 int i; 6280 6281 pkts = errors = bytes = 0; 6282 for (i = 0; i < np->num_tx_rings; i++) { 6283 struct tx_ring_info *rp = &np->tx_rings[i]; 6284 6285 pkts += rp->tx_packets; 6286 bytes += rp->tx_bytes; 6287 errors += rp->tx_errors; 6288 } 6289 np->dev->stats.tx_packets = pkts; 6290 np->dev->stats.tx_bytes = bytes; 6291 np->dev->stats.tx_errors = errors; 6292} 6293 6294static struct net_device_stats *niu_get_stats(struct net_device *dev) 6295{ 6296 struct niu *np = netdev_priv(dev); 6297 6298 niu_get_rx_stats(np); 6299 niu_get_tx_stats(np); 6300 6301 return &dev->stats; 6302} 6303 6304static void niu_load_hash_xmac(struct niu *np, u16 *hash) 6305{ 6306 int i; 6307 6308 for (i = 0; i < 16; i++) 6309 nw64_mac(XMAC_HASH_TBL(i), hash[i]); 6310} 6311 6312static void niu_load_hash_bmac(struct niu *np, u16 *hash) 6313{ 6314 int i; 6315 6316 for (i = 0; i < 16; i++) 6317 nw64_mac(BMAC_HASH_TBL(i), hash[i]); 6318} 6319 6320static void niu_load_hash(struct niu *np, u16 *hash) 6321{ 6322 if (np->flags & NIU_FLAGS_XMAC) 6323 niu_load_hash_xmac(np, hash); 6324 else 6325 niu_load_hash_bmac(np, hash); 6326} 6327 6328static void niu_set_rx_mode(struct net_device *dev) 6329{ 6330 struct niu *np = netdev_priv(dev); 6331 int i, alt_cnt, err; 6332 struct dev_addr_list *addr; 6333 unsigned long flags; 6334 u16 hash[16] = { 0, }; 6335 6336 spin_lock_irqsave(&np->lock, flags); 6337 niu_enable_rx_mac(np, 0); 6338 6339 np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC); 6340 if (dev->flags & IFF_PROMISC) 6341 np->flags |= NIU_FLAGS_PROMISC; 6342 if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 0)) 6343 np->flags |= NIU_FLAGS_MCAST; 6344 6345 alt_cnt = dev->uc_count; 6346 if (alt_cnt > niu_num_alt_addr(np)) { 6347 alt_cnt = 0; 6348 np->flags |= NIU_FLAGS_PROMISC; 6349 } 6350 6351 
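	/*
	 * Summary of the filtering logic below: if the unicast list fits,
	 * each address is programmed into one of the hardware's alternate
	 * MAC slots and enabled; otherwise the unused slots are disabled
	 * and the port falls back to promiscuous mode (flag set above).
	 * Multicast filtering uses the 16 x 16-bit hash registers: the top
	 * byte of the little-endian CRC of each address selects one bit.
	 * Worked example: if crc >> 24 == 0x4a, then hash[0x4] gets bit
	 * (15 - 0xa) == 5 set.
	 */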
if (alt_cnt) { 6352 int index = 0; 6353 6354 for (addr = dev->uc_list; addr; addr = addr->next) { 6355 err = niu_set_alt_mac(np, index, 6356 addr->da_addr); 6357 if (err) 6358 printk(KERN_WARNING PFX "%s: Error %d " 6359 "adding alt mac %d\n", 6360 dev->name, err, index); 6361 err = niu_enable_alt_mac(np, index, 1); 6362 if (err) 6363 printk(KERN_WARNING PFX "%s: Error %d " 6364 "enabling alt mac %d\n", 6365 dev->name, err, index); 6366 6367 index++; 6368 } 6369 } else { 6370 int alt_start; 6371 if (np->flags & NIU_FLAGS_XMAC) 6372 alt_start = 0; 6373 else 6374 alt_start = 1; 6375 for (i = alt_start; i < niu_num_alt_addr(np); i++) { 6376 err = niu_enable_alt_mac(np, i, 0); 6377 if (err) 6378 printk(KERN_WARNING PFX "%s: Error %d " 6379 "disabling alt mac %d\n", 6380 dev->name, err, i); 6381 } 6382 } 6383 if (dev->flags & IFF_ALLMULTI) { 6384 for (i = 0; i < 16; i++) 6385 hash[i] = 0xffff; 6386 } else if (dev->mc_count > 0) { 6387 for (addr = dev->mc_list; addr; addr = addr->next) { 6388 u32 crc = ether_crc_le(ETH_ALEN, addr->da_addr); 6389 6390 crc >>= 24; 6391 hash[crc >> 4] |= (1 << (15 - (crc & 0xf))); 6392 } 6393 } 6394 6395 if (np->flags & NIU_FLAGS_MCAST) 6396 niu_load_hash(np, hash); 6397 6398 niu_enable_rx_mac(np, 1); 6399 spin_unlock_irqrestore(&np->lock, flags); 6400} 6401 6402static int niu_set_mac_addr(struct net_device *dev, void *p) 6403{ 6404 struct niu *np = netdev_priv(dev); 6405 struct sockaddr *addr = p; 6406 unsigned long flags; 6407 6408 if (!is_valid_ether_addr(addr->sa_data)) 6409 return -EINVAL; 6410 6411 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 6412 6413 if (!netif_running(dev)) 6414 return 0; 6415 6416 spin_lock_irqsave(&np->lock, flags); 6417 niu_enable_rx_mac(np, 0); 6418 niu_set_primary_mac(np, dev->dev_addr); 6419 niu_enable_rx_mac(np, 1); 6420 spin_unlock_irqrestore(&np->lock, flags); 6421 6422 return 0; 6423} 6424 6425static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 6426{ 6427 return -EOPNOTSUPP; 6428} 6429 6430static void niu_netif_stop(struct niu *np) 6431{ 6432 np->dev->trans_start = jiffies; /* prevent tx timeout */ 6433 6434 niu_disable_napi(np); 6435 6436 netif_tx_disable(np->dev); 6437} 6438 6439static void niu_netif_start(struct niu *np) 6440{ 6441 /* NOTE: unconditional netif_wake_queue is only appropriate 6442 * so long as all callers are assured to have free tx slots 6443 * (such as after niu_init_hw). 
6444 */ 6445 netif_tx_wake_all_queues(np->dev); 6446 6447 niu_enable_napi(np); 6448 6449 niu_enable_interrupts(np, 1); 6450} 6451 6452static void niu_reset_buffers(struct niu *np) 6453{ 6454 int i, j, k, err; 6455 6456 if (np->rx_rings) { 6457 for (i = 0; i < np->num_rx_rings; i++) { 6458 struct rx_ring_info *rp = &np->rx_rings[i]; 6459 6460 for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) { 6461 struct page *page; 6462 6463 page = rp->rxhash[j]; 6464 while (page) { 6465 struct page *next = 6466 (struct page *) page->mapping; 6467 u64 base = page->index; 6468 base = base >> RBR_DESCR_ADDR_SHIFT; 6469 rp->rbr[k++] = cpu_to_le32(base); 6470 page = next; 6471 } 6472 } 6473 for (; k < MAX_RBR_RING_SIZE; k++) { 6474 err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k); 6475 if (unlikely(err)) 6476 break; 6477 } 6478 6479 rp->rbr_index = rp->rbr_table_size - 1; 6480 rp->rcr_index = 0; 6481 rp->rbr_pending = 0; 6482 rp->rbr_refill_pending = 0; 6483 } 6484 } 6485 if (np->tx_rings) { 6486 for (i = 0; i < np->num_tx_rings; i++) { 6487 struct tx_ring_info *rp = &np->tx_rings[i]; 6488 6489 for (j = 0; j < MAX_TX_RING_SIZE; j++) { 6490 if (rp->tx_buffs[j].skb) 6491 (void) release_tx_packet(np, rp, j); 6492 } 6493 6494 rp->pending = MAX_TX_RING_SIZE; 6495 rp->prod = 0; 6496 rp->cons = 0; 6497 rp->wrap_bit = 0; 6498 } 6499 } 6500} 6501 6502static void niu_reset_task(struct work_struct *work) 6503{ 6504 struct niu *np = container_of(work, struct niu, reset_task); 6505 unsigned long flags; 6506 int err; 6507 6508 spin_lock_irqsave(&np->lock, flags); 6509 if (!netif_running(np->dev)) { 6510 spin_unlock_irqrestore(&np->lock, flags); 6511 return; 6512 } 6513 6514 spin_unlock_irqrestore(&np->lock, flags); 6515 6516 del_timer_sync(&np->timer); 6517 6518 niu_netif_stop(np); 6519 6520 spin_lock_irqsave(&np->lock, flags); 6521 6522 niu_stop_hw(np); 6523 6524 spin_unlock_irqrestore(&np->lock, flags); 6525 6526 niu_reset_buffers(np); 6527 6528 spin_lock_irqsave(&np->lock, flags); 6529 6530 err = niu_init_hw(np); 6531 if (!err) { 6532 np->timer.expires = jiffies + HZ; 6533 add_timer(&np->timer); 6534 niu_netif_start(np); 6535 } 6536 6537 spin_unlock_irqrestore(&np->lock, flags); 6538} 6539 6540static void niu_tx_timeout(struct net_device *dev) 6541{ 6542 struct niu *np = netdev_priv(dev); 6543 6544 dev_err(np->device, PFX "%s: Transmit timed out, resetting\n", 6545 dev->name); 6546 6547 schedule_work(&np->reset_task); 6548} 6549 6550static void niu_set_txd(struct tx_ring_info *rp, int index, 6551 u64 mapping, u64 len, u64 mark, 6552 u64 n_frags) 6553{ 6554 __le64 *desc = &rp->descr[index]; 6555 6556 *desc = cpu_to_le64(mark | 6557 (n_frags << TX_DESC_NUM_PTR_SHIFT) | 6558 (len << TX_DESC_TR_LEN_SHIFT) | 6559 (mapping & TX_DESC_SAD)); 6560} 6561 6562static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr, 6563 u64 pad_bytes, u64 len) 6564{ 6565 u16 eth_proto, eth_proto_inner; 6566 u64 csum_bits, l3off, ihl, ret; 6567 u8 ip_proto; 6568 int ipv6; 6569 6570 eth_proto = be16_to_cpu(ehdr->h_proto); 6571 eth_proto_inner = eth_proto; 6572 if (eth_proto == ETH_P_8021Q) { 6573 struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr; 6574 __be16 val = vp->h_vlan_encapsulated_proto; 6575 6576 eth_proto_inner = be16_to_cpu(val); 6577 } 6578 6579 ipv6 = ihl = 0; 6580 switch (skb->protocol) { 6581 case cpu_to_be16(ETH_P_IP): 6582 ip_proto = ip_hdr(skb)->protocol; 6583 ihl = ip_hdr(skb)->ihl; 6584 break; 6585 case cpu_to_be16(ETH_P_IPV6): 6586 ip_proto = ipv6_hdr(skb)->nexthdr; 6587 ihl = (40 >> 2); 6588 ipv6 = 1; 6589 break; 
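	/*
	 * Note on the IPv6 case above: IPv6 has no IHL field and its base
	 * header is a fixed 40 bytes, so the value fed into the header's
	 * IHL slot is 40 >> 2 == 10, i.e. the header length in 32-bit
	 * words, matching the units of the IPv4 ihl taken in the case
	 * above it.
	 */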
6590 default: 6591 ip_proto = ihl = 0; 6592 break; 6593 } 6594 6595 csum_bits = TXHDR_CSUM_NONE; 6596 if (skb->ip_summed == CHECKSUM_PARTIAL) { 6597 u64 start, stuff; 6598 6599 csum_bits = (ip_proto == IPPROTO_TCP ? 6600 TXHDR_CSUM_TCP : 6601 (ip_proto == IPPROTO_UDP ? 6602 TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP)); 6603 6604 start = skb_transport_offset(skb) - 6605 (pad_bytes + sizeof(struct tx_pkt_hdr)); 6606 stuff = start + skb->csum_offset; 6607 6608 csum_bits |= (start / 2) << TXHDR_L4START_SHIFT; 6609 csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT; 6610 } 6611 6612 l3off = skb_network_offset(skb) - 6613 (pad_bytes + sizeof(struct tx_pkt_hdr)); 6614 6615 ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) | 6616 (len << TXHDR_LEN_SHIFT) | 6617 ((l3off / 2) << TXHDR_L3START_SHIFT) | 6618 (ihl << TXHDR_IHL_SHIFT) | 6619 ((eth_proto_inner < 1536) ? TXHDR_LLC : 0) | 6620 ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) | 6621 (ipv6 ? TXHDR_IP_VER : 0) | 6622 csum_bits); 6623 6624 return ret; 6625} 6626 6627static int niu_start_xmit(struct sk_buff *skb, struct net_device *dev) 6628{ 6629 struct niu *np = netdev_priv(dev); 6630 unsigned long align, headroom; 6631 struct netdev_queue *txq; 6632 struct tx_ring_info *rp; 6633 struct tx_pkt_hdr *tp; 6634 unsigned int len, nfg; 6635 struct ethhdr *ehdr; 6636 int prod, i, tlen; 6637 u64 mapping, mrk; 6638 6639 i = skb_get_queue_mapping(skb); 6640 rp = &np->tx_rings[i]; 6641 txq = netdev_get_tx_queue(dev, i); 6642 6643 if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) { 6644 netif_tx_stop_queue(txq); 6645 dev_err(np->device, PFX "%s: BUG! Tx ring full when " 6646 "queue awake!\n", dev->name); 6647 rp->tx_errors++; 6648 return NETDEV_TX_BUSY; 6649 } 6650 6651 if (skb->len < ETH_ZLEN) { 6652 unsigned int pad_bytes = ETH_ZLEN - skb->len; 6653 6654 if (skb_pad(skb, pad_bytes)) 6655 goto out; 6656 skb_put(skb, pad_bytes); 6657 } 6658 6659 len = sizeof(struct tx_pkt_hdr) + 15; 6660 if (skb_headroom(skb) < len) { 6661 struct sk_buff *skb_new; 6662 6663 skb_new = skb_realloc_headroom(skb, len); 6664 if (!skb_new) { 6665 rp->tx_errors++; 6666 goto out_drop; 6667 } 6668 kfree_skb(skb); 6669 skb = skb_new; 6670 } else 6671 skb_orphan(skb); 6672 6673 align = ((unsigned long) skb->data & (16 - 1)); 6674 headroom = align + sizeof(struct tx_pkt_hdr); 6675 6676 ehdr = (struct ethhdr *) skb->data; 6677 tp = (struct tx_pkt_hdr *) skb_push(skb, headroom); 6678 6679 len = skb->len - sizeof(struct tx_pkt_hdr); 6680 tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len)); 6681 tp->resv = 0; 6682 6683 len = skb_headlen(skb); 6684 mapping = np->ops->map_single(np->device, skb->data, 6685 len, DMA_TO_DEVICE); 6686 6687 prod = rp->prod; 6688 6689 rp->tx_buffs[prod].skb = skb; 6690 rp->tx_buffs[prod].mapping = mapping; 6691 6692 mrk = TX_DESC_SOP; 6693 if (++rp->mark_counter == rp->mark_freq) { 6694 rp->mark_counter = 0; 6695 mrk |= TX_DESC_MARK; 6696 rp->mark_pending++; 6697 } 6698 6699 tlen = len; 6700 nfg = skb_shinfo(skb)->nr_frags; 6701 while (tlen > 0) { 6702 tlen -= MAX_TX_DESC_LEN; 6703 nfg++; 6704 } 6705 6706 while (len > 0) { 6707 unsigned int this_len = len; 6708 6709 if (this_len > MAX_TX_DESC_LEN) 6710 this_len = MAX_TX_DESC_LEN; 6711 6712 niu_set_txd(rp, prod, mapping, this_len, mrk, nfg); 6713 mrk = nfg = 0; 6714 6715 prod = NEXT_TX(rp, prod); 6716 mapping += this_len; 6717 len -= this_len; 6718 } 6719 6720 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 6721 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 6722 6723 len = frag->size; 6724 mapping = 
np->ops->map_page(np->device, frag->page, 6725 frag->page_offset, len, 6726 DMA_TO_DEVICE); 6727 6728 rp->tx_buffs[prod].skb = NULL; 6729 rp->tx_buffs[prod].mapping = mapping; 6730 6731 niu_set_txd(rp, prod, mapping, len, 0, 0); 6732 6733 prod = NEXT_TX(rp, prod); 6734 } 6735 6736 if (prod < rp->prod) 6737 rp->wrap_bit ^= TX_RING_KICK_WRAP; 6738 rp->prod = prod; 6739 6740 nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3)); 6741 6742 if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) { 6743 netif_tx_stop_queue(txq); 6744 if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)) 6745 netif_tx_wake_queue(txq); 6746 } 6747 6748 dev->trans_start = jiffies; 6749 6750out: 6751 return NETDEV_TX_OK; 6752 6753out_drop: 6754 rp->tx_errors++; 6755 kfree_skb(skb); 6756 goto out; 6757} 6758 6759static int niu_change_mtu(struct net_device *dev, int new_mtu) 6760{ 6761 struct niu *np = netdev_priv(dev); 6762 int err, orig_jumbo, new_jumbo; 6763 6764 if (new_mtu < 68 || new_mtu > NIU_MAX_MTU) 6765 return -EINVAL; 6766 6767 orig_jumbo = (dev->mtu > ETH_DATA_LEN); 6768 new_jumbo = (new_mtu > ETH_DATA_LEN); 6769 6770 dev->mtu = new_mtu; 6771 6772 if (!netif_running(dev) || 6773 (orig_jumbo == new_jumbo)) 6774 return 0; 6775 6776 niu_full_shutdown(np, dev); 6777 6778 niu_free_channels(np); 6779 6780 niu_enable_napi(np); 6781 6782 err = niu_alloc_channels(np); 6783 if (err) 6784 return err; 6785 6786 spin_lock_irq(&np->lock); 6787 6788 err = niu_init_hw(np); 6789 if (!err) { 6790 init_timer(&np->timer); 6791 np->timer.expires = jiffies + HZ; 6792 np->timer.data = (unsigned long) np; 6793 np->timer.function = niu_timer; 6794 6795 err = niu_enable_interrupts(np, 1); 6796 if (err) 6797 niu_stop_hw(np); 6798 } 6799 6800 spin_unlock_irq(&np->lock); 6801 6802 if (!err) { 6803 netif_tx_start_all_queues(dev); 6804 if (np->link_config.loopback_mode != LOOPBACK_DISABLED) 6805 netif_carrier_on(dev); 6806 6807 add_timer(&np->timer); 6808 } 6809 6810 return err; 6811} 6812 6813static void niu_get_drvinfo(struct net_device *dev, 6814 struct ethtool_drvinfo *info) 6815{ 6816 struct niu *np = netdev_priv(dev); 6817 struct niu_vpd *vpd = &np->vpd; 6818 6819 strcpy(info->driver, DRV_MODULE_NAME); 6820 strcpy(info->version, DRV_MODULE_VERSION); 6821 sprintf(info->fw_version, "%d.%d", 6822 vpd->fcode_major, vpd->fcode_minor); 6823 if (np->parent->plat_type != PLAT_TYPE_NIU) 6824 strcpy(info->bus_info, pci_name(np->pdev)); 6825} 6826 6827static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 6828{ 6829 struct niu *np = netdev_priv(dev); 6830 struct niu_link_config *lp; 6831 6832 lp = &np->link_config; 6833 6834 memset(cmd, 0, sizeof(*cmd)); 6835 cmd->phy_address = np->phy_addr; 6836 cmd->supported = lp->supported; 6837 cmd->advertising = lp->active_advertising; 6838 cmd->autoneg = lp->active_autoneg; 6839 cmd->speed = lp->active_speed; 6840 cmd->duplex = lp->active_duplex; 6841 cmd->port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP; 6842 cmd->transceiver = (np->flags & NIU_FLAGS_XCVR_SERDES) ? 
6843 XCVR_EXTERNAL : XCVR_INTERNAL; 6844 6845 return 0; 6846} 6847 6848static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 6849{ 6850 struct niu *np = netdev_priv(dev); 6851 struct niu_link_config *lp = &np->link_config; 6852 6853 lp->advertising = cmd->advertising; 6854 lp->speed = cmd->speed; 6855 lp->duplex = cmd->duplex; 6856 lp->autoneg = cmd->autoneg; 6857 return niu_init_link(np); 6858} 6859 6860static u32 niu_get_msglevel(struct net_device *dev) 6861{ 6862 struct niu *np = netdev_priv(dev); 6863 return np->msg_enable; 6864} 6865 6866static void niu_set_msglevel(struct net_device *dev, u32 value) 6867{ 6868 struct niu *np = netdev_priv(dev); 6869 np->msg_enable = value; 6870} 6871 6872static int niu_nway_reset(struct net_device *dev) 6873{ 6874 struct niu *np = netdev_priv(dev); 6875 6876 if (np->link_config.autoneg) 6877 return niu_init_link(np); 6878 6879 return 0; 6880} 6881 6882static int niu_get_eeprom_len(struct net_device *dev) 6883{ 6884 struct niu *np = netdev_priv(dev); 6885 6886 return np->eeprom_len; 6887} 6888 6889static int niu_get_eeprom(struct net_device *dev, 6890 struct ethtool_eeprom *eeprom, u8 *data) 6891{ 6892 struct niu *np = netdev_priv(dev); 6893 u32 offset, len, val; 6894 6895 offset = eeprom->offset; 6896 len = eeprom->len; 6897 6898 if (offset + len < offset) 6899 return -EINVAL; 6900 if (offset >= np->eeprom_len) 6901 return -EINVAL; 6902 if (offset + len > np->eeprom_len) 6903 len = eeprom->len = np->eeprom_len - offset; 6904 6905 if (offset & 3) { 6906 u32 b_offset, b_count; 6907 6908 b_offset = offset & 3; 6909 b_count = 4 - b_offset; 6910 if (b_count > len) 6911 b_count = len; 6912 6913 val = nr64(ESPC_NCR((offset - b_offset) / 4)); 6914 memcpy(data, ((char *)&val) + b_offset, b_count); 6915 data += b_count; 6916 len -= b_count; 6917 offset += b_count; 6918 } 6919 while (len >= 4) { 6920 val = nr64(ESPC_NCR(offset / 4)); 6921 memcpy(data, &val, 4); 6922 data += 4; 6923 len -= 4; 6924 offset += 4; 6925 } 6926 if (len) { 6927 val = nr64(ESPC_NCR(offset / 4)); 6928 memcpy(data, &val, len); 6929 } 6930 return 0; 6931} 6932 6933static void niu_ethflow_to_l3proto(int flow_type, u8 *pid) 6934{ 6935 switch (flow_type) { 6936 case TCP_V4_FLOW: 6937 case TCP_V6_FLOW: 6938 *pid = IPPROTO_TCP; 6939 break; 6940 case UDP_V4_FLOW: 6941 case UDP_V6_FLOW: 6942 *pid = IPPROTO_UDP; 6943 break; 6944 case SCTP_V4_FLOW: 6945 case SCTP_V6_FLOW: 6946 *pid = IPPROTO_SCTP; 6947 break; 6948 case AH_V4_FLOW: 6949 case AH_V6_FLOW: 6950 *pid = IPPROTO_AH; 6951 break; 6952 case ESP_V4_FLOW: 6953 case ESP_V6_FLOW: 6954 *pid = IPPROTO_ESP; 6955 break; 6956 default: 6957 *pid = 0; 6958 break; 6959 } 6960} 6961 6962static int niu_class_to_ethflow(u64 class, int *flow_type) 6963{ 6964 switch (class) { 6965 case CLASS_CODE_TCP_IPV4: 6966 *flow_type = TCP_V4_FLOW; 6967 break; 6968 case CLASS_CODE_UDP_IPV4: 6969 *flow_type = UDP_V4_FLOW; 6970 break; 6971 case CLASS_CODE_AH_ESP_IPV4: 6972 *flow_type = AH_V4_FLOW; 6973 break; 6974 case CLASS_CODE_SCTP_IPV4: 6975 *flow_type = SCTP_V4_FLOW; 6976 break; 6977 case CLASS_CODE_TCP_IPV6: 6978 *flow_type = TCP_V6_FLOW; 6979 break; 6980 case CLASS_CODE_UDP_IPV6: 6981 *flow_type = UDP_V6_FLOW; 6982 break; 6983 case CLASS_CODE_AH_ESP_IPV6: 6984 *flow_type = AH_V6_FLOW; 6985 break; 6986 case CLASS_CODE_SCTP_IPV6: 6987 *flow_type = SCTP_V6_FLOW; 6988 break; 6989 case CLASS_CODE_USER_PROG1: 6990 case CLASS_CODE_USER_PROG2: 6991 case CLASS_CODE_USER_PROG3: 6992 case CLASS_CODE_USER_PROG4: 6993 *flow_type = IP_USER_FLOW; 6994 
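/* All four user-programmable TCAM classes are reported to ethtool as
			 * IP_USER_FLOW; which protocol a given class actually matches is
			 * tracked in parent->l3_cls_pid[] when the class is programmed
			 * (see niu_add_ethtool_tcam_entry() below).
			 */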
break; 6995 default: 6996 return 0; 6997 } 6998 6999 return 1; 7000} 7001 7002static int niu_ethflow_to_class(int flow_type, u64 *class) 7003{ 7004 switch (flow_type) { 7005 case TCP_V4_FLOW: 7006 *class = CLASS_CODE_TCP_IPV4; 7007 break; 7008 case UDP_V4_FLOW: 7009 *class = CLASS_CODE_UDP_IPV4; 7010 break; 7011 case AH_V4_FLOW: 7012 case ESP_V4_FLOW: 7013 *class = CLASS_CODE_AH_ESP_IPV4; 7014 break; 7015 case SCTP_V4_FLOW: 7016 *class = CLASS_CODE_SCTP_IPV4; 7017 break; 7018 case TCP_V6_FLOW: 7019 *class = CLASS_CODE_TCP_IPV6; 7020 break; 7021 case UDP_V6_FLOW: 7022 *class = CLASS_CODE_UDP_IPV6; 7023 break; 7024 case AH_V6_FLOW: 7025 case ESP_V6_FLOW: 7026 *class = CLASS_CODE_AH_ESP_IPV6; 7027 break; 7028 case SCTP_V6_FLOW: 7029 *class = CLASS_CODE_SCTP_IPV6; 7030 break; 7031 default: 7032 return 0; 7033 } 7034 7035 return 1; 7036} 7037 7038static u64 niu_flowkey_to_ethflow(u64 flow_key) 7039{ 7040 u64 ethflow = 0; 7041 7042 if (flow_key & FLOW_KEY_L2DA) 7043 ethflow |= RXH_L2DA; 7044 if (flow_key & FLOW_KEY_VLAN) 7045 ethflow |= RXH_VLAN; 7046 if (flow_key & FLOW_KEY_IPSA) 7047 ethflow |= RXH_IP_SRC; 7048 if (flow_key & FLOW_KEY_IPDA) 7049 ethflow |= RXH_IP_DST; 7050 if (flow_key & FLOW_KEY_PROTO) 7051 ethflow |= RXH_L3_PROTO; 7052 if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT)) 7053 ethflow |= RXH_L4_B_0_1; 7054 if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT)) 7055 ethflow |= RXH_L4_B_2_3; 7056 7057 return ethflow; 7058 7059} 7060 7061static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key) 7062{ 7063 u64 key = 0; 7064 7065 if (ethflow & RXH_L2DA) 7066 key |= FLOW_KEY_L2DA; 7067 if (ethflow & RXH_VLAN) 7068 key |= FLOW_KEY_VLAN; 7069 if (ethflow & RXH_IP_SRC) 7070 key |= FLOW_KEY_IPSA; 7071 if (ethflow & RXH_IP_DST) 7072 key |= FLOW_KEY_IPDA; 7073 if (ethflow & RXH_L3_PROTO) 7074 key |= FLOW_KEY_PROTO; 7075 if (ethflow & RXH_L4_B_0_1) 7076 key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT); 7077 if (ethflow & RXH_L4_B_2_3) 7078 key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT); 7079 7080 *flow_key = key; 7081 7082 return 1; 7083 7084} 7085 7086static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc) 7087{ 7088 u64 class; 7089 7090 nfc->data = 0; 7091 7092 if (!niu_ethflow_to_class(nfc->flow_type, &class)) 7093 return -EINVAL; 7094 7095 if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] & 7096 TCAM_KEY_DISC) 7097 nfc->data = RXH_DISCARD; 7098 else 7099 nfc->data = niu_flowkey_to_ethflow(np->parent->flow_key[class - 7100 CLASS_CODE_USER_PROG1]); 7101 return 0; 7102} 7103 7104static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp, 7105 struct ethtool_rx_flow_spec *fsp) 7106{ 7107 7108 fsp->h_u.tcp_ip4_spec.ip4src = (tp->key[3] & TCAM_V4KEY3_SADDR) >> 7109 TCAM_V4KEY3_SADDR_SHIFT; 7110 fsp->h_u.tcp_ip4_spec.ip4dst = (tp->key[3] & TCAM_V4KEY3_DADDR) >> 7111 TCAM_V4KEY3_DADDR_SHIFT; 7112 fsp->m_u.tcp_ip4_spec.ip4src = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >> 7113 TCAM_V4KEY3_SADDR_SHIFT; 7114 fsp->m_u.tcp_ip4_spec.ip4dst = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >> 7115 TCAM_V4KEY3_DADDR_SHIFT; 7116 7117 fsp->h_u.tcp_ip4_spec.ip4src = 7118 cpu_to_be32(fsp->h_u.tcp_ip4_spec.ip4src); 7119 fsp->m_u.tcp_ip4_spec.ip4src = 7120 cpu_to_be32(fsp->m_u.tcp_ip4_spec.ip4src); 7121 fsp->h_u.tcp_ip4_spec.ip4dst = 7122 cpu_to_be32(fsp->h_u.tcp_ip4_spec.ip4dst); 7123 fsp->m_u.tcp_ip4_spec.ip4dst = 7124 cpu_to_be32(fsp->m_u.tcp_ip4_spec.ip4dst); 7125 7126 fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >> 7127 TCAM_V4KEY2_TOS_SHIFT; 7128 
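	/* tp->key_mask[] mirrors tp->key[]: every field pulled out of the
	 * key words in this function has a matching extraction from the
	 * mask words into the m_u half of the flow spec.
	 */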
fsp->m_u.tcp_ip4_spec.tos = (tp->key_mask[2] & TCAM_V4KEY2_TOS) >> 7129 TCAM_V4KEY2_TOS_SHIFT; 7130 7131 switch (fsp->flow_type) { 7132 case TCP_V4_FLOW: 7133 case UDP_V4_FLOW: 7134 case SCTP_V4_FLOW: 7135 fsp->h_u.tcp_ip4_spec.psrc = 7136 ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> 7137 TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16; 7138 fsp->h_u.tcp_ip4_spec.pdst = 7139 ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> 7140 TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff; 7141 fsp->m_u.tcp_ip4_spec.psrc = 7142 ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> 7143 TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16; 7144 fsp->m_u.tcp_ip4_spec.pdst = 7145 ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> 7146 TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff; 7147 7148 fsp->h_u.tcp_ip4_spec.psrc = 7149 cpu_to_be16(fsp->h_u.tcp_ip4_spec.psrc); 7150 fsp->h_u.tcp_ip4_spec.pdst = 7151 cpu_to_be16(fsp->h_u.tcp_ip4_spec.pdst); 7152 fsp->m_u.tcp_ip4_spec.psrc = 7153 cpu_to_be16(fsp->m_u.tcp_ip4_spec.psrc); 7154 fsp->m_u.tcp_ip4_spec.pdst = 7155 cpu_to_be16(fsp->m_u.tcp_ip4_spec.pdst); 7156 break; 7157 case AH_V4_FLOW: 7158 case ESP_V4_FLOW: 7159 fsp->h_u.ah_ip4_spec.spi = 7160 (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> 7161 TCAM_V4KEY2_PORT_SPI_SHIFT; 7162 fsp->m_u.ah_ip4_spec.spi = 7163 (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> 7164 TCAM_V4KEY2_PORT_SPI_SHIFT; 7165 7166 fsp->h_u.ah_ip4_spec.spi = 7167 cpu_to_be32(fsp->h_u.ah_ip4_spec.spi); 7168 fsp->m_u.ah_ip4_spec.spi = 7169 cpu_to_be32(fsp->m_u.ah_ip4_spec.spi); 7170 break; 7171 case IP_USER_FLOW: 7172 fsp->h_u.usr_ip4_spec.l4_4_bytes = 7173 (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> 7174 TCAM_V4KEY2_PORT_SPI_SHIFT; 7175 fsp->m_u.usr_ip4_spec.l4_4_bytes = 7176 (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> 7177 TCAM_V4KEY2_PORT_SPI_SHIFT; 7178 7179 fsp->h_u.usr_ip4_spec.l4_4_bytes = 7180 cpu_to_be32(fsp->h_u.usr_ip4_spec.l4_4_bytes); 7181 fsp->m_u.usr_ip4_spec.l4_4_bytes = 7182 cpu_to_be32(fsp->m_u.usr_ip4_spec.l4_4_bytes); 7183 7184 fsp->h_u.usr_ip4_spec.proto = 7185 (tp->key[2] & TCAM_V4KEY2_PROTO) >> 7186 TCAM_V4KEY2_PROTO_SHIFT; 7187 fsp->m_u.usr_ip4_spec.proto = 7188 (tp->key_mask[2] & TCAM_V4KEY2_PROTO) >> 7189 TCAM_V4KEY2_PROTO_SHIFT; 7190 7191 fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; 7192 break; 7193 default: 7194 break; 7195 } 7196} 7197 7198static int niu_get_ethtool_tcam_entry(struct niu *np, 7199 struct ethtool_rxnfc *nfc) 7200{ 7201 struct niu_parent *parent = np->parent; 7202 struct niu_tcam_entry *tp; 7203 struct ethtool_rx_flow_spec *fsp = &nfc->fs; 7204 u16 idx; 7205 u64 class; 7206 int ret = 0; 7207 7208 idx = tcam_get_index(np, (u16)nfc->fs.location); 7209 7210 tp = &parent->tcam[idx]; 7211 if (!tp->valid) { 7212 pr_info(PFX "niu%d: %s entry [%d] invalid for idx[%d]\n", 7213 parent->index, np->dev->name, (u16)nfc->fs.location, idx); 7214 return -EINVAL; 7215 } 7216 7217 /* fill the flow spec entry */ 7218 class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >> 7219 TCAM_V4KEY0_CLASS_CODE_SHIFT; 7220 ret = niu_class_to_ethflow(class, &fsp->flow_type); 7221 7222 if (ret < 0) { 7223 pr_info(PFX "niu%d: %s niu_class_to_ethflow failed\n", 7224 parent->index, np->dev->name); 7225 ret = -EINVAL; 7226 goto out; 7227 } 7228 7229 if (fsp->flow_type == AH_V4_FLOW || fsp->flow_type == AH_V6_FLOW) { 7230 u32 proto = (tp->key[2] & TCAM_V4KEY2_PROTO) >> 7231 TCAM_V4KEY2_PROTO_SHIFT; 7232 if (proto == IPPROTO_ESP) { 7233 if (fsp->flow_type == AH_V4_FLOW) 7234 fsp->flow_type = ESP_V4_FLOW; 7235 else 7236 fsp->flow_type = ESP_V6_FLOW; 7237 } 7238 } 7239 7240 switch (fsp->flow_type) { 7241 case TCP_V4_FLOW: 7242 case 
UDP_V4_FLOW: 7243 case SCTP_V4_FLOW: 7244 case AH_V4_FLOW: 7245 case ESP_V4_FLOW: 7246 niu_get_ip4fs_from_tcam_key(tp, fsp); 7247 break; 7248 case TCP_V6_FLOW: 7249 case UDP_V6_FLOW: 7250 case SCTP_V6_FLOW: 7251 case AH_V6_FLOW: 7252 case ESP_V6_FLOW: 7253 /* Not yet implemented */ 7254 ret = -EINVAL; 7255 break; 7256 case IP_USER_FLOW: 7257 niu_get_ip4fs_from_tcam_key(tp, fsp); 7258 break; 7259 default: 7260 ret = -EINVAL; 7261 break; 7262 } 7263 7264 if (ret < 0) 7265 goto out; 7266 7267 if (tp->assoc_data & TCAM_ASSOCDATA_DISC) 7268 fsp->ring_cookie = RX_CLS_FLOW_DISC; 7269 else 7270 fsp->ring_cookie = (tp->assoc_data & TCAM_ASSOCDATA_OFFSET) >> 7271 TCAM_ASSOCDATA_OFFSET_SHIFT; 7272 7273 /* put the tcam size here */ 7274 nfc->data = tcam_get_size(np); 7275out: 7276 return ret; 7277} 7278 7279static int niu_get_ethtool_tcam_all(struct niu *np, 7280 struct ethtool_rxnfc *nfc, 7281 u32 *rule_locs) 7282{ 7283 struct niu_parent *parent = np->parent; 7284 struct niu_tcam_entry *tp; 7285 int i, idx, cnt; 7286 u16 n_entries; 7287 unsigned long flags; 7288 7289 7290 /* put the tcam size here */ 7291 nfc->data = tcam_get_size(np); 7292 7293 niu_lock_parent(np, flags); 7294 n_entries = nfc->rule_cnt; 7295 for (cnt = 0, i = 0; i < nfc->data; i++) { 7296 idx = tcam_get_index(np, i); 7297 tp = &parent->tcam[idx]; 7298 if (!tp->valid) 7299 continue; 7300 rule_locs[cnt] = i; 7301 cnt++; 7302 } 7303 niu_unlock_parent(np, flags); 7304 7305 if (n_entries != cnt) { 7306 /* print warning, this should not happen */ 7307 pr_info(PFX "niu%d: %s In niu_get_ethtool_tcam_all, " 7308 "n_entries[%d] != cnt[%d]!!!\n\n", 7309 np->parent->index, np->dev->name, n_entries, cnt); 7310 } 7311 7312 return 0; 7313} 7314 7315static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, 7316 void *rule_locs) 7317{ 7318 struct niu *np = netdev_priv(dev); 7319 int ret = 0; 7320 7321 switch (cmd->cmd) { 7322 case ETHTOOL_GRXFH: 7323 ret = niu_get_hash_opts(np, cmd); 7324 break; 7325 case ETHTOOL_GRXRINGS: 7326 cmd->data = np->num_rx_rings; 7327 break; 7328 case ETHTOOL_GRXCLSRLCNT: 7329 cmd->rule_cnt = tcam_get_valid_entry_cnt(np); 7330 break; 7331 case ETHTOOL_GRXCLSRULE: 7332 ret = niu_get_ethtool_tcam_entry(np, cmd); 7333 break; 7334 case ETHTOOL_GRXCLSRLALL: 7335 ret = niu_get_ethtool_tcam_all(np, cmd, (u32 *)rule_locs); 7336 break; 7337 default: 7338 ret = -EINVAL; 7339 break; 7340 } 7341 7342 return ret; 7343} 7344 7345static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc) 7346{ 7347 u64 class; 7348 u64 flow_key = 0; 7349 unsigned long flags; 7350 7351 if (!niu_ethflow_to_class(nfc->flow_type, &class)) 7352 return -EINVAL; 7353 7354 if (class < CLASS_CODE_USER_PROG1 || 7355 class > CLASS_CODE_SCTP_IPV6) 7356 return -EINVAL; 7357 7358 if (nfc->data & RXH_DISCARD) { 7359 niu_lock_parent(np, flags); 7360 flow_key = np->parent->tcam_key[class - 7361 CLASS_CODE_USER_PROG1]; 7362 flow_key |= TCAM_KEY_DISC; 7363 nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key); 7364 np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key; 7365 niu_unlock_parent(np, flags); 7366 return 0; 7367 } else { 7368 /* Discard was set before, but is not set now */ 7369 if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] & 7370 TCAM_KEY_DISC) { 7371 niu_lock_parent(np, flags); 7372 flow_key = np->parent->tcam_key[class - 7373 CLASS_CODE_USER_PROG1]; 7374 flow_key &= ~TCAM_KEY_DISC; 7375 nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), 7376 flow_key); 7377 np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = 7378 
flow_key; 7379 niu_unlock_parent(np, flags); 7380 } 7381 } 7382 7383 if (!niu_ethflow_to_flowkey(nfc->data, &flow_key)) 7384 return -EINVAL; 7385 7386 niu_lock_parent(np, flags); 7387 nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key); 7388 np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key; 7389 niu_unlock_parent(np, flags); 7390 7391 return 0; 7392} 7393 7394static void niu_get_tcamkey_from_ip4fs(struct ethtool_rx_flow_spec *fsp, 7395 struct niu_tcam_entry *tp, 7396 int l2_rdc_tab, u64 class) 7397{ 7398 u8 pid = 0; 7399 u32 sip, dip, sipm, dipm, spi, spim; 7400 u16 sport, dport, spm, dpm; 7401 7402 sip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4src); 7403 sipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4src); 7404 dip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4dst); 7405 dipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4dst); 7406 7407 tp->key[0] = class << TCAM_V4KEY0_CLASS_CODE_SHIFT; 7408 tp->key_mask[0] = TCAM_V4KEY0_CLASS_CODE; 7409 tp->key[1] = (u64)l2_rdc_tab << TCAM_V4KEY1_L2RDCNUM_SHIFT; 7410 tp->key_mask[1] = TCAM_V4KEY1_L2RDCNUM; 7411 7412 tp->key[3] = (u64)sip << TCAM_V4KEY3_SADDR_SHIFT; 7413 tp->key[3] |= dip; 7414 7415 tp->key_mask[3] = (u64)sipm << TCAM_V4KEY3_SADDR_SHIFT; 7416 tp->key_mask[3] |= dipm; 7417 7418 tp->key[2] |= ((u64)fsp->h_u.tcp_ip4_spec.tos << 7419 TCAM_V4KEY2_TOS_SHIFT); 7420 tp->key_mask[2] |= ((u64)fsp->m_u.tcp_ip4_spec.tos << 7421 TCAM_V4KEY2_TOS_SHIFT); 7422 switch (fsp->flow_type) { 7423 case TCP_V4_FLOW: 7424 case UDP_V4_FLOW: 7425 case SCTP_V4_FLOW: 7426 sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc); 7427 spm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc); 7428 dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst); 7429 dpm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst); 7430 7431 tp->key[2] |= (((u64)sport << 16) | dport); 7432 tp->key_mask[2] |= (((u64)spm << 16) | dpm); 7433 niu_ethflow_to_l3proto(fsp->flow_type, &pid); 7434 break; 7435 case AH_V4_FLOW: 7436 case ESP_V4_FLOW: 7437 spi = be32_to_cpu(fsp->h_u.ah_ip4_spec.spi); 7438 spim = be32_to_cpu(fsp->m_u.ah_ip4_spec.spi); 7439 7440 tp->key[2] |= spi; 7441 tp->key_mask[2] |= spim; 7442 niu_ethflow_to_l3proto(fsp->flow_type, &pid); 7443 break; 7444 case IP_USER_FLOW: 7445 spi = be32_to_cpu(fsp->h_u.usr_ip4_spec.l4_4_bytes); 7446 spim = be32_to_cpu(fsp->m_u.usr_ip4_spec.l4_4_bytes); 7447 7448 tp->key[2] |= spi; 7449 tp->key_mask[2] |= spim; 7450 pid = fsp->h_u.usr_ip4_spec.proto; 7451 break; 7452 default: 7453 break; 7454 } 7455 7456 tp->key[2] |= ((u64)pid << TCAM_V4KEY2_PROTO_SHIFT); 7457 if (pid) { 7458 tp->key_mask[2] |= TCAM_V4KEY2_PROTO; 7459 } 7460} 7461 7462static int niu_add_ethtool_tcam_entry(struct niu *np, 7463 struct ethtool_rxnfc *nfc) 7464{ 7465 struct niu_parent *parent = np->parent; 7466 struct niu_tcam_entry *tp; 7467 struct ethtool_rx_flow_spec *fsp = &nfc->fs; 7468 struct niu_rdc_tables *rdc_table = &parent->rdc_group_cfg[np->port]; 7469 int l2_rdc_table = rdc_table->first_table_num; 7470 u16 idx; 7471 u64 class; 7472 unsigned long flags; 7473 int err, ret; 7474 7475 ret = 0; 7476 7477 idx = nfc->fs.location; 7478 if (idx >= tcam_get_size(np)) 7479 return -EINVAL; 7480 7481 if (fsp->flow_type == IP_USER_FLOW) { 7482 int i; 7483 int add_usr_cls = 0; 7484 int ipv6 = 0; 7485 struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec; 7486 struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec; 7487 7488 niu_lock_parent(np, flags); 7489 7490 for (i = 0; i < NIU_L3_PROG_CLS; i++) { 7491 if (parent->l3_cls[i]) { 7492 if (uspec->proto == parent->l3_cls_pid[i]) { 7493 class = parent->l3_cls[i]; 
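				/* A user IP class matching this protocol is
				 * already programmed; reuse it and take another
				 * reference rather than consuming one of the
				 * remaining NIU_L3_PROG_CLS slots.  The
				 * reference is dropped again in
				 * niu_del_ethtool_tcam_entry().
				 */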
7494 parent->l3_cls_refcnt[i]++; 7495 add_usr_cls = 1; 7496 break; 7497 } 7498 } else { 7499 /* Program new user IP class */ 7500 switch (i) { 7501 case 0: 7502 class = CLASS_CODE_USER_PROG1; 7503 break; 7504 case 1: 7505 class = CLASS_CODE_USER_PROG2; 7506 break; 7507 case 2: 7508 class = CLASS_CODE_USER_PROG3; 7509 break; 7510 case 3: 7511 class = CLASS_CODE_USER_PROG4; 7512 break; 7513 default: 7514 break; 7515 } 7516 if (uspec->ip_ver == ETH_RX_NFC_IP6) 7517 ipv6 = 1; 7518 ret = tcam_user_ip_class_set(np, class, ipv6, 7519 uspec->proto, 7520 uspec->tos, 7521 umask->tos); 7522 if (ret) 7523 goto out; 7524 7525 ret = tcam_user_ip_class_enable(np, class, 1); 7526 if (ret) 7527 goto out; 7528 parent->l3_cls[i] = class; 7529 parent->l3_cls_pid[i] = uspec->proto; 7530 parent->l3_cls_refcnt[i]++; 7531 add_usr_cls = 1; 7532 break; 7533 } 7534 } 7535 if (!add_usr_cls) { 7536 pr_info(PFX "niu%d: %s niu_add_ethtool_tcam_entry: " 7537 "Could not find/insert class for pid %d\n", 7538 parent->index, np->dev->name, uspec->proto); 7539 ret = -EINVAL; 7540 goto out; 7541 } 7542 niu_unlock_parent(np, flags); 7543 } else { 7544 if (!niu_ethflow_to_class(fsp->flow_type, &class)) { 7545 return -EINVAL; 7546 } 7547 } 7548 7549 niu_lock_parent(np, flags); 7550 7551 idx = tcam_get_index(np, idx); 7552 tp = &parent->tcam[idx]; 7553 7554 memset(tp, 0, sizeof(*tp)); 7555 7556 /* fill in the tcam key and mask */ 7557 switch (fsp->flow_type) { 7558 case TCP_V4_FLOW: 7559 case UDP_V4_FLOW: 7560 case SCTP_V4_FLOW: 7561 case AH_V4_FLOW: 7562 case ESP_V4_FLOW: 7563 niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class); 7564 break; 7565 case TCP_V6_FLOW: 7566 case UDP_V6_FLOW: 7567 case SCTP_V6_FLOW: 7568 case AH_V6_FLOW: 7569 case ESP_V6_FLOW: 7570 /* Not yet implemented */ 7571 pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: " 7572 "flow %d for IPv6 not implemented\n\n", 7573 parent->index, np->dev->name, fsp->flow_type); 7574 ret = -EINVAL; 7575 goto out; 7576 case IP_USER_FLOW: 7577 if (fsp->h_u.usr_ip4_spec.ip_ver == ETH_RX_NFC_IP4) { 7578 niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, 7579 class); 7580 } else { 7581 /* Not yet implemented */ 7582 pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: " 7583 "usr flow for IPv6 not implemented\n\n", 7584 parent->index, np->dev->name); 7585 ret = -EINVAL; 7586 goto out; 7587 } 7588 break; 7589 default: 7590 pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: " 7591 "Unknown flow type %d\n\n", 7592 parent->index, np->dev->name, fsp->flow_type); 7593 ret = -EINVAL; 7594 goto out; 7595 } 7596 7597 /* fill in the assoc data */ 7598 if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { 7599 tp->assoc_data = TCAM_ASSOCDATA_DISC; 7600 } else { 7601 if (fsp->ring_cookie >= np->num_rx_rings) { 7602 pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: " 7603 "Invalid RX ring %lld\n\n", 7604 parent->index, np->dev->name, 7605 (long long) fsp->ring_cookie); 7606 ret = -EINVAL; 7607 goto out; 7608 } 7609 tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET | 7610 (fsp->ring_cookie << 7611 TCAM_ASSOCDATA_OFFSET_SHIFT)); 7612 } 7613 7614 err = tcam_write(np, idx, tp->key, tp->key_mask); 7615 if (err) { 7616 ret = -EINVAL; 7617 goto out; 7618 } 7619 err = tcam_assoc_write(np, idx, tp->assoc_data); 7620 if (err) { 7621 ret = -EINVAL; 7622 goto out; 7623 } 7624 7625 /* validate the entry */ 7626 tp->valid = 1; 7627 np->clas.tcam_valid_entries++; 7628out: 7629 niu_unlock_parent(np, flags); 7630 7631 return ret; 7632} 7633 7634static int niu_del_ethtool_tcam_entry(struct niu *np, u32 
loc) 7635{ 7636 struct niu_parent *parent = np->parent; 7637 struct niu_tcam_entry *tp; 7638 u16 idx; 7639 unsigned long flags; 7640 u64 class; 7641 int ret = 0; 7642 7643 if (loc >= tcam_get_size(np)) 7644 return -EINVAL; 7645 7646 niu_lock_parent(np, flags); 7647 7648 idx = tcam_get_index(np, loc); 7649 tp = &parent->tcam[idx]; 7650 7651 /* if the entry is of a user defined class, then update*/ 7652 class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >> 7653 TCAM_V4KEY0_CLASS_CODE_SHIFT; 7654 7655 if (class >= CLASS_CODE_USER_PROG1 && class <= CLASS_CODE_USER_PROG4) { 7656 int i; 7657 for (i = 0; i < NIU_L3_PROG_CLS; i++) { 7658 if (parent->l3_cls[i] == class) { 7659 parent->l3_cls_refcnt[i]--; 7660 if (!parent->l3_cls_refcnt[i]) { 7661 /* disable class */ 7662 ret = tcam_user_ip_class_enable(np, 7663 class, 7664 0); 7665 if (ret) 7666 goto out; 7667 parent->l3_cls[i] = 0; 7668 parent->l3_cls_pid[i] = 0; 7669 } 7670 break; 7671 } 7672 } 7673 if (i == NIU_L3_PROG_CLS) { 7674 pr_info(PFX "niu%d: %s In niu_del_ethtool_tcam_entry," 7675 "Usr class 0x%llx not found \n", 7676 parent->index, np->dev->name, 7677 (unsigned long long) class); 7678 ret = -EINVAL; 7679 goto out; 7680 } 7681 } 7682 7683 ret = tcam_flush(np, idx); 7684 if (ret) 7685 goto out; 7686 7687 /* invalidate the entry */ 7688 tp->valid = 0; 7689 np->clas.tcam_valid_entries--; 7690out: 7691 niu_unlock_parent(np, flags); 7692 7693 return ret; 7694} 7695 7696static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd) 7697{ 7698 struct niu *np = netdev_priv(dev); 7699 int ret = 0; 7700 7701 switch (cmd->cmd) { 7702 case ETHTOOL_SRXFH: 7703 ret = niu_set_hash_opts(np, cmd); 7704 break; 7705 case ETHTOOL_SRXCLSRLINS: 7706 ret = niu_add_ethtool_tcam_entry(np, cmd); 7707 break; 7708 case ETHTOOL_SRXCLSRLDEL: 7709 ret = niu_del_ethtool_tcam_entry(np, cmd->fs.location); 7710 break; 7711 default: 7712 ret = -EINVAL; 7713 break; 7714 } 7715 7716 return ret; 7717} 7718 7719static const struct { 7720 const char string[ETH_GSTRING_LEN]; 7721} niu_xmac_stat_keys[] = { 7722 { "tx_frames" }, 7723 { "tx_bytes" }, 7724 { "tx_fifo_errors" }, 7725 { "tx_overflow_errors" }, 7726 { "tx_max_pkt_size_errors" }, 7727 { "tx_underflow_errors" }, 7728 { "rx_local_faults" }, 7729 { "rx_remote_faults" }, 7730 { "rx_link_faults" }, 7731 { "rx_align_errors" }, 7732 { "rx_frags" }, 7733 { "rx_mcasts" }, 7734 { "rx_bcasts" }, 7735 { "rx_hist_cnt1" }, 7736 { "rx_hist_cnt2" }, 7737 { "rx_hist_cnt3" }, 7738 { "rx_hist_cnt4" }, 7739 { "rx_hist_cnt5" }, 7740 { "rx_hist_cnt6" }, 7741 { "rx_hist_cnt7" }, 7742 { "rx_octets" }, 7743 { "rx_code_violations" }, 7744 { "rx_len_errors" }, 7745 { "rx_crc_errors" }, 7746 { "rx_underflows" }, 7747 { "rx_overflows" }, 7748 { "pause_off_state" }, 7749 { "pause_on_state" }, 7750 { "pause_received" }, 7751}; 7752 7753#define NUM_XMAC_STAT_KEYS ARRAY_SIZE(niu_xmac_stat_keys) 7754 7755static const struct { 7756 const char string[ETH_GSTRING_LEN]; 7757} niu_bmac_stat_keys[] = { 7758 { "tx_underflow_errors" }, 7759 { "tx_max_pkt_size_errors" }, 7760 { "tx_bytes" }, 7761 { "tx_frames" }, 7762 { "rx_overflows" }, 7763 { "rx_frames" }, 7764 { "rx_align_errors" }, 7765 { "rx_crc_errors" }, 7766 { "rx_len_errors" }, 7767 { "pause_off_state" }, 7768 { "pause_on_state" }, 7769 { "pause_received" }, 7770}; 7771 7772#define NUM_BMAC_STAT_KEYS ARRAY_SIZE(niu_bmac_stat_keys) 7773 7774static const struct { 7775 const char string[ETH_GSTRING_LEN]; 7776} niu_rxchan_stat_keys[] = { 7777 { "rx_channel" }, 7778 { "rx_packets" }, 7779 { 
"rx_bytes" }, 7780 { "rx_dropped" }, 7781 { "rx_errors" }, 7782}; 7783 7784#define NUM_RXCHAN_STAT_KEYS ARRAY_SIZE(niu_rxchan_stat_keys) 7785 7786static const struct { 7787 const char string[ETH_GSTRING_LEN]; 7788} niu_txchan_stat_keys[] = { 7789 { "tx_channel" }, 7790 { "tx_packets" }, 7791 { "tx_bytes" }, 7792 { "tx_errors" }, 7793}; 7794 7795#define NUM_TXCHAN_STAT_KEYS ARRAY_SIZE(niu_txchan_stat_keys) 7796 7797static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data) 7798{ 7799 struct niu *np = netdev_priv(dev); 7800 int i; 7801 7802 if (stringset != ETH_SS_STATS) 7803 return; 7804 7805 if (np->flags & NIU_FLAGS_XMAC) { 7806 memcpy(data, niu_xmac_stat_keys, 7807 sizeof(niu_xmac_stat_keys)); 7808 data += sizeof(niu_xmac_stat_keys); 7809 } else { 7810 memcpy(data, niu_bmac_stat_keys, 7811 sizeof(niu_bmac_stat_keys)); 7812 data += sizeof(niu_bmac_stat_keys); 7813 } 7814 for (i = 0; i < np->num_rx_rings; i++) { 7815 memcpy(data, niu_rxchan_stat_keys, 7816 sizeof(niu_rxchan_stat_keys)); 7817 data += sizeof(niu_rxchan_stat_keys); 7818 } 7819 for (i = 0; i < np->num_tx_rings; i++) { 7820 memcpy(data, niu_txchan_stat_keys, 7821 sizeof(niu_txchan_stat_keys)); 7822 data += sizeof(niu_txchan_stat_keys); 7823 } 7824} 7825 7826static int niu_get_stats_count(struct net_device *dev) 7827{ 7828 struct niu *np = netdev_priv(dev); 7829 7830 return ((np->flags & NIU_FLAGS_XMAC ? 7831 NUM_XMAC_STAT_KEYS : 7832 NUM_BMAC_STAT_KEYS) + 7833 (np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) + 7834 (np->num_tx_rings * NUM_TXCHAN_STAT_KEYS)); 7835} 7836 7837static void niu_get_ethtool_stats(struct net_device *dev, 7838 struct ethtool_stats *stats, u64 *data) 7839{ 7840 struct niu *np = netdev_priv(dev); 7841 int i; 7842 7843 niu_sync_mac_stats(np); 7844 if (np->flags & NIU_FLAGS_XMAC) { 7845 memcpy(data, &np->mac_stats.xmac, 7846 sizeof(struct niu_xmac_stats)); 7847 data += (sizeof(struct niu_xmac_stats) / sizeof(u64)); 7848 } else { 7849 memcpy(data, &np->mac_stats.bmac, 7850 sizeof(struct niu_bmac_stats)); 7851 data += (sizeof(struct niu_bmac_stats) / sizeof(u64)); 7852 } 7853 for (i = 0; i < np->num_rx_rings; i++) { 7854 struct rx_ring_info *rp = &np->rx_rings[i]; 7855 7856 niu_sync_rx_discard_stats(np, rp, 0); 7857 7858 data[0] = rp->rx_channel; 7859 data[1] = rp->rx_packets; 7860 data[2] = rp->rx_bytes; 7861 data[3] = rp->rx_dropped; 7862 data[4] = rp->rx_errors; 7863 data += 5; 7864 } 7865 for (i = 0; i < np->num_tx_rings; i++) { 7866 struct tx_ring_info *rp = &np->tx_rings[i]; 7867 7868 data[0] = rp->tx_channel; 7869 data[1] = rp->tx_packets; 7870 data[2] = rp->tx_bytes; 7871 data[3] = rp->tx_errors; 7872 data += 4; 7873 } 7874} 7875 7876static u64 niu_led_state_save(struct niu *np) 7877{ 7878 if (np->flags & NIU_FLAGS_XMAC) 7879 return nr64_mac(XMAC_CONFIG); 7880 else 7881 return nr64_mac(BMAC_XIF_CONFIG); 7882} 7883 7884static void niu_led_state_restore(struct niu *np, u64 val) 7885{ 7886 if (np->flags & NIU_FLAGS_XMAC) 7887 nw64_mac(XMAC_CONFIG, val); 7888 else 7889 nw64_mac(BMAC_XIF_CONFIG, val); 7890} 7891 7892static void niu_force_led(struct niu *np, int on) 7893{ 7894 u64 val, reg, bit; 7895 7896 if (np->flags & NIU_FLAGS_XMAC) { 7897 reg = XMAC_CONFIG; 7898 bit = XMAC_CONFIG_FORCE_LED_ON; 7899 } else { 7900 reg = BMAC_XIF_CONFIG; 7901 bit = BMAC_XIF_CONFIG_LINK_LED; 7902 } 7903 7904 val = nr64_mac(reg); 7905 if (on) 7906 val |= bit; 7907 else 7908 val &= ~bit; 7909 nw64_mac(reg, val); 7910} 7911 7912static int niu_phys_id(struct net_device *dev, u32 data) 7913{ 7914 struct niu *np = 
netdev_priv(dev); 7915 u64 orig_led_state; 7916 int i; 7917 7918 if (!netif_running(dev)) 7919 return -EAGAIN; 7920 7921 if (data == 0) 7922 data = 2; 7923 7924 orig_led_state = niu_led_state_save(np); 7925 for (i = 0; i < (data * 2); i++) { 7926 int on = ((i % 2) == 0); 7927 7928 niu_force_led(np, on); 7929 7930 if (msleep_interruptible(500)) 7931 break; 7932 } 7933 niu_led_state_restore(np, orig_led_state); 7934 7935 return 0; 7936} 7937 7938static const struct ethtool_ops niu_ethtool_ops = { 7939 .get_drvinfo = niu_get_drvinfo, 7940 .get_link = ethtool_op_get_link, 7941 .get_msglevel = niu_get_msglevel, 7942 .set_msglevel = niu_set_msglevel, 7943 .nway_reset = niu_nway_reset, 7944 .get_eeprom_len = niu_get_eeprom_len, 7945 .get_eeprom = niu_get_eeprom, 7946 .get_settings = niu_get_settings, 7947 .set_settings = niu_set_settings, 7948 .get_strings = niu_get_strings, 7949 .get_stats_count = niu_get_stats_count, 7950 .get_ethtool_stats = niu_get_ethtool_stats, 7951 .phys_id = niu_phys_id, 7952 .get_rxnfc = niu_get_nfc, 7953 .set_rxnfc = niu_set_nfc, 7954}; 7955 7956static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent, 7957 int ldg, int ldn) 7958{ 7959 if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) 7960 return -EINVAL; 7961 if (ldn < 0 || ldn > LDN_MAX) 7962 return -EINVAL; 7963 7964 parent->ldg_map[ldn] = ldg; 7965 7966 if (np->parent->plat_type == PLAT_TYPE_NIU) { 7967 /* On N2 NIU, the ldn-->ldg assignments are setup and fixed by 7968 * the firmware, and we're not supposed to change them. 7969 * Validate the mapping, because if it's wrong we probably 7970 * won't get any interrupts and that's painful to debug. 7971 */ 7972 if (nr64(LDG_NUM(ldn)) != ldg) { 7973 dev_err(np->device, PFX "Port %u, mis-matched " 7974 "LDG assignment " 7975 "for ldn %d, should be %d is %llu\n", 7976 np->port, ldn, ldg, 7977 (unsigned long long) nr64(LDG_NUM(ldn))); 7978 return -EINVAL; 7979 } 7980 } else 7981 nw64(LDG_NUM(ldn), ldg); 7982 7983 return 0; 7984} 7985 7986static int niu_set_ldg_timer_res(struct niu *np, int res) 7987{ 7988 if (res < 0 || res > LDG_TIMER_RES_VAL) 7989 return -EINVAL; 7990 7991 7992 nw64(LDG_TIMER_RES, res); 7993 7994 return 0; 7995} 7996 7997static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector) 7998{ 7999 if ((ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) || 8000 (func < 0 || func > 3) || 8001 (vector < 0 || vector > 0x1f)) 8002 return -EINVAL; 8003 8004 nw64(SID(ldg), (func << SID_FUNC_SHIFT) | vector); 8005 8006 return 0; 8007} 8008 8009static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr) 8010{ 8011 u64 frame, frame_base = (ESPC_PIO_STAT_READ_START | 8012 (addr << ESPC_PIO_STAT_ADDR_SHIFT)); 8013 int limit; 8014 8015 if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT)) 8016 return -EINVAL; 8017 8018 frame = frame_base; 8019 nw64(ESPC_PIO_STAT, frame); 8020 limit = 64; 8021 do { 8022 udelay(5); 8023 frame = nr64(ESPC_PIO_STAT); 8024 if (frame & ESPC_PIO_STAT_READ_END) 8025 break; 8026 } while (limit--); 8027 if (!(frame & ESPC_PIO_STAT_READ_END)) { 8028 dev_err(np->device, PFX "EEPROM read timeout frame[%llx]\n", 8029 (unsigned long long) frame); 8030 return -ENODEV; 8031 } 8032 8033 frame = frame_base; 8034 nw64(ESPC_PIO_STAT, frame); 8035 limit = 64; 8036 do { 8037 udelay(5); 8038 frame = nr64(ESPC_PIO_STAT); 8039 if (frame & ESPC_PIO_STAT_READ_END) 8040 break; 8041 } while (limit--); 8042 if (!(frame & ESPC_PIO_STAT_READ_END)) { 8043 dev_err(np->device, PFX "EEPROM read timeout frame[%llx]\n", 8044 (unsigned long long) frame); 
8045 return -ENODEV; 8046 } 8047 8048 frame = nr64(ESPC_PIO_STAT); 8049 return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT; 8050} 8051 8052static int __devinit niu_pci_eeprom_read16(struct niu *np, u32 off) 8053{ 8054 int err = niu_pci_eeprom_read(np, off); 8055 u16 val; 8056 8057 if (err < 0) 8058 return err; 8059 val = (err << 8); 8060 err = niu_pci_eeprom_read(np, off + 1); 8061 if (err < 0) 8062 return err; 8063 val |= (err & 0xff); 8064 8065 return val; 8066} 8067 8068static int __devinit niu_pci_eeprom_read16_swp(struct niu *np, u32 off) 8069{ 8070 int err = niu_pci_eeprom_read(np, off); 8071 u16 val; 8072 8073 if (err < 0) 8074 return err; 8075 8076 val = (err & 0xff); 8077 err = niu_pci_eeprom_read(np, off + 1); 8078 if (err < 0) 8079 return err; 8080 8081 val |= (err & 0xff) << 8; 8082 8083 return val; 8084} 8085 8086static int __devinit niu_pci_vpd_get_propname(struct niu *np, 8087 u32 off, 8088 char *namebuf, 8089 int namebuf_len) 8090{ 8091 int i; 8092 8093 for (i = 0; i < namebuf_len; i++) { 8094 int err = niu_pci_eeprom_read(np, off + i); 8095 if (err < 0) 8096 return err; 8097 *namebuf++ = err; 8098 if (!err) 8099 break; 8100 } 8101 if (i >= namebuf_len) 8102 return -EINVAL; 8103 8104 return i + 1; 8105} 8106 8107static void __devinit niu_vpd_parse_version(struct niu *np) 8108{ 8109 struct niu_vpd *vpd = &np->vpd; 8110 int len = strlen(vpd->version) + 1; 8111 const char *s = vpd->version; 8112 int i; 8113 8114 for (i = 0; i < len - 5; i++) { 8115 if (!strncmp(s + i, "FCode ", 5)) 8116 break; 8117 } 8118 if (i >= len - 5) 8119 return; 8120 8121 s += i + 5; 8122 sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor); 8123 8124 niudbg(PROBE, "VPD_SCAN: FCODE major(%d) minor(%d)\n", 8125 vpd->fcode_major, vpd->fcode_minor); 8126 if (vpd->fcode_major > NIU_VPD_MIN_MAJOR || 8127 (vpd->fcode_major == NIU_VPD_MIN_MAJOR && 8128 vpd->fcode_minor >= NIU_VPD_MIN_MINOR)) 8129 np->flags |= NIU_FLAGS_VPD_VALID; 8130} 8131 8132/* ESPC_PIO_EN_ENABLE must be set */ 8133static int __devinit niu_pci_vpd_scan_props(struct niu *np, 8134 u32 start, u32 end) 8135{ 8136 unsigned int found_mask = 0; 8137#define FOUND_MASK_MODEL 0x00000001 8138#define FOUND_MASK_BMODEL 0x00000002 8139#define FOUND_MASK_VERS 0x00000004 8140#define FOUND_MASK_MAC 0x00000008 8141#define FOUND_MASK_NMAC 0x00000010 8142#define FOUND_MASK_PHY 0x00000020 8143#define FOUND_MASK_ALL 0x0000003f 8144 8145 niudbg(PROBE, "VPD_SCAN: start[%x] end[%x]\n", 8146 start, end); 8147 while (start < end) { 8148 int len, err, instance, type, prop_len; 8149 char namebuf[64]; 8150 u8 *prop_buf; 8151 int max_len; 8152 8153 if (found_mask == FOUND_MASK_ALL) { 8154 niu_vpd_parse_version(np); 8155 return 1; 8156 } 8157 8158 err = niu_pci_eeprom_read(np, start + 2); 8159 if (err < 0) 8160 return err; 8161 len = err; 8162 start += 3; 8163 8164 instance = niu_pci_eeprom_read(np, start); 8165 type = niu_pci_eeprom_read(np, start + 3); 8166 prop_len = niu_pci_eeprom_read(np, start + 4); 8167 err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64); 8168 if (err < 0) 8169 return err; 8170 8171 prop_buf = NULL; 8172 max_len = 0; 8173 if (!strcmp(namebuf, "model")) { 8174 prop_buf = np->vpd.model; 8175 max_len = NIU_VPD_MODEL_MAX; 8176 found_mask |= FOUND_MASK_MODEL; 8177 } else if (!strcmp(namebuf, "board-model")) { 8178 prop_buf = np->vpd.board_model; 8179 max_len = NIU_VPD_BD_MODEL_MAX; 8180 found_mask |= FOUND_MASK_BMODEL; 8181 } else if (!strcmp(namebuf, "version")) { 8182 prop_buf = np->vpd.version; 8183 max_len = 
NIU_VPD_VERSION_MAX; 8184 found_mask |= FOUND_MASK_VERS; 8185 } else if (!strcmp(namebuf, "local-mac-address")) { 8186 prop_buf = np->vpd.local_mac; 8187 max_len = ETH_ALEN; 8188 found_mask |= FOUND_MASK_MAC; 8189 } else if (!strcmp(namebuf, "num-mac-addresses")) { 8190 prop_buf = &np->vpd.mac_num; 8191 max_len = 1; 8192 found_mask |= FOUND_MASK_NMAC; 8193 } else if (!strcmp(namebuf, "phy-type")) { 8194 prop_buf = np->vpd.phy_type; 8195 max_len = NIU_VPD_PHY_TYPE_MAX; 8196 found_mask |= FOUND_MASK_PHY; 8197 } 8198 8199 if (max_len && prop_len > max_len) { 8200 dev_err(np->device, PFX "Property '%s' length (%d) is " 8201 "too long.\n", namebuf, prop_len); 8202 return -EINVAL; 8203 } 8204 8205 if (prop_buf) { 8206 u32 off = start + 5 + err; 8207 int i; 8208 8209 niudbg(PROBE, "VPD_SCAN: Reading in property [%s] " 8210 "len[%d]\n", namebuf, prop_len); 8211 for (i = 0; i < prop_len; i++) 8212 *prop_buf++ = niu_pci_eeprom_read(np, off + i); 8213 } 8214 8215 start += len; 8216 } 8217 8218 return 0; 8219} 8220 8221/* ESPC_PIO_EN_ENABLE must be set */ 8222static void __devinit niu_pci_vpd_fetch(struct niu *np, u32 start) 8223{ 8224 u32 offset; 8225 int err; 8226 8227 err = niu_pci_eeprom_read16_swp(np, start + 1); 8228 if (err < 0) 8229 return; 8230 8231 offset = err + 3; 8232 8233 while (start + offset < ESPC_EEPROM_SIZE) { 8234 u32 here = start + offset; 8235 u32 end; 8236 8237 err = niu_pci_eeprom_read(np, here); 8238 if (err != 0x90) 8239 return; 8240 8241 err = niu_pci_eeprom_read16_swp(np, here + 1); 8242 if (err < 0) 8243 return; 8244 8245 here = start + offset + 3; 8246 end = start + offset + err; 8247 8248 offset += err; 8249 8250 err = niu_pci_vpd_scan_props(np, here, end); 8251 if (err < 0 || err == 1) 8252 return; 8253 } 8254} 8255 8256/* ESPC_PIO_EN_ENABLE must be set */ 8257static u32 __devinit niu_pci_vpd_offset(struct niu *np) 8258{ 8259 u32 start = 0, end = ESPC_EEPROM_SIZE, ret; 8260 int err; 8261 8262 while (start < end) { 8263 ret = start; 8264 8265 /* ROM header signature? */ 8266 err = niu_pci_eeprom_read16(np, start + 0); 8267 if (err != 0x55aa) 8268 return 0; 8269 8270 /* Apply offset to PCI data structure. */ 8271 err = niu_pci_eeprom_read16(np, start + 23); 8272 if (err < 0) 8273 return 0; 8274 start += err; 8275 8276 /* Check for "PCIR" signature. */ 8277 err = niu_pci_eeprom_read16(np, start + 0); 8278 if (err != 0x5043) 8279 return 0; 8280 err = niu_pci_eeprom_read16(np, start + 2); 8281 if (err != 0x4952) 8282 return 0; 8283 8284 /* Check for OBP image type. 
*/ 8285 err = niu_pci_eeprom_read(np, start + 20); 8286 if (err < 0) 8287 return 0; 8288 if (err != 0x01) { 8289 err = niu_pci_eeprom_read(np, ret + 2); 8290 if (err < 0) 8291 return 0; 8292 8293 start = ret + (err * 512); 8294 continue; 8295 } 8296 8297 err = niu_pci_eeprom_read16_swp(np, start + 8); 8298 if (err < 0) 8299 return err; 8300 ret += err; 8301 8302 err = niu_pci_eeprom_read(np, ret + 0); 8303 if (err != 0x82) 8304 return 0; 8305 8306 return ret; 8307 } 8308 8309 return 0; 8310} 8311 8312static int __devinit niu_phy_type_prop_decode(struct niu *np, 8313 const char *phy_prop) 8314{ 8315 if (!strcmp(phy_prop, "mif")) { 8316 /* 1G copper, MII */ 8317 np->flags &= ~(NIU_FLAGS_FIBER | 8318 NIU_FLAGS_10G); 8319 np->mac_xcvr = MAC_XCVR_MII; 8320 } else if (!strcmp(phy_prop, "xgf")) { 8321 /* 10G fiber, XPCS */ 8322 np->flags |= (NIU_FLAGS_10G | 8323 NIU_FLAGS_FIBER); 8324 np->mac_xcvr = MAC_XCVR_XPCS; 8325 } else if (!strcmp(phy_prop, "pcs")) { 8326 /* 1G fiber, PCS */ 8327 np->flags &= ~NIU_FLAGS_10G; 8328 np->flags |= NIU_FLAGS_FIBER; 8329 np->mac_xcvr = MAC_XCVR_PCS; 8330 } else if (!strcmp(phy_prop, "xgc")) { 8331 /* 10G copper, XPCS */ 8332 np->flags |= NIU_FLAGS_10G; 8333 np->flags &= ~NIU_FLAGS_FIBER; 8334 np->mac_xcvr = MAC_XCVR_XPCS; 8335 } else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) { 8336 /* 10G Serdes or 1G Serdes, default to 10G */ 8337 np->flags |= NIU_FLAGS_10G; 8338 np->flags &= ~NIU_FLAGS_FIBER; 8339 np->flags |= NIU_FLAGS_XCVR_SERDES; 8340 np->mac_xcvr = MAC_XCVR_XPCS; 8341 } else { 8342 return -EINVAL; 8343 } 8344 return 0; 8345} 8346 8347static int niu_pci_vpd_get_nports(struct niu *np) 8348{ 8349 int ports = 0; 8350 8351 if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) || 8352 (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) || 8353 (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) || 8354 (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) || 8355 (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) { 8356 ports = 4; 8357 } else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) || 8358 (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) || 8359 (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) || 8360 (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) { 8361 ports = 2; 8362 } 8363 8364 return ports; 8365} 8366 8367static void __devinit niu_pci_vpd_validate(struct niu *np) 8368{ 8369 struct net_device *dev = np->dev; 8370 struct niu_vpd *vpd = &np->vpd; 8371 u8 val8; 8372 8373 if (!is_valid_ether_addr(&vpd->local_mac[0])) { 8374 dev_err(np->device, PFX "VPD MAC invalid, " 8375 "falling back to SPROM.\n"); 8376 8377 np->flags &= ~NIU_FLAGS_VPD_VALID; 8378 return; 8379 } 8380 8381 if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || 8382 !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { 8383 np->flags |= NIU_FLAGS_10G; 8384 np->flags &= ~NIU_FLAGS_FIBER; 8385 np->flags |= NIU_FLAGS_XCVR_SERDES; 8386 np->mac_xcvr = MAC_XCVR_PCS; 8387 if (np->port > 1) { 8388 np->flags |= NIU_FLAGS_FIBER; 8389 np->flags &= ~NIU_FLAGS_10G; 8390 } 8391 if (np->flags & NIU_FLAGS_10G) 8392 np->mac_xcvr = MAC_XCVR_XPCS; 8393 } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { 8394 np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | 8395 NIU_FLAGS_HOTPLUG_PHY); 8396 } else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { 8397 dev_err(np->device, PFX "Illegal phy string [%s].\n", 8398 np->vpd.phy_type); 8399 dev_err(np->device, PFX "Falling back to SPROM.\n"); 8400 np->flags &= ~NIU_FLAGS_VPD_VALID; 8401 return; 8402 } 8403 8404 memcpy(dev->perm_addr, vpd->local_mac, ETH_ALEN); 8405 8406 val8 = 
dev->perm_addr[5]; 8407 dev->perm_addr[5] += np->port; 8408 if (dev->perm_addr[5] < val8) 8409 dev->perm_addr[4]++; 8410 8411 memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); 8412} 8413 8414static int __devinit niu_pci_probe_sprom(struct niu *np) 8415{ 8416 struct net_device *dev = np->dev; 8417 int len, i; 8418 u64 val, sum; 8419 u8 val8; 8420 8421 val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ); 8422 val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT; 8423 len = val / 4; 8424 8425 np->eeprom_len = len; 8426 8427 niudbg(PROBE, "SPROM: Image size %llu\n", (unsigned long long) val); 8428 8429 sum = 0; 8430 for (i = 0; i < len; i++) { 8431 val = nr64(ESPC_NCR(i)); 8432 sum += (val >> 0) & 0xff; 8433 sum += (val >> 8) & 0xff; 8434 sum += (val >> 16) & 0xff; 8435 sum += (val >> 24) & 0xff; 8436 } 8437 niudbg(PROBE, "SPROM: Checksum %x\n", (int)(sum & 0xff)); 8438 if ((sum & 0xff) != 0xab) { 8439 dev_err(np->device, PFX "Bad SPROM checksum " 8440 "(%x, should be 0xab)\n", (int) (sum & 0xff)); 8441 return -EINVAL; 8442 } 8443 8444 val = nr64(ESPC_PHY_TYPE); 8445 switch (np->port) { 8446 case 0: 8447 val8 = (val & ESPC_PHY_TYPE_PORT0) >> 8448 ESPC_PHY_TYPE_PORT0_SHIFT; 8449 break; 8450 case 1: 8451 val8 = (val & ESPC_PHY_TYPE_PORT1) >> 8452 ESPC_PHY_TYPE_PORT1_SHIFT; 8453 break; 8454 case 2: 8455 val8 = (val & ESPC_PHY_TYPE_PORT2) >> 8456 ESPC_PHY_TYPE_PORT2_SHIFT; 8457 break; 8458 case 3: 8459 val8 = (val & ESPC_PHY_TYPE_PORT3) >> 8460 ESPC_PHY_TYPE_PORT3_SHIFT; 8461 break; 8462 default: 8463 dev_err(np->device, PFX "Bogus port number %u\n", 8464 np->port); 8465 return -EINVAL; 8466 } 8467 niudbg(PROBE, "SPROM: PHY type %x\n", val8); 8468 8469 switch (val8) { 8470 case ESPC_PHY_TYPE_1G_COPPER: 8471 /* 1G copper, MII */ 8472 np->flags &= ~(NIU_FLAGS_FIBER | 8473 NIU_FLAGS_10G); 8474 np->mac_xcvr = MAC_XCVR_MII; 8475 break; 8476 8477 case ESPC_PHY_TYPE_1G_FIBER: 8478 /* 1G fiber, PCS */ 8479 np->flags &= ~NIU_FLAGS_10G; 8480 np->flags |= NIU_FLAGS_FIBER; 8481 np->mac_xcvr = MAC_XCVR_PCS; 8482 break; 8483 8484 case ESPC_PHY_TYPE_10G_COPPER: 8485 /* 10G copper, XPCS */ 8486 np->flags |= NIU_FLAGS_10G; 8487 np->flags &= ~NIU_FLAGS_FIBER; 8488 np->mac_xcvr = MAC_XCVR_XPCS; 8489 break; 8490 8491 case ESPC_PHY_TYPE_10G_FIBER: 8492 /* 10G fiber, XPCS */ 8493 np->flags |= (NIU_FLAGS_10G | 8494 NIU_FLAGS_FIBER); 8495 np->mac_xcvr = MAC_XCVR_XPCS; 8496 break; 8497 8498 default: 8499 dev_err(np->device, PFX "Bogus SPROM phy type %u\n", val8); 8500 return -EINVAL; 8501 } 8502 8503 val = nr64(ESPC_MAC_ADDR0); 8504 niudbg(PROBE, "SPROM: MAC_ADDR0[%08llx]\n", 8505 (unsigned long long) val); 8506 dev->perm_addr[0] = (val >> 0) & 0xff; 8507 dev->perm_addr[1] = (val >> 8) & 0xff; 8508 dev->perm_addr[2] = (val >> 16) & 0xff; 8509 dev->perm_addr[3] = (val >> 24) & 0xff; 8510 8511 val = nr64(ESPC_MAC_ADDR1); 8512 niudbg(PROBE, "SPROM: MAC_ADDR1[%08llx]\n", 8513 (unsigned long long) val); 8514 dev->perm_addr[4] = (val >> 0) & 0xff; 8515 dev->perm_addr[5] = (val >> 8) & 0xff; 8516 8517 if (!is_valid_ether_addr(&dev->perm_addr[0])) { 8518 dev_err(np->device, PFX "SPROM MAC address invalid\n"); 8519 dev_err(np->device, PFX "[ \n"); 8520 for (i = 0; i < 6; i++) 8521 printk("%02x ", dev->perm_addr[i]); 8522 printk("]\n"); 8523 return -EINVAL; 8524 } 8525 8526 val8 = dev->perm_addr[5]; 8527 dev->perm_addr[5] += np->port; 8528 if (dev->perm_addr[5] < val8) 8529 dev->perm_addr[4]++; 8530 8531 memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); 8532 8533 val = nr64(ESPC_MOD_STR_LEN); 8534 niudbg(PROBE, "SPROM: MOD_STR_LEN[%llu]\n", 
8535 (unsigned long long) val); 8536 if (val >= 8 * 4) 8537 return -EINVAL; 8538 8539 for (i = 0; i < val; i += 4) { 8540 u64 tmp = nr64(ESPC_NCR(5 + (i / 4))); 8541 8542 np->vpd.model[i + 3] = (tmp >> 0) & 0xff; 8543 np->vpd.model[i + 2] = (tmp >> 8) & 0xff; 8544 np->vpd.model[i + 1] = (tmp >> 16) & 0xff; 8545 np->vpd.model[i + 0] = (tmp >> 24) & 0xff; 8546 } 8547 np->vpd.model[val] = '\0'; 8548 8549 val = nr64(ESPC_BD_MOD_STR_LEN); 8550 niudbg(PROBE, "SPROM: BD_MOD_STR_LEN[%llu]\n", 8551 (unsigned long long) val); 8552 if (val >= 4 * 4) 8553 return -EINVAL; 8554 8555 for (i = 0; i < val; i += 4) { 8556 u64 tmp = nr64(ESPC_NCR(14 + (i / 4))); 8557 8558 np->vpd.board_model[i + 3] = (tmp >> 0) & 0xff; 8559 np->vpd.board_model[i + 2] = (tmp >> 8) & 0xff; 8560 np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff; 8561 np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff; 8562 } 8563 np->vpd.board_model[val] = '\0'; 8564 8565 np->vpd.mac_num = 8566 nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL; 8567 niudbg(PROBE, "SPROM: NUM_PORTS_MACS[%d]\n", 8568 np->vpd.mac_num); 8569 8570 return 0; 8571} 8572 8573static int __devinit niu_get_and_validate_port(struct niu *np) 8574{ 8575 struct niu_parent *parent = np->parent; 8576 8577 if (np->port <= 1) 8578 np->flags |= NIU_FLAGS_XMAC; 8579 8580 if (!parent->num_ports) { 8581 if (parent->plat_type == PLAT_TYPE_NIU) { 8582 parent->num_ports = 2; 8583 } else { 8584 parent->num_ports = niu_pci_vpd_get_nports(np); 8585 if (!parent->num_ports) { 8586 /* Fall back to SPROM as last resort. 8587 * This will fail on most cards. 8588 */ 8589 parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) & 8590 ESPC_NUM_PORTS_MACS_VAL; 8591 8592 /* All of the current probing methods fail on 8593 * Maramba on-board parts. 8594 */ 8595 if (!parent->num_ports) 8596 parent->num_ports = 4; 8597 } 8598 } 8599 } 8600 8601 niudbg(PROBE, "niu_get_and_validate_port: port[%d] num_ports[%d]\n", 8602 np->port, parent->num_ports); 8603 if (np->port >= parent->num_ports) 8604 return -ENODEV; 8605 8606 return 0; 8607} 8608 8609static int __devinit phy_record(struct niu_parent *parent, 8610 struct phy_probe_info *p, 8611 int dev_id_1, int dev_id_2, u8 phy_port, 8612 int type) 8613{ 8614 u32 id = (dev_id_1 << 16) | dev_id_2; 8615 u8 idx; 8616 8617 if (dev_id_1 < 0 || dev_id_2 < 0) 8618 return 0; 8619 if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) { 8620 if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) && 8621 ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011) && 8622 ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8706)) 8623 return 0; 8624 } else { 8625 if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R) 8626 return 0; 8627 } 8628 8629 pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n", 8630 parent->index, id, 8631 (type == PHY_TYPE_PMA_PMD ? 8632 "PMA/PMD" : 8633 (type == PHY_TYPE_PCS ? 
8634 "PCS" : "MII")), 8635 phy_port); 8636 8637 if (p->cur[type] >= NIU_MAX_PORTS) { 8638 printk(KERN_ERR PFX "Too many PHY ports.\n"); 8639 return -EINVAL; 8640 } 8641 idx = p->cur[type]; 8642 p->phy_id[type][idx] = id; 8643 p->phy_port[type][idx] = phy_port; 8644 p->cur[type] = idx + 1; 8645 return 0; 8646} 8647 8648static int __devinit port_has_10g(struct phy_probe_info *p, int port) 8649{ 8650 int i; 8651 8652 for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) { 8653 if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port) 8654 return 1; 8655 } 8656 for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) { 8657 if (p->phy_port[PHY_TYPE_PCS][i] == port) 8658 return 1; 8659 } 8660 8661 return 0; 8662} 8663 8664static int __devinit count_10g_ports(struct phy_probe_info *p, int *lowest) 8665{ 8666 int port, cnt; 8667 8668 cnt = 0; 8669 *lowest = 32; 8670 for (port = 8; port < 32; port++) { 8671 if (port_has_10g(p, port)) { 8672 if (!cnt) 8673 *lowest = port; 8674 cnt++; 8675 } 8676 } 8677 8678 return cnt; 8679} 8680 8681static int __devinit count_1g_ports(struct phy_probe_info *p, int *lowest) 8682{ 8683 *lowest = 32; 8684 if (p->cur[PHY_TYPE_MII]) 8685 *lowest = p->phy_port[PHY_TYPE_MII][0]; 8686 8687 return p->cur[PHY_TYPE_MII]; 8688} 8689 8690static void __devinit niu_n2_divide_channels(struct niu_parent *parent) 8691{ 8692 int num_ports = parent->num_ports; 8693 int i; 8694 8695 for (i = 0; i < num_ports; i++) { 8696 parent->rxchan_per_port[i] = (16 / num_ports); 8697 parent->txchan_per_port[i] = (16 / num_ports); 8698 8699 pr_info(PFX "niu%d: Port %u [%u RX chans] " 8700 "[%u TX chans]\n", 8701 parent->index, i, 8702 parent->rxchan_per_port[i], 8703 parent->txchan_per_port[i]); 8704 } 8705} 8706 8707static void __devinit niu_divide_channels(struct niu_parent *parent, 8708 int num_10g, int num_1g) 8709{ 8710 int num_ports = parent->num_ports; 8711 int rx_chans_per_10g, rx_chans_per_1g; 8712 int tx_chans_per_10g, tx_chans_per_1g; 8713 int i, tot_rx, tot_tx; 8714 8715 if (!num_10g || !num_1g) { 8716 rx_chans_per_10g = rx_chans_per_1g = 8717 (NIU_NUM_RXCHAN / num_ports); 8718 tx_chans_per_10g = tx_chans_per_1g = 8719 (NIU_NUM_TXCHAN / num_ports); 8720 } else { 8721 rx_chans_per_1g = NIU_NUM_RXCHAN / 8; 8722 rx_chans_per_10g = (NIU_NUM_RXCHAN - 8723 (rx_chans_per_1g * num_1g)) / 8724 num_10g; 8725 8726 tx_chans_per_1g = NIU_NUM_TXCHAN / 6; 8727 tx_chans_per_10g = (NIU_NUM_TXCHAN - 8728 (tx_chans_per_1g * num_1g)) / 8729 num_10g; 8730 } 8731 8732 tot_rx = tot_tx = 0; 8733 for (i = 0; i < num_ports; i++) { 8734 int type = phy_decode(parent->port_phy, i); 8735 8736 if (type == PORT_TYPE_10G) { 8737 parent->rxchan_per_port[i] = rx_chans_per_10g; 8738 parent->txchan_per_port[i] = tx_chans_per_10g; 8739 } else { 8740 parent->rxchan_per_port[i] = rx_chans_per_1g; 8741 parent->txchan_per_port[i] = tx_chans_per_1g; 8742 } 8743 pr_info(PFX "niu%d: Port %u [%u RX chans] " 8744 "[%u TX chans]\n", 8745 parent->index, i, 8746 parent->rxchan_per_port[i], 8747 parent->txchan_per_port[i]); 8748 tot_rx += parent->rxchan_per_port[i]; 8749 tot_tx += parent->txchan_per_port[i]; 8750 } 8751 8752 if (tot_rx > NIU_NUM_RXCHAN) { 8753 printk(KERN_ERR PFX "niu%d: Too many RX channels (%d), " 8754 "resetting to one per port.\n", 8755 parent->index, tot_rx); 8756 for (i = 0; i < num_ports; i++) 8757 parent->rxchan_per_port[i] = 1; 8758 } 8759 if (tot_tx > NIU_NUM_TXCHAN) { 8760 printk(KERN_ERR PFX "niu%d: Too many TX channels (%d), " 8761 "resetting to one per port.\n", 8762 parent->index, tot_tx); 8763 for (i = 0; i < num_ports; i++) 8764 
parent->txchan_per_port[i] = 1; 8765 } 8766 if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) { 8767 printk(KERN_WARNING PFX "niu%d: Driver bug, wasted channels, " 8768 "RX[%d] TX[%d]\n", 8769 parent->index, tot_rx, tot_tx); 8770 } 8771} 8772 8773static void __devinit niu_divide_rdc_groups(struct niu_parent *parent, 8774 int num_10g, int num_1g) 8775{ 8776 int i, num_ports = parent->num_ports; 8777 int rdc_group, rdc_groups_per_port; 8778 int rdc_channel_base; 8779 8780 rdc_group = 0; 8781 rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports; 8782 8783 rdc_channel_base = 0; 8784 8785 for (i = 0; i < num_ports; i++) { 8786 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i]; 8787 int grp, num_channels = parent->rxchan_per_port[i]; 8788 int this_channel_offset; 8789 8790 tp->first_table_num = rdc_group; 8791 tp->num_tables = rdc_groups_per_port; 8792 this_channel_offset = 0; 8793 for (grp = 0; grp < tp->num_tables; grp++) { 8794 struct rdc_table *rt = &tp->tables[grp]; 8795 int slot; 8796 8797 pr_info(PFX "niu%d: Port %d RDC tbl(%d) [ ", 8798 parent->index, i, tp->first_table_num + grp); 8799 for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) { 8800 rt->rxdma_channel[slot] = 8801 rdc_channel_base + this_channel_offset; 8802 8803 printk("%d ", rt->rxdma_channel[slot]); 8804 8805 if (++this_channel_offset == num_channels) 8806 this_channel_offset = 0; 8807 } 8808 printk("]\n"); 8809 } 8810 8811 parent->rdc_default[i] = rdc_channel_base; 8812 8813 rdc_channel_base += num_channels; 8814 rdc_group += rdc_groups_per_port; 8815 } 8816} 8817 8818static int __devinit fill_phy_probe_info(struct niu *np, 8819 struct niu_parent *parent, 8820 struct phy_probe_info *info) 8821{ 8822 unsigned long flags; 8823 int port, err; 8824 8825 memset(info, 0, sizeof(*info)); 8826 8827 /* Port 0 to 7 are reserved for onboard Serdes, probe the rest. 
*/ 8828 niu_lock_parent(np, flags); 8829 err = 0; 8830 for (port = 8; port < 32; port++) { 8831 int dev_id_1, dev_id_2; 8832 8833 dev_id_1 = mdio_read(np, port, 8834 NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1); 8835 dev_id_2 = mdio_read(np, port, 8836 NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2); 8837 err = phy_record(parent, info, dev_id_1, dev_id_2, port, 8838 PHY_TYPE_PMA_PMD); 8839 if (err) 8840 break; 8841 dev_id_1 = mdio_read(np, port, 8842 NIU_PCS_DEV_ADDR, MII_PHYSID1); 8843 dev_id_2 = mdio_read(np, port, 8844 NIU_PCS_DEV_ADDR, MII_PHYSID2); 8845 err = phy_record(parent, info, dev_id_1, dev_id_2, port, 8846 PHY_TYPE_PCS); 8847 if (err) 8848 break; 8849 dev_id_1 = mii_read(np, port, MII_PHYSID1); 8850 dev_id_2 = mii_read(np, port, MII_PHYSID2); 8851 err = phy_record(parent, info, dev_id_1, dev_id_2, port, 8852 PHY_TYPE_MII); 8853 if (err) 8854 break; 8855 } 8856 niu_unlock_parent(np, flags); 8857 8858 return err; 8859} 8860 8861static int __devinit walk_phys(struct niu *np, struct niu_parent *parent) 8862{ 8863 struct phy_probe_info *info = &parent->phy_probe_info; 8864 int lowest_10g, lowest_1g; 8865 int num_10g, num_1g; 8866 u32 val; 8867 int err; 8868 8869 num_10g = num_1g = 0; 8870 8871 if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || 8872 !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { 8873 num_10g = 0; 8874 num_1g = 2; 8875 parent->plat_type = PLAT_TYPE_ATCA_CP3220; 8876 parent->num_ports = 4; 8877 val = (phy_encode(PORT_TYPE_1G, 0) | 8878 phy_encode(PORT_TYPE_1G, 1) | 8879 phy_encode(PORT_TYPE_1G, 2) | 8880 phy_encode(PORT_TYPE_1G, 3)); 8881 } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { 8882 num_10g = 2; 8883 num_1g = 0; 8884 parent->num_ports = 2; 8885 val = (phy_encode(PORT_TYPE_10G, 0) | 8886 phy_encode(PORT_TYPE_10G, 1)); 8887 } else if ((np->flags & NIU_FLAGS_XCVR_SERDES) && 8888 (parent->plat_type == PLAT_TYPE_NIU)) { 8889 /* this is the Monza case */ 8890 if (np->flags & NIU_FLAGS_10G) { 8891 val = (phy_encode(PORT_TYPE_10G, 0) | 8892 phy_encode(PORT_TYPE_10G, 1)); 8893 } else { 8894 val = (phy_encode(PORT_TYPE_1G, 0) | 8895 phy_encode(PORT_TYPE_1G, 1)); 8896 } 8897 } else { 8898 err = fill_phy_probe_info(np, parent, info); 8899 if (err) 8900 return err; 8901 8902 num_10g = count_10g_ports(info, &lowest_10g); 8903 num_1g = count_1g_ports(info, &lowest_1g); 8904 8905 switch ((num_10g << 4) | num_1g) { 8906 case 0x24: 8907 if (lowest_1g == 10) 8908 parent->plat_type = PLAT_TYPE_VF_P0; 8909 else if (lowest_1g == 26) 8910 parent->plat_type = PLAT_TYPE_VF_P1; 8911 else 8912 goto unknown_vg_1g_port; 8913 8914 /* fallthru */ 8915 case 0x22: 8916 val = (phy_encode(PORT_TYPE_10G, 0) | 8917 phy_encode(PORT_TYPE_10G, 1) | 8918 phy_encode(PORT_TYPE_1G, 2) | 8919 phy_encode(PORT_TYPE_1G, 3)); 8920 break; 8921 8922 case 0x20: 8923 val = (phy_encode(PORT_TYPE_10G, 0) | 8924 phy_encode(PORT_TYPE_10G, 1)); 8925 break; 8926 8927 case 0x10: 8928 val = phy_encode(PORT_TYPE_10G, np->port); 8929 break; 8930 8931 case 0x14: 8932 if (lowest_1g == 10) 8933 parent->plat_type = PLAT_TYPE_VF_P0; 8934 else if (lowest_1g == 26) 8935 parent->plat_type = PLAT_TYPE_VF_P1; 8936 else 8937 goto unknown_vg_1g_port; 8938 8939 /* fallthru */ 8940 case 0x13: 8941 if ((lowest_10g & 0x7) == 0) 8942 val = (phy_encode(PORT_TYPE_10G, 0) | 8943 phy_encode(PORT_TYPE_1G, 1) | 8944 phy_encode(PORT_TYPE_1G, 2) | 8945 phy_encode(PORT_TYPE_1G, 3)); 8946 else 8947 val = (phy_encode(PORT_TYPE_1G, 0) | 8948 phy_encode(PORT_TYPE_10G, 1) | 8949 phy_encode(PORT_TYPE_1G, 2) | 8950 phy_encode(PORT_TYPE_1G, 3)); 8951 break; 8952 8953 case 
0x04: 8954 if (lowest_1g == 10) 8955 parent->plat_type = PLAT_TYPE_VF_P0; 8956 else if (lowest_1g == 26) 8957 parent->plat_type = PLAT_TYPE_VF_P1; 8958 else 8959 goto unknown_vg_1g_port; 8960 8961 val = (phy_encode(PORT_TYPE_1G, 0) | 8962 phy_encode(PORT_TYPE_1G, 1) | 8963 phy_encode(PORT_TYPE_1G, 2) | 8964 phy_encode(PORT_TYPE_1G, 3)); 8965 break; 8966 8967 default: 8968 printk(KERN_ERR PFX "Unsupported port config " 8969 "10G[%d] 1G[%d]\n", 8970 num_10g, num_1g); 8971 return -EINVAL; 8972 } 8973 } 8974 8975 parent->port_phy = val; 8976 8977 if (parent->plat_type == PLAT_TYPE_NIU) 8978 niu_n2_divide_channels(parent); 8979 else 8980 niu_divide_channels(parent, num_10g, num_1g); 8981 8982 niu_divide_rdc_groups(parent, num_10g, num_1g); 8983 8984 return 0; 8985 8986unknown_vg_1g_port: 8987 printk(KERN_ERR PFX "Cannot identify platform type, 1gport=%d\n", 8988 lowest_1g); 8989 return -EINVAL; 8990} 8991 8992static int __devinit niu_probe_ports(struct niu *np) 8993{ 8994 struct niu_parent *parent = np->parent; 8995 int err, i; 8996 8997 niudbg(PROBE, "niu_probe_ports(): port_phy[%08x]\n", 8998 parent->port_phy); 8999 9000 if (parent->port_phy == PORT_PHY_UNKNOWN) { 9001 err = walk_phys(np, parent); 9002 if (err) 9003 return err; 9004 9005 niu_set_ldg_timer_res(np, 2); 9006 for (i = 0; i <= LDN_MAX; i++) 9007 niu_ldn_irq_enable(np, i, 0); 9008 } 9009 9010 if (parent->port_phy == PORT_PHY_INVALID) 9011 return -EINVAL; 9012 9013 return 0; 9014} 9015 9016static int __devinit niu_classifier_swstate_init(struct niu *np) 9017{ 9018 struct niu_classifier *cp = &np->clas; 9019 9020 niudbg(PROBE, "niu_classifier_swstate_init: num_tcam(%d)\n", 9021 np->parent->tcam_num_entries); 9022 9023 cp->tcam_top = (u16) np->port; 9024 cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports; 9025 cp->h1_init = 0xffffffff; 9026 cp->h2_init = 0xffff; 9027 9028 return fflp_early_init(np); 9029} 9030 9031static void __devinit niu_link_config_init(struct niu *np) 9032{ 9033 struct niu_link_config *lp = &np->link_config; 9034 9035 lp->advertising = (ADVERTISED_10baseT_Half | 9036 ADVERTISED_10baseT_Full | 9037 ADVERTISED_100baseT_Half | 9038 ADVERTISED_100baseT_Full | 9039 ADVERTISED_1000baseT_Half | 9040 ADVERTISED_1000baseT_Full | 9041 ADVERTISED_10000baseT_Full | 9042 ADVERTISED_Autoneg); 9043 lp->speed = lp->active_speed = SPEED_INVALID; 9044 lp->duplex = DUPLEX_FULL; 9045 lp->active_duplex = DUPLEX_INVALID; 9046 lp->autoneg = 1; 9047#if 0 9048 lp->loopback_mode = LOOPBACK_MAC; 9049 lp->active_speed = SPEED_10000; 9050 lp->active_duplex = DUPLEX_FULL; 9051#else 9052 lp->loopback_mode = LOOPBACK_DISABLED; 9053#endif 9054} 9055 9056static int __devinit niu_init_mac_ipp_pcs_base(struct niu *np) 9057{ 9058 switch (np->port) { 9059 case 0: 9060 np->mac_regs = np->regs + XMAC_PORT0_OFF; 9061 np->ipp_off = 0x00000; 9062 np->pcs_off = 0x04000; 9063 np->xpcs_off = 0x02000; 9064 break; 9065 9066 case 1: 9067 np->mac_regs = np->regs + XMAC_PORT1_OFF; 9068 np->ipp_off = 0x08000; 9069 np->pcs_off = 0x0a000; 9070 np->xpcs_off = 0x08000; 9071 break; 9072 9073 case 2: 9074 np->mac_regs = np->regs + BMAC_PORT2_OFF; 9075 np->ipp_off = 0x04000; 9076 np->pcs_off = 0x0e000; 9077 np->xpcs_off = ~0UL; 9078 break; 9079 9080 case 3: 9081 np->mac_regs = np->regs + BMAC_PORT3_OFF; 9082 np->ipp_off = 0x0c000; 9083 np->pcs_off = 0x12000; 9084 np->xpcs_off = ~0UL; 9085 break; 9086 9087 default: 9088 dev_err(np->device, PFX "Port %u is invalid, cannot " 9089 "compute MAC block offset.\n", np->port); 9090 return -EINVAL; 9091 } 9092 9093 
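	/* Ports 0 and 1 are served by the XMACs (which handle the 10G
	 * modes), ports 2 and 3 by the BMACs; the BMAC ports have no XPCS
	 * block, so xpcs_off is left at an unused ~0UL sentinel for them.
	 */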
return 0; 9094} 9095 9096static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map) 9097{ 9098 struct msix_entry msi_vec[NIU_NUM_LDG]; 9099 struct niu_parent *parent = np->parent; 9100 struct pci_dev *pdev = np->pdev; 9101 int i, num_irqs, err; 9102 u8 first_ldg; 9103 9104 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port; 9105 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++) 9106 ldg_num_map[i] = first_ldg + i; 9107 9108 num_irqs = (parent->rxchan_per_port[np->port] + 9109 parent->txchan_per_port[np->port] + 9110 (np->port == 0 ? 3 : 1)); 9111 BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports)); 9112 9113retry: 9114 for (i = 0; i < num_irqs; i++) { 9115 msi_vec[i].vector = 0; 9116 msi_vec[i].entry = i; 9117 } 9118 9119 err = pci_enable_msix(pdev, msi_vec, num_irqs); 9120 if (err < 0) { 9121 np->flags &= ~NIU_FLAGS_MSIX; 9122 return; 9123 } 9124 if (err > 0) { 9125 num_irqs = err; 9126 goto retry; 9127 } 9128 9129 np->flags |= NIU_FLAGS_MSIX; 9130 for (i = 0; i < num_irqs; i++) 9131 np->ldg[i].irq = msi_vec[i].vector; 9132 np->num_ldg = num_irqs; 9133} 9134 9135static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map) 9136{ 9137#ifdef CONFIG_SPARC64 9138 struct of_device *op = np->op; 9139 const u32 *int_prop; 9140 int i; 9141 9142 int_prop = of_get_property(op->node, "interrupts", NULL); 9143 if (!int_prop) 9144 return -ENODEV; 9145 9146 for (i = 0; i < op->num_irqs; i++) { 9147 ldg_num_map[i] = int_prop[i]; 9148 np->ldg[i].irq = op->irqs[i]; 9149 } 9150 9151 np->num_ldg = op->num_irqs; 9152 9153 return 0; 9154#else 9155 return -EINVAL; 9156#endif 9157} 9158 9159static int __devinit niu_ldg_init(struct niu *np) 9160{ 9161 struct niu_parent *parent = np->parent; 9162 u8 ldg_num_map[NIU_NUM_LDG]; 9163 int first_chan, num_chan; 9164 int i, err, ldg_rotor; 9165 u8 port; 9166 9167 np->num_ldg = 1; 9168 np->ldg[0].irq = np->dev->irq; 9169 if (parent->plat_type == PLAT_TYPE_NIU) { 9170 err = niu_n2_irq_init(np, ldg_num_map); 9171 if (err) 9172 return err; 9173 } else 9174 niu_try_msix(np, ldg_num_map); 9175 9176 port = np->port; 9177 for (i = 0; i < np->num_ldg; i++) { 9178 struct niu_ldg *lp = &np->ldg[i]; 9179 9180 netif_napi_add(np->dev, &lp->napi, niu_poll, 64); 9181 9182 lp->np = np; 9183 lp->ldg_num = ldg_num_map[i]; 9184 lp->timer = 2; /* XXX */ 9185 9186 /* On N2 NIU the firmware has setup the SID mappings so they go 9187 * to the correct values that will route the LDG to the proper 9188 * interrupt in the NCU interrupt table. 9189 */ 9190 if (np->parent->plat_type != PLAT_TYPE_NIU) { 9191 err = niu_set_ldg_sid(np, lp->ldg_num, port, i); 9192 if (err) 9193 return err; 9194 } 9195 } 9196 9197 /* We adopt the LDG assignment ordering used by the N2 NIU 9198 * 'interrupt' properties because that simplifies a lot of 9199 * things. 
 * This ordering is:
	 *
	 *	MAC
	 *	MIF	(if port zero)
	 *	SYSERR	(if port zero)
	 *	RX channels
	 *	TX channels
	 */

	ldg_rotor = 0;

	err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor],
				 LDN_MAC(port));
	if (err)
		return err;

	ldg_rotor++;
	if (ldg_rotor == np->num_ldg)
		ldg_rotor = 0;

	if (port == 0) {
		err = niu_ldg_assign_ldn(np, parent,
					 ldg_num_map[ldg_rotor],
					 LDN_MIF);
		if (err)
			return err;

		ldg_rotor++;
		if (ldg_rotor == np->num_ldg)
			ldg_rotor = 0;

		err = niu_ldg_assign_ldn(np, parent,
					 ldg_num_map[ldg_rotor],
					 LDN_DEVICE_ERROR);
		if (err)
			return err;

		ldg_rotor++;
		if (ldg_rotor == np->num_ldg)
			ldg_rotor = 0;

	}

	/* Sum the channel counts of all lower-numbered ports to find this
	 * port's first RX DMA channel, then spread our channels across the
	 * LDGs round-robin.
	 */
	first_chan = 0;
	for (i = 0; i < port; i++)
		first_chan += parent->rxchan_per_port[i];
	num_chan = parent->rxchan_per_port[port];

	for (i = first_chan; i < (first_chan + num_chan); i++) {
		err = niu_ldg_assign_ldn(np, parent,
					 ldg_num_map[ldg_rotor],
					 LDN_RXDMA(i));
		if (err)
			return err;
		ldg_rotor++;
		if (ldg_rotor == np->num_ldg)
			ldg_rotor = 0;
	}

	/* Likewise for this port's TX DMA channels. */
	first_chan = 0;
	for (i = 0; i < port; i++)
		first_chan += parent->txchan_per_port[i];
	num_chan = parent->txchan_per_port[port];
	for (i = first_chan; i < (first_chan + num_chan); i++) {
		err = niu_ldg_assign_ldn(np, parent,
					 ldg_num_map[ldg_rotor],
					 LDN_TXDMA(i));
		if (err)
			return err;
		ldg_rotor++;
		if (ldg_rotor == np->num_ldg)
			ldg_rotor = 0;
	}

	return 0;
}

static void __devexit niu_ldg_free(struct niu *np)
{
	if (np->flags & NIU_FLAGS_MSIX)
		pci_disable_msix(np->pdev);
}

static int __devinit niu_get_of_props(struct niu *np)
{
#ifdef CONFIG_SPARC64
	struct net_device *dev = np->dev;
	struct device_node *dp;
	const char *phy_type;
	const u8 *mac_addr;
	const char *model;
	int prop_len;

	if (np->parent->plat_type == PLAT_TYPE_NIU)
		dp = np->op->node;
	else
		dp = pci_device_to_OF_node(np->pdev);

	phy_type = of_get_property(dp, "phy-type", &prop_len);
	if (!phy_type) {
		dev_err(np->device, PFX "%s: OF node lacks "
			"phy-type property\n",
			dp->full_name);
		return -EINVAL;
	}

	if (!strcmp(phy_type, "none"))
		return -ENODEV;

	strcpy(np->vpd.phy_type, phy_type);

	if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
		dev_err(np->device, PFX "%s: Illegal phy string [%s].\n",
			dp->full_name, np->vpd.phy_type);
		return -EINVAL;
	}

	mac_addr = of_get_property(dp, "local-mac-address", &prop_len);
	if (!mac_addr) {
		dev_err(np->device, PFX "%s: OF node lacks "
			"local-mac-address property\n",
			dp->full_name);
		return -EINVAL;
	}
	if (prop_len != dev->addr_len) {
		dev_err(np->device, PFX "%s: OF MAC address prop len (%d) "
			"is wrong.\n",
			dp->full_name, prop_len);
	}
	memcpy(dev->perm_addr, mac_addr, dev->addr_len);
	if (!is_valid_ether_addr(&dev->perm_addr[0])) {
		int i;

		dev_err(np->device, PFX "%s: OF MAC address is invalid\n",
			dp->full_name);
		dev_err(np->device, PFX "%s: [ \n",
			dp->full_name);
		for (i = 0; i < 6; i++)
			printk("%02x ", dev->perm_addr[i]);
		printk("]\n");
		return -EINVAL;
	}

	memcpy(dev->dev_addr, dev->perm_addr,
dev->addr_len); 9343 9344 model = of_get_property(dp, "model", &prop_len); 9345 9346 if (model) 9347 strcpy(np->vpd.model, model); 9348 9349 return 0; 9350#else 9351 return -EINVAL; 9352#endif 9353} 9354 9355static int __devinit niu_get_invariants(struct niu *np) 9356{ 9357 int err, have_props; 9358 u32 offset; 9359 9360 err = niu_get_of_props(np); 9361 if (err == -ENODEV) 9362 return err; 9363 9364 have_props = !err; 9365 9366 err = niu_init_mac_ipp_pcs_base(np); 9367 if (err) 9368 return err; 9369 9370 if (have_props) { 9371 err = niu_get_and_validate_port(np); 9372 if (err) 9373 return err; 9374 9375 } else { 9376 if (np->parent->plat_type == PLAT_TYPE_NIU) 9377 return -EINVAL; 9378 9379 nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE); 9380 offset = niu_pci_vpd_offset(np); 9381 niudbg(PROBE, "niu_get_invariants: VPD offset [%08x]\n", 9382 offset); 9383 if (offset) 9384 niu_pci_vpd_fetch(np, offset); 9385 nw64(ESPC_PIO_EN, 0); 9386 9387 if (np->flags & NIU_FLAGS_VPD_VALID) { 9388 niu_pci_vpd_validate(np); 9389 err = niu_get_and_validate_port(np); 9390 if (err) 9391 return err; 9392 } 9393 9394 if (!(np->flags & NIU_FLAGS_VPD_VALID)) { 9395 err = niu_get_and_validate_port(np); 9396 if (err) 9397 return err; 9398 err = niu_pci_probe_sprom(np); 9399 if (err) 9400 return err; 9401 } 9402 } 9403 9404 err = niu_probe_ports(np); 9405 if (err) 9406 return err; 9407 9408 niu_ldg_init(np); 9409 9410 niu_classifier_swstate_init(np); 9411 niu_link_config_init(np); 9412 9413 err = niu_determine_phy_disposition(np); 9414 if (!err) 9415 err = niu_init_link(np); 9416 9417 return err; 9418} 9419 9420static LIST_HEAD(niu_parent_list); 9421static DEFINE_MUTEX(niu_parent_lock); 9422static int niu_parent_index; 9423 9424static ssize_t show_port_phy(struct device *dev, 9425 struct device_attribute *attr, char *buf) 9426{ 9427 struct platform_device *plat_dev = to_platform_device(dev); 9428 struct niu_parent *p = plat_dev->dev.platform_data; 9429 u32 port_phy = p->port_phy; 9430 char *orig_buf = buf; 9431 int i; 9432 9433 if (port_phy == PORT_PHY_UNKNOWN || 9434 port_phy == PORT_PHY_INVALID) 9435 return 0; 9436 9437 for (i = 0; i < p->num_ports; i++) { 9438 const char *type_str; 9439 int type; 9440 9441 type = phy_decode(port_phy, i); 9442 if (type == PORT_TYPE_10G) 9443 type_str = "10G"; 9444 else 9445 type_str = "1G"; 9446 buf += sprintf(buf, 9447 (i == 0) ? "%s" : " %s", 9448 type_str); 9449 } 9450 buf += sprintf(buf, "\n"); 9451 return buf - orig_buf; 9452} 9453 9454static ssize_t show_plat_type(struct device *dev, 9455 struct device_attribute *attr, char *buf) 9456{ 9457 struct platform_device *plat_dev = to_platform_device(dev); 9458 struct niu_parent *p = plat_dev->dev.platform_data; 9459 const char *type_str; 9460 9461 switch (p->plat_type) { 9462 case PLAT_TYPE_ATLAS: 9463 type_str = "atlas"; 9464 break; 9465 case PLAT_TYPE_NIU: 9466 type_str = "niu"; 9467 break; 9468 case PLAT_TYPE_VF_P0: 9469 type_str = "vf_p0"; 9470 break; 9471 case PLAT_TYPE_VF_P1: 9472 type_str = "vf_p1"; 9473 break; 9474 default: 9475 type_str = "unknown"; 9476 break; 9477 } 9478 9479 return sprintf(buf, "%s\n", type_str); 9480} 9481 9482static ssize_t __show_chan_per_port(struct device *dev, 9483 struct device_attribute *attr, char *buf, 9484 int rx) 9485{ 9486 struct platform_device *plat_dev = to_platform_device(dev); 9487 struct niu_parent *p = plat_dev->dev.platform_data; 9488 char *orig_buf = buf; 9489 u8 *arr; 9490 int i; 9491 9492 arr = (rx ? 
p->rxchan_per_port : p->txchan_per_port);

	for (i = 0; i < p->num_ports; i++) {
		buf += sprintf(buf,
			       (i == 0) ? "%d" : " %d",
			       arr[i]);
	}
	buf += sprintf(buf, "\n");

	return buf - orig_buf;
}

static ssize_t show_rxchan_per_port(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return __show_chan_per_port(dev, attr, buf, 1);
}

static ssize_t show_txchan_per_port(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return __show_chan_per_port(dev, attr, buf, 0);
}

static ssize_t show_num_ports(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = plat_dev->dev.platform_data;

	return sprintf(buf, "%d\n", p->num_ports);
}

/* Read-only sysfs attributes exported on the "niu" parent platform device;
 * a user-space reader sketch appears at the end of this file.
 */
static struct device_attribute niu_parent_attributes[] = {
	__ATTR(port_phy, S_IRUGO, show_port_phy, NULL),
	__ATTR(plat_type, S_IRUGO, show_plat_type, NULL),
	__ATTR(rxchan_per_port, S_IRUGO, show_rxchan_per_port, NULL),
	__ATTR(txchan_per_port, S_IRUGO, show_txchan_per_port, NULL),
	__ATTR(num_ports, S_IRUGO, show_num_ports, NULL),
	{}
};

static struct niu_parent * __devinit niu_new_parent(struct niu *np,
						    union niu_parent_id *id,
						    u8 ptype)
{
	struct platform_device *plat_dev;
	struct niu_parent *p;
	int i;

	niudbg(PROBE, "niu_new_parent: Creating new parent.\n");

	plat_dev = platform_device_register_simple("niu", niu_parent_index,
						   NULL, 0);
	if (IS_ERR(plat_dev))
		return NULL;

	for (i = 0; attr_name(niu_parent_attributes[i]); i++) {
		int err = device_create_file(&plat_dev->dev,
					     &niu_parent_attributes[i]);
		if (err)
			goto fail_unregister;
	}

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		goto fail_unregister;

	p->index = niu_parent_index++;

	plat_dev->dev.platform_data = p;
	p->plat_dev = plat_dev;

	memcpy(&p->id, id, sizeof(*id));
	p->plat_type = ptype;
	INIT_LIST_HEAD(&p->list);
	atomic_set(&p->refcnt, 0);
	list_add(&p->list, &niu_parent_list);
	spin_lock_init(&p->lock);

	p->rxdma_clock_divider = 7500;

	p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES;
	if (p->plat_type == PLAT_TYPE_NIU)
		p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES;

	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
		int index = i - CLASS_CODE_USER_PROG1;

		p->tcam_key[index] = TCAM_KEY_TSEL;
		p->flow_key[index] = (FLOW_KEY_IPSA |
				      FLOW_KEY_IPDA |
				      FLOW_KEY_PROTO |
				      (FLOW_KEY_L4_BYTE12 <<
				       FLOW_KEY_L4_0_SHIFT) |
				      (FLOW_KEY_L4_BYTE12 <<
				       FLOW_KEY_L4_1_SHIFT));
	}

	for (i = 0; i < LDN_MAX + 1; i++)
		p->ldg_map[i] = LDG_INVALID;

	return p;

fail_unregister:
	platform_device_unregister(plat_dev);
	return NULL;
}

static struct niu_parent * __devinit niu_get_parent(struct niu *np,
						    union niu_parent_id *id,
						    u8 ptype)
{
	struct niu_parent *p, *tmp;
	int port = np->port;

	niudbg(PROBE, "niu_get_parent: platform_type[%u] port[%u]\n",
	       ptype, port);

	mutex_lock(&niu_parent_lock);
	p = NULL;
	list_for_each_entry(tmp, &niu_parent_list, list) {
		if (!memcmp(id, &tmp->id, sizeof(*id))) {
			p = tmp;
			break;
		}
	}
	if (!p)
		p = niu_new_parent(np, id,
ptype); 9621 9622 if (p) { 9623 char port_name[6]; 9624 int err; 9625 9626 sprintf(port_name, "port%d", port); 9627 err = sysfs_create_link(&p->plat_dev->dev.kobj, 9628 &np->device->kobj, 9629 port_name); 9630 if (!err) { 9631 p->ports[port] = np; 9632 atomic_inc(&p->refcnt); 9633 } 9634 } 9635 mutex_unlock(&niu_parent_lock); 9636 9637 return p; 9638} 9639 9640static void niu_put_parent(struct niu *np) 9641{ 9642 struct niu_parent *p = np->parent; 9643 u8 port = np->port; 9644 char port_name[6]; 9645 9646 BUG_ON(!p || p->ports[port] != np); 9647 9648 niudbg(PROBE, "niu_put_parent: port[%u]\n", port); 9649 9650 sprintf(port_name, "port%d", port); 9651 9652 mutex_lock(&niu_parent_lock); 9653 9654 sysfs_remove_link(&p->plat_dev->dev.kobj, port_name); 9655 9656 p->ports[port] = NULL; 9657 np->parent = NULL; 9658 9659 if (atomic_dec_and_test(&p->refcnt)) { 9660 list_del(&p->list); 9661 platform_device_unregister(p->plat_dev); 9662 } 9663 9664 mutex_unlock(&niu_parent_lock); 9665} 9666 9667static void *niu_pci_alloc_coherent(struct device *dev, size_t size, 9668 u64 *handle, gfp_t flag) 9669{ 9670 dma_addr_t dh; 9671 void *ret; 9672 9673 ret = dma_alloc_coherent(dev, size, &dh, flag); 9674 if (ret) 9675 *handle = dh; 9676 return ret; 9677} 9678 9679static void niu_pci_free_coherent(struct device *dev, size_t size, 9680 void *cpu_addr, u64 handle) 9681{ 9682 dma_free_coherent(dev, size, cpu_addr, handle); 9683} 9684 9685static u64 niu_pci_map_page(struct device *dev, struct page *page, 9686 unsigned long offset, size_t size, 9687 enum dma_data_direction direction) 9688{ 9689 return dma_map_page(dev, page, offset, size, direction); 9690} 9691 9692static void niu_pci_unmap_page(struct device *dev, u64 dma_address, 9693 size_t size, enum dma_data_direction direction) 9694{ 9695 dma_unmap_page(dev, dma_address, size, direction); 9696} 9697 9698static u64 niu_pci_map_single(struct device *dev, void *cpu_addr, 9699 size_t size, 9700 enum dma_data_direction direction) 9701{ 9702 return dma_map_single(dev, cpu_addr, size, direction); 9703} 9704 9705static void niu_pci_unmap_single(struct device *dev, u64 dma_address, 9706 size_t size, 9707 enum dma_data_direction direction) 9708{ 9709 dma_unmap_single(dev, dma_address, size, direction); 9710} 9711 9712static const struct niu_ops niu_pci_ops = { 9713 .alloc_coherent = niu_pci_alloc_coherent, 9714 .free_coherent = niu_pci_free_coherent, 9715 .map_page = niu_pci_map_page, 9716 .unmap_page = niu_pci_unmap_page, 9717 .map_single = niu_pci_map_single, 9718 .unmap_single = niu_pci_unmap_single, 9719}; 9720 9721static void __devinit niu_driver_version(void) 9722{ 9723 static int niu_version_printed; 9724 9725 if (niu_version_printed++ == 0) 9726 pr_info("%s", version); 9727} 9728 9729static struct net_device * __devinit niu_alloc_and_init( 9730 struct device *gen_dev, struct pci_dev *pdev, 9731 struct of_device *op, const struct niu_ops *ops, 9732 u8 port) 9733{ 9734 struct net_device *dev; 9735 struct niu *np; 9736 9737 dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN); 9738 if (!dev) { 9739 dev_err(gen_dev, PFX "Etherdev alloc failed, aborting.\n"); 9740 return NULL; 9741 } 9742 9743 SET_NETDEV_DEV(dev, gen_dev); 9744 9745 np = netdev_priv(dev); 9746 np->dev = dev; 9747 np->pdev = pdev; 9748 np->op = op; 9749 np->device = gen_dev; 9750 np->ops = ops; 9751 9752 np->msg_enable = niu_debug; 9753 9754 spin_lock_init(&np->lock); 9755 INIT_WORK(&np->reset_task, niu_reset_task); 9756 9757 np->port = port; 9758 9759 return dev; 9760} 9761 9762static const 
struct net_device_ops niu_netdev_ops = { 9763 .ndo_open = niu_open, 9764 .ndo_stop = niu_close, 9765 .ndo_start_xmit = niu_start_xmit, 9766 .ndo_get_stats = niu_get_stats, 9767 .ndo_set_multicast_list = niu_set_rx_mode, 9768 .ndo_validate_addr = eth_validate_addr, 9769 .ndo_set_mac_address = niu_set_mac_addr, 9770 .ndo_do_ioctl = niu_ioctl, 9771 .ndo_tx_timeout = niu_tx_timeout, 9772 .ndo_change_mtu = niu_change_mtu, 9773}; 9774 9775static void __devinit niu_assign_netdev_ops(struct net_device *dev) 9776{ 9777 dev->netdev_ops = &niu_netdev_ops; 9778 dev->ethtool_ops = &niu_ethtool_ops; 9779 dev->watchdog_timeo = NIU_TX_TIMEOUT; 9780} 9781 9782static void __devinit niu_device_announce(struct niu *np) 9783{ 9784 struct net_device *dev = np->dev; 9785 9786 pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr); 9787 9788 if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) { 9789 pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n", 9790 dev->name, 9791 (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"), 9792 (np->flags & NIU_FLAGS_10G ? "10G" : "1G"), 9793 (np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"), 9794 (np->mac_xcvr == MAC_XCVR_MII ? "MII" : 9795 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")), 9796 np->vpd.phy_type); 9797 } else { 9798 pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n", 9799 dev->name, 9800 (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"), 9801 (np->flags & NIU_FLAGS_10G ? "10G" : "1G"), 9802 (np->flags & NIU_FLAGS_FIBER ? "FIBER" : 9803 (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" : 9804 "COPPER")), 9805 (np->mac_xcvr == MAC_XCVR_MII ? "MII" : 9806 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")), 9807 np->vpd.phy_type); 9808 } 9809} 9810 9811static int __devinit niu_pci_init_one(struct pci_dev *pdev, 9812 const struct pci_device_id *ent) 9813{ 9814 union niu_parent_id parent_id; 9815 struct net_device *dev; 9816 struct niu *np; 9817 int err, pos; 9818 u64 dma_mask; 9819 u16 val16; 9820 9821 niu_driver_version(); 9822 9823 err = pci_enable_device(pdev); 9824 if (err) { 9825 dev_err(&pdev->dev, PFX "Cannot enable PCI device, " 9826 "aborting.\n"); 9827 return err; 9828 } 9829 9830 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) || 9831 !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 9832 dev_err(&pdev->dev, PFX "Cannot find proper PCI device " 9833 "base addresses, aborting.\n"); 9834 err = -ENODEV; 9835 goto err_out_disable_pdev; 9836 } 9837 9838 err = pci_request_regions(pdev, DRV_MODULE_NAME); 9839 if (err) { 9840 dev_err(&pdev->dev, PFX "Cannot obtain PCI resources, " 9841 "aborting.\n"); 9842 goto err_out_disable_pdev; 9843 } 9844 9845 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 9846 if (pos <= 0) { 9847 dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, " 9848 "aborting.\n"); 9849 goto err_out_free_res; 9850 } 9851 9852 dev = niu_alloc_and_init(&pdev->dev, pdev, NULL, 9853 &niu_pci_ops, PCI_FUNC(pdev->devfn)); 9854 if (!dev) { 9855 err = -ENOMEM; 9856 goto err_out_free_res; 9857 } 9858 np = netdev_priv(dev); 9859 9860 memset(&parent_id, 0, sizeof(parent_id)); 9861 parent_id.pci.domain = pci_domain_nr(pdev->bus); 9862 parent_id.pci.bus = pdev->bus->number; 9863 parent_id.pci.device = PCI_SLOT(pdev->devfn); 9864 9865 np->parent = niu_get_parent(np, &parent_id, 9866 PLAT_TYPE_ATLAS); 9867 if (!np->parent) { 9868 err = -ENOMEM; 9869 goto err_out_free_dev; 9870 } 9871 9872 pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16); 9873 val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN; 9874 val16 |= (PCI_EXP_DEVCTL_CERE | 9875 
PCI_EXP_DEVCTL_NFERE | 9876 PCI_EXP_DEVCTL_FERE | 9877 PCI_EXP_DEVCTL_URRE | 9878 PCI_EXP_DEVCTL_RELAX_EN); 9879 pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16); 9880 9881 dma_mask = DMA_44BIT_MASK; 9882 err = pci_set_dma_mask(pdev, dma_mask); 9883 if (!err) { 9884 dev->features |= NETIF_F_HIGHDMA; 9885 err = pci_set_consistent_dma_mask(pdev, dma_mask); 9886 if (err) { 9887 dev_err(&pdev->dev, PFX "Unable to obtain 44 bit " 9888 "DMA for consistent allocations, " 9889 "aborting.\n"); 9890 goto err_out_release_parent; 9891 } 9892 } 9893 if (err || dma_mask == DMA_BIT_MASK(32)) { 9894 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 9895 if (err) { 9896 dev_err(&pdev->dev, PFX "No usable DMA configuration, " 9897 "aborting.\n"); 9898 goto err_out_release_parent; 9899 } 9900 } 9901 9902 dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM); 9903 9904 np->regs = pci_ioremap_bar(pdev, 0); 9905 if (!np->regs) { 9906 dev_err(&pdev->dev, PFX "Cannot map device registers, " 9907 "aborting.\n"); 9908 err = -ENOMEM; 9909 goto err_out_release_parent; 9910 } 9911 9912 pci_set_master(pdev); 9913 pci_save_state(pdev); 9914 9915 dev->irq = pdev->irq; 9916 9917 niu_assign_netdev_ops(dev); 9918 9919 err = niu_get_invariants(np); 9920 if (err) { 9921 if (err != -ENODEV) 9922 dev_err(&pdev->dev, PFX "Problem fetching invariants " 9923 "of chip, aborting.\n"); 9924 goto err_out_iounmap; 9925 } 9926 9927 err = register_netdev(dev); 9928 if (err) { 9929 dev_err(&pdev->dev, PFX "Cannot register net device, " 9930 "aborting.\n"); 9931 goto err_out_iounmap; 9932 } 9933 9934 pci_set_drvdata(pdev, dev); 9935 9936 niu_device_announce(np); 9937 9938 return 0; 9939 9940err_out_iounmap: 9941 if (np->regs) { 9942 iounmap(np->regs); 9943 np->regs = NULL; 9944 } 9945 9946err_out_release_parent: 9947 niu_put_parent(np); 9948 9949err_out_free_dev: 9950 free_netdev(dev); 9951 9952err_out_free_res: 9953 pci_release_regions(pdev); 9954 9955err_out_disable_pdev: 9956 pci_disable_device(pdev); 9957 pci_set_drvdata(pdev, NULL); 9958 9959 return err; 9960} 9961 9962static void __devexit niu_pci_remove_one(struct pci_dev *pdev) 9963{ 9964 struct net_device *dev = pci_get_drvdata(pdev); 9965 9966 if (dev) { 9967 struct niu *np = netdev_priv(dev); 9968 9969 unregister_netdev(dev); 9970 if (np->regs) { 9971 iounmap(np->regs); 9972 np->regs = NULL; 9973 } 9974 9975 niu_ldg_free(np); 9976 9977 niu_put_parent(np); 9978 9979 free_netdev(dev); 9980 pci_release_regions(pdev); 9981 pci_disable_device(pdev); 9982 pci_set_drvdata(pdev, NULL); 9983 } 9984} 9985 9986static int niu_suspend(struct pci_dev *pdev, pm_message_t state) 9987{ 9988 struct net_device *dev = pci_get_drvdata(pdev); 9989 struct niu *np = netdev_priv(dev); 9990 unsigned long flags; 9991 9992 if (!netif_running(dev)) 9993 return 0; 9994 9995 flush_scheduled_work(); 9996 niu_netif_stop(np); 9997 9998 del_timer_sync(&np->timer); 9999 10000 spin_lock_irqsave(&np->lock, flags); 10001 niu_enable_interrupts(np, 0); 10002 spin_unlock_irqrestore(&np->lock, flags); 10003 10004 netif_device_detach(dev); 10005 10006 spin_lock_irqsave(&np->lock, flags); 10007 niu_stop_hw(np); 10008 spin_unlock_irqrestore(&np->lock, flags); 10009 10010 pci_save_state(pdev); 10011 10012 return 0; 10013} 10014 10015static int niu_resume(struct pci_dev *pdev) 10016{ 10017 struct net_device *dev = pci_get_drvdata(pdev); 10018 struct niu *np = netdev_priv(dev); 10019 unsigned long flags; 10020 int err; 10021 10022 if (!netif_running(dev)) 10023 return 0; 10024 10025 pci_restore_state(pdev); 10026 10027 
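	/* Mirror niu_suspend() in reverse: the PCI config space was restored
	 * above, so reattach the net device, then re-initialize the hardware
	 * and restart the maintenance timer under np->lock.
	 */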
netif_device_attach(dev); 10028 10029 spin_lock_irqsave(&np->lock, flags); 10030 10031 err = niu_init_hw(np); 10032 if (!err) { 10033 np->timer.expires = jiffies + HZ; 10034 add_timer(&np->timer); 10035 niu_netif_start(np); 10036 } 10037 10038 spin_unlock_irqrestore(&np->lock, flags); 10039 10040 return err; 10041} 10042 10043static struct pci_driver niu_pci_driver = { 10044 .name = DRV_MODULE_NAME, 10045 .id_table = niu_pci_tbl, 10046 .probe = niu_pci_init_one, 10047 .remove = __devexit_p(niu_pci_remove_one), 10048 .suspend = niu_suspend, 10049 .resume = niu_resume, 10050}; 10051 10052#ifdef CONFIG_SPARC64 10053static void *niu_phys_alloc_coherent(struct device *dev, size_t size, 10054 u64 *dma_addr, gfp_t flag) 10055{ 10056 unsigned long order = get_order(size); 10057 unsigned long page = __get_free_pages(flag, order); 10058 10059 if (page == 0UL) 10060 return NULL; 10061 memset((char *)page, 0, PAGE_SIZE << order); 10062 *dma_addr = __pa(page); 10063 10064 return (void *) page; 10065} 10066 10067static void niu_phys_free_coherent(struct device *dev, size_t size, 10068 void *cpu_addr, u64 handle) 10069{ 10070 unsigned long order = get_order(size); 10071 10072 free_pages((unsigned long) cpu_addr, order); 10073} 10074 10075static u64 niu_phys_map_page(struct device *dev, struct page *page, 10076 unsigned long offset, size_t size, 10077 enum dma_data_direction direction) 10078{ 10079 return page_to_phys(page) + offset; 10080} 10081 10082static void niu_phys_unmap_page(struct device *dev, u64 dma_address, 10083 size_t size, enum dma_data_direction direction) 10084{ 10085 /* Nothing to do. */ 10086} 10087 10088static u64 niu_phys_map_single(struct device *dev, void *cpu_addr, 10089 size_t size, 10090 enum dma_data_direction direction) 10091{ 10092 return __pa(cpu_addr); 10093} 10094 10095static void niu_phys_unmap_single(struct device *dev, u64 dma_address, 10096 size_t size, 10097 enum dma_data_direction direction) 10098{ 10099 /* Nothing to do. 
*/ 10100} 10101 10102static const struct niu_ops niu_phys_ops = { 10103 .alloc_coherent = niu_phys_alloc_coherent, 10104 .free_coherent = niu_phys_free_coherent, 10105 .map_page = niu_phys_map_page, 10106 .unmap_page = niu_phys_unmap_page, 10107 .map_single = niu_phys_map_single, 10108 .unmap_single = niu_phys_unmap_single, 10109}; 10110 10111static unsigned long res_size(struct resource *r) 10112{ 10113 return r->end - r->start + 1UL; 10114} 10115 10116static int __devinit niu_of_probe(struct of_device *op, 10117 const struct of_device_id *match) 10118{ 10119 union niu_parent_id parent_id; 10120 struct net_device *dev; 10121 struct niu *np; 10122 const u32 *reg; 10123 int err; 10124 10125 niu_driver_version(); 10126 10127 reg = of_get_property(op->node, "reg", NULL); 10128 if (!reg) { 10129 dev_err(&op->dev, PFX "%s: No 'reg' property, aborting.\n", 10130 op->node->full_name); 10131 return -ENODEV; 10132 } 10133 10134 dev = niu_alloc_and_init(&op->dev, NULL, op, 10135 &niu_phys_ops, reg[0] & 0x1); 10136 if (!dev) { 10137 err = -ENOMEM; 10138 goto err_out; 10139 } 10140 np = netdev_priv(dev); 10141 10142 memset(&parent_id, 0, sizeof(parent_id)); 10143 parent_id.of = of_get_parent(op->node); 10144 10145 np->parent = niu_get_parent(np, &parent_id, 10146 PLAT_TYPE_NIU); 10147 if (!np->parent) { 10148 err = -ENOMEM; 10149 goto err_out_free_dev; 10150 } 10151 10152 dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM); 10153 10154 np->regs = of_ioremap(&op->resource[1], 0, 10155 res_size(&op->resource[1]), 10156 "niu regs"); 10157 if (!np->regs) { 10158 dev_err(&op->dev, PFX "Cannot map device registers, " 10159 "aborting.\n"); 10160 err = -ENOMEM; 10161 goto err_out_release_parent; 10162 } 10163 10164 np->vir_regs_1 = of_ioremap(&op->resource[2], 0, 10165 res_size(&op->resource[2]), 10166 "niu vregs-1"); 10167 if (!np->vir_regs_1) { 10168 dev_err(&op->dev, PFX "Cannot map device vir registers 1, " 10169 "aborting.\n"); 10170 err = -ENOMEM; 10171 goto err_out_iounmap; 10172 } 10173 10174 np->vir_regs_2 = of_ioremap(&op->resource[3], 0, 10175 res_size(&op->resource[3]), 10176 "niu vregs-2"); 10177 if (!np->vir_regs_2) { 10178 dev_err(&op->dev, PFX "Cannot map device vir registers 2, " 10179 "aborting.\n"); 10180 err = -ENOMEM; 10181 goto err_out_iounmap; 10182 } 10183 10184 niu_assign_netdev_ops(dev); 10185 10186 err = niu_get_invariants(np); 10187 if (err) { 10188 if (err != -ENODEV) 10189 dev_err(&op->dev, PFX "Problem fetching invariants " 10190 "of chip, aborting.\n"); 10191 goto err_out_iounmap; 10192 } 10193 10194 err = register_netdev(dev); 10195 if (err) { 10196 dev_err(&op->dev, PFX "Cannot register net device, " 10197 "aborting.\n"); 10198 goto err_out_iounmap; 10199 } 10200 10201 dev_set_drvdata(&op->dev, dev); 10202 10203 niu_device_announce(np); 10204 10205 return 0; 10206 10207err_out_iounmap: 10208 if (np->vir_regs_1) { 10209 of_iounmap(&op->resource[2], np->vir_regs_1, 10210 res_size(&op->resource[2])); 10211 np->vir_regs_1 = NULL; 10212 } 10213 10214 if (np->vir_regs_2) { 10215 of_iounmap(&op->resource[3], np->vir_regs_2, 10216 res_size(&op->resource[3])); 10217 np->vir_regs_2 = NULL; 10218 } 10219 10220 if (np->regs) { 10221 of_iounmap(&op->resource[1], np->regs, 10222 res_size(&op->resource[1])); 10223 np->regs = NULL; 10224 } 10225 10226err_out_release_parent: 10227 niu_put_parent(np); 10228 10229err_out_free_dev: 10230 free_netdev(dev); 10231 10232err_out: 10233 return err; 10234} 10235 10236static int __devexit niu_of_remove(struct of_device *op) 10237{ 10238 struct net_device 
*dev = dev_get_drvdata(&op->dev); 10239 10240 if (dev) { 10241 struct niu *np = netdev_priv(dev); 10242 10243 unregister_netdev(dev); 10244 10245 if (np->vir_regs_1) { 10246 of_iounmap(&op->resource[2], np->vir_regs_1, 10247 res_size(&op->resource[2])); 10248 np->vir_regs_1 = NULL; 10249 } 10250 10251 if (np->vir_regs_2) { 10252 of_iounmap(&op->resource[3], np->vir_regs_2, 10253 res_size(&op->resource[3])); 10254 np->vir_regs_2 = NULL; 10255 } 10256 10257 if (np->regs) { 10258 of_iounmap(&op->resource[1], np->regs, 10259 res_size(&op->resource[1])); 10260 np->regs = NULL; 10261 } 10262 10263 niu_ldg_free(np); 10264 10265 niu_put_parent(np); 10266 10267 free_netdev(dev); 10268 dev_set_drvdata(&op->dev, NULL); 10269 } 10270 return 0; 10271} 10272 10273static const struct of_device_id niu_match[] = { 10274 { 10275 .name = "network", 10276 .compatible = "SUNW,niusl", 10277 }, 10278 {}, 10279}; 10280MODULE_DEVICE_TABLE(of, niu_match); 10281 10282static struct of_platform_driver niu_of_driver = { 10283 .name = "niu", 10284 .match_table = niu_match, 10285 .probe = niu_of_probe, 10286 .remove = __devexit_p(niu_of_remove), 10287}; 10288 10289#endif /* CONFIG_SPARC64 */ 10290 10291static int __init niu_init(void) 10292{ 10293 int err = 0; 10294 10295 BUILD_BUG_ON(PAGE_SIZE < 4 * 1024); 10296 10297 niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT); 10298 10299#ifdef CONFIG_SPARC64 10300 err = of_register_driver(&niu_of_driver, &of_bus_type); 10301#endif 10302 10303 if (!err) { 10304 err = pci_register_driver(&niu_pci_driver); 10305#ifdef CONFIG_SPARC64 10306 if (err) 10307 of_unregister_driver(&niu_of_driver); 10308#endif 10309 } 10310 10311 return err; 10312} 10313 10314static void __exit niu_exit(void) 10315{ 10316 pci_unregister_driver(&niu_pci_driver); 10317#ifdef CONFIG_SPARC64 10318 of_unregister_driver(&niu_of_driver); 10319#endif 10320} 10321 10322module_init(niu_init); 10323module_exit(niu_exit);
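/*
 * Illustrative sketch (not part of the driver): the niu_parent_attributes
 * table above exposes read-only files on the "niu" parent platform device,
 * so the shared-parent configuration can be inspected from user space.  A
 * minimal reader might look like the code below; the exact sysfs path is an
 * assumption (platform devices normally appear under
 * /sys/bus/platform/devices/<name>.<id>/) and "niu.0" stands in for whatever
 * instance index niu_parent_index assigned on a given system.
 */
#if 0
#include <stdio.h>

/* Print one attribute file as "name value". */
static void show_attr(const char *dir, const char *name)
{
	char path[256], line[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", dir, name);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return;
	}
	if (fgets(line, sizeof(line), f))
		printf("%-20s %s", name, line);
	fclose(f);
}

int main(void)
{
	/* Hypothetical instance path; adjust the index to match the system. */
	const char *dir = "/sys/bus/platform/devices/niu.0";

	show_attr(dir, "plat_type");
	show_attr(dir, "num_ports");
	show_attr(dir, "port_phy");
	show_attr(dir, "rxchan_per_port");
	show_attr(dir, "txchan_per_port");
	return 0;
}
#endif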