Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
niu.c at 17431928194b36a0f88082df875e2e036da7fddf (10280 lines, 237 kB)
/* niu.c: Neptune ethernet driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/log2.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <linux/io.h>

#ifdef CONFIG_SPARC64
#include <linux/of_device.h>
#endif

#include "niu.h"

#define DRV_MODULE_NAME		"niu"
#define DRV_MODULE_VERSION	"1.1"
#define DRV_MODULE_RELDATE	"Apr 22, 2010"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("NIU ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#ifndef readq
static u64 readq(void __iomem *reg)
{
	return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
}

static void writeq(u64 val, void __iomem *reg)
{
	writel(val & 0xffffffff, reg);
	writel(val >> 32, reg + 0x4UL);
}
#endif

static DEFINE_PCI_DEVICE_TABLE(niu_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
	{}
};

MODULE_DEVICE_TABLE(pci, niu_pci_tbl);

#define NIU_TX_TIMEOUT			(5 * HZ)

#define nr64(reg)		readq(np->regs + (reg))
#define nw64(reg, val)		writeq((val), np->regs + (reg))

#define nr64_mac(reg)		readq(np->mac_regs + (reg))
#define nw64_mac(reg, val)	writeq((val), np->mac_regs + (reg))

#define nr64_ipp(reg)		readq(np->regs + np->ipp_off + (reg))
#define nw64_ipp(reg, val)	writeq((val), np->regs + np->ipp_off + (reg))

#define nr64_pcs(reg)		readq(np->regs + np->pcs_off + (reg))
#define nw64_pcs(reg, val)	writeq((val), np->regs + np->pcs_off + (reg))

#define nr64_xpcs(reg)		readq(np->regs + np->xpcs_off + (reg))
#define nw64_xpcs(reg, val)	writeq((val), np->regs + np->xpcs_off + (reg))

#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int niu_debug;
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "NIU debug level");

#define niu_lock_parent(np, flags) \
	spin_lock_irqsave(&np->parent->lock, flags)
#define niu_unlock_parent(np, flags) \
	spin_unlock_irqrestore(&np->parent->lock, flags)

static int serdes_init_10g_serdes(struct niu *np);

static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_mac(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;

	nw64_mac(reg, bits);
	err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64_mac(reg));
	return err;
}

#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_ipp(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;
	u64 val;

	val = nr64_ipp(reg);
	val |= bits;
	nw64_ipp(reg, val);

	err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64_ipp(reg));
	return err;
}

#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
				 u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \
})

static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
				    u64 bits, int limit, int delay,
				    const char *reg_name)
{
	int err;

	nw64(reg, bits);
	err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64(reg));
	return err;
}

#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
{
	u64 val = (u64) lp->timer;

	if (on)
		val |= LDG_IMGMT_ARM;

	nw64(LDG_IMGMT(lp->ldg_num), val);
}

static int niu_ldn_irq_enable(struct niu *np, int ldn, int on)
{
	unsigned long mask_reg, bits;
	u64 val;

	if (ldn < 0 || ldn > LDN_MAX)
		return -EINVAL;

	if (ldn < 64) {
		mask_reg = LD_IM0(ldn);
		bits = LD_IM0_MASK;
	} else {
		mask_reg = LD_IM1(ldn - 64);
		bits = LD_IM1_MASK;
	}

	val = nr64(mask_reg);
	if (on)
		val &= ~bits;
	else
		val |= bits;
	nw64(mask_reg, val);

	return 0;
}

static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on)
{
	struct niu_parent *parent = np->parent;
	int i;

	for (i = 0; i <= LDN_MAX; i++) {
		int err;

		if (parent->ldg_map[i] != lp->ldg_num)
			continue;

		err = niu_ldn_irq_enable(np, i, on);
		if (err)
			return err;
	}
	return 0;
}

static int niu_enable_interrupts(struct niu *np, int on)
{
	int i;

	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];
		int
err; 276 277 err = niu_enable_ldn_in_ldg(np, lp, on); 278 if (err) 279 return err; 280 } 281 for (i = 0; i < np->num_ldg; i++) 282 niu_ldg_rearm(np, &np->ldg[i], on); 283 284 return 0; 285} 286 287static u32 phy_encode(u32 type, int port) 288{ 289 return (type << (port * 2)); 290} 291 292static u32 phy_decode(u32 val, int port) 293{ 294 return (val >> (port * 2)) & PORT_TYPE_MASK; 295} 296 297static int mdio_wait(struct niu *np) 298{ 299 int limit = 1000; 300 u64 val; 301 302 while (--limit > 0) { 303 val = nr64(MIF_FRAME_OUTPUT); 304 if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1) 305 return val & MIF_FRAME_OUTPUT_DATA; 306 307 udelay(10); 308 } 309 310 return -ENODEV; 311} 312 313static int mdio_read(struct niu *np, int port, int dev, int reg) 314{ 315 int err; 316 317 nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg)); 318 err = mdio_wait(np); 319 if (err < 0) 320 return err; 321 322 nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev)); 323 return mdio_wait(np); 324} 325 326static int mdio_write(struct niu *np, int port, int dev, int reg, int data) 327{ 328 int err; 329 330 nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg)); 331 err = mdio_wait(np); 332 if (err < 0) 333 return err; 334 335 nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data)); 336 err = mdio_wait(np); 337 if (err < 0) 338 return err; 339 340 return 0; 341} 342 343static int mii_read(struct niu *np, int port, int reg) 344{ 345 nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg)); 346 return mdio_wait(np); 347} 348 349static int mii_write(struct niu *np, int port, int reg, int data) 350{ 351 int err; 352 353 nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data)); 354 err = mdio_wait(np); 355 if (err < 0) 356 return err; 357 358 return 0; 359} 360 361static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val) 362{ 363 int err; 364 365 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 366 ESR2_TI_PLL_TX_CFG_L(channel), 367 val & 0xffff); 368 if (!err) 369 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 370 ESR2_TI_PLL_TX_CFG_H(channel), 371 val >> 16); 372 return err; 373} 374 375static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val) 376{ 377 int err; 378 379 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 380 ESR2_TI_PLL_RX_CFG_L(channel), 381 val & 0xffff); 382 if (!err) 383 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 384 ESR2_TI_PLL_RX_CFG_H(channel), 385 val >> 16); 386 return err; 387} 388 389/* Mode is always 10G fiber. */ 390static int serdes_init_niu_10g_fiber(struct niu *np) 391{ 392 struct niu_link_config *lp = &np->link_config; 393 u32 tx_cfg, rx_cfg; 394 unsigned long i; 395 396 tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV); 397 rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT | 398 PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH | 399 PLL_RX_CFG_EQ_LP_ADAPTIVE); 400 401 if (lp->loopback_mode == LOOPBACK_PHY) { 402 u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS; 403 404 mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 405 ESR2_TI_PLL_TEST_CFG_L, test_cfg); 406 407 tx_cfg |= PLL_TX_CFG_ENTEST; 408 rx_cfg |= PLL_RX_CFG_ENTEST; 409 } 410 411 /* Initialize all 4 lanes of the SERDES. 
*/ 412 for (i = 0; i < 4; i++) { 413 int err = esr2_set_tx_cfg(np, i, tx_cfg); 414 if (err) 415 return err; 416 } 417 418 for (i = 0; i < 4; i++) { 419 int err = esr2_set_rx_cfg(np, i, rx_cfg); 420 if (err) 421 return err; 422 } 423 424 return 0; 425} 426 427static int serdes_init_niu_1g_serdes(struct niu *np) 428{ 429 struct niu_link_config *lp = &np->link_config; 430 u16 pll_cfg, pll_sts; 431 int max_retry = 100; 432 u64 uninitialized_var(sig), mask, val; 433 u32 tx_cfg, rx_cfg; 434 unsigned long i; 435 int err; 436 437 tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV | 438 PLL_TX_CFG_RATE_HALF); 439 rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT | 440 PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH | 441 PLL_RX_CFG_RATE_HALF); 442 443 if (np->port == 0) 444 rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE; 445 446 if (lp->loopback_mode == LOOPBACK_PHY) { 447 u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS; 448 449 mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 450 ESR2_TI_PLL_TEST_CFG_L, test_cfg); 451 452 tx_cfg |= PLL_TX_CFG_ENTEST; 453 rx_cfg |= PLL_RX_CFG_ENTEST; 454 } 455 456 /* Initialize PLL for 1G */ 457 pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X); 458 459 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 460 ESR2_TI_PLL_CFG_L, pll_cfg); 461 if (err) { 462 netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n", 463 np->port, __func__); 464 return err; 465 } 466 467 pll_sts = PLL_CFG_ENPLL; 468 469 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 470 ESR2_TI_PLL_STS_L, pll_sts); 471 if (err) { 472 netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n", 473 np->port, __func__); 474 return err; 475 } 476 477 udelay(200); 478 479 /* Initialize all 4 lanes of the SERDES. */ 480 for (i = 0; i < 4; i++) { 481 err = esr2_set_tx_cfg(np, i, tx_cfg); 482 if (err) 483 return err; 484 } 485 486 for (i = 0; i < 4; i++) { 487 err = esr2_set_rx_cfg(np, i, rx_cfg); 488 if (err) 489 return err; 490 } 491 492 switch (np->port) { 493 case 0: 494 val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0); 495 mask = val; 496 break; 497 498 case 1: 499 val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1); 500 mask = val; 501 break; 502 503 default: 504 return -EINVAL; 505 } 506 507 while (max_retry--) { 508 sig = nr64(ESR_INT_SIGNALS); 509 if ((sig & mask) == val) 510 break; 511 512 mdelay(500); 513 } 514 515 if ((sig & mask) != val) { 516 netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", 517 np->port, (int)(sig & mask), (int)val); 518 return -ENODEV; 519 } 520 521 return 0; 522} 523 524static int serdes_init_niu_10g_serdes(struct niu *np) 525{ 526 struct niu_link_config *lp = &np->link_config; 527 u32 tx_cfg, rx_cfg, pll_cfg, pll_sts; 528 int max_retry = 100; 529 u64 uninitialized_var(sig), mask, val; 530 unsigned long i; 531 int err; 532 533 tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV); 534 rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT | 535 PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH | 536 PLL_RX_CFG_EQ_LP_ADAPTIVE); 537 538 if (lp->loopback_mode == LOOPBACK_PHY) { 539 u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS; 540 541 mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 542 ESR2_TI_PLL_TEST_CFG_L, test_cfg); 543 544 tx_cfg |= PLL_TX_CFG_ENTEST; 545 rx_cfg |= PLL_RX_CFG_ENTEST; 546 } 547 548 /* Initialize PLL for 10G */ 549 pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X); 550 551 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 552 ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff); 553 if (err) { 554 netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L 
failed\n", 555 np->port, __func__); 556 return err; 557 } 558 559 pll_sts = PLL_CFG_ENPLL; 560 561 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 562 ESR2_TI_PLL_STS_L, pll_sts & 0xffff); 563 if (err) { 564 netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n", 565 np->port, __func__); 566 return err; 567 } 568 569 udelay(200); 570 571 /* Initialize all 4 lanes of the SERDES. */ 572 for (i = 0; i < 4; i++) { 573 err = esr2_set_tx_cfg(np, i, tx_cfg); 574 if (err) 575 return err; 576 } 577 578 for (i = 0; i < 4; i++) { 579 err = esr2_set_rx_cfg(np, i, rx_cfg); 580 if (err) 581 return err; 582 } 583 584 /* check if serdes is ready */ 585 586 switch (np->port) { 587 case 0: 588 mask = ESR_INT_SIGNALS_P0_BITS; 589 val = (ESR_INT_SRDY0_P0 | 590 ESR_INT_DET0_P0 | 591 ESR_INT_XSRDY_P0 | 592 ESR_INT_XDP_P0_CH3 | 593 ESR_INT_XDP_P0_CH2 | 594 ESR_INT_XDP_P0_CH1 | 595 ESR_INT_XDP_P0_CH0); 596 break; 597 598 case 1: 599 mask = ESR_INT_SIGNALS_P1_BITS; 600 val = (ESR_INT_SRDY0_P1 | 601 ESR_INT_DET0_P1 | 602 ESR_INT_XSRDY_P1 | 603 ESR_INT_XDP_P1_CH3 | 604 ESR_INT_XDP_P1_CH2 | 605 ESR_INT_XDP_P1_CH1 | 606 ESR_INT_XDP_P1_CH0); 607 break; 608 609 default: 610 return -EINVAL; 611 } 612 613 while (max_retry--) { 614 sig = nr64(ESR_INT_SIGNALS); 615 if ((sig & mask) == val) 616 break; 617 618 mdelay(500); 619 } 620 621 if ((sig & mask) != val) { 622 pr_info("NIU Port %u signal bits [%08x] are not [%08x] for 10G...trying 1G\n", 623 np->port, (int)(sig & mask), (int)val); 624 625 /* 10G failed, try initializing at 1G */ 626 err = serdes_init_niu_1g_serdes(np); 627 if (!err) { 628 np->flags &= ~NIU_FLAGS_10G; 629 np->mac_xcvr = MAC_XCVR_PCS; 630 } else { 631 netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n", 632 np->port); 633 return -ENODEV; 634 } 635 } 636 return 0; 637} 638 639static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val) 640{ 641 int err; 642 643 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan)); 644 if (err >= 0) { 645 *val = (err & 0xffff); 646 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, 647 ESR_RXTX_CTRL_H(chan)); 648 if (err >= 0) 649 *val |= ((err & 0xffff) << 16); 650 err = 0; 651 } 652 return err; 653} 654 655static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val) 656{ 657 int err; 658 659 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, 660 ESR_GLUE_CTRL0_L(chan)); 661 if (err >= 0) { 662 *val = (err & 0xffff); 663 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, 664 ESR_GLUE_CTRL0_H(chan)); 665 if (err >= 0) { 666 *val |= ((err & 0xffff) << 16); 667 err = 0; 668 } 669 } 670 return err; 671} 672 673static int esr_read_reset(struct niu *np, u32 *val) 674{ 675 int err; 676 677 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, 678 ESR_RXTX_RESET_CTRL_L); 679 if (err >= 0) { 680 *val = (err & 0xffff); 681 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, 682 ESR_RXTX_RESET_CTRL_H); 683 if (err >= 0) { 684 *val |= ((err & 0xffff) << 16); 685 err = 0; 686 } 687 } 688 return err; 689} 690 691static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val) 692{ 693 int err; 694 695 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, 696 ESR_RXTX_CTRL_L(chan), val & 0xffff); 697 if (!err) 698 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, 699 ESR_RXTX_CTRL_H(chan), (val >> 16)); 700 return err; 701} 702 703static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val) 704{ 705 int err; 706 707 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, 708 ESR_GLUE_CTRL0_L(chan), val & 0xffff); 709 if 
(!err) 710 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, 711 ESR_GLUE_CTRL0_H(chan), (val >> 16)); 712 return err; 713} 714 715static int esr_reset(struct niu *np) 716{ 717 u32 uninitialized_var(reset); 718 int err; 719 720 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, 721 ESR_RXTX_RESET_CTRL_L, 0x0000); 722 if (err) 723 return err; 724 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, 725 ESR_RXTX_RESET_CTRL_H, 0xffff); 726 if (err) 727 return err; 728 udelay(200); 729 730 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, 731 ESR_RXTX_RESET_CTRL_L, 0xffff); 732 if (err) 733 return err; 734 udelay(200); 735 736 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, 737 ESR_RXTX_RESET_CTRL_H, 0x0000); 738 if (err) 739 return err; 740 udelay(200); 741 742 err = esr_read_reset(np, &reset); 743 if (err) 744 return err; 745 if (reset != 0) { 746 netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n", 747 np->port, reset); 748 return -ENODEV; 749 } 750 751 return 0; 752} 753 754static int serdes_init_10g(struct niu *np) 755{ 756 struct niu_link_config *lp = &np->link_config; 757 unsigned long ctrl_reg, test_cfg_reg, i; 758 u64 ctrl_val, test_cfg_val, sig, mask, val; 759 int err; 760 761 switch (np->port) { 762 case 0: 763 ctrl_reg = ENET_SERDES_0_CTRL_CFG; 764 test_cfg_reg = ENET_SERDES_0_TEST_CFG; 765 break; 766 case 1: 767 ctrl_reg = ENET_SERDES_1_CTRL_CFG; 768 test_cfg_reg = ENET_SERDES_1_TEST_CFG; 769 break; 770 771 default: 772 return -EINVAL; 773 } 774 ctrl_val = (ENET_SERDES_CTRL_SDET_0 | 775 ENET_SERDES_CTRL_SDET_1 | 776 ENET_SERDES_CTRL_SDET_2 | 777 ENET_SERDES_CTRL_SDET_3 | 778 (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) | 779 (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) | 780 (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) | 781 (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) | 782 (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) | 783 (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) | 784 (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) | 785 (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT)); 786 test_cfg_val = 0; 787 788 if (lp->loopback_mode == LOOPBACK_PHY) { 789 test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK << 790 ENET_SERDES_TEST_MD_0_SHIFT) | 791 (ENET_TEST_MD_PAD_LOOPBACK << 792 ENET_SERDES_TEST_MD_1_SHIFT) | 793 (ENET_TEST_MD_PAD_LOOPBACK << 794 ENET_SERDES_TEST_MD_2_SHIFT) | 795 (ENET_TEST_MD_PAD_LOOPBACK << 796 ENET_SERDES_TEST_MD_3_SHIFT)); 797 } 798 799 nw64(ctrl_reg, ctrl_val); 800 nw64(test_cfg_reg, test_cfg_val); 801 802 /* Initialize all 4 lanes of the SERDES. 
*/ 803 for (i = 0; i < 4; i++) { 804 u32 rxtx_ctrl, glue0; 805 806 err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl); 807 if (err) 808 return err; 809 err = esr_read_glue0(np, i, &glue0); 810 if (err) 811 return err; 812 813 rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO); 814 rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH | 815 (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT)); 816 817 glue0 &= ~(ESR_GLUE_CTRL0_SRATE | 818 ESR_GLUE_CTRL0_THCNT | 819 ESR_GLUE_CTRL0_BLTIME); 820 glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB | 821 (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) | 822 (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) | 823 (BLTIME_300_CYCLES << 824 ESR_GLUE_CTRL0_BLTIME_SHIFT)); 825 826 err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl); 827 if (err) 828 return err; 829 err = esr_write_glue0(np, i, glue0); 830 if (err) 831 return err; 832 } 833 834 err = esr_reset(np); 835 if (err) 836 return err; 837 838 sig = nr64(ESR_INT_SIGNALS); 839 switch (np->port) { 840 case 0: 841 mask = ESR_INT_SIGNALS_P0_BITS; 842 val = (ESR_INT_SRDY0_P0 | 843 ESR_INT_DET0_P0 | 844 ESR_INT_XSRDY_P0 | 845 ESR_INT_XDP_P0_CH3 | 846 ESR_INT_XDP_P0_CH2 | 847 ESR_INT_XDP_P0_CH1 | 848 ESR_INT_XDP_P0_CH0); 849 break; 850 851 case 1: 852 mask = ESR_INT_SIGNALS_P1_BITS; 853 val = (ESR_INT_SRDY0_P1 | 854 ESR_INT_DET0_P1 | 855 ESR_INT_XSRDY_P1 | 856 ESR_INT_XDP_P1_CH3 | 857 ESR_INT_XDP_P1_CH2 | 858 ESR_INT_XDP_P1_CH1 | 859 ESR_INT_XDP_P1_CH0); 860 break; 861 862 default: 863 return -EINVAL; 864 } 865 866 if ((sig & mask) != val) { 867 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { 868 np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; 869 return 0; 870 } 871 netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", 872 np->port, (int)(sig & mask), (int)val); 873 return -ENODEV; 874 } 875 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) 876 np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT; 877 return 0; 878} 879 880static int serdes_init_1g(struct niu *np) 881{ 882 u64 val; 883 884 val = nr64(ENET_SERDES_1_PLL_CFG); 885 val &= ~ENET_SERDES_PLL_FBDIV2; 886 switch (np->port) { 887 case 0: 888 val |= ENET_SERDES_PLL_HRATE0; 889 break; 890 case 1: 891 val |= ENET_SERDES_PLL_HRATE1; 892 break; 893 case 2: 894 val |= ENET_SERDES_PLL_HRATE2; 895 break; 896 case 3: 897 val |= ENET_SERDES_PLL_HRATE3; 898 break; 899 default: 900 return -EINVAL; 901 } 902 nw64(ENET_SERDES_1_PLL_CFG, val); 903 904 return 0; 905} 906 907static int serdes_init_1g_serdes(struct niu *np) 908{ 909 struct niu_link_config *lp = &np->link_config; 910 unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i; 911 u64 ctrl_val, test_cfg_val, sig, mask, val; 912 int err; 913 u64 reset_val, val_rd; 914 915 val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 | 916 ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 | 917 ENET_SERDES_PLL_FBDIV0; 918 switch (np->port) { 919 case 0: 920 reset_val = ENET_SERDES_RESET_0; 921 ctrl_reg = ENET_SERDES_0_CTRL_CFG; 922 test_cfg_reg = ENET_SERDES_0_TEST_CFG; 923 pll_cfg = ENET_SERDES_0_PLL_CFG; 924 break; 925 case 1: 926 reset_val = ENET_SERDES_RESET_1; 927 ctrl_reg = ENET_SERDES_1_CTRL_CFG; 928 test_cfg_reg = ENET_SERDES_1_TEST_CFG; 929 pll_cfg = ENET_SERDES_1_PLL_CFG; 930 break; 931 932 default: 933 return -EINVAL; 934 } 935 ctrl_val = (ENET_SERDES_CTRL_SDET_0 | 936 ENET_SERDES_CTRL_SDET_1 | 937 ENET_SERDES_CTRL_SDET_2 | 938 ENET_SERDES_CTRL_SDET_3 | 939 (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) | 940 (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) | 941 (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) | 942 (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) | 943 (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) | 944 (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) | 945 (0x1 
<< ENET_SERDES_CTRL_LADJ_2_SHIFT) | 946 (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT)); 947 test_cfg_val = 0; 948 949 if (lp->loopback_mode == LOOPBACK_PHY) { 950 test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK << 951 ENET_SERDES_TEST_MD_0_SHIFT) | 952 (ENET_TEST_MD_PAD_LOOPBACK << 953 ENET_SERDES_TEST_MD_1_SHIFT) | 954 (ENET_TEST_MD_PAD_LOOPBACK << 955 ENET_SERDES_TEST_MD_2_SHIFT) | 956 (ENET_TEST_MD_PAD_LOOPBACK << 957 ENET_SERDES_TEST_MD_3_SHIFT)); 958 } 959 960 nw64(ENET_SERDES_RESET, reset_val); 961 mdelay(20); 962 val_rd = nr64(ENET_SERDES_RESET); 963 val_rd &= ~reset_val; 964 nw64(pll_cfg, val); 965 nw64(ctrl_reg, ctrl_val); 966 nw64(test_cfg_reg, test_cfg_val); 967 nw64(ENET_SERDES_RESET, val_rd); 968 mdelay(2000); 969 970 /* Initialize all 4 lanes of the SERDES. */ 971 for (i = 0; i < 4; i++) { 972 u32 rxtx_ctrl, glue0; 973 974 err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl); 975 if (err) 976 return err; 977 err = esr_read_glue0(np, i, &glue0); 978 if (err) 979 return err; 980 981 rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO); 982 rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH | 983 (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT)); 984 985 glue0 &= ~(ESR_GLUE_CTRL0_SRATE | 986 ESR_GLUE_CTRL0_THCNT | 987 ESR_GLUE_CTRL0_BLTIME); 988 glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB | 989 (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) | 990 (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) | 991 (BLTIME_300_CYCLES << 992 ESR_GLUE_CTRL0_BLTIME_SHIFT)); 993 994 err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl); 995 if (err) 996 return err; 997 err = esr_write_glue0(np, i, glue0); 998 if (err) 999 return err; 1000 } 1001 1002 1003 sig = nr64(ESR_INT_SIGNALS); 1004 switch (np->port) { 1005 case 0: 1006 val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0); 1007 mask = val; 1008 break; 1009 1010 case 1: 1011 val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1); 1012 mask = val; 1013 break; 1014 1015 default: 1016 return -EINVAL; 1017 } 1018 1019 if ((sig & mask) != val) { 1020 netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", 1021 np->port, (int)(sig & mask), (int)val); 1022 return -ENODEV; 1023 } 1024 1025 return 0; 1026} 1027 1028static int link_status_1g_serdes(struct niu *np, int *link_up_p) 1029{ 1030 struct niu_link_config *lp = &np->link_config; 1031 int link_up; 1032 u64 val; 1033 u16 current_speed; 1034 unsigned long flags; 1035 u8 current_duplex; 1036 1037 link_up = 0; 1038 current_speed = SPEED_INVALID; 1039 current_duplex = DUPLEX_INVALID; 1040 1041 spin_lock_irqsave(&np->lock, flags); 1042 1043 val = nr64_pcs(PCS_MII_STAT); 1044 1045 if (val & PCS_MII_STAT_LINK_STATUS) { 1046 link_up = 1; 1047 current_speed = SPEED_1000; 1048 current_duplex = DUPLEX_FULL; 1049 } 1050 1051 lp->active_speed = current_speed; 1052 lp->active_duplex = current_duplex; 1053 spin_unlock_irqrestore(&np->lock, flags); 1054 1055 *link_up_p = link_up; 1056 return 0; 1057} 1058 1059static int link_status_10g_serdes(struct niu *np, int *link_up_p) 1060{ 1061 unsigned long flags; 1062 struct niu_link_config *lp = &np->link_config; 1063 int link_up = 0; 1064 int link_ok = 1; 1065 u64 val, val2; 1066 u16 current_speed; 1067 u8 current_duplex; 1068 1069 if (!(np->flags & NIU_FLAGS_10G)) 1070 return link_status_1g_serdes(np, link_up_p); 1071 1072 current_speed = SPEED_INVALID; 1073 current_duplex = DUPLEX_INVALID; 1074 spin_lock_irqsave(&np->lock, flags); 1075 1076 val = nr64_xpcs(XPCS_STATUS(0)); 1077 val2 = nr64_mac(XMAC_INTER2); 1078 if (val2 & 0x01000000) 1079 link_ok = 0; 1080 1081 if ((val & 0x1000ULL) && link_ok) { 1082 link_up = 1; 1083 current_speed = SPEED_10000; 1084 current_duplex = 
DUPLEX_FULL; 1085 } 1086 lp->active_speed = current_speed; 1087 lp->active_duplex = current_duplex; 1088 spin_unlock_irqrestore(&np->lock, flags); 1089 *link_up_p = link_up; 1090 return 0; 1091} 1092 1093static int link_status_mii(struct niu *np, int *link_up_p) 1094{ 1095 struct niu_link_config *lp = &np->link_config; 1096 int err; 1097 int bmsr, advert, ctrl1000, stat1000, lpa, bmcr, estatus; 1098 int supported, advertising, active_speed, active_duplex; 1099 1100 err = mii_read(np, np->phy_addr, MII_BMCR); 1101 if (unlikely(err < 0)) 1102 return err; 1103 bmcr = err; 1104 1105 err = mii_read(np, np->phy_addr, MII_BMSR); 1106 if (unlikely(err < 0)) 1107 return err; 1108 bmsr = err; 1109 1110 err = mii_read(np, np->phy_addr, MII_ADVERTISE); 1111 if (unlikely(err < 0)) 1112 return err; 1113 advert = err; 1114 1115 err = mii_read(np, np->phy_addr, MII_LPA); 1116 if (unlikely(err < 0)) 1117 return err; 1118 lpa = err; 1119 1120 if (likely(bmsr & BMSR_ESTATEN)) { 1121 err = mii_read(np, np->phy_addr, MII_ESTATUS); 1122 if (unlikely(err < 0)) 1123 return err; 1124 estatus = err; 1125 1126 err = mii_read(np, np->phy_addr, MII_CTRL1000); 1127 if (unlikely(err < 0)) 1128 return err; 1129 ctrl1000 = err; 1130 1131 err = mii_read(np, np->phy_addr, MII_STAT1000); 1132 if (unlikely(err < 0)) 1133 return err; 1134 stat1000 = err; 1135 } else 1136 estatus = ctrl1000 = stat1000 = 0; 1137 1138 supported = 0; 1139 if (bmsr & BMSR_ANEGCAPABLE) 1140 supported |= SUPPORTED_Autoneg; 1141 if (bmsr & BMSR_10HALF) 1142 supported |= SUPPORTED_10baseT_Half; 1143 if (bmsr & BMSR_10FULL) 1144 supported |= SUPPORTED_10baseT_Full; 1145 if (bmsr & BMSR_100HALF) 1146 supported |= SUPPORTED_100baseT_Half; 1147 if (bmsr & BMSR_100FULL) 1148 supported |= SUPPORTED_100baseT_Full; 1149 if (estatus & ESTATUS_1000_THALF) 1150 supported |= SUPPORTED_1000baseT_Half; 1151 if (estatus & ESTATUS_1000_TFULL) 1152 supported |= SUPPORTED_1000baseT_Full; 1153 lp->supported = supported; 1154 1155 advertising = 0; 1156 if (advert & ADVERTISE_10HALF) 1157 advertising |= ADVERTISED_10baseT_Half; 1158 if (advert & ADVERTISE_10FULL) 1159 advertising |= ADVERTISED_10baseT_Full; 1160 if (advert & ADVERTISE_100HALF) 1161 advertising |= ADVERTISED_100baseT_Half; 1162 if (advert & ADVERTISE_100FULL) 1163 advertising |= ADVERTISED_100baseT_Full; 1164 if (ctrl1000 & ADVERTISE_1000HALF) 1165 advertising |= ADVERTISED_1000baseT_Half; 1166 if (ctrl1000 & ADVERTISE_1000FULL) 1167 advertising |= ADVERTISED_1000baseT_Full; 1168 1169 if (bmcr & BMCR_ANENABLE) { 1170 int neg, neg1000; 1171 1172 lp->active_autoneg = 1; 1173 advertising |= ADVERTISED_Autoneg; 1174 1175 neg = advert & lpa; 1176 neg1000 = (ctrl1000 << 2) & stat1000; 1177 1178 if (neg1000 & (LPA_1000FULL | LPA_1000HALF)) 1179 active_speed = SPEED_1000; 1180 else if (neg & LPA_100) 1181 active_speed = SPEED_100; 1182 else if (neg & (LPA_10HALF | LPA_10FULL)) 1183 active_speed = SPEED_10; 1184 else 1185 active_speed = SPEED_INVALID; 1186 1187 if ((neg1000 & LPA_1000FULL) || (neg & LPA_DUPLEX)) 1188 active_duplex = DUPLEX_FULL; 1189 else if (active_speed != SPEED_INVALID) 1190 active_duplex = DUPLEX_HALF; 1191 else 1192 active_duplex = DUPLEX_INVALID; 1193 } else { 1194 lp->active_autoneg = 0; 1195 1196 if ((bmcr & BMCR_SPEED1000) && !(bmcr & BMCR_SPEED100)) 1197 active_speed = SPEED_1000; 1198 else if (bmcr & BMCR_SPEED100) 1199 active_speed = SPEED_100; 1200 else 1201 active_speed = SPEED_10; 1202 1203 if (bmcr & BMCR_FULLDPLX) 1204 active_duplex = DUPLEX_FULL; 1205 else 1206 active_duplex = 
DUPLEX_HALF; 1207 } 1208 1209 lp->active_advertising = advertising; 1210 lp->active_speed = active_speed; 1211 lp->active_duplex = active_duplex; 1212 *link_up_p = !!(bmsr & BMSR_LSTATUS); 1213 1214 return 0; 1215} 1216 1217static int link_status_1g_rgmii(struct niu *np, int *link_up_p) 1218{ 1219 struct niu_link_config *lp = &np->link_config; 1220 u16 current_speed, bmsr; 1221 unsigned long flags; 1222 u8 current_duplex; 1223 int err, link_up; 1224 1225 link_up = 0; 1226 current_speed = SPEED_INVALID; 1227 current_duplex = DUPLEX_INVALID; 1228 1229 spin_lock_irqsave(&np->lock, flags); 1230 1231 err = -EINVAL; 1232 1233 err = mii_read(np, np->phy_addr, MII_BMSR); 1234 if (err < 0) 1235 goto out; 1236 1237 bmsr = err; 1238 if (bmsr & BMSR_LSTATUS) { 1239 u16 adv, lpa, common, estat; 1240 1241 err = mii_read(np, np->phy_addr, MII_ADVERTISE); 1242 if (err < 0) 1243 goto out; 1244 adv = err; 1245 1246 err = mii_read(np, np->phy_addr, MII_LPA); 1247 if (err < 0) 1248 goto out; 1249 lpa = err; 1250 1251 common = adv & lpa; 1252 1253 err = mii_read(np, np->phy_addr, MII_ESTATUS); 1254 if (err < 0) 1255 goto out; 1256 estat = err; 1257 link_up = 1; 1258 current_speed = SPEED_1000; 1259 current_duplex = DUPLEX_FULL; 1260 1261 } 1262 lp->active_speed = current_speed; 1263 lp->active_duplex = current_duplex; 1264 err = 0; 1265 1266out: 1267 spin_unlock_irqrestore(&np->lock, flags); 1268 1269 *link_up_p = link_up; 1270 return err; 1271} 1272 1273static int link_status_1g(struct niu *np, int *link_up_p) 1274{ 1275 struct niu_link_config *lp = &np->link_config; 1276 unsigned long flags; 1277 int err; 1278 1279 spin_lock_irqsave(&np->lock, flags); 1280 1281 err = link_status_mii(np, link_up_p); 1282 lp->supported |= SUPPORTED_TP; 1283 lp->active_advertising |= ADVERTISED_TP; 1284 1285 spin_unlock_irqrestore(&np->lock, flags); 1286 return err; 1287} 1288 1289static int bcm8704_reset(struct niu *np) 1290{ 1291 int err, limit; 1292 1293 err = mdio_read(np, np->phy_addr, 1294 BCM8704_PHYXS_DEV_ADDR, MII_BMCR); 1295 if (err < 0 || err == 0xffff) 1296 return err; 1297 err |= BMCR_RESET; 1298 err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, 1299 MII_BMCR, err); 1300 if (err) 1301 return err; 1302 1303 limit = 1000; 1304 while (--limit >= 0) { 1305 err = mdio_read(np, np->phy_addr, 1306 BCM8704_PHYXS_DEV_ADDR, MII_BMCR); 1307 if (err < 0) 1308 return err; 1309 if (!(err & BMCR_RESET)) 1310 break; 1311 } 1312 if (limit < 0) { 1313 netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n", 1314 np->port, (err & 0xffff)); 1315 return -ENODEV; 1316 } 1317 return 0; 1318} 1319 1320/* When written, certain PHY registers need to be read back twice 1321 * in order for the bits to settle properly. 
1322 */ 1323static int bcm8704_user_dev3_readback(struct niu *np, int reg) 1324{ 1325 int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg); 1326 if (err < 0) 1327 return err; 1328 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg); 1329 if (err < 0) 1330 return err; 1331 return 0; 1332} 1333 1334static int bcm8706_init_user_dev3(struct niu *np) 1335{ 1336 int err; 1337 1338 1339 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 1340 BCM8704_USER_OPT_DIGITAL_CTRL); 1341 if (err < 0) 1342 return err; 1343 err &= ~USER_ODIG_CTRL_GPIOS; 1344 err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT); 1345 err |= USER_ODIG_CTRL_RESV2; 1346 err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 1347 BCM8704_USER_OPT_DIGITAL_CTRL, err); 1348 if (err) 1349 return err; 1350 1351 mdelay(1000); 1352 1353 return 0; 1354} 1355 1356static int bcm8704_init_user_dev3(struct niu *np) 1357{ 1358 int err; 1359 1360 err = mdio_write(np, np->phy_addr, 1361 BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL, 1362 (USER_CONTROL_OPTXRST_LVL | 1363 USER_CONTROL_OPBIASFLT_LVL | 1364 USER_CONTROL_OBTMPFLT_LVL | 1365 USER_CONTROL_OPPRFLT_LVL | 1366 USER_CONTROL_OPTXFLT_LVL | 1367 USER_CONTROL_OPRXLOS_LVL | 1368 USER_CONTROL_OPRXFLT_LVL | 1369 USER_CONTROL_OPTXON_LVL | 1370 (0x3f << USER_CONTROL_RES1_SHIFT))); 1371 if (err) 1372 return err; 1373 1374 err = mdio_write(np, np->phy_addr, 1375 BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL, 1376 (USER_PMD_TX_CTL_XFP_CLKEN | 1377 (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) | 1378 (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) | 1379 USER_PMD_TX_CTL_TSCK_LPWREN)); 1380 if (err) 1381 return err; 1382 1383 err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL); 1384 if (err) 1385 return err; 1386 err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL); 1387 if (err) 1388 return err; 1389 1390 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 1391 BCM8704_USER_OPT_DIGITAL_CTRL); 1392 if (err < 0) 1393 return err; 1394 err &= ~USER_ODIG_CTRL_GPIOS; 1395 err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT); 1396 err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 1397 BCM8704_USER_OPT_DIGITAL_CTRL, err); 1398 if (err) 1399 return err; 1400 1401 mdelay(1000); 1402 1403 return 0; 1404} 1405 1406static int mrvl88x2011_act_led(struct niu *np, int val) 1407{ 1408 int err; 1409 1410 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, 1411 MRVL88X2011_LED_8_TO_11_CTL); 1412 if (err < 0) 1413 return err; 1414 1415 err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT,MRVL88X2011_LED_CTL_MASK); 1416 err |= MRVL88X2011_LED(MRVL88X2011_LED_ACT,val); 1417 1418 return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, 1419 MRVL88X2011_LED_8_TO_11_CTL, err); 1420} 1421 1422static int mrvl88x2011_led_blink_rate(struct niu *np, int rate) 1423{ 1424 int err; 1425 1426 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, 1427 MRVL88X2011_LED_BLINK_CTL); 1428 if (err >= 0) { 1429 err &= ~MRVL88X2011_LED_BLKRATE_MASK; 1430 err |= (rate << 4); 1431 1432 err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, 1433 MRVL88X2011_LED_BLINK_CTL, err); 1434 } 1435 1436 return err; 1437} 1438 1439static int xcvr_init_10g_mrvl88x2011(struct niu *np) 1440{ 1441 int err; 1442 1443 /* Set LED functions */ 1444 err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS); 1445 if (err) 1446 return err; 1447 1448 /* led activity */ 1449 err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF); 1450 if (err) 1451 return err; 1452 1453 err = mdio_read(np, 
np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, 1454 MRVL88X2011_GENERAL_CTL); 1455 if (err < 0) 1456 return err; 1457 1458 err |= MRVL88X2011_ENA_XFPREFCLK; 1459 1460 err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, 1461 MRVL88X2011_GENERAL_CTL, err); 1462 if (err < 0) 1463 return err; 1464 1465 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, 1466 MRVL88X2011_PMA_PMD_CTL_1); 1467 if (err < 0) 1468 return err; 1469 1470 if (np->link_config.loopback_mode == LOOPBACK_MAC) 1471 err |= MRVL88X2011_LOOPBACK; 1472 else 1473 err &= ~MRVL88X2011_LOOPBACK; 1474 1475 err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, 1476 MRVL88X2011_PMA_PMD_CTL_1, err); 1477 if (err < 0) 1478 return err; 1479 1480 /* Enable PMD */ 1481 return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, 1482 MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX); 1483} 1484 1485 1486static int xcvr_diag_bcm870x(struct niu *np) 1487{ 1488 u16 analog_stat0, tx_alarm_status; 1489 int err = 0; 1490 1491#if 1 1492 err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, 1493 MII_STAT1000); 1494 if (err < 0) 1495 return err; 1496 pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err); 1497 1498 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20); 1499 if (err < 0) 1500 return err; 1501 pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err); 1502 1503 err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, 1504 MII_NWAYTEST); 1505 if (err < 0) 1506 return err; 1507 pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err); 1508#endif 1509 1510 /* XXX dig this out it might not be so useful XXX */ 1511 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 1512 BCM8704_USER_ANALOG_STATUS0); 1513 if (err < 0) 1514 return err; 1515 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 1516 BCM8704_USER_ANALOG_STATUS0); 1517 if (err < 0) 1518 return err; 1519 analog_stat0 = err; 1520 1521 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 1522 BCM8704_USER_TX_ALARM_STATUS); 1523 if (err < 0) 1524 return err; 1525 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 1526 BCM8704_USER_TX_ALARM_STATUS); 1527 if (err < 0) 1528 return err; 1529 tx_alarm_status = err; 1530 1531 if (analog_stat0 != 0x03fc) { 1532 if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) { 1533 pr_info("Port %u cable not connected or bad cable\n", 1534 np->port); 1535 } else if (analog_stat0 == 0x639c) { 1536 pr_info("Port %u optical module is bad or missing\n", 1537 np->port); 1538 } 1539 } 1540 1541 return 0; 1542} 1543 1544static int xcvr_10g_set_lb_bcm870x(struct niu *np) 1545{ 1546 struct niu_link_config *lp = &np->link_config; 1547 int err; 1548 1549 err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, 1550 MII_BMCR); 1551 if (err < 0) 1552 return err; 1553 1554 err &= ~BMCR_LOOPBACK; 1555 1556 if (lp->loopback_mode == LOOPBACK_MAC) 1557 err |= BMCR_LOOPBACK; 1558 1559 err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, 1560 MII_BMCR, err); 1561 if (err) 1562 return err; 1563 1564 return 0; 1565} 1566 1567static int xcvr_init_10g_bcm8706(struct niu *np) 1568{ 1569 int err = 0; 1570 u64 val; 1571 1572 if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) && 1573 (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0) 1574 return err; 1575 1576 val = nr64_mac(XMAC_CONFIG); 1577 val &= ~XMAC_CONFIG_LED_POLARITY; 1578 val |= XMAC_CONFIG_FORCE_LED_ON; 1579 nw64_mac(XMAC_CONFIG, val); 1580 1581 val = nr64(MIF_CONFIG); 1582 val |= MIF_CONFIG_INDIRECT_MODE; 1583 nw64(MIF_CONFIG, val); 
1584 1585 err = bcm8704_reset(np); 1586 if (err) 1587 return err; 1588 1589 err = xcvr_10g_set_lb_bcm870x(np); 1590 if (err) 1591 return err; 1592 1593 err = bcm8706_init_user_dev3(np); 1594 if (err) 1595 return err; 1596 1597 err = xcvr_diag_bcm870x(np); 1598 if (err) 1599 return err; 1600 1601 return 0; 1602} 1603 1604static int xcvr_init_10g_bcm8704(struct niu *np) 1605{ 1606 int err; 1607 1608 err = bcm8704_reset(np); 1609 if (err) 1610 return err; 1611 1612 err = bcm8704_init_user_dev3(np); 1613 if (err) 1614 return err; 1615 1616 err = xcvr_10g_set_lb_bcm870x(np); 1617 if (err) 1618 return err; 1619 1620 err = xcvr_diag_bcm870x(np); 1621 if (err) 1622 return err; 1623 1624 return 0; 1625} 1626 1627static int xcvr_init_10g(struct niu *np) 1628{ 1629 int phy_id, err; 1630 u64 val; 1631 1632 val = nr64_mac(XMAC_CONFIG); 1633 val &= ~XMAC_CONFIG_LED_POLARITY; 1634 val |= XMAC_CONFIG_FORCE_LED_ON; 1635 nw64_mac(XMAC_CONFIG, val); 1636 1637 /* XXX shared resource, lock parent XXX */ 1638 val = nr64(MIF_CONFIG); 1639 val |= MIF_CONFIG_INDIRECT_MODE; 1640 nw64(MIF_CONFIG, val); 1641 1642 phy_id = phy_decode(np->parent->port_phy, np->port); 1643 phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port]; 1644 1645 /* handle different phy types */ 1646 switch (phy_id & NIU_PHY_ID_MASK) { 1647 case NIU_PHY_ID_MRVL88X2011: 1648 err = xcvr_init_10g_mrvl88x2011(np); 1649 break; 1650 1651 default: /* bcom 8704 */ 1652 err = xcvr_init_10g_bcm8704(np); 1653 break; 1654 } 1655 1656 return 0; 1657} 1658 1659static int mii_reset(struct niu *np) 1660{ 1661 int limit, err; 1662 1663 err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET); 1664 if (err) 1665 return err; 1666 1667 limit = 1000; 1668 while (--limit >= 0) { 1669 udelay(500); 1670 err = mii_read(np, np->phy_addr, MII_BMCR); 1671 if (err < 0) 1672 return err; 1673 if (!(err & BMCR_RESET)) 1674 break; 1675 } 1676 if (limit < 0) { 1677 netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n", 1678 np->port, err); 1679 return -ENODEV; 1680 } 1681 1682 return 0; 1683} 1684 1685static int xcvr_init_1g_rgmii(struct niu *np) 1686{ 1687 int err; 1688 u64 val; 1689 u16 bmcr, bmsr, estat; 1690 1691 val = nr64(MIF_CONFIG); 1692 val &= ~MIF_CONFIG_INDIRECT_MODE; 1693 nw64(MIF_CONFIG, val); 1694 1695 err = mii_reset(np); 1696 if (err) 1697 return err; 1698 1699 err = mii_read(np, np->phy_addr, MII_BMSR); 1700 if (err < 0) 1701 return err; 1702 bmsr = err; 1703 1704 estat = 0; 1705 if (bmsr & BMSR_ESTATEN) { 1706 err = mii_read(np, np->phy_addr, MII_ESTATUS); 1707 if (err < 0) 1708 return err; 1709 estat = err; 1710 } 1711 1712 bmcr = 0; 1713 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); 1714 if (err) 1715 return err; 1716 1717 if (bmsr & BMSR_ESTATEN) { 1718 u16 ctrl1000 = 0; 1719 1720 if (estat & ESTATUS_1000_TFULL) 1721 ctrl1000 |= ADVERTISE_1000FULL; 1722 err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000); 1723 if (err) 1724 return err; 1725 } 1726 1727 bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX); 1728 1729 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); 1730 if (err) 1731 return err; 1732 1733 err = mii_read(np, np->phy_addr, MII_BMCR); 1734 if (err < 0) 1735 return err; 1736 bmcr = mii_read(np, np->phy_addr, MII_BMCR); 1737 1738 err = mii_read(np, np->phy_addr, MII_BMSR); 1739 if (err < 0) 1740 return err; 1741 1742 return 0; 1743} 1744 1745static int mii_init_common(struct niu *np) 1746{ 1747 struct niu_link_config *lp = &np->link_config; 1748 u16 bmcr, bmsr, adv, estat; 1749 int err; 1750 1751 err = mii_reset(np); 1752 if (err) 
1753 return err; 1754 1755 err = mii_read(np, np->phy_addr, MII_BMSR); 1756 if (err < 0) 1757 return err; 1758 bmsr = err; 1759 1760 estat = 0; 1761 if (bmsr & BMSR_ESTATEN) { 1762 err = mii_read(np, np->phy_addr, MII_ESTATUS); 1763 if (err < 0) 1764 return err; 1765 estat = err; 1766 } 1767 1768 bmcr = 0; 1769 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); 1770 if (err) 1771 return err; 1772 1773 if (lp->loopback_mode == LOOPBACK_MAC) { 1774 bmcr |= BMCR_LOOPBACK; 1775 if (lp->active_speed == SPEED_1000) 1776 bmcr |= BMCR_SPEED1000; 1777 if (lp->active_duplex == DUPLEX_FULL) 1778 bmcr |= BMCR_FULLDPLX; 1779 } 1780 1781 if (lp->loopback_mode == LOOPBACK_PHY) { 1782 u16 aux; 1783 1784 aux = (BCM5464R_AUX_CTL_EXT_LB | 1785 BCM5464R_AUX_CTL_WRITE_1); 1786 err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux); 1787 if (err) 1788 return err; 1789 } 1790 1791 if (lp->autoneg) { 1792 u16 ctrl1000; 1793 1794 adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP; 1795 if ((bmsr & BMSR_10HALF) && 1796 (lp->advertising & ADVERTISED_10baseT_Half)) 1797 adv |= ADVERTISE_10HALF; 1798 if ((bmsr & BMSR_10FULL) && 1799 (lp->advertising & ADVERTISED_10baseT_Full)) 1800 adv |= ADVERTISE_10FULL; 1801 if ((bmsr & BMSR_100HALF) && 1802 (lp->advertising & ADVERTISED_100baseT_Half)) 1803 adv |= ADVERTISE_100HALF; 1804 if ((bmsr & BMSR_100FULL) && 1805 (lp->advertising & ADVERTISED_100baseT_Full)) 1806 adv |= ADVERTISE_100FULL; 1807 err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv); 1808 if (err) 1809 return err; 1810 1811 if (likely(bmsr & BMSR_ESTATEN)) { 1812 ctrl1000 = 0; 1813 if ((estat & ESTATUS_1000_THALF) && 1814 (lp->advertising & ADVERTISED_1000baseT_Half)) 1815 ctrl1000 |= ADVERTISE_1000HALF; 1816 if ((estat & ESTATUS_1000_TFULL) && 1817 (lp->advertising & ADVERTISED_1000baseT_Full)) 1818 ctrl1000 |= ADVERTISE_1000FULL; 1819 err = mii_write(np, np->phy_addr, 1820 MII_CTRL1000, ctrl1000); 1821 if (err) 1822 return err; 1823 } 1824 1825 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 1826 } else { 1827 /* !lp->autoneg */ 1828 int fulldpx; 1829 1830 if (lp->duplex == DUPLEX_FULL) { 1831 bmcr |= BMCR_FULLDPLX; 1832 fulldpx = 1; 1833 } else if (lp->duplex == DUPLEX_HALF) 1834 fulldpx = 0; 1835 else 1836 return -EINVAL; 1837 1838 if (lp->speed == SPEED_1000) { 1839 /* if X-full requested while not supported, or 1840 X-half requested while not supported... 
*/ 1841 if ((fulldpx && !(estat & ESTATUS_1000_TFULL)) || 1842 (!fulldpx && !(estat & ESTATUS_1000_THALF))) 1843 return -EINVAL; 1844 bmcr |= BMCR_SPEED1000; 1845 } else if (lp->speed == SPEED_100) { 1846 if ((fulldpx && !(bmsr & BMSR_100FULL)) || 1847 (!fulldpx && !(bmsr & BMSR_100HALF))) 1848 return -EINVAL; 1849 bmcr |= BMCR_SPEED100; 1850 } else if (lp->speed == SPEED_10) { 1851 if ((fulldpx && !(bmsr & BMSR_10FULL)) || 1852 (!fulldpx && !(bmsr & BMSR_10HALF))) 1853 return -EINVAL; 1854 } else 1855 return -EINVAL; 1856 } 1857 1858 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); 1859 if (err) 1860 return err; 1861 1862#if 0 1863 err = mii_read(np, np->phy_addr, MII_BMCR); 1864 if (err < 0) 1865 return err; 1866 bmcr = err; 1867 1868 err = mii_read(np, np->phy_addr, MII_BMSR); 1869 if (err < 0) 1870 return err; 1871 bmsr = err; 1872 1873 pr_info("Port %u after MII init bmcr[%04x] bmsr[%04x]\n", 1874 np->port, bmcr, bmsr); 1875#endif 1876 1877 return 0; 1878} 1879 1880static int xcvr_init_1g(struct niu *np) 1881{ 1882 u64 val; 1883 1884 /* XXX shared resource, lock parent XXX */ 1885 val = nr64(MIF_CONFIG); 1886 val &= ~MIF_CONFIG_INDIRECT_MODE; 1887 nw64(MIF_CONFIG, val); 1888 1889 return mii_init_common(np); 1890} 1891 1892static int niu_xcvr_init(struct niu *np) 1893{ 1894 const struct niu_phy_ops *ops = np->phy_ops; 1895 int err; 1896 1897 err = 0; 1898 if (ops->xcvr_init) 1899 err = ops->xcvr_init(np); 1900 1901 return err; 1902} 1903 1904static int niu_serdes_init(struct niu *np) 1905{ 1906 const struct niu_phy_ops *ops = np->phy_ops; 1907 int err; 1908 1909 err = 0; 1910 if (ops->serdes_init) 1911 err = ops->serdes_init(np); 1912 1913 return err; 1914} 1915 1916static void niu_init_xif(struct niu *); 1917static void niu_handle_led(struct niu *, int status); 1918 1919static int niu_link_status_common(struct niu *np, int link_up) 1920{ 1921 struct niu_link_config *lp = &np->link_config; 1922 struct net_device *dev = np->dev; 1923 unsigned long flags; 1924 1925 if (!netif_carrier_ok(dev) && link_up) { 1926 netif_info(np, link, dev, "Link is up at %s, %s duplex\n", 1927 lp->active_speed == SPEED_10000 ? "10Gb/sec" : 1928 lp->active_speed == SPEED_1000 ? "1Gb/sec" : 1929 lp->active_speed == SPEED_100 ? "100Mbit/sec" : 1930 "10Mbit/sec", 1931 lp->active_duplex == DUPLEX_FULL ? "full" : "half"); 1932 1933 spin_lock_irqsave(&np->lock, flags); 1934 niu_init_xif(np); 1935 niu_handle_led(np, 1); 1936 spin_unlock_irqrestore(&np->lock, flags); 1937 1938 netif_carrier_on(dev); 1939 } else if (netif_carrier_ok(dev) && !link_up) { 1940 netif_warn(np, link, dev, "Link is down\n"); 1941 spin_lock_irqsave(&np->lock, flags); 1942 niu_handle_led(np, 0); 1943 spin_unlock_irqrestore(&np->lock, flags); 1944 netif_carrier_off(dev); 1945 } 1946 1947 return 0; 1948} 1949 1950static int link_status_10g_mrvl(struct niu *np, int *link_up_p) 1951{ 1952 int err, link_up, pma_status, pcs_status; 1953 1954 link_up = 0; 1955 1956 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, 1957 MRVL88X2011_10G_PMD_STATUS_2); 1958 if (err < 0) 1959 goto out; 1960 1961 /* Check PMA/PMD Register: 1.0001.2 == 1 */ 1962 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, 1963 MRVL88X2011_PMA_PMD_STATUS_1); 1964 if (err < 0) 1965 goto out; 1966 1967 pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 
1 : 0); 1968 1969 /* Check PMC Register : 3.0001.2 == 1: read twice */ 1970 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, 1971 MRVL88X2011_PMA_PMD_STATUS_1); 1972 if (err < 0) 1973 goto out; 1974 1975 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, 1976 MRVL88X2011_PMA_PMD_STATUS_1); 1977 if (err < 0) 1978 goto out; 1979 1980 pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0); 1981 1982 /* Check XGXS Register : 4.0018.[0-3,12] */ 1983 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR, 1984 MRVL88X2011_10G_XGXS_LANE_STAT); 1985 if (err < 0) 1986 goto out; 1987 1988 if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 | 1989 PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 | 1990 PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC | 1991 0x800)) 1992 link_up = (pma_status && pcs_status) ? 1 : 0; 1993 1994 np->link_config.active_speed = SPEED_10000; 1995 np->link_config.active_duplex = DUPLEX_FULL; 1996 err = 0; 1997out: 1998 mrvl88x2011_act_led(np, (link_up ? 1999 MRVL88X2011_LED_CTL_PCS_ACT : 2000 MRVL88X2011_LED_CTL_OFF)); 2001 2002 *link_up_p = link_up; 2003 return err; 2004} 2005 2006static int link_status_10g_bcm8706(struct niu *np, int *link_up_p) 2007{ 2008 int err, link_up; 2009 link_up = 0; 2010 2011 err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, 2012 BCM8704_PMD_RCV_SIGDET); 2013 if (err < 0 || err == 0xffff) 2014 goto out; 2015 if (!(err & PMD_RCV_SIGDET_GLOBAL)) { 2016 err = 0; 2017 goto out; 2018 } 2019 2020 err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, 2021 BCM8704_PCS_10G_R_STATUS); 2022 if (err < 0) 2023 goto out; 2024 2025 if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) { 2026 err = 0; 2027 goto out; 2028 } 2029 2030 err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, 2031 BCM8704_PHYXS_XGXS_LANE_STAT); 2032 if (err < 0) 2033 goto out; 2034 if (err != (PHYXS_XGXS_LANE_STAT_ALINGED | 2035 PHYXS_XGXS_LANE_STAT_MAGIC | 2036 PHYXS_XGXS_LANE_STAT_PATTEST | 2037 PHYXS_XGXS_LANE_STAT_LANE3 | 2038 PHYXS_XGXS_LANE_STAT_LANE2 | 2039 PHYXS_XGXS_LANE_STAT_LANE1 | 2040 PHYXS_XGXS_LANE_STAT_LANE0)) { 2041 err = 0; 2042 np->link_config.active_speed = SPEED_INVALID; 2043 np->link_config.active_duplex = DUPLEX_INVALID; 2044 goto out; 2045 } 2046 2047 link_up = 1; 2048 np->link_config.active_speed = SPEED_10000; 2049 np->link_config.active_duplex = DUPLEX_FULL; 2050 err = 0; 2051 2052out: 2053 *link_up_p = link_up; 2054 return err; 2055} 2056 2057static int link_status_10g_bcom(struct niu *np, int *link_up_p) 2058{ 2059 int err, link_up; 2060 2061 link_up = 0; 2062 2063 err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, 2064 BCM8704_PMD_RCV_SIGDET); 2065 if (err < 0) 2066 goto out; 2067 if (!(err & PMD_RCV_SIGDET_GLOBAL)) { 2068 err = 0; 2069 goto out; 2070 } 2071 2072 err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, 2073 BCM8704_PCS_10G_R_STATUS); 2074 if (err < 0) 2075 goto out; 2076 if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) { 2077 err = 0; 2078 goto out; 2079 } 2080 2081 err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, 2082 BCM8704_PHYXS_XGXS_LANE_STAT); 2083 if (err < 0) 2084 goto out; 2085 2086 if (err != (PHYXS_XGXS_LANE_STAT_ALINGED | 2087 PHYXS_XGXS_LANE_STAT_MAGIC | 2088 PHYXS_XGXS_LANE_STAT_LANE3 | 2089 PHYXS_XGXS_LANE_STAT_LANE2 | 2090 PHYXS_XGXS_LANE_STAT_LANE1 | 2091 PHYXS_XGXS_LANE_STAT_LANE0)) { 2092 err = 0; 2093 goto out; 2094 } 2095 2096 link_up = 1; 2097 np->link_config.active_speed = SPEED_10000; 2098 np->link_config.active_duplex = DUPLEX_FULL; 2099 err = 
0; 2100 2101out: 2102 *link_up_p = link_up; 2103 return err; 2104} 2105 2106static int link_status_10g(struct niu *np, int *link_up_p) 2107{ 2108 unsigned long flags; 2109 int err = -EINVAL; 2110 2111 spin_lock_irqsave(&np->lock, flags); 2112 2113 if (np->link_config.loopback_mode == LOOPBACK_DISABLED) { 2114 int phy_id; 2115 2116 phy_id = phy_decode(np->parent->port_phy, np->port); 2117 phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port]; 2118 2119 /* handle different phy types */ 2120 switch (phy_id & NIU_PHY_ID_MASK) { 2121 case NIU_PHY_ID_MRVL88X2011: 2122 err = link_status_10g_mrvl(np, link_up_p); 2123 break; 2124 2125 default: /* bcom 8704 */ 2126 err = link_status_10g_bcom(np, link_up_p); 2127 break; 2128 } 2129 } 2130 2131 spin_unlock_irqrestore(&np->lock, flags); 2132 2133 return err; 2134} 2135 2136static int niu_10g_phy_present(struct niu *np) 2137{ 2138 u64 sig, mask, val; 2139 2140 sig = nr64(ESR_INT_SIGNALS); 2141 switch (np->port) { 2142 case 0: 2143 mask = ESR_INT_SIGNALS_P0_BITS; 2144 val = (ESR_INT_SRDY0_P0 | 2145 ESR_INT_DET0_P0 | 2146 ESR_INT_XSRDY_P0 | 2147 ESR_INT_XDP_P0_CH3 | 2148 ESR_INT_XDP_P0_CH2 | 2149 ESR_INT_XDP_P0_CH1 | 2150 ESR_INT_XDP_P0_CH0); 2151 break; 2152 2153 case 1: 2154 mask = ESR_INT_SIGNALS_P1_BITS; 2155 val = (ESR_INT_SRDY0_P1 | 2156 ESR_INT_DET0_P1 | 2157 ESR_INT_XSRDY_P1 | 2158 ESR_INT_XDP_P1_CH3 | 2159 ESR_INT_XDP_P1_CH2 | 2160 ESR_INT_XDP_P1_CH1 | 2161 ESR_INT_XDP_P1_CH0); 2162 break; 2163 2164 default: 2165 return 0; 2166 } 2167 2168 if ((sig & mask) != val) 2169 return 0; 2170 return 1; 2171} 2172 2173static int link_status_10g_hotplug(struct niu *np, int *link_up_p) 2174{ 2175 unsigned long flags; 2176 int err = 0; 2177 int phy_present; 2178 int phy_present_prev; 2179 2180 spin_lock_irqsave(&np->lock, flags); 2181 2182 if (np->link_config.loopback_mode == LOOPBACK_DISABLED) { 2183 phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ? 
2184 1 : 0; 2185 phy_present = niu_10g_phy_present(np); 2186 if (phy_present != phy_present_prev) { 2187 /* state change */ 2188 if (phy_present) { 2189 /* A NEM was just plugged in */ 2190 np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT; 2191 if (np->phy_ops->xcvr_init) 2192 err = np->phy_ops->xcvr_init(np); 2193 if (err) { 2194 err = mdio_read(np, np->phy_addr, 2195 BCM8704_PHYXS_DEV_ADDR, MII_BMCR); 2196 if (err == 0xffff) { 2197 /* No mdio, back-to-back XAUI */ 2198 goto out; 2199 } 2200 /* debounce */ 2201 np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; 2202 } 2203 } else { 2204 np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; 2205 *link_up_p = 0; 2206 netif_warn(np, link, np->dev, 2207 "Hotplug PHY Removed\n"); 2208 } 2209 } 2210out: 2211 if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) { 2212 err = link_status_10g_bcm8706(np, link_up_p); 2213 if (err == 0xffff) { 2214 /* No mdio, back-to-back XAUI: it is C10NEM */ 2215 *link_up_p = 1; 2216 np->link_config.active_speed = SPEED_10000; 2217 np->link_config.active_duplex = DUPLEX_FULL; 2218 } 2219 } 2220 } 2221 2222 spin_unlock_irqrestore(&np->lock, flags); 2223 2224 return 0; 2225} 2226 2227static int niu_link_status(struct niu *np, int *link_up_p) 2228{ 2229 const struct niu_phy_ops *ops = np->phy_ops; 2230 int err; 2231 2232 err = 0; 2233 if (ops->link_status) 2234 err = ops->link_status(np, link_up_p); 2235 2236 return err; 2237} 2238 2239static void niu_timer(unsigned long __opaque) 2240{ 2241 struct niu *np = (struct niu *) __opaque; 2242 unsigned long off; 2243 int err, link_up; 2244 2245 err = niu_link_status(np, &link_up); 2246 if (!err) 2247 niu_link_status_common(np, link_up); 2248 2249 if (netif_carrier_ok(np->dev)) 2250 off = 5 * HZ; 2251 else 2252 off = 1 * HZ; 2253 np->timer.expires = jiffies + off; 2254 2255 add_timer(&np->timer); 2256} 2257 2258static const struct niu_phy_ops phy_ops_10g_serdes = { 2259 .serdes_init = serdes_init_10g_serdes, 2260 .link_status = link_status_10g_serdes, 2261}; 2262 2263static const struct niu_phy_ops phy_ops_10g_serdes_niu = { 2264 .serdes_init = serdes_init_niu_10g_serdes, 2265 .link_status = link_status_10g_serdes, 2266}; 2267 2268static const struct niu_phy_ops phy_ops_1g_serdes_niu = { 2269 .serdes_init = serdes_init_niu_1g_serdes, 2270 .link_status = link_status_1g_serdes, 2271}; 2272 2273static const struct niu_phy_ops phy_ops_1g_rgmii = { 2274 .xcvr_init = xcvr_init_1g_rgmii, 2275 .link_status = link_status_1g_rgmii, 2276}; 2277 2278static const struct niu_phy_ops phy_ops_10g_fiber_niu = { 2279 .serdes_init = serdes_init_niu_10g_fiber, 2280 .xcvr_init = xcvr_init_10g, 2281 .link_status = link_status_10g, 2282}; 2283 2284static const struct niu_phy_ops phy_ops_10g_fiber = { 2285 .serdes_init = serdes_init_10g, 2286 .xcvr_init = xcvr_init_10g, 2287 .link_status = link_status_10g, 2288}; 2289 2290static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = { 2291 .serdes_init = serdes_init_10g, 2292 .xcvr_init = xcvr_init_10g_bcm8706, 2293 .link_status = link_status_10g_hotplug, 2294}; 2295 2296static const struct niu_phy_ops phy_ops_niu_10g_hotplug = { 2297 .serdes_init = serdes_init_niu_10g_fiber, 2298 .xcvr_init = xcvr_init_10g_bcm8706, 2299 .link_status = link_status_10g_hotplug, 2300}; 2301 2302static const struct niu_phy_ops phy_ops_10g_copper = { 2303 .serdes_init = serdes_init_10g, 2304 .link_status = link_status_10g, /* XXX */ 2305}; 2306 2307static const struct niu_phy_ops phy_ops_1g_fiber = { 2308 .serdes_init = serdes_init_1g, 2309 .xcvr_init = xcvr_init_1g, 2310 .link_status = 
link_status_1g, 2311}; 2312 2313static const struct niu_phy_ops phy_ops_1g_copper = { 2314 .xcvr_init = xcvr_init_1g, 2315 .link_status = link_status_1g, 2316}; 2317 2318struct niu_phy_template { 2319 const struct niu_phy_ops *ops; 2320 u32 phy_addr_base; 2321}; 2322 2323static const struct niu_phy_template phy_template_niu_10g_fiber = { 2324 .ops = &phy_ops_10g_fiber_niu, 2325 .phy_addr_base = 16, 2326}; 2327 2328static const struct niu_phy_template phy_template_niu_10g_serdes = { 2329 .ops = &phy_ops_10g_serdes_niu, 2330 .phy_addr_base = 0, 2331}; 2332 2333static const struct niu_phy_template phy_template_niu_1g_serdes = { 2334 .ops = &phy_ops_1g_serdes_niu, 2335 .phy_addr_base = 0, 2336}; 2337 2338static const struct niu_phy_template phy_template_10g_fiber = { 2339 .ops = &phy_ops_10g_fiber, 2340 .phy_addr_base = 8, 2341}; 2342 2343static const struct niu_phy_template phy_template_10g_fiber_hotplug = { 2344 .ops = &phy_ops_10g_fiber_hotplug, 2345 .phy_addr_base = 8, 2346}; 2347 2348static const struct niu_phy_template phy_template_niu_10g_hotplug = { 2349 .ops = &phy_ops_niu_10g_hotplug, 2350 .phy_addr_base = 8, 2351}; 2352 2353static const struct niu_phy_template phy_template_10g_copper = { 2354 .ops = &phy_ops_10g_copper, 2355 .phy_addr_base = 10, 2356}; 2357 2358static const struct niu_phy_template phy_template_1g_fiber = { 2359 .ops = &phy_ops_1g_fiber, 2360 .phy_addr_base = 0, 2361}; 2362 2363static const struct niu_phy_template phy_template_1g_copper = { 2364 .ops = &phy_ops_1g_copper, 2365 .phy_addr_base = 0, 2366}; 2367 2368static const struct niu_phy_template phy_template_1g_rgmii = { 2369 .ops = &phy_ops_1g_rgmii, 2370 .phy_addr_base = 0, 2371}; 2372 2373static const struct niu_phy_template phy_template_10g_serdes = { 2374 .ops = &phy_ops_10g_serdes, 2375 .phy_addr_base = 0, 2376}; 2377 2378static int niu_atca_port_num[4] = { 2379 0, 0, 11, 10 2380}; 2381 2382static int serdes_init_10g_serdes(struct niu *np) 2383{ 2384 struct niu_link_config *lp = &np->link_config; 2385 unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i; 2386 u64 ctrl_val, test_cfg_val, sig, mask, val; 2387 u64 reset_val; 2388 2389 switch (np->port) { 2390 case 0: 2391 reset_val = ENET_SERDES_RESET_0; 2392 ctrl_reg = ENET_SERDES_0_CTRL_CFG; 2393 test_cfg_reg = ENET_SERDES_0_TEST_CFG; 2394 pll_cfg = ENET_SERDES_0_PLL_CFG; 2395 break; 2396 case 1: 2397 reset_val = ENET_SERDES_RESET_1; 2398 ctrl_reg = ENET_SERDES_1_CTRL_CFG; 2399 test_cfg_reg = ENET_SERDES_1_TEST_CFG; 2400 pll_cfg = ENET_SERDES_1_PLL_CFG; 2401 break; 2402 2403 default: 2404 return -EINVAL; 2405 } 2406 ctrl_val = (ENET_SERDES_CTRL_SDET_0 | 2407 ENET_SERDES_CTRL_SDET_1 | 2408 ENET_SERDES_CTRL_SDET_2 | 2409 ENET_SERDES_CTRL_SDET_3 | 2410 (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) | 2411 (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) | 2412 (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) | 2413 (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) | 2414 (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) | 2415 (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) | 2416 (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) | 2417 (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT)); 2418 test_cfg_val = 0; 2419 2420 if (lp->loopback_mode == LOOPBACK_PHY) { 2421 test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK << 2422 ENET_SERDES_TEST_MD_0_SHIFT) | 2423 (ENET_TEST_MD_PAD_LOOPBACK << 2424 ENET_SERDES_TEST_MD_1_SHIFT) | 2425 (ENET_TEST_MD_PAD_LOOPBACK << 2426 ENET_SERDES_TEST_MD_2_SHIFT) | 2427 (ENET_TEST_MD_PAD_LOOPBACK << 2428 ENET_SERDES_TEST_MD_3_SHIFT)); 2429 } 2430 2431 esr_reset(np); 2432 nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2); 2433 
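	/* Bring-up order for the 10G serdes: reset the ESR block, select the
	 * PLL feedback divider used for 10G operation (FBDIV2), then write
	 * the control and test configuration values computed above and tune
	 * each of the four lanes; if the expected ready/detect signals never
	 * appear, the code at the end of this function falls back to 1G
	 * serdes mode.
	 */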
nw64(ctrl_reg, ctrl_val); 2434 nw64(test_cfg_reg, test_cfg_val); 2435 2436 /* Initialize all 4 lanes of the SERDES. */ 2437 for (i = 0; i < 4; i++) { 2438 u32 rxtx_ctrl, glue0; 2439 int err; 2440 2441 err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl); 2442 if (err) 2443 return err; 2444 err = esr_read_glue0(np, i, &glue0); 2445 if (err) 2446 return err; 2447 2448 rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO); 2449 rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH | 2450 (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT)); 2451 2452 glue0 &= ~(ESR_GLUE_CTRL0_SRATE | 2453 ESR_GLUE_CTRL0_THCNT | 2454 ESR_GLUE_CTRL0_BLTIME); 2455 glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB | 2456 (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) | 2457 (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) | 2458 (BLTIME_300_CYCLES << 2459 ESR_GLUE_CTRL0_BLTIME_SHIFT)); 2460 2461 err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl); 2462 if (err) 2463 return err; 2464 err = esr_write_glue0(np, i, glue0); 2465 if (err) 2466 return err; 2467 } 2468 2469 2470 sig = nr64(ESR_INT_SIGNALS); 2471 switch (np->port) { 2472 case 0: 2473 mask = ESR_INT_SIGNALS_P0_BITS; 2474 val = (ESR_INT_SRDY0_P0 | 2475 ESR_INT_DET0_P0 | 2476 ESR_INT_XSRDY_P0 | 2477 ESR_INT_XDP_P0_CH3 | 2478 ESR_INT_XDP_P0_CH2 | 2479 ESR_INT_XDP_P0_CH1 | 2480 ESR_INT_XDP_P0_CH0); 2481 break; 2482 2483 case 1: 2484 mask = ESR_INT_SIGNALS_P1_BITS; 2485 val = (ESR_INT_SRDY0_P1 | 2486 ESR_INT_DET0_P1 | 2487 ESR_INT_XSRDY_P1 | 2488 ESR_INT_XDP_P1_CH3 | 2489 ESR_INT_XDP_P1_CH2 | 2490 ESR_INT_XDP_P1_CH1 | 2491 ESR_INT_XDP_P1_CH0); 2492 break; 2493 2494 default: 2495 return -EINVAL; 2496 } 2497 2498 if ((sig & mask) != val) { 2499 int err; 2500 err = serdes_init_1g_serdes(np); 2501 if (!err) { 2502 np->flags &= ~NIU_FLAGS_10G; 2503 np->mac_xcvr = MAC_XCVR_PCS; 2504 } else { 2505 netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n", 2506 np->port); 2507 return -ENODEV; 2508 } 2509 } 2510 2511 return 0; 2512} 2513 2514static int niu_determine_phy_disposition(struct niu *np) 2515{ 2516 struct niu_parent *parent = np->parent; 2517 u8 plat_type = parent->plat_type; 2518 const struct niu_phy_template *tp; 2519 u32 phy_addr_off = 0; 2520 2521 if (plat_type == PLAT_TYPE_NIU) { 2522 switch (np->flags & 2523 (NIU_FLAGS_10G | 2524 NIU_FLAGS_FIBER | 2525 NIU_FLAGS_XCVR_SERDES)) { 2526 case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES: 2527 /* 10G Serdes */ 2528 tp = &phy_template_niu_10g_serdes; 2529 break; 2530 case NIU_FLAGS_XCVR_SERDES: 2531 /* 1G Serdes */ 2532 tp = &phy_template_niu_1g_serdes; 2533 break; 2534 case NIU_FLAGS_10G | NIU_FLAGS_FIBER: 2535 /* 10G Fiber */ 2536 default: 2537 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { 2538 tp = &phy_template_niu_10g_hotplug; 2539 if (np->port == 0) 2540 phy_addr_off = 8; 2541 if (np->port == 1) 2542 phy_addr_off = 12; 2543 } else { 2544 tp = &phy_template_niu_10g_fiber; 2545 phy_addr_off += np->port; 2546 } 2547 break; 2548 } 2549 } else { 2550 switch (np->flags & 2551 (NIU_FLAGS_10G | 2552 NIU_FLAGS_FIBER | 2553 NIU_FLAGS_XCVR_SERDES)) { 2554 case 0: 2555 /* 1G copper */ 2556 tp = &phy_template_1g_copper; 2557 if (plat_type == PLAT_TYPE_VF_P0) 2558 phy_addr_off = 10; 2559 else if (plat_type == PLAT_TYPE_VF_P1) 2560 phy_addr_off = 26; 2561 2562 phy_addr_off += (np->port ^ 0x3); 2563 break; 2564 2565 case NIU_FLAGS_10G: 2566 /* 10G copper */ 2567 tp = &phy_template_10g_copper; 2568 break; 2569 2570 case NIU_FLAGS_FIBER: 2571 /* 1G fiber */ 2572 tp = &phy_template_1g_fiber; 2573 break; 2574 2575 case NIU_FLAGS_10G | NIU_FLAGS_FIBER: 2576 /* 10G fiber */ 2577 tp = &phy_template_10g_fiber; 2578 if (plat_type == 
PLAT_TYPE_VF_P0 || 2579 plat_type == PLAT_TYPE_VF_P1) 2580 phy_addr_off = 8; 2581 phy_addr_off += np->port; 2582 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { 2583 tp = &phy_template_10g_fiber_hotplug; 2584 if (np->port == 0) 2585 phy_addr_off = 8; 2586 if (np->port == 1) 2587 phy_addr_off = 12; 2588 } 2589 break; 2590 2591 case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES: 2592 case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER: 2593 case NIU_FLAGS_XCVR_SERDES: 2594 switch(np->port) { 2595 case 0: 2596 case 1: 2597 tp = &phy_template_10g_serdes; 2598 break; 2599 case 2: 2600 case 3: 2601 tp = &phy_template_1g_rgmii; 2602 break; 2603 default: 2604 return -EINVAL; 2605 break; 2606 } 2607 phy_addr_off = niu_atca_port_num[np->port]; 2608 break; 2609 2610 default: 2611 return -EINVAL; 2612 } 2613 } 2614 2615 np->phy_ops = tp->ops; 2616 np->phy_addr = tp->phy_addr_base + phy_addr_off; 2617 2618 return 0; 2619} 2620 2621static int niu_init_link(struct niu *np) 2622{ 2623 struct niu_parent *parent = np->parent; 2624 int err, ignore; 2625 2626 if (parent->plat_type == PLAT_TYPE_NIU) { 2627 err = niu_xcvr_init(np); 2628 if (err) 2629 return err; 2630 msleep(200); 2631 } 2632 err = niu_serdes_init(np); 2633 if (err && !(np->flags & NIU_FLAGS_HOTPLUG_PHY)) 2634 return err; 2635 msleep(200); 2636 err = niu_xcvr_init(np); 2637 if (!err || (np->flags & NIU_FLAGS_HOTPLUG_PHY)) 2638 niu_link_status(np, &ignore); 2639 return 0; 2640} 2641 2642static void niu_set_primary_mac(struct niu *np, unsigned char *addr) 2643{ 2644 u16 reg0 = addr[4] << 8 | addr[5]; 2645 u16 reg1 = addr[2] << 8 | addr[3]; 2646 u16 reg2 = addr[0] << 8 | addr[1]; 2647 2648 if (np->flags & NIU_FLAGS_XMAC) { 2649 nw64_mac(XMAC_ADDR0, reg0); 2650 nw64_mac(XMAC_ADDR1, reg1); 2651 nw64_mac(XMAC_ADDR2, reg2); 2652 } else { 2653 nw64_mac(BMAC_ADDR0, reg0); 2654 nw64_mac(BMAC_ADDR1, reg1); 2655 nw64_mac(BMAC_ADDR2, reg2); 2656 } 2657} 2658 2659static int niu_num_alt_addr(struct niu *np) 2660{ 2661 if (np->flags & NIU_FLAGS_XMAC) 2662 return XMAC_NUM_ALT_ADDR; 2663 else 2664 return BMAC_NUM_ALT_ADDR; 2665} 2666 2667static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr) 2668{ 2669 u16 reg0 = addr[4] << 8 | addr[5]; 2670 u16 reg1 = addr[2] << 8 | addr[3]; 2671 u16 reg2 = addr[0] << 8 | addr[1]; 2672 2673 if (index >= niu_num_alt_addr(np)) 2674 return -EINVAL; 2675 2676 if (np->flags & NIU_FLAGS_XMAC) { 2677 nw64_mac(XMAC_ALT_ADDR0(index), reg0); 2678 nw64_mac(XMAC_ALT_ADDR1(index), reg1); 2679 nw64_mac(XMAC_ALT_ADDR2(index), reg2); 2680 } else { 2681 nw64_mac(BMAC_ALT_ADDR0(index), reg0); 2682 nw64_mac(BMAC_ALT_ADDR1(index), reg1); 2683 nw64_mac(BMAC_ALT_ADDR2(index), reg2); 2684 } 2685 2686 return 0; 2687} 2688 2689static int niu_enable_alt_mac(struct niu *np, int index, int on) 2690{ 2691 unsigned long reg; 2692 u64 val, mask; 2693 2694 if (index >= niu_num_alt_addr(np)) 2695 return -EINVAL; 2696 2697 if (np->flags & NIU_FLAGS_XMAC) { 2698 reg = XMAC_ADDR_CMPEN; 2699 mask = 1 << index; 2700 } else { 2701 reg = BMAC_ADDR_CMPEN; 2702 mask = 1 << (index + 1); 2703 } 2704 2705 val = nr64_mac(reg); 2706 if (on) 2707 val |= mask; 2708 else 2709 val &= ~mask; 2710 nw64_mac(reg, val); 2711 2712 return 0; 2713} 2714 2715static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg, 2716 int num, int mac_pref) 2717{ 2718 u64 val = nr64_mac(reg); 2719 val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR); 2720 val |= num; 2721 if (mac_pref) 2722 val |= HOST_INFO_MPR; 2723 nw64_mac(reg, val); 2724} 2725 2726static int __set_rdc_table_num(struct niu *np, 
2727 int xmac_index, int bmac_index, 2728 int rdc_table_num, int mac_pref) 2729{ 2730 unsigned long reg; 2731 2732 if (rdc_table_num & ~HOST_INFO_MACRDCTBLN) 2733 return -EINVAL; 2734 if (np->flags & NIU_FLAGS_XMAC) 2735 reg = XMAC_HOST_INFO(xmac_index); 2736 else 2737 reg = BMAC_HOST_INFO(bmac_index); 2738 __set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref); 2739 return 0; 2740} 2741 2742static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num, 2743 int mac_pref) 2744{ 2745 return __set_rdc_table_num(np, 17, 0, table_num, mac_pref); 2746} 2747 2748static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num, 2749 int mac_pref) 2750{ 2751 return __set_rdc_table_num(np, 16, 8, table_num, mac_pref); 2752} 2753 2754static int niu_set_alt_mac_rdc_table(struct niu *np, int idx, 2755 int table_num, int mac_pref) 2756{ 2757 if (idx >= niu_num_alt_addr(np)) 2758 return -EINVAL; 2759 return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref); 2760} 2761 2762static u64 vlan_entry_set_parity(u64 reg_val) 2763{ 2764 u64 port01_mask; 2765 u64 port23_mask; 2766 2767 port01_mask = 0x00ff; 2768 port23_mask = 0xff00; 2769 2770 if (hweight64(reg_val & port01_mask) & 1) 2771 reg_val |= ENET_VLAN_TBL_PARITY0; 2772 else 2773 reg_val &= ~ENET_VLAN_TBL_PARITY0; 2774 2775 if (hweight64(reg_val & port23_mask) & 1) 2776 reg_val |= ENET_VLAN_TBL_PARITY1; 2777 else 2778 reg_val &= ~ENET_VLAN_TBL_PARITY1; 2779 2780 return reg_val; 2781} 2782 2783static void vlan_tbl_write(struct niu *np, unsigned long index, 2784 int port, int vpr, int rdc_table) 2785{ 2786 u64 reg_val = nr64(ENET_VLAN_TBL(index)); 2787 2788 reg_val &= ~((ENET_VLAN_TBL_VPR | 2789 ENET_VLAN_TBL_VLANRDCTBLN) << 2790 ENET_VLAN_TBL_SHIFT(port)); 2791 if (vpr) 2792 reg_val |= (ENET_VLAN_TBL_VPR << 2793 ENET_VLAN_TBL_SHIFT(port)); 2794 reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port)); 2795 2796 reg_val = vlan_entry_set_parity(reg_val); 2797 2798 nw64(ENET_VLAN_TBL(index), reg_val); 2799} 2800 2801static void vlan_tbl_clear(struct niu *np) 2802{ 2803 int i; 2804 2805 for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) 2806 nw64(ENET_VLAN_TBL(i), 0); 2807} 2808 2809static int tcam_wait_bit(struct niu *np, u64 bit) 2810{ 2811 int limit = 1000; 2812 2813 while (--limit > 0) { 2814 if (nr64(TCAM_CTL) & bit) 2815 break; 2816 udelay(1); 2817 } 2818 if (limit <= 0) 2819 return -ENODEV; 2820 2821 return 0; 2822} 2823 2824static int tcam_flush(struct niu *np, int index) 2825{ 2826 nw64(TCAM_KEY_0, 0x00); 2827 nw64(TCAM_KEY_MASK_0, 0xff); 2828 nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index)); 2829 2830 return tcam_wait_bit(np, TCAM_CTL_STAT); 2831} 2832 2833#if 0 2834static int tcam_read(struct niu *np, int index, 2835 u64 *key, u64 *mask) 2836{ 2837 int err; 2838 2839 nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index)); 2840 err = tcam_wait_bit(np, TCAM_CTL_STAT); 2841 if (!err) { 2842 key[0] = nr64(TCAM_KEY_0); 2843 key[1] = nr64(TCAM_KEY_1); 2844 key[2] = nr64(TCAM_KEY_2); 2845 key[3] = nr64(TCAM_KEY_3); 2846 mask[0] = nr64(TCAM_KEY_MASK_0); 2847 mask[1] = nr64(TCAM_KEY_MASK_1); 2848 mask[2] = nr64(TCAM_KEY_MASK_2); 2849 mask[3] = nr64(TCAM_KEY_MASK_3); 2850 } 2851 return err; 2852} 2853#endif 2854 2855static int tcam_write(struct niu *np, int index, 2856 u64 *key, u64 *mask) 2857{ 2858 nw64(TCAM_KEY_0, key[0]); 2859 nw64(TCAM_KEY_1, key[1]); 2860 nw64(TCAM_KEY_2, key[2]); 2861 nw64(TCAM_KEY_3, key[3]); 2862 nw64(TCAM_KEY_MASK_0, mask[0]); 2863 nw64(TCAM_KEY_MASK_1, mask[1]); 2864 nw64(TCAM_KEY_MASK_2, mask[2]); 2865 
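	/* The key and mask registers only stage the entry; nothing is
	 * committed until the TCAM_CTL write below issues the indexed write
	 * command, after which tcam_wait_bit() polls TCAM_CTL_STAT for
	 * completion.
	 */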
nw64(TCAM_KEY_MASK_3, mask[3]); 2866 nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index)); 2867 2868 return tcam_wait_bit(np, TCAM_CTL_STAT); 2869} 2870 2871#if 0 2872static int tcam_assoc_read(struct niu *np, int index, u64 *data) 2873{ 2874 int err; 2875 2876 nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index)); 2877 err = tcam_wait_bit(np, TCAM_CTL_STAT); 2878 if (!err) 2879 *data = nr64(TCAM_KEY_1); 2880 2881 return err; 2882} 2883#endif 2884 2885static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data) 2886{ 2887 nw64(TCAM_KEY_1, assoc_data); 2888 nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index)); 2889 2890 return tcam_wait_bit(np, TCAM_CTL_STAT); 2891} 2892 2893static void tcam_enable(struct niu *np, int on) 2894{ 2895 u64 val = nr64(FFLP_CFG_1); 2896 2897 if (on) 2898 val &= ~FFLP_CFG_1_TCAM_DIS; 2899 else 2900 val |= FFLP_CFG_1_TCAM_DIS; 2901 nw64(FFLP_CFG_1, val); 2902} 2903 2904static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio) 2905{ 2906 u64 val = nr64(FFLP_CFG_1); 2907 2908 val &= ~(FFLP_CFG_1_FFLPINITDONE | 2909 FFLP_CFG_1_CAMLAT | 2910 FFLP_CFG_1_CAMRATIO); 2911 val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT); 2912 val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT); 2913 nw64(FFLP_CFG_1, val); 2914 2915 val = nr64(FFLP_CFG_1); 2916 val |= FFLP_CFG_1_FFLPINITDONE; 2917 nw64(FFLP_CFG_1, val); 2918} 2919 2920static int tcam_user_eth_class_enable(struct niu *np, unsigned long class, 2921 int on) 2922{ 2923 unsigned long reg; 2924 u64 val; 2925 2926 if (class < CLASS_CODE_ETHERTYPE1 || 2927 class > CLASS_CODE_ETHERTYPE2) 2928 return -EINVAL; 2929 2930 reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1); 2931 val = nr64(reg); 2932 if (on) 2933 val |= L2_CLS_VLD; 2934 else 2935 val &= ~L2_CLS_VLD; 2936 nw64(reg, val); 2937 2938 return 0; 2939} 2940 2941#if 0 2942static int tcam_user_eth_class_set(struct niu *np, unsigned long class, 2943 u64 ether_type) 2944{ 2945 unsigned long reg; 2946 u64 val; 2947 2948 if (class < CLASS_CODE_ETHERTYPE1 || 2949 class > CLASS_CODE_ETHERTYPE2 || 2950 (ether_type & ~(u64)0xffff) != 0) 2951 return -EINVAL; 2952 2953 reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1); 2954 val = nr64(reg); 2955 val &= ~L2_CLS_ETYPE; 2956 val |= (ether_type << L2_CLS_ETYPE_SHIFT); 2957 nw64(reg, val); 2958 2959 return 0; 2960} 2961#endif 2962 2963static int tcam_user_ip_class_enable(struct niu *np, unsigned long class, 2964 int on) 2965{ 2966 unsigned long reg; 2967 u64 val; 2968 2969 if (class < CLASS_CODE_USER_PROG1 || 2970 class > CLASS_CODE_USER_PROG4) 2971 return -EINVAL; 2972 2973 reg = L3_CLS(class - CLASS_CODE_USER_PROG1); 2974 val = nr64(reg); 2975 if (on) 2976 val |= L3_CLS_VALID; 2977 else 2978 val &= ~L3_CLS_VALID; 2979 nw64(reg, val); 2980 2981 return 0; 2982} 2983 2984static int tcam_user_ip_class_set(struct niu *np, unsigned long class, 2985 int ipv6, u64 protocol_id, 2986 u64 tos_mask, u64 tos_val) 2987{ 2988 unsigned long reg; 2989 u64 val; 2990 2991 if (class < CLASS_CODE_USER_PROG1 || 2992 class > CLASS_CODE_USER_PROG4 || 2993 (protocol_id & ~(u64)0xff) != 0 || 2994 (tos_mask & ~(u64)0xff) != 0 || 2995 (tos_val & ~(u64)0xff) != 0) 2996 return -EINVAL; 2997 2998 reg = L3_CLS(class - CLASS_CODE_USER_PROG1); 2999 val = nr64(reg); 3000 val &= ~(L3_CLS_IPVER | L3_CLS_PID | 3001 L3_CLS_TOSMASK | L3_CLS_TOS); 3002 if (ipv6) 3003 val |= L3_CLS_IPVER; 3004 val |= (protocol_id << L3_CLS_PID_SHIFT); 3005 val |= (tos_mask << L3_CLS_TOSMASK_SHIFT); 3006 val |= (tos_val << L3_CLS_TOS_SHIFT); 3007 nw64(reg, val); 3008 3009 return 0; 3010} 3011 3012static int 
tcam_early_init(struct niu *np) 3013{ 3014 unsigned long i; 3015 int err; 3016 3017 tcam_enable(np, 0); 3018 tcam_set_lat_and_ratio(np, 3019 DEFAULT_TCAM_LATENCY, 3020 DEFAULT_TCAM_ACCESS_RATIO); 3021 for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) { 3022 err = tcam_user_eth_class_enable(np, i, 0); 3023 if (err) 3024 return err; 3025 } 3026 for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) { 3027 err = tcam_user_ip_class_enable(np, i, 0); 3028 if (err) 3029 return err; 3030 } 3031 3032 return 0; 3033} 3034 3035static int tcam_flush_all(struct niu *np) 3036{ 3037 unsigned long i; 3038 3039 for (i = 0; i < np->parent->tcam_num_entries; i++) { 3040 int err = tcam_flush(np, i); 3041 if (err) 3042 return err; 3043 } 3044 return 0; 3045} 3046 3047static u64 hash_addr_regval(unsigned long index, unsigned long num_entries) 3048{ 3049 return ((u64)index | (num_entries == 1 ? 3050 HASH_TBL_ADDR_AUTOINC : 0)); 3051} 3052 3053#if 0 3054static int hash_read(struct niu *np, unsigned long partition, 3055 unsigned long index, unsigned long num_entries, 3056 u64 *data) 3057{ 3058 u64 val = hash_addr_regval(index, num_entries); 3059 unsigned long i; 3060 3061 if (partition >= FCRAM_NUM_PARTITIONS || 3062 index + num_entries > FCRAM_SIZE) 3063 return -EINVAL; 3064 3065 nw64(HASH_TBL_ADDR(partition), val); 3066 for (i = 0; i < num_entries; i++) 3067 data[i] = nr64(HASH_TBL_DATA(partition)); 3068 3069 return 0; 3070} 3071#endif 3072 3073static int hash_write(struct niu *np, unsigned long partition, 3074 unsigned long index, unsigned long num_entries, 3075 u64 *data) 3076{ 3077 u64 val = hash_addr_regval(index, num_entries); 3078 unsigned long i; 3079 3080 if (partition >= FCRAM_NUM_PARTITIONS || 3081 index + (num_entries * 8) > FCRAM_SIZE) 3082 return -EINVAL; 3083 3084 nw64(HASH_TBL_ADDR(partition), val); 3085 for (i = 0; i < num_entries; i++) 3086 nw64(HASH_TBL_DATA(partition), data[i]); 3087 3088 return 0; 3089} 3090 3091static void fflp_reset(struct niu *np) 3092{ 3093 u64 val; 3094 3095 nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST); 3096 udelay(10); 3097 nw64(FFLP_CFG_1, 0); 3098 3099 val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE; 3100 nw64(FFLP_CFG_1, val); 3101} 3102 3103static void fflp_set_timings(struct niu *np) 3104{ 3105 u64 val = nr64(FFLP_CFG_1); 3106 3107 val &= ~FFLP_CFG_1_FFLPINITDONE; 3108 val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT); 3109 nw64(FFLP_CFG_1, val); 3110 3111 val = nr64(FFLP_CFG_1); 3112 val |= FFLP_CFG_1_FFLPINITDONE; 3113 nw64(FFLP_CFG_1, val); 3114 3115 val = nr64(FCRAM_REF_TMR); 3116 val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN); 3117 val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT); 3118 val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT); 3119 nw64(FCRAM_REF_TMR, val); 3120} 3121 3122static int fflp_set_partition(struct niu *np, u64 partition, 3123 u64 mask, u64 base, int enable) 3124{ 3125 unsigned long reg; 3126 u64 val; 3127 3128 if (partition >= FCRAM_NUM_PARTITIONS || 3129 (mask & ~(u64)0x1f) != 0 || 3130 (base & ~(u64)0x1f) != 0) 3131 return -EINVAL; 3132 3133 reg = FLW_PRT_SEL(partition); 3134 3135 val = nr64(reg); 3136 val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE); 3137 val |= (mask << FLW_PRT_SEL_MASK_SHIFT); 3138 val |= (base << FLW_PRT_SEL_BASE_SHIFT); 3139 if (enable) 3140 val |= FLW_PRT_SEL_EXT; 3141 nw64(reg, val); 3142 3143 return 0; 3144} 3145 3146static int fflp_disable_all_partitions(struct niu *np) 3147{ 3148 unsigned long i; 3149 3150 for (i = 0; i < 
FCRAM_NUM_PARTITIONS; i++) { 3151 int err = fflp_set_partition(np, 0, 0, 0, 0); 3152 if (err) 3153 return err; 3154 } 3155 return 0; 3156} 3157 3158static void fflp_llcsnap_enable(struct niu *np, int on) 3159{ 3160 u64 val = nr64(FFLP_CFG_1); 3161 3162 if (on) 3163 val |= FFLP_CFG_1_LLCSNAP; 3164 else 3165 val &= ~FFLP_CFG_1_LLCSNAP; 3166 nw64(FFLP_CFG_1, val); 3167} 3168 3169static void fflp_errors_enable(struct niu *np, int on) 3170{ 3171 u64 val = nr64(FFLP_CFG_1); 3172 3173 if (on) 3174 val &= ~FFLP_CFG_1_ERRORDIS; 3175 else 3176 val |= FFLP_CFG_1_ERRORDIS; 3177 nw64(FFLP_CFG_1, val); 3178} 3179 3180static int fflp_hash_clear(struct niu *np) 3181{ 3182 struct fcram_hash_ipv4 ent; 3183 unsigned long i; 3184 3185 /* IPV4 hash entry with valid bit clear, rest is don't care. */ 3186 memset(&ent, 0, sizeof(ent)); 3187 ent.header = HASH_HEADER_EXT; 3188 3189 for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) { 3190 int err = hash_write(np, 0, i, 1, (u64 *) &ent); 3191 if (err) 3192 return err; 3193 } 3194 return 0; 3195} 3196 3197static int fflp_early_init(struct niu *np) 3198{ 3199 struct niu_parent *parent; 3200 unsigned long flags; 3201 int err; 3202 3203 niu_lock_parent(np, flags); 3204 3205 parent = np->parent; 3206 err = 0; 3207 if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) { 3208 if (np->parent->plat_type != PLAT_TYPE_NIU) { 3209 fflp_reset(np); 3210 fflp_set_timings(np); 3211 err = fflp_disable_all_partitions(np); 3212 if (err) { 3213 netif_printk(np, probe, KERN_DEBUG, np->dev, 3214 "fflp_disable_all_partitions failed, err=%d\n", 3215 err); 3216 goto out; 3217 } 3218 } 3219 3220 err = tcam_early_init(np); 3221 if (err) { 3222 netif_printk(np, probe, KERN_DEBUG, np->dev, 3223 "tcam_early_init failed, err=%d\n", err); 3224 goto out; 3225 } 3226 fflp_llcsnap_enable(np, 1); 3227 fflp_errors_enable(np, 0); 3228 nw64(H1POLY, 0); 3229 nw64(H2POLY, 0); 3230 3231 err = tcam_flush_all(np); 3232 if (err) { 3233 netif_printk(np, probe, KERN_DEBUG, np->dev, 3234 "tcam_flush_all failed, err=%d\n", err); 3235 goto out; 3236 } 3237 if (np->parent->plat_type != PLAT_TYPE_NIU) { 3238 err = fflp_hash_clear(np); 3239 if (err) { 3240 netif_printk(np, probe, KERN_DEBUG, np->dev, 3241 "fflp_hash_clear failed, err=%d\n", 3242 err); 3243 goto out; 3244 } 3245 } 3246 3247 vlan_tbl_clear(np); 3248 3249 parent->flags |= PARENT_FLGS_CLS_HWINIT; 3250 } 3251out: 3252 niu_unlock_parent(np, flags); 3253 return err; 3254} 3255 3256static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key) 3257{ 3258 if (class_code < CLASS_CODE_USER_PROG1 || 3259 class_code > CLASS_CODE_SCTP_IPV6) 3260 return -EINVAL; 3261 3262 nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key); 3263 return 0; 3264} 3265 3266static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key) 3267{ 3268 if (class_code < CLASS_CODE_USER_PROG1 || 3269 class_code > CLASS_CODE_SCTP_IPV6) 3270 return -EINVAL; 3271 3272 nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key); 3273 return 0; 3274} 3275 3276/* Entries for the ports are interleaved in the TCAM */ 3277static u16 tcam_get_index(struct niu *np, u16 idx) 3278{ 3279 /* One entry reserved for IP fragment rule */ 3280 if (idx >= (np->clas.tcam_sz - 1)) 3281 idx = 0; 3282 return (np->clas.tcam_top + ((idx+1) * np->parent->num_ports)); 3283} 3284 3285static u16 tcam_get_size(struct niu *np) 3286{ 3287 /* One entry reserved for IP fragment rule */ 3288 return np->clas.tcam_sz - 1; 3289} 3290 3291static u16 tcam_get_valid_entry_cnt(struct niu *np) 3292{ 3293 /* One entry 
reserved for IP fragment rule */ 3294 return np->clas.tcam_valid_entries - 1; 3295} 3296 3297static void niu_rx_skb_append(struct sk_buff *skb, struct page *page, 3298 u32 offset, u32 size) 3299{ 3300 int i = skb_shinfo(skb)->nr_frags; 3301 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3302 3303 frag->page = page; 3304 frag->page_offset = offset; 3305 frag->size = size; 3306 3307 skb->len += size; 3308 skb->data_len += size; 3309 skb->truesize += size; 3310 3311 skb_shinfo(skb)->nr_frags = i + 1; 3312} 3313 3314static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a) 3315{ 3316 a >>= PAGE_SHIFT; 3317 a ^= (a >> ilog2(MAX_RBR_RING_SIZE)); 3318 3319 return (a & (MAX_RBR_RING_SIZE - 1)); 3320} 3321 3322static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr, 3323 struct page ***link) 3324{ 3325 unsigned int h = niu_hash_rxaddr(rp, addr); 3326 struct page *p, **pp; 3327 3328 addr &= PAGE_MASK; 3329 pp = &rp->rxhash[h]; 3330 for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) { 3331 if (p->index == addr) { 3332 *link = pp; 3333 break; 3334 } 3335 } 3336 3337 return p; 3338} 3339 3340static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base) 3341{ 3342 unsigned int h = niu_hash_rxaddr(rp, base); 3343 3344 page->index = base; 3345 page->mapping = (struct address_space *) rp->rxhash[h]; 3346 rp->rxhash[h] = page; 3347} 3348 3349static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp, 3350 gfp_t mask, int start_index) 3351{ 3352 struct page *page; 3353 u64 addr; 3354 int i; 3355 3356 page = alloc_page(mask); 3357 if (!page) 3358 return -ENOMEM; 3359 3360 addr = np->ops->map_page(np->device, page, 0, 3361 PAGE_SIZE, DMA_FROM_DEVICE); 3362 3363 niu_hash_page(rp, page, addr); 3364 if (rp->rbr_blocks_per_page > 1) 3365 atomic_add(rp->rbr_blocks_per_page - 1, 3366 &compound_head(page)->_count); 3367 3368 for (i = 0; i < rp->rbr_blocks_per_page; i++) { 3369 __le32 *rbr = &rp->rbr[start_index + i]; 3370 3371 *rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT); 3372 addr += rp->rbr_block_size; 3373 } 3374 3375 return 0; 3376} 3377 3378static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask) 3379{ 3380 int index = rp->rbr_index; 3381 3382 rp->rbr_pending++; 3383 if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) { 3384 int err = niu_rbr_add_page(np, rp, mask, index); 3385 3386 if (unlikely(err)) { 3387 rp->rbr_pending--; 3388 return; 3389 } 3390 3391 rp->rbr_index += rp->rbr_blocks_per_page; 3392 BUG_ON(rp->rbr_index > rp->rbr_table_size); 3393 if (rp->rbr_index == rp->rbr_table_size) 3394 rp->rbr_index = 0; 3395 3396 if (rp->rbr_pending >= rp->rbr_kick_thresh) { 3397 nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending); 3398 rp->rbr_pending = 0; 3399 } 3400 } 3401} 3402 3403static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp) 3404{ 3405 unsigned int index = rp->rcr_index; 3406 int num_rcr = 0; 3407 3408 rp->rx_dropped++; 3409 while (1) { 3410 struct page *page, **link; 3411 u64 addr, val; 3412 u32 rcr_size; 3413 3414 num_rcr++; 3415 3416 val = le64_to_cpup(&rp->rcr[index]); 3417 addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << 3418 RCR_ENTRY_PKT_BUF_ADDR_SHIFT; 3419 page = niu_find_rxpage(rp, addr, &link); 3420 3421 rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> 3422 RCR_ENTRY_PKTBUFSZ_SHIFT]; 3423 if ((page->index + PAGE_SIZE) - rcr_size == addr) { 3424 *link = (struct page *) page->mapping; 3425 np->ops->unmap_page(np->device, page->index, 3426 PAGE_SIZE, DMA_FROM_DEVICE); 3427 page->index = 0; 3428 
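			/* page->index and page->mapping were overloaded to hold
			 * the DMA base address and the rxhash chain pointer
			 * (see niu_hash_page()), so both must be cleared before
			 * the page is returned to the allocator.
			 */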
page->mapping = NULL; 3429 __free_page(page); 3430 rp->rbr_refill_pending++; 3431 } 3432 3433 index = NEXT_RCR(rp, index); 3434 if (!(val & RCR_ENTRY_MULTI)) 3435 break; 3436 3437 } 3438 rp->rcr_index = index; 3439 3440 return num_rcr; 3441} 3442 3443static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np, 3444 struct rx_ring_info *rp) 3445{ 3446 unsigned int index = rp->rcr_index; 3447 struct rx_pkt_hdr1 *rh; 3448 struct sk_buff *skb; 3449 int len, num_rcr; 3450 3451 skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE); 3452 if (unlikely(!skb)) 3453 return niu_rx_pkt_ignore(np, rp); 3454 3455 num_rcr = 0; 3456 while (1) { 3457 struct page *page, **link; 3458 u32 rcr_size, append_size; 3459 u64 addr, val, off; 3460 3461 num_rcr++; 3462 3463 val = le64_to_cpup(&rp->rcr[index]); 3464 3465 len = (val & RCR_ENTRY_L2_LEN) >> 3466 RCR_ENTRY_L2_LEN_SHIFT; 3467 len -= ETH_FCS_LEN; 3468 3469 addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << 3470 RCR_ENTRY_PKT_BUF_ADDR_SHIFT; 3471 page = niu_find_rxpage(rp, addr, &link); 3472 3473 rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> 3474 RCR_ENTRY_PKTBUFSZ_SHIFT]; 3475 3476 off = addr & ~PAGE_MASK; 3477 append_size = rcr_size; 3478 if (num_rcr == 1) { 3479 int ptype; 3480 3481 ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT); 3482 if ((ptype == RCR_PKT_TYPE_TCP || 3483 ptype == RCR_PKT_TYPE_UDP) && 3484 !(val & (RCR_ENTRY_NOPORT | 3485 RCR_ENTRY_ERROR))) 3486 skb->ip_summed = CHECKSUM_UNNECESSARY; 3487 else 3488 skb->ip_summed = CHECKSUM_NONE; 3489 } else if (!(val & RCR_ENTRY_MULTI)) 3490 append_size = len - skb->len; 3491 3492 niu_rx_skb_append(skb, page, off, append_size); 3493 if ((page->index + rp->rbr_block_size) - rcr_size == addr) { 3494 *link = (struct page *) page->mapping; 3495 np->ops->unmap_page(np->device, page->index, 3496 PAGE_SIZE, DMA_FROM_DEVICE); 3497 page->index = 0; 3498 page->mapping = NULL; 3499 rp->rbr_refill_pending++; 3500 } else 3501 get_page(page); 3502 3503 index = NEXT_RCR(rp, index); 3504 if (!(val & RCR_ENTRY_MULTI)) 3505 break; 3506 3507 } 3508 rp->rcr_index = index; 3509 3510 len += sizeof(*rh); 3511 len = min_t(int, len, sizeof(*rh) + VLAN_ETH_HLEN); 3512 __pskb_pull_tail(skb, len); 3513 3514 rh = (struct rx_pkt_hdr1 *) skb->data; 3515 if (np->dev->features & NETIF_F_RXHASH) 3516 skb->rxhash = ((u32)rh->hashval2_0 << 24 | 3517 (u32)rh->hashval2_1 << 16 | 3518 (u32)rh->hashval1_1 << 8 | 3519 (u32)rh->hashval1_2 << 0); 3520 skb_pull(skb, sizeof(*rh)); 3521 3522 rp->rx_packets++; 3523 rp->rx_bytes += skb->len; 3524 3525 skb->protocol = eth_type_trans(skb, np->dev); 3526 skb_record_rx_queue(skb, rp->rx_channel); 3527 napi_gro_receive(napi, skb); 3528 3529 return num_rcr; 3530} 3531 3532static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask) 3533{ 3534 int blocks_per_page = rp->rbr_blocks_per_page; 3535 int err, index = rp->rbr_index; 3536 3537 err = 0; 3538 while (index < (rp->rbr_table_size - blocks_per_page)) { 3539 err = niu_rbr_add_page(np, rp, mask, index); 3540 if (err) 3541 break; 3542 3543 index += blocks_per_page; 3544 } 3545 3546 rp->rbr_index = index; 3547 return err; 3548} 3549 3550static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp) 3551{ 3552 int i; 3553 3554 for (i = 0; i < MAX_RBR_RING_SIZE; i++) { 3555 struct page *page; 3556 3557 page = rp->rxhash[i]; 3558 while (page) { 3559 struct page *next = (struct page *) page->mapping; 3560 u64 base = page->index; 3561 3562 np->ops->unmap_page(np->device, base, PAGE_SIZE, 3563 DMA_FROM_DEVICE); 3564 page->index = 0; 3565 
page->mapping = NULL; 3566 3567 __free_page(page); 3568 3569 page = next; 3570 } 3571 } 3572 3573 for (i = 0; i < rp->rbr_table_size; i++) 3574 rp->rbr[i] = cpu_to_le32(0); 3575 rp->rbr_index = 0; 3576} 3577 3578static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx) 3579{ 3580 struct tx_buff_info *tb = &rp->tx_buffs[idx]; 3581 struct sk_buff *skb = tb->skb; 3582 struct tx_pkt_hdr *tp; 3583 u64 tx_flags; 3584 int i, len; 3585 3586 tp = (struct tx_pkt_hdr *) skb->data; 3587 tx_flags = le64_to_cpup(&tp->flags); 3588 3589 rp->tx_packets++; 3590 rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) - 3591 ((tx_flags & TXHDR_PAD) / 2)); 3592 3593 len = skb_headlen(skb); 3594 np->ops->unmap_single(np->device, tb->mapping, 3595 len, DMA_TO_DEVICE); 3596 3597 if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK) 3598 rp->mark_pending--; 3599 3600 tb->skb = NULL; 3601 do { 3602 idx = NEXT_TX(rp, idx); 3603 len -= MAX_TX_DESC_LEN; 3604 } while (len > 0); 3605 3606 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3607 tb = &rp->tx_buffs[idx]; 3608 BUG_ON(tb->skb != NULL); 3609 np->ops->unmap_page(np->device, tb->mapping, 3610 skb_shinfo(skb)->frags[i].size, 3611 DMA_TO_DEVICE); 3612 idx = NEXT_TX(rp, idx); 3613 } 3614 3615 dev_kfree_skb(skb); 3616 3617 return idx; 3618} 3619 3620#define NIU_TX_WAKEUP_THRESH(rp) ((rp)->pending / 4) 3621 3622static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) 3623{ 3624 struct netdev_queue *txq; 3625 u16 pkt_cnt, tmp; 3626 int cons, index; 3627 u64 cs; 3628 3629 index = (rp - np->tx_rings); 3630 txq = netdev_get_tx_queue(np->dev, index); 3631 3632 cs = rp->tx_cs; 3633 if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK)))) 3634 goto out; 3635 3636 tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT; 3637 pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) & 3638 (TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT); 3639 3640 rp->last_pkt_cnt = tmp; 3641 3642 cons = rp->cons; 3643 3644 netif_printk(np, tx_done, KERN_DEBUG, np->dev, 3645 "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons); 3646 3647 while (pkt_cnt--) 3648 cons = release_tx_packet(np, rp, cons); 3649 3650 rp->cons = cons; 3651 smp_mb(); 3652 3653out: 3654 if (unlikely(netif_tx_queue_stopped(txq) && 3655 (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) { 3656 __netif_tx_lock(txq, smp_processor_id()); 3657 if (netif_tx_queue_stopped(txq) && 3658 (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))) 3659 netif_tx_wake_queue(txq); 3660 __netif_tx_unlock(txq); 3661 } 3662} 3663 3664static inline void niu_sync_rx_discard_stats(struct niu *np, 3665 struct rx_ring_info *rp, 3666 const int limit) 3667{ 3668 /* This elaborate scheme is needed for reading the RX discard 3669 * counters, as they are only 16-bit and can overflow quickly, 3670 * and because the overflow indication bit is not usable as 3671 * the counter value does not wrap, but remains at max value 3672 * 0xFFFF. 3673 * 3674 * In theory and in practice counters can be lost in between 3675 * reading nr64() and clearing the counter nw64(). For this 3676 * reason, the number of counter clearings nw64() is 3677 * limited/reduced though the limit parameter. 3678 */ 3679 int rx_channel = rp->rx_channel; 3680 u32 misc, wred; 3681 3682 /* RXMISC (Receive Miscellaneous Discard Count), covers the 3683 * following discard events: IPP (Input Port Process), 3684 * FFLP/TCAM, Full RCR (Receive Completion Ring) RBR (Receive 3685 * Block Ring) prefetch buffer is empty. 
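	 *
	 * niu_rx_work() passes a limit of 0x7FFF, half of the 16-bit
	 * counter range, so the racy read-then-clear path runs as rarely
	 * as possible.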
3686 */ 3687 misc = nr64(RXMISC(rx_channel)); 3688 if (unlikely((misc & RXMISC_COUNT) > limit)) { 3689 nw64(RXMISC(rx_channel), 0); 3690 rp->rx_errors += misc & RXMISC_COUNT; 3691 3692 if (unlikely(misc & RXMISC_OFLOW)) 3693 dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n", 3694 rx_channel); 3695 3696 netif_printk(np, rx_err, KERN_DEBUG, np->dev, 3697 "rx-%d: MISC drop=%u over=%u\n", 3698 rx_channel, misc, misc-limit); 3699 } 3700 3701 /* WRED (Weighted Random Early Discard) by hardware */ 3702 wred = nr64(RED_DIS_CNT(rx_channel)); 3703 if (unlikely((wred & RED_DIS_CNT_COUNT) > limit)) { 3704 nw64(RED_DIS_CNT(rx_channel), 0); 3705 rp->rx_dropped += wred & RED_DIS_CNT_COUNT; 3706 3707 if (unlikely(wred & RED_DIS_CNT_OFLOW)) 3708 dev_err(np->device, "rx-%d: Counter overflow WRED discard\n", rx_channel); 3709 3710 netif_printk(np, rx_err, KERN_DEBUG, np->dev, 3711 "rx-%d: WRED drop=%u over=%u\n", 3712 rx_channel, wred, wred-limit); 3713 } 3714} 3715 3716static int niu_rx_work(struct napi_struct *napi, struct niu *np, 3717 struct rx_ring_info *rp, int budget) 3718{ 3719 int qlen, rcr_done = 0, work_done = 0; 3720 struct rxdma_mailbox *mbox = rp->mbox; 3721 u64 stat; 3722 3723#if 1 3724 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel)); 3725 qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN; 3726#else 3727 stat = le64_to_cpup(&mbox->rx_dma_ctl_stat); 3728 qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN); 3729#endif 3730 mbox->rx_dma_ctl_stat = 0; 3731 mbox->rcrstat_a = 0; 3732 3733 netif_printk(np, rx_status, KERN_DEBUG, np->dev, 3734 "%s(chan[%d]), stat[%llx] qlen=%d\n", 3735 __func__, rp->rx_channel, (unsigned long long)stat, qlen); 3736 3737 rcr_done = work_done = 0; 3738 qlen = min(qlen, budget); 3739 while (work_done < qlen) { 3740 rcr_done += niu_process_rx_pkt(napi, np, rp); 3741 work_done++; 3742 } 3743 3744 if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) { 3745 unsigned int i; 3746 3747 for (i = 0; i < rp->rbr_refill_pending; i++) 3748 niu_rbr_refill(np, rp, GFP_ATOMIC); 3749 rp->rbr_refill_pending = 0; 3750 } 3751 3752 stat = (RX_DMA_CTL_STAT_MEX | 3753 ((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) | 3754 ((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT)); 3755 3756 nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat); 3757 3758 /* Only sync discards stats when qlen indicate potential for drops */ 3759 if (qlen > 10) 3760 niu_sync_rx_discard_stats(np, rp, 0x7FFF); 3761 3762 return work_done; 3763} 3764 3765static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget) 3766{ 3767 u64 v0 = lp->v0; 3768 u32 tx_vec = (v0 >> 32); 3769 u32 rx_vec = (v0 & 0xffffffff); 3770 int i, work_done = 0; 3771 3772 netif_printk(np, intr, KERN_DEBUG, np->dev, 3773 "%s() v0[%016llx]\n", __func__, (unsigned long long)v0); 3774 3775 for (i = 0; i < np->num_tx_rings; i++) { 3776 struct tx_ring_info *rp = &np->tx_rings[i]; 3777 if (tx_vec & (1 << rp->tx_channel)) 3778 niu_tx_work(np, rp); 3779 nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0); 3780 } 3781 3782 for (i = 0; i < np->num_rx_rings; i++) { 3783 struct rx_ring_info *rp = &np->rx_rings[i]; 3784 3785 if (rx_vec & (1 << rp->rx_channel)) { 3786 int this_work_done; 3787 3788 this_work_done = niu_rx_work(&lp->napi, np, rp, 3789 budget); 3790 3791 budget -= this_work_done; 3792 work_done += this_work_done; 3793 } 3794 nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0); 3795 } 3796 3797 return work_done; 3798} 3799 3800static int niu_poll(struct napi_struct *napi, int budget) 3801{ 3802 struct niu_ldg *lp = container_of(napi, struct niu_ldg, 
napi); 3803 struct niu *np = lp->np; 3804 int work_done; 3805 3806 work_done = niu_poll_core(np, lp, budget); 3807 3808 if (work_done < budget) { 3809 napi_complete(napi); 3810 niu_ldg_rearm(np, lp, 1); 3811 } 3812 return work_done; 3813} 3814 3815static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp, 3816 u64 stat) 3817{ 3818 netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel); 3819 3820 if (stat & RX_DMA_CTL_STAT_RBR_TMOUT) 3821 pr_cont("RBR_TMOUT "); 3822 if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR) 3823 pr_cont("RSP_CNT "); 3824 if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS) 3825 pr_cont("BYTE_EN_BUS "); 3826 if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR) 3827 pr_cont("RSP_DAT "); 3828 if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR) 3829 pr_cont("RCR_ACK "); 3830 if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR) 3831 pr_cont("RCR_SHA_PAR "); 3832 if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR) 3833 pr_cont("RBR_PRE_PAR "); 3834 if (stat & RX_DMA_CTL_STAT_CONFIG_ERR) 3835 pr_cont("CONFIG "); 3836 if (stat & RX_DMA_CTL_STAT_RCRINCON) 3837 pr_cont("RCRINCON "); 3838 if (stat & RX_DMA_CTL_STAT_RCRFULL) 3839 pr_cont("RCRFULL "); 3840 if (stat & RX_DMA_CTL_STAT_RBRFULL) 3841 pr_cont("RBRFULL "); 3842 if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE) 3843 pr_cont("RBRLOGPAGE "); 3844 if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE) 3845 pr_cont("CFIGLOGPAGE "); 3846 if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR) 3847 pr_cont("DC_FIDO "); 3848 3849 pr_cont(")\n"); 3850} 3851 3852static int niu_rx_error(struct niu *np, struct rx_ring_info *rp) 3853{ 3854 u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel)); 3855 int err = 0; 3856 3857 3858 if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL | 3859 RX_DMA_CTL_STAT_PORT_FATAL)) 3860 err = -EINVAL; 3861 3862 if (err) { 3863 netdev_err(np->dev, "RX channel %u error, stat[%llx]\n", 3864 rp->rx_channel, 3865 (unsigned long long) stat); 3866 3867 niu_log_rxchan_errors(np, rp, stat); 3868 } 3869 3870 nw64(RX_DMA_CTL_STAT(rp->rx_channel), 3871 stat & RX_DMA_CTL_WRITE_CLEAR_ERRS); 3872 3873 return err; 3874} 3875 3876static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp, 3877 u64 cs) 3878{ 3879 netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel); 3880 3881 if (cs & TX_CS_MBOX_ERR) 3882 pr_cont("MBOX "); 3883 if (cs & TX_CS_PKT_SIZE_ERR) 3884 pr_cont("PKT_SIZE "); 3885 if (cs & TX_CS_TX_RING_OFLOW) 3886 pr_cont("TX_RING_OFLOW "); 3887 if (cs & TX_CS_PREF_BUF_PAR_ERR) 3888 pr_cont("PREF_BUF_PAR "); 3889 if (cs & TX_CS_NACK_PREF) 3890 pr_cont("NACK_PREF "); 3891 if (cs & TX_CS_NACK_PKT_RD) 3892 pr_cont("NACK_PKT_RD "); 3893 if (cs & TX_CS_CONF_PART_ERR) 3894 pr_cont("CONF_PART "); 3895 if (cs & TX_CS_PKT_PRT_ERR) 3896 pr_cont("PKT_PTR "); 3897 3898 pr_cont(")\n"); 3899} 3900 3901static int niu_tx_error(struct niu *np, struct tx_ring_info *rp) 3902{ 3903 u64 cs, logh, logl; 3904 3905 cs = nr64(TX_CS(rp->tx_channel)); 3906 logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel)); 3907 logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel)); 3908 3909 netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n", 3910 rp->tx_channel, 3911 (unsigned long long)cs, 3912 (unsigned long long)logh, 3913 (unsigned long long)logl); 3914 3915 niu_log_txchan_errors(np, rp, cs); 3916 3917 return -ENODEV; 3918} 3919 3920static int niu_mif_interrupt(struct niu *np) 3921{ 3922 u64 mif_status = nr64(MIF_STATUS); 3923 int phy_mdint = 0; 3924 3925 if (np->flags & NIU_FLAGS_XMAC) { 3926 u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS); 3927 3928 if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT) 3929 phy_mdint = 1; 3930 } 3931 
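	/* MIF (MDIO interface) interrupts are only reported here; the
	 * -ENODEV return makes the slow path disable chip interrupts via
	 * niu_enable_interrupts(np, 0).
	 */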
3932 netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n", 3933 (unsigned long long)mif_status, phy_mdint); 3934 3935 return -ENODEV; 3936} 3937 3938static void niu_xmac_interrupt(struct niu *np) 3939{ 3940 struct niu_xmac_stats *mp = &np->mac_stats.xmac; 3941 u64 val; 3942 3943 val = nr64_mac(XTXMAC_STATUS); 3944 if (val & XTXMAC_STATUS_FRAME_CNT_EXP) 3945 mp->tx_frames += TXMAC_FRM_CNT_COUNT; 3946 if (val & XTXMAC_STATUS_BYTE_CNT_EXP) 3947 mp->tx_bytes += TXMAC_BYTE_CNT_COUNT; 3948 if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR) 3949 mp->tx_fifo_errors++; 3950 if (val & XTXMAC_STATUS_TXMAC_OFLOW) 3951 mp->tx_overflow_errors++; 3952 if (val & XTXMAC_STATUS_MAX_PSIZE_ERR) 3953 mp->tx_max_pkt_size_errors++; 3954 if (val & XTXMAC_STATUS_TXMAC_UFLOW) 3955 mp->tx_underflow_errors++; 3956 3957 val = nr64_mac(XRXMAC_STATUS); 3958 if (val & XRXMAC_STATUS_LCL_FLT_STATUS) 3959 mp->rx_local_faults++; 3960 if (val & XRXMAC_STATUS_RFLT_DET) 3961 mp->rx_remote_faults++; 3962 if (val & XRXMAC_STATUS_LFLT_CNT_EXP) 3963 mp->rx_link_faults += LINK_FAULT_CNT_COUNT; 3964 if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP) 3965 mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT; 3966 if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP) 3967 mp->rx_frags += RXMAC_FRAG_CNT_COUNT; 3968 if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP) 3969 mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT; 3970 if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP) 3971 mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; 3972 if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP) 3973 mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; 3974 if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP) 3975 mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT; 3976 if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP) 3977 mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT; 3978 if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP) 3979 mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT; 3980 if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP) 3981 mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT; 3982 if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP) 3983 mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT; 3984 if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP) 3985 mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT; 3986 if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP) 3987 mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT; 3988 if (val & XRXMAC_STATUS_RXOCTET_CNT_EXP) 3989 mp->rx_octets += RXMAC_BT_CNT_COUNT; 3990 if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP) 3991 mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT; 3992 if (val & XRXMAC_STATUS_LENERR_CNT_EXP) 3993 mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT; 3994 if (val & XRXMAC_STATUS_CRCERR_CNT_EXP) 3995 mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT; 3996 if (val & XRXMAC_STATUS_RXUFLOW) 3997 mp->rx_underflows++; 3998 if (val & XRXMAC_STATUS_RXOFLOW) 3999 mp->rx_overflows++; 4000 4001 val = nr64_mac(XMAC_FC_STAT); 4002 if (val & XMAC_FC_STAT_TX_MAC_NPAUSE) 4003 mp->pause_off_state++; 4004 if (val & XMAC_FC_STAT_TX_MAC_PAUSE) 4005 mp->pause_on_state++; 4006 if (val & XMAC_FC_STAT_RX_MAC_RPAUSE) 4007 mp->pause_received++; 4008} 4009 4010static void niu_bmac_interrupt(struct niu *np) 4011{ 4012 struct niu_bmac_stats *mp = &np->mac_stats.bmac; 4013 u64 val; 4014 4015 val = nr64_mac(BTXMAC_STATUS); 4016 if (val & BTXMAC_STATUS_UNDERRUN) 4017 mp->tx_underflow_errors++; 4018 if (val & BTXMAC_STATUS_MAX_PKT_ERR) 4019 mp->tx_max_pkt_size_errors++; 4020 if (val & BTXMAC_STATUS_BYTE_CNT_EXP) 4021 mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT; 4022 if (val & BTXMAC_STATUS_FRAME_CNT_EXP) 4023 mp->tx_frames += BTXMAC_FRM_CNT_COUNT; 4024 4025 val = nr64_mac(BRXMAC_STATUS); 4026 if (val & BRXMAC_STATUS_OVERFLOW) 4027 mp->rx_overflows++; 
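	/* As in the XMAC case above, the *_CNT_EXP status bits indicate a
	 * hardware statistics counter has wrapped, so a full counter's
	 * worth is credited to the corresponding software total.
	 */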
4028 if (val & BRXMAC_STATUS_FRAME_CNT_EXP) 4029 mp->rx_frames += BRXMAC_FRAME_CNT_COUNT; 4030 if (val & BRXMAC_STATUS_ALIGN_ERR_EXP) 4031 mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT; 4032 if (val & BRXMAC_STATUS_CRC_ERR_EXP) 4033 mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT; 4034 if (val & BRXMAC_STATUS_LEN_ERR_EXP) 4035 mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT; 4036 4037 val = nr64_mac(BMAC_CTRL_STATUS); 4038 if (val & BMAC_CTRL_STATUS_NOPAUSE) 4039 mp->pause_off_state++; 4040 if (val & BMAC_CTRL_STATUS_PAUSE) 4041 mp->pause_on_state++; 4042 if (val & BMAC_CTRL_STATUS_PAUSE_RECV) 4043 mp->pause_received++; 4044} 4045 4046static int niu_mac_interrupt(struct niu *np) 4047{ 4048 if (np->flags & NIU_FLAGS_XMAC) 4049 niu_xmac_interrupt(np); 4050 else 4051 niu_bmac_interrupt(np); 4052 4053 return 0; 4054} 4055 4056static void niu_log_device_error(struct niu *np, u64 stat) 4057{ 4058 netdev_err(np->dev, "Core device errors ( "); 4059 4060 if (stat & SYS_ERR_MASK_META2) 4061 pr_cont("META2 "); 4062 if (stat & SYS_ERR_MASK_META1) 4063 pr_cont("META1 "); 4064 if (stat & SYS_ERR_MASK_PEU) 4065 pr_cont("PEU "); 4066 if (stat & SYS_ERR_MASK_TXC) 4067 pr_cont("TXC "); 4068 if (stat & SYS_ERR_MASK_RDMC) 4069 pr_cont("RDMC "); 4070 if (stat & SYS_ERR_MASK_TDMC) 4071 pr_cont("TDMC "); 4072 if (stat & SYS_ERR_MASK_ZCP) 4073 pr_cont("ZCP "); 4074 if (stat & SYS_ERR_MASK_FFLP) 4075 pr_cont("FFLP "); 4076 if (stat & SYS_ERR_MASK_IPP) 4077 pr_cont("IPP "); 4078 if (stat & SYS_ERR_MASK_MAC) 4079 pr_cont("MAC "); 4080 if (stat & SYS_ERR_MASK_SMX) 4081 pr_cont("SMX "); 4082 4083 pr_cont(")\n"); 4084} 4085 4086static int niu_device_error(struct niu *np) 4087{ 4088 u64 stat = nr64(SYS_ERR_STAT); 4089 4090 netdev_err(np->dev, "Core device error, stat[%llx]\n", 4091 (unsigned long long)stat); 4092 4093 niu_log_device_error(np, stat); 4094 4095 return -ENODEV; 4096} 4097 4098static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp, 4099 u64 v0, u64 v1, u64 v2) 4100{ 4101 4102 int i, err = 0; 4103 4104 lp->v0 = v0; 4105 lp->v1 = v1; 4106 lp->v2 = v2; 4107 4108 if (v1 & 0x00000000ffffffffULL) { 4109 u32 rx_vec = (v1 & 0xffffffff); 4110 4111 for (i = 0; i < np->num_rx_rings; i++) { 4112 struct rx_ring_info *rp = &np->rx_rings[i]; 4113 4114 if (rx_vec & (1 << rp->rx_channel)) { 4115 int r = niu_rx_error(np, rp); 4116 if (r) { 4117 err = r; 4118 } else { 4119 if (!v0) 4120 nw64(RX_DMA_CTL_STAT(rp->rx_channel), 4121 RX_DMA_CTL_STAT_MEX); 4122 } 4123 } 4124 } 4125 } 4126 if (v1 & 0x7fffffff00000000ULL) { 4127 u32 tx_vec = (v1 >> 32) & 0x7fffffff; 4128 4129 for (i = 0; i < np->num_tx_rings; i++) { 4130 struct tx_ring_info *rp = &np->tx_rings[i]; 4131 4132 if (tx_vec & (1 << rp->tx_channel)) { 4133 int r = niu_tx_error(np, rp); 4134 if (r) 4135 err = r; 4136 } 4137 } 4138 } 4139 if ((v0 | v1) & 0x8000000000000000ULL) { 4140 int r = niu_mif_interrupt(np); 4141 if (r) 4142 err = r; 4143 } 4144 if (v2) { 4145 if (v2 & 0x01ef) { 4146 int r = niu_mac_interrupt(np); 4147 if (r) 4148 err = r; 4149 } 4150 if (v2 & 0x0210) { 4151 int r = niu_device_error(np); 4152 if (r) 4153 err = r; 4154 } 4155 } 4156 4157 if (err) 4158 niu_enable_interrupts(np, 0); 4159 4160 return err; 4161} 4162 4163static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp, 4164 int ldn) 4165{ 4166 struct rxdma_mailbox *mbox = rp->mbox; 4167 u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat); 4168 4169 stat_write = (RX_DMA_CTL_STAT_RCRTHRES | 4170 RX_DMA_CTL_STAT_RCRTO); 4171 nw64(RX_DMA_CTL_STAT(rp->rx_channel), 
stat_write); 4172 4173 netif_printk(np, intr, KERN_DEBUG, np->dev, 4174 "%s() stat[%llx]\n", __func__, (unsigned long long)stat); 4175} 4176 4177static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp, 4178 int ldn) 4179{ 4180 rp->tx_cs = nr64(TX_CS(rp->tx_channel)); 4181 4182 netif_printk(np, intr, KERN_DEBUG, np->dev, 4183 "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs); 4184} 4185 4186static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0) 4187{ 4188 struct niu_parent *parent = np->parent; 4189 u32 rx_vec, tx_vec; 4190 int i; 4191 4192 tx_vec = (v0 >> 32); 4193 rx_vec = (v0 & 0xffffffff); 4194 4195 for (i = 0; i < np->num_rx_rings; i++) { 4196 struct rx_ring_info *rp = &np->rx_rings[i]; 4197 int ldn = LDN_RXDMA(rp->rx_channel); 4198 4199 if (parent->ldg_map[ldn] != ldg) 4200 continue; 4201 4202 nw64(LD_IM0(ldn), LD_IM0_MASK); 4203 if (rx_vec & (1 << rp->rx_channel)) 4204 niu_rxchan_intr(np, rp, ldn); 4205 } 4206 4207 for (i = 0; i < np->num_tx_rings; i++) { 4208 struct tx_ring_info *rp = &np->tx_rings[i]; 4209 int ldn = LDN_TXDMA(rp->tx_channel); 4210 4211 if (parent->ldg_map[ldn] != ldg) 4212 continue; 4213 4214 nw64(LD_IM0(ldn), LD_IM0_MASK); 4215 if (tx_vec & (1 << rp->tx_channel)) 4216 niu_txchan_intr(np, rp, ldn); 4217 } 4218} 4219 4220static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp, 4221 u64 v0, u64 v1, u64 v2) 4222{ 4223 if (likely(napi_schedule_prep(&lp->napi))) { 4224 lp->v0 = v0; 4225 lp->v1 = v1; 4226 lp->v2 = v2; 4227 __niu_fastpath_interrupt(np, lp->ldg_num, v0); 4228 __napi_schedule(&lp->napi); 4229 } 4230} 4231 4232static irqreturn_t niu_interrupt(int irq, void *dev_id) 4233{ 4234 struct niu_ldg *lp = dev_id; 4235 struct niu *np = lp->np; 4236 int ldg = lp->ldg_num; 4237 unsigned long flags; 4238 u64 v0, v1, v2; 4239 4240 if (netif_msg_intr(np)) 4241 printk(KERN_DEBUG KBUILD_MODNAME ": " "%s() ldg[%p](%d)", 4242 __func__, lp, ldg); 4243 4244 spin_lock_irqsave(&np->lock, flags); 4245 4246 v0 = nr64(LDSV0(ldg)); 4247 v1 = nr64(LDSV1(ldg)); 4248 v2 = nr64(LDSV2(ldg)); 4249 4250 if (netif_msg_intr(np)) 4251 pr_cont(" v0[%llx] v1[%llx] v2[%llx]\n", 4252 (unsigned long long) v0, 4253 (unsigned long long) v1, 4254 (unsigned long long) v2); 4255 4256 if (unlikely(!v0 && !v1 && !v2)) { 4257 spin_unlock_irqrestore(&np->lock, flags); 4258 return IRQ_NONE; 4259 } 4260 4261 if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) { 4262 int err = niu_slowpath_interrupt(np, lp, v0, v1, v2); 4263 if (err) 4264 goto out; 4265 } 4266 if (likely(v0 & ~((u64)1 << LDN_MIF))) 4267 niu_schedule_napi(np, lp, v0, v1, v2); 4268 else 4269 niu_ldg_rearm(np, lp, 1); 4270out: 4271 spin_unlock_irqrestore(&np->lock, flags); 4272 4273 return IRQ_HANDLED; 4274} 4275 4276static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp) 4277{ 4278 if (rp->mbox) { 4279 np->ops->free_coherent(np->device, 4280 sizeof(struct rxdma_mailbox), 4281 rp->mbox, rp->mbox_dma); 4282 rp->mbox = NULL; 4283 } 4284 if (rp->rcr) { 4285 np->ops->free_coherent(np->device, 4286 MAX_RCR_RING_SIZE * sizeof(__le64), 4287 rp->rcr, rp->rcr_dma); 4288 rp->rcr = NULL; 4289 rp->rcr_table_size = 0; 4290 rp->rcr_index = 0; 4291 } 4292 if (rp->rbr) { 4293 niu_rbr_free(np, rp); 4294 4295 np->ops->free_coherent(np->device, 4296 MAX_RBR_RING_SIZE * sizeof(__le32), 4297 rp->rbr, rp->rbr_dma); 4298 rp->rbr = NULL; 4299 rp->rbr_table_size = 0; 4300 rp->rbr_index = 0; 4301 } 4302 kfree(rp->rxhash); 4303 rp->rxhash = NULL; 4304} 4305 4306static void niu_free_tx_ring_info(struct niu 
*np, struct tx_ring_info *rp) 4307{ 4308 if (rp->mbox) { 4309 np->ops->free_coherent(np->device, 4310 sizeof(struct txdma_mailbox), 4311 rp->mbox, rp->mbox_dma); 4312 rp->mbox = NULL; 4313 } 4314 if (rp->descr) { 4315 int i; 4316 4317 for (i = 0; i < MAX_TX_RING_SIZE; i++) { 4318 if (rp->tx_buffs[i].skb) 4319 (void) release_tx_packet(np, rp, i); 4320 } 4321 4322 np->ops->free_coherent(np->device, 4323 MAX_TX_RING_SIZE * sizeof(__le64), 4324 rp->descr, rp->descr_dma); 4325 rp->descr = NULL; 4326 rp->pending = 0; 4327 rp->prod = 0; 4328 rp->cons = 0; 4329 rp->wrap_bit = 0; 4330 } 4331} 4332 4333static void niu_free_channels(struct niu *np) 4334{ 4335 int i; 4336 4337 if (np->rx_rings) { 4338 for (i = 0; i < np->num_rx_rings; i++) { 4339 struct rx_ring_info *rp = &np->rx_rings[i]; 4340 4341 niu_free_rx_ring_info(np, rp); 4342 } 4343 kfree(np->rx_rings); 4344 np->rx_rings = NULL; 4345 np->num_rx_rings = 0; 4346 } 4347 4348 if (np->tx_rings) { 4349 for (i = 0; i < np->num_tx_rings; i++) { 4350 struct tx_ring_info *rp = &np->tx_rings[i]; 4351 4352 niu_free_tx_ring_info(np, rp); 4353 } 4354 kfree(np->tx_rings); 4355 np->tx_rings = NULL; 4356 np->num_tx_rings = 0; 4357 } 4358} 4359 4360static int niu_alloc_rx_ring_info(struct niu *np, 4361 struct rx_ring_info *rp) 4362{ 4363 BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64); 4364 4365 rp->rxhash = kzalloc(MAX_RBR_RING_SIZE * sizeof(struct page *), 4366 GFP_KERNEL); 4367 if (!rp->rxhash) 4368 return -ENOMEM; 4369 4370 rp->mbox = np->ops->alloc_coherent(np->device, 4371 sizeof(struct rxdma_mailbox), 4372 &rp->mbox_dma, GFP_KERNEL); 4373 if (!rp->mbox) 4374 return -ENOMEM; 4375 if ((unsigned long)rp->mbox & (64UL - 1)) { 4376 netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n", 4377 rp->mbox); 4378 return -EINVAL; 4379 } 4380 4381 rp->rcr = np->ops->alloc_coherent(np->device, 4382 MAX_RCR_RING_SIZE * sizeof(__le64), 4383 &rp->rcr_dma, GFP_KERNEL); 4384 if (!rp->rcr) 4385 return -ENOMEM; 4386 if ((unsigned long)rp->rcr & (64UL - 1)) { 4387 netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n", 4388 rp->rcr); 4389 return -EINVAL; 4390 } 4391 rp->rcr_table_size = MAX_RCR_RING_SIZE; 4392 rp->rcr_index = 0; 4393 4394 rp->rbr = np->ops->alloc_coherent(np->device, 4395 MAX_RBR_RING_SIZE * sizeof(__le32), 4396 &rp->rbr_dma, GFP_KERNEL); 4397 if (!rp->rbr) 4398 return -ENOMEM; 4399 if ((unsigned long)rp->rbr & (64UL - 1)) { 4400 netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n", 4401 rp->rbr); 4402 return -EINVAL; 4403 } 4404 rp->rbr_table_size = MAX_RBR_RING_SIZE; 4405 rp->rbr_index = 0; 4406 rp->rbr_pending = 0; 4407 4408 return 0; 4409} 4410 4411static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp) 4412{ 4413 int mtu = np->dev->mtu; 4414 4415 /* These values are recommended by the HW designers for fair 4416 * utilization of DRR amongst the rings. 
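	 * (DRR refers to the TXC block's deficit round robin arbitration
	 * between TX DMA channels; the resulting max_burst is written to
	 * TXC_DMA_MAX by niu_init_one_tx_channel() to bound how much one
	 * ring may send per round.)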
4417 */ 4418 rp->max_burst = mtu + 32; 4419 if (rp->max_burst > 4096) 4420 rp->max_burst = 4096; 4421} 4422 4423static int niu_alloc_tx_ring_info(struct niu *np, 4424 struct tx_ring_info *rp) 4425{ 4426 BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64); 4427 4428 rp->mbox = np->ops->alloc_coherent(np->device, 4429 sizeof(struct txdma_mailbox), 4430 &rp->mbox_dma, GFP_KERNEL); 4431 if (!rp->mbox) 4432 return -ENOMEM; 4433 if ((unsigned long)rp->mbox & (64UL - 1)) { 4434 netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n", 4435 rp->mbox); 4436 return -EINVAL; 4437 } 4438 4439 rp->descr = np->ops->alloc_coherent(np->device, 4440 MAX_TX_RING_SIZE * sizeof(__le64), 4441 &rp->descr_dma, GFP_KERNEL); 4442 if (!rp->descr) 4443 return -ENOMEM; 4444 if ((unsigned long)rp->descr & (64UL - 1)) { 4445 netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n", 4446 rp->descr); 4447 return -EINVAL; 4448 } 4449 4450 rp->pending = MAX_TX_RING_SIZE; 4451 rp->prod = 0; 4452 rp->cons = 0; 4453 rp->wrap_bit = 0; 4454 4455 /* XXX make these configurable... XXX */ 4456 rp->mark_freq = rp->pending / 4; 4457 4458 niu_set_max_burst(np, rp); 4459 4460 return 0; 4461} 4462 4463static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp) 4464{ 4465 u16 bss; 4466 4467 bss = min(PAGE_SHIFT, 15); 4468 4469 rp->rbr_block_size = 1 << bss; 4470 rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss); 4471 4472 rp->rbr_sizes[0] = 256; 4473 rp->rbr_sizes[1] = 1024; 4474 if (np->dev->mtu > ETH_DATA_LEN) { 4475 switch (PAGE_SIZE) { 4476 case 4 * 1024: 4477 rp->rbr_sizes[2] = 4096; 4478 break; 4479 4480 default: 4481 rp->rbr_sizes[2] = 8192; 4482 break; 4483 } 4484 } else { 4485 rp->rbr_sizes[2] = 2048; 4486 } 4487 rp->rbr_sizes[3] = rp->rbr_block_size; 4488} 4489 4490static int niu_alloc_channels(struct niu *np) 4491{ 4492 struct niu_parent *parent = np->parent; 4493 int first_rx_channel, first_tx_channel; 4494 int i, port, err; 4495 4496 port = np->port; 4497 first_rx_channel = first_tx_channel = 0; 4498 for (i = 0; i < port; i++) { 4499 first_rx_channel += parent->rxchan_per_port[i]; 4500 first_tx_channel += parent->txchan_per_port[i]; 4501 } 4502 4503 np->num_rx_rings = parent->rxchan_per_port[port]; 4504 np->num_tx_rings = parent->txchan_per_port[port]; 4505 4506 np->dev->real_num_tx_queues = np->num_tx_rings; 4507 4508 np->rx_rings = kzalloc(np->num_rx_rings * sizeof(struct rx_ring_info), 4509 GFP_KERNEL); 4510 err = -ENOMEM; 4511 if (!np->rx_rings) 4512 goto out_err; 4513 4514 for (i = 0; i < np->num_rx_rings; i++) { 4515 struct rx_ring_info *rp = &np->rx_rings[i]; 4516 4517 rp->np = np; 4518 rp->rx_channel = first_rx_channel + i; 4519 4520 err = niu_alloc_rx_ring_info(np, rp); 4521 if (err) 4522 goto out_err; 4523 4524 niu_size_rbr(np, rp); 4525 4526 /* XXX better defaults, configurable, etc... 
XXX */ 4527 rp->nonsyn_window = 64; 4528 rp->nonsyn_threshold = rp->rcr_table_size - 64; 4529 rp->syn_window = 64; 4530 rp->syn_threshold = rp->rcr_table_size - 64; 4531 rp->rcr_pkt_threshold = 16; 4532 rp->rcr_timeout = 8; 4533 rp->rbr_kick_thresh = RBR_REFILL_MIN; 4534 if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page) 4535 rp->rbr_kick_thresh = rp->rbr_blocks_per_page; 4536 4537 err = niu_rbr_fill(np, rp, GFP_KERNEL); 4538 if (err) 4539 return err; 4540 } 4541 4542 np->tx_rings = kzalloc(np->num_tx_rings * sizeof(struct tx_ring_info), 4543 GFP_KERNEL); 4544 err = -ENOMEM; 4545 if (!np->tx_rings) 4546 goto out_err; 4547 4548 for (i = 0; i < np->num_tx_rings; i++) { 4549 struct tx_ring_info *rp = &np->tx_rings[i]; 4550 4551 rp->np = np; 4552 rp->tx_channel = first_tx_channel + i; 4553 4554 err = niu_alloc_tx_ring_info(np, rp); 4555 if (err) 4556 goto out_err; 4557 } 4558 4559 return 0; 4560 4561out_err: 4562 niu_free_channels(np); 4563 return err; 4564} 4565 4566static int niu_tx_cs_sng_poll(struct niu *np, int channel) 4567{ 4568 int limit = 1000; 4569 4570 while (--limit > 0) { 4571 u64 val = nr64(TX_CS(channel)); 4572 if (val & TX_CS_SNG_STATE) 4573 return 0; 4574 } 4575 return -ENODEV; 4576} 4577 4578static int niu_tx_channel_stop(struct niu *np, int channel) 4579{ 4580 u64 val = nr64(TX_CS(channel)); 4581 4582 val |= TX_CS_STOP_N_GO; 4583 nw64(TX_CS(channel), val); 4584 4585 return niu_tx_cs_sng_poll(np, channel); 4586} 4587 4588static int niu_tx_cs_reset_poll(struct niu *np, int channel) 4589{ 4590 int limit = 1000; 4591 4592 while (--limit > 0) { 4593 u64 val = nr64(TX_CS(channel)); 4594 if (!(val & TX_CS_RST)) 4595 return 0; 4596 } 4597 return -ENODEV; 4598} 4599 4600static int niu_tx_channel_reset(struct niu *np, int channel) 4601{ 4602 u64 val = nr64(TX_CS(channel)); 4603 int err; 4604 4605 val |= TX_CS_RST; 4606 nw64(TX_CS(channel), val); 4607 4608 err = niu_tx_cs_reset_poll(np, channel); 4609 if (!err) 4610 nw64(TX_RING_KICK(channel), 0); 4611 4612 return err; 4613} 4614 4615static int niu_tx_channel_lpage_init(struct niu *np, int channel) 4616{ 4617 u64 val; 4618 4619 nw64(TX_LOG_MASK1(channel), 0); 4620 nw64(TX_LOG_VAL1(channel), 0); 4621 nw64(TX_LOG_MASK2(channel), 0); 4622 nw64(TX_LOG_VAL2(channel), 0); 4623 nw64(TX_LOG_PAGE_RELO1(channel), 0); 4624 nw64(TX_LOG_PAGE_RELO2(channel), 0); 4625 nw64(TX_LOG_PAGE_HDL(channel), 0); 4626 4627 val = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT; 4628 val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1); 4629 nw64(TX_LOG_PAGE_VLD(channel), val); 4630 4631 /* XXX TXDMA 32bit mode? 
XXX */ 4632 4633 return 0; 4634} 4635 4636static void niu_txc_enable_port(struct niu *np, int on) 4637{ 4638 unsigned long flags; 4639 u64 val, mask; 4640 4641 niu_lock_parent(np, flags); 4642 val = nr64(TXC_CONTROL); 4643 mask = (u64)1 << np->port; 4644 if (on) { 4645 val |= TXC_CONTROL_ENABLE | mask; 4646 } else { 4647 val &= ~mask; 4648 if ((val & ~TXC_CONTROL_ENABLE) == 0) 4649 val &= ~TXC_CONTROL_ENABLE; 4650 } 4651 nw64(TXC_CONTROL, val); 4652 niu_unlock_parent(np, flags); 4653} 4654 4655static void niu_txc_set_imask(struct niu *np, u64 imask) 4656{ 4657 unsigned long flags; 4658 u64 val; 4659 4660 niu_lock_parent(np, flags); 4661 val = nr64(TXC_INT_MASK); 4662 val &= ~TXC_INT_MASK_VAL(np->port); 4663 val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port)); 4664 niu_unlock_parent(np, flags); 4665} 4666 4667static void niu_txc_port_dma_enable(struct niu *np, int on) 4668{ 4669 u64 val = 0; 4670 4671 if (on) { 4672 int i; 4673 4674 for (i = 0; i < np->num_tx_rings; i++) 4675 val |= (1 << np->tx_rings[i].tx_channel); 4676 } 4677 nw64(TXC_PORT_DMA(np->port), val); 4678} 4679 4680static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp) 4681{ 4682 int err, channel = rp->tx_channel; 4683 u64 val, ring_len; 4684 4685 err = niu_tx_channel_stop(np, channel); 4686 if (err) 4687 return err; 4688 4689 err = niu_tx_channel_reset(np, channel); 4690 if (err) 4691 return err; 4692 4693 err = niu_tx_channel_lpage_init(np, channel); 4694 if (err) 4695 return err; 4696 4697 nw64(TXC_DMA_MAX(channel), rp->max_burst); 4698 nw64(TX_ENT_MSK(channel), 0); 4699 4700 if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE | 4701 TX_RNG_CFIG_STADDR)) { 4702 netdev_err(np->dev, "TX ring channel %d DMA addr (%llx) is not aligned\n", 4703 channel, (unsigned long long)rp->descr_dma); 4704 return -EINVAL; 4705 } 4706 4707 /* The length field in TX_RNG_CFIG is measured in 64-byte 4708 * blocks. rp->pending is the number of TX descriptors in 4709 * our ring, 8 bytes each, thus we divide by 8 bytes more 4710 * to get the proper value the chip wants. 
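 * As an illustration: a ring of 256 descriptors occupies
 * 256 * 8 = 2048 bytes, i.e. 2048 / 64 = 32 blocks, which is
 * exactly 256 / 8.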
4711 */ 4712 ring_len = (rp->pending / 8); 4713 4714 val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) | 4715 rp->descr_dma); 4716 nw64(TX_RNG_CFIG(channel), val); 4717 4718 if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) || 4719 ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) { 4720 netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n", 4721 channel, (unsigned long long)rp->mbox_dma); 4722 return -EINVAL; 4723 } 4724 nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32); 4725 nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR); 4726 4727 nw64(TX_CS(channel), 0); 4728 4729 rp->last_pkt_cnt = 0; 4730 4731 return 0; 4732} 4733 4734static void niu_init_rdc_groups(struct niu *np) 4735{ 4736 struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port]; 4737 int i, first_table_num = tp->first_table_num; 4738 4739 for (i = 0; i < tp->num_tables; i++) { 4740 struct rdc_table *tbl = &tp->tables[i]; 4741 int this_table = first_table_num + i; 4742 int slot; 4743 4744 for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) 4745 nw64(RDC_TBL(this_table, slot), 4746 tbl->rxdma_channel[slot]); 4747 } 4748 4749 nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]); 4750} 4751 4752static void niu_init_drr_weight(struct niu *np) 4753{ 4754 int type = phy_decode(np->parent->port_phy, np->port); 4755 u64 val; 4756 4757 switch (type) { 4758 case PORT_TYPE_10G: 4759 val = PT_DRR_WEIGHT_DEFAULT_10G; 4760 break; 4761 4762 case PORT_TYPE_1G: 4763 default: 4764 val = PT_DRR_WEIGHT_DEFAULT_1G; 4765 break; 4766 } 4767 nw64(PT_DRR_WT(np->port), val); 4768} 4769 4770static int niu_init_hostinfo(struct niu *np) 4771{ 4772 struct niu_parent *parent = np->parent; 4773 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; 4774 int i, err, num_alt = niu_num_alt_addr(np); 4775 int first_rdc_table = tp->first_table_num; 4776 4777 err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); 4778 if (err) 4779 return err; 4780 4781 err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); 4782 if (err) 4783 return err; 4784 4785 for (i = 0; i < num_alt; i++) { 4786 err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1); 4787 if (err) 4788 return err; 4789 } 4790 4791 return 0; 4792} 4793 4794static int niu_rx_channel_reset(struct niu *np, int channel) 4795{ 4796 return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel), 4797 RXDMA_CFIG1_RST, 1000, 10, 4798 "RXDMA_CFIG1"); 4799} 4800 4801static int niu_rx_channel_lpage_init(struct niu *np, int channel) 4802{ 4803 u64 val; 4804 4805 nw64(RX_LOG_MASK1(channel), 0); 4806 nw64(RX_LOG_VAL1(channel), 0); 4807 nw64(RX_LOG_MASK2(channel), 0); 4808 nw64(RX_LOG_VAL2(channel), 0); 4809 nw64(RX_LOG_PAGE_RELO1(channel), 0); 4810 nw64(RX_LOG_PAGE_RELO2(channel), 0); 4811 nw64(RX_LOG_PAGE_HDL(channel), 0); 4812 4813 val = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT; 4814 val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1); 4815 nw64(RX_LOG_PAGE_VLD(channel), val); 4816 4817 return 0; 4818} 4819 4820static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp) 4821{ 4822 u64 val; 4823 4824 val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) | 4825 ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) | 4826 ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) | 4827 ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT)); 4828 nw64(RDC_RED_PARA(rp->rx_channel), val); 4829} 4830 4831static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret) 4832{ 4833 u64 val = 0; 4834 4835 *ret = 0; 4836 switch (rp->rbr_block_size) { 4837 case 4 * 1024: 
4838 val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT); 4839 break; 4840 case 8 * 1024: 4841 val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT); 4842 break; 4843 case 16 * 1024: 4844 val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT); 4845 break; 4846 case 32 * 1024: 4847 val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT); 4848 break; 4849 default: 4850 return -EINVAL; 4851 } 4852 val |= RBR_CFIG_B_VLD2; 4853 switch (rp->rbr_sizes[2]) { 4854 case 2 * 1024: 4855 val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT); 4856 break; 4857 case 4 * 1024: 4858 val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT); 4859 break; 4860 case 8 * 1024: 4861 val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT); 4862 break; 4863 case 16 * 1024: 4864 val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT); 4865 break; 4866 4867 default: 4868 return -EINVAL; 4869 } 4870 val |= RBR_CFIG_B_VLD1; 4871 switch (rp->rbr_sizes[1]) { 4872 case 1 * 1024: 4873 val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT); 4874 break; 4875 case 2 * 1024: 4876 val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT); 4877 break; 4878 case 4 * 1024: 4879 val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT); 4880 break; 4881 case 8 * 1024: 4882 val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT); 4883 break; 4884 4885 default: 4886 return -EINVAL; 4887 } 4888 val |= RBR_CFIG_B_VLD0; 4889 switch (rp->rbr_sizes[0]) { 4890 case 256: 4891 val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT); 4892 break; 4893 case 512: 4894 val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT); 4895 break; 4896 case 1 * 1024: 4897 val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT); 4898 break; 4899 case 2 * 1024: 4900 val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT); 4901 break; 4902 4903 default: 4904 return -EINVAL; 4905 } 4906 4907 *ret = val; 4908 return 0; 4909} 4910 4911static int niu_enable_rx_channel(struct niu *np, int channel, int on) 4912{ 4913 u64 val = nr64(RXDMA_CFIG1(channel)); 4914 int limit; 4915 4916 if (on) 4917 val |= RXDMA_CFIG1_EN; 4918 else 4919 val &= ~RXDMA_CFIG1_EN; 4920 nw64(RXDMA_CFIG1(channel), val); 4921 4922 limit = 1000; 4923 while (--limit > 0) { 4924 if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST) 4925 break; 4926 udelay(10); 4927 } 4928 if (limit <= 0) 4929 return -ENODEV; 4930 return 0; 4931} 4932 4933static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp) 4934{ 4935 int err, channel = rp->rx_channel; 4936 u64 val; 4937 4938 err = niu_rx_channel_reset(np, channel); 4939 if (err) 4940 return err; 4941 4942 err = niu_rx_channel_lpage_init(np, channel); 4943 if (err) 4944 return err; 4945 4946 niu_rx_channel_wred_init(np, rp); 4947 4948 nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY); 4949 nw64(RX_DMA_CTL_STAT(channel), 4950 (RX_DMA_CTL_STAT_MEX | 4951 RX_DMA_CTL_STAT_RCRTHRES | 4952 RX_DMA_CTL_STAT_RCRTO | 4953 RX_DMA_CTL_STAT_RBR_EMPTY)); 4954 nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32); 4955 nw64(RXDMA_CFIG2(channel), 4956 ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) | 4957 RXDMA_CFIG2_FULL_HDR)); 4958 nw64(RBR_CFIG_A(channel), 4959 ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) | 4960 (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR))); 4961 err = niu_compute_rbr_cfig_b(rp, &val); 4962 if (err) 4963 return err; 4964 nw64(RBR_CFIG_B(channel), val); 4965 nw64(RCRCFIG_A(channel), 4966 ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) | 4967 (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR))); 4968 nw64(RCRCFIG_B(channel), 4969 ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) | 4970 
RCRCFIG_B_ENTOUT | 4971 ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT)); 4972 4973 err = niu_enable_rx_channel(np, channel, 1); 4974 if (err) 4975 return err; 4976 4977 nw64(RBR_KICK(channel), rp->rbr_index); 4978 4979 val = nr64(RX_DMA_CTL_STAT(channel)); 4980 val |= RX_DMA_CTL_STAT_RBR_EMPTY; 4981 nw64(RX_DMA_CTL_STAT(channel), val); 4982 4983 return 0; 4984} 4985 4986static int niu_init_rx_channels(struct niu *np) 4987{ 4988 unsigned long flags; 4989 u64 seed = jiffies_64; 4990 int err, i; 4991 4992 niu_lock_parent(np, flags); 4993 nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider); 4994 nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL)); 4995 niu_unlock_parent(np, flags); 4996 4997 /* XXX RXDMA 32bit mode? XXX */ 4998 4999 niu_init_rdc_groups(np); 5000 niu_init_drr_weight(np); 5001 5002 err = niu_init_hostinfo(np); 5003 if (err) 5004 return err; 5005 5006 for (i = 0; i < np->num_rx_rings; i++) { 5007 struct rx_ring_info *rp = &np->rx_rings[i]; 5008 5009 err = niu_init_one_rx_channel(np, rp); 5010 if (err) 5011 return err; 5012 } 5013 5014 return 0; 5015} 5016 5017static int niu_set_ip_frag_rule(struct niu *np) 5018{ 5019 struct niu_parent *parent = np->parent; 5020 struct niu_classifier *cp = &np->clas; 5021 struct niu_tcam_entry *tp; 5022 int index, err; 5023 5024 index = cp->tcam_top; 5025 tp = &parent->tcam[index]; 5026 5027 /* Note that the noport bit is the same in both ipv4 and 5028 * ipv6 format TCAM entries. 5029 */ 5030 memset(tp, 0, sizeof(*tp)); 5031 tp->key[1] = TCAM_V4KEY1_NOPORT; 5032 tp->key_mask[1] = TCAM_V4KEY1_NOPORT; 5033 tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET | 5034 ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT)); 5035 err = tcam_write(np, index, tp->key, tp->key_mask); 5036 if (err) 5037 return err; 5038 err = tcam_assoc_write(np, index, tp->assoc_data); 5039 if (err) 5040 return err; 5041 tp->valid = 1; 5042 cp->tcam_valid_entries++; 5043 5044 return 0; 5045} 5046 5047static int niu_init_classifier_hw(struct niu *np) 5048{ 5049 struct niu_parent *parent = np->parent; 5050 struct niu_classifier *cp = &np->clas; 5051 int i, err; 5052 5053 nw64(H1POLY, cp->h1_init); 5054 nw64(H2POLY, cp->h2_init); 5055 5056 err = niu_init_hostinfo(np); 5057 if (err) 5058 return err; 5059 5060 for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) { 5061 struct niu_vlan_rdc *vp = &cp->vlan_mappings[i]; 5062 5063 vlan_tbl_write(np, i, np->port, 5064 vp->vlan_pref, vp->rdc_num); 5065 } 5066 5067 for (i = 0; i < cp->num_alt_mac_mappings; i++) { 5068 struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i]; 5069 5070 err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num, 5071 ap->rdc_num, ap->mac_pref); 5072 if (err) 5073 return err; 5074 } 5075 5076 for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) { 5077 int index = i - CLASS_CODE_USER_PROG1; 5078 5079 err = niu_set_tcam_key(np, i, parent->tcam_key[index]); 5080 if (err) 5081 return err; 5082 err = niu_set_flow_key(np, i, parent->flow_key[index]); 5083 if (err) 5084 return err; 5085 } 5086 5087 err = niu_set_ip_frag_rule(np); 5088 if (err) 5089 return err; 5090 5091 tcam_enable(np, 1); 5092 5093 return 0; 5094} 5095 5096static int niu_zcp_write(struct niu *np, int index, u64 *data) 5097{ 5098 nw64(ZCP_RAM_DATA0, data[0]); 5099 nw64(ZCP_RAM_DATA1, data[1]); 5100 nw64(ZCP_RAM_DATA2, data[2]); 5101 nw64(ZCP_RAM_DATA3, data[3]); 5102 nw64(ZCP_RAM_DATA4, data[4]); 5103 nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL); 5104 nw64(ZCP_RAM_ACC, 5105 (ZCP_RAM_ACC_WRITE | 5106 (0 << ZCP_RAM_ACC_ZFCID_SHIFT) | 5107 
(ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT))); 5108 5109 return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, 5110 1000, 100); 5111} 5112 5113static int niu_zcp_read(struct niu *np, int index, u64 *data) 5114{ 5115 int err; 5116 5117 err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, 5118 1000, 100); 5119 if (err) { 5120 netdev_err(np->dev, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n", 5121 (unsigned long long)nr64(ZCP_RAM_ACC)); 5122 return err; 5123 } 5124 5125 nw64(ZCP_RAM_ACC, 5126 (ZCP_RAM_ACC_READ | 5127 (0 << ZCP_RAM_ACC_ZFCID_SHIFT) | 5128 (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT))); 5129 5130 err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, 5131 1000, 100); 5132 if (err) { 5133 netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n", 5134 (unsigned long long)nr64(ZCP_RAM_ACC)); 5135 return err; 5136 } 5137 5138 data[0] = nr64(ZCP_RAM_DATA0); 5139 data[1] = nr64(ZCP_RAM_DATA1); 5140 data[2] = nr64(ZCP_RAM_DATA2); 5141 data[3] = nr64(ZCP_RAM_DATA3); 5142 data[4] = nr64(ZCP_RAM_DATA4); 5143 5144 return 0; 5145} 5146 5147static void niu_zcp_cfifo_reset(struct niu *np) 5148{ 5149 u64 val = nr64(RESET_CFIFO); 5150 5151 val |= RESET_CFIFO_RST(np->port); 5152 nw64(RESET_CFIFO, val); 5153 udelay(10); 5154 5155 val &= ~RESET_CFIFO_RST(np->port); 5156 nw64(RESET_CFIFO, val); 5157} 5158 5159static int niu_init_zcp(struct niu *np) 5160{ 5161 u64 data[5], rbuf[5]; 5162 int i, max, err; 5163 5164 if (np->parent->plat_type != PLAT_TYPE_NIU) { 5165 if (np->port == 0 || np->port == 1) 5166 max = ATLAS_P0_P1_CFIFO_ENTRIES; 5167 else 5168 max = ATLAS_P2_P3_CFIFO_ENTRIES; 5169 } else 5170 max = NIU_CFIFO_ENTRIES; 5171 5172 data[0] = 0; 5173 data[1] = 0; 5174 data[2] = 0; 5175 data[3] = 0; 5176 data[4] = 0; 5177 5178 for (i = 0; i < max; i++) { 5179 err = niu_zcp_write(np, i, data); 5180 if (err) 5181 return err; 5182 err = niu_zcp_read(np, i, rbuf); 5183 if (err) 5184 return err; 5185 } 5186 5187 niu_zcp_cfifo_reset(np); 5188 nw64(CFIFO_ECC(np->port), 0); 5189 nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL); 5190 (void) nr64(ZCP_INT_STAT); 5191 nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL); 5192 5193 return 0; 5194} 5195 5196static void niu_ipp_write(struct niu *np, int index, u64 *data) 5197{ 5198 u64 val = nr64_ipp(IPP_CFIG); 5199 5200 nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W); 5201 nw64_ipp(IPP_DFIFO_WR_PTR, index); 5202 nw64_ipp(IPP_DFIFO_WR0, data[0]); 5203 nw64_ipp(IPP_DFIFO_WR1, data[1]); 5204 nw64_ipp(IPP_DFIFO_WR2, data[2]); 5205 nw64_ipp(IPP_DFIFO_WR3, data[3]); 5206 nw64_ipp(IPP_DFIFO_WR4, data[4]); 5207 nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W); 5208} 5209 5210static void niu_ipp_read(struct niu *np, int index, u64 *data) 5211{ 5212 nw64_ipp(IPP_DFIFO_RD_PTR, index); 5213 data[0] = nr64_ipp(IPP_DFIFO_RD0); 5214 data[1] = nr64_ipp(IPP_DFIFO_RD1); 5215 data[2] = nr64_ipp(IPP_DFIFO_RD2); 5216 data[3] = nr64_ipp(IPP_DFIFO_RD3); 5217 data[4] = nr64_ipp(IPP_DFIFO_RD4); 5218} 5219 5220static int niu_ipp_reset(struct niu *np) 5221{ 5222 return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST, 5223 1000, 100, "IPP_CFIG"); 5224} 5225 5226static int niu_init_ipp(struct niu *np) 5227{ 5228 u64 data[5], rbuf[5], val; 5229 int i, max, err; 5230 5231 if (np->parent->plat_type != PLAT_TYPE_NIU) { 5232 if (np->port == 0 || np->port == 1) 5233 max = ATLAS_P0_P1_DFIFO_ENTRIES; 5234 else 5235 max = ATLAS_P2_P3_DFIFO_ENTRIES; 5236 } else 5237 max = NIU_DFIFO_ENTRIES; 5238 5239 data[0] = 0; 5240 data[1] = 0; 5241 data[2] = 0; 5242 
data[3] = 0; 5243 data[4] = 0; 5244 5245 for (i = 0; i < max; i++) { 5246 niu_ipp_write(np, i, data); 5247 niu_ipp_read(np, i, rbuf); 5248 } 5249 5250 (void) nr64_ipp(IPP_INT_STAT); 5251 (void) nr64_ipp(IPP_INT_STAT); 5252 5253 err = niu_ipp_reset(np); 5254 if (err) 5255 return err; 5256 5257 (void) nr64_ipp(IPP_PKT_DIS); 5258 (void) nr64_ipp(IPP_BAD_CS_CNT); 5259 (void) nr64_ipp(IPP_ECC); 5260 5261 (void) nr64_ipp(IPP_INT_STAT); 5262 5263 nw64_ipp(IPP_MSK, ~IPP_MSK_ALL); 5264 5265 val = nr64_ipp(IPP_CFIG); 5266 val &= ~IPP_CFIG_IP_MAX_PKT; 5267 val |= (IPP_CFIG_IPP_ENABLE | 5268 IPP_CFIG_DFIFO_ECC_EN | 5269 IPP_CFIG_DROP_BAD_CRC | 5270 IPP_CFIG_CKSUM_EN | 5271 (0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT)); 5272 nw64_ipp(IPP_CFIG, val); 5273 5274 return 0; 5275} 5276 5277static void niu_handle_led(struct niu *np, int status) 5278{ 5279 u64 val; 5280 val = nr64_mac(XMAC_CONFIG); 5281 5282 if ((np->flags & NIU_FLAGS_10G) != 0 && 5283 (np->flags & NIU_FLAGS_FIBER) != 0) { 5284 if (status) { 5285 val |= XMAC_CONFIG_LED_POLARITY; 5286 val &= ~XMAC_CONFIG_FORCE_LED_ON; 5287 } else { 5288 val |= XMAC_CONFIG_FORCE_LED_ON; 5289 val &= ~XMAC_CONFIG_LED_POLARITY; 5290 } 5291 } 5292 5293 nw64_mac(XMAC_CONFIG, val); 5294} 5295 5296static void niu_init_xif_xmac(struct niu *np) 5297{ 5298 struct niu_link_config *lp = &np->link_config; 5299 u64 val; 5300 5301 if (np->flags & NIU_FLAGS_XCVR_SERDES) { 5302 val = nr64(MIF_CONFIG); 5303 val |= MIF_CONFIG_ATCA_GE; 5304 nw64(MIF_CONFIG, val); 5305 } 5306 5307 val = nr64_mac(XMAC_CONFIG); 5308 val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC; 5309 5310 val |= XMAC_CONFIG_TX_OUTPUT_EN; 5311 5312 if (lp->loopback_mode == LOOPBACK_MAC) { 5313 val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC; 5314 val |= XMAC_CONFIG_LOOPBACK; 5315 } else { 5316 val &= ~XMAC_CONFIG_LOOPBACK; 5317 } 5318 5319 if (np->flags & NIU_FLAGS_10G) { 5320 val &= ~XMAC_CONFIG_LFS_DISABLE; 5321 } else { 5322 val |= XMAC_CONFIG_LFS_DISABLE; 5323 if (!(np->flags & NIU_FLAGS_FIBER) && 5324 !(np->flags & NIU_FLAGS_XCVR_SERDES)) 5325 val |= XMAC_CONFIG_1G_PCS_BYPASS; 5326 else 5327 val &= ~XMAC_CONFIG_1G_PCS_BYPASS; 5328 } 5329 5330 val &= ~XMAC_CONFIG_10G_XPCS_BYPASS; 5331 5332 if (lp->active_speed == SPEED_100) 5333 val |= XMAC_CONFIG_SEL_CLK_25MHZ; 5334 else 5335 val &= ~XMAC_CONFIG_SEL_CLK_25MHZ; 5336 5337 nw64_mac(XMAC_CONFIG, val); 5338 5339 val = nr64_mac(XMAC_CONFIG); 5340 val &= ~XMAC_CONFIG_MODE_MASK; 5341 if (np->flags & NIU_FLAGS_10G) { 5342 val |= XMAC_CONFIG_MODE_XGMII; 5343 } else { 5344 if (lp->active_speed == SPEED_1000) 5345 val |= XMAC_CONFIG_MODE_GMII; 5346 else 5347 val |= XMAC_CONFIG_MODE_MII; 5348 } 5349 5350 nw64_mac(XMAC_CONFIG, val); 5351} 5352 5353static void niu_init_xif_bmac(struct niu *np) 5354{ 5355 struct niu_link_config *lp = &np->link_config; 5356 u64 val; 5357 5358 val = BMAC_XIF_CONFIG_TX_OUTPUT_EN; 5359 5360 if (lp->loopback_mode == LOOPBACK_MAC) 5361 val |= BMAC_XIF_CONFIG_MII_LOOPBACK; 5362 else 5363 val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK; 5364 5365 if (lp->active_speed == SPEED_1000) 5366 val |= BMAC_XIF_CONFIG_GMII_MODE; 5367 else 5368 val &= ~BMAC_XIF_CONFIG_GMII_MODE; 5369 5370 val &= ~(BMAC_XIF_CONFIG_LINK_LED | 5371 BMAC_XIF_CONFIG_LED_POLARITY); 5372 5373 if (!(np->flags & NIU_FLAGS_10G) && 5374 !(np->flags & NIU_FLAGS_FIBER) && 5375 lp->active_speed == SPEED_100) 5376 val |= BMAC_XIF_CONFIG_25MHZ_CLOCK; 5377 else 5378 val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK; 5379 5380 nw64_mac(BMAC_XIF_CONFIG, val); 5381} 5382 5383static void niu_init_xif(struct niu *np) 5384{ 5385 if (np->flags & 
NIU_FLAGS_XMAC) 5386 niu_init_xif_xmac(np); 5387 else 5388 niu_init_xif_bmac(np); 5389} 5390 5391static void niu_pcs_mii_reset(struct niu *np) 5392{ 5393 int limit = 1000; 5394 u64 val = nr64_pcs(PCS_MII_CTL); 5395 val |= PCS_MII_CTL_RST; 5396 nw64_pcs(PCS_MII_CTL, val); 5397 while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) { 5398 udelay(100); 5399 val = nr64_pcs(PCS_MII_CTL); 5400 } 5401} 5402 5403static void niu_xpcs_reset(struct niu *np) 5404{ 5405 int limit = 1000; 5406 u64 val = nr64_xpcs(XPCS_CONTROL1); 5407 val |= XPCS_CONTROL1_RESET; 5408 nw64_xpcs(XPCS_CONTROL1, val); 5409 while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) { 5410 udelay(100); 5411 val = nr64_xpcs(XPCS_CONTROL1); 5412 } 5413} 5414 5415static int niu_init_pcs(struct niu *np) 5416{ 5417 struct niu_link_config *lp = &np->link_config; 5418 u64 val; 5419 5420 switch (np->flags & (NIU_FLAGS_10G | 5421 NIU_FLAGS_FIBER | 5422 NIU_FLAGS_XCVR_SERDES)) { 5423 case NIU_FLAGS_FIBER: 5424 /* 1G fiber */ 5425 nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE); 5426 nw64_pcs(PCS_DPATH_MODE, 0); 5427 niu_pcs_mii_reset(np); 5428 break; 5429 5430 case NIU_FLAGS_10G: 5431 case NIU_FLAGS_10G | NIU_FLAGS_FIBER: 5432 case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES: 5433 /* 10G SERDES */ 5434 if (!(np->flags & NIU_FLAGS_XMAC)) 5435 return -EINVAL; 5436 5437 /* 10G copper or fiber */ 5438 val = nr64_mac(XMAC_CONFIG); 5439 val &= ~XMAC_CONFIG_10G_XPCS_BYPASS; 5440 nw64_mac(XMAC_CONFIG, val); 5441 5442 niu_xpcs_reset(np); 5443 5444 val = nr64_xpcs(XPCS_CONTROL1); 5445 if (lp->loopback_mode == LOOPBACK_PHY) 5446 val |= XPCS_CONTROL1_LOOPBACK; 5447 else 5448 val &= ~XPCS_CONTROL1_LOOPBACK; 5449 nw64_xpcs(XPCS_CONTROL1, val); 5450 5451 nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0); 5452 (void) nr64_xpcs(XPCS_SYMERR_CNT01); 5453 (void) nr64_xpcs(XPCS_SYMERR_CNT23); 5454 break; 5455 5456 5457 case NIU_FLAGS_XCVR_SERDES: 5458 /* 1G SERDES */ 5459 niu_pcs_mii_reset(np); 5460 nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE); 5461 nw64_pcs(PCS_DPATH_MODE, 0); 5462 break; 5463 5464 case 0: 5465 /* 1G copper */ 5466 case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER: 5467 /* 1G RGMII FIBER */ 5468 nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII); 5469 niu_pcs_mii_reset(np); 5470 break; 5471 5472 default: 5473 return -EINVAL; 5474 } 5475 5476 return 0; 5477} 5478 5479static int niu_reset_tx_xmac(struct niu *np) 5480{ 5481 return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST, 5482 (XTXMAC_SW_RST_REG_RS | 5483 XTXMAC_SW_RST_SOFT_RST), 5484 1000, 100, "XTXMAC_SW_RST"); 5485} 5486 5487static int niu_reset_tx_bmac(struct niu *np) 5488{ 5489 int limit; 5490 5491 nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET); 5492 limit = 1000; 5493 while (--limit >= 0) { 5494 if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET)) 5495 break; 5496 udelay(100); 5497 } 5498 if (limit < 0) { 5499 dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n", 5500 np->port, 5501 (unsigned long long) nr64_mac(BTXMAC_SW_RST)); 5502 return -ENODEV; 5503 } 5504 5505 return 0; 5506} 5507 5508static int niu_reset_tx_mac(struct niu *np) 5509{ 5510 if (np->flags & NIU_FLAGS_XMAC) 5511 return niu_reset_tx_xmac(np); 5512 else 5513 return niu_reset_tx_bmac(np); 5514} 5515 5516static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max) 5517{ 5518 u64 val; 5519 5520 val = nr64_mac(XMAC_MIN); 5521 val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE | 5522 XMAC_MIN_RX_MIN_PKT_SIZE); 5523 val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT); 5524 val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT); 5525 nw64_mac(XMAC_MIN, val); 
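	/* For reference, the min/max arguments to this function come from
	 * niu_init_tx_mac() below: min is 64 (which already satisfies the
	 * XMAC_MIN requirement that the low 3 bits be clear) and max is
	 * 1522 for a standard MTU or 9216 when jumbo frames are in use.
	 */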
5526 5527 nw64_mac(XMAC_MAX, max); 5528 5529 nw64_mac(XTXMAC_STAT_MSK, ~(u64)0); 5530 5531 val = nr64_mac(XMAC_IPG); 5532 if (np->flags & NIU_FLAGS_10G) { 5533 val &= ~XMAC_IPG_IPG_XGMII; 5534 val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT); 5535 } else { 5536 val &= ~XMAC_IPG_IPG_MII_GMII; 5537 val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT); 5538 } 5539 nw64_mac(XMAC_IPG, val); 5540 5541 val = nr64_mac(XMAC_CONFIG); 5542 val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC | 5543 XMAC_CONFIG_STRETCH_MODE | 5544 XMAC_CONFIG_VAR_MIN_IPG_EN | 5545 XMAC_CONFIG_TX_ENABLE); 5546 nw64_mac(XMAC_CONFIG, val); 5547 5548 nw64_mac(TXMAC_FRM_CNT, 0); 5549 nw64_mac(TXMAC_BYTE_CNT, 0); 5550} 5551 5552static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max) 5553{ 5554 u64 val; 5555 5556 nw64_mac(BMAC_MIN_FRAME, min); 5557 nw64_mac(BMAC_MAX_FRAME, max); 5558 5559 nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0); 5560 nw64_mac(BMAC_CTRL_TYPE, 0x8808); 5561 nw64_mac(BMAC_PREAMBLE_SIZE, 7); 5562 5563 val = nr64_mac(BTXMAC_CONFIG); 5564 val &= ~(BTXMAC_CONFIG_FCS_DISABLE | 5565 BTXMAC_CONFIG_ENABLE); 5566 nw64_mac(BTXMAC_CONFIG, val); 5567} 5568 5569static void niu_init_tx_mac(struct niu *np) 5570{ 5571 u64 min, max; 5572 5573 min = 64; 5574 if (np->dev->mtu > ETH_DATA_LEN) 5575 max = 9216; 5576 else 5577 max = 1522; 5578 5579 /* The XMAC_MIN register only accepts values for TX min which 5580 * have the low 3 bits cleared. 5581 */ 5582 BUG_ON(min & 0x7); 5583 5584 if (np->flags & NIU_FLAGS_XMAC) 5585 niu_init_tx_xmac(np, min, max); 5586 else 5587 niu_init_tx_bmac(np, min, max); 5588} 5589 5590static int niu_reset_rx_xmac(struct niu *np) 5591{ 5592 int limit; 5593 5594 nw64_mac(XRXMAC_SW_RST, 5595 XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST); 5596 limit = 1000; 5597 while (--limit >= 0) { 5598 if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS | 5599 XRXMAC_SW_RST_SOFT_RST))) 5600 break; 5601 udelay(100); 5602 } 5603 if (limit < 0) { 5604 dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n", 5605 np->port, 5606 (unsigned long long) nr64_mac(XRXMAC_SW_RST)); 5607 return -ENODEV; 5608 } 5609 5610 return 0; 5611} 5612 5613static int niu_reset_rx_bmac(struct niu *np) 5614{ 5615 int limit; 5616 5617 nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET); 5618 limit = 1000; 5619 while (--limit >= 0) { 5620 if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET)) 5621 break; 5622 udelay(100); 5623 } 5624 if (limit < 0) { 5625 dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n", 5626 np->port, 5627 (unsigned long long) nr64_mac(BRXMAC_SW_RST)); 5628 return -ENODEV; 5629 } 5630 5631 return 0; 5632} 5633 5634static int niu_reset_rx_mac(struct niu *np) 5635{ 5636 if (np->flags & NIU_FLAGS_XMAC) 5637 return niu_reset_rx_xmac(np); 5638 else 5639 return niu_reset_rx_bmac(np); 5640} 5641 5642static void niu_init_rx_xmac(struct niu *np) 5643{ 5644 struct niu_parent *parent = np->parent; 5645 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; 5646 int first_rdc_table = tp->first_table_num; 5647 unsigned long i; 5648 u64 val; 5649 5650 nw64_mac(XMAC_ADD_FILT0, 0); 5651 nw64_mac(XMAC_ADD_FILT1, 0); 5652 nw64_mac(XMAC_ADD_FILT2, 0); 5653 nw64_mac(XMAC_ADD_FILT12_MASK, 0); 5654 nw64_mac(XMAC_ADD_FILT00_MASK, 0); 5655 for (i = 0; i < MAC_NUM_HASH; i++) 5656 nw64_mac(XMAC_HASH_TBL(i), 0); 5657 nw64_mac(XRXMAC_STAT_MSK, ~(u64)0); 5658 niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); 5659 niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); 5660 5661 val = nr64_mac(XMAC_CONFIG); 
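	/* What follows is a read-modify-write of XMAC_CONFIG: the RX
	 * filter, promiscuous and enable bits are all cleared and only
	 * hash filtering is switched back on here; niu_enable_rx_xmac()
	 * re-enables the RX MAC and the promiscuous/multicast bits later
	 * as needed.
	 */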
5662 val &= ~(XMAC_CONFIG_RX_MAC_ENABLE | 5663 XMAC_CONFIG_PROMISCUOUS | 5664 XMAC_CONFIG_PROMISC_GROUP | 5665 XMAC_CONFIG_ERR_CHK_DIS | 5666 XMAC_CONFIG_RX_CRC_CHK_DIS | 5667 XMAC_CONFIG_RESERVED_MULTICAST | 5668 XMAC_CONFIG_RX_CODEV_CHK_DIS | 5669 XMAC_CONFIG_ADDR_FILTER_EN | 5670 XMAC_CONFIG_RCV_PAUSE_ENABLE | 5671 XMAC_CONFIG_STRIP_CRC | 5672 XMAC_CONFIG_PASS_FLOW_CTRL | 5673 XMAC_CONFIG_MAC2IPP_PKT_CNT_EN); 5674 val |= (XMAC_CONFIG_HASH_FILTER_EN); 5675 nw64_mac(XMAC_CONFIG, val); 5676 5677 nw64_mac(RXMAC_BT_CNT, 0); 5678 nw64_mac(RXMAC_BC_FRM_CNT, 0); 5679 nw64_mac(RXMAC_MC_FRM_CNT, 0); 5680 nw64_mac(RXMAC_FRAG_CNT, 0); 5681 nw64_mac(RXMAC_HIST_CNT1, 0); 5682 nw64_mac(RXMAC_HIST_CNT2, 0); 5683 nw64_mac(RXMAC_HIST_CNT3, 0); 5684 nw64_mac(RXMAC_HIST_CNT4, 0); 5685 nw64_mac(RXMAC_HIST_CNT5, 0); 5686 nw64_mac(RXMAC_HIST_CNT6, 0); 5687 nw64_mac(RXMAC_HIST_CNT7, 0); 5688 nw64_mac(RXMAC_MPSZER_CNT, 0); 5689 nw64_mac(RXMAC_CRC_ER_CNT, 0); 5690 nw64_mac(RXMAC_CD_VIO_CNT, 0); 5691 nw64_mac(LINK_FAULT_CNT, 0); 5692} 5693 5694static void niu_init_rx_bmac(struct niu *np) 5695{ 5696 struct niu_parent *parent = np->parent; 5697 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; 5698 int first_rdc_table = tp->first_table_num; 5699 unsigned long i; 5700 u64 val; 5701 5702 nw64_mac(BMAC_ADD_FILT0, 0); 5703 nw64_mac(BMAC_ADD_FILT1, 0); 5704 nw64_mac(BMAC_ADD_FILT2, 0); 5705 nw64_mac(BMAC_ADD_FILT12_MASK, 0); 5706 nw64_mac(BMAC_ADD_FILT00_MASK, 0); 5707 for (i = 0; i < MAC_NUM_HASH; i++) 5708 nw64_mac(BMAC_HASH_TBL(i), 0); 5709 niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); 5710 niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); 5711 nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0); 5712 5713 val = nr64_mac(BRXMAC_CONFIG); 5714 val &= ~(BRXMAC_CONFIG_ENABLE | 5715 BRXMAC_CONFIG_STRIP_PAD | 5716 BRXMAC_CONFIG_STRIP_FCS | 5717 BRXMAC_CONFIG_PROMISC | 5718 BRXMAC_CONFIG_PROMISC_GRP | 5719 BRXMAC_CONFIG_ADDR_FILT_EN | 5720 BRXMAC_CONFIG_DISCARD_DIS); 5721 val |= (BRXMAC_CONFIG_HASH_FILT_EN); 5722 nw64_mac(BRXMAC_CONFIG, val); 5723 5724 val = nr64_mac(BMAC_ADDR_CMPEN); 5725 val |= BMAC_ADDR_CMPEN_EN0; 5726 nw64_mac(BMAC_ADDR_CMPEN, val); 5727} 5728 5729static void niu_init_rx_mac(struct niu *np) 5730{ 5731 niu_set_primary_mac(np, np->dev->dev_addr); 5732 5733 if (np->flags & NIU_FLAGS_XMAC) 5734 niu_init_rx_xmac(np); 5735 else 5736 niu_init_rx_bmac(np); 5737} 5738 5739static void niu_enable_tx_xmac(struct niu *np, int on) 5740{ 5741 u64 val = nr64_mac(XMAC_CONFIG); 5742 5743 if (on) 5744 val |= XMAC_CONFIG_TX_ENABLE; 5745 else 5746 val &= ~XMAC_CONFIG_TX_ENABLE; 5747 nw64_mac(XMAC_CONFIG, val); 5748} 5749 5750static void niu_enable_tx_bmac(struct niu *np, int on) 5751{ 5752 u64 val = nr64_mac(BTXMAC_CONFIG); 5753 5754 if (on) 5755 val |= BTXMAC_CONFIG_ENABLE; 5756 else 5757 val &= ~BTXMAC_CONFIG_ENABLE; 5758 nw64_mac(BTXMAC_CONFIG, val); 5759} 5760 5761static void niu_enable_tx_mac(struct niu *np, int on) 5762{ 5763 if (np->flags & NIU_FLAGS_XMAC) 5764 niu_enable_tx_xmac(np, on); 5765 else 5766 niu_enable_tx_bmac(np, on); 5767} 5768 5769static void niu_enable_rx_xmac(struct niu *np, int on) 5770{ 5771 u64 val = nr64_mac(XMAC_CONFIG); 5772 5773 val &= ~(XMAC_CONFIG_HASH_FILTER_EN | 5774 XMAC_CONFIG_PROMISCUOUS); 5775 5776 if (np->flags & NIU_FLAGS_MCAST) 5777 val |= XMAC_CONFIG_HASH_FILTER_EN; 5778 if (np->flags & NIU_FLAGS_PROMISC) 5779 val |= XMAC_CONFIG_PROMISCUOUS; 5780 5781 if (on) 5782 val |= XMAC_CONFIG_RX_MAC_ENABLE; 5783 else 5784 val &= ~XMAC_CONFIG_RX_MAC_ENABLE; 5785 
nw64_mac(XMAC_CONFIG, val); 5786} 5787 5788static void niu_enable_rx_bmac(struct niu *np, int on) 5789{ 5790 u64 val = nr64_mac(BRXMAC_CONFIG); 5791 5792 val &= ~(BRXMAC_CONFIG_HASH_FILT_EN | 5793 BRXMAC_CONFIG_PROMISC); 5794 5795 if (np->flags & NIU_FLAGS_MCAST) 5796 val |= BRXMAC_CONFIG_HASH_FILT_EN; 5797 if (np->flags & NIU_FLAGS_PROMISC) 5798 val |= BRXMAC_CONFIG_PROMISC; 5799 5800 if (on) 5801 val |= BRXMAC_CONFIG_ENABLE; 5802 else 5803 val &= ~BRXMAC_CONFIG_ENABLE; 5804 nw64_mac(BRXMAC_CONFIG, val); 5805} 5806 5807static void niu_enable_rx_mac(struct niu *np, int on) 5808{ 5809 if (np->flags & NIU_FLAGS_XMAC) 5810 niu_enable_rx_xmac(np, on); 5811 else 5812 niu_enable_rx_bmac(np, on); 5813} 5814 5815static int niu_init_mac(struct niu *np) 5816{ 5817 int err; 5818 5819 niu_init_xif(np); 5820 err = niu_init_pcs(np); 5821 if (err) 5822 return err; 5823 5824 err = niu_reset_tx_mac(np); 5825 if (err) 5826 return err; 5827 niu_init_tx_mac(np); 5828 err = niu_reset_rx_mac(np); 5829 if (err) 5830 return err; 5831 niu_init_rx_mac(np); 5832 5833 /* This looks hookey but the RX MAC reset we just did will 5834 * undo some of the state we setup in niu_init_tx_mac() so we 5835 * have to call it again. In particular, the RX MAC reset will 5836 * set the XMAC_MAX register back to it's default value. 5837 */ 5838 niu_init_tx_mac(np); 5839 niu_enable_tx_mac(np, 1); 5840 5841 niu_enable_rx_mac(np, 1); 5842 5843 return 0; 5844} 5845 5846static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp) 5847{ 5848 (void) niu_tx_channel_stop(np, rp->tx_channel); 5849} 5850 5851static void niu_stop_tx_channels(struct niu *np) 5852{ 5853 int i; 5854 5855 for (i = 0; i < np->num_tx_rings; i++) { 5856 struct tx_ring_info *rp = &np->tx_rings[i]; 5857 5858 niu_stop_one_tx_channel(np, rp); 5859 } 5860} 5861 5862static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp) 5863{ 5864 (void) niu_tx_channel_reset(np, rp->tx_channel); 5865} 5866 5867static void niu_reset_tx_channels(struct niu *np) 5868{ 5869 int i; 5870 5871 for (i = 0; i < np->num_tx_rings; i++) { 5872 struct tx_ring_info *rp = &np->tx_rings[i]; 5873 5874 niu_reset_one_tx_channel(np, rp); 5875 } 5876} 5877 5878static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp) 5879{ 5880 (void) niu_enable_rx_channel(np, rp->rx_channel, 0); 5881} 5882 5883static void niu_stop_rx_channels(struct niu *np) 5884{ 5885 int i; 5886 5887 for (i = 0; i < np->num_rx_rings; i++) { 5888 struct rx_ring_info *rp = &np->rx_rings[i]; 5889 5890 niu_stop_one_rx_channel(np, rp); 5891 } 5892} 5893 5894static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp) 5895{ 5896 int channel = rp->rx_channel; 5897 5898 (void) niu_rx_channel_reset(np, channel); 5899 nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL); 5900 nw64(RX_DMA_CTL_STAT(channel), 0); 5901 (void) niu_enable_rx_channel(np, channel, 0); 5902} 5903 5904static void niu_reset_rx_channels(struct niu *np) 5905{ 5906 int i; 5907 5908 for (i = 0; i < np->num_rx_rings; i++) { 5909 struct rx_ring_info *rp = &np->rx_rings[i]; 5910 5911 niu_reset_one_rx_channel(np, rp); 5912 } 5913} 5914 5915static void niu_disable_ipp(struct niu *np) 5916{ 5917 u64 rd, wr, val; 5918 int limit; 5919 5920 rd = nr64_ipp(IPP_DFIFO_RD_PTR); 5921 wr = nr64_ipp(IPP_DFIFO_WR_PTR); 5922 limit = 100; 5923 while (--limit >= 0 && (rd != wr)) { 5924 rd = nr64_ipp(IPP_DFIFO_RD_PTR); 5925 wr = nr64_ipp(IPP_DFIFO_WR_PTR); 5926 } 5927 if (limit < 0 && 5928 (rd != 0 && wr != 1)) { 5929 
netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n", 5930 (unsigned long long)nr64_ipp(IPP_DFIFO_RD_PTR), 5931 (unsigned long long)nr64_ipp(IPP_DFIFO_WR_PTR)); 5932 } 5933 5934 val = nr64_ipp(IPP_CFIG); 5935 val &= ~(IPP_CFIG_IPP_ENABLE | 5936 IPP_CFIG_DFIFO_ECC_EN | 5937 IPP_CFIG_DROP_BAD_CRC | 5938 IPP_CFIG_CKSUM_EN); 5939 nw64_ipp(IPP_CFIG, val); 5940 5941 (void) niu_ipp_reset(np); 5942} 5943 5944static int niu_init_hw(struct niu *np) 5945{ 5946 int i, err; 5947 5948 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n"); 5949 niu_txc_enable_port(np, 1); 5950 niu_txc_port_dma_enable(np, 1); 5951 niu_txc_set_imask(np, 0); 5952 5953 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n"); 5954 for (i = 0; i < np->num_tx_rings; i++) { 5955 struct tx_ring_info *rp = &np->tx_rings[i]; 5956 5957 err = niu_init_one_tx_channel(np, rp); 5958 if (err) 5959 return err; 5960 } 5961 5962 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n"); 5963 err = niu_init_rx_channels(np); 5964 if (err) 5965 goto out_uninit_tx_channels; 5966 5967 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n"); 5968 err = niu_init_classifier_hw(np); 5969 if (err) 5970 goto out_uninit_rx_channels; 5971 5972 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n"); 5973 err = niu_init_zcp(np); 5974 if (err) 5975 goto out_uninit_rx_channels; 5976 5977 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n"); 5978 err = niu_init_ipp(np); 5979 if (err) 5980 goto out_uninit_rx_channels; 5981 5982 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n"); 5983 err = niu_init_mac(np); 5984 if (err) 5985 goto out_uninit_ipp; 5986 5987 return 0; 5988 5989out_uninit_ipp: 5990 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n"); 5991 niu_disable_ipp(np); 5992 5993out_uninit_rx_channels: 5994 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n"); 5995 niu_stop_rx_channels(np); 5996 niu_reset_rx_channels(np); 5997 5998out_uninit_tx_channels: 5999 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n"); 6000 niu_stop_tx_channels(np); 6001 niu_reset_tx_channels(np); 6002 6003 return err; 6004} 6005 6006static void niu_stop_hw(struct niu *np) 6007{ 6008 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n"); 6009 niu_enable_interrupts(np, 0); 6010 6011 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n"); 6012 niu_enable_rx_mac(np, 0); 6013 6014 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n"); 6015 niu_disable_ipp(np); 6016 6017 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n"); 6018 niu_stop_tx_channels(np); 6019 6020 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n"); 6021 niu_stop_rx_channels(np); 6022 6023 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n"); 6024 niu_reset_tx_channels(np); 6025 6026 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n"); 6027 niu_reset_rx_channels(np); 6028} 6029 6030static void niu_set_irq_name(struct niu *np) 6031{ 6032 int port = np->port; 6033 int i, j = 1; 6034 6035 sprintf(np->irq_name[0], "%s:MAC", np->dev->name); 6036 6037 if (port == 0) { 6038 sprintf(np->irq_name[1], "%s:MIF", np->dev->name); 6039 sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name); 6040 j = 3; 6041 } 6042 6043 for (i = 0; i < np->num_ldg - j; i++) { 6044 if (i < np->num_rx_rings) 6045 sprintf(np->irq_name[i+j], "%s-rx-%d", 6046 np->dev->name, i); 6047 else if (i < np->num_tx_rings + 
np->num_rx_rings) 6048 sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name, 6049 i - np->num_rx_rings); 6050 } 6051} 6052 6053static int niu_request_irq(struct niu *np) 6054{ 6055 int i, j, err; 6056 6057 niu_set_irq_name(np); 6058 6059 err = 0; 6060 for (i = 0; i < np->num_ldg; i++) { 6061 struct niu_ldg *lp = &np->ldg[i]; 6062 6063 err = request_irq(lp->irq, niu_interrupt, 6064 IRQF_SHARED | IRQF_SAMPLE_RANDOM, 6065 np->irq_name[i], lp); 6066 if (err) 6067 goto out_free_irqs; 6068 6069 } 6070 6071 return 0; 6072 6073out_free_irqs: 6074 for (j = 0; j < i; j++) { 6075 struct niu_ldg *lp = &np->ldg[j]; 6076 6077 free_irq(lp->irq, lp); 6078 } 6079 return err; 6080} 6081 6082static void niu_free_irq(struct niu *np) 6083{ 6084 int i; 6085 6086 for (i = 0; i < np->num_ldg; i++) { 6087 struct niu_ldg *lp = &np->ldg[i]; 6088 6089 free_irq(lp->irq, lp); 6090 } 6091} 6092 6093static void niu_enable_napi(struct niu *np) 6094{ 6095 int i; 6096 6097 for (i = 0; i < np->num_ldg; i++) 6098 napi_enable(&np->ldg[i].napi); 6099} 6100 6101static void niu_disable_napi(struct niu *np) 6102{ 6103 int i; 6104 6105 for (i = 0; i < np->num_ldg; i++) 6106 napi_disable(&np->ldg[i].napi); 6107} 6108 6109static int niu_open(struct net_device *dev) 6110{ 6111 struct niu *np = netdev_priv(dev); 6112 int err; 6113 6114 netif_carrier_off(dev); 6115 6116 err = niu_alloc_channels(np); 6117 if (err) 6118 goto out_err; 6119 6120 err = niu_enable_interrupts(np, 0); 6121 if (err) 6122 goto out_free_channels; 6123 6124 err = niu_request_irq(np); 6125 if (err) 6126 goto out_free_channels; 6127 6128 niu_enable_napi(np); 6129 6130 spin_lock_irq(&np->lock); 6131 6132 err = niu_init_hw(np); 6133 if (!err) { 6134 init_timer(&np->timer); 6135 np->timer.expires = jiffies + HZ; 6136 np->timer.data = (unsigned long) np; 6137 np->timer.function = niu_timer; 6138 6139 err = niu_enable_interrupts(np, 1); 6140 if (err) 6141 niu_stop_hw(np); 6142 } 6143 6144 spin_unlock_irq(&np->lock); 6145 6146 if (err) { 6147 niu_disable_napi(np); 6148 goto out_free_irq; 6149 } 6150 6151 netif_tx_start_all_queues(dev); 6152 6153 if (np->link_config.loopback_mode != LOOPBACK_DISABLED) 6154 netif_carrier_on(dev); 6155 6156 add_timer(&np->timer); 6157 6158 return 0; 6159 6160out_free_irq: 6161 niu_free_irq(np); 6162 6163out_free_channels: 6164 niu_free_channels(np); 6165 6166out_err: 6167 return err; 6168} 6169 6170static void niu_full_shutdown(struct niu *np, struct net_device *dev) 6171{ 6172 cancel_work_sync(&np->reset_task); 6173 6174 niu_disable_napi(np); 6175 netif_tx_stop_all_queues(dev); 6176 6177 del_timer_sync(&np->timer); 6178 6179 spin_lock_irq(&np->lock); 6180 6181 niu_stop_hw(np); 6182 6183 spin_unlock_irq(&np->lock); 6184} 6185 6186static int niu_close(struct net_device *dev) 6187{ 6188 struct niu *np = netdev_priv(dev); 6189 6190 niu_full_shutdown(np, dev); 6191 6192 niu_free_irq(np); 6193 6194 niu_free_channels(np); 6195 6196 niu_handle_led(np, 0); 6197 6198 return 0; 6199} 6200 6201static void niu_sync_xmac_stats(struct niu *np) 6202{ 6203 struct niu_xmac_stats *mp = &np->mac_stats.xmac; 6204 6205 mp->tx_frames += nr64_mac(TXMAC_FRM_CNT); 6206 mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT); 6207 6208 mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT); 6209 mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT); 6210 mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT); 6211 mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT); 6212 mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT); 6213 mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1); 6214 mp->rx_hist_cnt2 += 
nr64_mac(RXMAC_HIST_CNT2); 6215 mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3); 6216 mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4); 6217 mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5); 6218 mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6); 6219 mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7); 6220 mp->rx_octets += nr64_mac(RXMAC_BT_CNT); 6221 mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT); 6222 mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT); 6223 mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT); 6224} 6225 6226static void niu_sync_bmac_stats(struct niu *np) 6227{ 6228 struct niu_bmac_stats *mp = &np->mac_stats.bmac; 6229 6230 mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT); 6231 mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT); 6232 6233 mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT); 6234 mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT); 6235 mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT); 6236 mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT); 6237} 6238 6239static void niu_sync_mac_stats(struct niu *np) 6240{ 6241 if (np->flags & NIU_FLAGS_XMAC) 6242 niu_sync_xmac_stats(np); 6243 else 6244 niu_sync_bmac_stats(np); 6245} 6246 6247static void niu_get_rx_stats(struct niu *np) 6248{ 6249 unsigned long pkts, dropped, errors, bytes; 6250 int i; 6251 6252 pkts = dropped = errors = bytes = 0; 6253 for (i = 0; i < np->num_rx_rings; i++) { 6254 struct rx_ring_info *rp = &np->rx_rings[i]; 6255 6256 niu_sync_rx_discard_stats(np, rp, 0); 6257 6258 pkts += rp->rx_packets; 6259 bytes += rp->rx_bytes; 6260 dropped += rp->rx_dropped; 6261 errors += rp->rx_errors; 6262 } 6263 np->dev->stats.rx_packets = pkts; 6264 np->dev->stats.rx_bytes = bytes; 6265 np->dev->stats.rx_dropped = dropped; 6266 np->dev->stats.rx_errors = errors; 6267} 6268 6269static void niu_get_tx_stats(struct niu *np) 6270{ 6271 unsigned long pkts, errors, bytes; 6272 int i; 6273 6274 pkts = errors = bytes = 0; 6275 for (i = 0; i < np->num_tx_rings; i++) { 6276 struct tx_ring_info *rp = &np->tx_rings[i]; 6277 6278 pkts += rp->tx_packets; 6279 bytes += rp->tx_bytes; 6280 errors += rp->tx_errors; 6281 } 6282 np->dev->stats.tx_packets = pkts; 6283 np->dev->stats.tx_bytes = bytes; 6284 np->dev->stats.tx_errors = errors; 6285} 6286 6287static struct net_device_stats *niu_get_stats(struct net_device *dev) 6288{ 6289 struct niu *np = netdev_priv(dev); 6290 6291 niu_get_rx_stats(np); 6292 niu_get_tx_stats(np); 6293 6294 return &dev->stats; 6295} 6296 6297static void niu_load_hash_xmac(struct niu *np, u16 *hash) 6298{ 6299 int i; 6300 6301 for (i = 0; i < 16; i++) 6302 nw64_mac(XMAC_HASH_TBL(i), hash[i]); 6303} 6304 6305static void niu_load_hash_bmac(struct niu *np, u16 *hash) 6306{ 6307 int i; 6308 6309 for (i = 0; i < 16; i++) 6310 nw64_mac(BMAC_HASH_TBL(i), hash[i]); 6311} 6312 6313static void niu_load_hash(struct niu *np, u16 *hash) 6314{ 6315 if (np->flags & NIU_FLAGS_XMAC) 6316 niu_load_hash_xmac(np, hash); 6317 else 6318 niu_load_hash_bmac(np, hash); 6319} 6320 6321static void niu_set_rx_mode(struct net_device *dev) 6322{ 6323 struct niu *np = netdev_priv(dev); 6324 int i, alt_cnt, err; 6325 struct netdev_hw_addr *ha; 6326 unsigned long flags; 6327 u16 hash[16] = { 0, }; 6328 6329 spin_lock_irqsave(&np->lock, flags); 6330 niu_enable_rx_mac(np, 0); 6331 6332 np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC); 6333 if (dev->flags & IFF_PROMISC) 6334 np->flags |= NIU_FLAGS_PROMISC; 6335 if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev))) 6336 np->flags |= NIU_FLAGS_MCAST; 6337 6338 alt_cnt = netdev_uc_count(dev); 6339 if (alt_cnt > 
niu_num_alt_addr(np)) { 6340 alt_cnt = 0; 6341 np->flags |= NIU_FLAGS_PROMISC; 6342 } 6343 6344 if (alt_cnt) { 6345 int index = 0; 6346 6347 netdev_for_each_uc_addr(ha, dev) { 6348 err = niu_set_alt_mac(np, index, ha->addr); 6349 if (err) 6350 netdev_warn(dev, "Error %d adding alt mac %d\n", 6351 err, index); 6352 err = niu_enable_alt_mac(np, index, 1); 6353 if (err) 6354 netdev_warn(dev, "Error %d enabling alt mac %d\n", 6355 err, index); 6356 6357 index++; 6358 } 6359 } else { 6360 int alt_start; 6361 if (np->flags & NIU_FLAGS_XMAC) 6362 alt_start = 0; 6363 else 6364 alt_start = 1; 6365 for (i = alt_start; i < niu_num_alt_addr(np); i++) { 6366 err = niu_enable_alt_mac(np, i, 0); 6367 if (err) 6368 netdev_warn(dev, "Error %d disabling alt mac %d\n", 6369 err, i); 6370 } 6371 } 6372 if (dev->flags & IFF_ALLMULTI) { 6373 for (i = 0; i < 16; i++) 6374 hash[i] = 0xffff; 6375 } else if (!netdev_mc_empty(dev)) { 6376 netdev_for_each_mc_addr(ha, dev) { 6377 u32 crc = ether_crc_le(ETH_ALEN, ha->addr); 6378 6379 crc >>= 24; 6380 hash[crc >> 4] |= (1 << (15 - (crc & 0xf))); 6381 } 6382 } 6383 6384 if (np->flags & NIU_FLAGS_MCAST) 6385 niu_load_hash(np, hash); 6386 6387 niu_enable_rx_mac(np, 1); 6388 spin_unlock_irqrestore(&np->lock, flags); 6389} 6390 6391static int niu_set_mac_addr(struct net_device *dev, void *p) 6392{ 6393 struct niu *np = netdev_priv(dev); 6394 struct sockaddr *addr = p; 6395 unsigned long flags; 6396 6397 if (!is_valid_ether_addr(addr->sa_data)) 6398 return -EINVAL; 6399 6400 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 6401 6402 if (!netif_running(dev)) 6403 return 0; 6404 6405 spin_lock_irqsave(&np->lock, flags); 6406 niu_enable_rx_mac(np, 0); 6407 niu_set_primary_mac(np, dev->dev_addr); 6408 niu_enable_rx_mac(np, 1); 6409 spin_unlock_irqrestore(&np->lock, flags); 6410 6411 return 0; 6412} 6413 6414static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 6415{ 6416 return -EOPNOTSUPP; 6417} 6418 6419static void niu_netif_stop(struct niu *np) 6420{ 6421 np->dev->trans_start = jiffies; /* prevent tx timeout */ 6422 6423 niu_disable_napi(np); 6424 6425 netif_tx_disable(np->dev); 6426} 6427 6428static void niu_netif_start(struct niu *np) 6429{ 6430 /* NOTE: unconditional netif_wake_queue is only appropriate 6431 * so long as all callers are assured to have free tx slots 6432 * (such as after niu_init_hw). 
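 * (In this file the reset path, niu_reset_task(), calls it only after
 * a successful niu_init_hw(), which satisfies that requirement.)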
6433 */ 6434 netif_tx_wake_all_queues(np->dev); 6435 6436 niu_enable_napi(np); 6437 6438 niu_enable_interrupts(np, 1); 6439} 6440 6441static void niu_reset_buffers(struct niu *np) 6442{ 6443 int i, j, k, err; 6444 6445 if (np->rx_rings) { 6446 for (i = 0; i < np->num_rx_rings; i++) { 6447 struct rx_ring_info *rp = &np->rx_rings[i]; 6448 6449 for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) { 6450 struct page *page; 6451 6452 page = rp->rxhash[j]; 6453 while (page) { 6454 struct page *next = 6455 (struct page *) page->mapping; 6456 u64 base = page->index; 6457 base = base >> RBR_DESCR_ADDR_SHIFT; 6458 rp->rbr[k++] = cpu_to_le32(base); 6459 page = next; 6460 } 6461 } 6462 for (; k < MAX_RBR_RING_SIZE; k++) { 6463 err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k); 6464 if (unlikely(err)) 6465 break; 6466 } 6467 6468 rp->rbr_index = rp->rbr_table_size - 1; 6469 rp->rcr_index = 0; 6470 rp->rbr_pending = 0; 6471 rp->rbr_refill_pending = 0; 6472 } 6473 } 6474 if (np->tx_rings) { 6475 for (i = 0; i < np->num_tx_rings; i++) { 6476 struct tx_ring_info *rp = &np->tx_rings[i]; 6477 6478 for (j = 0; j < MAX_TX_RING_SIZE; j++) { 6479 if (rp->tx_buffs[j].skb) 6480 (void) release_tx_packet(np, rp, j); 6481 } 6482 6483 rp->pending = MAX_TX_RING_SIZE; 6484 rp->prod = 0; 6485 rp->cons = 0; 6486 rp->wrap_bit = 0; 6487 } 6488 } 6489} 6490 6491static void niu_reset_task(struct work_struct *work) 6492{ 6493 struct niu *np = container_of(work, struct niu, reset_task); 6494 unsigned long flags; 6495 int err; 6496 6497 spin_lock_irqsave(&np->lock, flags); 6498 if (!netif_running(np->dev)) { 6499 spin_unlock_irqrestore(&np->lock, flags); 6500 return; 6501 } 6502 6503 spin_unlock_irqrestore(&np->lock, flags); 6504 6505 del_timer_sync(&np->timer); 6506 6507 niu_netif_stop(np); 6508 6509 spin_lock_irqsave(&np->lock, flags); 6510 6511 niu_stop_hw(np); 6512 6513 spin_unlock_irqrestore(&np->lock, flags); 6514 6515 niu_reset_buffers(np); 6516 6517 spin_lock_irqsave(&np->lock, flags); 6518 6519 err = niu_init_hw(np); 6520 if (!err) { 6521 np->timer.expires = jiffies + HZ; 6522 add_timer(&np->timer); 6523 niu_netif_start(np); 6524 } 6525 6526 spin_unlock_irqrestore(&np->lock, flags); 6527} 6528 6529static void niu_tx_timeout(struct net_device *dev) 6530{ 6531 struct niu *np = netdev_priv(dev); 6532 6533 dev_err(np->device, "%s: Transmit timed out, resetting\n", 6534 dev->name); 6535 6536 schedule_work(&np->reset_task); 6537} 6538 6539static void niu_set_txd(struct tx_ring_info *rp, int index, 6540 u64 mapping, u64 len, u64 mark, 6541 u64 n_frags) 6542{ 6543 __le64 *desc = &rp->descr[index]; 6544 6545 *desc = cpu_to_le64(mark | 6546 (n_frags << TX_DESC_NUM_PTR_SHIFT) | 6547 (len << TX_DESC_TR_LEN_SHIFT) | 6548 (mapping & TX_DESC_SAD)); 6549} 6550 6551static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr, 6552 u64 pad_bytes, u64 len) 6553{ 6554 u16 eth_proto, eth_proto_inner; 6555 u64 csum_bits, l3off, ihl, ret; 6556 u8 ip_proto; 6557 int ipv6; 6558 6559 eth_proto = be16_to_cpu(ehdr->h_proto); 6560 eth_proto_inner = eth_proto; 6561 if (eth_proto == ETH_P_8021Q) { 6562 struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr; 6563 __be16 val = vp->h_vlan_encapsulated_proto; 6564 6565 eth_proto_inner = be16_to_cpu(val); 6566 } 6567 6568 ipv6 = ihl = 0; 6569 switch (skb->protocol) { 6570 case cpu_to_be16(ETH_P_IP): 6571 ip_proto = ip_hdr(skb)->protocol; 6572 ihl = ip_hdr(skb)->ihl; 6573 break; 6574 case cpu_to_be16(ETH_P_IPV6): 6575 ip_proto = ipv6_hdr(skb)->nexthdr; 6576 ihl = (40 >> 2); 6577 ipv6 = 1; 6578 break; 6579 
default: 6580 ip_proto = ihl = 0; 6581 break; 6582 } 6583 6584 csum_bits = TXHDR_CSUM_NONE; 6585 if (skb->ip_summed == CHECKSUM_PARTIAL) { 6586 u64 start, stuff; 6587 6588 csum_bits = (ip_proto == IPPROTO_TCP ? 6589 TXHDR_CSUM_TCP : 6590 (ip_proto == IPPROTO_UDP ? 6591 TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP)); 6592 6593 start = skb_transport_offset(skb) - 6594 (pad_bytes + sizeof(struct tx_pkt_hdr)); 6595 stuff = start + skb->csum_offset; 6596 6597 csum_bits |= (start / 2) << TXHDR_L4START_SHIFT; 6598 csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT; 6599 } 6600 6601 l3off = skb_network_offset(skb) - 6602 (pad_bytes + sizeof(struct tx_pkt_hdr)); 6603 6604 ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) | 6605 (len << TXHDR_LEN_SHIFT) | 6606 ((l3off / 2) << TXHDR_L3START_SHIFT) | 6607 (ihl << TXHDR_IHL_SHIFT) | 6608 ((eth_proto_inner < 1536) ? TXHDR_LLC : 0) | 6609 ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) | 6610 (ipv6 ? TXHDR_IP_VER : 0) | 6611 csum_bits); 6612 6613 return ret; 6614} 6615 6616static netdev_tx_t niu_start_xmit(struct sk_buff *skb, 6617 struct net_device *dev) 6618{ 6619 struct niu *np = netdev_priv(dev); 6620 unsigned long align, headroom; 6621 struct netdev_queue *txq; 6622 struct tx_ring_info *rp; 6623 struct tx_pkt_hdr *tp; 6624 unsigned int len, nfg; 6625 struct ethhdr *ehdr; 6626 int prod, i, tlen; 6627 u64 mapping, mrk; 6628 6629 i = skb_get_queue_mapping(skb); 6630 rp = &np->tx_rings[i]; 6631 txq = netdev_get_tx_queue(dev, i); 6632 6633 if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) { 6634 netif_tx_stop_queue(txq); 6635 dev_err(np->device, "%s: BUG! Tx ring full when queue awake!\n", dev->name); 6636 rp->tx_errors++; 6637 return NETDEV_TX_BUSY; 6638 } 6639 6640 if (skb->len < ETH_ZLEN) { 6641 unsigned int pad_bytes = ETH_ZLEN - skb->len; 6642 6643 if (skb_pad(skb, pad_bytes)) 6644 goto out; 6645 skb_put(skb, pad_bytes); 6646 } 6647 6648 len = sizeof(struct tx_pkt_hdr) + 15; 6649 if (skb_headroom(skb) < len) { 6650 struct sk_buff *skb_new; 6651 6652 skb_new = skb_realloc_headroom(skb, len); 6653 if (!skb_new) { 6654 rp->tx_errors++; 6655 goto out_drop; 6656 } 6657 kfree_skb(skb); 6658 skb = skb_new; 6659 } else 6660 skb_orphan(skb); 6661 6662 align = ((unsigned long) skb->data & (16 - 1)); 6663 headroom = align + sizeof(struct tx_pkt_hdr); 6664 6665 ehdr = (struct ethhdr *) skb->data; 6666 tp = (struct tx_pkt_hdr *) skb_push(skb, headroom); 6667 6668 len = skb->len - sizeof(struct tx_pkt_hdr); 6669 tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len)); 6670 tp->resv = 0; 6671 6672 len = skb_headlen(skb); 6673 mapping = np->ops->map_single(np->device, skb->data, 6674 len, DMA_TO_DEVICE); 6675 6676 prod = rp->prod; 6677 6678 rp->tx_buffs[prod].skb = skb; 6679 rp->tx_buffs[prod].mapping = mapping; 6680 6681 mrk = TX_DESC_SOP; 6682 if (++rp->mark_counter == rp->mark_freq) { 6683 rp->mark_counter = 0; 6684 mrk |= TX_DESC_MARK; 6685 rp->mark_pending++; 6686 } 6687 6688 tlen = len; 6689 nfg = skb_shinfo(skb)->nr_frags; 6690 while (tlen > 0) { 6691 tlen -= MAX_TX_DESC_LEN; 6692 nfg++; 6693 } 6694 6695 while (len > 0) { 6696 unsigned int this_len = len; 6697 6698 if (this_len > MAX_TX_DESC_LEN) 6699 this_len = MAX_TX_DESC_LEN; 6700 6701 niu_set_txd(rp, prod, mapping, this_len, mrk, nfg); 6702 mrk = nfg = 0; 6703 6704 prod = NEXT_TX(rp, prod); 6705 mapping += this_len; 6706 len -= this_len; 6707 } 6708 6709 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 6710 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 6711 6712 len = frag->size; 6713 mapping = 
np->ops->map_page(np->device, frag->page, 6714 frag->page_offset, len, 6715 DMA_TO_DEVICE); 6716 6717 rp->tx_buffs[prod].skb = NULL; 6718 rp->tx_buffs[prod].mapping = mapping; 6719 6720 niu_set_txd(rp, prod, mapping, len, 0, 0); 6721 6722 prod = NEXT_TX(rp, prod); 6723 } 6724 6725 if (prod < rp->prod) 6726 rp->wrap_bit ^= TX_RING_KICK_WRAP; 6727 rp->prod = prod; 6728 6729 nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3)); 6730 6731 if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) { 6732 netif_tx_stop_queue(txq); 6733 if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)) 6734 netif_tx_wake_queue(txq); 6735 } 6736 6737out: 6738 return NETDEV_TX_OK; 6739 6740out_drop: 6741 rp->tx_errors++; 6742 kfree_skb(skb); 6743 goto out; 6744} 6745 6746static int niu_change_mtu(struct net_device *dev, int new_mtu) 6747{ 6748 struct niu *np = netdev_priv(dev); 6749 int err, orig_jumbo, new_jumbo; 6750 6751 if (new_mtu < 68 || new_mtu > NIU_MAX_MTU) 6752 return -EINVAL; 6753 6754 orig_jumbo = (dev->mtu > ETH_DATA_LEN); 6755 new_jumbo = (new_mtu > ETH_DATA_LEN); 6756 6757 dev->mtu = new_mtu; 6758 6759 if (!netif_running(dev) || 6760 (orig_jumbo == new_jumbo)) 6761 return 0; 6762 6763 niu_full_shutdown(np, dev); 6764 6765 niu_free_channels(np); 6766 6767 niu_enable_napi(np); 6768 6769 err = niu_alloc_channels(np); 6770 if (err) 6771 return err; 6772 6773 spin_lock_irq(&np->lock); 6774 6775 err = niu_init_hw(np); 6776 if (!err) { 6777 init_timer(&np->timer); 6778 np->timer.expires = jiffies + HZ; 6779 np->timer.data = (unsigned long) np; 6780 np->timer.function = niu_timer; 6781 6782 err = niu_enable_interrupts(np, 1); 6783 if (err) 6784 niu_stop_hw(np); 6785 } 6786 6787 spin_unlock_irq(&np->lock); 6788 6789 if (!err) { 6790 netif_tx_start_all_queues(dev); 6791 if (np->link_config.loopback_mode != LOOPBACK_DISABLED) 6792 netif_carrier_on(dev); 6793 6794 add_timer(&np->timer); 6795 } 6796 6797 return err; 6798} 6799 6800static void niu_get_drvinfo(struct net_device *dev, 6801 struct ethtool_drvinfo *info) 6802{ 6803 struct niu *np = netdev_priv(dev); 6804 struct niu_vpd *vpd = &np->vpd; 6805 6806 strcpy(info->driver, DRV_MODULE_NAME); 6807 strcpy(info->version, DRV_MODULE_VERSION); 6808 sprintf(info->fw_version, "%d.%d", 6809 vpd->fcode_major, vpd->fcode_minor); 6810 if (np->parent->plat_type != PLAT_TYPE_NIU) 6811 strcpy(info->bus_info, pci_name(np->pdev)); 6812} 6813 6814static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 6815{ 6816 struct niu *np = netdev_priv(dev); 6817 struct niu_link_config *lp; 6818 6819 lp = &np->link_config; 6820 6821 memset(cmd, 0, sizeof(*cmd)); 6822 cmd->phy_address = np->phy_addr; 6823 cmd->supported = lp->supported; 6824 cmd->advertising = lp->active_advertising; 6825 cmd->autoneg = lp->active_autoneg; 6826 cmd->speed = lp->active_speed; 6827 cmd->duplex = lp->active_duplex; 6828 cmd->port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP; 6829 cmd->transceiver = (np->flags & NIU_FLAGS_XCVR_SERDES) ? 
6830 XCVR_EXTERNAL : XCVR_INTERNAL; 6831 6832 return 0; 6833} 6834 6835static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 6836{ 6837 struct niu *np = netdev_priv(dev); 6838 struct niu_link_config *lp = &np->link_config; 6839 6840 lp->advertising = cmd->advertising; 6841 lp->speed = cmd->speed; 6842 lp->duplex = cmd->duplex; 6843 lp->autoneg = cmd->autoneg; 6844 return niu_init_link(np); 6845} 6846 6847static u32 niu_get_msglevel(struct net_device *dev) 6848{ 6849 struct niu *np = netdev_priv(dev); 6850 return np->msg_enable; 6851} 6852 6853static void niu_set_msglevel(struct net_device *dev, u32 value) 6854{ 6855 struct niu *np = netdev_priv(dev); 6856 np->msg_enable = value; 6857} 6858 6859static int niu_nway_reset(struct net_device *dev) 6860{ 6861 struct niu *np = netdev_priv(dev); 6862 6863 if (np->link_config.autoneg) 6864 return niu_init_link(np); 6865 6866 return 0; 6867} 6868 6869static int niu_get_eeprom_len(struct net_device *dev) 6870{ 6871 struct niu *np = netdev_priv(dev); 6872 6873 return np->eeprom_len; 6874} 6875 6876static int niu_get_eeprom(struct net_device *dev, 6877 struct ethtool_eeprom *eeprom, u8 *data) 6878{ 6879 struct niu *np = netdev_priv(dev); 6880 u32 offset, len, val; 6881 6882 offset = eeprom->offset; 6883 len = eeprom->len; 6884 6885 if (offset + len < offset) 6886 return -EINVAL; 6887 if (offset >= np->eeprom_len) 6888 return -EINVAL; 6889 if (offset + len > np->eeprom_len) 6890 len = eeprom->len = np->eeprom_len - offset; 6891 6892 if (offset & 3) { 6893 u32 b_offset, b_count; 6894 6895 b_offset = offset & 3; 6896 b_count = 4 - b_offset; 6897 if (b_count > len) 6898 b_count = len; 6899 6900 val = nr64(ESPC_NCR((offset - b_offset) / 4)); 6901 memcpy(data, ((char *)&val) + b_offset, b_count); 6902 data += b_count; 6903 len -= b_count; 6904 offset += b_count; 6905 } 6906 while (len >= 4) { 6907 val = nr64(ESPC_NCR(offset / 4)); 6908 memcpy(data, &val, 4); 6909 data += 4; 6910 len -= 4; 6911 offset += 4; 6912 } 6913 if (len) { 6914 val = nr64(ESPC_NCR(offset / 4)); 6915 memcpy(data, &val, len); 6916 } 6917 return 0; 6918} 6919 6920static void niu_ethflow_to_l3proto(int flow_type, u8 *pid) 6921{ 6922 switch (flow_type) { 6923 case TCP_V4_FLOW: 6924 case TCP_V6_FLOW: 6925 *pid = IPPROTO_TCP; 6926 break; 6927 case UDP_V4_FLOW: 6928 case UDP_V6_FLOW: 6929 *pid = IPPROTO_UDP; 6930 break; 6931 case SCTP_V4_FLOW: 6932 case SCTP_V6_FLOW: 6933 *pid = IPPROTO_SCTP; 6934 break; 6935 case AH_V4_FLOW: 6936 case AH_V6_FLOW: 6937 *pid = IPPROTO_AH; 6938 break; 6939 case ESP_V4_FLOW: 6940 case ESP_V6_FLOW: 6941 *pid = IPPROTO_ESP; 6942 break; 6943 default: 6944 *pid = 0; 6945 break; 6946 } 6947} 6948 6949static int niu_class_to_ethflow(u64 class, int *flow_type) 6950{ 6951 switch (class) { 6952 case CLASS_CODE_TCP_IPV4: 6953 *flow_type = TCP_V4_FLOW; 6954 break; 6955 case CLASS_CODE_UDP_IPV4: 6956 *flow_type = UDP_V4_FLOW; 6957 break; 6958 case CLASS_CODE_AH_ESP_IPV4: 6959 *flow_type = AH_V4_FLOW; 6960 break; 6961 case CLASS_CODE_SCTP_IPV4: 6962 *flow_type = SCTP_V4_FLOW; 6963 break; 6964 case CLASS_CODE_TCP_IPV6: 6965 *flow_type = TCP_V6_FLOW; 6966 break; 6967 case CLASS_CODE_UDP_IPV6: 6968 *flow_type = UDP_V6_FLOW; 6969 break; 6970 case CLASS_CODE_AH_ESP_IPV6: 6971 *flow_type = AH_V6_FLOW; 6972 break; 6973 case CLASS_CODE_SCTP_IPV6: 6974 *flow_type = SCTP_V6_FLOW; 6975 break; 6976 case CLASS_CODE_USER_PROG1: 6977 case CLASS_CODE_USER_PROG2: 6978 case CLASS_CODE_USER_PROG3: 6979 case CLASS_CODE_USER_PROG4: 6980 *flow_type = IP_USER_FLOW; 6981 
break; 6982 default: 6983 return 0; 6984 } 6985 6986 return 1; 6987} 6988 6989static int niu_ethflow_to_class(int flow_type, u64 *class) 6990{ 6991 switch (flow_type) { 6992 case TCP_V4_FLOW: 6993 *class = CLASS_CODE_TCP_IPV4; 6994 break; 6995 case UDP_V4_FLOW: 6996 *class = CLASS_CODE_UDP_IPV4; 6997 break; 6998 case AH_V4_FLOW: 6999 case ESP_V4_FLOW: 7000 *class = CLASS_CODE_AH_ESP_IPV4; 7001 break; 7002 case SCTP_V4_FLOW: 7003 *class = CLASS_CODE_SCTP_IPV4; 7004 break; 7005 case TCP_V6_FLOW: 7006 *class = CLASS_CODE_TCP_IPV6; 7007 break; 7008 case UDP_V6_FLOW: 7009 *class = CLASS_CODE_UDP_IPV6; 7010 break; 7011 case AH_V6_FLOW: 7012 case ESP_V6_FLOW: 7013 *class = CLASS_CODE_AH_ESP_IPV6; 7014 break; 7015 case SCTP_V6_FLOW: 7016 *class = CLASS_CODE_SCTP_IPV6; 7017 break; 7018 default: 7019 return 0; 7020 } 7021 7022 return 1; 7023} 7024 7025static u64 niu_flowkey_to_ethflow(u64 flow_key) 7026{ 7027 u64 ethflow = 0; 7028 7029 if (flow_key & FLOW_KEY_L2DA) 7030 ethflow |= RXH_L2DA; 7031 if (flow_key & FLOW_KEY_VLAN) 7032 ethflow |= RXH_VLAN; 7033 if (flow_key & FLOW_KEY_IPSA) 7034 ethflow |= RXH_IP_SRC; 7035 if (flow_key & FLOW_KEY_IPDA) 7036 ethflow |= RXH_IP_DST; 7037 if (flow_key & FLOW_KEY_PROTO) 7038 ethflow |= RXH_L3_PROTO; 7039 if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT)) 7040 ethflow |= RXH_L4_B_0_1; 7041 if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT)) 7042 ethflow |= RXH_L4_B_2_3; 7043 7044 return ethflow; 7045 7046} 7047 7048static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key) 7049{ 7050 u64 key = 0; 7051 7052 if (ethflow & RXH_L2DA) 7053 key |= FLOW_KEY_L2DA; 7054 if (ethflow & RXH_VLAN) 7055 key |= FLOW_KEY_VLAN; 7056 if (ethflow & RXH_IP_SRC) 7057 key |= FLOW_KEY_IPSA; 7058 if (ethflow & RXH_IP_DST) 7059 key |= FLOW_KEY_IPDA; 7060 if (ethflow & RXH_L3_PROTO) 7061 key |= FLOW_KEY_PROTO; 7062 if (ethflow & RXH_L4_B_0_1) 7063 key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT); 7064 if (ethflow & RXH_L4_B_2_3) 7065 key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT); 7066 7067 *flow_key = key; 7068 7069 return 1; 7070 7071} 7072 7073static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc) 7074{ 7075 u64 class; 7076 7077 nfc->data = 0; 7078 7079 if (!niu_ethflow_to_class(nfc->flow_type, &class)) 7080 return -EINVAL; 7081 7082 if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] & 7083 TCAM_KEY_DISC) 7084 nfc->data = RXH_DISCARD; 7085 else 7086 nfc->data = niu_flowkey_to_ethflow(np->parent->flow_key[class - 7087 CLASS_CODE_USER_PROG1]); 7088 return 0; 7089} 7090 7091static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp, 7092 struct ethtool_rx_flow_spec *fsp) 7093{ 7094 7095 fsp->h_u.tcp_ip4_spec.ip4src = (tp->key[3] & TCAM_V4KEY3_SADDR) >> 7096 TCAM_V4KEY3_SADDR_SHIFT; 7097 fsp->h_u.tcp_ip4_spec.ip4dst = (tp->key[3] & TCAM_V4KEY3_DADDR) >> 7098 TCAM_V4KEY3_DADDR_SHIFT; 7099 fsp->m_u.tcp_ip4_spec.ip4src = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >> 7100 TCAM_V4KEY3_SADDR_SHIFT; 7101 fsp->m_u.tcp_ip4_spec.ip4dst = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >> 7102 TCAM_V4KEY3_DADDR_SHIFT; 7103 7104 fsp->h_u.tcp_ip4_spec.ip4src = 7105 cpu_to_be32(fsp->h_u.tcp_ip4_spec.ip4src); 7106 fsp->m_u.tcp_ip4_spec.ip4src = 7107 cpu_to_be32(fsp->m_u.tcp_ip4_spec.ip4src); 7108 fsp->h_u.tcp_ip4_spec.ip4dst = 7109 cpu_to_be32(fsp->h_u.tcp_ip4_spec.ip4dst); 7110 fsp->m_u.tcp_ip4_spec.ip4dst = 7111 cpu_to_be32(fsp->m_u.tcp_ip4_spec.ip4dst); 7112 7113 fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >> 7114 TCAM_V4KEY2_TOS_SHIFT; 7115 
fsp->m_u.tcp_ip4_spec.tos = (tp->key_mask[2] & TCAM_V4KEY2_TOS) >> 7116 TCAM_V4KEY2_TOS_SHIFT; 7117 7118 switch (fsp->flow_type) { 7119 case TCP_V4_FLOW: 7120 case UDP_V4_FLOW: 7121 case SCTP_V4_FLOW: 7122 fsp->h_u.tcp_ip4_spec.psrc = 7123 ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> 7124 TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16; 7125 fsp->h_u.tcp_ip4_spec.pdst = 7126 ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> 7127 TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff; 7128 fsp->m_u.tcp_ip4_spec.psrc = 7129 ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> 7130 TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16; 7131 fsp->m_u.tcp_ip4_spec.pdst = 7132 ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> 7133 TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff; 7134 7135 fsp->h_u.tcp_ip4_spec.psrc = 7136 cpu_to_be16(fsp->h_u.tcp_ip4_spec.psrc); 7137 fsp->h_u.tcp_ip4_spec.pdst = 7138 cpu_to_be16(fsp->h_u.tcp_ip4_spec.pdst); 7139 fsp->m_u.tcp_ip4_spec.psrc = 7140 cpu_to_be16(fsp->m_u.tcp_ip4_spec.psrc); 7141 fsp->m_u.tcp_ip4_spec.pdst = 7142 cpu_to_be16(fsp->m_u.tcp_ip4_spec.pdst); 7143 break; 7144 case AH_V4_FLOW: 7145 case ESP_V4_FLOW: 7146 fsp->h_u.ah_ip4_spec.spi = 7147 (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> 7148 TCAM_V4KEY2_PORT_SPI_SHIFT; 7149 fsp->m_u.ah_ip4_spec.spi = 7150 (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> 7151 TCAM_V4KEY2_PORT_SPI_SHIFT; 7152 7153 fsp->h_u.ah_ip4_spec.spi = 7154 cpu_to_be32(fsp->h_u.ah_ip4_spec.spi); 7155 fsp->m_u.ah_ip4_spec.spi = 7156 cpu_to_be32(fsp->m_u.ah_ip4_spec.spi); 7157 break; 7158 case IP_USER_FLOW: 7159 fsp->h_u.usr_ip4_spec.l4_4_bytes = 7160 (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> 7161 TCAM_V4KEY2_PORT_SPI_SHIFT; 7162 fsp->m_u.usr_ip4_spec.l4_4_bytes = 7163 (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> 7164 TCAM_V4KEY2_PORT_SPI_SHIFT; 7165 7166 fsp->h_u.usr_ip4_spec.l4_4_bytes = 7167 cpu_to_be32(fsp->h_u.usr_ip4_spec.l4_4_bytes); 7168 fsp->m_u.usr_ip4_spec.l4_4_bytes = 7169 cpu_to_be32(fsp->m_u.usr_ip4_spec.l4_4_bytes); 7170 7171 fsp->h_u.usr_ip4_spec.proto = 7172 (tp->key[2] & TCAM_V4KEY2_PROTO) >> 7173 TCAM_V4KEY2_PROTO_SHIFT; 7174 fsp->m_u.usr_ip4_spec.proto = 7175 (tp->key_mask[2] & TCAM_V4KEY2_PROTO) >> 7176 TCAM_V4KEY2_PROTO_SHIFT; 7177 7178 fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; 7179 break; 7180 default: 7181 break; 7182 } 7183} 7184 7185static int niu_get_ethtool_tcam_entry(struct niu *np, 7186 struct ethtool_rxnfc *nfc) 7187{ 7188 struct niu_parent *parent = np->parent; 7189 struct niu_tcam_entry *tp; 7190 struct ethtool_rx_flow_spec *fsp = &nfc->fs; 7191 u16 idx; 7192 u64 class; 7193 int ret = 0; 7194 7195 idx = tcam_get_index(np, (u16)nfc->fs.location); 7196 7197 tp = &parent->tcam[idx]; 7198 if (!tp->valid) { 7199 netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n", 7200 parent->index, (u16)nfc->fs.location, idx); 7201 return -EINVAL; 7202 } 7203 7204 /* fill the flow spec entry */ 7205 class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >> 7206 TCAM_V4KEY0_CLASS_CODE_SHIFT; 7207 ret = niu_class_to_ethflow(class, &fsp->flow_type); 7208 7209 if (ret < 0) { 7210 netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n", 7211 parent->index); 7212 ret = -EINVAL; 7213 goto out; 7214 } 7215 7216 if (fsp->flow_type == AH_V4_FLOW || fsp->flow_type == AH_V6_FLOW) { 7217 u32 proto = (tp->key[2] & TCAM_V4KEY2_PROTO) >> 7218 TCAM_V4KEY2_PROTO_SHIFT; 7219 if (proto == IPPROTO_ESP) { 7220 if (fsp->flow_type == AH_V4_FLOW) 7221 fsp->flow_type = ESP_V4_FLOW; 7222 else 7223 fsp->flow_type = ESP_V6_FLOW; 7224 } 7225 } 7226 7227 switch (fsp->flow_type) { 7228 case TCP_V4_FLOW: 7229 case UDP_V4_FLOW: 7230 case 
SCTP_V4_FLOW: 7231 case AH_V4_FLOW: 7232 case ESP_V4_FLOW: 7233 niu_get_ip4fs_from_tcam_key(tp, fsp); 7234 break; 7235 case TCP_V6_FLOW: 7236 case UDP_V6_FLOW: 7237 case SCTP_V6_FLOW: 7238 case AH_V6_FLOW: 7239 case ESP_V6_FLOW: 7240 /* Not yet implemented */ 7241 ret = -EINVAL; 7242 break; 7243 case IP_USER_FLOW: 7244 niu_get_ip4fs_from_tcam_key(tp, fsp); 7245 break; 7246 default: 7247 ret = -EINVAL; 7248 break; 7249 } 7250 7251 if (ret < 0) 7252 goto out; 7253 7254 if (tp->assoc_data & TCAM_ASSOCDATA_DISC) 7255 fsp->ring_cookie = RX_CLS_FLOW_DISC; 7256 else 7257 fsp->ring_cookie = (tp->assoc_data & TCAM_ASSOCDATA_OFFSET) >> 7258 TCAM_ASSOCDATA_OFFSET_SHIFT; 7259 7260 /* put the tcam size here */ 7261 nfc->data = tcam_get_size(np); 7262out: 7263 return ret; 7264} 7265 7266static int niu_get_ethtool_tcam_all(struct niu *np, 7267 struct ethtool_rxnfc *nfc, 7268 u32 *rule_locs) 7269{ 7270 struct niu_parent *parent = np->parent; 7271 struct niu_tcam_entry *tp; 7272 int i, idx, cnt; 7273 u16 n_entries; 7274 unsigned long flags; 7275 7276 7277 /* put the tcam size here */ 7278 nfc->data = tcam_get_size(np); 7279 7280 niu_lock_parent(np, flags); 7281 n_entries = nfc->rule_cnt; 7282 for (cnt = 0, i = 0; i < nfc->data; i++) { 7283 idx = tcam_get_index(np, i); 7284 tp = &parent->tcam[idx]; 7285 if (!tp->valid) 7286 continue; 7287 rule_locs[cnt] = i; 7288 cnt++; 7289 } 7290 niu_unlock_parent(np, flags); 7291 7292 if (n_entries != cnt) { 7293 /* print warning, this should not happen */ 7294 netdev_info(np->dev, "niu%d: In %s(): n_entries[%d] != cnt[%d]!!!\n", 7295 np->parent->index, __func__, n_entries, cnt); 7296 } 7297 7298 return 0; 7299} 7300 7301static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, 7302 void *rule_locs) 7303{ 7304 struct niu *np = netdev_priv(dev); 7305 int ret = 0; 7306 7307 switch (cmd->cmd) { 7308 case ETHTOOL_GRXFH: 7309 ret = niu_get_hash_opts(np, cmd); 7310 break; 7311 case ETHTOOL_GRXRINGS: 7312 cmd->data = np->num_rx_rings; 7313 break; 7314 case ETHTOOL_GRXCLSRLCNT: 7315 cmd->rule_cnt = tcam_get_valid_entry_cnt(np); 7316 break; 7317 case ETHTOOL_GRXCLSRULE: 7318 ret = niu_get_ethtool_tcam_entry(np, cmd); 7319 break; 7320 case ETHTOOL_GRXCLSRLALL: 7321 ret = niu_get_ethtool_tcam_all(np, cmd, (u32 *)rule_locs); 7322 break; 7323 default: 7324 ret = -EINVAL; 7325 break; 7326 } 7327 7328 return ret; 7329} 7330 7331static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc) 7332{ 7333 u64 class; 7334 u64 flow_key = 0; 7335 unsigned long flags; 7336 7337 if (!niu_ethflow_to_class(nfc->flow_type, &class)) 7338 return -EINVAL; 7339 7340 if (class < CLASS_CODE_USER_PROG1 || 7341 class > CLASS_CODE_SCTP_IPV6) 7342 return -EINVAL; 7343 7344 if (nfc->data & RXH_DISCARD) { 7345 niu_lock_parent(np, flags); 7346 flow_key = np->parent->tcam_key[class - 7347 CLASS_CODE_USER_PROG1]; 7348 flow_key |= TCAM_KEY_DISC; 7349 nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key); 7350 np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key; 7351 niu_unlock_parent(np, flags); 7352 return 0; 7353 } else { 7354 /* Discard was set before, but is not set now */ 7355 if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] & 7356 TCAM_KEY_DISC) { 7357 niu_lock_parent(np, flags); 7358 flow_key = np->parent->tcam_key[class - 7359 CLASS_CODE_USER_PROG1]; 7360 flow_key &= ~TCAM_KEY_DISC; 7361 nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), 7362 flow_key); 7363 np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = 7364 flow_key; 7365 niu_unlock_parent(np, flags); 7366 } 
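/* Whether or not the discard bit had to be cleared, fall through and program the requested hash fields into the FLOW_KEY register below. */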
7367 } 7368 7369 if (!niu_ethflow_to_flowkey(nfc->data, &flow_key)) 7370 return -EINVAL; 7371 7372 niu_lock_parent(np, flags); 7373 nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key); 7374 np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key; 7375 niu_unlock_parent(np, flags); 7376 7377 return 0; 7378} 7379 7380static void niu_get_tcamkey_from_ip4fs(struct ethtool_rx_flow_spec *fsp, 7381 struct niu_tcam_entry *tp, 7382 int l2_rdc_tab, u64 class) 7383{ 7384 u8 pid = 0; 7385 u32 sip, dip, sipm, dipm, spi, spim; 7386 u16 sport, dport, spm, dpm; 7387 7388 sip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4src); 7389 sipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4src); 7390 dip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4dst); 7391 dipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4dst); 7392 7393 tp->key[0] = class << TCAM_V4KEY0_CLASS_CODE_SHIFT; 7394 tp->key_mask[0] = TCAM_V4KEY0_CLASS_CODE; 7395 tp->key[1] = (u64)l2_rdc_tab << TCAM_V4KEY1_L2RDCNUM_SHIFT; 7396 tp->key_mask[1] = TCAM_V4KEY1_L2RDCNUM; 7397 7398 tp->key[3] = (u64)sip << TCAM_V4KEY3_SADDR_SHIFT; 7399 tp->key[3] |= dip; 7400 7401 tp->key_mask[3] = (u64)sipm << TCAM_V4KEY3_SADDR_SHIFT; 7402 tp->key_mask[3] |= dipm; 7403 7404 tp->key[2] |= ((u64)fsp->h_u.tcp_ip4_spec.tos << 7405 TCAM_V4KEY2_TOS_SHIFT); 7406 tp->key_mask[2] |= ((u64)fsp->m_u.tcp_ip4_spec.tos << 7407 TCAM_V4KEY2_TOS_SHIFT); 7408 switch (fsp->flow_type) { 7409 case TCP_V4_FLOW: 7410 case UDP_V4_FLOW: 7411 case SCTP_V4_FLOW: 7412 sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc); 7413 spm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc); 7414 dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst); 7415 dpm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst); 7416 7417 tp->key[2] |= (((u64)sport << 16) | dport); 7418 tp->key_mask[2] |= (((u64)spm << 16) | dpm); 7419 niu_ethflow_to_l3proto(fsp->flow_type, &pid); 7420 break; 7421 case AH_V4_FLOW: 7422 case ESP_V4_FLOW: 7423 spi = be32_to_cpu(fsp->h_u.ah_ip4_spec.spi); 7424 spim = be32_to_cpu(fsp->m_u.ah_ip4_spec.spi); 7425 7426 tp->key[2] |= spi; 7427 tp->key_mask[2] |= spim; 7428 niu_ethflow_to_l3proto(fsp->flow_type, &pid); 7429 break; 7430 case IP_USER_FLOW: 7431 spi = be32_to_cpu(fsp->h_u.usr_ip4_spec.l4_4_bytes); 7432 spim = be32_to_cpu(fsp->m_u.usr_ip4_spec.l4_4_bytes); 7433 7434 tp->key[2] |= spi; 7435 tp->key_mask[2] |= spim; 7436 pid = fsp->h_u.usr_ip4_spec.proto; 7437 break; 7438 default: 7439 break; 7440 } 7441 7442 tp->key[2] |= ((u64)pid << TCAM_V4KEY2_PROTO_SHIFT); 7443 if (pid) { 7444 tp->key_mask[2] |= TCAM_V4KEY2_PROTO; 7445 } 7446} 7447 7448static int niu_add_ethtool_tcam_entry(struct niu *np, 7449 struct ethtool_rxnfc *nfc) 7450{ 7451 struct niu_parent *parent = np->parent; 7452 struct niu_tcam_entry *tp; 7453 struct ethtool_rx_flow_spec *fsp = &nfc->fs; 7454 struct niu_rdc_tables *rdc_table = &parent->rdc_group_cfg[np->port]; 7455 int l2_rdc_table = rdc_table->first_table_num; 7456 u16 idx; 7457 u64 class; 7458 unsigned long flags; 7459 int err, ret; 7460 7461 ret = 0; 7462 7463 idx = nfc->fs.location; 7464 if (idx >= tcam_get_size(np)) 7465 return -EINVAL; 7466 7467 if (fsp->flow_type == IP_USER_FLOW) { 7468 int i; 7469 int add_usr_cls = 0; 7470 int ipv6 = 0; 7471 struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec; 7472 struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec; 7473 7474 niu_lock_parent(np, flags); 7475 7476 for (i = 0; i < NIU_L3_PROG_CLS; i++) { 7477 if (parent->l3_cls[i]) { 7478 if (uspec->proto == parent->l3_cls_pid[i]) { 7479 class = parent->l3_cls[i]; 7480 parent->l3_cls_refcnt[i]++; 7481 add_usr_cls = 
1; 7482 break; 7483 } 7484 } else { 7485 /* Program new user IP class */ 7486 switch (i) { 7487 case 0: 7488 class = CLASS_CODE_USER_PROG1; 7489 break; 7490 case 1: 7491 class = CLASS_CODE_USER_PROG2; 7492 break; 7493 case 2: 7494 class = CLASS_CODE_USER_PROG3; 7495 break; 7496 case 3: 7497 class = CLASS_CODE_USER_PROG4; 7498 break; 7499 default: 7500 break; 7501 } 7502 if (uspec->ip_ver == ETH_RX_NFC_IP6) 7503 ipv6 = 1; 7504 ret = tcam_user_ip_class_set(np, class, ipv6, 7505 uspec->proto, 7506 uspec->tos, 7507 umask->tos); 7508 if (ret) 7509 goto out; 7510 7511 ret = tcam_user_ip_class_enable(np, class, 1); 7512 if (ret) 7513 goto out; 7514 parent->l3_cls[i] = class; 7515 parent->l3_cls_pid[i] = uspec->proto; 7516 parent->l3_cls_refcnt[i]++; 7517 add_usr_cls = 1; 7518 break; 7519 } 7520 } 7521 if (!add_usr_cls) { 7522 netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n", 7523 parent->index, __func__, uspec->proto); 7524 ret = -EINVAL; 7525 goto out; 7526 } 7527 niu_unlock_parent(np, flags); 7528 } else { 7529 if (!niu_ethflow_to_class(fsp->flow_type, &class)) { 7530 return -EINVAL; 7531 } 7532 } 7533 7534 niu_lock_parent(np, flags); 7535 7536 idx = tcam_get_index(np, idx); 7537 tp = &parent->tcam[idx]; 7538 7539 memset(tp, 0, sizeof(*tp)); 7540 7541 /* fill in the tcam key and mask */ 7542 switch (fsp->flow_type) { 7543 case TCP_V4_FLOW: 7544 case UDP_V4_FLOW: 7545 case SCTP_V4_FLOW: 7546 case AH_V4_FLOW: 7547 case ESP_V4_FLOW: 7548 niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class); 7549 break; 7550 case TCP_V6_FLOW: 7551 case UDP_V6_FLOW: 7552 case SCTP_V6_FLOW: 7553 case AH_V6_FLOW: 7554 case ESP_V6_FLOW: 7555 /* Not yet implemented */ 7556 netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n", 7557 parent->index, __func__, fsp->flow_type); 7558 ret = -EINVAL; 7559 goto out; 7560 case IP_USER_FLOW: 7561 if (fsp->h_u.usr_ip4_spec.ip_ver == ETH_RX_NFC_IP4) { 7562 niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, 7563 class); 7564 } else { 7565 /* Not yet implemented */ 7566 netdev_info(np->dev, "niu%d: In %s(): usr flow for IPv6 not implemented\n", 7567 parent->index, __func__); 7568 ret = -EINVAL; 7569 goto out; 7570 } 7571 break; 7572 default: 7573 netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n", 7574 parent->index, __func__, fsp->flow_type); 7575 ret = -EINVAL; 7576 goto out; 7577 } 7578 7579 /* fill in the assoc data */ 7580 if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { 7581 tp->assoc_data = TCAM_ASSOCDATA_DISC; 7582 } else { 7583 if (fsp->ring_cookie >= np->num_rx_rings) { 7584 netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n", 7585 parent->index, __func__, 7586 (long long)fsp->ring_cookie); 7587 ret = -EINVAL; 7588 goto out; 7589 } 7590 tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET | 7591 (fsp->ring_cookie << 7592 TCAM_ASSOCDATA_OFFSET_SHIFT)); 7593 } 7594 7595 err = tcam_write(np, idx, tp->key, tp->key_mask); 7596 if (err) { 7597 ret = -EINVAL; 7598 goto out; 7599 } 7600 err = tcam_assoc_write(np, idx, tp->assoc_data); 7601 if (err) { 7602 ret = -EINVAL; 7603 goto out; 7604 } 7605 7606 /* validate the entry */ 7607 tp->valid = 1; 7608 np->clas.tcam_valid_entries++; 7609out: 7610 niu_unlock_parent(np, flags); 7611 7612 return ret; 7613} 7614 7615static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc) 7616{ 7617 struct niu_parent *parent = np->parent; 7618 struct niu_tcam_entry *tp; 7619 u16 idx; 7620 unsigned long flags; 7621 u64 class; 7622 int ret = 0; 7623 7624 if (loc >= tcam_get_size(np)) 7625 
return -EINVAL; 7626 7627 niu_lock_parent(np, flags); 7628 7629 idx = tcam_get_index(np, loc); 7630 tp = &parent->tcam[idx]; 7631 7632 /* if the entry is of a user defined class, then update*/ 7633 class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >> 7634 TCAM_V4KEY0_CLASS_CODE_SHIFT; 7635 7636 if (class >= CLASS_CODE_USER_PROG1 && class <= CLASS_CODE_USER_PROG4) { 7637 int i; 7638 for (i = 0; i < NIU_L3_PROG_CLS; i++) { 7639 if (parent->l3_cls[i] == class) { 7640 parent->l3_cls_refcnt[i]--; 7641 if (!parent->l3_cls_refcnt[i]) { 7642 /* disable class */ 7643 ret = tcam_user_ip_class_enable(np, 7644 class, 7645 0); 7646 if (ret) 7647 goto out; 7648 parent->l3_cls[i] = 0; 7649 parent->l3_cls_pid[i] = 0; 7650 } 7651 break; 7652 } 7653 } 7654 if (i == NIU_L3_PROG_CLS) { 7655 netdev_info(np->dev, "niu%d: In %s(): Usr class 0x%llx not found\n", 7656 parent->index, __func__, 7657 (unsigned long long)class); 7658 ret = -EINVAL; 7659 goto out; 7660 } 7661 } 7662 7663 ret = tcam_flush(np, idx); 7664 if (ret) 7665 goto out; 7666 7667 /* invalidate the entry */ 7668 tp->valid = 0; 7669 np->clas.tcam_valid_entries--; 7670out: 7671 niu_unlock_parent(np, flags); 7672 7673 return ret; 7674} 7675 7676static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd) 7677{ 7678 struct niu *np = netdev_priv(dev); 7679 int ret = 0; 7680 7681 switch (cmd->cmd) { 7682 case ETHTOOL_SRXFH: 7683 ret = niu_set_hash_opts(np, cmd); 7684 break; 7685 case ETHTOOL_SRXCLSRLINS: 7686 ret = niu_add_ethtool_tcam_entry(np, cmd); 7687 break; 7688 case ETHTOOL_SRXCLSRLDEL: 7689 ret = niu_del_ethtool_tcam_entry(np, cmd->fs.location); 7690 break; 7691 default: 7692 ret = -EINVAL; 7693 break; 7694 } 7695 7696 return ret; 7697} 7698 7699static const struct { 7700 const char string[ETH_GSTRING_LEN]; 7701} niu_xmac_stat_keys[] = { 7702 { "tx_frames" }, 7703 { "tx_bytes" }, 7704 { "tx_fifo_errors" }, 7705 { "tx_overflow_errors" }, 7706 { "tx_max_pkt_size_errors" }, 7707 { "tx_underflow_errors" }, 7708 { "rx_local_faults" }, 7709 { "rx_remote_faults" }, 7710 { "rx_link_faults" }, 7711 { "rx_align_errors" }, 7712 { "rx_frags" }, 7713 { "rx_mcasts" }, 7714 { "rx_bcasts" }, 7715 { "rx_hist_cnt1" }, 7716 { "rx_hist_cnt2" }, 7717 { "rx_hist_cnt3" }, 7718 { "rx_hist_cnt4" }, 7719 { "rx_hist_cnt5" }, 7720 { "rx_hist_cnt6" }, 7721 { "rx_hist_cnt7" }, 7722 { "rx_octets" }, 7723 { "rx_code_violations" }, 7724 { "rx_len_errors" }, 7725 { "rx_crc_errors" }, 7726 { "rx_underflows" }, 7727 { "rx_overflows" }, 7728 { "pause_off_state" }, 7729 { "pause_on_state" }, 7730 { "pause_received" }, 7731}; 7732 7733#define NUM_XMAC_STAT_KEYS ARRAY_SIZE(niu_xmac_stat_keys) 7734 7735static const struct { 7736 const char string[ETH_GSTRING_LEN]; 7737} niu_bmac_stat_keys[] = { 7738 { "tx_underflow_errors" }, 7739 { "tx_max_pkt_size_errors" }, 7740 { "tx_bytes" }, 7741 { "tx_frames" }, 7742 { "rx_overflows" }, 7743 { "rx_frames" }, 7744 { "rx_align_errors" }, 7745 { "rx_crc_errors" }, 7746 { "rx_len_errors" }, 7747 { "pause_off_state" }, 7748 { "pause_on_state" }, 7749 { "pause_received" }, 7750}; 7751 7752#define NUM_BMAC_STAT_KEYS ARRAY_SIZE(niu_bmac_stat_keys) 7753 7754static const struct { 7755 const char string[ETH_GSTRING_LEN]; 7756} niu_rxchan_stat_keys[] = { 7757 { "rx_channel" }, 7758 { "rx_packets" }, 7759 { "rx_bytes" }, 7760 { "rx_dropped" }, 7761 { "rx_errors" }, 7762}; 7763 7764#define NUM_RXCHAN_STAT_KEYS ARRAY_SIZE(niu_rxchan_stat_keys) 7765 7766static const struct { 7767 const char string[ETH_GSTRING_LEN]; 7768} niu_txchan_stat_keys[] = { 7769 
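/* Order must match the per-TX-ring values written by niu_get_ethtool_stats(). */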
{ "tx_channel" }, 7770 { "tx_packets" }, 7771 { "tx_bytes" }, 7772 { "tx_errors" }, 7773}; 7774 7775#define NUM_TXCHAN_STAT_KEYS ARRAY_SIZE(niu_txchan_stat_keys) 7776 7777static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data) 7778{ 7779 struct niu *np = netdev_priv(dev); 7780 int i; 7781 7782 if (stringset != ETH_SS_STATS) 7783 return; 7784 7785 if (np->flags & NIU_FLAGS_XMAC) { 7786 memcpy(data, niu_xmac_stat_keys, 7787 sizeof(niu_xmac_stat_keys)); 7788 data += sizeof(niu_xmac_stat_keys); 7789 } else { 7790 memcpy(data, niu_bmac_stat_keys, 7791 sizeof(niu_bmac_stat_keys)); 7792 data += sizeof(niu_bmac_stat_keys); 7793 } 7794 for (i = 0; i < np->num_rx_rings; i++) { 7795 memcpy(data, niu_rxchan_stat_keys, 7796 sizeof(niu_rxchan_stat_keys)); 7797 data += sizeof(niu_rxchan_stat_keys); 7798 } 7799 for (i = 0; i < np->num_tx_rings; i++) { 7800 memcpy(data, niu_txchan_stat_keys, 7801 sizeof(niu_txchan_stat_keys)); 7802 data += sizeof(niu_txchan_stat_keys); 7803 } 7804} 7805 7806static int niu_get_sset_count(struct net_device *dev, int stringset) 7807{ 7808 struct niu *np = netdev_priv(dev); 7809 7810 if (stringset != ETH_SS_STATS) 7811 return -EINVAL; 7812 7813 return ((np->flags & NIU_FLAGS_XMAC ? 7814 NUM_XMAC_STAT_KEYS : 7815 NUM_BMAC_STAT_KEYS) + 7816 (np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) + 7817 (np->num_tx_rings * NUM_TXCHAN_STAT_KEYS)); 7818} 7819 7820static void niu_get_ethtool_stats(struct net_device *dev, 7821 struct ethtool_stats *stats, u64 *data) 7822{ 7823 struct niu *np = netdev_priv(dev); 7824 int i; 7825 7826 niu_sync_mac_stats(np); 7827 if (np->flags & NIU_FLAGS_XMAC) { 7828 memcpy(data, &np->mac_stats.xmac, 7829 sizeof(struct niu_xmac_stats)); 7830 data += (sizeof(struct niu_xmac_stats) / sizeof(u64)); 7831 } else { 7832 memcpy(data, &np->mac_stats.bmac, 7833 sizeof(struct niu_bmac_stats)); 7834 data += (sizeof(struct niu_bmac_stats) / sizeof(u64)); 7835 } 7836 for (i = 0; i < np->num_rx_rings; i++) { 7837 struct rx_ring_info *rp = &np->rx_rings[i]; 7838 7839 niu_sync_rx_discard_stats(np, rp, 0); 7840 7841 data[0] = rp->rx_channel; 7842 data[1] = rp->rx_packets; 7843 data[2] = rp->rx_bytes; 7844 data[3] = rp->rx_dropped; 7845 data[4] = rp->rx_errors; 7846 data += 5; 7847 } 7848 for (i = 0; i < np->num_tx_rings; i++) { 7849 struct tx_ring_info *rp = &np->tx_rings[i]; 7850 7851 data[0] = rp->tx_channel; 7852 data[1] = rp->tx_packets; 7853 data[2] = rp->tx_bytes; 7854 data[3] = rp->tx_errors; 7855 data += 4; 7856 } 7857} 7858 7859static u64 niu_led_state_save(struct niu *np) 7860{ 7861 if (np->flags & NIU_FLAGS_XMAC) 7862 return nr64_mac(XMAC_CONFIG); 7863 else 7864 return nr64_mac(BMAC_XIF_CONFIG); 7865} 7866 7867static void niu_led_state_restore(struct niu *np, u64 val) 7868{ 7869 if (np->flags & NIU_FLAGS_XMAC) 7870 nw64_mac(XMAC_CONFIG, val); 7871 else 7872 nw64_mac(BMAC_XIF_CONFIG, val); 7873} 7874 7875static void niu_force_led(struct niu *np, int on) 7876{ 7877 u64 val, reg, bit; 7878 7879 if (np->flags & NIU_FLAGS_XMAC) { 7880 reg = XMAC_CONFIG; 7881 bit = XMAC_CONFIG_FORCE_LED_ON; 7882 } else { 7883 reg = BMAC_XIF_CONFIG; 7884 bit = BMAC_XIF_CONFIG_LINK_LED; 7885 } 7886 7887 val = nr64_mac(reg); 7888 if (on) 7889 val |= bit; 7890 else 7891 val &= ~bit; 7892 nw64_mac(reg, val); 7893} 7894 7895static int niu_phys_id(struct net_device *dev, u32 data) 7896{ 7897 struct niu *np = netdev_priv(dev); 7898 u64 orig_led_state; 7899 int i; 7900 7901 if (!netif_running(dev)) 7902 return -EAGAIN; 7903 7904 if (data == 0) 7905 data = 2; 7906 7907 
orig_led_state = niu_led_state_save(np); 7908 for (i = 0; i < (data * 2); i++) { 7909 int on = ((i % 2) == 0); 7910 7911 niu_force_led(np, on); 7912 7913 if (msleep_interruptible(500)) 7914 break; 7915 } 7916 niu_led_state_restore(np, orig_led_state); 7917 7918 return 0; 7919} 7920 7921static int niu_set_flags(struct net_device *dev, u32 data) 7922{ 7923 if (data & (ETH_FLAG_LRO | ETH_FLAG_NTUPLE)) 7924 return -EOPNOTSUPP; 7925 7926 if (data & ETH_FLAG_RXHASH) 7927 dev->features |= NETIF_F_RXHASH; 7928 else 7929 dev->features &= ~NETIF_F_RXHASH; 7930 return 0; 7931} 7932 7933static const struct ethtool_ops niu_ethtool_ops = { 7934 .get_drvinfo = niu_get_drvinfo, 7935 .get_link = ethtool_op_get_link, 7936 .get_msglevel = niu_get_msglevel, 7937 .set_msglevel = niu_set_msglevel, 7938 .nway_reset = niu_nway_reset, 7939 .get_eeprom_len = niu_get_eeprom_len, 7940 .get_eeprom = niu_get_eeprom, 7941 .get_settings = niu_get_settings, 7942 .set_settings = niu_set_settings, 7943 .get_strings = niu_get_strings, 7944 .get_sset_count = niu_get_sset_count, 7945 .get_ethtool_stats = niu_get_ethtool_stats, 7946 .phys_id = niu_phys_id, 7947 .get_rxnfc = niu_get_nfc, 7948 .set_rxnfc = niu_set_nfc, 7949 .set_flags = niu_set_flags, 7950 .get_flags = ethtool_op_get_flags, 7951}; 7952 7953static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent, 7954 int ldg, int ldn) 7955{ 7956 if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) 7957 return -EINVAL; 7958 if (ldn < 0 || ldn > LDN_MAX) 7959 return -EINVAL; 7960 7961 parent->ldg_map[ldn] = ldg; 7962 7963 if (np->parent->plat_type == PLAT_TYPE_NIU) { 7964 /* On N2 NIU, the ldn-->ldg assignments are setup and fixed by 7965 * the firmware, and we're not supposed to change them. 7966 * Validate the mapping, because if it's wrong we probably 7967 * won't get any interrupts and that's painful to debug. 
7968 */ 7969 if (nr64(LDG_NUM(ldn)) != ldg) { 7970 dev_err(np->device, "Port %u, mis-matched LDG assignment for ldn %d, should be %d is %llu\n", 7971 np->port, ldn, ldg, 7972 (unsigned long long) nr64(LDG_NUM(ldn))); 7973 return -EINVAL; 7974 } 7975 } else 7976 nw64(LDG_NUM(ldn), ldg); 7977 7978 return 0; 7979} 7980 7981static int niu_set_ldg_timer_res(struct niu *np, int res) 7982{ 7983 if (res < 0 || res > LDG_TIMER_RES_VAL) 7984 return -EINVAL; 7985 7986 7987 nw64(LDG_TIMER_RES, res); 7988 7989 return 0; 7990} 7991 7992static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector) 7993{ 7994 if ((ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) || 7995 (func < 0 || func > 3) || 7996 (vector < 0 || vector > 0x1f)) 7997 return -EINVAL; 7998 7999 nw64(SID(ldg), (func << SID_FUNC_SHIFT) | vector); 8000 8001 return 0; 8002} 8003 8004static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr) 8005{ 8006 u64 frame, frame_base = (ESPC_PIO_STAT_READ_START | 8007 (addr << ESPC_PIO_STAT_ADDR_SHIFT)); 8008 int limit; 8009 8010 if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT)) 8011 return -EINVAL; 8012 8013 frame = frame_base; 8014 nw64(ESPC_PIO_STAT, frame); 8015 limit = 64; 8016 do { 8017 udelay(5); 8018 frame = nr64(ESPC_PIO_STAT); 8019 if (frame & ESPC_PIO_STAT_READ_END) 8020 break; 8021 } while (limit--); 8022 if (!(frame & ESPC_PIO_STAT_READ_END)) { 8023 dev_err(np->device, "EEPROM read timeout frame[%llx]\n", 8024 (unsigned long long) frame); 8025 return -ENODEV; 8026 } 8027 8028 frame = frame_base; 8029 nw64(ESPC_PIO_STAT, frame); 8030 limit = 64; 8031 do { 8032 udelay(5); 8033 frame = nr64(ESPC_PIO_STAT); 8034 if (frame & ESPC_PIO_STAT_READ_END) 8035 break; 8036 } while (limit--); 8037 if (!(frame & ESPC_PIO_STAT_READ_END)) { 8038 dev_err(np->device, "EEPROM read timeout frame[%llx]\n", 8039 (unsigned long long) frame); 8040 return -ENODEV; 8041 } 8042 8043 frame = nr64(ESPC_PIO_STAT); 8044 return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT; 8045} 8046 8047static int __devinit niu_pci_eeprom_read16(struct niu *np, u32 off) 8048{ 8049 int err = niu_pci_eeprom_read(np, off); 8050 u16 val; 8051 8052 if (err < 0) 8053 return err; 8054 val = (err << 8); 8055 err = niu_pci_eeprom_read(np, off + 1); 8056 if (err < 0) 8057 return err; 8058 val |= (err & 0xff); 8059 8060 return val; 8061} 8062 8063static int __devinit niu_pci_eeprom_read16_swp(struct niu *np, u32 off) 8064{ 8065 int err = niu_pci_eeprom_read(np, off); 8066 u16 val; 8067 8068 if (err < 0) 8069 return err; 8070 8071 val = (err & 0xff); 8072 err = niu_pci_eeprom_read(np, off + 1); 8073 if (err < 0) 8074 return err; 8075 8076 val |= (err & 0xff) << 8; 8077 8078 return val; 8079} 8080 8081static int __devinit niu_pci_vpd_get_propname(struct niu *np, 8082 u32 off, 8083 char *namebuf, 8084 int namebuf_len) 8085{ 8086 int i; 8087 8088 for (i = 0; i < namebuf_len; i++) { 8089 int err = niu_pci_eeprom_read(np, off + i); 8090 if (err < 0) 8091 return err; 8092 *namebuf++ = err; 8093 if (!err) 8094 break; 8095 } 8096 if (i >= namebuf_len) 8097 return -EINVAL; 8098 8099 return i + 1; 8100} 8101 8102static void __devinit niu_vpd_parse_version(struct niu *np) 8103{ 8104 struct niu_vpd *vpd = &np->vpd; 8105 int len = strlen(vpd->version) + 1; 8106 const char *s = vpd->version; 8107 int i; 8108 8109 for (i = 0; i < len - 5; i++) { 8110 if (!strncmp(s + i, "FCode ", 6)) 8111 break; 8112 } 8113 if (i >= len - 5) 8114 return; 8115 8116 s += i + 5; 8117 sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor); 8118 8119 
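/* The "FCode <major>.<minor>" version just parsed gates NIU_FLAGS_VPD_VALID below: the VPD is only trusted when it is at least NIU_VPD_MIN_MAJOR.NIU_VPD_MIN_MINOR. */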
netif_printk(np, probe, KERN_DEBUG, np->dev, 8120 "VPD_SCAN: FCODE major(%d) minor(%d)\n", 8121 vpd->fcode_major, vpd->fcode_minor); 8122 if (vpd->fcode_major > NIU_VPD_MIN_MAJOR || 8123 (vpd->fcode_major == NIU_VPD_MIN_MAJOR && 8124 vpd->fcode_minor >= NIU_VPD_MIN_MINOR)) 8125 np->flags |= NIU_FLAGS_VPD_VALID; 8126} 8127 8128/* ESPC_PIO_EN_ENABLE must be set */ 8129static int __devinit niu_pci_vpd_scan_props(struct niu *np, 8130 u32 start, u32 end) 8131{ 8132 unsigned int found_mask = 0; 8133#define FOUND_MASK_MODEL 0x00000001 8134#define FOUND_MASK_BMODEL 0x00000002 8135#define FOUND_MASK_VERS 0x00000004 8136#define FOUND_MASK_MAC 0x00000008 8137#define FOUND_MASK_NMAC 0x00000010 8138#define FOUND_MASK_PHY 0x00000020 8139#define FOUND_MASK_ALL 0x0000003f 8140 8141 netif_printk(np, probe, KERN_DEBUG, np->dev, 8142 "VPD_SCAN: start[%x] end[%x]\n", start, end); 8143 while (start < end) { 8144 int len, err, instance, type, prop_len; 8145 char namebuf[64]; 8146 u8 *prop_buf; 8147 int max_len; 8148 8149 if (found_mask == FOUND_MASK_ALL) { 8150 niu_vpd_parse_version(np); 8151 return 1; 8152 } 8153 8154 err = niu_pci_eeprom_read(np, start + 2); 8155 if (err < 0) 8156 return err; 8157 len = err; 8158 start += 3; 8159 8160 instance = niu_pci_eeprom_read(np, start); 8161 type = niu_pci_eeprom_read(np, start + 3); 8162 prop_len = niu_pci_eeprom_read(np, start + 4); 8163 err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64); 8164 if (err < 0) 8165 return err; 8166 8167 prop_buf = NULL; 8168 max_len = 0; 8169 if (!strcmp(namebuf, "model")) { 8170 prop_buf = np->vpd.model; 8171 max_len = NIU_VPD_MODEL_MAX; 8172 found_mask |= FOUND_MASK_MODEL; 8173 } else if (!strcmp(namebuf, "board-model")) { 8174 prop_buf = np->vpd.board_model; 8175 max_len = NIU_VPD_BD_MODEL_MAX; 8176 found_mask |= FOUND_MASK_BMODEL; 8177 } else if (!strcmp(namebuf, "version")) { 8178 prop_buf = np->vpd.version; 8179 max_len = NIU_VPD_VERSION_MAX; 8180 found_mask |= FOUND_MASK_VERS; 8181 } else if (!strcmp(namebuf, "local-mac-address")) { 8182 prop_buf = np->vpd.local_mac; 8183 max_len = ETH_ALEN; 8184 found_mask |= FOUND_MASK_MAC; 8185 } else if (!strcmp(namebuf, "num-mac-addresses")) { 8186 prop_buf = &np->vpd.mac_num; 8187 max_len = 1; 8188 found_mask |= FOUND_MASK_NMAC; 8189 } else if (!strcmp(namebuf, "phy-type")) { 8190 prop_buf = np->vpd.phy_type; 8191 max_len = NIU_VPD_PHY_TYPE_MAX; 8192 found_mask |= FOUND_MASK_PHY; 8193 } 8194 8195 if (max_len && prop_len > max_len) { 8196 dev_err(np->device, "Property '%s' length (%d) is too long\n", namebuf, prop_len); 8197 return -EINVAL; 8198 } 8199 8200 if (prop_buf) { 8201 u32 off = start + 5 + err; 8202 int i; 8203 8204 netif_printk(np, probe, KERN_DEBUG, np->dev, 8205 "VPD_SCAN: Reading in property [%s] len[%d]\n", 8206 namebuf, prop_len); 8207 for (i = 0; i < prop_len; i++) 8208 *prop_buf++ = niu_pci_eeprom_read(np, off + i); 8209 } 8210 8211 start += len; 8212 } 8213 8214 return 0; 8215} 8216 8217/* ESPC_PIO_EN_ENABLE must be set */ 8218static void __devinit niu_pci_vpd_fetch(struct niu *np, u32 start) 8219{ 8220 u32 offset; 8221 int err; 8222 8223 err = niu_pci_eeprom_read16_swp(np, start + 1); 8224 if (err < 0) 8225 return; 8226 8227 offset = err + 3; 8228 8229 while (start + offset < ESPC_EEPROM_SIZE) { 8230 u32 here = start + offset; 8231 u32 end; 8232 8233 err = niu_pci_eeprom_read(np, here); 8234 if (err != 0x90) 8235 return; 8236 8237 err = niu_pci_eeprom_read16_swp(np, here + 1); 8238 if (err < 0) 8239 return; 8240 8241 here = start + offset + 3; 8242 end = start + 
offset + err; 8243 8244 offset += err; 8245 8246 err = niu_pci_vpd_scan_props(np, here, end); 8247 if (err < 0 || err == 1) 8248 return; 8249 } 8250} 8251 8252/* ESPC_PIO_EN_ENABLE must be set */ 8253static u32 __devinit niu_pci_vpd_offset(struct niu *np) 8254{ 8255 u32 start = 0, end = ESPC_EEPROM_SIZE, ret; 8256 int err; 8257 8258 while (start < end) { 8259 ret = start; 8260 8261 /* ROM header signature? */ 8262 err = niu_pci_eeprom_read16(np, start + 0); 8263 if (err != 0x55aa) 8264 return 0; 8265 8266 /* Apply offset to PCI data structure. */ 8267 err = niu_pci_eeprom_read16(np, start + 23); 8268 if (err < 0) 8269 return 0; 8270 start += err; 8271 8272 /* Check for "PCIR" signature. */ 8273 err = niu_pci_eeprom_read16(np, start + 0); 8274 if (err != 0x5043) 8275 return 0; 8276 err = niu_pci_eeprom_read16(np, start + 2); 8277 if (err != 0x4952) 8278 return 0; 8279 8280 /* Check for OBP image type. */ 8281 err = niu_pci_eeprom_read(np, start + 20); 8282 if (err < 0) 8283 return 0; 8284 if (err != 0x01) { 8285 err = niu_pci_eeprom_read(np, ret + 2); 8286 if (err < 0) 8287 return 0; 8288 8289 start = ret + (err * 512); 8290 continue; 8291 } 8292 8293 err = niu_pci_eeprom_read16_swp(np, start + 8); 8294 if (err < 0) 8295 return err; 8296 ret += err; 8297 8298 err = niu_pci_eeprom_read(np, ret + 0); 8299 if (err != 0x82) 8300 return 0; 8301 8302 return ret; 8303 } 8304 8305 return 0; 8306} 8307 8308static int __devinit niu_phy_type_prop_decode(struct niu *np, 8309 const char *phy_prop) 8310{ 8311 if (!strcmp(phy_prop, "mif")) { 8312 /* 1G copper, MII */ 8313 np->flags &= ~(NIU_FLAGS_FIBER | 8314 NIU_FLAGS_10G); 8315 np->mac_xcvr = MAC_XCVR_MII; 8316 } else if (!strcmp(phy_prop, "xgf")) { 8317 /* 10G fiber, XPCS */ 8318 np->flags |= (NIU_FLAGS_10G | 8319 NIU_FLAGS_FIBER); 8320 np->mac_xcvr = MAC_XCVR_XPCS; 8321 } else if (!strcmp(phy_prop, "pcs")) { 8322 /* 1G fiber, PCS */ 8323 np->flags &= ~NIU_FLAGS_10G; 8324 np->flags |= NIU_FLAGS_FIBER; 8325 np->mac_xcvr = MAC_XCVR_PCS; 8326 } else if (!strcmp(phy_prop, "xgc")) { 8327 /* 10G copper, XPCS */ 8328 np->flags |= NIU_FLAGS_10G; 8329 np->flags &= ~NIU_FLAGS_FIBER; 8330 np->mac_xcvr = MAC_XCVR_XPCS; 8331 } else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) { 8332 /* 10G Serdes or 1G Serdes, default to 10G */ 8333 np->flags |= NIU_FLAGS_10G; 8334 np->flags &= ~NIU_FLAGS_FIBER; 8335 np->flags |= NIU_FLAGS_XCVR_SERDES; 8336 np->mac_xcvr = MAC_XCVR_XPCS; 8337 } else { 8338 return -EINVAL; 8339 } 8340 return 0; 8341} 8342 8343static int niu_pci_vpd_get_nports(struct niu *np) 8344{ 8345 int ports = 0; 8346 8347 if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) || 8348 (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) || 8349 (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) || 8350 (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) || 8351 (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) { 8352 ports = 4; 8353 } else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) || 8354 (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) || 8355 (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) || 8356 (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) { 8357 ports = 2; 8358 } 8359 8360 return ports; 8361} 8362 8363static void __devinit niu_pci_vpd_validate(struct niu *np) 8364{ 8365 struct net_device *dev = np->dev; 8366 struct niu_vpd *vpd = &np->vpd; 8367 u8 val8; 8368 8369 if (!is_valid_ether_addr(&vpd->local_mac[0])) { 8370 dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n"); 8371 8372 np->flags &= ~NIU_FLAGS_VPD_VALID; 8373 return; 8374 } 8375 8376 if 
(!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || 8377 !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { 8378 np->flags |= NIU_FLAGS_10G; 8379 np->flags &= ~NIU_FLAGS_FIBER; 8380 np->flags |= NIU_FLAGS_XCVR_SERDES; 8381 np->mac_xcvr = MAC_XCVR_PCS; 8382 if (np->port > 1) { 8383 np->flags |= NIU_FLAGS_FIBER; 8384 np->flags &= ~NIU_FLAGS_10G; 8385 } 8386 if (np->flags & NIU_FLAGS_10G) 8387 np->mac_xcvr = MAC_XCVR_XPCS; 8388 } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { 8389 np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | 8390 NIU_FLAGS_HOTPLUG_PHY); 8391 } else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { 8392 dev_err(np->device, "Illegal phy string [%s]\n", 8393 np->vpd.phy_type); 8394 dev_err(np->device, "Falling back to SPROM\n"); 8395 np->flags &= ~NIU_FLAGS_VPD_VALID; 8396 return; 8397 } 8398 8399 memcpy(dev->perm_addr, vpd->local_mac, ETH_ALEN); 8400 8401 val8 = dev->perm_addr[5]; 8402 dev->perm_addr[5] += np->port; 8403 if (dev->perm_addr[5] < val8) 8404 dev->perm_addr[4]++; 8405 8406 memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); 8407} 8408 8409static int __devinit niu_pci_probe_sprom(struct niu *np) 8410{ 8411 struct net_device *dev = np->dev; 8412 int len, i; 8413 u64 val, sum; 8414 u8 val8; 8415 8416 val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ); 8417 val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT; 8418 len = val / 4; 8419 8420 np->eeprom_len = len; 8421 8422 netif_printk(np, probe, KERN_DEBUG, np->dev, 8423 "SPROM: Image size %llu\n", (unsigned long long)val); 8424 8425 sum = 0; 8426 for (i = 0; i < len; i++) { 8427 val = nr64(ESPC_NCR(i)); 8428 sum += (val >> 0) & 0xff; 8429 sum += (val >> 8) & 0xff; 8430 sum += (val >> 16) & 0xff; 8431 sum += (val >> 24) & 0xff; 8432 } 8433 netif_printk(np, probe, KERN_DEBUG, np->dev, 8434 "SPROM: Checksum %x\n", (int)(sum & 0xff)); 8435 if ((sum & 0xff) != 0xab) { 8436 dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff)); 8437 return -EINVAL; 8438 } 8439 8440 val = nr64(ESPC_PHY_TYPE); 8441 switch (np->port) { 8442 case 0: 8443 val8 = (val & ESPC_PHY_TYPE_PORT0) >> 8444 ESPC_PHY_TYPE_PORT0_SHIFT; 8445 break; 8446 case 1: 8447 val8 = (val & ESPC_PHY_TYPE_PORT1) >> 8448 ESPC_PHY_TYPE_PORT1_SHIFT; 8449 break; 8450 case 2: 8451 val8 = (val & ESPC_PHY_TYPE_PORT2) >> 8452 ESPC_PHY_TYPE_PORT2_SHIFT; 8453 break; 8454 case 3: 8455 val8 = (val & ESPC_PHY_TYPE_PORT3) >> 8456 ESPC_PHY_TYPE_PORT3_SHIFT; 8457 break; 8458 default: 8459 dev_err(np->device, "Bogus port number %u\n", 8460 np->port); 8461 return -EINVAL; 8462 } 8463 netif_printk(np, probe, KERN_DEBUG, np->dev, 8464 "SPROM: PHY type %x\n", val8); 8465 8466 switch (val8) { 8467 case ESPC_PHY_TYPE_1G_COPPER: 8468 /* 1G copper, MII */ 8469 np->flags &= ~(NIU_FLAGS_FIBER | 8470 NIU_FLAGS_10G); 8471 np->mac_xcvr = MAC_XCVR_MII; 8472 break; 8473 8474 case ESPC_PHY_TYPE_1G_FIBER: 8475 /* 1G fiber, PCS */ 8476 np->flags &= ~NIU_FLAGS_10G; 8477 np->flags |= NIU_FLAGS_FIBER; 8478 np->mac_xcvr = MAC_XCVR_PCS; 8479 break; 8480 8481 case ESPC_PHY_TYPE_10G_COPPER: 8482 /* 10G copper, XPCS */ 8483 np->flags |= NIU_FLAGS_10G; 8484 np->flags &= ~NIU_FLAGS_FIBER; 8485 np->mac_xcvr = MAC_XCVR_XPCS; 8486 break; 8487 8488 case ESPC_PHY_TYPE_10G_FIBER: 8489 /* 10G fiber, XPCS */ 8490 np->flags |= (NIU_FLAGS_10G | 8491 NIU_FLAGS_FIBER); 8492 np->mac_xcvr = MAC_XCVR_XPCS; 8493 break; 8494 8495 default: 8496 dev_err(np->device, "Bogus SPROM phy type %u\n", val8); 8497 return -EINVAL; 8498 } 8499 8500 val = nr64(ESPC_MAC_ADDR0); 8501 netif_printk(np, probe, KERN_DEBUG, np->dev, 
8502 "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val); 8503 dev->perm_addr[0] = (val >> 0) & 0xff; 8504 dev->perm_addr[1] = (val >> 8) & 0xff; 8505 dev->perm_addr[2] = (val >> 16) & 0xff; 8506 dev->perm_addr[3] = (val >> 24) & 0xff; 8507 8508 val = nr64(ESPC_MAC_ADDR1); 8509 netif_printk(np, probe, KERN_DEBUG, np->dev, 8510 "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val); 8511 dev->perm_addr[4] = (val >> 0) & 0xff; 8512 dev->perm_addr[5] = (val >> 8) & 0xff; 8513 8514 if (!is_valid_ether_addr(&dev->perm_addr[0])) { 8515 dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n", 8516 dev->perm_addr); 8517 return -EINVAL; 8518 } 8519 8520 val8 = dev->perm_addr[5]; 8521 dev->perm_addr[5] += np->port; 8522 if (dev->perm_addr[5] < val8) 8523 dev->perm_addr[4]++; 8524 8525 memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); 8526 8527 val = nr64(ESPC_MOD_STR_LEN); 8528 netif_printk(np, probe, KERN_DEBUG, np->dev, 8529 "SPROM: MOD_STR_LEN[%llu]\n", (unsigned long long)val); 8530 if (val >= 8 * 4) 8531 return -EINVAL; 8532 8533 for (i = 0; i < val; i += 4) { 8534 u64 tmp = nr64(ESPC_NCR(5 + (i / 4))); 8535 8536 np->vpd.model[i + 3] = (tmp >> 0) & 0xff; 8537 np->vpd.model[i + 2] = (tmp >> 8) & 0xff; 8538 np->vpd.model[i + 1] = (tmp >> 16) & 0xff; 8539 np->vpd.model[i + 0] = (tmp >> 24) & 0xff; 8540 } 8541 np->vpd.model[val] = '\0'; 8542 8543 val = nr64(ESPC_BD_MOD_STR_LEN); 8544 netif_printk(np, probe, KERN_DEBUG, np->dev, 8545 "SPROM: BD_MOD_STR_LEN[%llu]\n", (unsigned long long)val); 8546 if (val >= 4 * 4) 8547 return -EINVAL; 8548 8549 for (i = 0; i < val; i += 4) { 8550 u64 tmp = nr64(ESPC_NCR(14 + (i / 4))); 8551 8552 np->vpd.board_model[i + 3] = (tmp >> 0) & 0xff; 8553 np->vpd.board_model[i + 2] = (tmp >> 8) & 0xff; 8554 np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff; 8555 np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff; 8556 } 8557 np->vpd.board_model[val] = '\0'; 8558 8559 np->vpd.mac_num = 8560 nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL; 8561 netif_printk(np, probe, KERN_DEBUG, np->dev, 8562 "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num); 8563 8564 return 0; 8565} 8566 8567static int __devinit niu_get_and_validate_port(struct niu *np) 8568{ 8569 struct niu_parent *parent = np->parent; 8570 8571 if (np->port <= 1) 8572 np->flags |= NIU_FLAGS_XMAC; 8573 8574 if (!parent->num_ports) { 8575 if (parent->plat_type == PLAT_TYPE_NIU) { 8576 parent->num_ports = 2; 8577 } else { 8578 parent->num_ports = niu_pci_vpd_get_nports(np); 8579 if (!parent->num_ports) { 8580 /* Fall back to SPROM as last resort. 8581 * This will fail on most cards. 8582 */ 8583 parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) & 8584 ESPC_NUM_PORTS_MACS_VAL; 8585 8586 /* All of the current probing methods fail on 8587 * Maramba on-board parts. 
8588 */ 8589 if (!parent->num_ports) 8590 parent->num_ports = 4; 8591 } 8592 } 8593 } 8594 8595 if (np->port >= parent->num_ports) 8596 return -ENODEV; 8597 8598 return 0; 8599} 8600 8601static int __devinit phy_record(struct niu_parent *parent, 8602 struct phy_probe_info *p, 8603 int dev_id_1, int dev_id_2, u8 phy_port, 8604 int type) 8605{ 8606 u32 id = (dev_id_1 << 16) | dev_id_2; 8607 u8 idx; 8608 8609 if (dev_id_1 < 0 || dev_id_2 < 0) 8610 return 0; 8611 if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) { 8612 if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) && 8613 ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011) && 8614 ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8706)) 8615 return 0; 8616 } else { 8617 if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R) 8618 return 0; 8619 } 8620 8621 pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n", 8622 parent->index, id, 8623 type == PHY_TYPE_PMA_PMD ? "PMA/PMD" : 8624 type == PHY_TYPE_PCS ? "PCS" : "MII", 8625 phy_port); 8626 8627 if (p->cur[type] >= NIU_MAX_PORTS) { 8628 pr_err("Too many PHY ports\n"); 8629 return -EINVAL; 8630 } 8631 idx = p->cur[type]; 8632 p->phy_id[type][idx] = id; 8633 p->phy_port[type][idx] = phy_port; 8634 p->cur[type] = idx + 1; 8635 return 0; 8636} 8637 8638static int __devinit port_has_10g(struct phy_probe_info *p, int port) 8639{ 8640 int i; 8641 8642 for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) { 8643 if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port) 8644 return 1; 8645 } 8646 for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) { 8647 if (p->phy_port[PHY_TYPE_PCS][i] == port) 8648 return 1; 8649 } 8650 8651 return 0; 8652} 8653 8654static int __devinit count_10g_ports(struct phy_probe_info *p, int *lowest) 8655{ 8656 int port, cnt; 8657 8658 cnt = 0; 8659 *lowest = 32; 8660 for (port = 8; port < 32; port++) { 8661 if (port_has_10g(p, port)) { 8662 if (!cnt) 8663 *lowest = port; 8664 cnt++; 8665 } 8666 } 8667 8668 return cnt; 8669} 8670 8671static int __devinit count_1g_ports(struct phy_probe_info *p, int *lowest) 8672{ 8673 *lowest = 32; 8674 if (p->cur[PHY_TYPE_MII]) 8675 *lowest = p->phy_port[PHY_TYPE_MII][0]; 8676 8677 return p->cur[PHY_TYPE_MII]; 8678} 8679 8680static void __devinit niu_n2_divide_channels(struct niu_parent *parent) 8681{ 8682 int num_ports = parent->num_ports; 8683 int i; 8684 8685 for (i = 0; i < num_ports; i++) { 8686 parent->rxchan_per_port[i] = (16 / num_ports); 8687 parent->txchan_per_port[i] = (16 / num_ports); 8688 8689 pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n", 8690 parent->index, i, 8691 parent->rxchan_per_port[i], 8692 parent->txchan_per_port[i]); 8693 } 8694} 8695 8696static void __devinit niu_divide_channels(struct niu_parent *parent, 8697 int num_10g, int num_1g) 8698{ 8699 int num_ports = parent->num_ports; 8700 int rx_chans_per_10g, rx_chans_per_1g; 8701 int tx_chans_per_10g, tx_chans_per_1g; 8702 int i, tot_rx, tot_tx; 8703 8704 if (!num_10g || !num_1g) { 8705 rx_chans_per_10g = rx_chans_per_1g = 8706 (NIU_NUM_RXCHAN / num_ports); 8707 tx_chans_per_10g = tx_chans_per_1g = 8708 (NIU_NUM_TXCHAN / num_ports); 8709 } else { 8710 rx_chans_per_1g = NIU_NUM_RXCHAN / 8; 8711 rx_chans_per_10g = (NIU_NUM_RXCHAN - 8712 (rx_chans_per_1g * num_1g)) / 8713 num_10g; 8714 8715 tx_chans_per_1g = NIU_NUM_TXCHAN / 6; 8716 tx_chans_per_10g = (NIU_NUM_TXCHAN - 8717 (tx_chans_per_1g * num_1g)) / 8718 num_10g; 8719 } 8720 8721 tot_rx = tot_tx = 0; 8722 for (i = 0; i < num_ports; i++) { 8723 int type = phy_decode(parent->port_phy, i); 8724 8725 if (type == PORT_TYPE_10G) { 8726 
parent->rxchan_per_port[i] = rx_chans_per_10g; 8727 parent->txchan_per_port[i] = tx_chans_per_10g; 8728 } else { 8729 parent->rxchan_per_port[i] = rx_chans_per_1g; 8730 parent->txchan_per_port[i] = tx_chans_per_1g; 8731 } 8732 pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n", 8733 parent->index, i, 8734 parent->rxchan_per_port[i], 8735 parent->txchan_per_port[i]); 8736 tot_rx += parent->rxchan_per_port[i]; 8737 tot_tx += parent->txchan_per_port[i]; 8738 } 8739 8740 if (tot_rx > NIU_NUM_RXCHAN) { 8741 pr_err("niu%d: Too many RX channels (%d), resetting to one per port\n", 8742 parent->index, tot_rx); 8743 for (i = 0; i < num_ports; i++) 8744 parent->rxchan_per_port[i] = 1; 8745 } 8746 if (tot_tx > NIU_NUM_TXCHAN) { 8747 pr_err("niu%d: Too many TX channels (%d), resetting to one per port\n", 8748 parent->index, tot_tx); 8749 for (i = 0; i < num_ports; i++) 8750 parent->txchan_per_port[i] = 1; 8751 } 8752 if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) { 8753 pr_warning("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n", 8754 parent->index, tot_rx, tot_tx); 8755 } 8756} 8757 8758static void __devinit niu_divide_rdc_groups(struct niu_parent *parent, 8759 int num_10g, int num_1g) 8760{ 8761 int i, num_ports = parent->num_ports; 8762 int rdc_group, rdc_groups_per_port; 8763 int rdc_channel_base; 8764 8765 rdc_group = 0; 8766 rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports; 8767 8768 rdc_channel_base = 0; 8769 8770 for (i = 0; i < num_ports; i++) { 8771 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i]; 8772 int grp, num_channels = parent->rxchan_per_port[i]; 8773 int this_channel_offset; 8774 8775 tp->first_table_num = rdc_group; 8776 tp->num_tables = rdc_groups_per_port; 8777 this_channel_offset = 0; 8778 for (grp = 0; grp < tp->num_tables; grp++) { 8779 struct rdc_table *rt = &tp->tables[grp]; 8780 int slot; 8781 8782 pr_info("niu%d: Port %d RDC tbl(%d) [ ", 8783 parent->index, i, tp->first_table_num + grp); 8784 for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) { 8785 rt->rxdma_channel[slot] = 8786 rdc_channel_base + this_channel_offset; 8787 8788 pr_cont("%d ", rt->rxdma_channel[slot]); 8789 8790 if (++this_channel_offset == num_channels) 8791 this_channel_offset = 0; 8792 } 8793 pr_cont("]\n"); 8794 } 8795 8796 parent->rdc_default[i] = rdc_channel_base; 8797 8798 rdc_channel_base += num_channels; 8799 rdc_group += rdc_groups_per_port; 8800 } 8801} 8802 8803static int __devinit fill_phy_probe_info(struct niu *np, 8804 struct niu_parent *parent, 8805 struct phy_probe_info *info) 8806{ 8807 unsigned long flags; 8808 int port, err; 8809 8810 memset(info, 0, sizeof(*info)); 8811 8812 /* Port 0 to 7 are reserved for onboard Serdes, probe the rest. 
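* Each candidate port is probed via MDIO at the PMA/PMD and PCS device addresses, and as a plain MII PHY.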
*/ 8813 niu_lock_parent(np, flags); 8814 err = 0; 8815 for (port = 8; port < 32; port++) { 8816 int dev_id_1, dev_id_2; 8817 8818 dev_id_1 = mdio_read(np, port, 8819 NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1); 8820 dev_id_2 = mdio_read(np, port, 8821 NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2); 8822 err = phy_record(parent, info, dev_id_1, dev_id_2, port, 8823 PHY_TYPE_PMA_PMD); 8824 if (err) 8825 break; 8826 dev_id_1 = mdio_read(np, port, 8827 NIU_PCS_DEV_ADDR, MII_PHYSID1); 8828 dev_id_2 = mdio_read(np, port, 8829 NIU_PCS_DEV_ADDR, MII_PHYSID2); 8830 err = phy_record(parent, info, dev_id_1, dev_id_2, port, 8831 PHY_TYPE_PCS); 8832 if (err) 8833 break; 8834 dev_id_1 = mii_read(np, port, MII_PHYSID1); 8835 dev_id_2 = mii_read(np, port, MII_PHYSID2); 8836 err = phy_record(parent, info, dev_id_1, dev_id_2, port, 8837 PHY_TYPE_MII); 8838 if (err) 8839 break; 8840 } 8841 niu_unlock_parent(np, flags); 8842 8843 return err; 8844} 8845 8846static int __devinit walk_phys(struct niu *np, struct niu_parent *parent) 8847{ 8848 struct phy_probe_info *info = &parent->phy_probe_info; 8849 int lowest_10g, lowest_1g; 8850 int num_10g, num_1g; 8851 u32 val; 8852 int err; 8853 8854 num_10g = num_1g = 0; 8855 8856 if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || 8857 !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { 8858 num_10g = 0; 8859 num_1g = 2; 8860 parent->plat_type = PLAT_TYPE_ATCA_CP3220; 8861 parent->num_ports = 4; 8862 val = (phy_encode(PORT_TYPE_1G, 0) | 8863 phy_encode(PORT_TYPE_1G, 1) | 8864 phy_encode(PORT_TYPE_1G, 2) | 8865 phy_encode(PORT_TYPE_1G, 3)); 8866 } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { 8867 num_10g = 2; 8868 num_1g = 0; 8869 parent->num_ports = 2; 8870 val = (phy_encode(PORT_TYPE_10G, 0) | 8871 phy_encode(PORT_TYPE_10G, 1)); 8872 } else if ((np->flags & NIU_FLAGS_XCVR_SERDES) && 8873 (parent->plat_type == PLAT_TYPE_NIU)) { 8874 /* this is the Monza case */ 8875 if (np->flags & NIU_FLAGS_10G) { 8876 val = (phy_encode(PORT_TYPE_10G, 0) | 8877 phy_encode(PORT_TYPE_10G, 1)); 8878 } else { 8879 val = (phy_encode(PORT_TYPE_1G, 0) | 8880 phy_encode(PORT_TYPE_1G, 1)); 8881 } 8882 } else { 8883 err = fill_phy_probe_info(np, parent, info); 8884 if (err) 8885 return err; 8886 8887 num_10g = count_10g_ports(info, &lowest_10g); 8888 num_1g = count_1g_ports(info, &lowest_1g); 8889 8890 switch ((num_10g << 4) | num_1g) { 8891 case 0x24: 8892 if (lowest_1g == 10) 8893 parent->plat_type = PLAT_TYPE_VF_P0; 8894 else if (lowest_1g == 26) 8895 parent->plat_type = PLAT_TYPE_VF_P1; 8896 else 8897 goto unknown_vg_1g_port; 8898 8899 /* fallthru */ 8900 case 0x22: 8901 val = (phy_encode(PORT_TYPE_10G, 0) | 8902 phy_encode(PORT_TYPE_10G, 1) | 8903 phy_encode(PORT_TYPE_1G, 2) | 8904 phy_encode(PORT_TYPE_1G, 3)); 8905 break; 8906 8907 case 0x20: 8908 val = (phy_encode(PORT_TYPE_10G, 0) | 8909 phy_encode(PORT_TYPE_10G, 1)); 8910 break; 8911 8912 case 0x10: 8913 val = phy_encode(PORT_TYPE_10G, np->port); 8914 break; 8915 8916 case 0x14: 8917 if (lowest_1g == 10) 8918 parent->plat_type = PLAT_TYPE_VF_P0; 8919 else if (lowest_1g == 26) 8920 parent->plat_type = PLAT_TYPE_VF_P1; 8921 else 8922 goto unknown_vg_1g_port; 8923 8924 /* fallthru */ 8925 case 0x13: 8926 if ((lowest_10g & 0x7) == 0) 8927 val = (phy_encode(PORT_TYPE_10G, 0) | 8928 phy_encode(PORT_TYPE_1G, 1) | 8929 phy_encode(PORT_TYPE_1G, 2) | 8930 phy_encode(PORT_TYPE_1G, 3)); 8931 else 8932 val = (phy_encode(PORT_TYPE_1G, 0) | 8933 phy_encode(PORT_TYPE_10G, 1) | 8934 phy_encode(PORT_TYPE_1G, 2) | 8935 phy_encode(PORT_TYPE_1G, 3)); 8936 break; 8937 8938 case 
0x04: 8939 if (lowest_1g == 10) 8940 parent->plat_type = PLAT_TYPE_VF_P0; 8941 else if (lowest_1g == 26) 8942 parent->plat_type = PLAT_TYPE_VF_P1; 8943 else 8944 goto unknown_vg_1g_port; 8945 8946 val = (phy_encode(PORT_TYPE_1G, 0) | 8947 phy_encode(PORT_TYPE_1G, 1) | 8948 phy_encode(PORT_TYPE_1G, 2) | 8949 phy_encode(PORT_TYPE_1G, 3)); 8950 break; 8951 8952 default: 8953 pr_err("Unsupported port config 10G[%d] 1G[%d]\n", 8954 num_10g, num_1g); 8955 return -EINVAL; 8956 } 8957 } 8958 8959 parent->port_phy = val; 8960 8961 if (parent->plat_type == PLAT_TYPE_NIU) 8962 niu_n2_divide_channels(parent); 8963 else 8964 niu_divide_channels(parent, num_10g, num_1g); 8965 8966 niu_divide_rdc_groups(parent, num_10g, num_1g); 8967 8968 return 0; 8969 8970unknown_vg_1g_port: 8971 pr_err("Cannot identify platform type, 1gport=%d\n", lowest_1g); 8972 return -EINVAL; 8973} 8974 8975static int __devinit niu_probe_ports(struct niu *np) 8976{ 8977 struct niu_parent *parent = np->parent; 8978 int err, i; 8979 8980 if (parent->port_phy == PORT_PHY_UNKNOWN) { 8981 err = walk_phys(np, parent); 8982 if (err) 8983 return err; 8984 8985 niu_set_ldg_timer_res(np, 2); 8986 for (i = 0; i <= LDN_MAX; i++) 8987 niu_ldn_irq_enable(np, i, 0); 8988 } 8989 8990 if (parent->port_phy == PORT_PHY_INVALID) 8991 return -EINVAL; 8992 8993 return 0; 8994} 8995 8996static int __devinit niu_classifier_swstate_init(struct niu *np) 8997{ 8998 struct niu_classifier *cp = &np->clas; 8999 9000 cp->tcam_top = (u16) np->port; 9001 cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports; 9002 cp->h1_init = 0xffffffff; 9003 cp->h2_init = 0xffff; 9004 9005 return fflp_early_init(np); 9006} 9007 9008static void __devinit niu_link_config_init(struct niu *np) 9009{ 9010 struct niu_link_config *lp = &np->link_config; 9011 9012 lp->advertising = (ADVERTISED_10baseT_Half | 9013 ADVERTISED_10baseT_Full | 9014 ADVERTISED_100baseT_Half | 9015 ADVERTISED_100baseT_Full | 9016 ADVERTISED_1000baseT_Half | 9017 ADVERTISED_1000baseT_Full | 9018 ADVERTISED_10000baseT_Full | 9019 ADVERTISED_Autoneg); 9020 lp->speed = lp->active_speed = SPEED_INVALID; 9021 lp->duplex = DUPLEX_FULL; 9022 lp->active_duplex = DUPLEX_INVALID; 9023 lp->autoneg = 1; 9024#if 0 9025 lp->loopback_mode = LOOPBACK_MAC; 9026 lp->active_speed = SPEED_10000; 9027 lp->active_duplex = DUPLEX_FULL; 9028#else 9029 lp->loopback_mode = LOOPBACK_DISABLED; 9030#endif 9031} 9032 9033static int __devinit niu_init_mac_ipp_pcs_base(struct niu *np) 9034{ 9035 switch (np->port) { 9036 case 0: 9037 np->mac_regs = np->regs + XMAC_PORT0_OFF; 9038 np->ipp_off = 0x00000; 9039 np->pcs_off = 0x04000; 9040 np->xpcs_off = 0x02000; 9041 break; 9042 9043 case 1: 9044 np->mac_regs = np->regs + XMAC_PORT1_OFF; 9045 np->ipp_off = 0x08000; 9046 np->pcs_off = 0x0a000; 9047 np->xpcs_off = 0x08000; 9048 break; 9049 9050 case 2: 9051 np->mac_regs = np->regs + BMAC_PORT2_OFF; 9052 np->ipp_off = 0x04000; 9053 np->pcs_off = 0x0e000; 9054 np->xpcs_off = ~0UL; 9055 break; 9056 9057 case 3: 9058 np->mac_regs = np->regs + BMAC_PORT3_OFF; 9059 np->ipp_off = 0x0c000; 9060 np->pcs_off = 0x12000; 9061 np->xpcs_off = ~0UL; 9062 break; 9063 9064 default: 9065 dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n", np->port); 9066 return -EINVAL; 9067 } 9068 9069 return 0; 9070} 9071 9072static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map) 9073{ 9074 struct msix_entry msi_vec[NIU_NUM_LDG]; 9075 struct niu_parent *parent = np->parent; 9076 struct pci_dev *pdev = np->pdev; 9077 int i, 
num_irqs, err; 9078 u8 first_ldg; 9079 9080 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port; 9081 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++) 9082 ldg_num_map[i] = first_ldg + i; 9083 9084 num_irqs = (parent->rxchan_per_port[np->port] + 9085 parent->txchan_per_port[np->port] + 9086 (np->port == 0 ? 3 : 1)); 9087 BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports)); 9088 9089retry: 9090 for (i = 0; i < num_irqs; i++) { 9091 msi_vec[i].vector = 0; 9092 msi_vec[i].entry = i; 9093 } 9094 9095 err = pci_enable_msix(pdev, msi_vec, num_irqs); 9096 if (err < 0) { 9097 np->flags &= ~NIU_FLAGS_MSIX; 9098 return; 9099 } 9100 if (err > 0) { 9101 num_irqs = err; 9102 goto retry; 9103 } 9104 9105 np->flags |= NIU_FLAGS_MSIX; 9106 for (i = 0; i < num_irqs; i++) 9107 np->ldg[i].irq = msi_vec[i].vector; 9108 np->num_ldg = num_irqs; 9109} 9110 9111static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map) 9112{ 9113#ifdef CONFIG_SPARC64 9114 struct of_device *op = np->op; 9115 const u32 *int_prop; 9116 int i; 9117 9118 int_prop = of_get_property(op->dev.of_node, "interrupts", NULL); 9119 if (!int_prop) 9120 return -ENODEV; 9121 9122 for (i = 0; i < op->num_irqs; i++) { 9123 ldg_num_map[i] = int_prop[i]; 9124 np->ldg[i].irq = op->irqs[i]; 9125 } 9126 9127 np->num_ldg = op->num_irqs; 9128 9129 return 0; 9130#else 9131 return -EINVAL; 9132#endif 9133} 9134 9135static int __devinit niu_ldg_init(struct niu *np) 9136{ 9137 struct niu_parent *parent = np->parent; 9138 u8 ldg_num_map[NIU_NUM_LDG]; 9139 int first_chan, num_chan; 9140 int i, err, ldg_rotor; 9141 u8 port; 9142 9143 np->num_ldg = 1; 9144 np->ldg[0].irq = np->dev->irq; 9145 if (parent->plat_type == PLAT_TYPE_NIU) { 9146 err = niu_n2_irq_init(np, ldg_num_map); 9147 if (err) 9148 return err; 9149 } else 9150 niu_try_msix(np, ldg_num_map); 9151 9152 port = np->port; 9153 for (i = 0; i < np->num_ldg; i++) { 9154 struct niu_ldg *lp = &np->ldg[i]; 9155 9156 netif_napi_add(np->dev, &lp->napi, niu_poll, 64); 9157 9158 lp->np = np; 9159 lp->ldg_num = ldg_num_map[i]; 9160 lp->timer = 2; /* XXX */ 9161 9162 /* On N2 NIU the firmware has setup the SID mappings so they go 9163 * to the correct values that will route the LDG to the proper 9164 * interrupt in the NCU interrupt table. 9165 */ 9166 if (np->parent->plat_type != PLAT_TYPE_NIU) { 9167 err = niu_set_ldg_sid(np, lp->ldg_num, port, i); 9168 if (err) 9169 return err; 9170 } 9171 } 9172 9173 /* We adopt the LDG assignment ordering used by the N2 NIU 9174 * 'interrupt' properties because that simplifies a lot of 9175 * things. 
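 * A single LDG can service several LDNs, so the assignments below simply
 * walk ldg_rotor round-robin across the np->num_ldg groups obtained above.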
This ordering is: 9176 * 9177 * MAC 9178 * MIF (if port zero) 9179 * SYSERR (if port zero) 9180 * RX channels 9181 * TX channels 9182 */ 9183 9184 ldg_rotor = 0; 9185 9186 err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor], 9187 LDN_MAC(port)); 9188 if (err) 9189 return err; 9190 9191 ldg_rotor++; 9192 if (ldg_rotor == np->num_ldg) 9193 ldg_rotor = 0; 9194 9195 if (port == 0) { 9196 err = niu_ldg_assign_ldn(np, parent, 9197 ldg_num_map[ldg_rotor], 9198 LDN_MIF); 9199 if (err) 9200 return err; 9201 9202 ldg_rotor++; 9203 if (ldg_rotor == np->num_ldg) 9204 ldg_rotor = 0; 9205 9206 err = niu_ldg_assign_ldn(np, parent, 9207 ldg_num_map[ldg_rotor], 9208 LDN_DEVICE_ERROR); 9209 if (err) 9210 return err; 9211 9212 ldg_rotor++; 9213 if (ldg_rotor == np->num_ldg) 9214 ldg_rotor = 0; 9215 9216 } 9217 9218 first_chan = 0; 9219 for (i = 0; i < port; i++) 9220 first_chan += parent->rxchan_per_port[port]; 9221 num_chan = parent->rxchan_per_port[port]; 9222 9223 for (i = first_chan; i < (first_chan + num_chan); i++) { 9224 err = niu_ldg_assign_ldn(np, parent, 9225 ldg_num_map[ldg_rotor], 9226 LDN_RXDMA(i)); 9227 if (err) 9228 return err; 9229 ldg_rotor++; 9230 if (ldg_rotor == np->num_ldg) 9231 ldg_rotor = 0; 9232 } 9233 9234 first_chan = 0; 9235 for (i = 0; i < port; i++) 9236 first_chan += parent->txchan_per_port[port]; 9237 num_chan = parent->txchan_per_port[port]; 9238 for (i = first_chan; i < (first_chan + num_chan); i++) { 9239 err = niu_ldg_assign_ldn(np, parent, 9240 ldg_num_map[ldg_rotor], 9241 LDN_TXDMA(i)); 9242 if (err) 9243 return err; 9244 ldg_rotor++; 9245 if (ldg_rotor == np->num_ldg) 9246 ldg_rotor = 0; 9247 } 9248 9249 return 0; 9250} 9251 9252static void __devexit niu_ldg_free(struct niu *np) 9253{ 9254 if (np->flags & NIU_FLAGS_MSIX) 9255 pci_disable_msix(np->pdev); 9256} 9257 9258static int __devinit niu_get_of_props(struct niu *np) 9259{ 9260#ifdef CONFIG_SPARC64 9261 struct net_device *dev = np->dev; 9262 struct device_node *dp; 9263 const char *phy_type; 9264 const u8 *mac_addr; 9265 const char *model; 9266 int prop_len; 9267 9268 if (np->parent->plat_type == PLAT_TYPE_NIU) 9269 dp = np->op->dev.of_node; 9270 else 9271 dp = pci_device_to_OF_node(np->pdev); 9272 9273 phy_type = of_get_property(dp, "phy-type", &prop_len); 9274 if (!phy_type) { 9275 netdev_err(dev, "%s: OF node lacks phy-type property\n", 9276 dp->full_name); 9277 return -EINVAL; 9278 } 9279 9280 if (!strcmp(phy_type, "none")) 9281 return -ENODEV; 9282 9283 strcpy(np->vpd.phy_type, phy_type); 9284 9285 if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { 9286 netdev_err(dev, "%s: Illegal phy string [%s]\n", 9287 dp->full_name, np->vpd.phy_type); 9288 return -EINVAL; 9289 } 9290 9291 mac_addr = of_get_property(dp, "local-mac-address", &prop_len); 9292 if (!mac_addr) { 9293 netdev_err(dev, "%s: OF node lacks local-mac-address property\n", 9294 dp->full_name); 9295 return -EINVAL; 9296 } 9297 if (prop_len != dev->addr_len) { 9298 netdev_err(dev, "%s: OF MAC address prop len (%d) is wrong\n", 9299 dp->full_name, prop_len); 9300 } 9301 memcpy(dev->perm_addr, mac_addr, dev->addr_len); 9302 if (!is_valid_ether_addr(&dev->perm_addr[0])) { 9303 netdev_err(dev, "%s: OF MAC address is invalid\n", 9304 dp->full_name); 9305 netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->perm_addr); 9306 return -EINVAL; 9307 } 9308 9309 memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); 9310 9311 model = of_get_property(dp, "model", &prop_len); 9312 9313 if (model) 9314 strcpy(np->vpd.model, model); 9315 9316 if 
(of_find_property(dp, "hot-swappable-phy", &prop_len)) { 9317 np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | 9318 NIU_FLAGS_HOTPLUG_PHY); 9319 } 9320 9321 return 0; 9322#else 9323 return -EINVAL; 9324#endif 9325} 9326 9327static int __devinit niu_get_invariants(struct niu *np) 9328{ 9329 int err, have_props; 9330 u32 offset; 9331 9332 err = niu_get_of_props(np); 9333 if (err == -ENODEV) 9334 return err; 9335 9336 have_props = !err; 9337 9338 err = niu_init_mac_ipp_pcs_base(np); 9339 if (err) 9340 return err; 9341 9342 if (have_props) { 9343 err = niu_get_and_validate_port(np); 9344 if (err) 9345 return err; 9346 9347 } else { 9348 if (np->parent->plat_type == PLAT_TYPE_NIU) 9349 return -EINVAL; 9350 9351 nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE); 9352 offset = niu_pci_vpd_offset(np); 9353 netif_printk(np, probe, KERN_DEBUG, np->dev, 9354 "%s() VPD offset [%08x]\n", __func__, offset); 9355 if (offset) 9356 niu_pci_vpd_fetch(np, offset); 9357 nw64(ESPC_PIO_EN, 0); 9358 9359 if (np->flags & NIU_FLAGS_VPD_VALID) { 9360 niu_pci_vpd_validate(np); 9361 err = niu_get_and_validate_port(np); 9362 if (err) 9363 return err; 9364 } 9365 9366 if (!(np->flags & NIU_FLAGS_VPD_VALID)) { 9367 err = niu_get_and_validate_port(np); 9368 if (err) 9369 return err; 9370 err = niu_pci_probe_sprom(np); 9371 if (err) 9372 return err; 9373 } 9374 } 9375 9376 err = niu_probe_ports(np); 9377 if (err) 9378 return err; 9379 9380 niu_ldg_init(np); 9381 9382 niu_classifier_swstate_init(np); 9383 niu_link_config_init(np); 9384 9385 err = niu_determine_phy_disposition(np); 9386 if (!err) 9387 err = niu_init_link(np); 9388 9389 return err; 9390} 9391 9392static LIST_HEAD(niu_parent_list); 9393static DEFINE_MUTEX(niu_parent_lock); 9394static int niu_parent_index; 9395 9396static ssize_t show_port_phy(struct device *dev, 9397 struct device_attribute *attr, char *buf) 9398{ 9399 struct platform_device *plat_dev = to_platform_device(dev); 9400 struct niu_parent *p = plat_dev->dev.platform_data; 9401 u32 port_phy = p->port_phy; 9402 char *orig_buf = buf; 9403 int i; 9404 9405 if (port_phy == PORT_PHY_UNKNOWN || 9406 port_phy == PORT_PHY_INVALID) 9407 return 0; 9408 9409 for (i = 0; i < p->num_ports; i++) { 9410 const char *type_str; 9411 int type; 9412 9413 type = phy_decode(port_phy, i); 9414 if (type == PORT_TYPE_10G) 9415 type_str = "10G"; 9416 else 9417 type_str = "1G"; 9418 buf += sprintf(buf, 9419 (i == 0) ? "%s" : " %s", 9420 type_str); 9421 } 9422 buf += sprintf(buf, "\n"); 9423 return buf - orig_buf; 9424} 9425 9426static ssize_t show_plat_type(struct device *dev, 9427 struct device_attribute *attr, char *buf) 9428{ 9429 struct platform_device *plat_dev = to_platform_device(dev); 9430 struct niu_parent *p = plat_dev->dev.platform_data; 9431 const char *type_str; 9432 9433 switch (p->plat_type) { 9434 case PLAT_TYPE_ATLAS: 9435 type_str = "atlas"; 9436 break; 9437 case PLAT_TYPE_NIU: 9438 type_str = "niu"; 9439 break; 9440 case PLAT_TYPE_VF_P0: 9441 type_str = "vf_p0"; 9442 break; 9443 case PLAT_TYPE_VF_P1: 9444 type_str = "vf_p1"; 9445 break; 9446 default: 9447 type_str = "unknown"; 9448 break; 9449 } 9450 9451 return sprintf(buf, "%s\n", type_str); 9452} 9453 9454static ssize_t __show_chan_per_port(struct device *dev, 9455 struct device_attribute *attr, char *buf, 9456 int rx) 9457{ 9458 struct platform_device *plat_dev = to_platform_device(dev); 9459 struct niu_parent *p = plat_dev->dev.platform_data; 9460 char *orig_buf = buf; 9461 u8 *arr; 9462 int i; 9463 9464 arr = (rx ? 
p->rxchan_per_port : p->txchan_per_port);

	for (i = 0; i < p->num_ports; i++) {
		buf += sprintf(buf,
			       (i == 0) ? "%d" : " %d",
			       arr[i]);
	}
	buf += sprintf(buf, "\n");

	return buf - orig_buf;
}

static ssize_t show_rxchan_per_port(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return __show_chan_per_port(dev, attr, buf, 1);
}

static ssize_t show_txchan_per_port(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return __show_chan_per_port(dev, attr, buf, 0);
}

static ssize_t show_num_ports(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = plat_dev->dev.platform_data;

	return sprintf(buf, "%d\n", p->num_ports);
}

/* Read-only sysfs attributes exported by the shared niu parent
 * platform device.
 */
static struct device_attribute niu_parent_attributes[] = {
	__ATTR(port_phy, S_IRUGO, show_port_phy, NULL),
	__ATTR(plat_type, S_IRUGO, show_plat_type, NULL),
	__ATTR(rxchan_per_port, S_IRUGO, show_rxchan_per_port, NULL),
	__ATTR(txchan_per_port, S_IRUGO, show_txchan_per_port, NULL),
	__ATTR(num_ports, S_IRUGO, show_num_ports, NULL),
	{}
};

static struct niu_parent * __devinit niu_new_parent(struct niu *np,
						    union niu_parent_id *id,
						    u8 ptype)
{
	struct platform_device *plat_dev;
	struct niu_parent *p;
	int i;

	plat_dev = platform_device_register_simple("niu", niu_parent_index,
						   NULL, 0);
	if (IS_ERR(plat_dev))
		return NULL;

	for (i = 0; attr_name(niu_parent_attributes[i]); i++) {
		int err = device_create_file(&plat_dev->dev,
					     &niu_parent_attributes[i]);
		if (err)
			goto fail_unregister;
	}

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		goto fail_unregister;

	p->index = niu_parent_index++;

	plat_dev->dev.platform_data = p;
	p->plat_dev = plat_dev;

	memcpy(&p->id, id, sizeof(*id));
	p->plat_type = ptype;
	INIT_LIST_HEAD(&p->list);
	atomic_set(&p->refcnt, 0);
	list_add(&p->list, &niu_parent_list);
	spin_lock_init(&p->lock);

	p->rxdma_clock_divider = 7500;

	p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES;
	if (p->plat_type == PLAT_TYPE_NIU)
		p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES;

	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
		int index = i - CLASS_CODE_USER_PROG1;

		p->tcam_key[index] = TCAM_KEY_TSEL;
		p->flow_key[index] = (FLOW_KEY_IPSA |
				      FLOW_KEY_IPDA |
				      FLOW_KEY_PROTO |
				      (FLOW_KEY_L4_BYTE12 <<
				       FLOW_KEY_L4_0_SHIFT) |
				      (FLOW_KEY_L4_BYTE12 <<
				       FLOW_KEY_L4_1_SHIFT));
	}

	for (i = 0; i < LDN_MAX + 1; i++)
		p->ldg_map[i] = LDG_INVALID;

	return p;

fail_unregister:
	platform_device_unregister(plat_dev);
	return NULL;
}

static struct niu_parent * __devinit niu_get_parent(struct niu *np,
						    union niu_parent_id *id,
						    u8 ptype)
{
	struct niu_parent *p, *tmp;
	int port = np->port;

	mutex_lock(&niu_parent_lock);
	p = NULL;
	list_for_each_entry(tmp, &niu_parent_list, list) {
		if (!memcmp(id, &tmp->id, sizeof(*id))) {
			p = tmp;
			break;
		}
	}
	if (!p)
		p = niu_new_parent(np, id, ptype);

	if (p) {
		char port_name[6];
		int err;

		sprintf(port_name, "port%d", port);
		err = sysfs_create_link(&p->plat_dev->dev.kobj,
9595 &np->device->kobj, 9596 port_name); 9597 if (!err) { 9598 p->ports[port] = np; 9599 atomic_inc(&p->refcnt); 9600 } 9601 } 9602 mutex_unlock(&niu_parent_lock); 9603 9604 return p; 9605} 9606 9607static void niu_put_parent(struct niu *np) 9608{ 9609 struct niu_parent *p = np->parent; 9610 u8 port = np->port; 9611 char port_name[6]; 9612 9613 BUG_ON(!p || p->ports[port] != np); 9614 9615 netif_printk(np, probe, KERN_DEBUG, np->dev, 9616 "%s() port[%u]\n", __func__, port); 9617 9618 sprintf(port_name, "port%d", port); 9619 9620 mutex_lock(&niu_parent_lock); 9621 9622 sysfs_remove_link(&p->plat_dev->dev.kobj, port_name); 9623 9624 p->ports[port] = NULL; 9625 np->parent = NULL; 9626 9627 if (atomic_dec_and_test(&p->refcnt)) { 9628 list_del(&p->list); 9629 platform_device_unregister(p->plat_dev); 9630 } 9631 9632 mutex_unlock(&niu_parent_lock); 9633} 9634 9635static void *niu_pci_alloc_coherent(struct device *dev, size_t size, 9636 u64 *handle, gfp_t flag) 9637{ 9638 dma_addr_t dh; 9639 void *ret; 9640 9641 ret = dma_alloc_coherent(dev, size, &dh, flag); 9642 if (ret) 9643 *handle = dh; 9644 return ret; 9645} 9646 9647static void niu_pci_free_coherent(struct device *dev, size_t size, 9648 void *cpu_addr, u64 handle) 9649{ 9650 dma_free_coherent(dev, size, cpu_addr, handle); 9651} 9652 9653static u64 niu_pci_map_page(struct device *dev, struct page *page, 9654 unsigned long offset, size_t size, 9655 enum dma_data_direction direction) 9656{ 9657 return dma_map_page(dev, page, offset, size, direction); 9658} 9659 9660static void niu_pci_unmap_page(struct device *dev, u64 dma_address, 9661 size_t size, enum dma_data_direction direction) 9662{ 9663 dma_unmap_page(dev, dma_address, size, direction); 9664} 9665 9666static u64 niu_pci_map_single(struct device *dev, void *cpu_addr, 9667 size_t size, 9668 enum dma_data_direction direction) 9669{ 9670 return dma_map_single(dev, cpu_addr, size, direction); 9671} 9672 9673static void niu_pci_unmap_single(struct device *dev, u64 dma_address, 9674 size_t size, 9675 enum dma_data_direction direction) 9676{ 9677 dma_unmap_single(dev, dma_address, size, direction); 9678} 9679 9680static const struct niu_ops niu_pci_ops = { 9681 .alloc_coherent = niu_pci_alloc_coherent, 9682 .free_coherent = niu_pci_free_coherent, 9683 .map_page = niu_pci_map_page, 9684 .unmap_page = niu_pci_unmap_page, 9685 .map_single = niu_pci_map_single, 9686 .unmap_single = niu_pci_unmap_single, 9687}; 9688 9689static void __devinit niu_driver_version(void) 9690{ 9691 static int niu_version_printed; 9692 9693 if (niu_version_printed++ == 0) 9694 pr_info("%s", version); 9695} 9696 9697static struct net_device * __devinit niu_alloc_and_init( 9698 struct device *gen_dev, struct pci_dev *pdev, 9699 struct of_device *op, const struct niu_ops *ops, 9700 u8 port) 9701{ 9702 struct net_device *dev; 9703 struct niu *np; 9704 9705 dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN); 9706 if (!dev) { 9707 dev_err(gen_dev, "Etherdev alloc failed, aborting\n"); 9708 return NULL; 9709 } 9710 9711 SET_NETDEV_DEV(dev, gen_dev); 9712 9713 np = netdev_priv(dev); 9714 np->dev = dev; 9715 np->pdev = pdev; 9716 np->op = op; 9717 np->device = gen_dev; 9718 np->ops = ops; 9719 9720 np->msg_enable = niu_debug; 9721 9722 spin_lock_init(&np->lock); 9723 INIT_WORK(&np->reset_task, niu_reset_task); 9724 9725 np->port = port; 9726 9727 return dev; 9728} 9729 9730static const struct net_device_ops niu_netdev_ops = { 9731 .ndo_open = niu_open, 9732 .ndo_stop = niu_close, 9733 .ndo_start_xmit = niu_start_xmit, 
.ndo_get_stats = niu_get_stats,
	.ndo_set_multicast_list = niu_set_rx_mode,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = niu_set_mac_addr,
	.ndo_do_ioctl = niu_ioctl,
	.ndo_tx_timeout = niu_tx_timeout,
	.ndo_change_mtu = niu_change_mtu,
};

static void __devinit niu_assign_netdev_ops(struct net_device *dev)
{
	dev->netdev_ops = &niu_netdev_ops;
	dev->ethtool_ops = &niu_ethtool_ops;
	dev->watchdog_timeo = NIU_TX_TIMEOUT;
}

static void __devinit niu_device_announce(struct niu *np)
{
	struct net_device *dev = np->dev;

	pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr);

	if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) {
		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
			dev->name,
			(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
			(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
			(np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"),
			(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
			 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
			np->vpd.phy_type);
	} else {
		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
			dev->name,
			(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
			(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
			(np->flags & NIU_FLAGS_FIBER ? "FIBER" :
			 (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" :
			  "COPPER")),
			(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
			 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
			np->vpd.phy_type);
	}
}

static void __devinit niu_set_basic_features(struct net_device *dev)
{
	dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM |
			  NETIF_F_GRO | NETIF_F_RXHASH);
}

static int __devinit niu_pci_init_one(struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	union niu_parent_id parent_id;
	struct net_device *dev;
	struct niu *np;
	int err, pos;
	u64 dma_mask;
	u16 val16;

	niu_driver_version();

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Cannot find proper PCI device base addresses, aborting\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (pos <= 0) {
		dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n");
		err = -ENODEV;
		goto err_out_free_res;
	}

	dev = niu_alloc_and_init(&pdev->dev, pdev, NULL,
				 &niu_pci_ops, PCI_FUNC(pdev->devfn));
	if (!dev) {
		err = -ENOMEM;
		goto err_out_free_res;
	}
	np = netdev_priv(dev);

	memset(&parent_id, 0, sizeof(parent_id));
	parent_id.pci.domain = pci_domain_nr(pdev->bus);
	parent_id.pci.bus = pdev->bus->number;
	parent_id.pci.device = PCI_SLOT(pdev->devfn);

	np->parent = niu_get_parent(np, &parent_id,
				    PLAT_TYPE_ATLAS);
	if (!np->parent) {
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
	val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
	val16 |= (PCI_EXP_DEVCTL_CERE |
PCI_EXP_DEVCTL_NFERE | 9846 PCI_EXP_DEVCTL_FERE | 9847 PCI_EXP_DEVCTL_URRE | 9848 PCI_EXP_DEVCTL_RELAX_EN); 9849 pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16); 9850 9851 dma_mask = DMA_BIT_MASK(44); 9852 err = pci_set_dma_mask(pdev, dma_mask); 9853 if (!err) { 9854 dev->features |= NETIF_F_HIGHDMA; 9855 err = pci_set_consistent_dma_mask(pdev, dma_mask); 9856 if (err) { 9857 dev_err(&pdev->dev, "Unable to obtain 44 bit DMA for consistent allocations, aborting\n"); 9858 goto err_out_release_parent; 9859 } 9860 } 9861 if (err || dma_mask == DMA_BIT_MASK(32)) { 9862 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 9863 if (err) { 9864 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); 9865 goto err_out_release_parent; 9866 } 9867 } 9868 9869 niu_set_basic_features(dev); 9870 9871 np->regs = pci_ioremap_bar(pdev, 0); 9872 if (!np->regs) { 9873 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 9874 err = -ENOMEM; 9875 goto err_out_release_parent; 9876 } 9877 9878 pci_set_master(pdev); 9879 pci_save_state(pdev); 9880 9881 dev->irq = pdev->irq; 9882 9883 niu_assign_netdev_ops(dev); 9884 9885 err = niu_get_invariants(np); 9886 if (err) { 9887 if (err != -ENODEV) 9888 dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n"); 9889 goto err_out_iounmap; 9890 } 9891 9892 err = register_netdev(dev); 9893 if (err) { 9894 dev_err(&pdev->dev, "Cannot register net device, aborting\n"); 9895 goto err_out_iounmap; 9896 } 9897 9898 pci_set_drvdata(pdev, dev); 9899 9900 niu_device_announce(np); 9901 9902 return 0; 9903 9904err_out_iounmap: 9905 if (np->regs) { 9906 iounmap(np->regs); 9907 np->regs = NULL; 9908 } 9909 9910err_out_release_parent: 9911 niu_put_parent(np); 9912 9913err_out_free_dev: 9914 free_netdev(dev); 9915 9916err_out_free_res: 9917 pci_release_regions(pdev); 9918 9919err_out_disable_pdev: 9920 pci_disable_device(pdev); 9921 pci_set_drvdata(pdev, NULL); 9922 9923 return err; 9924} 9925 9926static void __devexit niu_pci_remove_one(struct pci_dev *pdev) 9927{ 9928 struct net_device *dev = pci_get_drvdata(pdev); 9929 9930 if (dev) { 9931 struct niu *np = netdev_priv(dev); 9932 9933 unregister_netdev(dev); 9934 if (np->regs) { 9935 iounmap(np->regs); 9936 np->regs = NULL; 9937 } 9938 9939 niu_ldg_free(np); 9940 9941 niu_put_parent(np); 9942 9943 free_netdev(dev); 9944 pci_release_regions(pdev); 9945 pci_disable_device(pdev); 9946 pci_set_drvdata(pdev, NULL); 9947 } 9948} 9949 9950static int niu_suspend(struct pci_dev *pdev, pm_message_t state) 9951{ 9952 struct net_device *dev = pci_get_drvdata(pdev); 9953 struct niu *np = netdev_priv(dev); 9954 unsigned long flags; 9955 9956 if (!netif_running(dev)) 9957 return 0; 9958 9959 flush_scheduled_work(); 9960 niu_netif_stop(np); 9961 9962 del_timer_sync(&np->timer); 9963 9964 spin_lock_irqsave(&np->lock, flags); 9965 niu_enable_interrupts(np, 0); 9966 spin_unlock_irqrestore(&np->lock, flags); 9967 9968 netif_device_detach(dev); 9969 9970 spin_lock_irqsave(&np->lock, flags); 9971 niu_stop_hw(np); 9972 spin_unlock_irqrestore(&np->lock, flags); 9973 9974 pci_save_state(pdev); 9975 9976 return 0; 9977} 9978 9979static int niu_resume(struct pci_dev *pdev) 9980{ 9981 struct net_device *dev = pci_get_drvdata(pdev); 9982 struct niu *np = netdev_priv(dev); 9983 unsigned long flags; 9984 int err; 9985 9986 if (!netif_running(dev)) 9987 return 0; 9988 9989 pci_restore_state(pdev); 9990 9991 netif_device_attach(dev); 9992 9993 spin_lock_irqsave(&np->lock, flags); 9994 9995 err = niu_init_hw(np); 9996 if (!err) { 
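		/* Hardware re-initialized cleanly: rearm the driver timer one
		 * second out and let traffic flow again via niu_netif_start().
		 */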
9997 np->timer.expires = jiffies + HZ; 9998 add_timer(&np->timer); 9999 niu_netif_start(np); 10000 } 10001 10002 spin_unlock_irqrestore(&np->lock, flags); 10003 10004 return err; 10005} 10006 10007static struct pci_driver niu_pci_driver = { 10008 .name = DRV_MODULE_NAME, 10009 .id_table = niu_pci_tbl, 10010 .probe = niu_pci_init_one, 10011 .remove = __devexit_p(niu_pci_remove_one), 10012 .suspend = niu_suspend, 10013 .resume = niu_resume, 10014}; 10015 10016#ifdef CONFIG_SPARC64 10017static void *niu_phys_alloc_coherent(struct device *dev, size_t size, 10018 u64 *dma_addr, gfp_t flag) 10019{ 10020 unsigned long order = get_order(size); 10021 unsigned long page = __get_free_pages(flag, order); 10022 10023 if (page == 0UL) 10024 return NULL; 10025 memset((char *)page, 0, PAGE_SIZE << order); 10026 *dma_addr = __pa(page); 10027 10028 return (void *) page; 10029} 10030 10031static void niu_phys_free_coherent(struct device *dev, size_t size, 10032 void *cpu_addr, u64 handle) 10033{ 10034 unsigned long order = get_order(size); 10035 10036 free_pages((unsigned long) cpu_addr, order); 10037} 10038 10039static u64 niu_phys_map_page(struct device *dev, struct page *page, 10040 unsigned long offset, size_t size, 10041 enum dma_data_direction direction) 10042{ 10043 return page_to_phys(page) + offset; 10044} 10045 10046static void niu_phys_unmap_page(struct device *dev, u64 dma_address, 10047 size_t size, enum dma_data_direction direction) 10048{ 10049 /* Nothing to do. */ 10050} 10051 10052static u64 niu_phys_map_single(struct device *dev, void *cpu_addr, 10053 size_t size, 10054 enum dma_data_direction direction) 10055{ 10056 return __pa(cpu_addr); 10057} 10058 10059static void niu_phys_unmap_single(struct device *dev, u64 dma_address, 10060 size_t size, 10061 enum dma_data_direction direction) 10062{ 10063 /* Nothing to do. 
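 * The phys ops hand out raw physical addresses (__pa(), page_to_phys()),
 * so there is no mapping state to tear down here.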
*/ 10064} 10065 10066static const struct niu_ops niu_phys_ops = { 10067 .alloc_coherent = niu_phys_alloc_coherent, 10068 .free_coherent = niu_phys_free_coherent, 10069 .map_page = niu_phys_map_page, 10070 .unmap_page = niu_phys_unmap_page, 10071 .map_single = niu_phys_map_single, 10072 .unmap_single = niu_phys_unmap_single, 10073}; 10074 10075static int __devinit niu_of_probe(struct of_device *op, 10076 const struct of_device_id *match) 10077{ 10078 union niu_parent_id parent_id; 10079 struct net_device *dev; 10080 struct niu *np; 10081 const u32 *reg; 10082 int err; 10083 10084 niu_driver_version(); 10085 10086 reg = of_get_property(op->dev.of_node, "reg", NULL); 10087 if (!reg) { 10088 dev_err(&op->dev, "%s: No 'reg' property, aborting\n", 10089 op->dev.of_node->full_name); 10090 return -ENODEV; 10091 } 10092 10093 dev = niu_alloc_and_init(&op->dev, NULL, op, 10094 &niu_phys_ops, reg[0] & 0x1); 10095 if (!dev) { 10096 err = -ENOMEM; 10097 goto err_out; 10098 } 10099 np = netdev_priv(dev); 10100 10101 memset(&parent_id, 0, sizeof(parent_id)); 10102 parent_id.of = of_get_parent(op->dev.of_node); 10103 10104 np->parent = niu_get_parent(np, &parent_id, 10105 PLAT_TYPE_NIU); 10106 if (!np->parent) { 10107 err = -ENOMEM; 10108 goto err_out_free_dev; 10109 } 10110 10111 niu_set_basic_features(dev); 10112 10113 np->regs = of_ioremap(&op->resource[1], 0, 10114 resource_size(&op->resource[1]), 10115 "niu regs"); 10116 if (!np->regs) { 10117 dev_err(&op->dev, "Cannot map device registers, aborting\n"); 10118 err = -ENOMEM; 10119 goto err_out_release_parent; 10120 } 10121 10122 np->vir_regs_1 = of_ioremap(&op->resource[2], 0, 10123 resource_size(&op->resource[2]), 10124 "niu vregs-1"); 10125 if (!np->vir_regs_1) { 10126 dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n"); 10127 err = -ENOMEM; 10128 goto err_out_iounmap; 10129 } 10130 10131 np->vir_regs_2 = of_ioremap(&op->resource[3], 0, 10132 resource_size(&op->resource[3]), 10133 "niu vregs-2"); 10134 if (!np->vir_regs_2) { 10135 dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n"); 10136 err = -ENOMEM; 10137 goto err_out_iounmap; 10138 } 10139 10140 niu_assign_netdev_ops(dev); 10141 10142 err = niu_get_invariants(np); 10143 if (err) { 10144 if (err != -ENODEV) 10145 dev_err(&op->dev, "Problem fetching invariants of chip, aborting\n"); 10146 goto err_out_iounmap; 10147 } 10148 10149 err = register_netdev(dev); 10150 if (err) { 10151 dev_err(&op->dev, "Cannot register net device, aborting\n"); 10152 goto err_out_iounmap; 10153 } 10154 10155 dev_set_drvdata(&op->dev, dev); 10156 10157 niu_device_announce(np); 10158 10159 return 0; 10160 10161err_out_iounmap: 10162 if (np->vir_regs_1) { 10163 of_iounmap(&op->resource[2], np->vir_regs_1, 10164 resource_size(&op->resource[2])); 10165 np->vir_regs_1 = NULL; 10166 } 10167 10168 if (np->vir_regs_2) { 10169 of_iounmap(&op->resource[3], np->vir_regs_2, 10170 resource_size(&op->resource[3])); 10171 np->vir_regs_2 = NULL; 10172 } 10173 10174 if (np->regs) { 10175 of_iounmap(&op->resource[1], np->regs, 10176 resource_size(&op->resource[1])); 10177 np->regs = NULL; 10178 } 10179 10180err_out_release_parent: 10181 niu_put_parent(np); 10182 10183err_out_free_dev: 10184 free_netdev(dev); 10185 10186err_out: 10187 return err; 10188} 10189 10190static int __devexit niu_of_remove(struct of_device *op) 10191{ 10192 struct net_device *dev = dev_get_drvdata(&op->dev); 10193 10194 if (dev) { 10195 struct niu *np = netdev_priv(dev); 10196 10197 unregister_netdev(dev); 10198 10199 if 
(np->vir_regs_1) { 10200 of_iounmap(&op->resource[2], np->vir_regs_1, 10201 resource_size(&op->resource[2])); 10202 np->vir_regs_1 = NULL; 10203 } 10204 10205 if (np->vir_regs_2) { 10206 of_iounmap(&op->resource[3], np->vir_regs_2, 10207 resource_size(&op->resource[3])); 10208 np->vir_regs_2 = NULL; 10209 } 10210 10211 if (np->regs) { 10212 of_iounmap(&op->resource[1], np->regs, 10213 resource_size(&op->resource[1])); 10214 np->regs = NULL; 10215 } 10216 10217 niu_ldg_free(np); 10218 10219 niu_put_parent(np); 10220 10221 free_netdev(dev); 10222 dev_set_drvdata(&op->dev, NULL); 10223 } 10224 return 0; 10225} 10226 10227static const struct of_device_id niu_match[] = { 10228 { 10229 .name = "network", 10230 .compatible = "SUNW,niusl", 10231 }, 10232 {}, 10233}; 10234MODULE_DEVICE_TABLE(of, niu_match); 10235 10236static struct of_platform_driver niu_of_driver = { 10237 .driver = { 10238 .name = "niu", 10239 .owner = THIS_MODULE, 10240 .of_match_table = niu_match, 10241 }, 10242 .probe = niu_of_probe, 10243 .remove = __devexit_p(niu_of_remove), 10244}; 10245 10246#endif /* CONFIG_SPARC64 */ 10247 10248static int __init niu_init(void) 10249{ 10250 int err = 0; 10251 10252 BUILD_BUG_ON(PAGE_SIZE < 4 * 1024); 10253 10254 niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT); 10255 10256#ifdef CONFIG_SPARC64 10257 err = of_register_driver(&niu_of_driver, &of_bus_type); 10258#endif 10259 10260 if (!err) { 10261 err = pci_register_driver(&niu_pci_driver); 10262#ifdef CONFIG_SPARC64 10263 if (err) 10264 of_unregister_driver(&niu_of_driver); 10265#endif 10266 } 10267 10268 return err; 10269} 10270 10271static void __exit niu_exit(void) 10272{ 10273 pci_unregister_driver(&niu_pci_driver); 10274#ifdef CONFIG_SPARC64 10275 of_unregister_driver(&niu_of_driver); 10276#endif 10277} 10278 10279module_init(niu_init); 10280module_exit(niu_exit);