Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
niu.c at v2.6.38-rc6 (10270 lines, 236 kB)
/* niu.c: Neptune ethernet driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/log2.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <linux/io.h>
#include <linux/of_device.h>

#include "niu.h"

#define DRV_MODULE_NAME		"niu"
#define DRV_MODULE_VERSION	"1.1"
#define DRV_MODULE_RELDATE	"Apr 22, 2010"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("NIU ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#ifndef readq
static u64 readq(void __iomem *reg)
{
	return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
}

static void writeq(u64 val, void __iomem *reg)
{
	writel(val & 0xffffffff, reg);
	writel(val >> 32, reg + 0x4UL);
}
#endif

static DEFINE_PCI_DEVICE_TABLE(niu_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
	{}
};

MODULE_DEVICE_TABLE(pci, niu_pci_tbl);

#define NIU_TX_TIMEOUT			(5 * HZ)

#define nr64(reg)		readq(np->regs + (reg))
#define nw64(reg, val)		writeq((val), np->regs + (reg))

#define nr64_mac(reg)		readq(np->mac_regs + (reg))
#define nw64_mac(reg, val)	writeq((val), np->mac_regs + (reg))

#define nr64_ipp(reg)		readq(np->regs + np->ipp_off + (reg))
#define nw64_ipp(reg, val)	writeq((val), np->regs + np->ipp_off + (reg))

#define nr64_pcs(reg)		readq(np->regs + np->pcs_off + (reg))
#define nw64_pcs(reg, val)	writeq((val), np->regs + np->pcs_off + (reg))

#define nr64_xpcs(reg)		readq(np->regs + np->xpcs_off + (reg))
#define nw64_xpcs(reg, val)	writeq((val), np->regs + np->xpcs_off + (reg))

#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int niu_debug;
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "NIU debug level");

#define niu_lock_parent(np, flags) \
	spin_lock_irqsave(&np->parent->lock, flags)
#define niu_unlock_parent(np, flags) \
	spin_unlock_irqrestore(&np->parent->lock, flags)

static int serdes_init_10g_serdes(struct niu *np);

static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_mac(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;

	nw64_mac(reg, bits);
	err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64_mac(reg));
	return err;
}

#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_ipp(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;
	u64 val;

	val = nr64_ipp(reg);
	val |= bits;
	nw64_ipp(reg, val);

	err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64_ipp(reg));
	return err;
}

#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
				 u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \
})

static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
				    u64 bits, int limit, int delay,
				    const char *reg_name)
{
	int err;

	nw64(reg, bits);
	err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64(reg));
	return err;
}

#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
{
	u64 val = (u64) lp->timer;

	if (on)
		val |= LDG_IMGMT_ARM;

	nw64(LDG_IMGMT(lp->ldg_num), val);
}

static int niu_ldn_irq_enable(struct niu *np, int ldn, int on)
{
	unsigned long mask_reg, bits;
	u64 val;

	if (ldn < 0 || ldn > LDN_MAX)
		return -EINVAL;

	if (ldn < 64) {
		mask_reg = LD_IM0(ldn);
		bits = LD_IM0_MASK;
	} else {
		mask_reg = LD_IM1(ldn - 64);
		bits = LD_IM1_MASK;
	}

	val = nr64(mask_reg);
	if (on)
		val &= ~bits;
	else
		val |= bits;
	nw64(mask_reg, val);

	return 0;
}

static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on)
{
	struct niu_parent *parent = np->parent;
	int i;

	for (i = 0; i <= LDN_MAX; i++) {
		int err;

		if (parent->ldg_map[i] != lp->ldg_num)
			continue;

		err = niu_ldn_irq_enable(np, i, on);
		if (err)
			return err;
	}
	return 0;
}

static int niu_enable_interrupts(struct niu *np, int on)
{
	int i;

	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];
		int err;

		err =
niu_enable_ldn_in_ldg(np, lp, on); 275 if (err) 276 return err; 277 } 278 for (i = 0; i < np->num_ldg; i++) 279 niu_ldg_rearm(np, &np->ldg[i], on); 280 281 return 0; 282} 283 284static u32 phy_encode(u32 type, int port) 285{ 286 return type << (port * 2); 287} 288 289static u32 phy_decode(u32 val, int port) 290{ 291 return (val >> (port * 2)) & PORT_TYPE_MASK; 292} 293 294static int mdio_wait(struct niu *np) 295{ 296 int limit = 1000; 297 u64 val; 298 299 while (--limit > 0) { 300 val = nr64(MIF_FRAME_OUTPUT); 301 if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1) 302 return val & MIF_FRAME_OUTPUT_DATA; 303 304 udelay(10); 305 } 306 307 return -ENODEV; 308} 309 310static int mdio_read(struct niu *np, int port, int dev, int reg) 311{ 312 int err; 313 314 nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg)); 315 err = mdio_wait(np); 316 if (err < 0) 317 return err; 318 319 nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev)); 320 return mdio_wait(np); 321} 322 323static int mdio_write(struct niu *np, int port, int dev, int reg, int data) 324{ 325 int err; 326 327 nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg)); 328 err = mdio_wait(np); 329 if (err < 0) 330 return err; 331 332 nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data)); 333 err = mdio_wait(np); 334 if (err < 0) 335 return err; 336 337 return 0; 338} 339 340static int mii_read(struct niu *np, int port, int reg) 341{ 342 nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg)); 343 return mdio_wait(np); 344} 345 346static int mii_write(struct niu *np, int port, int reg, int data) 347{ 348 int err; 349 350 nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data)); 351 err = mdio_wait(np); 352 if (err < 0) 353 return err; 354 355 return 0; 356} 357 358static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val) 359{ 360 int err; 361 362 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 363 ESR2_TI_PLL_TX_CFG_L(channel), 364 val & 0xffff); 365 if (!err) 366 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 367 ESR2_TI_PLL_TX_CFG_H(channel), 368 val >> 16); 369 return err; 370} 371 372static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val) 373{ 374 int err; 375 376 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 377 ESR2_TI_PLL_RX_CFG_L(channel), 378 val & 0xffff); 379 if (!err) 380 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 381 ESR2_TI_PLL_RX_CFG_H(channel), 382 val >> 16); 383 return err; 384} 385 386/* Mode is always 10G fiber. */ 387static int serdes_init_niu_10g_fiber(struct niu *np) 388{ 389 struct niu_link_config *lp = &np->link_config; 390 u32 tx_cfg, rx_cfg; 391 unsigned long i; 392 393 tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV); 394 rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT | 395 PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH | 396 PLL_RX_CFG_EQ_LP_ADAPTIVE); 397 398 if (lp->loopback_mode == LOOPBACK_PHY) { 399 u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS; 400 401 mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 402 ESR2_TI_PLL_TEST_CFG_L, test_cfg); 403 404 tx_cfg |= PLL_TX_CFG_ENTEST; 405 rx_cfg |= PLL_RX_CFG_ENTEST; 406 } 407 408 /* Initialize all 4 lanes of the SERDES. 
*/ 409 for (i = 0; i < 4; i++) { 410 int err = esr2_set_tx_cfg(np, i, tx_cfg); 411 if (err) 412 return err; 413 } 414 415 for (i = 0; i < 4; i++) { 416 int err = esr2_set_rx_cfg(np, i, rx_cfg); 417 if (err) 418 return err; 419 } 420 421 return 0; 422} 423 424static int serdes_init_niu_1g_serdes(struct niu *np) 425{ 426 struct niu_link_config *lp = &np->link_config; 427 u16 pll_cfg, pll_sts; 428 int max_retry = 100; 429 u64 uninitialized_var(sig), mask, val; 430 u32 tx_cfg, rx_cfg; 431 unsigned long i; 432 int err; 433 434 tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV | 435 PLL_TX_CFG_RATE_HALF); 436 rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT | 437 PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH | 438 PLL_RX_CFG_RATE_HALF); 439 440 if (np->port == 0) 441 rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE; 442 443 if (lp->loopback_mode == LOOPBACK_PHY) { 444 u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS; 445 446 mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 447 ESR2_TI_PLL_TEST_CFG_L, test_cfg); 448 449 tx_cfg |= PLL_TX_CFG_ENTEST; 450 rx_cfg |= PLL_RX_CFG_ENTEST; 451 } 452 453 /* Initialize PLL for 1G */ 454 pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X); 455 456 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 457 ESR2_TI_PLL_CFG_L, pll_cfg); 458 if (err) { 459 netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n", 460 np->port, __func__); 461 return err; 462 } 463 464 pll_sts = PLL_CFG_ENPLL; 465 466 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 467 ESR2_TI_PLL_STS_L, pll_sts); 468 if (err) { 469 netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n", 470 np->port, __func__); 471 return err; 472 } 473 474 udelay(200); 475 476 /* Initialize all 4 lanes of the SERDES. */ 477 for (i = 0; i < 4; i++) { 478 err = esr2_set_tx_cfg(np, i, tx_cfg); 479 if (err) 480 return err; 481 } 482 483 for (i = 0; i < 4; i++) { 484 err = esr2_set_rx_cfg(np, i, rx_cfg); 485 if (err) 486 return err; 487 } 488 489 switch (np->port) { 490 case 0: 491 val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0); 492 mask = val; 493 break; 494 495 case 1: 496 val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1); 497 mask = val; 498 break; 499 500 default: 501 return -EINVAL; 502 } 503 504 while (max_retry--) { 505 sig = nr64(ESR_INT_SIGNALS); 506 if ((sig & mask) == val) 507 break; 508 509 mdelay(500); 510 } 511 512 if ((sig & mask) != val) { 513 netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", 514 np->port, (int)(sig & mask), (int)val); 515 return -ENODEV; 516 } 517 518 return 0; 519} 520 521static int serdes_init_niu_10g_serdes(struct niu *np) 522{ 523 struct niu_link_config *lp = &np->link_config; 524 u32 tx_cfg, rx_cfg, pll_cfg, pll_sts; 525 int max_retry = 100; 526 u64 uninitialized_var(sig), mask, val; 527 unsigned long i; 528 int err; 529 530 tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV); 531 rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT | 532 PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH | 533 PLL_RX_CFG_EQ_LP_ADAPTIVE); 534 535 if (lp->loopback_mode == LOOPBACK_PHY) { 536 u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS; 537 538 mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 539 ESR2_TI_PLL_TEST_CFG_L, test_cfg); 540 541 tx_cfg |= PLL_TX_CFG_ENTEST; 542 rx_cfg |= PLL_RX_CFG_ENTEST; 543 } 544 545 /* Initialize PLL for 10G */ 546 pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X); 547 548 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 549 ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff); 550 if (err) { 551 netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L 
failed\n", 552 np->port, __func__); 553 return err; 554 } 555 556 pll_sts = PLL_CFG_ENPLL; 557 558 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 559 ESR2_TI_PLL_STS_L, pll_sts & 0xffff); 560 if (err) { 561 netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n", 562 np->port, __func__); 563 return err; 564 } 565 566 udelay(200); 567 568 /* Initialize all 4 lanes of the SERDES. */ 569 for (i = 0; i < 4; i++) { 570 err = esr2_set_tx_cfg(np, i, tx_cfg); 571 if (err) 572 return err; 573 } 574 575 for (i = 0; i < 4; i++) { 576 err = esr2_set_rx_cfg(np, i, rx_cfg); 577 if (err) 578 return err; 579 } 580 581 /* check if serdes is ready */ 582 583 switch (np->port) { 584 case 0: 585 mask = ESR_INT_SIGNALS_P0_BITS; 586 val = (ESR_INT_SRDY0_P0 | 587 ESR_INT_DET0_P0 | 588 ESR_INT_XSRDY_P0 | 589 ESR_INT_XDP_P0_CH3 | 590 ESR_INT_XDP_P0_CH2 | 591 ESR_INT_XDP_P0_CH1 | 592 ESR_INT_XDP_P0_CH0); 593 break; 594 595 case 1: 596 mask = ESR_INT_SIGNALS_P1_BITS; 597 val = (ESR_INT_SRDY0_P1 | 598 ESR_INT_DET0_P1 | 599 ESR_INT_XSRDY_P1 | 600 ESR_INT_XDP_P1_CH3 | 601 ESR_INT_XDP_P1_CH2 | 602 ESR_INT_XDP_P1_CH1 | 603 ESR_INT_XDP_P1_CH0); 604 break; 605 606 default: 607 return -EINVAL; 608 } 609 610 while (max_retry--) { 611 sig = nr64(ESR_INT_SIGNALS); 612 if ((sig & mask) == val) 613 break; 614 615 mdelay(500); 616 } 617 618 if ((sig & mask) != val) { 619 pr_info("NIU Port %u signal bits [%08x] are not [%08x] for 10G...trying 1G\n", 620 np->port, (int)(sig & mask), (int)val); 621 622 /* 10G failed, try initializing at 1G */ 623 err = serdes_init_niu_1g_serdes(np); 624 if (!err) { 625 np->flags &= ~NIU_FLAGS_10G; 626 np->mac_xcvr = MAC_XCVR_PCS; 627 } else { 628 netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n", 629 np->port); 630 return -ENODEV; 631 } 632 } 633 return 0; 634} 635 636static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val) 637{ 638 int err; 639 640 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan)); 641 if (err >= 0) { 642 *val = (err & 0xffff); 643 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, 644 ESR_RXTX_CTRL_H(chan)); 645 if (err >= 0) 646 *val |= ((err & 0xffff) << 16); 647 err = 0; 648 } 649 return err; 650} 651 652static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val) 653{ 654 int err; 655 656 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, 657 ESR_GLUE_CTRL0_L(chan)); 658 if (err >= 0) { 659 *val = (err & 0xffff); 660 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, 661 ESR_GLUE_CTRL0_H(chan)); 662 if (err >= 0) { 663 *val |= ((err & 0xffff) << 16); 664 err = 0; 665 } 666 } 667 return err; 668} 669 670static int esr_read_reset(struct niu *np, u32 *val) 671{ 672 int err; 673 674 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, 675 ESR_RXTX_RESET_CTRL_L); 676 if (err >= 0) { 677 *val = (err & 0xffff); 678 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, 679 ESR_RXTX_RESET_CTRL_H); 680 if (err >= 0) { 681 *val |= ((err & 0xffff) << 16); 682 err = 0; 683 } 684 } 685 return err; 686} 687 688static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val) 689{ 690 int err; 691 692 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, 693 ESR_RXTX_CTRL_L(chan), val & 0xffff); 694 if (!err) 695 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, 696 ESR_RXTX_CTRL_H(chan), (val >> 16)); 697 return err; 698} 699 700static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val) 701{ 702 int err; 703 704 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, 705 ESR_GLUE_CTRL0_L(chan), val & 0xffff); 706 if 
(!err) 707 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, 708 ESR_GLUE_CTRL0_H(chan), (val >> 16)); 709 return err; 710} 711 712static int esr_reset(struct niu *np) 713{ 714 u32 uninitialized_var(reset); 715 int err; 716 717 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, 718 ESR_RXTX_RESET_CTRL_L, 0x0000); 719 if (err) 720 return err; 721 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, 722 ESR_RXTX_RESET_CTRL_H, 0xffff); 723 if (err) 724 return err; 725 udelay(200); 726 727 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, 728 ESR_RXTX_RESET_CTRL_L, 0xffff); 729 if (err) 730 return err; 731 udelay(200); 732 733 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, 734 ESR_RXTX_RESET_CTRL_H, 0x0000); 735 if (err) 736 return err; 737 udelay(200); 738 739 err = esr_read_reset(np, &reset); 740 if (err) 741 return err; 742 if (reset != 0) { 743 netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n", 744 np->port, reset); 745 return -ENODEV; 746 } 747 748 return 0; 749} 750 751static int serdes_init_10g(struct niu *np) 752{ 753 struct niu_link_config *lp = &np->link_config; 754 unsigned long ctrl_reg, test_cfg_reg, i; 755 u64 ctrl_val, test_cfg_val, sig, mask, val; 756 int err; 757 758 switch (np->port) { 759 case 0: 760 ctrl_reg = ENET_SERDES_0_CTRL_CFG; 761 test_cfg_reg = ENET_SERDES_0_TEST_CFG; 762 break; 763 case 1: 764 ctrl_reg = ENET_SERDES_1_CTRL_CFG; 765 test_cfg_reg = ENET_SERDES_1_TEST_CFG; 766 break; 767 768 default: 769 return -EINVAL; 770 } 771 ctrl_val = (ENET_SERDES_CTRL_SDET_0 | 772 ENET_SERDES_CTRL_SDET_1 | 773 ENET_SERDES_CTRL_SDET_2 | 774 ENET_SERDES_CTRL_SDET_3 | 775 (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) | 776 (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) | 777 (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) | 778 (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) | 779 (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) | 780 (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) | 781 (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) | 782 (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT)); 783 test_cfg_val = 0; 784 785 if (lp->loopback_mode == LOOPBACK_PHY) { 786 test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK << 787 ENET_SERDES_TEST_MD_0_SHIFT) | 788 (ENET_TEST_MD_PAD_LOOPBACK << 789 ENET_SERDES_TEST_MD_1_SHIFT) | 790 (ENET_TEST_MD_PAD_LOOPBACK << 791 ENET_SERDES_TEST_MD_2_SHIFT) | 792 (ENET_TEST_MD_PAD_LOOPBACK << 793 ENET_SERDES_TEST_MD_3_SHIFT)); 794 } 795 796 nw64(ctrl_reg, ctrl_val); 797 nw64(test_cfg_reg, test_cfg_val); 798 799 /* Initialize all 4 lanes of the SERDES. 
*/ 800 for (i = 0; i < 4; i++) { 801 u32 rxtx_ctrl, glue0; 802 803 err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl); 804 if (err) 805 return err; 806 err = esr_read_glue0(np, i, &glue0); 807 if (err) 808 return err; 809 810 rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO); 811 rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH | 812 (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT)); 813 814 glue0 &= ~(ESR_GLUE_CTRL0_SRATE | 815 ESR_GLUE_CTRL0_THCNT | 816 ESR_GLUE_CTRL0_BLTIME); 817 glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB | 818 (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) | 819 (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) | 820 (BLTIME_300_CYCLES << 821 ESR_GLUE_CTRL0_BLTIME_SHIFT)); 822 823 err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl); 824 if (err) 825 return err; 826 err = esr_write_glue0(np, i, glue0); 827 if (err) 828 return err; 829 } 830 831 err = esr_reset(np); 832 if (err) 833 return err; 834 835 sig = nr64(ESR_INT_SIGNALS); 836 switch (np->port) { 837 case 0: 838 mask = ESR_INT_SIGNALS_P0_BITS; 839 val = (ESR_INT_SRDY0_P0 | 840 ESR_INT_DET0_P0 | 841 ESR_INT_XSRDY_P0 | 842 ESR_INT_XDP_P0_CH3 | 843 ESR_INT_XDP_P0_CH2 | 844 ESR_INT_XDP_P0_CH1 | 845 ESR_INT_XDP_P0_CH0); 846 break; 847 848 case 1: 849 mask = ESR_INT_SIGNALS_P1_BITS; 850 val = (ESR_INT_SRDY0_P1 | 851 ESR_INT_DET0_P1 | 852 ESR_INT_XSRDY_P1 | 853 ESR_INT_XDP_P1_CH3 | 854 ESR_INT_XDP_P1_CH2 | 855 ESR_INT_XDP_P1_CH1 | 856 ESR_INT_XDP_P1_CH0); 857 break; 858 859 default: 860 return -EINVAL; 861 } 862 863 if ((sig & mask) != val) { 864 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { 865 np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; 866 return 0; 867 } 868 netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", 869 np->port, (int)(sig & mask), (int)val); 870 return -ENODEV; 871 } 872 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) 873 np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT; 874 return 0; 875} 876 877static int serdes_init_1g(struct niu *np) 878{ 879 u64 val; 880 881 val = nr64(ENET_SERDES_1_PLL_CFG); 882 val &= ~ENET_SERDES_PLL_FBDIV2; 883 switch (np->port) { 884 case 0: 885 val |= ENET_SERDES_PLL_HRATE0; 886 break; 887 case 1: 888 val |= ENET_SERDES_PLL_HRATE1; 889 break; 890 case 2: 891 val |= ENET_SERDES_PLL_HRATE2; 892 break; 893 case 3: 894 val |= ENET_SERDES_PLL_HRATE3; 895 break; 896 default: 897 return -EINVAL; 898 } 899 nw64(ENET_SERDES_1_PLL_CFG, val); 900 901 return 0; 902} 903 904static int serdes_init_1g_serdes(struct niu *np) 905{ 906 struct niu_link_config *lp = &np->link_config; 907 unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i; 908 u64 ctrl_val, test_cfg_val, sig, mask, val; 909 int err; 910 u64 reset_val, val_rd; 911 912 val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 | 913 ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 | 914 ENET_SERDES_PLL_FBDIV0; 915 switch (np->port) { 916 case 0: 917 reset_val = ENET_SERDES_RESET_0; 918 ctrl_reg = ENET_SERDES_0_CTRL_CFG; 919 test_cfg_reg = ENET_SERDES_0_TEST_CFG; 920 pll_cfg = ENET_SERDES_0_PLL_CFG; 921 break; 922 case 1: 923 reset_val = ENET_SERDES_RESET_1; 924 ctrl_reg = ENET_SERDES_1_CTRL_CFG; 925 test_cfg_reg = ENET_SERDES_1_TEST_CFG; 926 pll_cfg = ENET_SERDES_1_PLL_CFG; 927 break; 928 929 default: 930 return -EINVAL; 931 } 932 ctrl_val = (ENET_SERDES_CTRL_SDET_0 | 933 ENET_SERDES_CTRL_SDET_1 | 934 ENET_SERDES_CTRL_SDET_2 | 935 ENET_SERDES_CTRL_SDET_3 | 936 (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) | 937 (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) | 938 (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) | 939 (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) | 940 (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) | 941 (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) | 942 (0x1 
<< ENET_SERDES_CTRL_LADJ_2_SHIFT) | 943 (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT)); 944 test_cfg_val = 0; 945 946 if (lp->loopback_mode == LOOPBACK_PHY) { 947 test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK << 948 ENET_SERDES_TEST_MD_0_SHIFT) | 949 (ENET_TEST_MD_PAD_LOOPBACK << 950 ENET_SERDES_TEST_MD_1_SHIFT) | 951 (ENET_TEST_MD_PAD_LOOPBACK << 952 ENET_SERDES_TEST_MD_2_SHIFT) | 953 (ENET_TEST_MD_PAD_LOOPBACK << 954 ENET_SERDES_TEST_MD_3_SHIFT)); 955 } 956 957 nw64(ENET_SERDES_RESET, reset_val); 958 mdelay(20); 959 val_rd = nr64(ENET_SERDES_RESET); 960 val_rd &= ~reset_val; 961 nw64(pll_cfg, val); 962 nw64(ctrl_reg, ctrl_val); 963 nw64(test_cfg_reg, test_cfg_val); 964 nw64(ENET_SERDES_RESET, val_rd); 965 mdelay(2000); 966 967 /* Initialize all 4 lanes of the SERDES. */ 968 for (i = 0; i < 4; i++) { 969 u32 rxtx_ctrl, glue0; 970 971 err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl); 972 if (err) 973 return err; 974 err = esr_read_glue0(np, i, &glue0); 975 if (err) 976 return err; 977 978 rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO); 979 rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH | 980 (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT)); 981 982 glue0 &= ~(ESR_GLUE_CTRL0_SRATE | 983 ESR_GLUE_CTRL0_THCNT | 984 ESR_GLUE_CTRL0_BLTIME); 985 glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB | 986 (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) | 987 (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) | 988 (BLTIME_300_CYCLES << 989 ESR_GLUE_CTRL0_BLTIME_SHIFT)); 990 991 err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl); 992 if (err) 993 return err; 994 err = esr_write_glue0(np, i, glue0); 995 if (err) 996 return err; 997 } 998 999 1000 sig = nr64(ESR_INT_SIGNALS); 1001 switch (np->port) { 1002 case 0: 1003 val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0); 1004 mask = val; 1005 break; 1006 1007 case 1: 1008 val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1); 1009 mask = val; 1010 break; 1011 1012 default: 1013 return -EINVAL; 1014 } 1015 1016 if ((sig & mask) != val) { 1017 netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", 1018 np->port, (int)(sig & mask), (int)val); 1019 return -ENODEV; 1020 } 1021 1022 return 0; 1023} 1024 1025static int link_status_1g_serdes(struct niu *np, int *link_up_p) 1026{ 1027 struct niu_link_config *lp = &np->link_config; 1028 int link_up; 1029 u64 val; 1030 u16 current_speed; 1031 unsigned long flags; 1032 u8 current_duplex; 1033 1034 link_up = 0; 1035 current_speed = SPEED_INVALID; 1036 current_duplex = DUPLEX_INVALID; 1037 1038 spin_lock_irqsave(&np->lock, flags); 1039 1040 val = nr64_pcs(PCS_MII_STAT); 1041 1042 if (val & PCS_MII_STAT_LINK_STATUS) { 1043 link_up = 1; 1044 current_speed = SPEED_1000; 1045 current_duplex = DUPLEX_FULL; 1046 } 1047 1048 lp->active_speed = current_speed; 1049 lp->active_duplex = current_duplex; 1050 spin_unlock_irqrestore(&np->lock, flags); 1051 1052 *link_up_p = link_up; 1053 return 0; 1054} 1055 1056static int link_status_10g_serdes(struct niu *np, int *link_up_p) 1057{ 1058 unsigned long flags; 1059 struct niu_link_config *lp = &np->link_config; 1060 int link_up = 0; 1061 int link_ok = 1; 1062 u64 val, val2; 1063 u16 current_speed; 1064 u8 current_duplex; 1065 1066 if (!(np->flags & NIU_FLAGS_10G)) 1067 return link_status_1g_serdes(np, link_up_p); 1068 1069 current_speed = SPEED_INVALID; 1070 current_duplex = DUPLEX_INVALID; 1071 spin_lock_irqsave(&np->lock, flags); 1072 1073 val = nr64_xpcs(XPCS_STATUS(0)); 1074 val2 = nr64_mac(XMAC_INTER2); 1075 if (val2 & 0x01000000) 1076 link_ok = 0; 1077 1078 if ((val & 0x1000ULL) && link_ok) { 1079 link_up = 1; 1080 current_speed = SPEED_10000; 1081 current_duplex = DUPLEX_FULL; 
1082 } 1083 lp->active_speed = current_speed; 1084 lp->active_duplex = current_duplex; 1085 spin_unlock_irqrestore(&np->lock, flags); 1086 *link_up_p = link_up; 1087 return 0; 1088} 1089 1090static int link_status_mii(struct niu *np, int *link_up_p) 1091{ 1092 struct niu_link_config *lp = &np->link_config; 1093 int err; 1094 int bmsr, advert, ctrl1000, stat1000, lpa, bmcr, estatus; 1095 int supported, advertising, active_speed, active_duplex; 1096 1097 err = mii_read(np, np->phy_addr, MII_BMCR); 1098 if (unlikely(err < 0)) 1099 return err; 1100 bmcr = err; 1101 1102 err = mii_read(np, np->phy_addr, MII_BMSR); 1103 if (unlikely(err < 0)) 1104 return err; 1105 bmsr = err; 1106 1107 err = mii_read(np, np->phy_addr, MII_ADVERTISE); 1108 if (unlikely(err < 0)) 1109 return err; 1110 advert = err; 1111 1112 err = mii_read(np, np->phy_addr, MII_LPA); 1113 if (unlikely(err < 0)) 1114 return err; 1115 lpa = err; 1116 1117 if (likely(bmsr & BMSR_ESTATEN)) { 1118 err = mii_read(np, np->phy_addr, MII_ESTATUS); 1119 if (unlikely(err < 0)) 1120 return err; 1121 estatus = err; 1122 1123 err = mii_read(np, np->phy_addr, MII_CTRL1000); 1124 if (unlikely(err < 0)) 1125 return err; 1126 ctrl1000 = err; 1127 1128 err = mii_read(np, np->phy_addr, MII_STAT1000); 1129 if (unlikely(err < 0)) 1130 return err; 1131 stat1000 = err; 1132 } else 1133 estatus = ctrl1000 = stat1000 = 0; 1134 1135 supported = 0; 1136 if (bmsr & BMSR_ANEGCAPABLE) 1137 supported |= SUPPORTED_Autoneg; 1138 if (bmsr & BMSR_10HALF) 1139 supported |= SUPPORTED_10baseT_Half; 1140 if (bmsr & BMSR_10FULL) 1141 supported |= SUPPORTED_10baseT_Full; 1142 if (bmsr & BMSR_100HALF) 1143 supported |= SUPPORTED_100baseT_Half; 1144 if (bmsr & BMSR_100FULL) 1145 supported |= SUPPORTED_100baseT_Full; 1146 if (estatus & ESTATUS_1000_THALF) 1147 supported |= SUPPORTED_1000baseT_Half; 1148 if (estatus & ESTATUS_1000_TFULL) 1149 supported |= SUPPORTED_1000baseT_Full; 1150 lp->supported = supported; 1151 1152 advertising = 0; 1153 if (advert & ADVERTISE_10HALF) 1154 advertising |= ADVERTISED_10baseT_Half; 1155 if (advert & ADVERTISE_10FULL) 1156 advertising |= ADVERTISED_10baseT_Full; 1157 if (advert & ADVERTISE_100HALF) 1158 advertising |= ADVERTISED_100baseT_Half; 1159 if (advert & ADVERTISE_100FULL) 1160 advertising |= ADVERTISED_100baseT_Full; 1161 if (ctrl1000 & ADVERTISE_1000HALF) 1162 advertising |= ADVERTISED_1000baseT_Half; 1163 if (ctrl1000 & ADVERTISE_1000FULL) 1164 advertising |= ADVERTISED_1000baseT_Full; 1165 1166 if (bmcr & BMCR_ANENABLE) { 1167 int neg, neg1000; 1168 1169 lp->active_autoneg = 1; 1170 advertising |= ADVERTISED_Autoneg; 1171 1172 neg = advert & lpa; 1173 neg1000 = (ctrl1000 << 2) & stat1000; 1174 1175 if (neg1000 & (LPA_1000FULL | LPA_1000HALF)) 1176 active_speed = SPEED_1000; 1177 else if (neg & LPA_100) 1178 active_speed = SPEED_100; 1179 else if (neg & (LPA_10HALF | LPA_10FULL)) 1180 active_speed = SPEED_10; 1181 else 1182 active_speed = SPEED_INVALID; 1183 1184 if ((neg1000 & LPA_1000FULL) || (neg & LPA_DUPLEX)) 1185 active_duplex = DUPLEX_FULL; 1186 else if (active_speed != SPEED_INVALID) 1187 active_duplex = DUPLEX_HALF; 1188 else 1189 active_duplex = DUPLEX_INVALID; 1190 } else { 1191 lp->active_autoneg = 0; 1192 1193 if ((bmcr & BMCR_SPEED1000) && !(bmcr & BMCR_SPEED100)) 1194 active_speed = SPEED_1000; 1195 else if (bmcr & BMCR_SPEED100) 1196 active_speed = SPEED_100; 1197 else 1198 active_speed = SPEED_10; 1199 1200 if (bmcr & BMCR_FULLDPLX) 1201 active_duplex = DUPLEX_FULL; 1202 else 1203 active_duplex = DUPLEX_HALF; 1204 
} 1205 1206 lp->active_advertising = advertising; 1207 lp->active_speed = active_speed; 1208 lp->active_duplex = active_duplex; 1209 *link_up_p = !!(bmsr & BMSR_LSTATUS); 1210 1211 return 0; 1212} 1213 1214static int link_status_1g_rgmii(struct niu *np, int *link_up_p) 1215{ 1216 struct niu_link_config *lp = &np->link_config; 1217 u16 current_speed, bmsr; 1218 unsigned long flags; 1219 u8 current_duplex; 1220 int err, link_up; 1221 1222 link_up = 0; 1223 current_speed = SPEED_INVALID; 1224 current_duplex = DUPLEX_INVALID; 1225 1226 spin_lock_irqsave(&np->lock, flags); 1227 1228 err = -EINVAL; 1229 1230 err = mii_read(np, np->phy_addr, MII_BMSR); 1231 if (err < 0) 1232 goto out; 1233 1234 bmsr = err; 1235 if (bmsr & BMSR_LSTATUS) { 1236 u16 adv, lpa, common, estat; 1237 1238 err = mii_read(np, np->phy_addr, MII_ADVERTISE); 1239 if (err < 0) 1240 goto out; 1241 adv = err; 1242 1243 err = mii_read(np, np->phy_addr, MII_LPA); 1244 if (err < 0) 1245 goto out; 1246 lpa = err; 1247 1248 common = adv & lpa; 1249 1250 err = mii_read(np, np->phy_addr, MII_ESTATUS); 1251 if (err < 0) 1252 goto out; 1253 estat = err; 1254 link_up = 1; 1255 current_speed = SPEED_1000; 1256 current_duplex = DUPLEX_FULL; 1257 1258 } 1259 lp->active_speed = current_speed; 1260 lp->active_duplex = current_duplex; 1261 err = 0; 1262 1263out: 1264 spin_unlock_irqrestore(&np->lock, flags); 1265 1266 *link_up_p = link_up; 1267 return err; 1268} 1269 1270static int link_status_1g(struct niu *np, int *link_up_p) 1271{ 1272 struct niu_link_config *lp = &np->link_config; 1273 unsigned long flags; 1274 int err; 1275 1276 spin_lock_irqsave(&np->lock, flags); 1277 1278 err = link_status_mii(np, link_up_p); 1279 lp->supported |= SUPPORTED_TP; 1280 lp->active_advertising |= ADVERTISED_TP; 1281 1282 spin_unlock_irqrestore(&np->lock, flags); 1283 return err; 1284} 1285 1286static int bcm8704_reset(struct niu *np) 1287{ 1288 int err, limit; 1289 1290 err = mdio_read(np, np->phy_addr, 1291 BCM8704_PHYXS_DEV_ADDR, MII_BMCR); 1292 if (err < 0 || err == 0xffff) 1293 return err; 1294 err |= BMCR_RESET; 1295 err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, 1296 MII_BMCR, err); 1297 if (err) 1298 return err; 1299 1300 limit = 1000; 1301 while (--limit >= 0) { 1302 err = mdio_read(np, np->phy_addr, 1303 BCM8704_PHYXS_DEV_ADDR, MII_BMCR); 1304 if (err < 0) 1305 return err; 1306 if (!(err & BMCR_RESET)) 1307 break; 1308 } 1309 if (limit < 0) { 1310 netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n", 1311 np->port, (err & 0xffff)); 1312 return -ENODEV; 1313 } 1314 return 0; 1315} 1316 1317/* When written, certain PHY registers need to be read back twice 1318 * in order for the bits to settle properly. 
1319 */ 1320static int bcm8704_user_dev3_readback(struct niu *np, int reg) 1321{ 1322 int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg); 1323 if (err < 0) 1324 return err; 1325 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg); 1326 if (err < 0) 1327 return err; 1328 return 0; 1329} 1330 1331static int bcm8706_init_user_dev3(struct niu *np) 1332{ 1333 int err; 1334 1335 1336 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 1337 BCM8704_USER_OPT_DIGITAL_CTRL); 1338 if (err < 0) 1339 return err; 1340 err &= ~USER_ODIG_CTRL_GPIOS; 1341 err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT); 1342 err |= USER_ODIG_CTRL_RESV2; 1343 err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 1344 BCM8704_USER_OPT_DIGITAL_CTRL, err); 1345 if (err) 1346 return err; 1347 1348 mdelay(1000); 1349 1350 return 0; 1351} 1352 1353static int bcm8704_init_user_dev3(struct niu *np) 1354{ 1355 int err; 1356 1357 err = mdio_write(np, np->phy_addr, 1358 BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL, 1359 (USER_CONTROL_OPTXRST_LVL | 1360 USER_CONTROL_OPBIASFLT_LVL | 1361 USER_CONTROL_OBTMPFLT_LVL | 1362 USER_CONTROL_OPPRFLT_LVL | 1363 USER_CONTROL_OPTXFLT_LVL | 1364 USER_CONTROL_OPRXLOS_LVL | 1365 USER_CONTROL_OPRXFLT_LVL | 1366 USER_CONTROL_OPTXON_LVL | 1367 (0x3f << USER_CONTROL_RES1_SHIFT))); 1368 if (err) 1369 return err; 1370 1371 err = mdio_write(np, np->phy_addr, 1372 BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL, 1373 (USER_PMD_TX_CTL_XFP_CLKEN | 1374 (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) | 1375 (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) | 1376 USER_PMD_TX_CTL_TSCK_LPWREN)); 1377 if (err) 1378 return err; 1379 1380 err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL); 1381 if (err) 1382 return err; 1383 err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL); 1384 if (err) 1385 return err; 1386 1387 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 1388 BCM8704_USER_OPT_DIGITAL_CTRL); 1389 if (err < 0) 1390 return err; 1391 err &= ~USER_ODIG_CTRL_GPIOS; 1392 err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT); 1393 err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 1394 BCM8704_USER_OPT_DIGITAL_CTRL, err); 1395 if (err) 1396 return err; 1397 1398 mdelay(1000); 1399 1400 return 0; 1401} 1402 1403static int mrvl88x2011_act_led(struct niu *np, int val) 1404{ 1405 int err; 1406 1407 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, 1408 MRVL88X2011_LED_8_TO_11_CTL); 1409 if (err < 0) 1410 return err; 1411 1412 err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT,MRVL88X2011_LED_CTL_MASK); 1413 err |= MRVL88X2011_LED(MRVL88X2011_LED_ACT,val); 1414 1415 return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, 1416 MRVL88X2011_LED_8_TO_11_CTL, err); 1417} 1418 1419static int mrvl88x2011_led_blink_rate(struct niu *np, int rate) 1420{ 1421 int err; 1422 1423 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, 1424 MRVL88X2011_LED_BLINK_CTL); 1425 if (err >= 0) { 1426 err &= ~MRVL88X2011_LED_BLKRATE_MASK; 1427 err |= (rate << 4); 1428 1429 err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, 1430 MRVL88X2011_LED_BLINK_CTL, err); 1431 } 1432 1433 return err; 1434} 1435 1436static int xcvr_init_10g_mrvl88x2011(struct niu *np) 1437{ 1438 int err; 1439 1440 /* Set LED functions */ 1441 err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS); 1442 if (err) 1443 return err; 1444 1445 /* led activity */ 1446 err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF); 1447 if (err) 1448 return err; 1449 1450 err = mdio_read(np, 
np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, 1451 MRVL88X2011_GENERAL_CTL); 1452 if (err < 0) 1453 return err; 1454 1455 err |= MRVL88X2011_ENA_XFPREFCLK; 1456 1457 err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, 1458 MRVL88X2011_GENERAL_CTL, err); 1459 if (err < 0) 1460 return err; 1461 1462 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, 1463 MRVL88X2011_PMA_PMD_CTL_1); 1464 if (err < 0) 1465 return err; 1466 1467 if (np->link_config.loopback_mode == LOOPBACK_MAC) 1468 err |= MRVL88X2011_LOOPBACK; 1469 else 1470 err &= ~MRVL88X2011_LOOPBACK; 1471 1472 err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, 1473 MRVL88X2011_PMA_PMD_CTL_1, err); 1474 if (err < 0) 1475 return err; 1476 1477 /* Enable PMD */ 1478 return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, 1479 MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX); 1480} 1481 1482 1483static int xcvr_diag_bcm870x(struct niu *np) 1484{ 1485 u16 analog_stat0, tx_alarm_status; 1486 int err = 0; 1487 1488#if 1 1489 err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, 1490 MII_STAT1000); 1491 if (err < 0) 1492 return err; 1493 pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err); 1494 1495 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20); 1496 if (err < 0) 1497 return err; 1498 pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err); 1499 1500 err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, 1501 MII_NWAYTEST); 1502 if (err < 0) 1503 return err; 1504 pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err); 1505#endif 1506 1507 /* XXX dig this out it might not be so useful XXX */ 1508 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 1509 BCM8704_USER_ANALOG_STATUS0); 1510 if (err < 0) 1511 return err; 1512 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 1513 BCM8704_USER_ANALOG_STATUS0); 1514 if (err < 0) 1515 return err; 1516 analog_stat0 = err; 1517 1518 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 1519 BCM8704_USER_TX_ALARM_STATUS); 1520 if (err < 0) 1521 return err; 1522 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 1523 BCM8704_USER_TX_ALARM_STATUS); 1524 if (err < 0) 1525 return err; 1526 tx_alarm_status = err; 1527 1528 if (analog_stat0 != 0x03fc) { 1529 if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) { 1530 pr_info("Port %u cable not connected or bad cable\n", 1531 np->port); 1532 } else if (analog_stat0 == 0x639c) { 1533 pr_info("Port %u optical module is bad or missing\n", 1534 np->port); 1535 } 1536 } 1537 1538 return 0; 1539} 1540 1541static int xcvr_10g_set_lb_bcm870x(struct niu *np) 1542{ 1543 struct niu_link_config *lp = &np->link_config; 1544 int err; 1545 1546 err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, 1547 MII_BMCR); 1548 if (err < 0) 1549 return err; 1550 1551 err &= ~BMCR_LOOPBACK; 1552 1553 if (lp->loopback_mode == LOOPBACK_MAC) 1554 err |= BMCR_LOOPBACK; 1555 1556 err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, 1557 MII_BMCR, err); 1558 if (err) 1559 return err; 1560 1561 return 0; 1562} 1563 1564static int xcvr_init_10g_bcm8706(struct niu *np) 1565{ 1566 int err = 0; 1567 u64 val; 1568 1569 if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) && 1570 (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0) 1571 return err; 1572 1573 val = nr64_mac(XMAC_CONFIG); 1574 val &= ~XMAC_CONFIG_LED_POLARITY; 1575 val |= XMAC_CONFIG_FORCE_LED_ON; 1576 nw64_mac(XMAC_CONFIG, val); 1577 1578 val = nr64(MIF_CONFIG); 1579 val |= MIF_CONFIG_INDIRECT_MODE; 1580 nw64(MIF_CONFIG, val); 
1581 1582 err = bcm8704_reset(np); 1583 if (err) 1584 return err; 1585 1586 err = xcvr_10g_set_lb_bcm870x(np); 1587 if (err) 1588 return err; 1589 1590 err = bcm8706_init_user_dev3(np); 1591 if (err) 1592 return err; 1593 1594 err = xcvr_diag_bcm870x(np); 1595 if (err) 1596 return err; 1597 1598 return 0; 1599} 1600 1601static int xcvr_init_10g_bcm8704(struct niu *np) 1602{ 1603 int err; 1604 1605 err = bcm8704_reset(np); 1606 if (err) 1607 return err; 1608 1609 err = bcm8704_init_user_dev3(np); 1610 if (err) 1611 return err; 1612 1613 err = xcvr_10g_set_lb_bcm870x(np); 1614 if (err) 1615 return err; 1616 1617 err = xcvr_diag_bcm870x(np); 1618 if (err) 1619 return err; 1620 1621 return 0; 1622} 1623 1624static int xcvr_init_10g(struct niu *np) 1625{ 1626 int phy_id, err; 1627 u64 val; 1628 1629 val = nr64_mac(XMAC_CONFIG); 1630 val &= ~XMAC_CONFIG_LED_POLARITY; 1631 val |= XMAC_CONFIG_FORCE_LED_ON; 1632 nw64_mac(XMAC_CONFIG, val); 1633 1634 /* XXX shared resource, lock parent XXX */ 1635 val = nr64(MIF_CONFIG); 1636 val |= MIF_CONFIG_INDIRECT_MODE; 1637 nw64(MIF_CONFIG, val); 1638 1639 phy_id = phy_decode(np->parent->port_phy, np->port); 1640 phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port]; 1641 1642 /* handle different phy types */ 1643 switch (phy_id & NIU_PHY_ID_MASK) { 1644 case NIU_PHY_ID_MRVL88X2011: 1645 err = xcvr_init_10g_mrvl88x2011(np); 1646 break; 1647 1648 default: /* bcom 8704 */ 1649 err = xcvr_init_10g_bcm8704(np); 1650 break; 1651 } 1652 1653 return 0; 1654} 1655 1656static int mii_reset(struct niu *np) 1657{ 1658 int limit, err; 1659 1660 err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET); 1661 if (err) 1662 return err; 1663 1664 limit = 1000; 1665 while (--limit >= 0) { 1666 udelay(500); 1667 err = mii_read(np, np->phy_addr, MII_BMCR); 1668 if (err < 0) 1669 return err; 1670 if (!(err & BMCR_RESET)) 1671 break; 1672 } 1673 if (limit < 0) { 1674 netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n", 1675 np->port, err); 1676 return -ENODEV; 1677 } 1678 1679 return 0; 1680} 1681 1682static int xcvr_init_1g_rgmii(struct niu *np) 1683{ 1684 int err; 1685 u64 val; 1686 u16 bmcr, bmsr, estat; 1687 1688 val = nr64(MIF_CONFIG); 1689 val &= ~MIF_CONFIG_INDIRECT_MODE; 1690 nw64(MIF_CONFIG, val); 1691 1692 err = mii_reset(np); 1693 if (err) 1694 return err; 1695 1696 err = mii_read(np, np->phy_addr, MII_BMSR); 1697 if (err < 0) 1698 return err; 1699 bmsr = err; 1700 1701 estat = 0; 1702 if (bmsr & BMSR_ESTATEN) { 1703 err = mii_read(np, np->phy_addr, MII_ESTATUS); 1704 if (err < 0) 1705 return err; 1706 estat = err; 1707 } 1708 1709 bmcr = 0; 1710 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); 1711 if (err) 1712 return err; 1713 1714 if (bmsr & BMSR_ESTATEN) { 1715 u16 ctrl1000 = 0; 1716 1717 if (estat & ESTATUS_1000_TFULL) 1718 ctrl1000 |= ADVERTISE_1000FULL; 1719 err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000); 1720 if (err) 1721 return err; 1722 } 1723 1724 bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX); 1725 1726 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); 1727 if (err) 1728 return err; 1729 1730 err = mii_read(np, np->phy_addr, MII_BMCR); 1731 if (err < 0) 1732 return err; 1733 bmcr = mii_read(np, np->phy_addr, MII_BMCR); 1734 1735 err = mii_read(np, np->phy_addr, MII_BMSR); 1736 if (err < 0) 1737 return err; 1738 1739 return 0; 1740} 1741 1742static int mii_init_common(struct niu *np) 1743{ 1744 struct niu_link_config *lp = &np->link_config; 1745 u16 bmcr, bmsr, adv, estat; 1746 int err; 1747 1748 err = mii_reset(np); 1749 if (err) 
1750 return err; 1751 1752 err = mii_read(np, np->phy_addr, MII_BMSR); 1753 if (err < 0) 1754 return err; 1755 bmsr = err; 1756 1757 estat = 0; 1758 if (bmsr & BMSR_ESTATEN) { 1759 err = mii_read(np, np->phy_addr, MII_ESTATUS); 1760 if (err < 0) 1761 return err; 1762 estat = err; 1763 } 1764 1765 bmcr = 0; 1766 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); 1767 if (err) 1768 return err; 1769 1770 if (lp->loopback_mode == LOOPBACK_MAC) { 1771 bmcr |= BMCR_LOOPBACK; 1772 if (lp->active_speed == SPEED_1000) 1773 bmcr |= BMCR_SPEED1000; 1774 if (lp->active_duplex == DUPLEX_FULL) 1775 bmcr |= BMCR_FULLDPLX; 1776 } 1777 1778 if (lp->loopback_mode == LOOPBACK_PHY) { 1779 u16 aux; 1780 1781 aux = (BCM5464R_AUX_CTL_EXT_LB | 1782 BCM5464R_AUX_CTL_WRITE_1); 1783 err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux); 1784 if (err) 1785 return err; 1786 } 1787 1788 if (lp->autoneg) { 1789 u16 ctrl1000; 1790 1791 adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP; 1792 if ((bmsr & BMSR_10HALF) && 1793 (lp->advertising & ADVERTISED_10baseT_Half)) 1794 adv |= ADVERTISE_10HALF; 1795 if ((bmsr & BMSR_10FULL) && 1796 (lp->advertising & ADVERTISED_10baseT_Full)) 1797 adv |= ADVERTISE_10FULL; 1798 if ((bmsr & BMSR_100HALF) && 1799 (lp->advertising & ADVERTISED_100baseT_Half)) 1800 adv |= ADVERTISE_100HALF; 1801 if ((bmsr & BMSR_100FULL) && 1802 (lp->advertising & ADVERTISED_100baseT_Full)) 1803 adv |= ADVERTISE_100FULL; 1804 err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv); 1805 if (err) 1806 return err; 1807 1808 if (likely(bmsr & BMSR_ESTATEN)) { 1809 ctrl1000 = 0; 1810 if ((estat & ESTATUS_1000_THALF) && 1811 (lp->advertising & ADVERTISED_1000baseT_Half)) 1812 ctrl1000 |= ADVERTISE_1000HALF; 1813 if ((estat & ESTATUS_1000_TFULL) && 1814 (lp->advertising & ADVERTISED_1000baseT_Full)) 1815 ctrl1000 |= ADVERTISE_1000FULL; 1816 err = mii_write(np, np->phy_addr, 1817 MII_CTRL1000, ctrl1000); 1818 if (err) 1819 return err; 1820 } 1821 1822 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 1823 } else { 1824 /* !lp->autoneg */ 1825 int fulldpx; 1826 1827 if (lp->duplex == DUPLEX_FULL) { 1828 bmcr |= BMCR_FULLDPLX; 1829 fulldpx = 1; 1830 } else if (lp->duplex == DUPLEX_HALF) 1831 fulldpx = 0; 1832 else 1833 return -EINVAL; 1834 1835 if (lp->speed == SPEED_1000) { 1836 /* if X-full requested while not supported, or 1837 X-half requested while not supported... 
*/ 1838 if ((fulldpx && !(estat & ESTATUS_1000_TFULL)) || 1839 (!fulldpx && !(estat & ESTATUS_1000_THALF))) 1840 return -EINVAL; 1841 bmcr |= BMCR_SPEED1000; 1842 } else if (lp->speed == SPEED_100) { 1843 if ((fulldpx && !(bmsr & BMSR_100FULL)) || 1844 (!fulldpx && !(bmsr & BMSR_100HALF))) 1845 return -EINVAL; 1846 bmcr |= BMCR_SPEED100; 1847 } else if (lp->speed == SPEED_10) { 1848 if ((fulldpx && !(bmsr & BMSR_10FULL)) || 1849 (!fulldpx && !(bmsr & BMSR_10HALF))) 1850 return -EINVAL; 1851 } else 1852 return -EINVAL; 1853 } 1854 1855 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); 1856 if (err) 1857 return err; 1858 1859#if 0 1860 err = mii_read(np, np->phy_addr, MII_BMCR); 1861 if (err < 0) 1862 return err; 1863 bmcr = err; 1864 1865 err = mii_read(np, np->phy_addr, MII_BMSR); 1866 if (err < 0) 1867 return err; 1868 bmsr = err; 1869 1870 pr_info("Port %u after MII init bmcr[%04x] bmsr[%04x]\n", 1871 np->port, bmcr, bmsr); 1872#endif 1873 1874 return 0; 1875} 1876 1877static int xcvr_init_1g(struct niu *np) 1878{ 1879 u64 val; 1880 1881 /* XXX shared resource, lock parent XXX */ 1882 val = nr64(MIF_CONFIG); 1883 val &= ~MIF_CONFIG_INDIRECT_MODE; 1884 nw64(MIF_CONFIG, val); 1885 1886 return mii_init_common(np); 1887} 1888 1889static int niu_xcvr_init(struct niu *np) 1890{ 1891 const struct niu_phy_ops *ops = np->phy_ops; 1892 int err; 1893 1894 err = 0; 1895 if (ops->xcvr_init) 1896 err = ops->xcvr_init(np); 1897 1898 return err; 1899} 1900 1901static int niu_serdes_init(struct niu *np) 1902{ 1903 const struct niu_phy_ops *ops = np->phy_ops; 1904 int err; 1905 1906 err = 0; 1907 if (ops->serdes_init) 1908 err = ops->serdes_init(np); 1909 1910 return err; 1911} 1912 1913static void niu_init_xif(struct niu *); 1914static void niu_handle_led(struct niu *, int status); 1915 1916static int niu_link_status_common(struct niu *np, int link_up) 1917{ 1918 struct niu_link_config *lp = &np->link_config; 1919 struct net_device *dev = np->dev; 1920 unsigned long flags; 1921 1922 if (!netif_carrier_ok(dev) && link_up) { 1923 netif_info(np, link, dev, "Link is up at %s, %s duplex\n", 1924 lp->active_speed == SPEED_10000 ? "10Gb/sec" : 1925 lp->active_speed == SPEED_1000 ? "1Gb/sec" : 1926 lp->active_speed == SPEED_100 ? "100Mbit/sec" : 1927 "10Mbit/sec", 1928 lp->active_duplex == DUPLEX_FULL ? "full" : "half"); 1929 1930 spin_lock_irqsave(&np->lock, flags); 1931 niu_init_xif(np); 1932 niu_handle_led(np, 1); 1933 spin_unlock_irqrestore(&np->lock, flags); 1934 1935 netif_carrier_on(dev); 1936 } else if (netif_carrier_ok(dev) && !link_up) { 1937 netif_warn(np, link, dev, "Link is down\n"); 1938 spin_lock_irqsave(&np->lock, flags); 1939 niu_handle_led(np, 0); 1940 spin_unlock_irqrestore(&np->lock, flags); 1941 netif_carrier_off(dev); 1942 } 1943 1944 return 0; 1945} 1946 1947static int link_status_10g_mrvl(struct niu *np, int *link_up_p) 1948{ 1949 int err, link_up, pma_status, pcs_status; 1950 1951 link_up = 0; 1952 1953 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, 1954 MRVL88X2011_10G_PMD_STATUS_2); 1955 if (err < 0) 1956 goto out; 1957 1958 /* Check PMA/PMD Register: 1.0001.2 == 1 */ 1959 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, 1960 MRVL88X2011_PMA_PMD_STATUS_1); 1961 if (err < 0) 1962 goto out; 1963 1964 pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 
1 : 0); 1965 1966 /* Check PMC Register : 3.0001.2 == 1: read twice */ 1967 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, 1968 MRVL88X2011_PMA_PMD_STATUS_1); 1969 if (err < 0) 1970 goto out; 1971 1972 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, 1973 MRVL88X2011_PMA_PMD_STATUS_1); 1974 if (err < 0) 1975 goto out; 1976 1977 pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0); 1978 1979 /* Check XGXS Register : 4.0018.[0-3,12] */ 1980 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR, 1981 MRVL88X2011_10G_XGXS_LANE_STAT); 1982 if (err < 0) 1983 goto out; 1984 1985 if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 | 1986 PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 | 1987 PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC | 1988 0x800)) 1989 link_up = (pma_status && pcs_status) ? 1 : 0; 1990 1991 np->link_config.active_speed = SPEED_10000; 1992 np->link_config.active_duplex = DUPLEX_FULL; 1993 err = 0; 1994out: 1995 mrvl88x2011_act_led(np, (link_up ? 1996 MRVL88X2011_LED_CTL_PCS_ACT : 1997 MRVL88X2011_LED_CTL_OFF)); 1998 1999 *link_up_p = link_up; 2000 return err; 2001} 2002 2003static int link_status_10g_bcm8706(struct niu *np, int *link_up_p) 2004{ 2005 int err, link_up; 2006 link_up = 0; 2007 2008 err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, 2009 BCM8704_PMD_RCV_SIGDET); 2010 if (err < 0 || err == 0xffff) 2011 goto out; 2012 if (!(err & PMD_RCV_SIGDET_GLOBAL)) { 2013 err = 0; 2014 goto out; 2015 } 2016 2017 err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, 2018 BCM8704_PCS_10G_R_STATUS); 2019 if (err < 0) 2020 goto out; 2021 2022 if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) { 2023 err = 0; 2024 goto out; 2025 } 2026 2027 err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, 2028 BCM8704_PHYXS_XGXS_LANE_STAT); 2029 if (err < 0) 2030 goto out; 2031 if (err != (PHYXS_XGXS_LANE_STAT_ALINGED | 2032 PHYXS_XGXS_LANE_STAT_MAGIC | 2033 PHYXS_XGXS_LANE_STAT_PATTEST | 2034 PHYXS_XGXS_LANE_STAT_LANE3 | 2035 PHYXS_XGXS_LANE_STAT_LANE2 | 2036 PHYXS_XGXS_LANE_STAT_LANE1 | 2037 PHYXS_XGXS_LANE_STAT_LANE0)) { 2038 err = 0; 2039 np->link_config.active_speed = SPEED_INVALID; 2040 np->link_config.active_duplex = DUPLEX_INVALID; 2041 goto out; 2042 } 2043 2044 link_up = 1; 2045 np->link_config.active_speed = SPEED_10000; 2046 np->link_config.active_duplex = DUPLEX_FULL; 2047 err = 0; 2048 2049out: 2050 *link_up_p = link_up; 2051 return err; 2052} 2053 2054static int link_status_10g_bcom(struct niu *np, int *link_up_p) 2055{ 2056 int err, link_up; 2057 2058 link_up = 0; 2059 2060 err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, 2061 BCM8704_PMD_RCV_SIGDET); 2062 if (err < 0) 2063 goto out; 2064 if (!(err & PMD_RCV_SIGDET_GLOBAL)) { 2065 err = 0; 2066 goto out; 2067 } 2068 2069 err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, 2070 BCM8704_PCS_10G_R_STATUS); 2071 if (err < 0) 2072 goto out; 2073 if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) { 2074 err = 0; 2075 goto out; 2076 } 2077 2078 err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, 2079 BCM8704_PHYXS_XGXS_LANE_STAT); 2080 if (err < 0) 2081 goto out; 2082 2083 if (err != (PHYXS_XGXS_LANE_STAT_ALINGED | 2084 PHYXS_XGXS_LANE_STAT_MAGIC | 2085 PHYXS_XGXS_LANE_STAT_LANE3 | 2086 PHYXS_XGXS_LANE_STAT_LANE2 | 2087 PHYXS_XGXS_LANE_STAT_LANE1 | 2088 PHYXS_XGXS_LANE_STAT_LANE0)) { 2089 err = 0; 2090 goto out; 2091 } 2092 2093 link_up = 1; 2094 np->link_config.active_speed = SPEED_10000; 2095 np->link_config.active_duplex = DUPLEX_FULL; 2096 err = 
0; 2097 2098out: 2099 *link_up_p = link_up; 2100 return err; 2101} 2102 2103static int link_status_10g(struct niu *np, int *link_up_p) 2104{ 2105 unsigned long flags; 2106 int err = -EINVAL; 2107 2108 spin_lock_irqsave(&np->lock, flags); 2109 2110 if (np->link_config.loopback_mode == LOOPBACK_DISABLED) { 2111 int phy_id; 2112 2113 phy_id = phy_decode(np->parent->port_phy, np->port); 2114 phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port]; 2115 2116 /* handle different phy types */ 2117 switch (phy_id & NIU_PHY_ID_MASK) { 2118 case NIU_PHY_ID_MRVL88X2011: 2119 err = link_status_10g_mrvl(np, link_up_p); 2120 break; 2121 2122 default: /* bcom 8704 */ 2123 err = link_status_10g_bcom(np, link_up_p); 2124 break; 2125 } 2126 } 2127 2128 spin_unlock_irqrestore(&np->lock, flags); 2129 2130 return err; 2131} 2132 2133static int niu_10g_phy_present(struct niu *np) 2134{ 2135 u64 sig, mask, val; 2136 2137 sig = nr64(ESR_INT_SIGNALS); 2138 switch (np->port) { 2139 case 0: 2140 mask = ESR_INT_SIGNALS_P0_BITS; 2141 val = (ESR_INT_SRDY0_P0 | 2142 ESR_INT_DET0_P0 | 2143 ESR_INT_XSRDY_P0 | 2144 ESR_INT_XDP_P0_CH3 | 2145 ESR_INT_XDP_P0_CH2 | 2146 ESR_INT_XDP_P0_CH1 | 2147 ESR_INT_XDP_P0_CH0); 2148 break; 2149 2150 case 1: 2151 mask = ESR_INT_SIGNALS_P1_BITS; 2152 val = (ESR_INT_SRDY0_P1 | 2153 ESR_INT_DET0_P1 | 2154 ESR_INT_XSRDY_P1 | 2155 ESR_INT_XDP_P1_CH3 | 2156 ESR_INT_XDP_P1_CH2 | 2157 ESR_INT_XDP_P1_CH1 | 2158 ESR_INT_XDP_P1_CH0); 2159 break; 2160 2161 default: 2162 return 0; 2163 } 2164 2165 if ((sig & mask) != val) 2166 return 0; 2167 return 1; 2168} 2169 2170static int link_status_10g_hotplug(struct niu *np, int *link_up_p) 2171{ 2172 unsigned long flags; 2173 int err = 0; 2174 int phy_present; 2175 int phy_present_prev; 2176 2177 spin_lock_irqsave(&np->lock, flags); 2178 2179 if (np->link_config.loopback_mode == LOOPBACK_DISABLED) { 2180 phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ? 
2181 1 : 0; 2182 phy_present = niu_10g_phy_present(np); 2183 if (phy_present != phy_present_prev) { 2184 /* state change */ 2185 if (phy_present) { 2186 /* A NEM was just plugged in */ 2187 np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT; 2188 if (np->phy_ops->xcvr_init) 2189 err = np->phy_ops->xcvr_init(np); 2190 if (err) { 2191 err = mdio_read(np, np->phy_addr, 2192 BCM8704_PHYXS_DEV_ADDR, MII_BMCR); 2193 if (err == 0xffff) { 2194 /* No mdio, back-to-back XAUI */ 2195 goto out; 2196 } 2197 /* debounce */ 2198 np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; 2199 } 2200 } else { 2201 np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; 2202 *link_up_p = 0; 2203 netif_warn(np, link, np->dev, 2204 "Hotplug PHY Removed\n"); 2205 } 2206 } 2207out: 2208 if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) { 2209 err = link_status_10g_bcm8706(np, link_up_p); 2210 if (err == 0xffff) { 2211 /* No mdio, back-to-back XAUI: it is C10NEM */ 2212 *link_up_p = 1; 2213 np->link_config.active_speed = SPEED_10000; 2214 np->link_config.active_duplex = DUPLEX_FULL; 2215 } 2216 } 2217 } 2218 2219 spin_unlock_irqrestore(&np->lock, flags); 2220 2221 return 0; 2222} 2223 2224static int niu_link_status(struct niu *np, int *link_up_p) 2225{ 2226 const struct niu_phy_ops *ops = np->phy_ops; 2227 int err; 2228 2229 err = 0; 2230 if (ops->link_status) 2231 err = ops->link_status(np, link_up_p); 2232 2233 return err; 2234} 2235 2236static void niu_timer(unsigned long __opaque) 2237{ 2238 struct niu *np = (struct niu *) __opaque; 2239 unsigned long off; 2240 int err, link_up; 2241 2242 err = niu_link_status(np, &link_up); 2243 if (!err) 2244 niu_link_status_common(np, link_up); 2245 2246 if (netif_carrier_ok(np->dev)) 2247 off = 5 * HZ; 2248 else 2249 off = 1 * HZ; 2250 np->timer.expires = jiffies + off; 2251 2252 add_timer(&np->timer); 2253} 2254 2255static const struct niu_phy_ops phy_ops_10g_serdes = { 2256 .serdes_init = serdes_init_10g_serdes, 2257 .link_status = link_status_10g_serdes, 2258}; 2259 2260static const struct niu_phy_ops phy_ops_10g_serdes_niu = { 2261 .serdes_init = serdes_init_niu_10g_serdes, 2262 .link_status = link_status_10g_serdes, 2263}; 2264 2265static const struct niu_phy_ops phy_ops_1g_serdes_niu = { 2266 .serdes_init = serdes_init_niu_1g_serdes, 2267 .link_status = link_status_1g_serdes, 2268}; 2269 2270static const struct niu_phy_ops phy_ops_1g_rgmii = { 2271 .xcvr_init = xcvr_init_1g_rgmii, 2272 .link_status = link_status_1g_rgmii, 2273}; 2274 2275static const struct niu_phy_ops phy_ops_10g_fiber_niu = { 2276 .serdes_init = serdes_init_niu_10g_fiber, 2277 .xcvr_init = xcvr_init_10g, 2278 .link_status = link_status_10g, 2279}; 2280 2281static const struct niu_phy_ops phy_ops_10g_fiber = { 2282 .serdes_init = serdes_init_10g, 2283 .xcvr_init = xcvr_init_10g, 2284 .link_status = link_status_10g, 2285}; 2286 2287static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = { 2288 .serdes_init = serdes_init_10g, 2289 .xcvr_init = xcvr_init_10g_bcm8706, 2290 .link_status = link_status_10g_hotplug, 2291}; 2292 2293static const struct niu_phy_ops phy_ops_niu_10g_hotplug = { 2294 .serdes_init = serdes_init_niu_10g_fiber, 2295 .xcvr_init = xcvr_init_10g_bcm8706, 2296 .link_status = link_status_10g_hotplug, 2297}; 2298 2299static const struct niu_phy_ops phy_ops_10g_copper = { 2300 .serdes_init = serdes_init_10g, 2301 .link_status = link_status_10g, /* XXX */ 2302}; 2303 2304static const struct niu_phy_ops phy_ops_1g_fiber = { 2305 .serdes_init = serdes_init_1g, 2306 .xcvr_init = xcvr_init_1g, 2307 .link_status = 
link_status_1g, 2308}; 2309 2310static const struct niu_phy_ops phy_ops_1g_copper = { 2311 .xcvr_init = xcvr_init_1g, 2312 .link_status = link_status_1g, 2313}; 2314 2315struct niu_phy_template { 2316 const struct niu_phy_ops *ops; 2317 u32 phy_addr_base; 2318}; 2319 2320static const struct niu_phy_template phy_template_niu_10g_fiber = { 2321 .ops = &phy_ops_10g_fiber_niu, 2322 .phy_addr_base = 16, 2323}; 2324 2325static const struct niu_phy_template phy_template_niu_10g_serdes = { 2326 .ops = &phy_ops_10g_serdes_niu, 2327 .phy_addr_base = 0, 2328}; 2329 2330static const struct niu_phy_template phy_template_niu_1g_serdes = { 2331 .ops = &phy_ops_1g_serdes_niu, 2332 .phy_addr_base = 0, 2333}; 2334 2335static const struct niu_phy_template phy_template_10g_fiber = { 2336 .ops = &phy_ops_10g_fiber, 2337 .phy_addr_base = 8, 2338}; 2339 2340static const struct niu_phy_template phy_template_10g_fiber_hotplug = { 2341 .ops = &phy_ops_10g_fiber_hotplug, 2342 .phy_addr_base = 8, 2343}; 2344 2345static const struct niu_phy_template phy_template_niu_10g_hotplug = { 2346 .ops = &phy_ops_niu_10g_hotplug, 2347 .phy_addr_base = 8, 2348}; 2349 2350static const struct niu_phy_template phy_template_10g_copper = { 2351 .ops = &phy_ops_10g_copper, 2352 .phy_addr_base = 10, 2353}; 2354 2355static const struct niu_phy_template phy_template_1g_fiber = { 2356 .ops = &phy_ops_1g_fiber, 2357 .phy_addr_base = 0, 2358}; 2359 2360static const struct niu_phy_template phy_template_1g_copper = { 2361 .ops = &phy_ops_1g_copper, 2362 .phy_addr_base = 0, 2363}; 2364 2365static const struct niu_phy_template phy_template_1g_rgmii = { 2366 .ops = &phy_ops_1g_rgmii, 2367 .phy_addr_base = 0, 2368}; 2369 2370static const struct niu_phy_template phy_template_10g_serdes = { 2371 .ops = &phy_ops_10g_serdes, 2372 .phy_addr_base = 0, 2373}; 2374 2375static int niu_atca_port_num[4] = { 2376 0, 0, 11, 10 2377}; 2378 2379static int serdes_init_10g_serdes(struct niu *np) 2380{ 2381 struct niu_link_config *lp = &np->link_config; 2382 unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i; 2383 u64 ctrl_val, test_cfg_val, sig, mask, val; 2384 u64 reset_val; 2385 2386 switch (np->port) { 2387 case 0: 2388 reset_val = ENET_SERDES_RESET_0; 2389 ctrl_reg = ENET_SERDES_0_CTRL_CFG; 2390 test_cfg_reg = ENET_SERDES_0_TEST_CFG; 2391 pll_cfg = ENET_SERDES_0_PLL_CFG; 2392 break; 2393 case 1: 2394 reset_val = ENET_SERDES_RESET_1; 2395 ctrl_reg = ENET_SERDES_1_CTRL_CFG; 2396 test_cfg_reg = ENET_SERDES_1_TEST_CFG; 2397 pll_cfg = ENET_SERDES_1_PLL_CFG; 2398 break; 2399 2400 default: 2401 return -EINVAL; 2402 } 2403 ctrl_val = (ENET_SERDES_CTRL_SDET_0 | 2404 ENET_SERDES_CTRL_SDET_1 | 2405 ENET_SERDES_CTRL_SDET_2 | 2406 ENET_SERDES_CTRL_SDET_3 | 2407 (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) | 2408 (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) | 2409 (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) | 2410 (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) | 2411 (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) | 2412 (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) | 2413 (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) | 2414 (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT)); 2415 test_cfg_val = 0; 2416 2417 if (lp->loopback_mode == LOOPBACK_PHY) { 2418 test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK << 2419 ENET_SERDES_TEST_MD_0_SHIFT) | 2420 (ENET_TEST_MD_PAD_LOOPBACK << 2421 ENET_SERDES_TEST_MD_1_SHIFT) | 2422 (ENET_TEST_MD_PAD_LOOPBACK << 2423 ENET_SERDES_TEST_MD_2_SHIFT) | 2424 (ENET_TEST_MD_PAD_LOOPBACK << 2425 ENET_SERDES_TEST_MD_3_SHIFT)); 2426 } 2427 2428 esr_reset(np); 2429 nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2); 2430 
nw64(ctrl_reg, ctrl_val); 2431 nw64(test_cfg_reg, test_cfg_val); 2432 2433 /* Initialize all 4 lanes of the SERDES. */ 2434 for (i = 0; i < 4; i++) { 2435 u32 rxtx_ctrl, glue0; 2436 int err; 2437 2438 err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl); 2439 if (err) 2440 return err; 2441 err = esr_read_glue0(np, i, &glue0); 2442 if (err) 2443 return err; 2444 2445 rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO); 2446 rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH | 2447 (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT)); 2448 2449 glue0 &= ~(ESR_GLUE_CTRL0_SRATE | 2450 ESR_GLUE_CTRL0_THCNT | 2451 ESR_GLUE_CTRL0_BLTIME); 2452 glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB | 2453 (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) | 2454 (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) | 2455 (BLTIME_300_CYCLES << 2456 ESR_GLUE_CTRL0_BLTIME_SHIFT)); 2457 2458 err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl); 2459 if (err) 2460 return err; 2461 err = esr_write_glue0(np, i, glue0); 2462 if (err) 2463 return err; 2464 } 2465 2466 2467 sig = nr64(ESR_INT_SIGNALS); 2468 switch (np->port) { 2469 case 0: 2470 mask = ESR_INT_SIGNALS_P0_BITS; 2471 val = (ESR_INT_SRDY0_P0 | 2472 ESR_INT_DET0_P0 | 2473 ESR_INT_XSRDY_P0 | 2474 ESR_INT_XDP_P0_CH3 | 2475 ESR_INT_XDP_P0_CH2 | 2476 ESR_INT_XDP_P0_CH1 | 2477 ESR_INT_XDP_P0_CH0); 2478 break; 2479 2480 case 1: 2481 mask = ESR_INT_SIGNALS_P1_BITS; 2482 val = (ESR_INT_SRDY0_P1 | 2483 ESR_INT_DET0_P1 | 2484 ESR_INT_XSRDY_P1 | 2485 ESR_INT_XDP_P1_CH3 | 2486 ESR_INT_XDP_P1_CH2 | 2487 ESR_INT_XDP_P1_CH1 | 2488 ESR_INT_XDP_P1_CH0); 2489 break; 2490 2491 default: 2492 return -EINVAL; 2493 } 2494 2495 if ((sig & mask) != val) { 2496 int err; 2497 err = serdes_init_1g_serdes(np); 2498 if (!err) { 2499 np->flags &= ~NIU_FLAGS_10G; 2500 np->mac_xcvr = MAC_XCVR_PCS; 2501 } else { 2502 netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n", 2503 np->port); 2504 return -ENODEV; 2505 } 2506 } 2507 2508 return 0; 2509} 2510 2511static int niu_determine_phy_disposition(struct niu *np) 2512{ 2513 struct niu_parent *parent = np->parent; 2514 u8 plat_type = parent->plat_type; 2515 const struct niu_phy_template *tp; 2516 u32 phy_addr_off = 0; 2517 2518 if (plat_type == PLAT_TYPE_NIU) { 2519 switch (np->flags & 2520 (NIU_FLAGS_10G | 2521 NIU_FLAGS_FIBER | 2522 NIU_FLAGS_XCVR_SERDES)) { 2523 case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES: 2524 /* 10G Serdes */ 2525 tp = &phy_template_niu_10g_serdes; 2526 break; 2527 case NIU_FLAGS_XCVR_SERDES: 2528 /* 1G Serdes */ 2529 tp = &phy_template_niu_1g_serdes; 2530 break; 2531 case NIU_FLAGS_10G | NIU_FLAGS_FIBER: 2532 /* 10G Fiber */ 2533 default: 2534 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { 2535 tp = &phy_template_niu_10g_hotplug; 2536 if (np->port == 0) 2537 phy_addr_off = 8; 2538 if (np->port == 1) 2539 phy_addr_off = 12; 2540 } else { 2541 tp = &phy_template_niu_10g_fiber; 2542 phy_addr_off += np->port; 2543 } 2544 break; 2545 } 2546 } else { 2547 switch (np->flags & 2548 (NIU_FLAGS_10G | 2549 NIU_FLAGS_FIBER | 2550 NIU_FLAGS_XCVR_SERDES)) { 2551 case 0: 2552 /* 1G copper */ 2553 tp = &phy_template_1g_copper; 2554 if (plat_type == PLAT_TYPE_VF_P0) 2555 phy_addr_off = 10; 2556 else if (plat_type == PLAT_TYPE_VF_P1) 2557 phy_addr_off = 26; 2558 2559 phy_addr_off += (np->port ^ 0x3); 2560 break; 2561 2562 case NIU_FLAGS_10G: 2563 /* 10G copper */ 2564 tp = &phy_template_10g_copper; 2565 break; 2566 2567 case NIU_FLAGS_FIBER: 2568 /* 1G fiber */ 2569 tp = &phy_template_1g_fiber; 2570 break; 2571 2572 case NIU_FLAGS_10G | NIU_FLAGS_FIBER: 2573 /* 10G fiber */ 2574 tp = &phy_template_10g_fiber; 2575 if (plat_type == 
PLAT_TYPE_VF_P0 || 2576 plat_type == PLAT_TYPE_VF_P1) 2577 phy_addr_off = 8; 2578 phy_addr_off += np->port; 2579 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { 2580 tp = &phy_template_10g_fiber_hotplug; 2581 if (np->port == 0) 2582 phy_addr_off = 8; 2583 if (np->port == 1) 2584 phy_addr_off = 12; 2585 } 2586 break; 2587 2588 case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES: 2589 case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER: 2590 case NIU_FLAGS_XCVR_SERDES: 2591 switch(np->port) { 2592 case 0: 2593 case 1: 2594 tp = &phy_template_10g_serdes; 2595 break; 2596 case 2: 2597 case 3: 2598 tp = &phy_template_1g_rgmii; 2599 break; 2600 default: 2601 return -EINVAL; 2602 break; 2603 } 2604 phy_addr_off = niu_atca_port_num[np->port]; 2605 break; 2606 2607 default: 2608 return -EINVAL; 2609 } 2610 } 2611 2612 np->phy_ops = tp->ops; 2613 np->phy_addr = tp->phy_addr_base + phy_addr_off; 2614 2615 return 0; 2616} 2617 2618static int niu_init_link(struct niu *np) 2619{ 2620 struct niu_parent *parent = np->parent; 2621 int err, ignore; 2622 2623 if (parent->plat_type == PLAT_TYPE_NIU) { 2624 err = niu_xcvr_init(np); 2625 if (err) 2626 return err; 2627 msleep(200); 2628 } 2629 err = niu_serdes_init(np); 2630 if (err && !(np->flags & NIU_FLAGS_HOTPLUG_PHY)) 2631 return err; 2632 msleep(200); 2633 err = niu_xcvr_init(np); 2634 if (!err || (np->flags & NIU_FLAGS_HOTPLUG_PHY)) 2635 niu_link_status(np, &ignore); 2636 return 0; 2637} 2638 2639static void niu_set_primary_mac(struct niu *np, unsigned char *addr) 2640{ 2641 u16 reg0 = addr[4] << 8 | addr[5]; 2642 u16 reg1 = addr[2] << 8 | addr[3]; 2643 u16 reg2 = addr[0] << 8 | addr[1]; 2644 2645 if (np->flags & NIU_FLAGS_XMAC) { 2646 nw64_mac(XMAC_ADDR0, reg0); 2647 nw64_mac(XMAC_ADDR1, reg1); 2648 nw64_mac(XMAC_ADDR2, reg2); 2649 } else { 2650 nw64_mac(BMAC_ADDR0, reg0); 2651 nw64_mac(BMAC_ADDR1, reg1); 2652 nw64_mac(BMAC_ADDR2, reg2); 2653 } 2654} 2655 2656static int niu_num_alt_addr(struct niu *np) 2657{ 2658 if (np->flags & NIU_FLAGS_XMAC) 2659 return XMAC_NUM_ALT_ADDR; 2660 else 2661 return BMAC_NUM_ALT_ADDR; 2662} 2663 2664static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr) 2665{ 2666 u16 reg0 = addr[4] << 8 | addr[5]; 2667 u16 reg1 = addr[2] << 8 | addr[3]; 2668 u16 reg2 = addr[0] << 8 | addr[1]; 2669 2670 if (index >= niu_num_alt_addr(np)) 2671 return -EINVAL; 2672 2673 if (np->flags & NIU_FLAGS_XMAC) { 2674 nw64_mac(XMAC_ALT_ADDR0(index), reg0); 2675 nw64_mac(XMAC_ALT_ADDR1(index), reg1); 2676 nw64_mac(XMAC_ALT_ADDR2(index), reg2); 2677 } else { 2678 nw64_mac(BMAC_ALT_ADDR0(index), reg0); 2679 nw64_mac(BMAC_ALT_ADDR1(index), reg1); 2680 nw64_mac(BMAC_ALT_ADDR2(index), reg2); 2681 } 2682 2683 return 0; 2684} 2685 2686static int niu_enable_alt_mac(struct niu *np, int index, int on) 2687{ 2688 unsigned long reg; 2689 u64 val, mask; 2690 2691 if (index >= niu_num_alt_addr(np)) 2692 return -EINVAL; 2693 2694 if (np->flags & NIU_FLAGS_XMAC) { 2695 reg = XMAC_ADDR_CMPEN; 2696 mask = 1 << index; 2697 } else { 2698 reg = BMAC_ADDR_CMPEN; 2699 mask = 1 << (index + 1); 2700 } 2701 2702 val = nr64_mac(reg); 2703 if (on) 2704 val |= mask; 2705 else 2706 val &= ~mask; 2707 nw64_mac(reg, val); 2708 2709 return 0; 2710} 2711 2712static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg, 2713 int num, int mac_pref) 2714{ 2715 u64 val = nr64_mac(reg); 2716 val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR); 2717 val |= num; 2718 if (mac_pref) 2719 val |= HOST_INFO_MPR; 2720 nw64_mac(reg, val); 2721} 2722 2723static int __set_rdc_table_num(struct niu *np, 
2724 int xmac_index, int bmac_index, 2725 int rdc_table_num, int mac_pref) 2726{ 2727 unsigned long reg; 2728 2729 if (rdc_table_num & ~HOST_INFO_MACRDCTBLN) 2730 return -EINVAL; 2731 if (np->flags & NIU_FLAGS_XMAC) 2732 reg = XMAC_HOST_INFO(xmac_index); 2733 else 2734 reg = BMAC_HOST_INFO(bmac_index); 2735 __set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref); 2736 return 0; 2737} 2738 2739static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num, 2740 int mac_pref) 2741{ 2742 return __set_rdc_table_num(np, 17, 0, table_num, mac_pref); 2743} 2744 2745static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num, 2746 int mac_pref) 2747{ 2748 return __set_rdc_table_num(np, 16, 8, table_num, mac_pref); 2749} 2750 2751static int niu_set_alt_mac_rdc_table(struct niu *np, int idx, 2752 int table_num, int mac_pref) 2753{ 2754 if (idx >= niu_num_alt_addr(np)) 2755 return -EINVAL; 2756 return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref); 2757} 2758 2759static u64 vlan_entry_set_parity(u64 reg_val) 2760{ 2761 u64 port01_mask; 2762 u64 port23_mask; 2763 2764 port01_mask = 0x00ff; 2765 port23_mask = 0xff00; 2766 2767 if (hweight64(reg_val & port01_mask) & 1) 2768 reg_val |= ENET_VLAN_TBL_PARITY0; 2769 else 2770 reg_val &= ~ENET_VLAN_TBL_PARITY0; 2771 2772 if (hweight64(reg_val & port23_mask) & 1) 2773 reg_val |= ENET_VLAN_TBL_PARITY1; 2774 else 2775 reg_val &= ~ENET_VLAN_TBL_PARITY1; 2776 2777 return reg_val; 2778} 2779 2780static void vlan_tbl_write(struct niu *np, unsigned long index, 2781 int port, int vpr, int rdc_table) 2782{ 2783 u64 reg_val = nr64(ENET_VLAN_TBL(index)); 2784 2785 reg_val &= ~((ENET_VLAN_TBL_VPR | 2786 ENET_VLAN_TBL_VLANRDCTBLN) << 2787 ENET_VLAN_TBL_SHIFT(port)); 2788 if (vpr) 2789 reg_val |= (ENET_VLAN_TBL_VPR << 2790 ENET_VLAN_TBL_SHIFT(port)); 2791 reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port)); 2792 2793 reg_val = vlan_entry_set_parity(reg_val); 2794 2795 nw64(ENET_VLAN_TBL(index), reg_val); 2796} 2797 2798static void vlan_tbl_clear(struct niu *np) 2799{ 2800 int i; 2801 2802 for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) 2803 nw64(ENET_VLAN_TBL(i), 0); 2804} 2805 2806static int tcam_wait_bit(struct niu *np, u64 bit) 2807{ 2808 int limit = 1000; 2809 2810 while (--limit > 0) { 2811 if (nr64(TCAM_CTL) & bit) 2812 break; 2813 udelay(1); 2814 } 2815 if (limit <= 0) 2816 return -ENODEV; 2817 2818 return 0; 2819} 2820 2821static int tcam_flush(struct niu *np, int index) 2822{ 2823 nw64(TCAM_KEY_0, 0x00); 2824 nw64(TCAM_KEY_MASK_0, 0xff); 2825 nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index)); 2826 2827 return tcam_wait_bit(np, TCAM_CTL_STAT); 2828} 2829 2830#if 0 2831static int tcam_read(struct niu *np, int index, 2832 u64 *key, u64 *mask) 2833{ 2834 int err; 2835 2836 nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index)); 2837 err = tcam_wait_bit(np, TCAM_CTL_STAT); 2838 if (!err) { 2839 key[0] = nr64(TCAM_KEY_0); 2840 key[1] = nr64(TCAM_KEY_1); 2841 key[2] = nr64(TCAM_KEY_2); 2842 key[3] = nr64(TCAM_KEY_3); 2843 mask[0] = nr64(TCAM_KEY_MASK_0); 2844 mask[1] = nr64(TCAM_KEY_MASK_1); 2845 mask[2] = nr64(TCAM_KEY_MASK_2); 2846 mask[3] = nr64(TCAM_KEY_MASK_3); 2847 } 2848 return err; 2849} 2850#endif 2851 2852static int tcam_write(struct niu *np, int index, 2853 u64 *key, u64 *mask) 2854{ 2855 nw64(TCAM_KEY_0, key[0]); 2856 nw64(TCAM_KEY_1, key[1]); 2857 nw64(TCAM_KEY_2, key[2]); 2858 nw64(TCAM_KEY_3, key[3]); 2859 nw64(TCAM_KEY_MASK_0, mask[0]); 2860 nw64(TCAM_KEY_MASK_1, mask[1]); 2861 nw64(TCAM_KEY_MASK_2, mask[2]); 2862 
nw64(TCAM_KEY_MASK_3, mask[3]); 2863 nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index)); 2864 2865 return tcam_wait_bit(np, TCAM_CTL_STAT); 2866} 2867 2868#if 0 2869static int tcam_assoc_read(struct niu *np, int index, u64 *data) 2870{ 2871 int err; 2872 2873 nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index)); 2874 err = tcam_wait_bit(np, TCAM_CTL_STAT); 2875 if (!err) 2876 *data = nr64(TCAM_KEY_1); 2877 2878 return err; 2879} 2880#endif 2881 2882static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data) 2883{ 2884 nw64(TCAM_KEY_1, assoc_data); 2885 nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index)); 2886 2887 return tcam_wait_bit(np, TCAM_CTL_STAT); 2888} 2889 2890static void tcam_enable(struct niu *np, int on) 2891{ 2892 u64 val = nr64(FFLP_CFG_1); 2893 2894 if (on) 2895 val &= ~FFLP_CFG_1_TCAM_DIS; 2896 else 2897 val |= FFLP_CFG_1_TCAM_DIS; 2898 nw64(FFLP_CFG_1, val); 2899} 2900 2901static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio) 2902{ 2903 u64 val = nr64(FFLP_CFG_1); 2904 2905 val &= ~(FFLP_CFG_1_FFLPINITDONE | 2906 FFLP_CFG_1_CAMLAT | 2907 FFLP_CFG_1_CAMRATIO); 2908 val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT); 2909 val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT); 2910 nw64(FFLP_CFG_1, val); 2911 2912 val = nr64(FFLP_CFG_1); 2913 val |= FFLP_CFG_1_FFLPINITDONE; 2914 nw64(FFLP_CFG_1, val); 2915} 2916 2917static int tcam_user_eth_class_enable(struct niu *np, unsigned long class, 2918 int on) 2919{ 2920 unsigned long reg; 2921 u64 val; 2922 2923 if (class < CLASS_CODE_ETHERTYPE1 || 2924 class > CLASS_CODE_ETHERTYPE2) 2925 return -EINVAL; 2926 2927 reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1); 2928 val = nr64(reg); 2929 if (on) 2930 val |= L2_CLS_VLD; 2931 else 2932 val &= ~L2_CLS_VLD; 2933 nw64(reg, val); 2934 2935 return 0; 2936} 2937 2938#if 0 2939static int tcam_user_eth_class_set(struct niu *np, unsigned long class, 2940 u64 ether_type) 2941{ 2942 unsigned long reg; 2943 u64 val; 2944 2945 if (class < CLASS_CODE_ETHERTYPE1 || 2946 class > CLASS_CODE_ETHERTYPE2 || 2947 (ether_type & ~(u64)0xffff) != 0) 2948 return -EINVAL; 2949 2950 reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1); 2951 val = nr64(reg); 2952 val &= ~L2_CLS_ETYPE; 2953 val |= (ether_type << L2_CLS_ETYPE_SHIFT); 2954 nw64(reg, val); 2955 2956 return 0; 2957} 2958#endif 2959 2960static int tcam_user_ip_class_enable(struct niu *np, unsigned long class, 2961 int on) 2962{ 2963 unsigned long reg; 2964 u64 val; 2965 2966 if (class < CLASS_CODE_USER_PROG1 || 2967 class > CLASS_CODE_USER_PROG4) 2968 return -EINVAL; 2969 2970 reg = L3_CLS(class - CLASS_CODE_USER_PROG1); 2971 val = nr64(reg); 2972 if (on) 2973 val |= L3_CLS_VALID; 2974 else 2975 val &= ~L3_CLS_VALID; 2976 nw64(reg, val); 2977 2978 return 0; 2979} 2980 2981static int tcam_user_ip_class_set(struct niu *np, unsigned long class, 2982 int ipv6, u64 protocol_id, 2983 u64 tos_mask, u64 tos_val) 2984{ 2985 unsigned long reg; 2986 u64 val; 2987 2988 if (class < CLASS_CODE_USER_PROG1 || 2989 class > CLASS_CODE_USER_PROG4 || 2990 (protocol_id & ~(u64)0xff) != 0 || 2991 (tos_mask & ~(u64)0xff) != 0 || 2992 (tos_val & ~(u64)0xff) != 0) 2993 return -EINVAL; 2994 2995 reg = L3_CLS(class - CLASS_CODE_USER_PROG1); 2996 val = nr64(reg); 2997 val &= ~(L3_CLS_IPVER | L3_CLS_PID | 2998 L3_CLS_TOSMASK | L3_CLS_TOS); 2999 if (ipv6) 3000 val |= L3_CLS_IPVER; 3001 val |= (protocol_id << L3_CLS_PID_SHIFT); 3002 val |= (tos_mask << L3_CLS_TOSMASK_SHIFT); 3003 val |= (tos_val << L3_CLS_TOS_SHIFT); 3004 nw64(reg, val); 3005 3006 return 0; 3007} 3008 3009static int 
tcam_early_init(struct niu *np) 3010{ 3011 unsigned long i; 3012 int err; 3013 3014 tcam_enable(np, 0); 3015 tcam_set_lat_and_ratio(np, 3016 DEFAULT_TCAM_LATENCY, 3017 DEFAULT_TCAM_ACCESS_RATIO); 3018 for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) { 3019 err = tcam_user_eth_class_enable(np, i, 0); 3020 if (err) 3021 return err; 3022 } 3023 for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) { 3024 err = tcam_user_ip_class_enable(np, i, 0); 3025 if (err) 3026 return err; 3027 } 3028 3029 return 0; 3030} 3031 3032static int tcam_flush_all(struct niu *np) 3033{ 3034 unsigned long i; 3035 3036 for (i = 0; i < np->parent->tcam_num_entries; i++) { 3037 int err = tcam_flush(np, i); 3038 if (err) 3039 return err; 3040 } 3041 return 0; 3042} 3043 3044static u64 hash_addr_regval(unsigned long index, unsigned long num_entries) 3045{ 3046 return (u64)index | (num_entries == 1 ? HASH_TBL_ADDR_AUTOINC : 0); 3047} 3048 3049#if 0 3050static int hash_read(struct niu *np, unsigned long partition, 3051 unsigned long index, unsigned long num_entries, 3052 u64 *data) 3053{ 3054 u64 val = hash_addr_regval(index, num_entries); 3055 unsigned long i; 3056 3057 if (partition >= FCRAM_NUM_PARTITIONS || 3058 index + num_entries > FCRAM_SIZE) 3059 return -EINVAL; 3060 3061 nw64(HASH_TBL_ADDR(partition), val); 3062 for (i = 0; i < num_entries; i++) 3063 data[i] = nr64(HASH_TBL_DATA(partition)); 3064 3065 return 0; 3066} 3067#endif 3068 3069static int hash_write(struct niu *np, unsigned long partition, 3070 unsigned long index, unsigned long num_entries, 3071 u64 *data) 3072{ 3073 u64 val = hash_addr_regval(index, num_entries); 3074 unsigned long i; 3075 3076 if (partition >= FCRAM_NUM_PARTITIONS || 3077 index + (num_entries * 8) > FCRAM_SIZE) 3078 return -EINVAL; 3079 3080 nw64(HASH_TBL_ADDR(partition), val); 3081 for (i = 0; i < num_entries; i++) 3082 nw64(HASH_TBL_DATA(partition), data[i]); 3083 3084 return 0; 3085} 3086 3087static void fflp_reset(struct niu *np) 3088{ 3089 u64 val; 3090 3091 nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST); 3092 udelay(10); 3093 nw64(FFLP_CFG_1, 0); 3094 3095 val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE; 3096 nw64(FFLP_CFG_1, val); 3097} 3098 3099static void fflp_set_timings(struct niu *np) 3100{ 3101 u64 val = nr64(FFLP_CFG_1); 3102 3103 val &= ~FFLP_CFG_1_FFLPINITDONE; 3104 val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT); 3105 nw64(FFLP_CFG_1, val); 3106 3107 val = nr64(FFLP_CFG_1); 3108 val |= FFLP_CFG_1_FFLPINITDONE; 3109 nw64(FFLP_CFG_1, val); 3110 3111 val = nr64(FCRAM_REF_TMR); 3112 val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN); 3113 val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT); 3114 val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT); 3115 nw64(FCRAM_REF_TMR, val); 3116} 3117 3118static int fflp_set_partition(struct niu *np, u64 partition, 3119 u64 mask, u64 base, int enable) 3120{ 3121 unsigned long reg; 3122 u64 val; 3123 3124 if (partition >= FCRAM_NUM_PARTITIONS || 3125 (mask & ~(u64)0x1f) != 0 || 3126 (base & ~(u64)0x1f) != 0) 3127 return -EINVAL; 3128 3129 reg = FLW_PRT_SEL(partition); 3130 3131 val = nr64(reg); 3132 val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE); 3133 val |= (mask << FLW_PRT_SEL_MASK_SHIFT); 3134 val |= (base << FLW_PRT_SEL_BASE_SHIFT); 3135 if (enable) 3136 val |= FLW_PRT_SEL_EXT; 3137 nw64(reg, val); 3138 3139 return 0; 3140} 3141 3142static int fflp_disable_all_partitions(struct niu *np) 3143{ 3144 unsigned long i; 3145 3146 for (i = 0; i < 
FCRAM_NUM_PARTITIONS; i++) { 3147 int err = fflp_set_partition(np, 0, 0, 0, 0); 3148 if (err) 3149 return err; 3150 } 3151 return 0; 3152} 3153 3154static void fflp_llcsnap_enable(struct niu *np, int on) 3155{ 3156 u64 val = nr64(FFLP_CFG_1); 3157 3158 if (on) 3159 val |= FFLP_CFG_1_LLCSNAP; 3160 else 3161 val &= ~FFLP_CFG_1_LLCSNAP; 3162 nw64(FFLP_CFG_1, val); 3163} 3164 3165static void fflp_errors_enable(struct niu *np, int on) 3166{ 3167 u64 val = nr64(FFLP_CFG_1); 3168 3169 if (on) 3170 val &= ~FFLP_CFG_1_ERRORDIS; 3171 else 3172 val |= FFLP_CFG_1_ERRORDIS; 3173 nw64(FFLP_CFG_1, val); 3174} 3175 3176static int fflp_hash_clear(struct niu *np) 3177{ 3178 struct fcram_hash_ipv4 ent; 3179 unsigned long i; 3180 3181 /* IPV4 hash entry with valid bit clear, rest is don't care. */ 3182 memset(&ent, 0, sizeof(ent)); 3183 ent.header = HASH_HEADER_EXT; 3184 3185 for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) { 3186 int err = hash_write(np, 0, i, 1, (u64 *) &ent); 3187 if (err) 3188 return err; 3189 } 3190 return 0; 3191} 3192 3193static int fflp_early_init(struct niu *np) 3194{ 3195 struct niu_parent *parent; 3196 unsigned long flags; 3197 int err; 3198 3199 niu_lock_parent(np, flags); 3200 3201 parent = np->parent; 3202 err = 0; 3203 if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) { 3204 if (np->parent->plat_type != PLAT_TYPE_NIU) { 3205 fflp_reset(np); 3206 fflp_set_timings(np); 3207 err = fflp_disable_all_partitions(np); 3208 if (err) { 3209 netif_printk(np, probe, KERN_DEBUG, np->dev, 3210 "fflp_disable_all_partitions failed, err=%d\n", 3211 err); 3212 goto out; 3213 } 3214 } 3215 3216 err = tcam_early_init(np); 3217 if (err) { 3218 netif_printk(np, probe, KERN_DEBUG, np->dev, 3219 "tcam_early_init failed, err=%d\n", err); 3220 goto out; 3221 } 3222 fflp_llcsnap_enable(np, 1); 3223 fflp_errors_enable(np, 0); 3224 nw64(H1POLY, 0); 3225 nw64(H2POLY, 0); 3226 3227 err = tcam_flush_all(np); 3228 if (err) { 3229 netif_printk(np, probe, KERN_DEBUG, np->dev, 3230 "tcam_flush_all failed, err=%d\n", err); 3231 goto out; 3232 } 3233 if (np->parent->plat_type != PLAT_TYPE_NIU) { 3234 err = fflp_hash_clear(np); 3235 if (err) { 3236 netif_printk(np, probe, KERN_DEBUG, np->dev, 3237 "fflp_hash_clear failed, err=%d\n", 3238 err); 3239 goto out; 3240 } 3241 } 3242 3243 vlan_tbl_clear(np); 3244 3245 parent->flags |= PARENT_FLGS_CLS_HWINIT; 3246 } 3247out: 3248 niu_unlock_parent(np, flags); 3249 return err; 3250} 3251 3252static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key) 3253{ 3254 if (class_code < CLASS_CODE_USER_PROG1 || 3255 class_code > CLASS_CODE_SCTP_IPV6) 3256 return -EINVAL; 3257 3258 nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key); 3259 return 0; 3260} 3261 3262static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key) 3263{ 3264 if (class_code < CLASS_CODE_USER_PROG1 || 3265 class_code > CLASS_CODE_SCTP_IPV6) 3266 return -EINVAL; 3267 3268 nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key); 3269 return 0; 3270} 3271 3272/* Entries for the ports are interleaved in the TCAM */ 3273static u16 tcam_get_index(struct niu *np, u16 idx) 3274{ 3275 /* One entry reserved for IP fragment rule */ 3276 if (idx >= (np->clas.tcam_sz - 1)) 3277 idx = 0; 3278 return np->clas.tcam_top + ((idx+1) * np->parent->num_ports); 3279} 3280 3281static u16 tcam_get_size(struct niu *np) 3282{ 3283 /* One entry reserved for IP fragment rule */ 3284 return np->clas.tcam_sz - 1; 3285} 3286 3287static u16 tcam_get_valid_entry_cnt(struct niu *np) 3288{ 3289 /* One entry 
reserved for IP fragment rule */ 3290 return np->clas.tcam_valid_entries - 1; 3291} 3292 3293static void niu_rx_skb_append(struct sk_buff *skb, struct page *page, 3294 u32 offset, u32 size) 3295{ 3296 int i = skb_shinfo(skb)->nr_frags; 3297 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3298 3299 frag->page = page; 3300 frag->page_offset = offset; 3301 frag->size = size; 3302 3303 skb->len += size; 3304 skb->data_len += size; 3305 skb->truesize += size; 3306 3307 skb_shinfo(skb)->nr_frags = i + 1; 3308} 3309 3310static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a) 3311{ 3312 a >>= PAGE_SHIFT; 3313 a ^= (a >> ilog2(MAX_RBR_RING_SIZE)); 3314 3315 return a & (MAX_RBR_RING_SIZE - 1); 3316} 3317 3318static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr, 3319 struct page ***link) 3320{ 3321 unsigned int h = niu_hash_rxaddr(rp, addr); 3322 struct page *p, **pp; 3323 3324 addr &= PAGE_MASK; 3325 pp = &rp->rxhash[h]; 3326 for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) { 3327 if (p->index == addr) { 3328 *link = pp; 3329 goto found; 3330 } 3331 } 3332 BUG(); 3333 3334found: 3335 return p; 3336} 3337 3338static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base) 3339{ 3340 unsigned int h = niu_hash_rxaddr(rp, base); 3341 3342 page->index = base; 3343 page->mapping = (struct address_space *) rp->rxhash[h]; 3344 rp->rxhash[h] = page; 3345} 3346 3347static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp, 3348 gfp_t mask, int start_index) 3349{ 3350 struct page *page; 3351 u64 addr; 3352 int i; 3353 3354 page = alloc_page(mask); 3355 if (!page) 3356 return -ENOMEM; 3357 3358 addr = np->ops->map_page(np->device, page, 0, 3359 PAGE_SIZE, DMA_FROM_DEVICE); 3360 3361 niu_hash_page(rp, page, addr); 3362 if (rp->rbr_blocks_per_page > 1) 3363 atomic_add(rp->rbr_blocks_per_page - 1, 3364 &compound_head(page)->_count); 3365 3366 for (i = 0; i < rp->rbr_blocks_per_page; i++) { 3367 __le32 *rbr = &rp->rbr[start_index + i]; 3368 3369 *rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT); 3370 addr += rp->rbr_block_size; 3371 } 3372 3373 return 0; 3374} 3375 3376static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask) 3377{ 3378 int index = rp->rbr_index; 3379 3380 rp->rbr_pending++; 3381 if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) { 3382 int err = niu_rbr_add_page(np, rp, mask, index); 3383 3384 if (unlikely(err)) { 3385 rp->rbr_pending--; 3386 return; 3387 } 3388 3389 rp->rbr_index += rp->rbr_blocks_per_page; 3390 BUG_ON(rp->rbr_index > rp->rbr_table_size); 3391 if (rp->rbr_index == rp->rbr_table_size) 3392 rp->rbr_index = 0; 3393 3394 if (rp->rbr_pending >= rp->rbr_kick_thresh) { 3395 nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending); 3396 rp->rbr_pending = 0; 3397 } 3398 } 3399} 3400 3401static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp) 3402{ 3403 unsigned int index = rp->rcr_index; 3404 int num_rcr = 0; 3405 3406 rp->rx_dropped++; 3407 while (1) { 3408 struct page *page, **link; 3409 u64 addr, val; 3410 u32 rcr_size; 3411 3412 num_rcr++; 3413 3414 val = le64_to_cpup(&rp->rcr[index]); 3415 addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << 3416 RCR_ENTRY_PKT_BUF_ADDR_SHIFT; 3417 page = niu_find_rxpage(rp, addr, &link); 3418 3419 rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> 3420 RCR_ENTRY_PKTBUFSZ_SHIFT]; 3421 if ((page->index + PAGE_SIZE) - rcr_size == addr) { 3422 *link = (struct page *) page->mapping; 3423 np->ops->unmap_page(np->device, page->index, 3424 PAGE_SIZE, DMA_FROM_DEVICE); 3425 
page->index = 0; 3426 page->mapping = NULL; 3427 __free_page(page); 3428 rp->rbr_refill_pending++; 3429 } 3430 3431 index = NEXT_RCR(rp, index); 3432 if (!(val & RCR_ENTRY_MULTI)) 3433 break; 3434 3435 } 3436 rp->rcr_index = index; 3437 3438 return num_rcr; 3439} 3440 3441static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np, 3442 struct rx_ring_info *rp) 3443{ 3444 unsigned int index = rp->rcr_index; 3445 struct rx_pkt_hdr1 *rh; 3446 struct sk_buff *skb; 3447 int len, num_rcr; 3448 3449 skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE); 3450 if (unlikely(!skb)) 3451 return niu_rx_pkt_ignore(np, rp); 3452 3453 num_rcr = 0; 3454 while (1) { 3455 struct page *page, **link; 3456 u32 rcr_size, append_size; 3457 u64 addr, val, off; 3458 3459 num_rcr++; 3460 3461 val = le64_to_cpup(&rp->rcr[index]); 3462 3463 len = (val & RCR_ENTRY_L2_LEN) >> 3464 RCR_ENTRY_L2_LEN_SHIFT; 3465 len -= ETH_FCS_LEN; 3466 3467 addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << 3468 RCR_ENTRY_PKT_BUF_ADDR_SHIFT; 3469 page = niu_find_rxpage(rp, addr, &link); 3470 3471 rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> 3472 RCR_ENTRY_PKTBUFSZ_SHIFT]; 3473 3474 off = addr & ~PAGE_MASK; 3475 append_size = rcr_size; 3476 if (num_rcr == 1) { 3477 int ptype; 3478 3479 ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT); 3480 if ((ptype == RCR_PKT_TYPE_TCP || 3481 ptype == RCR_PKT_TYPE_UDP) && 3482 !(val & (RCR_ENTRY_NOPORT | 3483 RCR_ENTRY_ERROR))) 3484 skb->ip_summed = CHECKSUM_UNNECESSARY; 3485 else 3486 skb_checksum_none_assert(skb); 3487 } else if (!(val & RCR_ENTRY_MULTI)) 3488 append_size = len - skb->len; 3489 3490 niu_rx_skb_append(skb, page, off, append_size); 3491 if ((page->index + rp->rbr_block_size) - rcr_size == addr) { 3492 *link = (struct page *) page->mapping; 3493 np->ops->unmap_page(np->device, page->index, 3494 PAGE_SIZE, DMA_FROM_DEVICE); 3495 page->index = 0; 3496 page->mapping = NULL; 3497 rp->rbr_refill_pending++; 3498 } else 3499 get_page(page); 3500 3501 index = NEXT_RCR(rp, index); 3502 if (!(val & RCR_ENTRY_MULTI)) 3503 break; 3504 3505 } 3506 rp->rcr_index = index; 3507 3508 len += sizeof(*rh); 3509 len = min_t(int, len, sizeof(*rh) + VLAN_ETH_HLEN); 3510 __pskb_pull_tail(skb, len); 3511 3512 rh = (struct rx_pkt_hdr1 *) skb->data; 3513 if (np->dev->features & NETIF_F_RXHASH) 3514 skb->rxhash = ((u32)rh->hashval2_0 << 24 | 3515 (u32)rh->hashval2_1 << 16 | 3516 (u32)rh->hashval1_1 << 8 | 3517 (u32)rh->hashval1_2 << 0); 3518 skb_pull(skb, sizeof(*rh)); 3519 3520 rp->rx_packets++; 3521 rp->rx_bytes += skb->len; 3522 3523 skb->protocol = eth_type_trans(skb, np->dev); 3524 skb_record_rx_queue(skb, rp->rx_channel); 3525 napi_gro_receive(napi, skb); 3526 3527 return num_rcr; 3528} 3529 3530static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask) 3531{ 3532 int blocks_per_page = rp->rbr_blocks_per_page; 3533 int err, index = rp->rbr_index; 3534 3535 err = 0; 3536 while (index < (rp->rbr_table_size - blocks_per_page)) { 3537 err = niu_rbr_add_page(np, rp, mask, index); 3538 if (err) 3539 break; 3540 3541 index += blocks_per_page; 3542 } 3543 3544 rp->rbr_index = index; 3545 return err; 3546} 3547 3548static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp) 3549{ 3550 int i; 3551 3552 for (i = 0; i < MAX_RBR_RING_SIZE; i++) { 3553 struct page *page; 3554 3555 page = rp->rxhash[i]; 3556 while (page) { 3557 struct page *next = (struct page *) page->mapping; 3558 u64 base = page->index; 3559 3560 np->ops->unmap_page(np->device, base, PAGE_SIZE, 3561 DMA_FROM_DEVICE); 3562 
page->index = 0; 3563 page->mapping = NULL; 3564 3565 __free_page(page); 3566 3567 page = next; 3568 } 3569 } 3570 3571 for (i = 0; i < rp->rbr_table_size; i++) 3572 rp->rbr[i] = cpu_to_le32(0); 3573 rp->rbr_index = 0; 3574} 3575 3576static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx) 3577{ 3578 struct tx_buff_info *tb = &rp->tx_buffs[idx]; 3579 struct sk_buff *skb = tb->skb; 3580 struct tx_pkt_hdr *tp; 3581 u64 tx_flags; 3582 int i, len; 3583 3584 tp = (struct tx_pkt_hdr *) skb->data; 3585 tx_flags = le64_to_cpup(&tp->flags); 3586 3587 rp->tx_packets++; 3588 rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) - 3589 ((tx_flags & TXHDR_PAD) / 2)); 3590 3591 len = skb_headlen(skb); 3592 np->ops->unmap_single(np->device, tb->mapping, 3593 len, DMA_TO_DEVICE); 3594 3595 if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK) 3596 rp->mark_pending--; 3597 3598 tb->skb = NULL; 3599 do { 3600 idx = NEXT_TX(rp, idx); 3601 len -= MAX_TX_DESC_LEN; 3602 } while (len > 0); 3603 3604 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3605 tb = &rp->tx_buffs[idx]; 3606 BUG_ON(tb->skb != NULL); 3607 np->ops->unmap_page(np->device, tb->mapping, 3608 skb_shinfo(skb)->frags[i].size, 3609 DMA_TO_DEVICE); 3610 idx = NEXT_TX(rp, idx); 3611 } 3612 3613 dev_kfree_skb(skb); 3614 3615 return idx; 3616} 3617 3618#define NIU_TX_WAKEUP_THRESH(rp) ((rp)->pending / 4) 3619 3620static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) 3621{ 3622 struct netdev_queue *txq; 3623 u16 pkt_cnt, tmp; 3624 int cons, index; 3625 u64 cs; 3626 3627 index = (rp - np->tx_rings); 3628 txq = netdev_get_tx_queue(np->dev, index); 3629 3630 cs = rp->tx_cs; 3631 if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK)))) 3632 goto out; 3633 3634 tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT; 3635 pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) & 3636 (TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT); 3637 3638 rp->last_pkt_cnt = tmp; 3639 3640 cons = rp->cons; 3641 3642 netif_printk(np, tx_done, KERN_DEBUG, np->dev, 3643 "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons); 3644 3645 while (pkt_cnt--) 3646 cons = release_tx_packet(np, rp, cons); 3647 3648 rp->cons = cons; 3649 smp_mb(); 3650 3651out: 3652 if (unlikely(netif_tx_queue_stopped(txq) && 3653 (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) { 3654 __netif_tx_lock(txq, smp_processor_id()); 3655 if (netif_tx_queue_stopped(txq) && 3656 (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))) 3657 netif_tx_wake_queue(txq); 3658 __netif_tx_unlock(txq); 3659 } 3660} 3661 3662static inline void niu_sync_rx_discard_stats(struct niu *np, 3663 struct rx_ring_info *rp, 3664 const int limit) 3665{ 3666 /* This elaborate scheme is needed for reading the RX discard 3667 * counters, as they are only 16-bit and can overflow quickly, 3668 * and because the overflow indication bit is not usable as 3669 * the counter value does not wrap, but remains at max value 3670 * 0xFFFF. 3671 * 3672 * In theory and in practice counters can be lost in between 3673 * reading nr64() and clearing the counter nw64(). For this 3674 * reason, the number of counter clearings nw64() is 3675 * limited/reduced though the limit parameter. 3676 */ 3677 int rx_channel = rp->rx_channel; 3678 u32 misc, wred; 3679 3680 /* RXMISC (Receive Miscellaneous Discard Count), covers the 3681 * following discard events: IPP (Input Port Process), 3682 * FFLP/TCAM, Full RCR (Receive Completion Ring) RBR (Receive 3683 * Block Ring) prefetch buffer is empty. 
3684 */ 3685 misc = nr64(RXMISC(rx_channel)); 3686 if (unlikely((misc & RXMISC_COUNT) > limit)) { 3687 nw64(RXMISC(rx_channel), 0); 3688 rp->rx_errors += misc & RXMISC_COUNT; 3689 3690 if (unlikely(misc & RXMISC_OFLOW)) 3691 dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n", 3692 rx_channel); 3693 3694 netif_printk(np, rx_err, KERN_DEBUG, np->dev, 3695 "rx-%d: MISC drop=%u over=%u\n", 3696 rx_channel, misc, misc-limit); 3697 } 3698 3699 /* WRED (Weighted Random Early Discard) by hardware */ 3700 wred = nr64(RED_DIS_CNT(rx_channel)); 3701 if (unlikely((wred & RED_DIS_CNT_COUNT) > limit)) { 3702 nw64(RED_DIS_CNT(rx_channel), 0); 3703 rp->rx_dropped += wred & RED_DIS_CNT_COUNT; 3704 3705 if (unlikely(wred & RED_DIS_CNT_OFLOW)) 3706 dev_err(np->device, "rx-%d: Counter overflow WRED discard\n", rx_channel); 3707 3708 netif_printk(np, rx_err, KERN_DEBUG, np->dev, 3709 "rx-%d: WRED drop=%u over=%u\n", 3710 rx_channel, wred, wred-limit); 3711 } 3712} 3713 3714static int niu_rx_work(struct napi_struct *napi, struct niu *np, 3715 struct rx_ring_info *rp, int budget) 3716{ 3717 int qlen, rcr_done = 0, work_done = 0; 3718 struct rxdma_mailbox *mbox = rp->mbox; 3719 u64 stat; 3720 3721#if 1 3722 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel)); 3723 qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN; 3724#else 3725 stat = le64_to_cpup(&mbox->rx_dma_ctl_stat); 3726 qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN); 3727#endif 3728 mbox->rx_dma_ctl_stat = 0; 3729 mbox->rcrstat_a = 0; 3730 3731 netif_printk(np, rx_status, KERN_DEBUG, np->dev, 3732 "%s(chan[%d]), stat[%llx] qlen=%d\n", 3733 __func__, rp->rx_channel, (unsigned long long)stat, qlen); 3734 3735 rcr_done = work_done = 0; 3736 qlen = min(qlen, budget); 3737 while (work_done < qlen) { 3738 rcr_done += niu_process_rx_pkt(napi, np, rp); 3739 work_done++; 3740 } 3741 3742 if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) { 3743 unsigned int i; 3744 3745 for (i = 0; i < rp->rbr_refill_pending; i++) 3746 niu_rbr_refill(np, rp, GFP_ATOMIC); 3747 rp->rbr_refill_pending = 0; 3748 } 3749 3750 stat = (RX_DMA_CTL_STAT_MEX | 3751 ((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) | 3752 ((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT)); 3753 3754 nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat); 3755 3756 /* Only sync discards stats when qlen indicate potential for drops */ 3757 if (qlen > 10) 3758 niu_sync_rx_discard_stats(np, rp, 0x7FFF); 3759 3760 return work_done; 3761} 3762 3763static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget) 3764{ 3765 u64 v0 = lp->v0; 3766 u32 tx_vec = (v0 >> 32); 3767 u32 rx_vec = (v0 & 0xffffffff); 3768 int i, work_done = 0; 3769 3770 netif_printk(np, intr, KERN_DEBUG, np->dev, 3771 "%s() v0[%016llx]\n", __func__, (unsigned long long)v0); 3772 3773 for (i = 0; i < np->num_tx_rings; i++) { 3774 struct tx_ring_info *rp = &np->tx_rings[i]; 3775 if (tx_vec & (1 << rp->tx_channel)) 3776 niu_tx_work(np, rp); 3777 nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0); 3778 } 3779 3780 for (i = 0; i < np->num_rx_rings; i++) { 3781 struct rx_ring_info *rp = &np->rx_rings[i]; 3782 3783 if (rx_vec & (1 << rp->rx_channel)) { 3784 int this_work_done; 3785 3786 this_work_done = niu_rx_work(&lp->napi, np, rp, 3787 budget); 3788 3789 budget -= this_work_done; 3790 work_done += this_work_done; 3791 } 3792 nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0); 3793 } 3794 3795 return work_done; 3796} 3797 3798static int niu_poll(struct napi_struct *napi, int budget) 3799{ 3800 struct niu_ldg *lp = container_of(napi, struct niu_ldg, 
napi); 3801 struct niu *np = lp->np; 3802 int work_done; 3803 3804 work_done = niu_poll_core(np, lp, budget); 3805 3806 if (work_done < budget) { 3807 napi_complete(napi); 3808 niu_ldg_rearm(np, lp, 1); 3809 } 3810 return work_done; 3811} 3812 3813static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp, 3814 u64 stat) 3815{ 3816 netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel); 3817 3818 if (stat & RX_DMA_CTL_STAT_RBR_TMOUT) 3819 pr_cont("RBR_TMOUT "); 3820 if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR) 3821 pr_cont("RSP_CNT "); 3822 if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS) 3823 pr_cont("BYTE_EN_BUS "); 3824 if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR) 3825 pr_cont("RSP_DAT "); 3826 if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR) 3827 pr_cont("RCR_ACK "); 3828 if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR) 3829 pr_cont("RCR_SHA_PAR "); 3830 if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR) 3831 pr_cont("RBR_PRE_PAR "); 3832 if (stat & RX_DMA_CTL_STAT_CONFIG_ERR) 3833 pr_cont("CONFIG "); 3834 if (stat & RX_DMA_CTL_STAT_RCRINCON) 3835 pr_cont("RCRINCON "); 3836 if (stat & RX_DMA_CTL_STAT_RCRFULL) 3837 pr_cont("RCRFULL "); 3838 if (stat & RX_DMA_CTL_STAT_RBRFULL) 3839 pr_cont("RBRFULL "); 3840 if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE) 3841 pr_cont("RBRLOGPAGE "); 3842 if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE) 3843 pr_cont("CFIGLOGPAGE "); 3844 if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR) 3845 pr_cont("DC_FIDO "); 3846 3847 pr_cont(")\n"); 3848} 3849 3850static int niu_rx_error(struct niu *np, struct rx_ring_info *rp) 3851{ 3852 u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel)); 3853 int err = 0; 3854 3855 3856 if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL | 3857 RX_DMA_CTL_STAT_PORT_FATAL)) 3858 err = -EINVAL; 3859 3860 if (err) { 3861 netdev_err(np->dev, "RX channel %u error, stat[%llx]\n", 3862 rp->rx_channel, 3863 (unsigned long long) stat); 3864 3865 niu_log_rxchan_errors(np, rp, stat); 3866 } 3867 3868 nw64(RX_DMA_CTL_STAT(rp->rx_channel), 3869 stat & RX_DMA_CTL_WRITE_CLEAR_ERRS); 3870 3871 return err; 3872} 3873 3874static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp, 3875 u64 cs) 3876{ 3877 netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel); 3878 3879 if (cs & TX_CS_MBOX_ERR) 3880 pr_cont("MBOX "); 3881 if (cs & TX_CS_PKT_SIZE_ERR) 3882 pr_cont("PKT_SIZE "); 3883 if (cs & TX_CS_TX_RING_OFLOW) 3884 pr_cont("TX_RING_OFLOW "); 3885 if (cs & TX_CS_PREF_BUF_PAR_ERR) 3886 pr_cont("PREF_BUF_PAR "); 3887 if (cs & TX_CS_NACK_PREF) 3888 pr_cont("NACK_PREF "); 3889 if (cs & TX_CS_NACK_PKT_RD) 3890 pr_cont("NACK_PKT_RD "); 3891 if (cs & TX_CS_CONF_PART_ERR) 3892 pr_cont("CONF_PART "); 3893 if (cs & TX_CS_PKT_PRT_ERR) 3894 pr_cont("PKT_PTR "); 3895 3896 pr_cont(")\n"); 3897} 3898 3899static int niu_tx_error(struct niu *np, struct tx_ring_info *rp) 3900{ 3901 u64 cs, logh, logl; 3902 3903 cs = nr64(TX_CS(rp->tx_channel)); 3904 logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel)); 3905 logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel)); 3906 3907 netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n", 3908 rp->tx_channel, 3909 (unsigned long long)cs, 3910 (unsigned long long)logh, 3911 (unsigned long long)logl); 3912 3913 niu_log_txchan_errors(np, rp, cs); 3914 3915 return -ENODEV; 3916} 3917 3918static int niu_mif_interrupt(struct niu *np) 3919{ 3920 u64 mif_status = nr64(MIF_STATUS); 3921 int phy_mdint = 0; 3922 3923 if (np->flags & NIU_FLAGS_XMAC) { 3924 u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS); 3925 3926 if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT) 3927 phy_mdint = 1; 3928 } 3929 
3930 netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n", 3931 (unsigned long long)mif_status, phy_mdint); 3932 3933 return -ENODEV; 3934} 3935 3936static void niu_xmac_interrupt(struct niu *np) 3937{ 3938 struct niu_xmac_stats *mp = &np->mac_stats.xmac; 3939 u64 val; 3940 3941 val = nr64_mac(XTXMAC_STATUS); 3942 if (val & XTXMAC_STATUS_FRAME_CNT_EXP) 3943 mp->tx_frames += TXMAC_FRM_CNT_COUNT; 3944 if (val & XTXMAC_STATUS_BYTE_CNT_EXP) 3945 mp->tx_bytes += TXMAC_BYTE_CNT_COUNT; 3946 if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR) 3947 mp->tx_fifo_errors++; 3948 if (val & XTXMAC_STATUS_TXMAC_OFLOW) 3949 mp->tx_overflow_errors++; 3950 if (val & XTXMAC_STATUS_MAX_PSIZE_ERR) 3951 mp->tx_max_pkt_size_errors++; 3952 if (val & XTXMAC_STATUS_TXMAC_UFLOW) 3953 mp->tx_underflow_errors++; 3954 3955 val = nr64_mac(XRXMAC_STATUS); 3956 if (val & XRXMAC_STATUS_LCL_FLT_STATUS) 3957 mp->rx_local_faults++; 3958 if (val & XRXMAC_STATUS_RFLT_DET) 3959 mp->rx_remote_faults++; 3960 if (val & XRXMAC_STATUS_LFLT_CNT_EXP) 3961 mp->rx_link_faults += LINK_FAULT_CNT_COUNT; 3962 if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP) 3963 mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT; 3964 if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP) 3965 mp->rx_frags += RXMAC_FRAG_CNT_COUNT; 3966 if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP) 3967 mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT; 3968 if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP) 3969 mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; 3970 if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP) 3971 mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; 3972 if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP) 3973 mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT; 3974 if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP) 3975 mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT; 3976 if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP) 3977 mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT; 3978 if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP) 3979 mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT; 3980 if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP) 3981 mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT; 3982 if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP) 3983 mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT; 3984 if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP) 3985 mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT; 3986 if (val & XRXMAC_STATUS_RXOCTET_CNT_EXP) 3987 mp->rx_octets += RXMAC_BT_CNT_COUNT; 3988 if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP) 3989 mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT; 3990 if (val & XRXMAC_STATUS_LENERR_CNT_EXP) 3991 mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT; 3992 if (val & XRXMAC_STATUS_CRCERR_CNT_EXP) 3993 mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT; 3994 if (val & XRXMAC_STATUS_RXUFLOW) 3995 mp->rx_underflows++; 3996 if (val & XRXMAC_STATUS_RXOFLOW) 3997 mp->rx_overflows++; 3998 3999 val = nr64_mac(XMAC_FC_STAT); 4000 if (val & XMAC_FC_STAT_TX_MAC_NPAUSE) 4001 mp->pause_off_state++; 4002 if (val & XMAC_FC_STAT_TX_MAC_PAUSE) 4003 mp->pause_on_state++; 4004 if (val & XMAC_FC_STAT_RX_MAC_RPAUSE) 4005 mp->pause_received++; 4006} 4007 4008static void niu_bmac_interrupt(struct niu *np) 4009{ 4010 struct niu_bmac_stats *mp = &np->mac_stats.bmac; 4011 u64 val; 4012 4013 val = nr64_mac(BTXMAC_STATUS); 4014 if (val & BTXMAC_STATUS_UNDERRUN) 4015 mp->tx_underflow_errors++; 4016 if (val & BTXMAC_STATUS_MAX_PKT_ERR) 4017 mp->tx_max_pkt_size_errors++; 4018 if (val & BTXMAC_STATUS_BYTE_CNT_EXP) 4019 mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT; 4020 if (val & BTXMAC_STATUS_FRAME_CNT_EXP) 4021 mp->tx_frames += BTXMAC_FRM_CNT_COUNT; 4022 4023 val = nr64_mac(BRXMAC_STATUS); 4024 if (val & BRXMAC_STATUS_OVERFLOW) 4025 mp->rx_overflows++; 
4026 if (val & BRXMAC_STATUS_FRAME_CNT_EXP) 4027 mp->rx_frames += BRXMAC_FRAME_CNT_COUNT; 4028 if (val & BRXMAC_STATUS_ALIGN_ERR_EXP) 4029 mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT; 4030 if (val & BRXMAC_STATUS_CRC_ERR_EXP) 4031 mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT; 4032 if (val & BRXMAC_STATUS_LEN_ERR_EXP) 4033 mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT; 4034 4035 val = nr64_mac(BMAC_CTRL_STATUS); 4036 if (val & BMAC_CTRL_STATUS_NOPAUSE) 4037 mp->pause_off_state++; 4038 if (val & BMAC_CTRL_STATUS_PAUSE) 4039 mp->pause_on_state++; 4040 if (val & BMAC_CTRL_STATUS_PAUSE_RECV) 4041 mp->pause_received++; 4042} 4043 4044static int niu_mac_interrupt(struct niu *np) 4045{ 4046 if (np->flags & NIU_FLAGS_XMAC) 4047 niu_xmac_interrupt(np); 4048 else 4049 niu_bmac_interrupt(np); 4050 4051 return 0; 4052} 4053 4054static void niu_log_device_error(struct niu *np, u64 stat) 4055{ 4056 netdev_err(np->dev, "Core device errors ( "); 4057 4058 if (stat & SYS_ERR_MASK_META2) 4059 pr_cont("META2 "); 4060 if (stat & SYS_ERR_MASK_META1) 4061 pr_cont("META1 "); 4062 if (stat & SYS_ERR_MASK_PEU) 4063 pr_cont("PEU "); 4064 if (stat & SYS_ERR_MASK_TXC) 4065 pr_cont("TXC "); 4066 if (stat & SYS_ERR_MASK_RDMC) 4067 pr_cont("RDMC "); 4068 if (stat & SYS_ERR_MASK_TDMC) 4069 pr_cont("TDMC "); 4070 if (stat & SYS_ERR_MASK_ZCP) 4071 pr_cont("ZCP "); 4072 if (stat & SYS_ERR_MASK_FFLP) 4073 pr_cont("FFLP "); 4074 if (stat & SYS_ERR_MASK_IPP) 4075 pr_cont("IPP "); 4076 if (stat & SYS_ERR_MASK_MAC) 4077 pr_cont("MAC "); 4078 if (stat & SYS_ERR_MASK_SMX) 4079 pr_cont("SMX "); 4080 4081 pr_cont(")\n"); 4082} 4083 4084static int niu_device_error(struct niu *np) 4085{ 4086 u64 stat = nr64(SYS_ERR_STAT); 4087 4088 netdev_err(np->dev, "Core device error, stat[%llx]\n", 4089 (unsigned long long)stat); 4090 4091 niu_log_device_error(np, stat); 4092 4093 return -ENODEV; 4094} 4095 4096static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp, 4097 u64 v0, u64 v1, u64 v2) 4098{ 4099 4100 int i, err = 0; 4101 4102 lp->v0 = v0; 4103 lp->v1 = v1; 4104 lp->v2 = v2; 4105 4106 if (v1 & 0x00000000ffffffffULL) { 4107 u32 rx_vec = (v1 & 0xffffffff); 4108 4109 for (i = 0; i < np->num_rx_rings; i++) { 4110 struct rx_ring_info *rp = &np->rx_rings[i]; 4111 4112 if (rx_vec & (1 << rp->rx_channel)) { 4113 int r = niu_rx_error(np, rp); 4114 if (r) { 4115 err = r; 4116 } else { 4117 if (!v0) 4118 nw64(RX_DMA_CTL_STAT(rp->rx_channel), 4119 RX_DMA_CTL_STAT_MEX); 4120 } 4121 } 4122 } 4123 } 4124 if (v1 & 0x7fffffff00000000ULL) { 4125 u32 tx_vec = (v1 >> 32) & 0x7fffffff; 4126 4127 for (i = 0; i < np->num_tx_rings; i++) { 4128 struct tx_ring_info *rp = &np->tx_rings[i]; 4129 4130 if (tx_vec & (1 << rp->tx_channel)) { 4131 int r = niu_tx_error(np, rp); 4132 if (r) 4133 err = r; 4134 } 4135 } 4136 } 4137 if ((v0 | v1) & 0x8000000000000000ULL) { 4138 int r = niu_mif_interrupt(np); 4139 if (r) 4140 err = r; 4141 } 4142 if (v2) { 4143 if (v2 & 0x01ef) { 4144 int r = niu_mac_interrupt(np); 4145 if (r) 4146 err = r; 4147 } 4148 if (v2 & 0x0210) { 4149 int r = niu_device_error(np); 4150 if (r) 4151 err = r; 4152 } 4153 } 4154 4155 if (err) 4156 niu_enable_interrupts(np, 0); 4157 4158 return err; 4159} 4160 4161static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp, 4162 int ldn) 4163{ 4164 struct rxdma_mailbox *mbox = rp->mbox; 4165 u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat); 4166 4167 stat_write = (RX_DMA_CTL_STAT_RCRTHRES | 4168 RX_DMA_CTL_STAT_RCRTO); 4169 nw64(RX_DMA_CTL_STAT(rp->rx_channel), 
stat_write); 4170 4171 netif_printk(np, intr, KERN_DEBUG, np->dev, 4172 "%s() stat[%llx]\n", __func__, (unsigned long long)stat); 4173} 4174 4175static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp, 4176 int ldn) 4177{ 4178 rp->tx_cs = nr64(TX_CS(rp->tx_channel)); 4179 4180 netif_printk(np, intr, KERN_DEBUG, np->dev, 4181 "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs); 4182} 4183 4184static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0) 4185{ 4186 struct niu_parent *parent = np->parent; 4187 u32 rx_vec, tx_vec; 4188 int i; 4189 4190 tx_vec = (v0 >> 32); 4191 rx_vec = (v0 & 0xffffffff); 4192 4193 for (i = 0; i < np->num_rx_rings; i++) { 4194 struct rx_ring_info *rp = &np->rx_rings[i]; 4195 int ldn = LDN_RXDMA(rp->rx_channel); 4196 4197 if (parent->ldg_map[ldn] != ldg) 4198 continue; 4199 4200 nw64(LD_IM0(ldn), LD_IM0_MASK); 4201 if (rx_vec & (1 << rp->rx_channel)) 4202 niu_rxchan_intr(np, rp, ldn); 4203 } 4204 4205 for (i = 0; i < np->num_tx_rings; i++) { 4206 struct tx_ring_info *rp = &np->tx_rings[i]; 4207 int ldn = LDN_TXDMA(rp->tx_channel); 4208 4209 if (parent->ldg_map[ldn] != ldg) 4210 continue; 4211 4212 nw64(LD_IM0(ldn), LD_IM0_MASK); 4213 if (tx_vec & (1 << rp->tx_channel)) 4214 niu_txchan_intr(np, rp, ldn); 4215 } 4216} 4217 4218static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp, 4219 u64 v0, u64 v1, u64 v2) 4220{ 4221 if (likely(napi_schedule_prep(&lp->napi))) { 4222 lp->v0 = v0; 4223 lp->v1 = v1; 4224 lp->v2 = v2; 4225 __niu_fastpath_interrupt(np, lp->ldg_num, v0); 4226 __napi_schedule(&lp->napi); 4227 } 4228} 4229 4230static irqreturn_t niu_interrupt(int irq, void *dev_id) 4231{ 4232 struct niu_ldg *lp = dev_id; 4233 struct niu *np = lp->np; 4234 int ldg = lp->ldg_num; 4235 unsigned long flags; 4236 u64 v0, v1, v2; 4237 4238 if (netif_msg_intr(np)) 4239 printk(KERN_DEBUG KBUILD_MODNAME ": " "%s() ldg[%p](%d)", 4240 __func__, lp, ldg); 4241 4242 spin_lock_irqsave(&np->lock, flags); 4243 4244 v0 = nr64(LDSV0(ldg)); 4245 v1 = nr64(LDSV1(ldg)); 4246 v2 = nr64(LDSV2(ldg)); 4247 4248 if (netif_msg_intr(np)) 4249 pr_cont(" v0[%llx] v1[%llx] v2[%llx]\n", 4250 (unsigned long long) v0, 4251 (unsigned long long) v1, 4252 (unsigned long long) v2); 4253 4254 if (unlikely(!v0 && !v1 && !v2)) { 4255 spin_unlock_irqrestore(&np->lock, flags); 4256 return IRQ_NONE; 4257 } 4258 4259 if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) { 4260 int err = niu_slowpath_interrupt(np, lp, v0, v1, v2); 4261 if (err) 4262 goto out; 4263 } 4264 if (likely(v0 & ~((u64)1 << LDN_MIF))) 4265 niu_schedule_napi(np, lp, v0, v1, v2); 4266 else 4267 niu_ldg_rearm(np, lp, 1); 4268out: 4269 spin_unlock_irqrestore(&np->lock, flags); 4270 4271 return IRQ_HANDLED; 4272} 4273 4274static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp) 4275{ 4276 if (rp->mbox) { 4277 np->ops->free_coherent(np->device, 4278 sizeof(struct rxdma_mailbox), 4279 rp->mbox, rp->mbox_dma); 4280 rp->mbox = NULL; 4281 } 4282 if (rp->rcr) { 4283 np->ops->free_coherent(np->device, 4284 MAX_RCR_RING_SIZE * sizeof(__le64), 4285 rp->rcr, rp->rcr_dma); 4286 rp->rcr = NULL; 4287 rp->rcr_table_size = 0; 4288 rp->rcr_index = 0; 4289 } 4290 if (rp->rbr) { 4291 niu_rbr_free(np, rp); 4292 4293 np->ops->free_coherent(np->device, 4294 MAX_RBR_RING_SIZE * sizeof(__le32), 4295 rp->rbr, rp->rbr_dma); 4296 rp->rbr = NULL; 4297 rp->rbr_table_size = 0; 4298 rp->rbr_index = 0; 4299 } 4300 kfree(rp->rxhash); 4301 rp->rxhash = NULL; 4302} 4303 4304static void niu_free_tx_ring_info(struct niu 
*np, struct tx_ring_info *rp) 4305{ 4306 if (rp->mbox) { 4307 np->ops->free_coherent(np->device, 4308 sizeof(struct txdma_mailbox), 4309 rp->mbox, rp->mbox_dma); 4310 rp->mbox = NULL; 4311 } 4312 if (rp->descr) { 4313 int i; 4314 4315 for (i = 0; i < MAX_TX_RING_SIZE; i++) { 4316 if (rp->tx_buffs[i].skb) 4317 (void) release_tx_packet(np, rp, i); 4318 } 4319 4320 np->ops->free_coherent(np->device, 4321 MAX_TX_RING_SIZE * sizeof(__le64), 4322 rp->descr, rp->descr_dma); 4323 rp->descr = NULL; 4324 rp->pending = 0; 4325 rp->prod = 0; 4326 rp->cons = 0; 4327 rp->wrap_bit = 0; 4328 } 4329} 4330 4331static void niu_free_channels(struct niu *np) 4332{ 4333 int i; 4334 4335 if (np->rx_rings) { 4336 for (i = 0; i < np->num_rx_rings; i++) { 4337 struct rx_ring_info *rp = &np->rx_rings[i]; 4338 4339 niu_free_rx_ring_info(np, rp); 4340 } 4341 kfree(np->rx_rings); 4342 np->rx_rings = NULL; 4343 np->num_rx_rings = 0; 4344 } 4345 4346 if (np->tx_rings) { 4347 for (i = 0; i < np->num_tx_rings; i++) { 4348 struct tx_ring_info *rp = &np->tx_rings[i]; 4349 4350 niu_free_tx_ring_info(np, rp); 4351 } 4352 kfree(np->tx_rings); 4353 np->tx_rings = NULL; 4354 np->num_tx_rings = 0; 4355 } 4356} 4357 4358static int niu_alloc_rx_ring_info(struct niu *np, 4359 struct rx_ring_info *rp) 4360{ 4361 BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64); 4362 4363 rp->rxhash = kzalloc(MAX_RBR_RING_SIZE * sizeof(struct page *), 4364 GFP_KERNEL); 4365 if (!rp->rxhash) 4366 return -ENOMEM; 4367 4368 rp->mbox = np->ops->alloc_coherent(np->device, 4369 sizeof(struct rxdma_mailbox), 4370 &rp->mbox_dma, GFP_KERNEL); 4371 if (!rp->mbox) 4372 return -ENOMEM; 4373 if ((unsigned long)rp->mbox & (64UL - 1)) { 4374 netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n", 4375 rp->mbox); 4376 return -EINVAL; 4377 } 4378 4379 rp->rcr = np->ops->alloc_coherent(np->device, 4380 MAX_RCR_RING_SIZE * sizeof(__le64), 4381 &rp->rcr_dma, GFP_KERNEL); 4382 if (!rp->rcr) 4383 return -ENOMEM; 4384 if ((unsigned long)rp->rcr & (64UL - 1)) { 4385 netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n", 4386 rp->rcr); 4387 return -EINVAL; 4388 } 4389 rp->rcr_table_size = MAX_RCR_RING_SIZE; 4390 rp->rcr_index = 0; 4391 4392 rp->rbr = np->ops->alloc_coherent(np->device, 4393 MAX_RBR_RING_SIZE * sizeof(__le32), 4394 &rp->rbr_dma, GFP_KERNEL); 4395 if (!rp->rbr) 4396 return -ENOMEM; 4397 if ((unsigned long)rp->rbr & (64UL - 1)) { 4398 netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n", 4399 rp->rbr); 4400 return -EINVAL; 4401 } 4402 rp->rbr_table_size = MAX_RBR_RING_SIZE; 4403 rp->rbr_index = 0; 4404 rp->rbr_pending = 0; 4405 4406 return 0; 4407} 4408 4409static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp) 4410{ 4411 int mtu = np->dev->mtu; 4412 4413 /* These values are recommended by the HW designers for fair 4414 * utilization of DRR amongst the rings. 
4415 */ 4416 rp->max_burst = mtu + 32; 4417 if (rp->max_burst > 4096) 4418 rp->max_burst = 4096; 4419} 4420 4421static int niu_alloc_tx_ring_info(struct niu *np, 4422 struct tx_ring_info *rp) 4423{ 4424 BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64); 4425 4426 rp->mbox = np->ops->alloc_coherent(np->device, 4427 sizeof(struct txdma_mailbox), 4428 &rp->mbox_dma, GFP_KERNEL); 4429 if (!rp->mbox) 4430 return -ENOMEM; 4431 if ((unsigned long)rp->mbox & (64UL - 1)) { 4432 netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n", 4433 rp->mbox); 4434 return -EINVAL; 4435 } 4436 4437 rp->descr = np->ops->alloc_coherent(np->device, 4438 MAX_TX_RING_SIZE * sizeof(__le64), 4439 &rp->descr_dma, GFP_KERNEL); 4440 if (!rp->descr) 4441 return -ENOMEM; 4442 if ((unsigned long)rp->descr & (64UL - 1)) { 4443 netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n", 4444 rp->descr); 4445 return -EINVAL; 4446 } 4447 4448 rp->pending = MAX_TX_RING_SIZE; 4449 rp->prod = 0; 4450 rp->cons = 0; 4451 rp->wrap_bit = 0; 4452 4453 /* XXX make these configurable... XXX */ 4454 rp->mark_freq = rp->pending / 4; 4455 4456 niu_set_max_burst(np, rp); 4457 4458 return 0; 4459} 4460 4461static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp) 4462{ 4463 u16 bss; 4464 4465 bss = min(PAGE_SHIFT, 15); 4466 4467 rp->rbr_block_size = 1 << bss; 4468 rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss); 4469 4470 rp->rbr_sizes[0] = 256; 4471 rp->rbr_sizes[1] = 1024; 4472 if (np->dev->mtu > ETH_DATA_LEN) { 4473 switch (PAGE_SIZE) { 4474 case 4 * 1024: 4475 rp->rbr_sizes[2] = 4096; 4476 break; 4477 4478 default: 4479 rp->rbr_sizes[2] = 8192; 4480 break; 4481 } 4482 } else { 4483 rp->rbr_sizes[2] = 2048; 4484 } 4485 rp->rbr_sizes[3] = rp->rbr_block_size; 4486} 4487 4488static int niu_alloc_channels(struct niu *np) 4489{ 4490 struct niu_parent *parent = np->parent; 4491 int first_rx_channel, first_tx_channel; 4492 int num_rx_rings, num_tx_rings; 4493 struct rx_ring_info *rx_rings; 4494 struct tx_ring_info *tx_rings; 4495 int i, port, err; 4496 4497 port = np->port; 4498 first_rx_channel = first_tx_channel = 0; 4499 for (i = 0; i < port; i++) { 4500 first_rx_channel += parent->rxchan_per_port[i]; 4501 first_tx_channel += parent->txchan_per_port[i]; 4502 } 4503 4504 num_rx_rings = parent->rxchan_per_port[port]; 4505 num_tx_rings = parent->txchan_per_port[port]; 4506 4507 rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info), 4508 GFP_KERNEL); 4509 err = -ENOMEM; 4510 if (!rx_rings) 4511 goto out_err; 4512 4513 np->num_rx_rings = num_rx_rings; 4514 smp_wmb(); 4515 np->rx_rings = rx_rings; 4516 4517 netif_set_real_num_rx_queues(np->dev, num_rx_rings); 4518 4519 for (i = 0; i < np->num_rx_rings; i++) { 4520 struct rx_ring_info *rp = &np->rx_rings[i]; 4521 4522 rp->np = np; 4523 rp->rx_channel = first_rx_channel + i; 4524 4525 err = niu_alloc_rx_ring_info(np, rp); 4526 if (err) 4527 goto out_err; 4528 4529 niu_size_rbr(np, rp); 4530 4531 /* XXX better defaults, configurable, etc... 
XXX */ 4532 rp->nonsyn_window = 64; 4533 rp->nonsyn_threshold = rp->rcr_table_size - 64; 4534 rp->syn_window = 64; 4535 rp->syn_threshold = rp->rcr_table_size - 64; 4536 rp->rcr_pkt_threshold = 16; 4537 rp->rcr_timeout = 8; 4538 rp->rbr_kick_thresh = RBR_REFILL_MIN; 4539 if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page) 4540 rp->rbr_kick_thresh = rp->rbr_blocks_per_page; 4541 4542 err = niu_rbr_fill(np, rp, GFP_KERNEL); 4543 if (err) 4544 return err; 4545 } 4546 4547 tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info), 4548 GFP_KERNEL); 4549 err = -ENOMEM; 4550 if (!tx_rings) 4551 goto out_err; 4552 4553 np->num_tx_rings = num_tx_rings; 4554 smp_wmb(); 4555 np->tx_rings = tx_rings; 4556 4557 netif_set_real_num_tx_queues(np->dev, num_tx_rings); 4558 4559 for (i = 0; i < np->num_tx_rings; i++) { 4560 struct tx_ring_info *rp = &np->tx_rings[i]; 4561 4562 rp->np = np; 4563 rp->tx_channel = first_tx_channel + i; 4564 4565 err = niu_alloc_tx_ring_info(np, rp); 4566 if (err) 4567 goto out_err; 4568 } 4569 4570 return 0; 4571 4572out_err: 4573 niu_free_channels(np); 4574 return err; 4575} 4576 4577static int niu_tx_cs_sng_poll(struct niu *np, int channel) 4578{ 4579 int limit = 1000; 4580 4581 while (--limit > 0) { 4582 u64 val = nr64(TX_CS(channel)); 4583 if (val & TX_CS_SNG_STATE) 4584 return 0; 4585 } 4586 return -ENODEV; 4587} 4588 4589static int niu_tx_channel_stop(struct niu *np, int channel) 4590{ 4591 u64 val = nr64(TX_CS(channel)); 4592 4593 val |= TX_CS_STOP_N_GO; 4594 nw64(TX_CS(channel), val); 4595 4596 return niu_tx_cs_sng_poll(np, channel); 4597} 4598 4599static int niu_tx_cs_reset_poll(struct niu *np, int channel) 4600{ 4601 int limit = 1000; 4602 4603 while (--limit > 0) { 4604 u64 val = nr64(TX_CS(channel)); 4605 if (!(val & TX_CS_RST)) 4606 return 0; 4607 } 4608 return -ENODEV; 4609} 4610 4611static int niu_tx_channel_reset(struct niu *np, int channel) 4612{ 4613 u64 val = nr64(TX_CS(channel)); 4614 int err; 4615 4616 val |= TX_CS_RST; 4617 nw64(TX_CS(channel), val); 4618 4619 err = niu_tx_cs_reset_poll(np, channel); 4620 if (!err) 4621 nw64(TX_RING_KICK(channel), 0); 4622 4623 return err; 4624} 4625 4626static int niu_tx_channel_lpage_init(struct niu *np, int channel) 4627{ 4628 u64 val; 4629 4630 nw64(TX_LOG_MASK1(channel), 0); 4631 nw64(TX_LOG_VAL1(channel), 0); 4632 nw64(TX_LOG_MASK2(channel), 0); 4633 nw64(TX_LOG_VAL2(channel), 0); 4634 nw64(TX_LOG_PAGE_RELO1(channel), 0); 4635 nw64(TX_LOG_PAGE_RELO2(channel), 0); 4636 nw64(TX_LOG_PAGE_HDL(channel), 0); 4637 4638 val = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT; 4639 val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1); 4640 nw64(TX_LOG_PAGE_VLD(channel), val); 4641 4642 /* XXX TXDMA 32bit mode? 
XXX */ 4643 4644 return 0; 4645} 4646 4647static void niu_txc_enable_port(struct niu *np, int on) 4648{ 4649 unsigned long flags; 4650 u64 val, mask; 4651 4652 niu_lock_parent(np, flags); 4653 val = nr64(TXC_CONTROL); 4654 mask = (u64)1 << np->port; 4655 if (on) { 4656 val |= TXC_CONTROL_ENABLE | mask; 4657 } else { 4658 val &= ~mask; 4659 if ((val & ~TXC_CONTROL_ENABLE) == 0) 4660 val &= ~TXC_CONTROL_ENABLE; 4661 } 4662 nw64(TXC_CONTROL, val); 4663 niu_unlock_parent(np, flags); 4664} 4665 4666static void niu_txc_set_imask(struct niu *np, u64 imask) 4667{ 4668 unsigned long flags; 4669 u64 val; 4670 4671 niu_lock_parent(np, flags); 4672 val = nr64(TXC_INT_MASK); 4673 val &= ~TXC_INT_MASK_VAL(np->port); 4674 val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port)); 4675 niu_unlock_parent(np, flags); 4676} 4677 4678static void niu_txc_port_dma_enable(struct niu *np, int on) 4679{ 4680 u64 val = 0; 4681 4682 if (on) { 4683 int i; 4684 4685 for (i = 0; i < np->num_tx_rings; i++) 4686 val |= (1 << np->tx_rings[i].tx_channel); 4687 } 4688 nw64(TXC_PORT_DMA(np->port), val); 4689} 4690 4691static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp) 4692{ 4693 int err, channel = rp->tx_channel; 4694 u64 val, ring_len; 4695 4696 err = niu_tx_channel_stop(np, channel); 4697 if (err) 4698 return err; 4699 4700 err = niu_tx_channel_reset(np, channel); 4701 if (err) 4702 return err; 4703 4704 err = niu_tx_channel_lpage_init(np, channel); 4705 if (err) 4706 return err; 4707 4708 nw64(TXC_DMA_MAX(channel), rp->max_burst); 4709 nw64(TX_ENT_MSK(channel), 0); 4710 4711 if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE | 4712 TX_RNG_CFIG_STADDR)) { 4713 netdev_err(np->dev, "TX ring channel %d DMA addr (%llx) is not aligned\n", 4714 channel, (unsigned long long)rp->descr_dma); 4715 return -EINVAL; 4716 } 4717 4718 /* The length field in TX_RNG_CFIG is measured in 64-byte 4719 * blocks. rp->pending is the number of TX descriptors in 4720 * our ring, 8 bytes each, thus we divide by 8 bytes more 4721 * to get the proper value the chip wants. 
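 * For example, a ring of 256 descriptors occupies 256 * 8 = 2048
 * bytes, i.e. 2048 / 64 = 32 blocks, which is exactly 256 / 8.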
4722 */ 4723 ring_len = (rp->pending / 8); 4724 4725 val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) | 4726 rp->descr_dma); 4727 nw64(TX_RNG_CFIG(channel), val); 4728 4729 if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) || 4730 ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) { 4731 netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n", 4732 channel, (unsigned long long)rp->mbox_dma); 4733 return -EINVAL; 4734 } 4735 nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32); 4736 nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR); 4737 4738 nw64(TX_CS(channel), 0); 4739 4740 rp->last_pkt_cnt = 0; 4741 4742 return 0; 4743} 4744 4745static void niu_init_rdc_groups(struct niu *np) 4746{ 4747 struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port]; 4748 int i, first_table_num = tp->first_table_num; 4749 4750 for (i = 0; i < tp->num_tables; i++) { 4751 struct rdc_table *tbl = &tp->tables[i]; 4752 int this_table = first_table_num + i; 4753 int slot; 4754 4755 for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) 4756 nw64(RDC_TBL(this_table, slot), 4757 tbl->rxdma_channel[slot]); 4758 } 4759 4760 nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]); 4761} 4762 4763static void niu_init_drr_weight(struct niu *np) 4764{ 4765 int type = phy_decode(np->parent->port_phy, np->port); 4766 u64 val; 4767 4768 switch (type) { 4769 case PORT_TYPE_10G: 4770 val = PT_DRR_WEIGHT_DEFAULT_10G; 4771 break; 4772 4773 case PORT_TYPE_1G: 4774 default: 4775 val = PT_DRR_WEIGHT_DEFAULT_1G; 4776 break; 4777 } 4778 nw64(PT_DRR_WT(np->port), val); 4779} 4780 4781static int niu_init_hostinfo(struct niu *np) 4782{ 4783 struct niu_parent *parent = np->parent; 4784 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; 4785 int i, err, num_alt = niu_num_alt_addr(np); 4786 int first_rdc_table = tp->first_table_num; 4787 4788 err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); 4789 if (err) 4790 return err; 4791 4792 err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); 4793 if (err) 4794 return err; 4795 4796 for (i = 0; i < num_alt; i++) { 4797 err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1); 4798 if (err) 4799 return err; 4800 } 4801 4802 return 0; 4803} 4804 4805static int niu_rx_channel_reset(struct niu *np, int channel) 4806{ 4807 return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel), 4808 RXDMA_CFIG1_RST, 1000, 10, 4809 "RXDMA_CFIG1"); 4810} 4811 4812static int niu_rx_channel_lpage_init(struct niu *np, int channel) 4813{ 4814 u64 val; 4815 4816 nw64(RX_LOG_MASK1(channel), 0); 4817 nw64(RX_LOG_VAL1(channel), 0); 4818 nw64(RX_LOG_MASK2(channel), 0); 4819 nw64(RX_LOG_VAL2(channel), 0); 4820 nw64(RX_LOG_PAGE_RELO1(channel), 0); 4821 nw64(RX_LOG_PAGE_RELO2(channel), 0); 4822 nw64(RX_LOG_PAGE_HDL(channel), 0); 4823 4824 val = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT; 4825 val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1); 4826 nw64(RX_LOG_PAGE_VLD(channel), val); 4827 4828 return 0; 4829} 4830 4831static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp) 4832{ 4833 u64 val; 4834 4835 val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) | 4836 ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) | 4837 ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) | 4838 ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT)); 4839 nw64(RDC_RED_PARA(rp->rx_channel), val); 4840} 4841 4842static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret) 4843{ 4844 u64 val = 0; 4845 4846 *ret = 0; 4847 switch (rp->rbr_block_size) { 4848 case 4 * 1024: 
4849 val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT); 4850 break; 4851 case 8 * 1024: 4852 val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT); 4853 break; 4854 case 16 * 1024: 4855 val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT); 4856 break; 4857 case 32 * 1024: 4858 val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT); 4859 break; 4860 default: 4861 return -EINVAL; 4862 } 4863 val |= RBR_CFIG_B_VLD2; 4864 switch (rp->rbr_sizes[2]) { 4865 case 2 * 1024: 4866 val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT); 4867 break; 4868 case 4 * 1024: 4869 val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT); 4870 break; 4871 case 8 * 1024: 4872 val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT); 4873 break; 4874 case 16 * 1024: 4875 val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT); 4876 break; 4877 4878 default: 4879 return -EINVAL; 4880 } 4881 val |= RBR_CFIG_B_VLD1; 4882 switch (rp->rbr_sizes[1]) { 4883 case 1 * 1024: 4884 val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT); 4885 break; 4886 case 2 * 1024: 4887 val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT); 4888 break; 4889 case 4 * 1024: 4890 val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT); 4891 break; 4892 case 8 * 1024: 4893 val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT); 4894 break; 4895 4896 default: 4897 return -EINVAL; 4898 } 4899 val |= RBR_CFIG_B_VLD0; 4900 switch (rp->rbr_sizes[0]) { 4901 case 256: 4902 val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT); 4903 break; 4904 case 512: 4905 val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT); 4906 break; 4907 case 1 * 1024: 4908 val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT); 4909 break; 4910 case 2 * 1024: 4911 val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT); 4912 break; 4913 4914 default: 4915 return -EINVAL; 4916 } 4917 4918 *ret = val; 4919 return 0; 4920} 4921 4922static int niu_enable_rx_channel(struct niu *np, int channel, int on) 4923{ 4924 u64 val = nr64(RXDMA_CFIG1(channel)); 4925 int limit; 4926 4927 if (on) 4928 val |= RXDMA_CFIG1_EN; 4929 else 4930 val &= ~RXDMA_CFIG1_EN; 4931 nw64(RXDMA_CFIG1(channel), val); 4932 4933 limit = 1000; 4934 while (--limit > 0) { 4935 if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST) 4936 break; 4937 udelay(10); 4938 } 4939 if (limit <= 0) 4940 return -ENODEV; 4941 return 0; 4942} 4943 4944static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp) 4945{ 4946 int err, channel = rp->rx_channel; 4947 u64 val; 4948 4949 err = niu_rx_channel_reset(np, channel); 4950 if (err) 4951 return err; 4952 4953 err = niu_rx_channel_lpage_init(np, channel); 4954 if (err) 4955 return err; 4956 4957 niu_rx_channel_wred_init(np, rp); 4958 4959 nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY); 4960 nw64(RX_DMA_CTL_STAT(channel), 4961 (RX_DMA_CTL_STAT_MEX | 4962 RX_DMA_CTL_STAT_RCRTHRES | 4963 RX_DMA_CTL_STAT_RCRTO | 4964 RX_DMA_CTL_STAT_RBR_EMPTY)); 4965 nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32); 4966 nw64(RXDMA_CFIG2(channel), 4967 ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) | 4968 RXDMA_CFIG2_FULL_HDR)); 4969 nw64(RBR_CFIG_A(channel), 4970 ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) | 4971 (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR))); 4972 err = niu_compute_rbr_cfig_b(rp, &val); 4973 if (err) 4974 return err; 4975 nw64(RBR_CFIG_B(channel), val); 4976 nw64(RCRCFIG_A(channel), 4977 ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) | 4978 (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR))); 4979 nw64(RCRCFIG_B(channel), 4980 ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) | 4981 
RCRCFIG_B_ENTOUT | 4982 ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT)); 4983 4984 err = niu_enable_rx_channel(np, channel, 1); 4985 if (err) 4986 return err; 4987 4988 nw64(RBR_KICK(channel), rp->rbr_index); 4989 4990 val = nr64(RX_DMA_CTL_STAT(channel)); 4991 val |= RX_DMA_CTL_STAT_RBR_EMPTY; 4992 nw64(RX_DMA_CTL_STAT(channel), val); 4993 4994 return 0; 4995} 4996 4997static int niu_init_rx_channels(struct niu *np) 4998{ 4999 unsigned long flags; 5000 u64 seed = jiffies_64; 5001 int err, i; 5002 5003 niu_lock_parent(np, flags); 5004 nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider); 5005 nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL)); 5006 niu_unlock_parent(np, flags); 5007 5008 /* XXX RXDMA 32bit mode? XXX */ 5009 5010 niu_init_rdc_groups(np); 5011 niu_init_drr_weight(np); 5012 5013 err = niu_init_hostinfo(np); 5014 if (err) 5015 return err; 5016 5017 for (i = 0; i < np->num_rx_rings; i++) { 5018 struct rx_ring_info *rp = &np->rx_rings[i]; 5019 5020 err = niu_init_one_rx_channel(np, rp); 5021 if (err) 5022 return err; 5023 } 5024 5025 return 0; 5026} 5027 5028static int niu_set_ip_frag_rule(struct niu *np) 5029{ 5030 struct niu_parent *parent = np->parent; 5031 struct niu_classifier *cp = &np->clas; 5032 struct niu_tcam_entry *tp; 5033 int index, err; 5034 5035 index = cp->tcam_top; 5036 tp = &parent->tcam[index]; 5037 5038 /* Note that the noport bit is the same in both ipv4 and 5039 * ipv6 format TCAM entries. 5040 */ 5041 memset(tp, 0, sizeof(*tp)); 5042 tp->key[1] = TCAM_V4KEY1_NOPORT; 5043 tp->key_mask[1] = TCAM_V4KEY1_NOPORT; 5044 tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET | 5045 ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT)); 5046 err = tcam_write(np, index, tp->key, tp->key_mask); 5047 if (err) 5048 return err; 5049 err = tcam_assoc_write(np, index, tp->assoc_data); 5050 if (err) 5051 return err; 5052 tp->valid = 1; 5053 cp->tcam_valid_entries++; 5054 5055 return 0; 5056} 5057 5058static int niu_init_classifier_hw(struct niu *np) 5059{ 5060 struct niu_parent *parent = np->parent; 5061 struct niu_classifier *cp = &np->clas; 5062 int i, err; 5063 5064 nw64(H1POLY, cp->h1_init); 5065 nw64(H2POLY, cp->h2_init); 5066 5067 err = niu_init_hostinfo(np); 5068 if (err) 5069 return err; 5070 5071 for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) { 5072 struct niu_vlan_rdc *vp = &cp->vlan_mappings[i]; 5073 5074 vlan_tbl_write(np, i, np->port, 5075 vp->vlan_pref, vp->rdc_num); 5076 } 5077 5078 for (i = 0; i < cp->num_alt_mac_mappings; i++) { 5079 struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i]; 5080 5081 err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num, 5082 ap->rdc_num, ap->mac_pref); 5083 if (err) 5084 return err; 5085 } 5086 5087 for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) { 5088 int index = i - CLASS_CODE_USER_PROG1; 5089 5090 err = niu_set_tcam_key(np, i, parent->tcam_key[index]); 5091 if (err) 5092 return err; 5093 err = niu_set_flow_key(np, i, parent->flow_key[index]); 5094 if (err) 5095 return err; 5096 } 5097 5098 err = niu_set_ip_frag_rule(np); 5099 if (err) 5100 return err; 5101 5102 tcam_enable(np, 1); 5103 5104 return 0; 5105} 5106 5107static int niu_zcp_write(struct niu *np, int index, u64 *data) 5108{ 5109 nw64(ZCP_RAM_DATA0, data[0]); 5110 nw64(ZCP_RAM_DATA1, data[1]); 5111 nw64(ZCP_RAM_DATA2, data[2]); 5112 nw64(ZCP_RAM_DATA3, data[3]); 5113 nw64(ZCP_RAM_DATA4, data[4]); 5114 nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL); 5115 nw64(ZCP_RAM_ACC, 5116 (ZCP_RAM_ACC_WRITE | 5117 (0 << ZCP_RAM_ACC_ZFCID_SHIFT) | 5118 
(ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT))); 5119 5120 return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, 5121 1000, 100); 5122} 5123 5124static int niu_zcp_read(struct niu *np, int index, u64 *data) 5125{ 5126 int err; 5127 5128 err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, 5129 1000, 100); 5130 if (err) { 5131 netdev_err(np->dev, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n", 5132 (unsigned long long)nr64(ZCP_RAM_ACC)); 5133 return err; 5134 } 5135 5136 nw64(ZCP_RAM_ACC, 5137 (ZCP_RAM_ACC_READ | 5138 (0 << ZCP_RAM_ACC_ZFCID_SHIFT) | 5139 (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT))); 5140 5141 err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, 5142 1000, 100); 5143 if (err) { 5144 netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n", 5145 (unsigned long long)nr64(ZCP_RAM_ACC)); 5146 return err; 5147 } 5148 5149 data[0] = nr64(ZCP_RAM_DATA0); 5150 data[1] = nr64(ZCP_RAM_DATA1); 5151 data[2] = nr64(ZCP_RAM_DATA2); 5152 data[3] = nr64(ZCP_RAM_DATA3); 5153 data[4] = nr64(ZCP_RAM_DATA4); 5154 5155 return 0; 5156} 5157 5158static void niu_zcp_cfifo_reset(struct niu *np) 5159{ 5160 u64 val = nr64(RESET_CFIFO); 5161 5162 val |= RESET_CFIFO_RST(np->port); 5163 nw64(RESET_CFIFO, val); 5164 udelay(10); 5165 5166 val &= ~RESET_CFIFO_RST(np->port); 5167 nw64(RESET_CFIFO, val); 5168} 5169 5170static int niu_init_zcp(struct niu *np) 5171{ 5172 u64 data[5], rbuf[5]; 5173 int i, max, err; 5174 5175 if (np->parent->plat_type != PLAT_TYPE_NIU) { 5176 if (np->port == 0 || np->port == 1) 5177 max = ATLAS_P0_P1_CFIFO_ENTRIES; 5178 else 5179 max = ATLAS_P2_P3_CFIFO_ENTRIES; 5180 } else 5181 max = NIU_CFIFO_ENTRIES; 5182 5183 data[0] = 0; 5184 data[1] = 0; 5185 data[2] = 0; 5186 data[3] = 0; 5187 data[4] = 0; 5188 5189 for (i = 0; i < max; i++) { 5190 err = niu_zcp_write(np, i, data); 5191 if (err) 5192 return err; 5193 err = niu_zcp_read(np, i, rbuf); 5194 if (err) 5195 return err; 5196 } 5197 5198 niu_zcp_cfifo_reset(np); 5199 nw64(CFIFO_ECC(np->port), 0); 5200 nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL); 5201 (void) nr64(ZCP_INT_STAT); 5202 nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL); 5203 5204 return 0; 5205} 5206 5207static void niu_ipp_write(struct niu *np, int index, u64 *data) 5208{ 5209 u64 val = nr64_ipp(IPP_CFIG); 5210 5211 nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W); 5212 nw64_ipp(IPP_DFIFO_WR_PTR, index); 5213 nw64_ipp(IPP_DFIFO_WR0, data[0]); 5214 nw64_ipp(IPP_DFIFO_WR1, data[1]); 5215 nw64_ipp(IPP_DFIFO_WR2, data[2]); 5216 nw64_ipp(IPP_DFIFO_WR3, data[3]); 5217 nw64_ipp(IPP_DFIFO_WR4, data[4]); 5218 nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W); 5219} 5220 5221static void niu_ipp_read(struct niu *np, int index, u64 *data) 5222{ 5223 nw64_ipp(IPP_DFIFO_RD_PTR, index); 5224 data[0] = nr64_ipp(IPP_DFIFO_RD0); 5225 data[1] = nr64_ipp(IPP_DFIFO_RD1); 5226 data[2] = nr64_ipp(IPP_DFIFO_RD2); 5227 data[3] = nr64_ipp(IPP_DFIFO_RD3); 5228 data[4] = nr64_ipp(IPP_DFIFO_RD4); 5229} 5230 5231static int niu_ipp_reset(struct niu *np) 5232{ 5233 return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST, 5234 1000, 100, "IPP_CFIG"); 5235} 5236 5237static int niu_init_ipp(struct niu *np) 5238{ 5239 u64 data[5], rbuf[5], val; 5240 int i, max, err; 5241 5242 if (np->parent->plat_type != PLAT_TYPE_NIU) { 5243 if (np->port == 0 || np->port == 1) 5244 max = ATLAS_P0_P1_DFIFO_ENTRIES; 5245 else 5246 max = ATLAS_P2_P3_DFIFO_ENTRIES; 5247 } else 5248 max = NIU_DFIFO_ENTRIES; 5249 5250 data[0] = 0; 5251 data[1] = 0; 5252 data[2] = 0; 5253 
data[3] = 0; 5254 data[4] = 0; 5255 5256 for (i = 0; i < max; i++) { 5257 niu_ipp_write(np, i, data); 5258 niu_ipp_read(np, i, rbuf); 5259 } 5260 5261 (void) nr64_ipp(IPP_INT_STAT); 5262 (void) nr64_ipp(IPP_INT_STAT); 5263 5264 err = niu_ipp_reset(np); 5265 if (err) 5266 return err; 5267 5268 (void) nr64_ipp(IPP_PKT_DIS); 5269 (void) nr64_ipp(IPP_BAD_CS_CNT); 5270 (void) nr64_ipp(IPP_ECC); 5271 5272 (void) nr64_ipp(IPP_INT_STAT); 5273 5274 nw64_ipp(IPP_MSK, ~IPP_MSK_ALL); 5275 5276 val = nr64_ipp(IPP_CFIG); 5277 val &= ~IPP_CFIG_IP_MAX_PKT; 5278 val |= (IPP_CFIG_IPP_ENABLE | 5279 IPP_CFIG_DFIFO_ECC_EN | 5280 IPP_CFIG_DROP_BAD_CRC | 5281 IPP_CFIG_CKSUM_EN | 5282 (0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT)); 5283 nw64_ipp(IPP_CFIG, val); 5284 5285 return 0; 5286} 5287 5288static void niu_handle_led(struct niu *np, int status) 5289{ 5290 u64 val; 5291 val = nr64_mac(XMAC_CONFIG); 5292 5293 if ((np->flags & NIU_FLAGS_10G) != 0 && 5294 (np->flags & NIU_FLAGS_FIBER) != 0) { 5295 if (status) { 5296 val |= XMAC_CONFIG_LED_POLARITY; 5297 val &= ~XMAC_CONFIG_FORCE_LED_ON; 5298 } else { 5299 val |= XMAC_CONFIG_FORCE_LED_ON; 5300 val &= ~XMAC_CONFIG_LED_POLARITY; 5301 } 5302 } 5303 5304 nw64_mac(XMAC_CONFIG, val); 5305} 5306 5307static void niu_init_xif_xmac(struct niu *np) 5308{ 5309 struct niu_link_config *lp = &np->link_config; 5310 u64 val; 5311 5312 if (np->flags & NIU_FLAGS_XCVR_SERDES) { 5313 val = nr64(MIF_CONFIG); 5314 val |= MIF_CONFIG_ATCA_GE; 5315 nw64(MIF_CONFIG, val); 5316 } 5317 5318 val = nr64_mac(XMAC_CONFIG); 5319 val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC; 5320 5321 val |= XMAC_CONFIG_TX_OUTPUT_EN; 5322 5323 if (lp->loopback_mode == LOOPBACK_MAC) { 5324 val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC; 5325 val |= XMAC_CONFIG_LOOPBACK; 5326 } else { 5327 val &= ~XMAC_CONFIG_LOOPBACK; 5328 } 5329 5330 if (np->flags & NIU_FLAGS_10G) { 5331 val &= ~XMAC_CONFIG_LFS_DISABLE; 5332 } else { 5333 val |= XMAC_CONFIG_LFS_DISABLE; 5334 if (!(np->flags & NIU_FLAGS_FIBER) && 5335 !(np->flags & NIU_FLAGS_XCVR_SERDES)) 5336 val |= XMAC_CONFIG_1G_PCS_BYPASS; 5337 else 5338 val &= ~XMAC_CONFIG_1G_PCS_BYPASS; 5339 } 5340 5341 val &= ~XMAC_CONFIG_10G_XPCS_BYPASS; 5342 5343 if (lp->active_speed == SPEED_100) 5344 val |= XMAC_CONFIG_SEL_CLK_25MHZ; 5345 else 5346 val &= ~XMAC_CONFIG_SEL_CLK_25MHZ; 5347 5348 nw64_mac(XMAC_CONFIG, val); 5349 5350 val = nr64_mac(XMAC_CONFIG); 5351 val &= ~XMAC_CONFIG_MODE_MASK; 5352 if (np->flags & NIU_FLAGS_10G) { 5353 val |= XMAC_CONFIG_MODE_XGMII; 5354 } else { 5355 if (lp->active_speed == SPEED_1000) 5356 val |= XMAC_CONFIG_MODE_GMII; 5357 else 5358 val |= XMAC_CONFIG_MODE_MII; 5359 } 5360 5361 nw64_mac(XMAC_CONFIG, val); 5362} 5363 5364static void niu_init_xif_bmac(struct niu *np) 5365{ 5366 struct niu_link_config *lp = &np->link_config; 5367 u64 val; 5368 5369 val = BMAC_XIF_CONFIG_TX_OUTPUT_EN; 5370 5371 if (lp->loopback_mode == LOOPBACK_MAC) 5372 val |= BMAC_XIF_CONFIG_MII_LOOPBACK; 5373 else 5374 val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK; 5375 5376 if (lp->active_speed == SPEED_1000) 5377 val |= BMAC_XIF_CONFIG_GMII_MODE; 5378 else 5379 val &= ~BMAC_XIF_CONFIG_GMII_MODE; 5380 5381 val &= ~(BMAC_XIF_CONFIG_LINK_LED | 5382 BMAC_XIF_CONFIG_LED_POLARITY); 5383 5384 if (!(np->flags & NIU_FLAGS_10G) && 5385 !(np->flags & NIU_FLAGS_FIBER) && 5386 lp->active_speed == SPEED_100) 5387 val |= BMAC_XIF_CONFIG_25MHZ_CLOCK; 5388 else 5389 val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK; 5390 5391 nw64_mac(BMAC_XIF_CONFIG, val); 5392} 5393 5394static void niu_init_xif(struct niu *np) 5395{ 5396 if (np->flags & 
NIU_FLAGS_XMAC) 5397 niu_init_xif_xmac(np); 5398 else 5399 niu_init_xif_bmac(np); 5400} 5401 5402static void niu_pcs_mii_reset(struct niu *np) 5403{ 5404 int limit = 1000; 5405 u64 val = nr64_pcs(PCS_MII_CTL); 5406 val |= PCS_MII_CTL_RST; 5407 nw64_pcs(PCS_MII_CTL, val); 5408 while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) { 5409 udelay(100); 5410 val = nr64_pcs(PCS_MII_CTL); 5411 } 5412} 5413 5414static void niu_xpcs_reset(struct niu *np) 5415{ 5416 int limit = 1000; 5417 u64 val = nr64_xpcs(XPCS_CONTROL1); 5418 val |= XPCS_CONTROL1_RESET; 5419 nw64_xpcs(XPCS_CONTROL1, val); 5420 while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) { 5421 udelay(100); 5422 val = nr64_xpcs(XPCS_CONTROL1); 5423 } 5424} 5425 5426static int niu_init_pcs(struct niu *np) 5427{ 5428 struct niu_link_config *lp = &np->link_config; 5429 u64 val; 5430 5431 switch (np->flags & (NIU_FLAGS_10G | 5432 NIU_FLAGS_FIBER | 5433 NIU_FLAGS_XCVR_SERDES)) { 5434 case NIU_FLAGS_FIBER: 5435 /* 1G fiber */ 5436 nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE); 5437 nw64_pcs(PCS_DPATH_MODE, 0); 5438 niu_pcs_mii_reset(np); 5439 break; 5440 5441 case NIU_FLAGS_10G: 5442 case NIU_FLAGS_10G | NIU_FLAGS_FIBER: 5443 case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES: 5444 /* 10G SERDES */ 5445 if (!(np->flags & NIU_FLAGS_XMAC)) 5446 return -EINVAL; 5447 5448 /* 10G copper or fiber */ 5449 val = nr64_mac(XMAC_CONFIG); 5450 val &= ~XMAC_CONFIG_10G_XPCS_BYPASS; 5451 nw64_mac(XMAC_CONFIG, val); 5452 5453 niu_xpcs_reset(np); 5454 5455 val = nr64_xpcs(XPCS_CONTROL1); 5456 if (lp->loopback_mode == LOOPBACK_PHY) 5457 val |= XPCS_CONTROL1_LOOPBACK; 5458 else 5459 val &= ~XPCS_CONTROL1_LOOPBACK; 5460 nw64_xpcs(XPCS_CONTROL1, val); 5461 5462 nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0); 5463 (void) nr64_xpcs(XPCS_SYMERR_CNT01); 5464 (void) nr64_xpcs(XPCS_SYMERR_CNT23); 5465 break; 5466 5467 5468 case NIU_FLAGS_XCVR_SERDES: 5469 /* 1G SERDES */ 5470 niu_pcs_mii_reset(np); 5471 nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE); 5472 nw64_pcs(PCS_DPATH_MODE, 0); 5473 break; 5474 5475 case 0: 5476 /* 1G copper */ 5477 case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER: 5478 /* 1G RGMII FIBER */ 5479 nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII); 5480 niu_pcs_mii_reset(np); 5481 break; 5482 5483 default: 5484 return -EINVAL; 5485 } 5486 5487 return 0; 5488} 5489 5490static int niu_reset_tx_xmac(struct niu *np) 5491{ 5492 return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST, 5493 (XTXMAC_SW_RST_REG_RS | 5494 XTXMAC_SW_RST_SOFT_RST), 5495 1000, 100, "XTXMAC_SW_RST"); 5496} 5497 5498static int niu_reset_tx_bmac(struct niu *np) 5499{ 5500 int limit; 5501 5502 nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET); 5503 limit = 1000; 5504 while (--limit >= 0) { 5505 if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET)) 5506 break; 5507 udelay(100); 5508 } 5509 if (limit < 0) { 5510 dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n", 5511 np->port, 5512 (unsigned long long) nr64_mac(BTXMAC_SW_RST)); 5513 return -ENODEV; 5514 } 5515 5516 return 0; 5517} 5518 5519static int niu_reset_tx_mac(struct niu *np) 5520{ 5521 if (np->flags & NIU_FLAGS_XMAC) 5522 return niu_reset_tx_xmac(np); 5523 else 5524 return niu_reset_tx_bmac(np); 5525} 5526 5527static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max) 5528{ 5529 u64 val; 5530 5531 val = nr64_mac(XMAC_MIN); 5532 val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE | 5533 XMAC_MIN_RX_MIN_PKT_SIZE); 5534 val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT); 5535 val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT); 5536 nw64_mac(XMAC_MIN, val); 
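	/* XMAC_MAX takes the maximum frame size directly; the caller,
	 * niu_init_tx_mac(), passes 9216 when a jumbo MTU is configured
	 * and 1522 otherwise, while the min value written above must
	 * keep its low 3 bits clear.
	 */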
5537 5538 nw64_mac(XMAC_MAX, max); 5539 5540 nw64_mac(XTXMAC_STAT_MSK, ~(u64)0); 5541 5542 val = nr64_mac(XMAC_IPG); 5543 if (np->flags & NIU_FLAGS_10G) { 5544 val &= ~XMAC_IPG_IPG_XGMII; 5545 val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT); 5546 } else { 5547 val &= ~XMAC_IPG_IPG_MII_GMII; 5548 val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT); 5549 } 5550 nw64_mac(XMAC_IPG, val); 5551 5552 val = nr64_mac(XMAC_CONFIG); 5553 val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC | 5554 XMAC_CONFIG_STRETCH_MODE | 5555 XMAC_CONFIG_VAR_MIN_IPG_EN | 5556 XMAC_CONFIG_TX_ENABLE); 5557 nw64_mac(XMAC_CONFIG, val); 5558 5559 nw64_mac(TXMAC_FRM_CNT, 0); 5560 nw64_mac(TXMAC_BYTE_CNT, 0); 5561} 5562 5563static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max) 5564{ 5565 u64 val; 5566 5567 nw64_mac(BMAC_MIN_FRAME, min); 5568 nw64_mac(BMAC_MAX_FRAME, max); 5569 5570 nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0); 5571 nw64_mac(BMAC_CTRL_TYPE, 0x8808); 5572 nw64_mac(BMAC_PREAMBLE_SIZE, 7); 5573 5574 val = nr64_mac(BTXMAC_CONFIG); 5575 val &= ~(BTXMAC_CONFIG_FCS_DISABLE | 5576 BTXMAC_CONFIG_ENABLE); 5577 nw64_mac(BTXMAC_CONFIG, val); 5578} 5579 5580static void niu_init_tx_mac(struct niu *np) 5581{ 5582 u64 min, max; 5583 5584 min = 64; 5585 if (np->dev->mtu > ETH_DATA_LEN) 5586 max = 9216; 5587 else 5588 max = 1522; 5589 5590 /* The XMAC_MIN register only accepts values for TX min which 5591 * have the low 3 bits cleared. 5592 */ 5593 BUG_ON(min & 0x7); 5594 5595 if (np->flags & NIU_FLAGS_XMAC) 5596 niu_init_tx_xmac(np, min, max); 5597 else 5598 niu_init_tx_bmac(np, min, max); 5599} 5600 5601static int niu_reset_rx_xmac(struct niu *np) 5602{ 5603 int limit; 5604 5605 nw64_mac(XRXMAC_SW_RST, 5606 XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST); 5607 limit = 1000; 5608 while (--limit >= 0) { 5609 if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS | 5610 XRXMAC_SW_RST_SOFT_RST))) 5611 break; 5612 udelay(100); 5613 } 5614 if (limit < 0) { 5615 dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n", 5616 np->port, 5617 (unsigned long long) nr64_mac(XRXMAC_SW_RST)); 5618 return -ENODEV; 5619 } 5620 5621 return 0; 5622} 5623 5624static int niu_reset_rx_bmac(struct niu *np) 5625{ 5626 int limit; 5627 5628 nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET); 5629 limit = 1000; 5630 while (--limit >= 0) { 5631 if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET)) 5632 break; 5633 udelay(100); 5634 } 5635 if (limit < 0) { 5636 dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n", 5637 np->port, 5638 (unsigned long long) nr64_mac(BRXMAC_SW_RST)); 5639 return -ENODEV; 5640 } 5641 5642 return 0; 5643} 5644 5645static int niu_reset_rx_mac(struct niu *np) 5646{ 5647 if (np->flags & NIU_FLAGS_XMAC) 5648 return niu_reset_rx_xmac(np); 5649 else 5650 return niu_reset_rx_bmac(np); 5651} 5652 5653static void niu_init_rx_xmac(struct niu *np) 5654{ 5655 struct niu_parent *parent = np->parent; 5656 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; 5657 int first_rdc_table = tp->first_table_num; 5658 unsigned long i; 5659 u64 val; 5660 5661 nw64_mac(XMAC_ADD_FILT0, 0); 5662 nw64_mac(XMAC_ADD_FILT1, 0); 5663 nw64_mac(XMAC_ADD_FILT2, 0); 5664 nw64_mac(XMAC_ADD_FILT12_MASK, 0); 5665 nw64_mac(XMAC_ADD_FILT00_MASK, 0); 5666 for (i = 0; i < MAC_NUM_HASH; i++) 5667 nw64_mac(XMAC_HASH_TBL(i), 0); 5668 nw64_mac(XRXMAC_STAT_MSK, ~(u64)0); 5669 niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); 5670 niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); 5671 5672 val = nr64_mac(XMAC_CONFIG); 
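	/* Read-modify-write of XMAC_CONFIG: the RX-side options
	 * (promiscuous modes, error-check and CRC-check disables, CRC
	 * stripping, pause handling) are cleared and only hash filtering
	 * is left enabled; the RX MAC itself is switched on later via
	 * niu_enable_rx_xmac().
	 */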
5673 val &= ~(XMAC_CONFIG_RX_MAC_ENABLE | 5674 XMAC_CONFIG_PROMISCUOUS | 5675 XMAC_CONFIG_PROMISC_GROUP | 5676 XMAC_CONFIG_ERR_CHK_DIS | 5677 XMAC_CONFIG_RX_CRC_CHK_DIS | 5678 XMAC_CONFIG_RESERVED_MULTICAST | 5679 XMAC_CONFIG_RX_CODEV_CHK_DIS | 5680 XMAC_CONFIG_ADDR_FILTER_EN | 5681 XMAC_CONFIG_RCV_PAUSE_ENABLE | 5682 XMAC_CONFIG_STRIP_CRC | 5683 XMAC_CONFIG_PASS_FLOW_CTRL | 5684 XMAC_CONFIG_MAC2IPP_PKT_CNT_EN); 5685 val |= (XMAC_CONFIG_HASH_FILTER_EN); 5686 nw64_mac(XMAC_CONFIG, val); 5687 5688 nw64_mac(RXMAC_BT_CNT, 0); 5689 nw64_mac(RXMAC_BC_FRM_CNT, 0); 5690 nw64_mac(RXMAC_MC_FRM_CNT, 0); 5691 nw64_mac(RXMAC_FRAG_CNT, 0); 5692 nw64_mac(RXMAC_HIST_CNT1, 0); 5693 nw64_mac(RXMAC_HIST_CNT2, 0); 5694 nw64_mac(RXMAC_HIST_CNT3, 0); 5695 nw64_mac(RXMAC_HIST_CNT4, 0); 5696 nw64_mac(RXMAC_HIST_CNT5, 0); 5697 nw64_mac(RXMAC_HIST_CNT6, 0); 5698 nw64_mac(RXMAC_HIST_CNT7, 0); 5699 nw64_mac(RXMAC_MPSZER_CNT, 0); 5700 nw64_mac(RXMAC_CRC_ER_CNT, 0); 5701 nw64_mac(RXMAC_CD_VIO_CNT, 0); 5702 nw64_mac(LINK_FAULT_CNT, 0); 5703} 5704 5705static void niu_init_rx_bmac(struct niu *np) 5706{ 5707 struct niu_parent *parent = np->parent; 5708 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; 5709 int first_rdc_table = tp->first_table_num; 5710 unsigned long i; 5711 u64 val; 5712 5713 nw64_mac(BMAC_ADD_FILT0, 0); 5714 nw64_mac(BMAC_ADD_FILT1, 0); 5715 nw64_mac(BMAC_ADD_FILT2, 0); 5716 nw64_mac(BMAC_ADD_FILT12_MASK, 0); 5717 nw64_mac(BMAC_ADD_FILT00_MASK, 0); 5718 for (i = 0; i < MAC_NUM_HASH; i++) 5719 nw64_mac(BMAC_HASH_TBL(i), 0); 5720 niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); 5721 niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); 5722 nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0); 5723 5724 val = nr64_mac(BRXMAC_CONFIG); 5725 val &= ~(BRXMAC_CONFIG_ENABLE | 5726 BRXMAC_CONFIG_STRIP_PAD | 5727 BRXMAC_CONFIG_STRIP_FCS | 5728 BRXMAC_CONFIG_PROMISC | 5729 BRXMAC_CONFIG_PROMISC_GRP | 5730 BRXMAC_CONFIG_ADDR_FILT_EN | 5731 BRXMAC_CONFIG_DISCARD_DIS); 5732 val |= (BRXMAC_CONFIG_HASH_FILT_EN); 5733 nw64_mac(BRXMAC_CONFIG, val); 5734 5735 val = nr64_mac(BMAC_ADDR_CMPEN); 5736 val |= BMAC_ADDR_CMPEN_EN0; 5737 nw64_mac(BMAC_ADDR_CMPEN, val); 5738} 5739 5740static void niu_init_rx_mac(struct niu *np) 5741{ 5742 niu_set_primary_mac(np, np->dev->dev_addr); 5743 5744 if (np->flags & NIU_FLAGS_XMAC) 5745 niu_init_rx_xmac(np); 5746 else 5747 niu_init_rx_bmac(np); 5748} 5749 5750static void niu_enable_tx_xmac(struct niu *np, int on) 5751{ 5752 u64 val = nr64_mac(XMAC_CONFIG); 5753 5754 if (on) 5755 val |= XMAC_CONFIG_TX_ENABLE; 5756 else 5757 val &= ~XMAC_CONFIG_TX_ENABLE; 5758 nw64_mac(XMAC_CONFIG, val); 5759} 5760 5761static void niu_enable_tx_bmac(struct niu *np, int on) 5762{ 5763 u64 val = nr64_mac(BTXMAC_CONFIG); 5764 5765 if (on) 5766 val |= BTXMAC_CONFIG_ENABLE; 5767 else 5768 val &= ~BTXMAC_CONFIG_ENABLE; 5769 nw64_mac(BTXMAC_CONFIG, val); 5770} 5771 5772static void niu_enable_tx_mac(struct niu *np, int on) 5773{ 5774 if (np->flags & NIU_FLAGS_XMAC) 5775 niu_enable_tx_xmac(np, on); 5776 else 5777 niu_enable_tx_bmac(np, on); 5778} 5779 5780static void niu_enable_rx_xmac(struct niu *np, int on) 5781{ 5782 u64 val = nr64_mac(XMAC_CONFIG); 5783 5784 val &= ~(XMAC_CONFIG_HASH_FILTER_EN | 5785 XMAC_CONFIG_PROMISCUOUS); 5786 5787 if (np->flags & NIU_FLAGS_MCAST) 5788 val |= XMAC_CONFIG_HASH_FILTER_EN; 5789 if (np->flags & NIU_FLAGS_PROMISC) 5790 val |= XMAC_CONFIG_PROMISCUOUS; 5791 5792 if (on) 5793 val |= XMAC_CONFIG_RX_MAC_ENABLE; 5794 else 5795 val &= ~XMAC_CONFIG_RX_MAC_ENABLE; 5796 
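	/* The hash-filter and promiscuous bits are recomputed from
	 * np->flags on every call, so a niu_set_rx_mode() update takes
	 * effect as soon as the RX MAC is re-enabled.
	 */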
nw64_mac(XMAC_CONFIG, val); 5797} 5798 5799static void niu_enable_rx_bmac(struct niu *np, int on) 5800{ 5801 u64 val = nr64_mac(BRXMAC_CONFIG); 5802 5803 val &= ~(BRXMAC_CONFIG_HASH_FILT_EN | 5804 BRXMAC_CONFIG_PROMISC); 5805 5806 if (np->flags & NIU_FLAGS_MCAST) 5807 val |= BRXMAC_CONFIG_HASH_FILT_EN; 5808 if (np->flags & NIU_FLAGS_PROMISC) 5809 val |= BRXMAC_CONFIG_PROMISC; 5810 5811 if (on) 5812 val |= BRXMAC_CONFIG_ENABLE; 5813 else 5814 val &= ~BRXMAC_CONFIG_ENABLE; 5815 nw64_mac(BRXMAC_CONFIG, val); 5816} 5817 5818static void niu_enable_rx_mac(struct niu *np, int on) 5819{ 5820 if (np->flags & NIU_FLAGS_XMAC) 5821 niu_enable_rx_xmac(np, on); 5822 else 5823 niu_enable_rx_bmac(np, on); 5824} 5825 5826static int niu_init_mac(struct niu *np) 5827{ 5828 int err; 5829 5830 niu_init_xif(np); 5831 err = niu_init_pcs(np); 5832 if (err) 5833 return err; 5834 5835 err = niu_reset_tx_mac(np); 5836 if (err) 5837 return err; 5838 niu_init_tx_mac(np); 5839 err = niu_reset_rx_mac(np); 5840 if (err) 5841 return err; 5842 niu_init_rx_mac(np); 5843 5844 /* This looks hookey but the RX MAC reset we just did will 5845 * undo some of the state we setup in niu_init_tx_mac() so we 5846 * have to call it again. In particular, the RX MAC reset will 5847 * set the XMAC_MAX register back to it's default value. 5848 */ 5849 niu_init_tx_mac(np); 5850 niu_enable_tx_mac(np, 1); 5851 5852 niu_enable_rx_mac(np, 1); 5853 5854 return 0; 5855} 5856 5857static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp) 5858{ 5859 (void) niu_tx_channel_stop(np, rp->tx_channel); 5860} 5861 5862static void niu_stop_tx_channels(struct niu *np) 5863{ 5864 int i; 5865 5866 for (i = 0; i < np->num_tx_rings; i++) { 5867 struct tx_ring_info *rp = &np->tx_rings[i]; 5868 5869 niu_stop_one_tx_channel(np, rp); 5870 } 5871} 5872 5873static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp) 5874{ 5875 (void) niu_tx_channel_reset(np, rp->tx_channel); 5876} 5877 5878static void niu_reset_tx_channels(struct niu *np) 5879{ 5880 int i; 5881 5882 for (i = 0; i < np->num_tx_rings; i++) { 5883 struct tx_ring_info *rp = &np->tx_rings[i]; 5884 5885 niu_reset_one_tx_channel(np, rp); 5886 } 5887} 5888 5889static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp) 5890{ 5891 (void) niu_enable_rx_channel(np, rp->rx_channel, 0); 5892} 5893 5894static void niu_stop_rx_channels(struct niu *np) 5895{ 5896 int i; 5897 5898 for (i = 0; i < np->num_rx_rings; i++) { 5899 struct rx_ring_info *rp = &np->rx_rings[i]; 5900 5901 niu_stop_one_rx_channel(np, rp); 5902 } 5903} 5904 5905static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp) 5906{ 5907 int channel = rp->rx_channel; 5908 5909 (void) niu_rx_channel_reset(np, channel); 5910 nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL); 5911 nw64(RX_DMA_CTL_STAT(channel), 0); 5912 (void) niu_enable_rx_channel(np, channel, 0); 5913} 5914 5915static void niu_reset_rx_channels(struct niu *np) 5916{ 5917 int i; 5918 5919 for (i = 0; i < np->num_rx_rings; i++) { 5920 struct rx_ring_info *rp = &np->rx_rings[i]; 5921 5922 niu_reset_one_rx_channel(np, rp); 5923 } 5924} 5925 5926static void niu_disable_ipp(struct niu *np) 5927{ 5928 u64 rd, wr, val; 5929 int limit; 5930 5931 rd = nr64_ipp(IPP_DFIFO_RD_PTR); 5932 wr = nr64_ipp(IPP_DFIFO_WR_PTR); 5933 limit = 100; 5934 while (--limit >= 0 && (rd != wr)) { 5935 rd = nr64_ipp(IPP_DFIFO_RD_PTR); 5936 wr = nr64_ipp(IPP_DFIFO_WR_PTR); 5937 } 5938 if (limit < 0 && 5939 (rd != 0 && wr != 1)) { 5940 
netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n", 5941 (unsigned long long)nr64_ipp(IPP_DFIFO_RD_PTR), 5942 (unsigned long long)nr64_ipp(IPP_DFIFO_WR_PTR)); 5943 } 5944 5945 val = nr64_ipp(IPP_CFIG); 5946 val &= ~(IPP_CFIG_IPP_ENABLE | 5947 IPP_CFIG_DFIFO_ECC_EN | 5948 IPP_CFIG_DROP_BAD_CRC | 5949 IPP_CFIG_CKSUM_EN); 5950 nw64_ipp(IPP_CFIG, val); 5951 5952 (void) niu_ipp_reset(np); 5953} 5954 5955static int niu_init_hw(struct niu *np) 5956{ 5957 int i, err; 5958 5959 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n"); 5960 niu_txc_enable_port(np, 1); 5961 niu_txc_port_dma_enable(np, 1); 5962 niu_txc_set_imask(np, 0); 5963 5964 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n"); 5965 for (i = 0; i < np->num_tx_rings; i++) { 5966 struct tx_ring_info *rp = &np->tx_rings[i]; 5967 5968 err = niu_init_one_tx_channel(np, rp); 5969 if (err) 5970 return err; 5971 } 5972 5973 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n"); 5974 err = niu_init_rx_channels(np); 5975 if (err) 5976 goto out_uninit_tx_channels; 5977 5978 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n"); 5979 err = niu_init_classifier_hw(np); 5980 if (err) 5981 goto out_uninit_rx_channels; 5982 5983 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n"); 5984 err = niu_init_zcp(np); 5985 if (err) 5986 goto out_uninit_rx_channels; 5987 5988 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n"); 5989 err = niu_init_ipp(np); 5990 if (err) 5991 goto out_uninit_rx_channels; 5992 5993 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n"); 5994 err = niu_init_mac(np); 5995 if (err) 5996 goto out_uninit_ipp; 5997 5998 return 0; 5999 6000out_uninit_ipp: 6001 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n"); 6002 niu_disable_ipp(np); 6003 6004out_uninit_rx_channels: 6005 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n"); 6006 niu_stop_rx_channels(np); 6007 niu_reset_rx_channels(np); 6008 6009out_uninit_tx_channels: 6010 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n"); 6011 niu_stop_tx_channels(np); 6012 niu_reset_tx_channels(np); 6013 6014 return err; 6015} 6016 6017static void niu_stop_hw(struct niu *np) 6018{ 6019 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n"); 6020 niu_enable_interrupts(np, 0); 6021 6022 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n"); 6023 niu_enable_rx_mac(np, 0); 6024 6025 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n"); 6026 niu_disable_ipp(np); 6027 6028 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n"); 6029 niu_stop_tx_channels(np); 6030 6031 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n"); 6032 niu_stop_rx_channels(np); 6033 6034 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n"); 6035 niu_reset_tx_channels(np); 6036 6037 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n"); 6038 niu_reset_rx_channels(np); 6039} 6040 6041static void niu_set_irq_name(struct niu *np) 6042{ 6043 int port = np->port; 6044 int i, j = 1; 6045 6046 sprintf(np->irq_name[0], "%s:MAC", np->dev->name); 6047 6048 if (port == 0) { 6049 sprintf(np->irq_name[1], "%s:MIF", np->dev->name); 6050 sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name); 6051 j = 3; 6052 } 6053 6054 for (i = 0; i < np->num_ldg - j; i++) { 6055 if (i < np->num_rx_rings) 6056 sprintf(np->irq_name[i+j], "%s-rx-%d", 6057 np->dev->name, i); 6058 else if (i < np->num_tx_rings + 
np->num_rx_rings) 6059 sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name, 6060 i - np->num_rx_rings); 6061 } 6062} 6063 6064static int niu_request_irq(struct niu *np) 6065{ 6066 int i, j, err; 6067 6068 niu_set_irq_name(np); 6069 6070 err = 0; 6071 for (i = 0; i < np->num_ldg; i++) { 6072 struct niu_ldg *lp = &np->ldg[i]; 6073 6074 err = request_irq(lp->irq, niu_interrupt, 6075 IRQF_SHARED | IRQF_SAMPLE_RANDOM, 6076 np->irq_name[i], lp); 6077 if (err) 6078 goto out_free_irqs; 6079 6080 } 6081 6082 return 0; 6083 6084out_free_irqs: 6085 for (j = 0; j < i; j++) { 6086 struct niu_ldg *lp = &np->ldg[j]; 6087 6088 free_irq(lp->irq, lp); 6089 } 6090 return err; 6091} 6092 6093static void niu_free_irq(struct niu *np) 6094{ 6095 int i; 6096 6097 for (i = 0; i < np->num_ldg; i++) { 6098 struct niu_ldg *lp = &np->ldg[i]; 6099 6100 free_irq(lp->irq, lp); 6101 } 6102} 6103 6104static void niu_enable_napi(struct niu *np) 6105{ 6106 int i; 6107 6108 for (i = 0; i < np->num_ldg; i++) 6109 napi_enable(&np->ldg[i].napi); 6110} 6111 6112static void niu_disable_napi(struct niu *np) 6113{ 6114 int i; 6115 6116 for (i = 0; i < np->num_ldg; i++) 6117 napi_disable(&np->ldg[i].napi); 6118} 6119 6120static int niu_open(struct net_device *dev) 6121{ 6122 struct niu *np = netdev_priv(dev); 6123 int err; 6124 6125 netif_carrier_off(dev); 6126 6127 err = niu_alloc_channels(np); 6128 if (err) 6129 goto out_err; 6130 6131 err = niu_enable_interrupts(np, 0); 6132 if (err) 6133 goto out_free_channels; 6134 6135 err = niu_request_irq(np); 6136 if (err) 6137 goto out_free_channels; 6138 6139 niu_enable_napi(np); 6140 6141 spin_lock_irq(&np->lock); 6142 6143 err = niu_init_hw(np); 6144 if (!err) { 6145 init_timer(&np->timer); 6146 np->timer.expires = jiffies + HZ; 6147 np->timer.data = (unsigned long) np; 6148 np->timer.function = niu_timer; 6149 6150 err = niu_enable_interrupts(np, 1); 6151 if (err) 6152 niu_stop_hw(np); 6153 } 6154 6155 spin_unlock_irq(&np->lock); 6156 6157 if (err) { 6158 niu_disable_napi(np); 6159 goto out_free_irq; 6160 } 6161 6162 netif_tx_start_all_queues(dev); 6163 6164 if (np->link_config.loopback_mode != LOOPBACK_DISABLED) 6165 netif_carrier_on(dev); 6166 6167 add_timer(&np->timer); 6168 6169 return 0; 6170 6171out_free_irq: 6172 niu_free_irq(np); 6173 6174out_free_channels: 6175 niu_free_channels(np); 6176 6177out_err: 6178 return err; 6179} 6180 6181static void niu_full_shutdown(struct niu *np, struct net_device *dev) 6182{ 6183 cancel_work_sync(&np->reset_task); 6184 6185 niu_disable_napi(np); 6186 netif_tx_stop_all_queues(dev); 6187 6188 del_timer_sync(&np->timer); 6189 6190 spin_lock_irq(&np->lock); 6191 6192 niu_stop_hw(np); 6193 6194 spin_unlock_irq(&np->lock); 6195} 6196 6197static int niu_close(struct net_device *dev) 6198{ 6199 struct niu *np = netdev_priv(dev); 6200 6201 niu_full_shutdown(np, dev); 6202 6203 niu_free_irq(np); 6204 6205 niu_free_channels(np); 6206 6207 niu_handle_led(np, 0); 6208 6209 return 0; 6210} 6211 6212static void niu_sync_xmac_stats(struct niu *np) 6213{ 6214 struct niu_xmac_stats *mp = &np->mac_stats.xmac; 6215 6216 mp->tx_frames += nr64_mac(TXMAC_FRM_CNT); 6217 mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT); 6218 6219 mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT); 6220 mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT); 6221 mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT); 6222 mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT); 6223 mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT); 6224 mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1); 6225 mp->rx_hist_cnt2 += 
nr64_mac(RXMAC_HIST_CNT2); 6226 mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3); 6227 mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4); 6228 mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5); 6229 mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6); 6230 mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7); 6231 mp->rx_octets += nr64_mac(RXMAC_BT_CNT); 6232 mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT); 6233 mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT); 6234 mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT); 6235} 6236 6237static void niu_sync_bmac_stats(struct niu *np) 6238{ 6239 struct niu_bmac_stats *mp = &np->mac_stats.bmac; 6240 6241 mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT); 6242 mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT); 6243 6244 mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT); 6245 mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT); 6246 mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT); 6247 mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT); 6248} 6249 6250static void niu_sync_mac_stats(struct niu *np) 6251{ 6252 if (np->flags & NIU_FLAGS_XMAC) 6253 niu_sync_xmac_stats(np); 6254 else 6255 niu_sync_bmac_stats(np); 6256} 6257 6258static void niu_get_rx_stats(struct niu *np) 6259{ 6260 unsigned long pkts, dropped, errors, bytes; 6261 struct rx_ring_info *rx_rings; 6262 int i; 6263 6264 pkts = dropped = errors = bytes = 0; 6265 6266 rx_rings = ACCESS_ONCE(np->rx_rings); 6267 if (!rx_rings) 6268 goto no_rings; 6269 6270 for (i = 0; i < np->num_rx_rings; i++) { 6271 struct rx_ring_info *rp = &rx_rings[i]; 6272 6273 niu_sync_rx_discard_stats(np, rp, 0); 6274 6275 pkts += rp->rx_packets; 6276 bytes += rp->rx_bytes; 6277 dropped += rp->rx_dropped; 6278 errors += rp->rx_errors; 6279 } 6280 6281no_rings: 6282 np->dev->stats.rx_packets = pkts; 6283 np->dev->stats.rx_bytes = bytes; 6284 np->dev->stats.rx_dropped = dropped; 6285 np->dev->stats.rx_errors = errors; 6286} 6287 6288static void niu_get_tx_stats(struct niu *np) 6289{ 6290 unsigned long pkts, errors, bytes; 6291 struct tx_ring_info *tx_rings; 6292 int i; 6293 6294 pkts = errors = bytes = 0; 6295 6296 tx_rings = ACCESS_ONCE(np->tx_rings); 6297 if (!tx_rings) 6298 goto no_rings; 6299 6300 for (i = 0; i < np->num_tx_rings; i++) { 6301 struct tx_ring_info *rp = &tx_rings[i]; 6302 6303 pkts += rp->tx_packets; 6304 bytes += rp->tx_bytes; 6305 errors += rp->tx_errors; 6306 } 6307 6308no_rings: 6309 np->dev->stats.tx_packets = pkts; 6310 np->dev->stats.tx_bytes = bytes; 6311 np->dev->stats.tx_errors = errors; 6312} 6313 6314static struct net_device_stats *niu_get_stats(struct net_device *dev) 6315{ 6316 struct niu *np = netdev_priv(dev); 6317 6318 if (netif_running(dev)) { 6319 niu_get_rx_stats(np); 6320 niu_get_tx_stats(np); 6321 } 6322 return &dev->stats; 6323} 6324 6325static void niu_load_hash_xmac(struct niu *np, u16 *hash) 6326{ 6327 int i; 6328 6329 for (i = 0; i < 16; i++) 6330 nw64_mac(XMAC_HASH_TBL(i), hash[i]); 6331} 6332 6333static void niu_load_hash_bmac(struct niu *np, u16 *hash) 6334{ 6335 int i; 6336 6337 for (i = 0; i < 16; i++) 6338 nw64_mac(BMAC_HASH_TBL(i), hash[i]); 6339} 6340 6341static void niu_load_hash(struct niu *np, u16 *hash) 6342{ 6343 if (np->flags & NIU_FLAGS_XMAC) 6344 niu_load_hash_xmac(np, hash); 6345 else 6346 niu_load_hash_bmac(np, hash); 6347} 6348 6349static void niu_set_rx_mode(struct net_device *dev) 6350{ 6351 struct niu *np = netdev_priv(dev); 6352 int i, alt_cnt, err; 6353 struct netdev_hw_addr *ha; 6354 unsigned long flags; 6355 u16 hash[16] = { 0, }; 6356 6357 spin_lock_irqsave(&np->lock, flags); 6358 
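	/* Reprogram the filters with the RX MAC disabled: the promisc
	 * and multicast flags, the alternate unicast MACs and the hash
	 * table are all rewritten below, and the MAC is re-enabled only
	 * once the new state is in place.
	 */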
niu_enable_rx_mac(np, 0); 6359 6360 np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC); 6361 if (dev->flags & IFF_PROMISC) 6362 np->flags |= NIU_FLAGS_PROMISC; 6363 if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev))) 6364 np->flags |= NIU_FLAGS_MCAST; 6365 6366 alt_cnt = netdev_uc_count(dev); 6367 if (alt_cnt > niu_num_alt_addr(np)) { 6368 alt_cnt = 0; 6369 np->flags |= NIU_FLAGS_PROMISC; 6370 } 6371 6372 if (alt_cnt) { 6373 int index = 0; 6374 6375 netdev_for_each_uc_addr(ha, dev) { 6376 err = niu_set_alt_mac(np, index, ha->addr); 6377 if (err) 6378 netdev_warn(dev, "Error %d adding alt mac %d\n", 6379 err, index); 6380 err = niu_enable_alt_mac(np, index, 1); 6381 if (err) 6382 netdev_warn(dev, "Error %d enabling alt mac %d\n", 6383 err, index); 6384 6385 index++; 6386 } 6387 } else { 6388 int alt_start; 6389 if (np->flags & NIU_FLAGS_XMAC) 6390 alt_start = 0; 6391 else 6392 alt_start = 1; 6393 for (i = alt_start; i < niu_num_alt_addr(np); i++) { 6394 err = niu_enable_alt_mac(np, i, 0); 6395 if (err) 6396 netdev_warn(dev, "Error %d disabling alt mac %d\n", 6397 err, i); 6398 } 6399 } 6400 if (dev->flags & IFF_ALLMULTI) { 6401 for (i = 0; i < 16; i++) 6402 hash[i] = 0xffff; 6403 } else if (!netdev_mc_empty(dev)) { 6404 netdev_for_each_mc_addr(ha, dev) { 6405 u32 crc = ether_crc_le(ETH_ALEN, ha->addr); 6406 6407 crc >>= 24; 6408 hash[crc >> 4] |= (1 << (15 - (crc & 0xf))); 6409 } 6410 } 6411 6412 if (np->flags & NIU_FLAGS_MCAST) 6413 niu_load_hash(np, hash); 6414 6415 niu_enable_rx_mac(np, 1); 6416 spin_unlock_irqrestore(&np->lock, flags); 6417} 6418 6419static int niu_set_mac_addr(struct net_device *dev, void *p) 6420{ 6421 struct niu *np = netdev_priv(dev); 6422 struct sockaddr *addr = p; 6423 unsigned long flags; 6424 6425 if (!is_valid_ether_addr(addr->sa_data)) 6426 return -EINVAL; 6427 6428 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 6429 6430 if (!netif_running(dev)) 6431 return 0; 6432 6433 spin_lock_irqsave(&np->lock, flags); 6434 niu_enable_rx_mac(np, 0); 6435 niu_set_primary_mac(np, dev->dev_addr); 6436 niu_enable_rx_mac(np, 1); 6437 spin_unlock_irqrestore(&np->lock, flags); 6438 6439 return 0; 6440} 6441 6442static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 6443{ 6444 return -EOPNOTSUPP; 6445} 6446 6447static void niu_netif_stop(struct niu *np) 6448{ 6449 np->dev->trans_start = jiffies; /* prevent tx timeout */ 6450 6451 niu_disable_napi(np); 6452 6453 netif_tx_disable(np->dev); 6454} 6455 6456static void niu_netif_start(struct niu *np) 6457{ 6458 /* NOTE: unconditional netif_wake_queue is only appropriate 6459 * so long as all callers are assured to have free tx slots 6460 * (such as after niu_init_hw). 
6461 */ 6462 netif_tx_wake_all_queues(np->dev); 6463 6464 niu_enable_napi(np); 6465 6466 niu_enable_interrupts(np, 1); 6467} 6468 6469static void niu_reset_buffers(struct niu *np) 6470{ 6471 int i, j, k, err; 6472 6473 if (np->rx_rings) { 6474 for (i = 0; i < np->num_rx_rings; i++) { 6475 struct rx_ring_info *rp = &np->rx_rings[i]; 6476 6477 for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) { 6478 struct page *page; 6479 6480 page = rp->rxhash[j]; 6481 while (page) { 6482 struct page *next = 6483 (struct page *) page->mapping; 6484 u64 base = page->index; 6485 base = base >> RBR_DESCR_ADDR_SHIFT; 6486 rp->rbr[k++] = cpu_to_le32(base); 6487 page = next; 6488 } 6489 } 6490 for (; k < MAX_RBR_RING_SIZE; k++) { 6491 err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k); 6492 if (unlikely(err)) 6493 break; 6494 } 6495 6496 rp->rbr_index = rp->rbr_table_size - 1; 6497 rp->rcr_index = 0; 6498 rp->rbr_pending = 0; 6499 rp->rbr_refill_pending = 0; 6500 } 6501 } 6502 if (np->tx_rings) { 6503 for (i = 0; i < np->num_tx_rings; i++) { 6504 struct tx_ring_info *rp = &np->tx_rings[i]; 6505 6506 for (j = 0; j < MAX_TX_RING_SIZE; j++) { 6507 if (rp->tx_buffs[j].skb) 6508 (void) release_tx_packet(np, rp, j); 6509 } 6510 6511 rp->pending = MAX_TX_RING_SIZE; 6512 rp->prod = 0; 6513 rp->cons = 0; 6514 rp->wrap_bit = 0; 6515 } 6516 } 6517} 6518 6519static void niu_reset_task(struct work_struct *work) 6520{ 6521 struct niu *np = container_of(work, struct niu, reset_task); 6522 unsigned long flags; 6523 int err; 6524 6525 spin_lock_irqsave(&np->lock, flags); 6526 if (!netif_running(np->dev)) { 6527 spin_unlock_irqrestore(&np->lock, flags); 6528 return; 6529 } 6530 6531 spin_unlock_irqrestore(&np->lock, flags); 6532 6533 del_timer_sync(&np->timer); 6534 6535 niu_netif_stop(np); 6536 6537 spin_lock_irqsave(&np->lock, flags); 6538 6539 niu_stop_hw(np); 6540 6541 spin_unlock_irqrestore(&np->lock, flags); 6542 6543 niu_reset_buffers(np); 6544 6545 spin_lock_irqsave(&np->lock, flags); 6546 6547 err = niu_init_hw(np); 6548 if (!err) { 6549 np->timer.expires = jiffies + HZ; 6550 add_timer(&np->timer); 6551 niu_netif_start(np); 6552 } 6553 6554 spin_unlock_irqrestore(&np->lock, flags); 6555} 6556 6557static void niu_tx_timeout(struct net_device *dev) 6558{ 6559 struct niu *np = netdev_priv(dev); 6560 6561 dev_err(np->device, "%s: Transmit timed out, resetting\n", 6562 dev->name); 6563 6564 schedule_work(&np->reset_task); 6565} 6566 6567static void niu_set_txd(struct tx_ring_info *rp, int index, 6568 u64 mapping, u64 len, u64 mark, 6569 u64 n_frags) 6570{ 6571 __le64 *desc = &rp->descr[index]; 6572 6573 *desc = cpu_to_le64(mark | 6574 (n_frags << TX_DESC_NUM_PTR_SHIFT) | 6575 (len << TX_DESC_TR_LEN_SHIFT) | 6576 (mapping & TX_DESC_SAD)); 6577} 6578 6579static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr, 6580 u64 pad_bytes, u64 len) 6581{ 6582 u16 eth_proto, eth_proto_inner; 6583 u64 csum_bits, l3off, ihl, ret; 6584 u8 ip_proto; 6585 int ipv6; 6586 6587 eth_proto = be16_to_cpu(ehdr->h_proto); 6588 eth_proto_inner = eth_proto; 6589 if (eth_proto == ETH_P_8021Q) { 6590 struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr; 6591 __be16 val = vp->h_vlan_encapsulated_proto; 6592 6593 eth_proto_inner = be16_to_cpu(val); 6594 } 6595 6596 ipv6 = ihl = 0; 6597 switch (skb->protocol) { 6598 case cpu_to_be16(ETH_P_IP): 6599 ip_proto = ip_hdr(skb)->protocol; 6600 ihl = ip_hdr(skb)->ihl; 6601 break; 6602 case cpu_to_be16(ETH_P_IPV6): 6603 ip_proto = ipv6_hdr(skb)->nexthdr; 6604 ihl = (40 >> 2); 6605 ipv6 = 1; 6606 break; 6607 
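		/* As with the IPv4 ihl field above, ihl counts 32-bit
		 * words, so the fixed 40-byte IPv6 header is 40 >> 2 = 10.
		 */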
default: 6608 ip_proto = ihl = 0; 6609 break; 6610 } 6611 6612 csum_bits = TXHDR_CSUM_NONE; 6613 if (skb->ip_summed == CHECKSUM_PARTIAL) { 6614 u64 start, stuff; 6615 6616 csum_bits = (ip_proto == IPPROTO_TCP ? 6617 TXHDR_CSUM_TCP : 6618 (ip_proto == IPPROTO_UDP ? 6619 TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP)); 6620 6621 start = skb_checksum_start_offset(skb) - 6622 (pad_bytes + sizeof(struct tx_pkt_hdr)); 6623 stuff = start + skb->csum_offset; 6624 6625 csum_bits |= (start / 2) << TXHDR_L4START_SHIFT; 6626 csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT; 6627 } 6628 6629 l3off = skb_network_offset(skb) - 6630 (pad_bytes + sizeof(struct tx_pkt_hdr)); 6631 6632 ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) | 6633 (len << TXHDR_LEN_SHIFT) | 6634 ((l3off / 2) << TXHDR_L3START_SHIFT) | 6635 (ihl << TXHDR_IHL_SHIFT) | 6636 ((eth_proto_inner < 1536) ? TXHDR_LLC : 0) | 6637 ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) | 6638 (ipv6 ? TXHDR_IP_VER : 0) | 6639 csum_bits); 6640 6641 return ret; 6642} 6643 6644static netdev_tx_t niu_start_xmit(struct sk_buff *skb, 6645 struct net_device *dev) 6646{ 6647 struct niu *np = netdev_priv(dev); 6648 unsigned long align, headroom; 6649 struct netdev_queue *txq; 6650 struct tx_ring_info *rp; 6651 struct tx_pkt_hdr *tp; 6652 unsigned int len, nfg; 6653 struct ethhdr *ehdr; 6654 int prod, i, tlen; 6655 u64 mapping, mrk; 6656 6657 i = skb_get_queue_mapping(skb); 6658 rp = &np->tx_rings[i]; 6659 txq = netdev_get_tx_queue(dev, i); 6660 6661 if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) { 6662 netif_tx_stop_queue(txq); 6663 dev_err(np->device, "%s: BUG! Tx ring full when queue awake!\n", dev->name); 6664 rp->tx_errors++; 6665 return NETDEV_TX_BUSY; 6666 } 6667 6668 if (skb->len < ETH_ZLEN) { 6669 unsigned int pad_bytes = ETH_ZLEN - skb->len; 6670 6671 if (skb_pad(skb, pad_bytes)) 6672 goto out; 6673 skb_put(skb, pad_bytes); 6674 } 6675 6676 len = sizeof(struct tx_pkt_hdr) + 15; 6677 if (skb_headroom(skb) < len) { 6678 struct sk_buff *skb_new; 6679 6680 skb_new = skb_realloc_headroom(skb, len); 6681 if (!skb_new) { 6682 rp->tx_errors++; 6683 goto out_drop; 6684 } 6685 kfree_skb(skb); 6686 skb = skb_new; 6687 } else 6688 skb_orphan(skb); 6689 6690 align = ((unsigned long) skb->data & (16 - 1)); 6691 headroom = align + sizeof(struct tx_pkt_hdr); 6692 6693 ehdr = (struct ethhdr *) skb->data; 6694 tp = (struct tx_pkt_hdr *) skb_push(skb, headroom); 6695 6696 len = skb->len - sizeof(struct tx_pkt_hdr); 6697 tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len)); 6698 tp->resv = 0; 6699 6700 len = skb_headlen(skb); 6701 mapping = np->ops->map_single(np->device, skb->data, 6702 len, DMA_TO_DEVICE); 6703 6704 prod = rp->prod; 6705 6706 rp->tx_buffs[prod].skb = skb; 6707 rp->tx_buffs[prod].mapping = mapping; 6708 6709 mrk = TX_DESC_SOP; 6710 if (++rp->mark_counter == rp->mark_freq) { 6711 rp->mark_counter = 0; 6712 mrk |= TX_DESC_MARK; 6713 rp->mark_pending++; 6714 } 6715 6716 tlen = len; 6717 nfg = skb_shinfo(skb)->nr_frags; 6718 while (tlen > 0) { 6719 tlen -= MAX_TX_DESC_LEN; 6720 nfg++; 6721 } 6722 6723 while (len > 0) { 6724 unsigned int this_len = len; 6725 6726 if (this_len > MAX_TX_DESC_LEN) 6727 this_len = MAX_TX_DESC_LEN; 6728 6729 niu_set_txd(rp, prod, mapping, this_len, mrk, nfg); 6730 mrk = nfg = 0; 6731 6732 prod = NEXT_TX(rp, prod); 6733 mapping += this_len; 6734 len -= this_len; 6735 } 6736 6737 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 6738 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 6739 6740 len = frag->size; 6741 mapping = 
np->ops->map_page(np->device, frag->page, 6742 frag->page_offset, len, 6743 DMA_TO_DEVICE); 6744 6745 rp->tx_buffs[prod].skb = NULL; 6746 rp->tx_buffs[prod].mapping = mapping; 6747 6748 niu_set_txd(rp, prod, mapping, len, 0, 0); 6749 6750 prod = NEXT_TX(rp, prod); 6751 } 6752 6753 if (prod < rp->prod) 6754 rp->wrap_bit ^= TX_RING_KICK_WRAP; 6755 rp->prod = prod; 6756 6757 nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3)); 6758 6759 if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) { 6760 netif_tx_stop_queue(txq); 6761 if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)) 6762 netif_tx_wake_queue(txq); 6763 } 6764 6765out: 6766 return NETDEV_TX_OK; 6767 6768out_drop: 6769 rp->tx_errors++; 6770 kfree_skb(skb); 6771 goto out; 6772} 6773 6774static int niu_change_mtu(struct net_device *dev, int new_mtu) 6775{ 6776 struct niu *np = netdev_priv(dev); 6777 int err, orig_jumbo, new_jumbo; 6778 6779 if (new_mtu < 68 || new_mtu > NIU_MAX_MTU) 6780 return -EINVAL; 6781 6782 orig_jumbo = (dev->mtu > ETH_DATA_LEN); 6783 new_jumbo = (new_mtu > ETH_DATA_LEN); 6784 6785 dev->mtu = new_mtu; 6786 6787 if (!netif_running(dev) || 6788 (orig_jumbo == new_jumbo)) 6789 return 0; 6790 6791 niu_full_shutdown(np, dev); 6792 6793 niu_free_channels(np); 6794 6795 niu_enable_napi(np); 6796 6797 err = niu_alloc_channels(np); 6798 if (err) 6799 return err; 6800 6801 spin_lock_irq(&np->lock); 6802 6803 err = niu_init_hw(np); 6804 if (!err) { 6805 init_timer(&np->timer); 6806 np->timer.expires = jiffies + HZ; 6807 np->timer.data = (unsigned long) np; 6808 np->timer.function = niu_timer; 6809 6810 err = niu_enable_interrupts(np, 1); 6811 if (err) 6812 niu_stop_hw(np); 6813 } 6814 6815 spin_unlock_irq(&np->lock); 6816 6817 if (!err) { 6818 netif_tx_start_all_queues(dev); 6819 if (np->link_config.loopback_mode != LOOPBACK_DISABLED) 6820 netif_carrier_on(dev); 6821 6822 add_timer(&np->timer); 6823 } 6824 6825 return err; 6826} 6827 6828static void niu_get_drvinfo(struct net_device *dev, 6829 struct ethtool_drvinfo *info) 6830{ 6831 struct niu *np = netdev_priv(dev); 6832 struct niu_vpd *vpd = &np->vpd; 6833 6834 strcpy(info->driver, DRV_MODULE_NAME); 6835 strcpy(info->version, DRV_MODULE_VERSION); 6836 sprintf(info->fw_version, "%d.%d", 6837 vpd->fcode_major, vpd->fcode_minor); 6838 if (np->parent->plat_type != PLAT_TYPE_NIU) 6839 strcpy(info->bus_info, pci_name(np->pdev)); 6840} 6841 6842static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 6843{ 6844 struct niu *np = netdev_priv(dev); 6845 struct niu_link_config *lp; 6846 6847 lp = &np->link_config; 6848 6849 memset(cmd, 0, sizeof(*cmd)); 6850 cmd->phy_address = np->phy_addr; 6851 cmd->supported = lp->supported; 6852 cmd->advertising = lp->active_advertising; 6853 cmd->autoneg = lp->active_autoneg; 6854 cmd->speed = lp->active_speed; 6855 cmd->duplex = lp->active_duplex; 6856 cmd->port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP; 6857 cmd->transceiver = (np->flags & NIU_FLAGS_XCVR_SERDES) ? 
6858 XCVR_EXTERNAL : XCVR_INTERNAL; 6859 6860 return 0; 6861} 6862 6863static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 6864{ 6865 struct niu *np = netdev_priv(dev); 6866 struct niu_link_config *lp = &np->link_config; 6867 6868 lp->advertising = cmd->advertising; 6869 lp->speed = cmd->speed; 6870 lp->duplex = cmd->duplex; 6871 lp->autoneg = cmd->autoneg; 6872 return niu_init_link(np); 6873} 6874 6875static u32 niu_get_msglevel(struct net_device *dev) 6876{ 6877 struct niu *np = netdev_priv(dev); 6878 return np->msg_enable; 6879} 6880 6881static void niu_set_msglevel(struct net_device *dev, u32 value) 6882{ 6883 struct niu *np = netdev_priv(dev); 6884 np->msg_enable = value; 6885} 6886 6887static int niu_nway_reset(struct net_device *dev) 6888{ 6889 struct niu *np = netdev_priv(dev); 6890 6891 if (np->link_config.autoneg) 6892 return niu_init_link(np); 6893 6894 return 0; 6895} 6896 6897static int niu_get_eeprom_len(struct net_device *dev) 6898{ 6899 struct niu *np = netdev_priv(dev); 6900 6901 return np->eeprom_len; 6902} 6903 6904static int niu_get_eeprom(struct net_device *dev, 6905 struct ethtool_eeprom *eeprom, u8 *data) 6906{ 6907 struct niu *np = netdev_priv(dev); 6908 u32 offset, len, val; 6909 6910 offset = eeprom->offset; 6911 len = eeprom->len; 6912 6913 if (offset + len < offset) 6914 return -EINVAL; 6915 if (offset >= np->eeprom_len) 6916 return -EINVAL; 6917 if (offset + len > np->eeprom_len) 6918 len = eeprom->len = np->eeprom_len - offset; 6919 6920 if (offset & 3) { 6921 u32 b_offset, b_count; 6922 6923 b_offset = offset & 3; 6924 b_count = 4 - b_offset; 6925 if (b_count > len) 6926 b_count = len; 6927 6928 val = nr64(ESPC_NCR((offset - b_offset) / 4)); 6929 memcpy(data, ((char *)&val) + b_offset, b_count); 6930 data += b_count; 6931 len -= b_count; 6932 offset += b_count; 6933 } 6934 while (len >= 4) { 6935 val = nr64(ESPC_NCR(offset / 4)); 6936 memcpy(data, &val, 4); 6937 data += 4; 6938 len -= 4; 6939 offset += 4; 6940 } 6941 if (len) { 6942 val = nr64(ESPC_NCR(offset / 4)); 6943 memcpy(data, &val, len); 6944 } 6945 return 0; 6946} 6947 6948static void niu_ethflow_to_l3proto(int flow_type, u8 *pid) 6949{ 6950 switch (flow_type) { 6951 case TCP_V4_FLOW: 6952 case TCP_V6_FLOW: 6953 *pid = IPPROTO_TCP; 6954 break; 6955 case UDP_V4_FLOW: 6956 case UDP_V6_FLOW: 6957 *pid = IPPROTO_UDP; 6958 break; 6959 case SCTP_V4_FLOW: 6960 case SCTP_V6_FLOW: 6961 *pid = IPPROTO_SCTP; 6962 break; 6963 case AH_V4_FLOW: 6964 case AH_V6_FLOW: 6965 *pid = IPPROTO_AH; 6966 break; 6967 case ESP_V4_FLOW: 6968 case ESP_V6_FLOW: 6969 *pid = IPPROTO_ESP; 6970 break; 6971 default: 6972 *pid = 0; 6973 break; 6974 } 6975} 6976 6977static int niu_class_to_ethflow(u64 class, int *flow_type) 6978{ 6979 switch (class) { 6980 case CLASS_CODE_TCP_IPV4: 6981 *flow_type = TCP_V4_FLOW; 6982 break; 6983 case CLASS_CODE_UDP_IPV4: 6984 *flow_type = UDP_V4_FLOW; 6985 break; 6986 case CLASS_CODE_AH_ESP_IPV4: 6987 *flow_type = AH_V4_FLOW; 6988 break; 6989 case CLASS_CODE_SCTP_IPV4: 6990 *flow_type = SCTP_V4_FLOW; 6991 break; 6992 case CLASS_CODE_TCP_IPV6: 6993 *flow_type = TCP_V6_FLOW; 6994 break; 6995 case CLASS_CODE_UDP_IPV6: 6996 *flow_type = UDP_V6_FLOW; 6997 break; 6998 case CLASS_CODE_AH_ESP_IPV6: 6999 *flow_type = AH_V6_FLOW; 7000 break; 7001 case CLASS_CODE_SCTP_IPV6: 7002 *flow_type = SCTP_V6_FLOW; 7003 break; 7004 case CLASS_CODE_USER_PROG1: 7005 case CLASS_CODE_USER_PROG2: 7006 case CLASS_CODE_USER_PROG3: 7007 case CLASS_CODE_USER_PROG4: 7008 *flow_type = IP_USER_FLOW; 7009 
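		/* All four user-programmable classes are reported to
		 * ethtool as the generic IP_USER_FLOW type; which
		 * programmable slot a rule actually occupies is recovered
		 * later from the class code kept in the TCAM key and the
		 * parent's l3_cls[] table.
		 */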
break; 7010 default: 7011 return 0; 7012 } 7013 7014 return 1; 7015} 7016 7017static int niu_ethflow_to_class(int flow_type, u64 *class) 7018{ 7019 switch (flow_type) { 7020 case TCP_V4_FLOW: 7021 *class = CLASS_CODE_TCP_IPV4; 7022 break; 7023 case UDP_V4_FLOW: 7024 *class = CLASS_CODE_UDP_IPV4; 7025 break; 7026 case AH_V4_FLOW: 7027 case ESP_V4_FLOW: 7028 *class = CLASS_CODE_AH_ESP_IPV4; 7029 break; 7030 case SCTP_V4_FLOW: 7031 *class = CLASS_CODE_SCTP_IPV4; 7032 break; 7033 case TCP_V6_FLOW: 7034 *class = CLASS_CODE_TCP_IPV6; 7035 break; 7036 case UDP_V6_FLOW: 7037 *class = CLASS_CODE_UDP_IPV6; 7038 break; 7039 case AH_V6_FLOW: 7040 case ESP_V6_FLOW: 7041 *class = CLASS_CODE_AH_ESP_IPV6; 7042 break; 7043 case SCTP_V6_FLOW: 7044 *class = CLASS_CODE_SCTP_IPV6; 7045 break; 7046 default: 7047 return 0; 7048 } 7049 7050 return 1; 7051} 7052 7053static u64 niu_flowkey_to_ethflow(u64 flow_key) 7054{ 7055 u64 ethflow = 0; 7056 7057 if (flow_key & FLOW_KEY_L2DA) 7058 ethflow |= RXH_L2DA; 7059 if (flow_key & FLOW_KEY_VLAN) 7060 ethflow |= RXH_VLAN; 7061 if (flow_key & FLOW_KEY_IPSA) 7062 ethflow |= RXH_IP_SRC; 7063 if (flow_key & FLOW_KEY_IPDA) 7064 ethflow |= RXH_IP_DST; 7065 if (flow_key & FLOW_KEY_PROTO) 7066 ethflow |= RXH_L3_PROTO; 7067 if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT)) 7068 ethflow |= RXH_L4_B_0_1; 7069 if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT)) 7070 ethflow |= RXH_L4_B_2_3; 7071 7072 return ethflow; 7073 7074} 7075 7076static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key) 7077{ 7078 u64 key = 0; 7079 7080 if (ethflow & RXH_L2DA) 7081 key |= FLOW_KEY_L2DA; 7082 if (ethflow & RXH_VLAN) 7083 key |= FLOW_KEY_VLAN; 7084 if (ethflow & RXH_IP_SRC) 7085 key |= FLOW_KEY_IPSA; 7086 if (ethflow & RXH_IP_DST) 7087 key |= FLOW_KEY_IPDA; 7088 if (ethflow & RXH_L3_PROTO) 7089 key |= FLOW_KEY_PROTO; 7090 if (ethflow & RXH_L4_B_0_1) 7091 key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT); 7092 if (ethflow & RXH_L4_B_2_3) 7093 key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT); 7094 7095 *flow_key = key; 7096 7097 return 1; 7098 7099} 7100 7101static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc) 7102{ 7103 u64 class; 7104 7105 nfc->data = 0; 7106 7107 if (!niu_ethflow_to_class(nfc->flow_type, &class)) 7108 return -EINVAL; 7109 7110 if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] & 7111 TCAM_KEY_DISC) 7112 nfc->data = RXH_DISCARD; 7113 else 7114 nfc->data = niu_flowkey_to_ethflow(np->parent->flow_key[class - 7115 CLASS_CODE_USER_PROG1]); 7116 return 0; 7117} 7118 7119static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp, 7120 struct ethtool_rx_flow_spec *fsp) 7121{ 7122 u32 tmp; 7123 u16 prt; 7124 7125 tmp = (tp->key[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT; 7126 fsp->h_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp); 7127 7128 tmp = (tp->key[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT; 7129 fsp->h_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp); 7130 7131 tmp = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT; 7132 fsp->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp); 7133 7134 tmp = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT; 7135 fsp->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp); 7136 7137 fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >> 7138 TCAM_V4KEY2_TOS_SHIFT; 7139 fsp->m_u.tcp_ip4_spec.tos = (tp->key_mask[2] & TCAM_V4KEY2_TOS) >> 7140 TCAM_V4KEY2_TOS_SHIFT; 7141 7142 switch (fsp->flow_type) { 7143 case TCP_V4_FLOW: 7144 case UDP_V4_FLOW: 7145 case SCTP_V4_FLOW: 7146 
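		/* For the port-based flows the 32-bit PORT_SPI field in
		 * key[2] carries the source port in its upper 16 bits and
		 * the destination port in its lower 16 bits, so it is split
		 * back into psrc/pdst (and likewise for the mask) below.
		 * For instance, a PORT_SPI value of 0x01bb0050 decodes to
		 * psrc 443 and pdst 80.
		 */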
prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> 7147 TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16; 7148 fsp->h_u.tcp_ip4_spec.psrc = cpu_to_be16(prt); 7149 7150 prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> 7151 TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff; 7152 fsp->h_u.tcp_ip4_spec.pdst = cpu_to_be16(prt); 7153 7154 prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> 7155 TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16; 7156 fsp->m_u.tcp_ip4_spec.psrc = cpu_to_be16(prt); 7157 7158 prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> 7159 TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff; 7160 fsp->m_u.tcp_ip4_spec.pdst = cpu_to_be16(prt); 7161 break; 7162 case AH_V4_FLOW: 7163 case ESP_V4_FLOW: 7164 tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> 7165 TCAM_V4KEY2_PORT_SPI_SHIFT; 7166 fsp->h_u.ah_ip4_spec.spi = cpu_to_be32(tmp); 7167 7168 tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> 7169 TCAM_V4KEY2_PORT_SPI_SHIFT; 7170 fsp->m_u.ah_ip4_spec.spi = cpu_to_be32(tmp); 7171 break; 7172 case IP_USER_FLOW: 7173 tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> 7174 TCAM_V4KEY2_PORT_SPI_SHIFT; 7175 fsp->h_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp); 7176 7177 tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> 7178 TCAM_V4KEY2_PORT_SPI_SHIFT; 7179 fsp->m_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp); 7180 7181 fsp->h_u.usr_ip4_spec.proto = 7182 (tp->key[2] & TCAM_V4KEY2_PROTO) >> 7183 TCAM_V4KEY2_PROTO_SHIFT; 7184 fsp->m_u.usr_ip4_spec.proto = 7185 (tp->key_mask[2] & TCAM_V4KEY2_PROTO) >> 7186 TCAM_V4KEY2_PROTO_SHIFT; 7187 7188 fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; 7189 break; 7190 default: 7191 break; 7192 } 7193} 7194 7195static int niu_get_ethtool_tcam_entry(struct niu *np, 7196 struct ethtool_rxnfc *nfc) 7197{ 7198 struct niu_parent *parent = np->parent; 7199 struct niu_tcam_entry *tp; 7200 struct ethtool_rx_flow_spec *fsp = &nfc->fs; 7201 u16 idx; 7202 u64 class; 7203 int ret = 0; 7204 7205 idx = tcam_get_index(np, (u16)nfc->fs.location); 7206 7207 tp = &parent->tcam[idx]; 7208 if (!tp->valid) { 7209 netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n", 7210 parent->index, (u16)nfc->fs.location, idx); 7211 return -EINVAL; 7212 } 7213 7214 /* fill the flow spec entry */ 7215 class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >> 7216 TCAM_V4KEY0_CLASS_CODE_SHIFT; 7217 ret = niu_class_to_ethflow(class, &fsp->flow_type); 7218 7219 if (ret < 0) { 7220 netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n", 7221 parent->index); 7222 ret = -EINVAL; 7223 goto out; 7224 } 7225 7226 if (fsp->flow_type == AH_V4_FLOW || fsp->flow_type == AH_V6_FLOW) { 7227 u32 proto = (tp->key[2] & TCAM_V4KEY2_PROTO) >> 7228 TCAM_V4KEY2_PROTO_SHIFT; 7229 if (proto == IPPROTO_ESP) { 7230 if (fsp->flow_type == AH_V4_FLOW) 7231 fsp->flow_type = ESP_V4_FLOW; 7232 else 7233 fsp->flow_type = ESP_V6_FLOW; 7234 } 7235 } 7236 7237 switch (fsp->flow_type) { 7238 case TCP_V4_FLOW: 7239 case UDP_V4_FLOW: 7240 case SCTP_V4_FLOW: 7241 case AH_V4_FLOW: 7242 case ESP_V4_FLOW: 7243 niu_get_ip4fs_from_tcam_key(tp, fsp); 7244 break; 7245 case TCP_V6_FLOW: 7246 case UDP_V6_FLOW: 7247 case SCTP_V6_FLOW: 7248 case AH_V6_FLOW: 7249 case ESP_V6_FLOW: 7250 /* Not yet implemented */ 7251 ret = -EINVAL; 7252 break; 7253 case IP_USER_FLOW: 7254 niu_get_ip4fs_from_tcam_key(tp, fsp); 7255 break; 7256 default: 7257 ret = -EINVAL; 7258 break; 7259 } 7260 7261 if (ret < 0) 7262 goto out; 7263 7264 if (tp->assoc_data & TCAM_ASSOCDATA_DISC) 7265 fsp->ring_cookie = RX_CLS_FLOW_DISC; 7266 else 7267 fsp->ring_cookie = (tp->assoc_data & TCAM_ASSOCDATA_OFFSET) >> 7268 
TCAM_ASSOCDATA_OFFSET_SHIFT; 7269 7270 /* put the tcam size here */ 7271 nfc->data = tcam_get_size(np); 7272out: 7273 return ret; 7274} 7275 7276static int niu_get_ethtool_tcam_all(struct niu *np, 7277 struct ethtool_rxnfc *nfc, 7278 u32 *rule_locs) 7279{ 7280 struct niu_parent *parent = np->parent; 7281 struct niu_tcam_entry *tp; 7282 int i, idx, cnt; 7283 unsigned long flags; 7284 int ret = 0; 7285 7286 /* put the tcam size here */ 7287 nfc->data = tcam_get_size(np); 7288 7289 niu_lock_parent(np, flags); 7290 for (cnt = 0, i = 0; i < nfc->data; i++) { 7291 idx = tcam_get_index(np, i); 7292 tp = &parent->tcam[idx]; 7293 if (!tp->valid) 7294 continue; 7295 if (cnt == nfc->rule_cnt) { 7296 ret = -EMSGSIZE; 7297 break; 7298 } 7299 rule_locs[cnt] = i; 7300 cnt++; 7301 } 7302 niu_unlock_parent(np, flags); 7303 7304 return ret; 7305} 7306 7307static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, 7308 void *rule_locs) 7309{ 7310 struct niu *np = netdev_priv(dev); 7311 int ret = 0; 7312 7313 switch (cmd->cmd) { 7314 case ETHTOOL_GRXFH: 7315 ret = niu_get_hash_opts(np, cmd); 7316 break; 7317 case ETHTOOL_GRXRINGS: 7318 cmd->data = np->num_rx_rings; 7319 break; 7320 case ETHTOOL_GRXCLSRLCNT: 7321 cmd->rule_cnt = tcam_get_valid_entry_cnt(np); 7322 break; 7323 case ETHTOOL_GRXCLSRULE: 7324 ret = niu_get_ethtool_tcam_entry(np, cmd); 7325 break; 7326 case ETHTOOL_GRXCLSRLALL: 7327 ret = niu_get_ethtool_tcam_all(np, cmd, (u32 *)rule_locs); 7328 break; 7329 default: 7330 ret = -EINVAL; 7331 break; 7332 } 7333 7334 return ret; 7335} 7336 7337static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc) 7338{ 7339 u64 class; 7340 u64 flow_key = 0; 7341 unsigned long flags; 7342 7343 if (!niu_ethflow_to_class(nfc->flow_type, &class)) 7344 return -EINVAL; 7345 7346 if (class < CLASS_CODE_USER_PROG1 || 7347 class > CLASS_CODE_SCTP_IPV6) 7348 return -EINVAL; 7349 7350 if (nfc->data & RXH_DISCARD) { 7351 niu_lock_parent(np, flags); 7352 flow_key = np->parent->tcam_key[class - 7353 CLASS_CODE_USER_PROG1]; 7354 flow_key |= TCAM_KEY_DISC; 7355 nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key); 7356 np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key; 7357 niu_unlock_parent(np, flags); 7358 return 0; 7359 } else { 7360 /* Discard was set before, but is not set now */ 7361 if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] & 7362 TCAM_KEY_DISC) { 7363 niu_lock_parent(np, flags); 7364 flow_key = np->parent->tcam_key[class - 7365 CLASS_CODE_USER_PROG1]; 7366 flow_key &= ~TCAM_KEY_DISC; 7367 nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), 7368 flow_key); 7369 np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = 7370 flow_key; 7371 niu_unlock_parent(np, flags); 7372 } 7373 } 7374 7375 if (!niu_ethflow_to_flowkey(nfc->data, &flow_key)) 7376 return -EINVAL; 7377 7378 niu_lock_parent(np, flags); 7379 nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key); 7380 np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key; 7381 niu_unlock_parent(np, flags); 7382 7383 return 0; 7384} 7385 7386static void niu_get_tcamkey_from_ip4fs(struct ethtool_rx_flow_spec *fsp, 7387 struct niu_tcam_entry *tp, 7388 int l2_rdc_tab, u64 class) 7389{ 7390 u8 pid = 0; 7391 u32 sip, dip, sipm, dipm, spi, spim; 7392 u16 sport, dport, spm, dpm; 7393 7394 sip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4src); 7395 sipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4src); 7396 dip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4dst); 7397 dipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4dst); 7398 7399 tp->key[0] = class << 
TCAM_V4KEY0_CLASS_CODE_SHIFT; 7400 tp->key_mask[0] = TCAM_V4KEY0_CLASS_CODE; 7401 tp->key[1] = (u64)l2_rdc_tab << TCAM_V4KEY1_L2RDCNUM_SHIFT; 7402 tp->key_mask[1] = TCAM_V4KEY1_L2RDCNUM; 7403 7404 tp->key[3] = (u64)sip << TCAM_V4KEY3_SADDR_SHIFT; 7405 tp->key[3] |= dip; 7406 7407 tp->key_mask[3] = (u64)sipm << TCAM_V4KEY3_SADDR_SHIFT; 7408 tp->key_mask[3] |= dipm; 7409 7410 tp->key[2] |= ((u64)fsp->h_u.tcp_ip4_spec.tos << 7411 TCAM_V4KEY2_TOS_SHIFT); 7412 tp->key_mask[2] |= ((u64)fsp->m_u.tcp_ip4_spec.tos << 7413 TCAM_V4KEY2_TOS_SHIFT); 7414 switch (fsp->flow_type) { 7415 case TCP_V4_FLOW: 7416 case UDP_V4_FLOW: 7417 case SCTP_V4_FLOW: 7418 sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc); 7419 spm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc); 7420 dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst); 7421 dpm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst); 7422 7423 tp->key[2] |= (((u64)sport << 16) | dport); 7424 tp->key_mask[2] |= (((u64)spm << 16) | dpm); 7425 niu_ethflow_to_l3proto(fsp->flow_type, &pid); 7426 break; 7427 case AH_V4_FLOW: 7428 case ESP_V4_FLOW: 7429 spi = be32_to_cpu(fsp->h_u.ah_ip4_spec.spi); 7430 spim = be32_to_cpu(fsp->m_u.ah_ip4_spec.spi); 7431 7432 tp->key[2] |= spi; 7433 tp->key_mask[2] |= spim; 7434 niu_ethflow_to_l3proto(fsp->flow_type, &pid); 7435 break; 7436 case IP_USER_FLOW: 7437 spi = be32_to_cpu(fsp->h_u.usr_ip4_spec.l4_4_bytes); 7438 spim = be32_to_cpu(fsp->m_u.usr_ip4_spec.l4_4_bytes); 7439 7440 tp->key[2] |= spi; 7441 tp->key_mask[2] |= spim; 7442 pid = fsp->h_u.usr_ip4_spec.proto; 7443 break; 7444 default: 7445 break; 7446 } 7447 7448 tp->key[2] |= ((u64)pid << TCAM_V4KEY2_PROTO_SHIFT); 7449 if (pid) { 7450 tp->key_mask[2] |= TCAM_V4KEY2_PROTO; 7451 } 7452} 7453 7454static int niu_add_ethtool_tcam_entry(struct niu *np, 7455 struct ethtool_rxnfc *nfc) 7456{ 7457 struct niu_parent *parent = np->parent; 7458 struct niu_tcam_entry *tp; 7459 struct ethtool_rx_flow_spec *fsp = &nfc->fs; 7460 struct niu_rdc_tables *rdc_table = &parent->rdc_group_cfg[np->port]; 7461 int l2_rdc_table = rdc_table->first_table_num; 7462 u16 idx; 7463 u64 class; 7464 unsigned long flags; 7465 int err, ret; 7466 7467 ret = 0; 7468 7469 idx = nfc->fs.location; 7470 if (idx >= tcam_get_size(np)) 7471 return -EINVAL; 7472 7473 if (fsp->flow_type == IP_USER_FLOW) { 7474 int i; 7475 int add_usr_cls = 0; 7476 struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec; 7477 struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec; 7478 7479 if (uspec->ip_ver != ETH_RX_NFC_IP4) 7480 return -EINVAL; 7481 7482 niu_lock_parent(np, flags); 7483 7484 for (i = 0; i < NIU_L3_PROG_CLS; i++) { 7485 if (parent->l3_cls[i]) { 7486 if (uspec->proto == parent->l3_cls_pid[i]) { 7487 class = parent->l3_cls[i]; 7488 parent->l3_cls_refcnt[i]++; 7489 add_usr_cls = 1; 7490 break; 7491 } 7492 } else { 7493 /* Program new user IP class */ 7494 switch (i) { 7495 case 0: 7496 class = CLASS_CODE_USER_PROG1; 7497 break; 7498 case 1: 7499 class = CLASS_CODE_USER_PROG2; 7500 break; 7501 case 2: 7502 class = CLASS_CODE_USER_PROG3; 7503 break; 7504 case 3: 7505 class = CLASS_CODE_USER_PROG4; 7506 break; 7507 default: 7508 break; 7509 } 7510 ret = tcam_user_ip_class_set(np, class, 0, 7511 uspec->proto, 7512 uspec->tos, 7513 umask->tos); 7514 if (ret) 7515 goto out; 7516 7517 ret = tcam_user_ip_class_enable(np, class, 1); 7518 if (ret) 7519 goto out; 7520 parent->l3_cls[i] = class; 7521 parent->l3_cls_pid[i] = uspec->proto; 7522 parent->l3_cls_refcnt[i]++; 7523 add_usr_cls = 1; 7524 break; 7525 } 7526 } 7527 if (!add_usr_cls) { 
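			/* None of the NIU_L3_PROG_CLS user class slots was
			 * free or already bound to this protocol, so the
			 * rule cannot be installed.
			 */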
7528 netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n", 7529 parent->index, __func__, uspec->proto); 7530 ret = -EINVAL; 7531 goto out; 7532 } 7533 niu_unlock_parent(np, flags); 7534 } else { 7535 if (!niu_ethflow_to_class(fsp->flow_type, &class)) { 7536 return -EINVAL; 7537 } 7538 } 7539 7540 niu_lock_parent(np, flags); 7541 7542 idx = tcam_get_index(np, idx); 7543 tp = &parent->tcam[idx]; 7544 7545 memset(tp, 0, sizeof(*tp)); 7546 7547 /* fill in the tcam key and mask */ 7548 switch (fsp->flow_type) { 7549 case TCP_V4_FLOW: 7550 case UDP_V4_FLOW: 7551 case SCTP_V4_FLOW: 7552 case AH_V4_FLOW: 7553 case ESP_V4_FLOW: 7554 niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class); 7555 break; 7556 case TCP_V6_FLOW: 7557 case UDP_V6_FLOW: 7558 case SCTP_V6_FLOW: 7559 case AH_V6_FLOW: 7560 case ESP_V6_FLOW: 7561 /* Not yet implemented */ 7562 netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n", 7563 parent->index, __func__, fsp->flow_type); 7564 ret = -EINVAL; 7565 goto out; 7566 case IP_USER_FLOW: 7567 niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class); 7568 break; 7569 default: 7570 netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n", 7571 parent->index, __func__, fsp->flow_type); 7572 ret = -EINVAL; 7573 goto out; 7574 } 7575 7576 /* fill in the assoc data */ 7577 if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { 7578 tp->assoc_data = TCAM_ASSOCDATA_DISC; 7579 } else { 7580 if (fsp->ring_cookie >= np->num_rx_rings) { 7581 netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n", 7582 parent->index, __func__, 7583 (long long)fsp->ring_cookie); 7584 ret = -EINVAL; 7585 goto out; 7586 } 7587 tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET | 7588 (fsp->ring_cookie << 7589 TCAM_ASSOCDATA_OFFSET_SHIFT)); 7590 } 7591 7592 err = tcam_write(np, idx, tp->key, tp->key_mask); 7593 if (err) { 7594 ret = -EINVAL; 7595 goto out; 7596 } 7597 err = tcam_assoc_write(np, idx, tp->assoc_data); 7598 if (err) { 7599 ret = -EINVAL; 7600 goto out; 7601 } 7602 7603 /* validate the entry */ 7604 tp->valid = 1; 7605 np->clas.tcam_valid_entries++; 7606out: 7607 niu_unlock_parent(np, flags); 7608 7609 return ret; 7610} 7611 7612static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc) 7613{ 7614 struct niu_parent *parent = np->parent; 7615 struct niu_tcam_entry *tp; 7616 u16 idx; 7617 unsigned long flags; 7618 u64 class; 7619 int ret = 0; 7620 7621 if (loc >= tcam_get_size(np)) 7622 return -EINVAL; 7623 7624 niu_lock_parent(np, flags); 7625 7626 idx = tcam_get_index(np, loc); 7627 tp = &parent->tcam[idx]; 7628 7629 /* if the entry is of a user defined class, then update*/ 7630 class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >> 7631 TCAM_V4KEY0_CLASS_CODE_SHIFT; 7632 7633 if (class >= CLASS_CODE_USER_PROG1 && class <= CLASS_CODE_USER_PROG4) { 7634 int i; 7635 for (i = 0; i < NIU_L3_PROG_CLS; i++) { 7636 if (parent->l3_cls[i] == class) { 7637 parent->l3_cls_refcnt[i]--; 7638 if (!parent->l3_cls_refcnt[i]) { 7639 /* disable class */ 7640 ret = tcam_user_ip_class_enable(np, 7641 class, 7642 0); 7643 if (ret) 7644 goto out; 7645 parent->l3_cls[i] = 0; 7646 parent->l3_cls_pid[i] = 0; 7647 } 7648 break; 7649 } 7650 } 7651 if (i == NIU_L3_PROG_CLS) { 7652 netdev_info(np->dev, "niu%d: In %s(): Usr class 0x%llx not found\n", 7653 parent->index, __func__, 7654 (unsigned long long)class); 7655 ret = -EINVAL; 7656 goto out; 7657 } 7658 } 7659 7660 ret = tcam_flush(np, idx); 7661 if (ret) 7662 goto out; 7663 7664 /* invalidate the entry */ 7665 tp->valid = 0; 7666 
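	/* tcam_valid_entries is presumably what ETHTOOL_GRXCLSRLCNT hands
	 * back through tcam_get_valid_entry_cnt(), so it is kept in step
	 * with tp->valid on both the insert and delete paths.
	 */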
np->clas.tcam_valid_entries--; 7667out: 7668 niu_unlock_parent(np, flags); 7669 7670 return ret; 7671} 7672 7673static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd) 7674{ 7675 struct niu *np = netdev_priv(dev); 7676 int ret = 0; 7677 7678 switch (cmd->cmd) { 7679 case ETHTOOL_SRXFH: 7680 ret = niu_set_hash_opts(np, cmd); 7681 break; 7682 case ETHTOOL_SRXCLSRLINS: 7683 ret = niu_add_ethtool_tcam_entry(np, cmd); 7684 break; 7685 case ETHTOOL_SRXCLSRLDEL: 7686 ret = niu_del_ethtool_tcam_entry(np, cmd->fs.location); 7687 break; 7688 default: 7689 ret = -EINVAL; 7690 break; 7691 } 7692 7693 return ret; 7694} 7695 7696static const struct { 7697 const char string[ETH_GSTRING_LEN]; 7698} niu_xmac_stat_keys[] = { 7699 { "tx_frames" }, 7700 { "tx_bytes" }, 7701 { "tx_fifo_errors" }, 7702 { "tx_overflow_errors" }, 7703 { "tx_max_pkt_size_errors" }, 7704 { "tx_underflow_errors" }, 7705 { "rx_local_faults" }, 7706 { "rx_remote_faults" }, 7707 { "rx_link_faults" }, 7708 { "rx_align_errors" }, 7709 { "rx_frags" }, 7710 { "rx_mcasts" }, 7711 { "rx_bcasts" }, 7712 { "rx_hist_cnt1" }, 7713 { "rx_hist_cnt2" }, 7714 { "rx_hist_cnt3" }, 7715 { "rx_hist_cnt4" }, 7716 { "rx_hist_cnt5" }, 7717 { "rx_hist_cnt6" }, 7718 { "rx_hist_cnt7" }, 7719 { "rx_octets" }, 7720 { "rx_code_violations" }, 7721 { "rx_len_errors" }, 7722 { "rx_crc_errors" }, 7723 { "rx_underflows" }, 7724 { "rx_overflows" }, 7725 { "pause_off_state" }, 7726 { "pause_on_state" }, 7727 { "pause_received" }, 7728}; 7729 7730#define NUM_XMAC_STAT_KEYS ARRAY_SIZE(niu_xmac_stat_keys) 7731 7732static const struct { 7733 const char string[ETH_GSTRING_LEN]; 7734} niu_bmac_stat_keys[] = { 7735 { "tx_underflow_errors" }, 7736 { "tx_max_pkt_size_errors" }, 7737 { "tx_bytes" }, 7738 { "tx_frames" }, 7739 { "rx_overflows" }, 7740 { "rx_frames" }, 7741 { "rx_align_errors" }, 7742 { "rx_crc_errors" }, 7743 { "rx_len_errors" }, 7744 { "pause_off_state" }, 7745 { "pause_on_state" }, 7746 { "pause_received" }, 7747}; 7748 7749#define NUM_BMAC_STAT_KEYS ARRAY_SIZE(niu_bmac_stat_keys) 7750 7751static const struct { 7752 const char string[ETH_GSTRING_LEN]; 7753} niu_rxchan_stat_keys[] = { 7754 { "rx_channel" }, 7755 { "rx_packets" }, 7756 { "rx_bytes" }, 7757 { "rx_dropped" }, 7758 { "rx_errors" }, 7759}; 7760 7761#define NUM_RXCHAN_STAT_KEYS ARRAY_SIZE(niu_rxchan_stat_keys) 7762 7763static const struct { 7764 const char string[ETH_GSTRING_LEN]; 7765} niu_txchan_stat_keys[] = { 7766 { "tx_channel" }, 7767 { "tx_packets" }, 7768 { "tx_bytes" }, 7769 { "tx_errors" }, 7770}; 7771 7772#define NUM_TXCHAN_STAT_KEYS ARRAY_SIZE(niu_txchan_stat_keys) 7773 7774static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data) 7775{ 7776 struct niu *np = netdev_priv(dev); 7777 int i; 7778 7779 if (stringset != ETH_SS_STATS) 7780 return; 7781 7782 if (np->flags & NIU_FLAGS_XMAC) { 7783 memcpy(data, niu_xmac_stat_keys, 7784 sizeof(niu_xmac_stat_keys)); 7785 data += sizeof(niu_xmac_stat_keys); 7786 } else { 7787 memcpy(data, niu_bmac_stat_keys, 7788 sizeof(niu_bmac_stat_keys)); 7789 data += sizeof(niu_bmac_stat_keys); 7790 } 7791 for (i = 0; i < np->num_rx_rings; i++) { 7792 memcpy(data, niu_rxchan_stat_keys, 7793 sizeof(niu_rxchan_stat_keys)); 7794 data += sizeof(niu_rxchan_stat_keys); 7795 } 7796 for (i = 0; i < np->num_tx_rings; i++) { 7797 memcpy(data, niu_txchan_stat_keys, 7798 sizeof(niu_txchan_stat_keys)); 7799 data += sizeof(niu_txchan_stat_keys); 7800 } 7801} 7802 7803static int niu_get_sset_count(struct net_device *dev, int stringset) 7804{ 
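	/* The count returned here has to match the layout produced by
	 * niu_get_strings() and niu_get_ethtool_stats(): one block of XMAC
	 * or BMAC keys, then 5 values per RX ring and 4 per TX ring.
	 */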
7805 struct niu *np = netdev_priv(dev); 7806 7807 if (stringset != ETH_SS_STATS) 7808 return -EINVAL; 7809 7810 return (np->flags & NIU_FLAGS_XMAC ? 7811 NUM_XMAC_STAT_KEYS : 7812 NUM_BMAC_STAT_KEYS) + 7813 (np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) + 7814 (np->num_tx_rings * NUM_TXCHAN_STAT_KEYS); 7815} 7816 7817static void niu_get_ethtool_stats(struct net_device *dev, 7818 struct ethtool_stats *stats, u64 *data) 7819{ 7820 struct niu *np = netdev_priv(dev); 7821 int i; 7822 7823 niu_sync_mac_stats(np); 7824 if (np->flags & NIU_FLAGS_XMAC) { 7825 memcpy(data, &np->mac_stats.xmac, 7826 sizeof(struct niu_xmac_stats)); 7827 data += (sizeof(struct niu_xmac_stats) / sizeof(u64)); 7828 } else { 7829 memcpy(data, &np->mac_stats.bmac, 7830 sizeof(struct niu_bmac_stats)); 7831 data += (sizeof(struct niu_bmac_stats) / sizeof(u64)); 7832 } 7833 for (i = 0; i < np->num_rx_rings; i++) { 7834 struct rx_ring_info *rp = &np->rx_rings[i]; 7835 7836 niu_sync_rx_discard_stats(np, rp, 0); 7837 7838 data[0] = rp->rx_channel; 7839 data[1] = rp->rx_packets; 7840 data[2] = rp->rx_bytes; 7841 data[3] = rp->rx_dropped; 7842 data[4] = rp->rx_errors; 7843 data += 5; 7844 } 7845 for (i = 0; i < np->num_tx_rings; i++) { 7846 struct tx_ring_info *rp = &np->tx_rings[i]; 7847 7848 data[0] = rp->tx_channel; 7849 data[1] = rp->tx_packets; 7850 data[2] = rp->tx_bytes; 7851 data[3] = rp->tx_errors; 7852 data += 4; 7853 } 7854} 7855 7856static u64 niu_led_state_save(struct niu *np) 7857{ 7858 if (np->flags & NIU_FLAGS_XMAC) 7859 return nr64_mac(XMAC_CONFIG); 7860 else 7861 return nr64_mac(BMAC_XIF_CONFIG); 7862} 7863 7864static void niu_led_state_restore(struct niu *np, u64 val) 7865{ 7866 if (np->flags & NIU_FLAGS_XMAC) 7867 nw64_mac(XMAC_CONFIG, val); 7868 else 7869 nw64_mac(BMAC_XIF_CONFIG, val); 7870} 7871 7872static void niu_force_led(struct niu *np, int on) 7873{ 7874 u64 val, reg, bit; 7875 7876 if (np->flags & NIU_FLAGS_XMAC) { 7877 reg = XMAC_CONFIG; 7878 bit = XMAC_CONFIG_FORCE_LED_ON; 7879 } else { 7880 reg = BMAC_XIF_CONFIG; 7881 bit = BMAC_XIF_CONFIG_LINK_LED; 7882 } 7883 7884 val = nr64_mac(reg); 7885 if (on) 7886 val |= bit; 7887 else 7888 val &= ~bit; 7889 nw64_mac(reg, val); 7890} 7891 7892static int niu_phys_id(struct net_device *dev, u32 data) 7893{ 7894 struct niu *np = netdev_priv(dev); 7895 u64 orig_led_state; 7896 int i; 7897 7898 if (!netif_running(dev)) 7899 return -EAGAIN; 7900 7901 if (data == 0) 7902 data = 2; 7903 7904 orig_led_state = niu_led_state_save(np); 7905 for (i = 0; i < (data * 2); i++) { 7906 int on = ((i % 2) == 0); 7907 7908 niu_force_led(np, on); 7909 7910 if (msleep_interruptible(500)) 7911 break; 7912 } 7913 niu_led_state_restore(np, orig_led_state); 7914 7915 return 0; 7916} 7917 7918static int niu_set_flags(struct net_device *dev, u32 data) 7919{ 7920 return ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH); 7921} 7922 7923static const struct ethtool_ops niu_ethtool_ops = { 7924 .get_drvinfo = niu_get_drvinfo, 7925 .get_link = ethtool_op_get_link, 7926 .get_msglevel = niu_get_msglevel, 7927 .set_msglevel = niu_set_msglevel, 7928 .nway_reset = niu_nway_reset, 7929 .get_eeprom_len = niu_get_eeprom_len, 7930 .get_eeprom = niu_get_eeprom, 7931 .get_settings = niu_get_settings, 7932 .set_settings = niu_set_settings, 7933 .get_strings = niu_get_strings, 7934 .get_sset_count = niu_get_sset_count, 7935 .get_ethtool_stats = niu_get_ethtool_stats, 7936 .phys_id = niu_phys_id, 7937 .get_rxnfc = niu_get_nfc, 7938 .set_rxnfc = niu_set_nfc, 7939 .set_flags = niu_set_flags, 7940 .get_flags = 
ethtool_op_get_flags, 7941}; 7942 7943static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent, 7944 int ldg, int ldn) 7945{ 7946 if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) 7947 return -EINVAL; 7948 if (ldn < 0 || ldn > LDN_MAX) 7949 return -EINVAL; 7950 7951 parent->ldg_map[ldn] = ldg; 7952 7953 if (np->parent->plat_type == PLAT_TYPE_NIU) { 7954 /* On N2 NIU, the ldn-->ldg assignments are setup and fixed by 7955 * the firmware, and we're not supposed to change them. 7956 * Validate the mapping, because if it's wrong we probably 7957 * won't get any interrupts and that's painful to debug. 7958 */ 7959 if (nr64(LDG_NUM(ldn)) != ldg) { 7960 dev_err(np->device, "Port %u, mis-matched LDG assignment for ldn %d, should be %d is %llu\n", 7961 np->port, ldn, ldg, 7962 (unsigned long long) nr64(LDG_NUM(ldn))); 7963 return -EINVAL; 7964 } 7965 } else 7966 nw64(LDG_NUM(ldn), ldg); 7967 7968 return 0; 7969} 7970 7971static int niu_set_ldg_timer_res(struct niu *np, int res) 7972{ 7973 if (res < 0 || res > LDG_TIMER_RES_VAL) 7974 return -EINVAL; 7975 7976 7977 nw64(LDG_TIMER_RES, res); 7978 7979 return 0; 7980} 7981 7982static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector) 7983{ 7984 if ((ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) || 7985 (func < 0 || func > 3) || 7986 (vector < 0 || vector > 0x1f)) 7987 return -EINVAL; 7988 7989 nw64(SID(ldg), (func << SID_FUNC_SHIFT) | vector); 7990 7991 return 0; 7992} 7993 7994static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr) 7995{ 7996 u64 frame, frame_base = (ESPC_PIO_STAT_READ_START | 7997 (addr << ESPC_PIO_STAT_ADDR_SHIFT)); 7998 int limit; 7999 8000 if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT)) 8001 return -EINVAL; 8002 8003 frame = frame_base; 8004 nw64(ESPC_PIO_STAT, frame); 8005 limit = 64; 8006 do { 8007 udelay(5); 8008 frame = nr64(ESPC_PIO_STAT); 8009 if (frame & ESPC_PIO_STAT_READ_END) 8010 break; 8011 } while (limit--); 8012 if (!(frame & ESPC_PIO_STAT_READ_END)) { 8013 dev_err(np->device, "EEPROM read timeout frame[%llx]\n", 8014 (unsigned long long) frame); 8015 return -ENODEV; 8016 } 8017 8018 frame = frame_base; 8019 nw64(ESPC_PIO_STAT, frame); 8020 limit = 64; 8021 do { 8022 udelay(5); 8023 frame = nr64(ESPC_PIO_STAT); 8024 if (frame & ESPC_PIO_STAT_READ_END) 8025 break; 8026 } while (limit--); 8027 if (!(frame & ESPC_PIO_STAT_READ_END)) { 8028 dev_err(np->device, "EEPROM read timeout frame[%llx]\n", 8029 (unsigned long long) frame); 8030 return -ENODEV; 8031 } 8032 8033 frame = nr64(ESPC_PIO_STAT); 8034 return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT; 8035} 8036 8037static int __devinit niu_pci_eeprom_read16(struct niu *np, u32 off) 8038{ 8039 int err = niu_pci_eeprom_read(np, off); 8040 u16 val; 8041 8042 if (err < 0) 8043 return err; 8044 val = (err << 8); 8045 err = niu_pci_eeprom_read(np, off + 1); 8046 if (err < 0) 8047 return err; 8048 val |= (err & 0xff); 8049 8050 return val; 8051} 8052 8053static int __devinit niu_pci_eeprom_read16_swp(struct niu *np, u32 off) 8054{ 8055 int err = niu_pci_eeprom_read(np, off); 8056 u16 val; 8057 8058 if (err < 0) 8059 return err; 8060 8061 val = (err & 0xff); 8062 err = niu_pci_eeprom_read(np, off + 1); 8063 if (err < 0) 8064 return err; 8065 8066 val |= (err & 0xff) << 8; 8067 8068 return val; 8069} 8070 8071static int __devinit niu_pci_vpd_get_propname(struct niu *np, 8072 u32 off, 8073 char *namebuf, 8074 int namebuf_len) 8075{ 8076 int i; 8077 8078 for (i = 0; i < namebuf_len; i++) { 8079 int err = niu_pci_eeprom_read(np, 
off + i); 8080 if (err < 0) 8081 return err; 8082 *namebuf++ = err; 8083 if (!err) 8084 break; 8085 } 8086 if (i >= namebuf_len) 8087 return -EINVAL; 8088 8089 return i + 1; 8090} 8091 8092static void __devinit niu_vpd_parse_version(struct niu *np) 8093{ 8094 struct niu_vpd *vpd = &np->vpd; 8095 int len = strlen(vpd->version) + 1; 8096 const char *s = vpd->version; 8097 int i; 8098 8099 for (i = 0; i < len - 5; i++) { 8100 if (!strncmp(s + i, "FCode ", 6)) 8101 break; 8102 } 8103 if (i >= len - 5) 8104 return; 8105 8106 s += i + 5; 8107 sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor); 8108 8109 netif_printk(np, probe, KERN_DEBUG, np->dev, 8110 "VPD_SCAN: FCODE major(%d) minor(%d)\n", 8111 vpd->fcode_major, vpd->fcode_minor); 8112 if (vpd->fcode_major > NIU_VPD_MIN_MAJOR || 8113 (vpd->fcode_major == NIU_VPD_MIN_MAJOR && 8114 vpd->fcode_minor >= NIU_VPD_MIN_MINOR)) 8115 np->flags |= NIU_FLAGS_VPD_VALID; 8116} 8117 8118/* ESPC_PIO_EN_ENABLE must be set */ 8119static int __devinit niu_pci_vpd_scan_props(struct niu *np, 8120 u32 start, u32 end) 8121{ 8122 unsigned int found_mask = 0; 8123#define FOUND_MASK_MODEL 0x00000001 8124#define FOUND_MASK_BMODEL 0x00000002 8125#define FOUND_MASK_VERS 0x00000004 8126#define FOUND_MASK_MAC 0x00000008 8127#define FOUND_MASK_NMAC 0x00000010 8128#define FOUND_MASK_PHY 0x00000020 8129#define FOUND_MASK_ALL 0x0000003f 8130 8131 netif_printk(np, probe, KERN_DEBUG, np->dev, 8132 "VPD_SCAN: start[%x] end[%x]\n", start, end); 8133 while (start < end) { 8134 int len, err, instance, type, prop_len; 8135 char namebuf[64]; 8136 u8 *prop_buf; 8137 int max_len; 8138 8139 if (found_mask == FOUND_MASK_ALL) { 8140 niu_vpd_parse_version(np); 8141 return 1; 8142 } 8143 8144 err = niu_pci_eeprom_read(np, start + 2); 8145 if (err < 0) 8146 return err; 8147 len = err; 8148 start += 3; 8149 8150 instance = niu_pci_eeprom_read(np, start); 8151 type = niu_pci_eeprom_read(np, start + 3); 8152 prop_len = niu_pci_eeprom_read(np, start + 4); 8153 err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64); 8154 if (err < 0) 8155 return err; 8156 8157 prop_buf = NULL; 8158 max_len = 0; 8159 if (!strcmp(namebuf, "model")) { 8160 prop_buf = np->vpd.model; 8161 max_len = NIU_VPD_MODEL_MAX; 8162 found_mask |= FOUND_MASK_MODEL; 8163 } else if (!strcmp(namebuf, "board-model")) { 8164 prop_buf = np->vpd.board_model; 8165 max_len = NIU_VPD_BD_MODEL_MAX; 8166 found_mask |= FOUND_MASK_BMODEL; 8167 } else if (!strcmp(namebuf, "version")) { 8168 prop_buf = np->vpd.version; 8169 max_len = NIU_VPD_VERSION_MAX; 8170 found_mask |= FOUND_MASK_VERS; 8171 } else if (!strcmp(namebuf, "local-mac-address")) { 8172 prop_buf = np->vpd.local_mac; 8173 max_len = ETH_ALEN; 8174 found_mask |= FOUND_MASK_MAC; 8175 } else if (!strcmp(namebuf, "num-mac-addresses")) { 8176 prop_buf = &np->vpd.mac_num; 8177 max_len = 1; 8178 found_mask |= FOUND_MASK_NMAC; 8179 } else if (!strcmp(namebuf, "phy-type")) { 8180 prop_buf = np->vpd.phy_type; 8181 max_len = NIU_VPD_PHY_TYPE_MAX; 8182 found_mask |= FOUND_MASK_PHY; 8183 } 8184 8185 if (max_len && prop_len > max_len) { 8186 dev_err(np->device, "Property '%s' length (%d) is too long\n", namebuf, prop_len); 8187 return -EINVAL; 8188 } 8189 8190 if (prop_buf) { 8191 u32 off = start + 5 + err; 8192 int i; 8193 8194 netif_printk(np, probe, KERN_DEBUG, np->dev, 8195 "VPD_SCAN: Reading in property [%s] len[%d]\n", 8196 namebuf, prop_len); 8197 for (i = 0; i < prop_len; i++) 8198 *prop_buf++ = niu_pci_eeprom_read(np, off + i); 8199 } 8200 8201 start += len; 8202 } 8203 8204 
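	/* Falling out of the loop without having seen every property is not
	 * fatal: returning 0 here (rather than the 1 returned once
	 * FOUND_MASK_ALL is hit) lets niu_pci_vpd_fetch() keep walking any
	 * remaining VPD blocks in the expansion ROM.
	 */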
return 0; 8205} 8206 8207/* ESPC_PIO_EN_ENABLE must be set */ 8208static void __devinit niu_pci_vpd_fetch(struct niu *np, u32 start) 8209{ 8210 u32 offset; 8211 int err; 8212 8213 err = niu_pci_eeprom_read16_swp(np, start + 1); 8214 if (err < 0) 8215 return; 8216 8217 offset = err + 3; 8218 8219 while (start + offset < ESPC_EEPROM_SIZE) { 8220 u32 here = start + offset; 8221 u32 end; 8222 8223 err = niu_pci_eeprom_read(np, here); 8224 if (err != 0x90) 8225 return; 8226 8227 err = niu_pci_eeprom_read16_swp(np, here + 1); 8228 if (err < 0) 8229 return; 8230 8231 here = start + offset + 3; 8232 end = start + offset + err; 8233 8234 offset += err; 8235 8236 err = niu_pci_vpd_scan_props(np, here, end); 8237 if (err < 0 || err == 1) 8238 return; 8239 } 8240} 8241 8242/* ESPC_PIO_EN_ENABLE must be set */ 8243static u32 __devinit niu_pci_vpd_offset(struct niu *np) 8244{ 8245 u32 start = 0, end = ESPC_EEPROM_SIZE, ret; 8246 int err; 8247 8248 while (start < end) { 8249 ret = start; 8250 8251 /* ROM header signature? */ 8252 err = niu_pci_eeprom_read16(np, start + 0); 8253 if (err != 0x55aa) 8254 return 0; 8255 8256 /* Apply offset to PCI data structure. */ 8257 err = niu_pci_eeprom_read16(np, start + 23); 8258 if (err < 0) 8259 return 0; 8260 start += err; 8261 8262 /* Check for "PCIR" signature. */ 8263 err = niu_pci_eeprom_read16(np, start + 0); 8264 if (err != 0x5043) 8265 return 0; 8266 err = niu_pci_eeprom_read16(np, start + 2); 8267 if (err != 0x4952) 8268 return 0; 8269 8270 /* Check for OBP image type. */ 8271 err = niu_pci_eeprom_read(np, start + 20); 8272 if (err < 0) 8273 return 0; 8274 if (err != 0x01) { 8275 err = niu_pci_eeprom_read(np, ret + 2); 8276 if (err < 0) 8277 return 0; 8278 8279 start = ret + (err * 512); 8280 continue; 8281 } 8282 8283 err = niu_pci_eeprom_read16_swp(np, start + 8); 8284 if (err < 0) 8285 return err; 8286 ret += err; 8287 8288 err = niu_pci_eeprom_read(np, ret + 0); 8289 if (err != 0x82) 8290 return 0; 8291 8292 return ret; 8293 } 8294 8295 return 0; 8296} 8297 8298static int __devinit niu_phy_type_prop_decode(struct niu *np, 8299 const char *phy_prop) 8300{ 8301 if (!strcmp(phy_prop, "mif")) { 8302 /* 1G copper, MII */ 8303 np->flags &= ~(NIU_FLAGS_FIBER | 8304 NIU_FLAGS_10G); 8305 np->mac_xcvr = MAC_XCVR_MII; 8306 } else if (!strcmp(phy_prop, "xgf")) { 8307 /* 10G fiber, XPCS */ 8308 np->flags |= (NIU_FLAGS_10G | 8309 NIU_FLAGS_FIBER); 8310 np->mac_xcvr = MAC_XCVR_XPCS; 8311 } else if (!strcmp(phy_prop, "pcs")) { 8312 /* 1G fiber, PCS */ 8313 np->flags &= ~NIU_FLAGS_10G; 8314 np->flags |= NIU_FLAGS_FIBER; 8315 np->mac_xcvr = MAC_XCVR_PCS; 8316 } else if (!strcmp(phy_prop, "xgc")) { 8317 /* 10G copper, XPCS */ 8318 np->flags |= NIU_FLAGS_10G; 8319 np->flags &= ~NIU_FLAGS_FIBER; 8320 np->mac_xcvr = MAC_XCVR_XPCS; 8321 } else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) { 8322 /* 10G Serdes or 1G Serdes, default to 10G */ 8323 np->flags |= NIU_FLAGS_10G; 8324 np->flags &= ~NIU_FLAGS_FIBER; 8325 np->flags |= NIU_FLAGS_XCVR_SERDES; 8326 np->mac_xcvr = MAC_XCVR_XPCS; 8327 } else { 8328 return -EINVAL; 8329 } 8330 return 0; 8331} 8332 8333static int niu_pci_vpd_get_nports(struct niu *np) 8334{ 8335 int ports = 0; 8336 8337 if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) || 8338 (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) || 8339 (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) || 8340 (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) || 8341 (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) { 8342 ports = 4; 8343 } else if ((!strcmp(np->vpd.model, 
NIU_2XGF_LP_MDL_STR)) || 8344 (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) || 8345 (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) || 8346 (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) { 8347 ports = 2; 8348 } 8349 8350 return ports; 8351} 8352 8353static void __devinit niu_pci_vpd_validate(struct niu *np) 8354{ 8355 struct net_device *dev = np->dev; 8356 struct niu_vpd *vpd = &np->vpd; 8357 u8 val8; 8358 8359 if (!is_valid_ether_addr(&vpd->local_mac[0])) { 8360 dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n"); 8361 8362 np->flags &= ~NIU_FLAGS_VPD_VALID; 8363 return; 8364 } 8365 8366 if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || 8367 !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { 8368 np->flags |= NIU_FLAGS_10G; 8369 np->flags &= ~NIU_FLAGS_FIBER; 8370 np->flags |= NIU_FLAGS_XCVR_SERDES; 8371 np->mac_xcvr = MAC_XCVR_PCS; 8372 if (np->port > 1) { 8373 np->flags |= NIU_FLAGS_FIBER; 8374 np->flags &= ~NIU_FLAGS_10G; 8375 } 8376 if (np->flags & NIU_FLAGS_10G) 8377 np->mac_xcvr = MAC_XCVR_XPCS; 8378 } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { 8379 np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | 8380 NIU_FLAGS_HOTPLUG_PHY); 8381 } else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { 8382 dev_err(np->device, "Illegal phy string [%s]\n", 8383 np->vpd.phy_type); 8384 dev_err(np->device, "Falling back to SPROM\n"); 8385 np->flags &= ~NIU_FLAGS_VPD_VALID; 8386 return; 8387 } 8388 8389 memcpy(dev->perm_addr, vpd->local_mac, ETH_ALEN); 8390 8391 val8 = dev->perm_addr[5]; 8392 dev->perm_addr[5] += np->port; 8393 if (dev->perm_addr[5] < val8) 8394 dev->perm_addr[4]++; 8395 8396 memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); 8397} 8398 8399static int __devinit niu_pci_probe_sprom(struct niu *np) 8400{ 8401 struct net_device *dev = np->dev; 8402 int len, i; 8403 u64 val, sum; 8404 u8 val8; 8405 8406 val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ); 8407 val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT; 8408 len = val / 4; 8409 8410 np->eeprom_len = len; 8411 8412 netif_printk(np, probe, KERN_DEBUG, np->dev, 8413 "SPROM: Image size %llu\n", (unsigned long long)val); 8414 8415 sum = 0; 8416 for (i = 0; i < len; i++) { 8417 val = nr64(ESPC_NCR(i)); 8418 sum += (val >> 0) & 0xff; 8419 sum += (val >> 8) & 0xff; 8420 sum += (val >> 16) & 0xff; 8421 sum += (val >> 24) & 0xff; 8422 } 8423 netif_printk(np, probe, KERN_DEBUG, np->dev, 8424 "SPROM: Checksum %x\n", (int)(sum & 0xff)); 8425 if ((sum & 0xff) != 0xab) { 8426 dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff)); 8427 return -EINVAL; 8428 } 8429 8430 val = nr64(ESPC_PHY_TYPE); 8431 switch (np->port) { 8432 case 0: 8433 val8 = (val & ESPC_PHY_TYPE_PORT0) >> 8434 ESPC_PHY_TYPE_PORT0_SHIFT; 8435 break; 8436 case 1: 8437 val8 = (val & ESPC_PHY_TYPE_PORT1) >> 8438 ESPC_PHY_TYPE_PORT1_SHIFT; 8439 break; 8440 case 2: 8441 val8 = (val & ESPC_PHY_TYPE_PORT2) >> 8442 ESPC_PHY_TYPE_PORT2_SHIFT; 8443 break; 8444 case 3: 8445 val8 = (val & ESPC_PHY_TYPE_PORT3) >> 8446 ESPC_PHY_TYPE_PORT3_SHIFT; 8447 break; 8448 default: 8449 dev_err(np->device, "Bogus port number %u\n", 8450 np->port); 8451 return -EINVAL; 8452 } 8453 netif_printk(np, probe, KERN_DEBUG, np->dev, 8454 "SPROM: PHY type %x\n", val8); 8455 8456 switch (val8) { 8457 case ESPC_PHY_TYPE_1G_COPPER: 8458 /* 1G copper, MII */ 8459 np->flags &= ~(NIU_FLAGS_FIBER | 8460 NIU_FLAGS_10G); 8461 np->mac_xcvr = MAC_XCVR_MII; 8462 break; 8463 8464 case ESPC_PHY_TYPE_1G_FIBER: 8465 /* 1G fiber, PCS */ 8466 np->flags &= ~NIU_FLAGS_10G; 8467 np->flags |= 
NIU_FLAGS_FIBER; 8468 np->mac_xcvr = MAC_XCVR_PCS; 8469 break; 8470 8471 case ESPC_PHY_TYPE_10G_COPPER: 8472 /* 10G copper, XPCS */ 8473 np->flags |= NIU_FLAGS_10G; 8474 np->flags &= ~NIU_FLAGS_FIBER; 8475 np->mac_xcvr = MAC_XCVR_XPCS; 8476 break; 8477 8478 case ESPC_PHY_TYPE_10G_FIBER: 8479 /* 10G fiber, XPCS */ 8480 np->flags |= (NIU_FLAGS_10G | 8481 NIU_FLAGS_FIBER); 8482 np->mac_xcvr = MAC_XCVR_XPCS; 8483 break; 8484 8485 default: 8486 dev_err(np->device, "Bogus SPROM phy type %u\n", val8); 8487 return -EINVAL; 8488 } 8489 8490 val = nr64(ESPC_MAC_ADDR0); 8491 netif_printk(np, probe, KERN_DEBUG, np->dev, 8492 "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val); 8493 dev->perm_addr[0] = (val >> 0) & 0xff; 8494 dev->perm_addr[1] = (val >> 8) & 0xff; 8495 dev->perm_addr[2] = (val >> 16) & 0xff; 8496 dev->perm_addr[3] = (val >> 24) & 0xff; 8497 8498 val = nr64(ESPC_MAC_ADDR1); 8499 netif_printk(np, probe, KERN_DEBUG, np->dev, 8500 "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val); 8501 dev->perm_addr[4] = (val >> 0) & 0xff; 8502 dev->perm_addr[5] = (val >> 8) & 0xff; 8503 8504 if (!is_valid_ether_addr(&dev->perm_addr[0])) { 8505 dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n", 8506 dev->perm_addr); 8507 return -EINVAL; 8508 } 8509 8510 val8 = dev->perm_addr[5]; 8511 dev->perm_addr[5] += np->port; 8512 if (dev->perm_addr[5] < val8) 8513 dev->perm_addr[4]++; 8514 8515 memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); 8516 8517 val = nr64(ESPC_MOD_STR_LEN); 8518 netif_printk(np, probe, KERN_DEBUG, np->dev, 8519 "SPROM: MOD_STR_LEN[%llu]\n", (unsigned long long)val); 8520 if (val >= 8 * 4) 8521 return -EINVAL; 8522 8523 for (i = 0; i < val; i += 4) { 8524 u64 tmp = nr64(ESPC_NCR(5 + (i / 4))); 8525 8526 np->vpd.model[i + 3] = (tmp >> 0) & 0xff; 8527 np->vpd.model[i + 2] = (tmp >> 8) & 0xff; 8528 np->vpd.model[i + 1] = (tmp >> 16) & 0xff; 8529 np->vpd.model[i + 0] = (tmp >> 24) & 0xff; 8530 } 8531 np->vpd.model[val] = '\0'; 8532 8533 val = nr64(ESPC_BD_MOD_STR_LEN); 8534 netif_printk(np, probe, KERN_DEBUG, np->dev, 8535 "SPROM: BD_MOD_STR_LEN[%llu]\n", (unsigned long long)val); 8536 if (val >= 4 * 4) 8537 return -EINVAL; 8538 8539 for (i = 0; i < val; i += 4) { 8540 u64 tmp = nr64(ESPC_NCR(14 + (i / 4))); 8541 8542 np->vpd.board_model[i + 3] = (tmp >> 0) & 0xff; 8543 np->vpd.board_model[i + 2] = (tmp >> 8) & 0xff; 8544 np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff; 8545 np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff; 8546 } 8547 np->vpd.board_model[val] = '\0'; 8548 8549 np->vpd.mac_num = 8550 nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL; 8551 netif_printk(np, probe, KERN_DEBUG, np->dev, 8552 "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num); 8553 8554 return 0; 8555} 8556 8557static int __devinit niu_get_and_validate_port(struct niu *np) 8558{ 8559 struct niu_parent *parent = np->parent; 8560 8561 if (np->port <= 1) 8562 np->flags |= NIU_FLAGS_XMAC; 8563 8564 if (!parent->num_ports) { 8565 if (parent->plat_type == PLAT_TYPE_NIU) { 8566 parent->num_ports = 2; 8567 } else { 8568 parent->num_ports = niu_pci_vpd_get_nports(np); 8569 if (!parent->num_ports) { 8570 /* Fall back to SPROM as last resort. 8571 * This will fail on most cards. 8572 */ 8573 parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) & 8574 ESPC_NUM_PORTS_MACS_VAL; 8575 8576 /* All of the current probing methods fail on 8577 * Maramba on-board parts. 
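			 * Those are quad-port boards (the VPD model table
			 * above returns 4 ports for NIU_MARAMBA_MDL_STR),
			 * which is presumably why 4 is the hard-wired
			 * fallback here.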
8578 */ 8579 if (!parent->num_ports) 8580 parent->num_ports = 4; 8581 } 8582 } 8583 } 8584 8585 if (np->port >= parent->num_ports) 8586 return -ENODEV; 8587 8588 return 0; 8589} 8590 8591static int __devinit phy_record(struct niu_parent *parent, 8592 struct phy_probe_info *p, 8593 int dev_id_1, int dev_id_2, u8 phy_port, 8594 int type) 8595{ 8596 u32 id = (dev_id_1 << 16) | dev_id_2; 8597 u8 idx; 8598 8599 if (dev_id_1 < 0 || dev_id_2 < 0) 8600 return 0; 8601 if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) { 8602 if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) && 8603 ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011) && 8604 ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8706)) 8605 return 0; 8606 } else { 8607 if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R) 8608 return 0; 8609 } 8610 8611 pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n", 8612 parent->index, id, 8613 type == PHY_TYPE_PMA_PMD ? "PMA/PMD" : 8614 type == PHY_TYPE_PCS ? "PCS" : "MII", 8615 phy_port); 8616 8617 if (p->cur[type] >= NIU_MAX_PORTS) { 8618 pr_err("Too many PHY ports\n"); 8619 return -EINVAL; 8620 } 8621 idx = p->cur[type]; 8622 p->phy_id[type][idx] = id; 8623 p->phy_port[type][idx] = phy_port; 8624 p->cur[type] = idx + 1; 8625 return 0; 8626} 8627 8628static int __devinit port_has_10g(struct phy_probe_info *p, int port) 8629{ 8630 int i; 8631 8632 for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) { 8633 if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port) 8634 return 1; 8635 } 8636 for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) { 8637 if (p->phy_port[PHY_TYPE_PCS][i] == port) 8638 return 1; 8639 } 8640 8641 return 0; 8642} 8643 8644static int __devinit count_10g_ports(struct phy_probe_info *p, int *lowest) 8645{ 8646 int port, cnt; 8647 8648 cnt = 0; 8649 *lowest = 32; 8650 for (port = 8; port < 32; port++) { 8651 if (port_has_10g(p, port)) { 8652 if (!cnt) 8653 *lowest = port; 8654 cnt++; 8655 } 8656 } 8657 8658 return cnt; 8659} 8660 8661static int __devinit count_1g_ports(struct phy_probe_info *p, int *lowest) 8662{ 8663 *lowest = 32; 8664 if (p->cur[PHY_TYPE_MII]) 8665 *lowest = p->phy_port[PHY_TYPE_MII][0]; 8666 8667 return p->cur[PHY_TYPE_MII]; 8668} 8669 8670static void __devinit niu_n2_divide_channels(struct niu_parent *parent) 8671{ 8672 int num_ports = parent->num_ports; 8673 int i; 8674 8675 for (i = 0; i < num_ports; i++) { 8676 parent->rxchan_per_port[i] = (16 / num_ports); 8677 parent->txchan_per_port[i] = (16 / num_ports); 8678 8679 pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n", 8680 parent->index, i, 8681 parent->rxchan_per_port[i], 8682 parent->txchan_per_port[i]); 8683 } 8684} 8685 8686static void __devinit niu_divide_channels(struct niu_parent *parent, 8687 int num_10g, int num_1g) 8688{ 8689 int num_ports = parent->num_ports; 8690 int rx_chans_per_10g, rx_chans_per_1g; 8691 int tx_chans_per_10g, tx_chans_per_1g; 8692 int i, tot_rx, tot_tx; 8693 8694 if (!num_10g || !num_1g) { 8695 rx_chans_per_10g = rx_chans_per_1g = 8696 (NIU_NUM_RXCHAN / num_ports); 8697 tx_chans_per_10g = tx_chans_per_1g = 8698 (NIU_NUM_TXCHAN / num_ports); 8699 } else { 8700 rx_chans_per_1g = NIU_NUM_RXCHAN / 8; 8701 rx_chans_per_10g = (NIU_NUM_RXCHAN - 8702 (rx_chans_per_1g * num_1g)) / 8703 num_10g; 8704 8705 tx_chans_per_1g = NIU_NUM_TXCHAN / 6; 8706 tx_chans_per_10g = (NIU_NUM_TXCHAN - 8707 (tx_chans_per_1g * num_1g)) / 8708 num_10g; 8709 } 8710 8711 tot_rx = tot_tx = 0; 8712 for (i = 0; i < num_ports; i++) { 8713 int type = phy_decode(parent->port_phy, i); 8714 8715 if (type == PORT_TYPE_10G) { 8716 
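			/* 10G ports get the larger share computed above.
			 * A rough worked example, assuming the usual Neptune
			 * limits of NIU_NUM_RXCHAN = 16 and NIU_NUM_TXCHAN =
			 * 24: with two 10G and two 1G ports, each 1G port
			 * ends up with 2 RX / 4 TX channels and each 10G
			 * port with 6 RX / 8 TX, using all 16 RX and 24 TX
			 * channels.
			 */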
parent->rxchan_per_port[i] = rx_chans_per_10g; 8717 parent->txchan_per_port[i] = tx_chans_per_10g; 8718 } else { 8719 parent->rxchan_per_port[i] = rx_chans_per_1g; 8720 parent->txchan_per_port[i] = tx_chans_per_1g; 8721 } 8722 pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n", 8723 parent->index, i, 8724 parent->rxchan_per_port[i], 8725 parent->txchan_per_port[i]); 8726 tot_rx += parent->rxchan_per_port[i]; 8727 tot_tx += parent->txchan_per_port[i]; 8728 } 8729 8730 if (tot_rx > NIU_NUM_RXCHAN) { 8731 pr_err("niu%d: Too many RX channels (%d), resetting to one per port\n", 8732 parent->index, tot_rx); 8733 for (i = 0; i < num_ports; i++) 8734 parent->rxchan_per_port[i] = 1; 8735 } 8736 if (tot_tx > NIU_NUM_TXCHAN) { 8737 pr_err("niu%d: Too many TX channels (%d), resetting to one per port\n", 8738 parent->index, tot_tx); 8739 for (i = 0; i < num_ports; i++) 8740 parent->txchan_per_port[i] = 1; 8741 } 8742 if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) { 8743 pr_warning("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n", 8744 parent->index, tot_rx, tot_tx); 8745 } 8746} 8747 8748static void __devinit niu_divide_rdc_groups(struct niu_parent *parent, 8749 int num_10g, int num_1g) 8750{ 8751 int i, num_ports = parent->num_ports; 8752 int rdc_group, rdc_groups_per_port; 8753 int rdc_channel_base; 8754 8755 rdc_group = 0; 8756 rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports; 8757 8758 rdc_channel_base = 0; 8759 8760 for (i = 0; i < num_ports; i++) { 8761 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i]; 8762 int grp, num_channels = parent->rxchan_per_port[i]; 8763 int this_channel_offset; 8764 8765 tp->first_table_num = rdc_group; 8766 tp->num_tables = rdc_groups_per_port; 8767 this_channel_offset = 0; 8768 for (grp = 0; grp < tp->num_tables; grp++) { 8769 struct rdc_table *rt = &tp->tables[grp]; 8770 int slot; 8771 8772 pr_info("niu%d: Port %d RDC tbl(%d) [ ", 8773 parent->index, i, tp->first_table_num + grp); 8774 for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) { 8775 rt->rxdma_channel[slot] = 8776 rdc_channel_base + this_channel_offset; 8777 8778 pr_cont("%d ", rt->rxdma_channel[slot]); 8779 8780 if (++this_channel_offset == num_channels) 8781 this_channel_offset = 0; 8782 } 8783 pr_cont("]\n"); 8784 } 8785 8786 parent->rdc_default[i] = rdc_channel_base; 8787 8788 rdc_channel_base += num_channels; 8789 rdc_group += rdc_groups_per_port; 8790 } 8791} 8792 8793static int __devinit fill_phy_probe_info(struct niu *np, 8794 struct niu_parent *parent, 8795 struct phy_probe_info *info) 8796{ 8797 unsigned long flags; 8798 int port, err; 8799 8800 memset(info, 0, sizeof(*info)); 8801 8802 /* Port 0 to 7 are reserved for onboard Serdes, probe the rest. 
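	 * For each MDIO port 8 through 31 the loop below reads the PHY ID
	 * registers three ways: from the PMA/PMD device address, from the
	 * PCS device address, and through plain MII, recording anything
	 * that phy_record() recognizes.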
*/ 8803 niu_lock_parent(np, flags); 8804 err = 0; 8805 for (port = 8; port < 32; port++) { 8806 int dev_id_1, dev_id_2; 8807 8808 dev_id_1 = mdio_read(np, port, 8809 NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1); 8810 dev_id_2 = mdio_read(np, port, 8811 NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2); 8812 err = phy_record(parent, info, dev_id_1, dev_id_2, port, 8813 PHY_TYPE_PMA_PMD); 8814 if (err) 8815 break; 8816 dev_id_1 = mdio_read(np, port, 8817 NIU_PCS_DEV_ADDR, MII_PHYSID1); 8818 dev_id_2 = mdio_read(np, port, 8819 NIU_PCS_DEV_ADDR, MII_PHYSID2); 8820 err = phy_record(parent, info, dev_id_1, dev_id_2, port, 8821 PHY_TYPE_PCS); 8822 if (err) 8823 break; 8824 dev_id_1 = mii_read(np, port, MII_PHYSID1); 8825 dev_id_2 = mii_read(np, port, MII_PHYSID2); 8826 err = phy_record(parent, info, dev_id_1, dev_id_2, port, 8827 PHY_TYPE_MII); 8828 if (err) 8829 break; 8830 } 8831 niu_unlock_parent(np, flags); 8832 8833 return err; 8834} 8835 8836static int __devinit walk_phys(struct niu *np, struct niu_parent *parent) 8837{ 8838 struct phy_probe_info *info = &parent->phy_probe_info; 8839 int lowest_10g, lowest_1g; 8840 int num_10g, num_1g; 8841 u32 val; 8842 int err; 8843 8844 num_10g = num_1g = 0; 8845 8846 if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || 8847 !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { 8848 num_10g = 0; 8849 num_1g = 2; 8850 parent->plat_type = PLAT_TYPE_ATCA_CP3220; 8851 parent->num_ports = 4; 8852 val = (phy_encode(PORT_TYPE_1G, 0) | 8853 phy_encode(PORT_TYPE_1G, 1) | 8854 phy_encode(PORT_TYPE_1G, 2) | 8855 phy_encode(PORT_TYPE_1G, 3)); 8856 } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { 8857 num_10g = 2; 8858 num_1g = 0; 8859 parent->num_ports = 2; 8860 val = (phy_encode(PORT_TYPE_10G, 0) | 8861 phy_encode(PORT_TYPE_10G, 1)); 8862 } else if ((np->flags & NIU_FLAGS_XCVR_SERDES) && 8863 (parent->plat_type == PLAT_TYPE_NIU)) { 8864 /* this is the Monza case */ 8865 if (np->flags & NIU_FLAGS_10G) { 8866 val = (phy_encode(PORT_TYPE_10G, 0) | 8867 phy_encode(PORT_TYPE_10G, 1)); 8868 } else { 8869 val = (phy_encode(PORT_TYPE_1G, 0) | 8870 phy_encode(PORT_TYPE_1G, 1)); 8871 } 8872 } else { 8873 err = fill_phy_probe_info(np, parent, info); 8874 if (err) 8875 return err; 8876 8877 num_10g = count_10g_ports(info, &lowest_10g); 8878 num_1g = count_1g_ports(info, &lowest_1g); 8879 8880 switch ((num_10g << 4) | num_1g) { 8881 case 0x24: 8882 if (lowest_1g == 10) 8883 parent->plat_type = PLAT_TYPE_VF_P0; 8884 else if (lowest_1g == 26) 8885 parent->plat_type = PLAT_TYPE_VF_P1; 8886 else 8887 goto unknown_vg_1g_port; 8888 8889 /* fallthru */ 8890 case 0x22: 8891 val = (phy_encode(PORT_TYPE_10G, 0) | 8892 phy_encode(PORT_TYPE_10G, 1) | 8893 phy_encode(PORT_TYPE_1G, 2) | 8894 phy_encode(PORT_TYPE_1G, 3)); 8895 break; 8896 8897 case 0x20: 8898 val = (phy_encode(PORT_TYPE_10G, 0) | 8899 phy_encode(PORT_TYPE_10G, 1)); 8900 break; 8901 8902 case 0x10: 8903 val = phy_encode(PORT_TYPE_10G, np->port); 8904 break; 8905 8906 case 0x14: 8907 if (lowest_1g == 10) 8908 parent->plat_type = PLAT_TYPE_VF_P0; 8909 else if (lowest_1g == 26) 8910 parent->plat_type = PLAT_TYPE_VF_P1; 8911 else 8912 goto unknown_vg_1g_port; 8913 8914 /* fallthru */ 8915 case 0x13: 8916 if ((lowest_10g & 0x7) == 0) 8917 val = (phy_encode(PORT_TYPE_10G, 0) | 8918 phy_encode(PORT_TYPE_1G, 1) | 8919 phy_encode(PORT_TYPE_1G, 2) | 8920 phy_encode(PORT_TYPE_1G, 3)); 8921 else 8922 val = (phy_encode(PORT_TYPE_1G, 0) | 8923 phy_encode(PORT_TYPE_10G, 1) | 8924 phy_encode(PORT_TYPE_1G, 2) | 8925 phy_encode(PORT_TYPE_1G, 3)); 8926 break; 8927 8928 case 
0x04: 8929 if (lowest_1g == 10) 8930 parent->plat_type = PLAT_TYPE_VF_P0; 8931 else if (lowest_1g == 26) 8932 parent->plat_type = PLAT_TYPE_VF_P1; 8933 else 8934 goto unknown_vg_1g_port; 8935 8936 val = (phy_encode(PORT_TYPE_1G, 0) | 8937 phy_encode(PORT_TYPE_1G, 1) | 8938 phy_encode(PORT_TYPE_1G, 2) | 8939 phy_encode(PORT_TYPE_1G, 3)); 8940 break; 8941 8942 default: 8943 pr_err("Unsupported port config 10G[%d] 1G[%d]\n", 8944 num_10g, num_1g); 8945 return -EINVAL; 8946 } 8947 } 8948 8949 parent->port_phy = val; 8950 8951 if (parent->plat_type == PLAT_TYPE_NIU) 8952 niu_n2_divide_channels(parent); 8953 else 8954 niu_divide_channels(parent, num_10g, num_1g); 8955 8956 niu_divide_rdc_groups(parent, num_10g, num_1g); 8957 8958 return 0; 8959 8960unknown_vg_1g_port: 8961 pr_err("Cannot identify platform type, 1gport=%d\n", lowest_1g); 8962 return -EINVAL; 8963} 8964 8965static int __devinit niu_probe_ports(struct niu *np) 8966{ 8967 struct niu_parent *parent = np->parent; 8968 int err, i; 8969 8970 if (parent->port_phy == PORT_PHY_UNKNOWN) { 8971 err = walk_phys(np, parent); 8972 if (err) 8973 return err; 8974 8975 niu_set_ldg_timer_res(np, 2); 8976 for (i = 0; i <= LDN_MAX; i++) 8977 niu_ldn_irq_enable(np, i, 0); 8978 } 8979 8980 if (parent->port_phy == PORT_PHY_INVALID) 8981 return -EINVAL; 8982 8983 return 0; 8984} 8985 8986static int __devinit niu_classifier_swstate_init(struct niu *np) 8987{ 8988 struct niu_classifier *cp = &np->clas; 8989 8990 cp->tcam_top = (u16) np->port; 8991 cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports; 8992 cp->h1_init = 0xffffffff; 8993 cp->h2_init = 0xffff; 8994 8995 return fflp_early_init(np); 8996} 8997 8998static void __devinit niu_link_config_init(struct niu *np) 8999{ 9000 struct niu_link_config *lp = &np->link_config; 9001 9002 lp->advertising = (ADVERTISED_10baseT_Half | 9003 ADVERTISED_10baseT_Full | 9004 ADVERTISED_100baseT_Half | 9005 ADVERTISED_100baseT_Full | 9006 ADVERTISED_1000baseT_Half | 9007 ADVERTISED_1000baseT_Full | 9008 ADVERTISED_10000baseT_Full | 9009 ADVERTISED_Autoneg); 9010 lp->speed = lp->active_speed = SPEED_INVALID; 9011 lp->duplex = DUPLEX_FULL; 9012 lp->active_duplex = DUPLEX_INVALID; 9013 lp->autoneg = 1; 9014#if 0 9015 lp->loopback_mode = LOOPBACK_MAC; 9016 lp->active_speed = SPEED_10000; 9017 lp->active_duplex = DUPLEX_FULL; 9018#else 9019 lp->loopback_mode = LOOPBACK_DISABLED; 9020#endif 9021} 9022 9023static int __devinit niu_init_mac_ipp_pcs_base(struct niu *np) 9024{ 9025 switch (np->port) { 9026 case 0: 9027 np->mac_regs = np->regs + XMAC_PORT0_OFF; 9028 np->ipp_off = 0x00000; 9029 np->pcs_off = 0x04000; 9030 np->xpcs_off = 0x02000; 9031 break; 9032 9033 case 1: 9034 np->mac_regs = np->regs + XMAC_PORT1_OFF; 9035 np->ipp_off = 0x08000; 9036 np->pcs_off = 0x0a000; 9037 np->xpcs_off = 0x08000; 9038 break; 9039 9040 case 2: 9041 np->mac_regs = np->regs + BMAC_PORT2_OFF; 9042 np->ipp_off = 0x04000; 9043 np->pcs_off = 0x0e000; 9044 np->xpcs_off = ~0UL; 9045 break; 9046 9047 case 3: 9048 np->mac_regs = np->regs + BMAC_PORT3_OFF; 9049 np->ipp_off = 0x0c000; 9050 np->pcs_off = 0x12000; 9051 np->xpcs_off = ~0UL; 9052 break; 9053 9054 default: 9055 dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n", np->port); 9056 return -EINVAL; 9057 } 9058 9059 return 0; 9060} 9061 9062static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map) 9063{ 9064 struct msix_entry msi_vec[NIU_NUM_LDG]; 9065 struct niu_parent *parent = np->parent; 9066 struct pci_dev *pdev = np->pdev; 9067 int i, 
num_irqs, err; 9068 u8 first_ldg; 9069 9070 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port; 9071 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++) 9072 ldg_num_map[i] = first_ldg + i; 9073 9074 num_irqs = (parent->rxchan_per_port[np->port] + 9075 parent->txchan_per_port[np->port] + 9076 (np->port == 0 ? 3 : 1)); 9077 BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports)); 9078 9079retry: 9080 for (i = 0; i < num_irqs; i++) { 9081 msi_vec[i].vector = 0; 9082 msi_vec[i].entry = i; 9083 } 9084 9085 err = pci_enable_msix(pdev, msi_vec, num_irqs); 9086 if (err < 0) { 9087 np->flags &= ~NIU_FLAGS_MSIX; 9088 return; 9089 } 9090 if (err > 0) { 9091 num_irqs = err; 9092 goto retry; 9093 } 9094 9095 np->flags |= NIU_FLAGS_MSIX; 9096 for (i = 0; i < num_irqs; i++) 9097 np->ldg[i].irq = msi_vec[i].vector; 9098 np->num_ldg = num_irqs; 9099} 9100 9101static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map) 9102{ 9103#ifdef CONFIG_SPARC64 9104 struct platform_device *op = np->op; 9105 const u32 *int_prop; 9106 int i; 9107 9108 int_prop = of_get_property(op->dev.of_node, "interrupts", NULL); 9109 if (!int_prop) 9110 return -ENODEV; 9111 9112 for (i = 0; i < op->archdata.num_irqs; i++) { 9113 ldg_num_map[i] = int_prop[i]; 9114 np->ldg[i].irq = op->archdata.irqs[i]; 9115 } 9116 9117 np->num_ldg = op->archdata.num_irqs; 9118 9119 return 0; 9120#else 9121 return -EINVAL; 9122#endif 9123} 9124 9125static int __devinit niu_ldg_init(struct niu *np) 9126{ 9127 struct niu_parent *parent = np->parent; 9128 u8 ldg_num_map[NIU_NUM_LDG]; 9129 int first_chan, num_chan; 9130 int i, err, ldg_rotor; 9131 u8 port; 9132 9133 np->num_ldg = 1; 9134 np->ldg[0].irq = np->dev->irq; 9135 if (parent->plat_type == PLAT_TYPE_NIU) { 9136 err = niu_n2_irq_init(np, ldg_num_map); 9137 if (err) 9138 return err; 9139 } else 9140 niu_try_msix(np, ldg_num_map); 9141 9142 port = np->port; 9143 for (i = 0; i < np->num_ldg; i++) { 9144 struct niu_ldg *lp = &np->ldg[i]; 9145 9146 netif_napi_add(np->dev, &lp->napi, niu_poll, 64); 9147 9148 lp->np = np; 9149 lp->ldg_num = ldg_num_map[i]; 9150 lp->timer = 2; /* XXX */ 9151 9152 /* On N2 NIU the firmware has setup the SID mappings so they go 9153 * to the correct values that will route the LDG to the proper 9154 * interrupt in the NCU interrupt table. 9155 */ 9156 if (np->parent->plat_type != PLAT_TYPE_NIU) { 9157 err = niu_set_ldg_sid(np, lp->ldg_num, port, i); 9158 if (err) 9159 return err; 9160 } 9161 } 9162 9163 /* We adopt the LDG assignment ordering used by the N2 NIU 9164 * 'interrupt' properties because that simplifies a lot of 9165 * things. 
This ordering is: 9166 * 9167 * MAC 9168 * MIF (if port zero) 9169 * SYSERR (if port zero) 9170 * RX channels 9171 * TX channels 9172 */ 9173 9174 ldg_rotor = 0; 9175 9176 err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor], 9177 LDN_MAC(port)); 9178 if (err) 9179 return err; 9180 9181 ldg_rotor++; 9182 if (ldg_rotor == np->num_ldg) 9183 ldg_rotor = 0; 9184 9185 if (port == 0) { 9186 err = niu_ldg_assign_ldn(np, parent, 9187 ldg_num_map[ldg_rotor], 9188 LDN_MIF); 9189 if (err) 9190 return err; 9191 9192 ldg_rotor++; 9193 if (ldg_rotor == np->num_ldg) 9194 ldg_rotor = 0; 9195 9196 err = niu_ldg_assign_ldn(np, parent, 9197 ldg_num_map[ldg_rotor], 9198 LDN_DEVICE_ERROR); 9199 if (err) 9200 return err; 9201 9202 ldg_rotor++; 9203 if (ldg_rotor == np->num_ldg) 9204 ldg_rotor = 0; 9205 9206 } 9207 9208 first_chan = 0; 9209 for (i = 0; i < port; i++) 9210 first_chan += parent->rxchan_per_port[i]; 9211 num_chan = parent->rxchan_per_port[port]; 9212 9213 for (i = first_chan; i < (first_chan + num_chan); i++) { 9214 err = niu_ldg_assign_ldn(np, parent, 9215 ldg_num_map[ldg_rotor], 9216 LDN_RXDMA(i)); 9217 if (err) 9218 return err; 9219 ldg_rotor++; 9220 if (ldg_rotor == np->num_ldg) 9221 ldg_rotor = 0; 9222 } 9223 9224 first_chan = 0; 9225 for (i = 0; i < port; i++) 9226 first_chan += parent->txchan_per_port[i]; 9227 num_chan = parent->txchan_per_port[port]; 9228 for (i = first_chan; i < (first_chan + num_chan); i++) { 9229 err = niu_ldg_assign_ldn(np, parent, 9230 ldg_num_map[ldg_rotor], 9231 LDN_TXDMA(i)); 9232 if (err) 9233 return err; 9234 ldg_rotor++; 9235 if (ldg_rotor == np->num_ldg) 9236 ldg_rotor = 0; 9237 } 9238 9239 return 0; 9240} 9241 9242static void __devexit niu_ldg_free(struct niu *np) 9243{ 9244 if (np->flags & NIU_FLAGS_MSIX) 9245 pci_disable_msix(np->pdev); 9246} 9247 9248static int __devinit niu_get_of_props(struct niu *np) 9249{ 9250#ifdef CONFIG_SPARC64 9251 struct net_device *dev = np->dev; 9252 struct device_node *dp; 9253 const char *phy_type; 9254 const u8 *mac_addr; 9255 const char *model; 9256 int prop_len; 9257 9258 if (np->parent->plat_type == PLAT_TYPE_NIU) 9259 dp = np->op->dev.of_node; 9260 else 9261 dp = pci_device_to_OF_node(np->pdev); 9262 9263 phy_type = of_get_property(dp, "phy-type", &prop_len); 9264 if (!phy_type) { 9265 netdev_err(dev, "%s: OF node lacks phy-type property\n", 9266 dp->full_name); 9267 return -EINVAL; 9268 } 9269 9270 if (!strcmp(phy_type, "none")) 9271 return -ENODEV; 9272 9273 strcpy(np->vpd.phy_type, phy_type); 9274 9275 if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { 9276 netdev_err(dev, "%s: Illegal phy string [%s]\n", 9277 dp->full_name, np->vpd.phy_type); 9278 return -EINVAL; 9279 } 9280 9281 mac_addr = of_get_property(dp, "local-mac-address", &prop_len); 9282 if (!mac_addr) { 9283 netdev_err(dev, "%s: OF node lacks local-mac-address property\n", 9284 dp->full_name); 9285 return -EINVAL; 9286 } 9287 if (prop_len != dev->addr_len) { 9288 netdev_err(dev, "%s: OF MAC address prop len (%d) is wrong\n", 9289 dp->full_name, prop_len); 9290 } 9291 memcpy(dev->perm_addr, mac_addr, dev->addr_len); 9292 if (!is_valid_ether_addr(&dev->perm_addr[0])) { 9293 netdev_err(dev, "%s: OF MAC address is invalid\n", 9294 dp->full_name); 9295 netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->perm_addr); 9296 return -EINVAL; 9297 } 9298 9299 memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); 9300 9301 model = of_get_property(dp, "model", &prop_len); 9302 9303 if (model) 9304 strcpy(np->vpd.model, model); 9305 9306 if
(of_find_property(dp, "hot-swappable-phy", &prop_len)) { 9307 np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | 9308 NIU_FLAGS_HOTPLUG_PHY); 9309 } 9310 9311 return 0; 9312#else 9313 return -EINVAL; 9314#endif 9315} 9316 9317static int __devinit niu_get_invariants(struct niu *np) 9318{ 9319 int err, have_props; 9320 u32 offset; 9321 9322 err = niu_get_of_props(np); 9323 if (err == -ENODEV) 9324 return err; 9325 9326 have_props = !err; 9327 9328 err = niu_init_mac_ipp_pcs_base(np); 9329 if (err) 9330 return err; 9331 9332 if (have_props) { 9333 err = niu_get_and_validate_port(np); 9334 if (err) 9335 return err; 9336 9337 } else { 9338 if (np->parent->plat_type == PLAT_TYPE_NIU) 9339 return -EINVAL; 9340 9341 nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE); 9342 offset = niu_pci_vpd_offset(np); 9343 netif_printk(np, probe, KERN_DEBUG, np->dev, 9344 "%s() VPD offset [%08x]\n", __func__, offset); 9345 if (offset) 9346 niu_pci_vpd_fetch(np, offset); 9347 nw64(ESPC_PIO_EN, 0); 9348 9349 if (np->flags & NIU_FLAGS_VPD_VALID) { 9350 niu_pci_vpd_validate(np); 9351 err = niu_get_and_validate_port(np); 9352 if (err) 9353 return err; 9354 } 9355 9356 if (!(np->flags & NIU_FLAGS_VPD_VALID)) { 9357 err = niu_get_and_validate_port(np); 9358 if (err) 9359 return err; 9360 err = niu_pci_probe_sprom(np); 9361 if (err) 9362 return err; 9363 } 9364 } 9365 9366 err = niu_probe_ports(np); 9367 if (err) 9368 return err; 9369 9370 niu_ldg_init(np); 9371 9372 niu_classifier_swstate_init(np); 9373 niu_link_config_init(np); 9374 9375 err = niu_determine_phy_disposition(np); 9376 if (!err) 9377 err = niu_init_link(np); 9378 9379 return err; 9380} 9381 9382static LIST_HEAD(niu_parent_list); 9383static DEFINE_MUTEX(niu_parent_lock); 9384static int niu_parent_index; 9385 9386static ssize_t show_port_phy(struct device *dev, 9387 struct device_attribute *attr, char *buf) 9388{ 9389 struct platform_device *plat_dev = to_platform_device(dev); 9390 struct niu_parent *p = plat_dev->dev.platform_data; 9391 u32 port_phy = p->port_phy; 9392 char *orig_buf = buf; 9393 int i; 9394 9395 if (port_phy == PORT_PHY_UNKNOWN || 9396 port_phy == PORT_PHY_INVALID) 9397 return 0; 9398 9399 for (i = 0; i < p->num_ports; i++) { 9400 const char *type_str; 9401 int type; 9402 9403 type = phy_decode(port_phy, i); 9404 if (type == PORT_TYPE_10G) 9405 type_str = "10G"; 9406 else 9407 type_str = "1G"; 9408 buf += sprintf(buf, 9409 (i == 0) ? "%s" : " %s", 9410 type_str); 9411 } 9412 buf += sprintf(buf, "\n"); 9413 return buf - orig_buf; 9414} 9415 9416static ssize_t show_plat_type(struct device *dev, 9417 struct device_attribute *attr, char *buf) 9418{ 9419 struct platform_device *plat_dev = to_platform_device(dev); 9420 struct niu_parent *p = plat_dev->dev.platform_data; 9421 const char *type_str; 9422 9423 switch (p->plat_type) { 9424 case PLAT_TYPE_ATLAS: 9425 type_str = "atlas"; 9426 break; 9427 case PLAT_TYPE_NIU: 9428 type_str = "niu"; 9429 break; 9430 case PLAT_TYPE_VF_P0: 9431 type_str = "vf_p0"; 9432 break; 9433 case PLAT_TYPE_VF_P1: 9434 type_str = "vf_p1"; 9435 break; 9436 default: 9437 type_str = "unknown"; 9438 break; 9439 } 9440 9441 return sprintf(buf, "%s\n", type_str); 9442} 9443 9444static ssize_t __show_chan_per_port(struct device *dev, 9445 struct device_attribute *attr, char *buf, 9446 int rx) 9447{ 9448 struct platform_device *plat_dev = to_platform_device(dev); 9449 struct niu_parent *p = plat_dev->dev.platform_data; 9450 char *orig_buf = buf; 9451 u8 *arr; 9452 int i; 9453 9454 arr = (rx ? 
p->rxchan_per_port : p->txchan_per_port); 9455 9456 for (i = 0; i < p->num_ports; i++) { 9457 buf += sprintf(buf, 9458 (i == 0) ? "%d" : " %d", 9459 arr[i]); 9460 } 9461 buf += sprintf(buf, "\n"); 9462 9463 return buf - orig_buf; 9464} 9465 9466static ssize_t show_rxchan_per_port(struct device *dev, 9467 struct device_attribute *attr, char *buf) 9468{ 9469 return __show_chan_per_port(dev, attr, buf, 1); 9470} 9471 9472static ssize_t show_txchan_per_port(struct device *dev, 9473 struct device_attribute *attr, char *buf) 9474{ 9475 return __show_chan_per_port(dev, attr, buf, 0); 9476} 9477 9478static ssize_t show_num_ports(struct device *dev, 9479 struct device_attribute *attr, char *buf) 9480{ 9481 struct platform_device *plat_dev = to_platform_device(dev); 9482 struct niu_parent *p = plat_dev->dev.platform_data; 9483 9484 return sprintf(buf, "%d\n", p->num_ports); 9485} 9486 9487static struct device_attribute niu_parent_attributes[] = { 9488 __ATTR(port_phy, S_IRUGO, show_port_phy, NULL), 9489 __ATTR(plat_type, S_IRUGO, show_plat_type, NULL), 9490 __ATTR(rxchan_per_port, S_IRUGO, show_rxchan_per_port, NULL), 9491 __ATTR(txchan_per_port, S_IRUGO, show_txchan_per_port, NULL), 9492 __ATTR(num_ports, S_IRUGO, show_num_ports, NULL), 9493 {} 9494}; 9495 9496static struct niu_parent * __devinit niu_new_parent(struct niu *np, 9497 union niu_parent_id *id, 9498 u8 ptype) 9499{ 9500 struct platform_device *plat_dev; 9501 struct niu_parent *p; 9502 int i; 9503 9504 plat_dev = platform_device_register_simple("niu", niu_parent_index, 9505 NULL, 0); 9506 if (IS_ERR(plat_dev)) 9507 return NULL; 9508 9509 for (i = 0; attr_name(niu_parent_attributes[i]); i++) { 9510 int err = device_create_file(&plat_dev->dev, 9511 &niu_parent_attributes[i]); 9512 if (err) 9513 goto fail_unregister; 9514 } 9515 9516 p = kzalloc(sizeof(*p), GFP_KERNEL); 9517 if (!p) 9518 goto fail_unregister; 9519 9520 p->index = niu_parent_index++; 9521 9522 plat_dev->dev.platform_data = p; 9523 p->plat_dev = plat_dev; 9524 9525 memcpy(&p->id, id, sizeof(*id)); 9526 p->plat_type = ptype; 9527 INIT_LIST_HEAD(&p->list); 9528 atomic_set(&p->refcnt, 0); 9529 list_add(&p->list, &niu_parent_list); 9530 spin_lock_init(&p->lock); 9531 9532 p->rxdma_clock_divider = 7500; 9533 9534 p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES; 9535 if (p->plat_type == PLAT_TYPE_NIU) 9536 p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES; 9537 9538 for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) { 9539 int index = i - CLASS_CODE_USER_PROG1; 9540 9541 p->tcam_key[index] = TCAM_KEY_TSEL; 9542 p->flow_key[index] = (FLOW_KEY_IPSA | 9543 FLOW_KEY_IPDA | 9544 FLOW_KEY_PROTO | 9545 (FLOW_KEY_L4_BYTE12 << 9546 FLOW_KEY_L4_0_SHIFT) | 9547 (FLOW_KEY_L4_BYTE12 << 9548 FLOW_KEY_L4_1_SHIFT)); 9549 } 9550 9551 for (i = 0; i < LDN_MAX + 1; i++) 9552 p->ldg_map[i] = LDG_INVALID; 9553 9554 return p; 9555 9556fail_unregister: 9557 platform_device_unregister(plat_dev); 9558 return NULL; 9559} 9560 9561static struct niu_parent * __devinit niu_get_parent(struct niu *np, 9562 union niu_parent_id *id, 9563 u8 ptype) 9564{ 9565 struct niu_parent *p, *tmp; 9566 int port = np->port; 9567 9568 mutex_lock(&niu_parent_lock); 9569 p = NULL; 9570 list_for_each_entry(tmp, &niu_parent_list, list) { 9571 if (!memcmp(id, &tmp->id, sizeof(*id))) { 9572 p = tmp; 9573 break; 9574 } 9575 } 9576 if (!p) 9577 p = niu_new_parent(np, id, ptype); 9578 9579 if (p) { 9580 char port_name[6]; 9581 int err; 9582 9583 sprintf(port_name, "port%d", port); 9584 err = sysfs_create_link(&p->plat_dev->dev.kobj,
9585 &np->device->kobj, 9586 port_name); 9587 if (!err) { 9588 p->ports[port] = np; 9589 atomic_inc(&p->refcnt); 9590 } 9591 } 9592 mutex_unlock(&niu_parent_lock); 9593 9594 return p; 9595} 9596 9597static void niu_put_parent(struct niu *np) 9598{ 9599 struct niu_parent *p = np->parent; 9600 u8 port = np->port; 9601 char port_name[6]; 9602 9603 BUG_ON(!p || p->ports[port] != np); 9604 9605 netif_printk(np, probe, KERN_DEBUG, np->dev, 9606 "%s() port[%u]\n", __func__, port); 9607 9608 sprintf(port_name, "port%d", port); 9609 9610 mutex_lock(&niu_parent_lock); 9611 9612 sysfs_remove_link(&p->plat_dev->dev.kobj, port_name); 9613 9614 p->ports[port] = NULL; 9615 np->parent = NULL; 9616 9617 if (atomic_dec_and_test(&p->refcnt)) { 9618 list_del(&p->list); 9619 platform_device_unregister(p->plat_dev); 9620 } 9621 9622 mutex_unlock(&niu_parent_lock); 9623} 9624 9625static void *niu_pci_alloc_coherent(struct device *dev, size_t size, 9626 u64 *handle, gfp_t flag) 9627{ 9628 dma_addr_t dh; 9629 void *ret; 9630 9631 ret = dma_alloc_coherent(dev, size, &dh, flag); 9632 if (ret) 9633 *handle = dh; 9634 return ret; 9635} 9636 9637static void niu_pci_free_coherent(struct device *dev, size_t size, 9638 void *cpu_addr, u64 handle) 9639{ 9640 dma_free_coherent(dev, size, cpu_addr, handle); 9641} 9642 9643static u64 niu_pci_map_page(struct device *dev, struct page *page, 9644 unsigned long offset, size_t size, 9645 enum dma_data_direction direction) 9646{ 9647 return dma_map_page(dev, page, offset, size, direction); 9648} 9649 9650static void niu_pci_unmap_page(struct device *dev, u64 dma_address, 9651 size_t size, enum dma_data_direction direction) 9652{ 9653 dma_unmap_page(dev, dma_address, size, direction); 9654} 9655 9656static u64 niu_pci_map_single(struct device *dev, void *cpu_addr, 9657 size_t size, 9658 enum dma_data_direction direction) 9659{ 9660 return dma_map_single(dev, cpu_addr, size, direction); 9661} 9662 9663static void niu_pci_unmap_single(struct device *dev, u64 dma_address, 9664 size_t size, 9665 enum dma_data_direction direction) 9666{ 9667 dma_unmap_single(dev, dma_address, size, direction); 9668} 9669 9670static const struct niu_ops niu_pci_ops = { 9671 .alloc_coherent = niu_pci_alloc_coherent, 9672 .free_coherent = niu_pci_free_coherent, 9673 .map_page = niu_pci_map_page, 9674 .unmap_page = niu_pci_unmap_page, 9675 .map_single = niu_pci_map_single, 9676 .unmap_single = niu_pci_unmap_single, 9677}; 9678 9679static void __devinit niu_driver_version(void) 9680{ 9681 static int niu_version_printed; 9682 9683 if (niu_version_printed++ == 0) 9684 pr_info("%s", version); 9685} 9686 9687static struct net_device * __devinit niu_alloc_and_init( 9688 struct device *gen_dev, struct pci_dev *pdev, 9689 struct platform_device *op, const struct niu_ops *ops, 9690 u8 port) 9691{ 9692 struct net_device *dev; 9693 struct niu *np; 9694 9695 dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN); 9696 if (!dev) { 9697 dev_err(gen_dev, "Etherdev alloc failed, aborting\n"); 9698 return NULL; 9699 } 9700 9701 SET_NETDEV_DEV(dev, gen_dev); 9702 9703 np = netdev_priv(dev); 9704 np->dev = dev; 9705 np->pdev = pdev; 9706 np->op = op; 9707 np->device = gen_dev; 9708 np->ops = ops; 9709 9710 np->msg_enable = niu_debug; 9711 9712 spin_lock_init(&np->lock); 9713 INIT_WORK(&np->reset_task, niu_reset_task); 9714 9715 np->port = port; 9716 9717 return dev; 9718} 9719 9720static const struct net_device_ops niu_netdev_ops = { 9721 .ndo_open = niu_open, 9722 .ndo_stop = niu_close, 9723 .ndo_start_xmit = 
niu_start_xmit, 9724 .ndo_get_stats = niu_get_stats, 9725 .ndo_set_multicast_list = niu_set_rx_mode, 9726 .ndo_validate_addr = eth_validate_addr, 9727 .ndo_set_mac_address = niu_set_mac_addr, 9728 .ndo_do_ioctl = niu_ioctl, 9729 .ndo_tx_timeout = niu_tx_timeout, 9730 .ndo_change_mtu = niu_change_mtu, 9731}; 9732 9733static void __devinit niu_assign_netdev_ops(struct net_device *dev) 9734{ 9735 dev->netdev_ops = &niu_netdev_ops; 9736 dev->ethtool_ops = &niu_ethtool_ops; 9737 dev->watchdog_timeo = NIU_TX_TIMEOUT; 9738} 9739 9740static void __devinit niu_device_announce(struct niu *np) 9741{ 9742 struct net_device *dev = np->dev; 9743 9744 pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr); 9745 9746 if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) { 9747 pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n", 9748 dev->name, 9749 (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"), 9750 (np->flags & NIU_FLAGS_10G ? "10G" : "1G"), 9751 (np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"), 9752 (np->mac_xcvr == MAC_XCVR_MII ? "MII" : 9753 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")), 9754 np->vpd.phy_type); 9755 } else { 9756 pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n", 9757 dev->name, 9758 (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"), 9759 (np->flags & NIU_FLAGS_10G ? "10G" : "1G"), 9760 (np->flags & NIU_FLAGS_FIBER ? "FIBER" : 9761 (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" : 9762 "COPPER")), 9763 (np->mac_xcvr == MAC_XCVR_MII ? "MII" : 9764 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")), 9765 np->vpd.phy_type); 9766 } 9767} 9768 9769static void __devinit niu_set_basic_features(struct net_device *dev) 9770{ 9771 dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM | 9772 NETIF_F_GRO | NETIF_F_RXHASH); 9773} 9774 9775static int __devinit niu_pci_init_one(struct pci_dev *pdev, 9776 const struct pci_device_id *ent) 9777{ 9778 union niu_parent_id parent_id; 9779 struct net_device *dev; 9780 struct niu *np; 9781 int err, pos; 9782 u64 dma_mask; 9783 u16 val16; 9784 9785 niu_driver_version(); 9786 9787 err = pci_enable_device(pdev); 9788 if (err) { 9789 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 9790 return err; 9791 } 9792 9793 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) || 9794 !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 9795 dev_err(&pdev->dev, "Cannot find proper PCI device base addresses, aborting\n"); 9796 err = -ENODEV; 9797 goto err_out_disable_pdev; 9798 } 9799 9800 err = pci_request_regions(pdev, DRV_MODULE_NAME); 9801 if (err) { 9802 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); 9803 goto err_out_disable_pdev; 9804 } 9805 9806 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 9807 if (pos <= 0) { 9808 dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n"); 9809 goto err_out_free_res; 9810 } 9811 9812 dev = niu_alloc_and_init(&pdev->dev, pdev, NULL, 9813 &niu_pci_ops, PCI_FUNC(pdev->devfn)); 9814 if (!dev) { 9815 err = -ENOMEM; 9816 goto err_out_free_res; 9817 } 9818 np = netdev_priv(dev); 9819 9820 memset(&parent_id, 0, sizeof(parent_id)); 9821 parent_id.pci.domain = pci_domain_nr(pdev->bus); 9822 parent_id.pci.bus = pdev->bus->number; 9823 parent_id.pci.device = PCI_SLOT(pdev->devfn); 9824 9825 np->parent = niu_get_parent(np, &parent_id, 9826 PLAT_TYPE_ATLAS); 9827 if (!np->parent) { 9828 err = -ENOMEM; 9829 goto err_out_free_dev; 9830 } 9831 9832 pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16); 9833 val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN; 9834 val16 |= (PCI_EXP_DEVCTL_CERE | 9835 
PCI_EXP_DEVCTL_NFERE | 9836 PCI_EXP_DEVCTL_FERE | 9837 PCI_EXP_DEVCTL_URRE | 9838 PCI_EXP_DEVCTL_RELAX_EN); 9839 pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16); 9840 9841 dma_mask = DMA_BIT_MASK(44); 9842 err = pci_set_dma_mask(pdev, dma_mask); 9843 if (!err) { 9844 dev->features |= NETIF_F_HIGHDMA; 9845 err = pci_set_consistent_dma_mask(pdev, dma_mask); 9846 if (err) { 9847 dev_err(&pdev->dev, "Unable to obtain 44 bit DMA for consistent allocations, aborting\n"); 9848 goto err_out_release_parent; 9849 } 9850 } 9851 if (err || dma_mask == DMA_BIT_MASK(32)) { 9852 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 9853 if (err) { 9854 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); 9855 goto err_out_release_parent; 9856 } 9857 } 9858 9859 niu_set_basic_features(dev); 9860 9861 np->regs = pci_ioremap_bar(pdev, 0); 9862 if (!np->regs) { 9863 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 9864 err = -ENOMEM; 9865 goto err_out_release_parent; 9866 } 9867 9868 pci_set_master(pdev); 9869 pci_save_state(pdev); 9870 9871 dev->irq = pdev->irq; 9872 9873 niu_assign_netdev_ops(dev); 9874 9875 err = niu_get_invariants(np); 9876 if (err) { 9877 if (err != -ENODEV) 9878 dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n"); 9879 goto err_out_iounmap; 9880 } 9881 9882 err = register_netdev(dev); 9883 if (err) { 9884 dev_err(&pdev->dev, "Cannot register net device, aborting\n"); 9885 goto err_out_iounmap; 9886 } 9887 9888 pci_set_drvdata(pdev, dev); 9889 9890 niu_device_announce(np); 9891 9892 return 0; 9893 9894err_out_iounmap: 9895 if (np->regs) { 9896 iounmap(np->regs); 9897 np->regs = NULL; 9898 } 9899 9900err_out_release_parent: 9901 niu_put_parent(np); 9902 9903err_out_free_dev: 9904 free_netdev(dev); 9905 9906err_out_free_res: 9907 pci_release_regions(pdev); 9908 9909err_out_disable_pdev: 9910 pci_disable_device(pdev); 9911 pci_set_drvdata(pdev, NULL); 9912 9913 return err; 9914} 9915 9916static void __devexit niu_pci_remove_one(struct pci_dev *pdev) 9917{ 9918 struct net_device *dev = pci_get_drvdata(pdev); 9919 9920 if (dev) { 9921 struct niu *np = netdev_priv(dev); 9922 9923 unregister_netdev(dev); 9924 if (np->regs) { 9925 iounmap(np->regs); 9926 np->regs = NULL; 9927 } 9928 9929 niu_ldg_free(np); 9930 9931 niu_put_parent(np); 9932 9933 free_netdev(dev); 9934 pci_release_regions(pdev); 9935 pci_disable_device(pdev); 9936 pci_set_drvdata(pdev, NULL); 9937 } 9938} 9939 9940static int niu_suspend(struct pci_dev *pdev, pm_message_t state) 9941{ 9942 struct net_device *dev = pci_get_drvdata(pdev); 9943 struct niu *np = netdev_priv(dev); 9944 unsigned long flags; 9945 9946 if (!netif_running(dev)) 9947 return 0; 9948 9949 flush_work_sync(&np->reset_task); 9950 niu_netif_stop(np); 9951 9952 del_timer_sync(&np->timer); 9953 9954 spin_lock_irqsave(&np->lock, flags); 9955 niu_enable_interrupts(np, 0); 9956 spin_unlock_irqrestore(&np->lock, flags); 9957 9958 netif_device_detach(dev); 9959 9960 spin_lock_irqsave(&np->lock, flags); 9961 niu_stop_hw(np); 9962 spin_unlock_irqrestore(&np->lock, flags); 9963 9964 pci_save_state(pdev); 9965 9966 return 0; 9967} 9968 9969static int niu_resume(struct pci_dev *pdev) 9970{ 9971 struct net_device *dev = pci_get_drvdata(pdev); 9972 struct niu *np = netdev_priv(dev); 9973 unsigned long flags; 9974 int err; 9975 9976 if (!netif_running(dev)) 9977 return 0; 9978 9979 pci_restore_state(pdev); 9980 9981 netif_device_attach(dev); 9982 9983 spin_lock_irqsave(&np->lock, flags); 9984 9985 err = niu_init_hw(np); 9986 if 
(!err) { 9987 np->timer.expires = jiffies + HZ; 9988 add_timer(&np->timer); 9989 niu_netif_start(np); 9990 } 9991 9992 spin_unlock_irqrestore(&np->lock, flags); 9993 9994 return err; 9995} 9996 9997static struct pci_driver niu_pci_driver = { 9998 .name = DRV_MODULE_NAME, 9999 .id_table = niu_pci_tbl, 10000 .probe = niu_pci_init_one, 10001 .remove = __devexit_p(niu_pci_remove_one), 10002 .suspend = niu_suspend, 10003 .resume = niu_resume, 10004}; 10005 10006#ifdef CONFIG_SPARC64 10007static void *niu_phys_alloc_coherent(struct device *dev, size_t size, 10008 u64 *dma_addr, gfp_t flag) 10009{ 10010 unsigned long order = get_order(size); 10011 unsigned long page = __get_free_pages(flag, order); 10012 10013 if (page == 0UL) 10014 return NULL; 10015 memset((char *)page, 0, PAGE_SIZE << order); 10016 *dma_addr = __pa(page); 10017 10018 return (void *) page; 10019} 10020 10021static void niu_phys_free_coherent(struct device *dev, size_t size, 10022 void *cpu_addr, u64 handle) 10023{ 10024 unsigned long order = get_order(size); 10025 10026 free_pages((unsigned long) cpu_addr, order); 10027} 10028 10029static u64 niu_phys_map_page(struct device *dev, struct page *page, 10030 unsigned long offset, size_t size, 10031 enum dma_data_direction direction) 10032{ 10033 return page_to_phys(page) + offset; 10034} 10035 10036static void niu_phys_unmap_page(struct device *dev, u64 dma_address, 10037 size_t size, enum dma_data_direction direction) 10038{ 10039 /* Nothing to do. */ 10040} 10041 10042static u64 niu_phys_map_single(struct device *dev, void *cpu_addr, 10043 size_t size, 10044 enum dma_data_direction direction) 10045{ 10046 return __pa(cpu_addr); 10047} 10048 10049static void niu_phys_unmap_single(struct device *dev, u64 dma_address, 10050 size_t size, 10051 enum dma_data_direction direction) 10052{ 10053 /* Nothing to do. 
*/ 10054} 10055 10056static const struct niu_ops niu_phys_ops = { 10057 .alloc_coherent = niu_phys_alloc_coherent, 10058 .free_coherent = niu_phys_free_coherent, 10059 .map_page = niu_phys_map_page, 10060 .unmap_page = niu_phys_unmap_page, 10061 .map_single = niu_phys_map_single, 10062 .unmap_single = niu_phys_unmap_single, 10063}; 10064 10065static int __devinit niu_of_probe(struct platform_device *op, 10066 const struct of_device_id *match) 10067{ 10068 union niu_parent_id parent_id; 10069 struct net_device *dev; 10070 struct niu *np; 10071 const u32 *reg; 10072 int err; 10073 10074 niu_driver_version(); 10075 10076 reg = of_get_property(op->dev.of_node, "reg", NULL); 10077 if (!reg) { 10078 dev_err(&op->dev, "%s: No 'reg' property, aborting\n", 10079 op->dev.of_node->full_name); 10080 return -ENODEV; 10081 } 10082 10083 dev = niu_alloc_and_init(&op->dev, NULL, op, 10084 &niu_phys_ops, reg[0] & 0x1); 10085 if (!dev) { 10086 err = -ENOMEM; 10087 goto err_out; 10088 } 10089 np = netdev_priv(dev); 10090 10091 memset(&parent_id, 0, sizeof(parent_id)); 10092 parent_id.of = of_get_parent(op->dev.of_node); 10093 10094 np->parent = niu_get_parent(np, &parent_id, 10095 PLAT_TYPE_NIU); 10096 if (!np->parent) { 10097 err = -ENOMEM; 10098 goto err_out_free_dev; 10099 } 10100 10101 niu_set_basic_features(dev); 10102 10103 np->regs = of_ioremap(&op->resource[1], 0, 10104 resource_size(&op->resource[1]), 10105 "niu regs"); 10106 if (!np->regs) { 10107 dev_err(&op->dev, "Cannot map device registers, aborting\n"); 10108 err = -ENOMEM; 10109 goto err_out_release_parent; 10110 } 10111 10112 np->vir_regs_1 = of_ioremap(&op->resource[2], 0, 10113 resource_size(&op->resource[2]), 10114 "niu vregs-1"); 10115 if (!np->vir_regs_1) { 10116 dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n"); 10117 err = -ENOMEM; 10118 goto err_out_iounmap; 10119 } 10120 10121 np->vir_regs_2 = of_ioremap(&op->resource[3], 0, 10122 resource_size(&op->resource[3]), 10123 "niu vregs-2"); 10124 if (!np->vir_regs_2) { 10125 dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n"); 10126 err = -ENOMEM; 10127 goto err_out_iounmap; 10128 } 10129 10130 niu_assign_netdev_ops(dev); 10131 10132 err = niu_get_invariants(np); 10133 if (err) { 10134 if (err != -ENODEV) 10135 dev_err(&op->dev, "Problem fetching invariants of chip, aborting\n"); 10136 goto err_out_iounmap; 10137 } 10138 10139 err = register_netdev(dev); 10140 if (err) { 10141 dev_err(&op->dev, "Cannot register net device, aborting\n"); 10142 goto err_out_iounmap; 10143 } 10144 10145 dev_set_drvdata(&op->dev, dev); 10146 10147 niu_device_announce(np); 10148 10149 return 0; 10150 10151err_out_iounmap: 10152 if (np->vir_regs_1) { 10153 of_iounmap(&op->resource[2], np->vir_regs_1, 10154 resource_size(&op->resource[2])); 10155 np->vir_regs_1 = NULL; 10156 } 10157 10158 if (np->vir_regs_2) { 10159 of_iounmap(&op->resource[3], np->vir_regs_2, 10160 resource_size(&op->resource[3])); 10161 np->vir_regs_2 = NULL; 10162 } 10163 10164 if (np->regs) { 10165 of_iounmap(&op->resource[1], np->regs, 10166 resource_size(&op->resource[1])); 10167 np->regs = NULL; 10168 } 10169 10170err_out_release_parent: 10171 niu_put_parent(np); 10172 10173err_out_free_dev: 10174 free_netdev(dev); 10175 10176err_out: 10177 return err; 10178} 10179 10180static int __devexit niu_of_remove(struct platform_device *op) 10181{ 10182 struct net_device *dev = dev_get_drvdata(&op->dev); 10183 10184 if (dev) { 10185 struct niu *np = netdev_priv(dev); 10186 10187 unregister_netdev(dev); 10188 10189 
if (np->vir_regs_1) { 10190 of_iounmap(&op->resource[2], np->vir_regs_1, 10191 resource_size(&op->resource[2])); 10192 np->vir_regs_1 = NULL; 10193 } 10194 10195 if (np->vir_regs_2) { 10196 of_iounmap(&op->resource[3], np->vir_regs_2, 10197 resource_size(&op->resource[3])); 10198 np->vir_regs_2 = NULL; 10199 } 10200 10201 if (np->regs) { 10202 of_iounmap(&op->resource[1], np->regs, 10203 resource_size(&op->resource[1])); 10204 np->regs = NULL; 10205 } 10206 10207 niu_ldg_free(np); 10208 10209 niu_put_parent(np); 10210 10211 free_netdev(dev); 10212 dev_set_drvdata(&op->dev, NULL); 10213 } 10214 return 0; 10215} 10216 10217static const struct of_device_id niu_match[] = { 10218 { 10219 .name = "network", 10220 .compatible = "SUNW,niusl", 10221 }, 10222 {}, 10223}; 10224MODULE_DEVICE_TABLE(of, niu_match); 10225 10226static struct of_platform_driver niu_of_driver = { 10227 .driver = { 10228 .name = "niu", 10229 .owner = THIS_MODULE, 10230 .of_match_table = niu_match, 10231 }, 10232 .probe = niu_of_probe, 10233 .remove = __devexit_p(niu_of_remove), 10234}; 10235 10236#endif /* CONFIG_SPARC64 */ 10237 10238static int __init niu_init(void) 10239{ 10240 int err = 0; 10241 10242 BUILD_BUG_ON(PAGE_SIZE < 4 * 1024); 10243 10244 niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT); 10245 10246#ifdef CONFIG_SPARC64 10247 err = of_register_platform_driver(&niu_of_driver); 10248#endif 10249 10250 if (!err) { 10251 err = pci_register_driver(&niu_pci_driver); 10252#ifdef CONFIG_SPARC64 10253 if (err) 10254 of_unregister_platform_driver(&niu_of_driver); 10255#endif 10256 } 10257 10258 return err; 10259} 10260 10261static void __exit niu_exit(void) 10262{ 10263 pci_unregister_driver(&niu_pci_driver); 10264#ifdef CONFIG_SPARC64 10265 of_unregister_platform_driver(&niu_of_driver); 10266#endif 10267} 10268 10269module_init(niu_init); 10270module_exit(niu_exit);