Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/net/niu.c at v2.6.27-rc4 (9127 lines, 207 kB)
/* niu.c: Neptune ethernet driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/log2.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>

#include <linux/io.h>

#ifdef CONFIG_SPARC64
#include <linux/of_device.h>
#endif

#include "niu.h"

#define DRV_MODULE_NAME		"niu"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"0.9"
#define DRV_MODULE_RELDATE	"May 4, 2008"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("NIU ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#ifndef DMA_44BIT_MASK
#define DMA_44BIT_MASK	0x00000fffffffffffULL
#endif

#ifndef readq
static u64 readq(void __iomem *reg)
{
	return (((u64) readl(reg + 0x4UL) << 32) |
		(u64) readl(reg));
}

static void writeq(u64 val, void __iomem *reg)
{
	writel(val & 0xffffffff, reg);
	writel(val >> 32, reg + 0x4UL);
}
#endif

static struct pci_device_id niu_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
	{}
};

MODULE_DEVICE_TABLE(pci, niu_pci_tbl);

#define NIU_TX_TIMEOUT			(5 * HZ)

#define nr64(reg)		readq(np->regs + (reg))
#define nw64(reg, val)		writeq((val), np->regs + (reg))

#define nr64_mac(reg)		readq(np->mac_regs + (reg))
#define nw64_mac(reg, val)	writeq((val), np->mac_regs + (reg))

#define nr64_ipp(reg)		readq(np->regs + np->ipp_off + (reg))
#define nw64_ipp(reg, val)	writeq((val), np->regs + np->ipp_off + (reg))

#define nr64_pcs(reg)		readq(np->regs + np->pcs_off + (reg))
#define nw64_pcs(reg, val)	writeq((val), np->regs + np->pcs_off + (reg))

#define nr64_xpcs(reg)		readq(np->regs + np->xpcs_off + (reg))
#define nw64_xpcs(reg, val)	writeq((val), np->regs + np->xpcs_off + (reg))

#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int niu_debug;
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "NIU debug level");

#define niudbg(TYPE, f, a...) \
do {	if ((np)->msg_enable & NETIF_MSG_##TYPE) \
		printk(KERN_DEBUG PFX f, ## a); \
} while (0)

#define niuinfo(TYPE, f, a...) \
do {	if ((np)->msg_enable & NETIF_MSG_##TYPE) \
		printk(KERN_INFO PFX f, ## a); \
} while (0)

#define niuwarn(TYPE, f, a...) \
do {	if ((np)->msg_enable & NETIF_MSG_##TYPE) \
		printk(KERN_WARNING PFX f, ## a); \
} while (0)

#define niu_lock_parent(np, flags) \
	spin_lock_irqsave(&np->parent->lock, flags)
#define niu_unlock_parent(np, flags) \
	spin_unlock_irqrestore(&np->parent->lock, flags)

static int serdes_init_10g_serdes(struct niu *np);

static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_mac(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;

	nw64_mac(reg, bits);
	err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
	if (err)
		dev_err(np->device, PFX "%s: bits (%llx) of register %s "
			"would not clear, val[%llx]\n",
			np->dev->name, (unsigned long long) bits, reg_name,
			(unsigned long long) nr64_mac(reg));
	return err;
}

#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_ipp(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;
	u64 val;

	val = nr64_ipp(reg);
	val |= bits;
	nw64_ipp(reg, val);

	err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
	if (err)
		dev_err(np->device, PFX "%s: bits (%llx) of register %s "
			"would not clear, val[%llx]\n",
			np->dev->name, (unsigned long long) bits, reg_name,
			(unsigned long long) nr64_ipp(reg));
	return err;
}

#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
				 u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \
})

static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
				    u64 bits, int limit, int delay,
				    const char *reg_name)
{
	int err;

	nw64(reg, bits);
	err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
	if (err)
		dev_err(np->device, PFX "%s: bits (%llx) of register %s "
			"would not clear, val[%llx]\n",
			np->dev->name, (unsigned long long) bits, reg_name,
			(unsigned long long) nr64(reg));
	return err;
}

#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
{
	u64 val = (u64) lp->timer;

	if (on)
		val |= LDG_IMGMT_ARM;

	nw64(LDG_IMGMT(lp->ldg_num), val);
}

static int niu_ldn_irq_enable(struct niu *np, int ldn, int on)
{
	unsigned long mask_reg, bits;
	u64 val;

	if (ldn < 0 || ldn > LDN_MAX)
		return -EINVAL;

	if (ldn < 64) {
		mask_reg = LD_IM0(ldn);
		bits = LD_IM0_MASK;
	} else {
		mask_reg = LD_IM1(ldn - 64);
		bits = LD_IM1_MASK;
	}

	val = nr64(mask_reg);
	if (on)
		val &= ~bits;
	else
		val |= bits;
	nw64(mask_reg, val);

	return 0;
}

static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on)
{
	struct niu_parent *parent = np->parent;
	int i;

	for (i = 0; i <= LDN_MAX; i++) {
		int err;

		if (parent->ldg_map[i] != lp->ldg_num)
			continue;

		err = niu_ldn_irq_enable(np, i, on);
		if (err)
			return err;
	}
	return 0;
}

static int niu_enable_interrupts(struct niu *np, int on)
{
	int i;

	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];
		int err;

		err = niu_enable_ldn_in_ldg(np, lp, on);
		if (err)
			return err;
	}
	for (i = 0; i < np->num_ldg; i++)
		niu_ldg_rearm(np, &np->ldg[i], on);

	return 0;
}

static u32 phy_encode(u32 type, int port)
{
	return (type << (port * 2));
}

static u32 phy_decode(u32 val, int port)
{
	return (val >> (port * 2)) & PORT_TYPE_MASK;
}

static int mdio_wait(struct niu *np)
{
	int limit = 1000;
	u64 val;

	while (--limit > 0) {
		val = nr64(MIF_FRAME_OUTPUT);
		if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1)
			return val & MIF_FRAME_OUTPUT_DATA;

		udelay(10);
	}

	return -ENODEV;
}

static int mdio_read(struct niu *np, int port, int dev, int reg)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev));
	return mdio_wait(np);
}

static int mdio_write(struct niu *np, int port, int dev, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}

static int mii_read(struct niu *np, int port, int reg)
{
	nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg));
	return mdio_wait(np);
}

static int mii_write(struct niu *np, int port, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}

static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_TX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_TX_CFG_H(channel),
				 val >> 16);
	return err;
}

static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_RX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_RX_CFG_H(channel),
				 val >> 16);
	return err;
}

/* Mode is always 10G fiber. */
static int serdes_init_niu(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg;
	unsigned long i;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize all 4 lanes of the SERDES. */
	for (i = 0; i < 4; i++) {
		int err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		int err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	return 0;
}

static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_CTRL_H(chan));
		if (err >= 0)
			*val |= ((err & 0xffff) << 16);
		err = 0;
	}
	return err;
}

static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_GLUE_CTRL0_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_GLUE_CTRL0_H(chan));
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_read_reset(struct niu *np, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_RXTX_RESET_CTRL_L);
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_RESET_CTRL_H);
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_CTRL_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_RXTX_CTRL_H(chan), (val >> 16));
	return err;
}

static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_GLUE_CTRL0_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_GLUE_CTRL0_H(chan), (val >> 16));
	return err;
}

static int esr_reset(struct niu *np)
{
	u32 reset;
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0x0000);
	if (err)
		return err;
	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0x0000);
	if (err)
		return err;
	udelay(200);

	err = esr_read_reset(np, &reset);
	if (err)
		return err;
	if (reset != 0) {
		dev_err(np->device, PFX "Port %u ESR_RESET "
			"did not clear [%08x]\n",
			np->port, reset);
		return -ENODEV;
	}

	return 0;
}

static int serdes_init_10g(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;

	switch (np->port) {
	case 0:
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		break;
	case 1:
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		break;

	default:
		return -EINVAL;
	}
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);

	/* Initialize all 4 lanes of the SERDES. */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}

	err = esr_reset(np);
	if (err)
		return err;

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
			np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
			return 0;
		}
		dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
			"[%08x]\n", np->port, (int) (sig & mask), (int) val);
		return -ENODEV;
	}
	if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
		np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
	return 0;
}

static int serdes_init_1g(struct niu *np)
{
	u64 val;

	val = nr64(ENET_SERDES_1_PLL_CFG);
	val &= ~ENET_SERDES_PLL_FBDIV2;
	switch (np->port) {
	case 0:
		val |= ENET_SERDES_PLL_HRATE0;
		break;
	case 1:
		val |= ENET_SERDES_PLL_HRATE1;
		break;
	case 2:
		val |= ENET_SERDES_PLL_HRATE2;
		break;
	case 3:
		val |= ENET_SERDES_PLL_HRATE3;
		break;
	default:
		return -EINVAL;
	}
	nw64(ENET_SERDES_1_PLL_CFG, val);

	return 0;
}

static int serdes_init_1g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;
	u64 reset_val, val_rd;

	val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 |
		ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 |
		ENET_SERDES_PLL_FBDIV0;
	switch (np->port) {
	case 0:
		reset_val = ENET_SERDES_RESET_0;
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		pll_cfg = ENET_SERDES_0_PLL_CFG;
		break;
	case 1:
		reset_val = ENET_SERDES_RESET_1;
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		pll_cfg = ENET_SERDES_1_PLL_CFG;
		break;

	default:
		return -EINVAL;
	}
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	nw64(ENET_SERDES_RESET, reset_val);
	mdelay(20);
	val_rd = nr64(ENET_SERDES_RESET);
	val_rd &= ~reset_val;
	nw64(pll_cfg, val);
	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);
	nw64(ENET_SERDES_RESET, val_rd);
	mdelay(2000);

	/* Initialize all 4 lanes of the SERDES. */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}


	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
		mask = val;
		break;

	case 1:
		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
		mask = val;
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
			"[%08x]\n", np->port, (int) (sig & mask), (int) val);
		return -ENODEV;
	}

	return 0;
}

static int link_status_1g_serdes(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	int link_up;
	u64 val;
	u16 current_speed;
	unsigned long flags;
	u8 current_duplex;

	link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	val = nr64_pcs(PCS_MII_STAT);

	if (val & PCS_MII_STAT_LINK_STATUS) {
		link_up = 1;
		current_speed = SPEED_1000;
		current_duplex = DUPLEX_FULL;
	}

	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;
	return 0;
}

static int link_status_10g_serdes(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	struct niu_link_config *lp = &np->link_config;
	int link_up = 0;
	int link_ok = 1;
	u64 val, val2;
	u16 current_speed;
	u8 current_duplex;

	if (!(np->flags & NIU_FLAGS_10G))
		return link_status_1g_serdes(np, link_up_p);

	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;
	spin_lock_irqsave(&np->lock, flags);

	val = nr64_xpcs(XPCS_STATUS(0));
	val2 = nr64_mac(XMAC_INTER2);
	if (val2 & 0x01000000)
		link_ok = 0;

	if ((val & 0x1000ULL) && link_ok) {
		link_up = 1;
		current_speed = SPEED_10000;
		current_duplex = DUPLEX_FULL;
	}
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	spin_unlock_irqrestore(&np->lock, flags);
	*link_up_p = link_up;
	return 0;
}

static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	u16 current_speed, bmsr;
	unsigned long flags;
	u8 current_duplex;
	int err, link_up;

	link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	err = -EINVAL;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		goto out;

	bmsr = err;
	if (bmsr & BMSR_LSTATUS) {
		u16 adv, lpa, common, estat;

		err = mii_read(np, np->phy_addr, MII_ADVERTISE);
		if (err < 0)
			goto out;
		adv = err;

		err = mii_read(np, np->phy_addr, MII_LPA);
		if (err < 0)
			goto out;
		lpa = err;

		common = adv & lpa;

		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			goto out;
		estat = err;
		link_up = 1;
		current_speed = SPEED_1000;
		current_duplex = DUPLEX_FULL;

	}
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	err = 0;

out:
	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;
	return err;
}

static int bcm8704_reset(struct niu *np)
{
	int err, limit;

	err = mdio_read(np, np->phy_addr,
			BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
	if (err < 0)
		return err;
	err |= BMCR_RESET;
	err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			 MII_BMCR, err);
	if (err)
		return err;

	limit = 1000;
	while (--limit >= 0) {
		err = mdio_read(np, np->phy_addr,
				BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
		if (err < 0)
			return err;
		if (!(err & BMCR_RESET))
			break;
	}
	if (limit < 0) {
		dev_err(np->device, PFX "Port %u PHY will not reset "
			"(bmcr=%04x)\n", np->port, (err & 0xffff));
		return -ENODEV;
	}
	return 0;
}

/* When written, certain PHY registers need to be read back twice
 * in order for the bits to settle properly.
 */
static int bcm8704_user_dev3_readback(struct niu *np, int reg)
{
	int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
	if (err < 0)
		return err;
	return 0;
}

static int bcm8706_init_user_dev3(struct niu *np)
{
	int err;


	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_OPT_DIGITAL_CTRL);
	if (err < 0)
		return err;
	err &= ~USER_ODIG_CTRL_GPIOS;
	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
	err |= USER_ODIG_CTRL_RESV2;
	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
	if (err)
		return err;

	mdelay(1000);

	return 0;
}

static int bcm8704_init_user_dev3(struct niu *np)
{
	int err;

	err = mdio_write(np, np->phy_addr,
			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL,
			 (USER_CONTROL_OPTXRST_LVL |
			  USER_CONTROL_OPBIASFLT_LVL |
			  USER_CONTROL_OBTMPFLT_LVL |
			  USER_CONTROL_OPPRFLT_LVL |
			  USER_CONTROL_OPTXFLT_LVL |
			  USER_CONTROL_OPRXLOS_LVL |
			  USER_CONTROL_OPRXFLT_LVL |
			  USER_CONTROL_OPTXON_LVL |
			  (0x3f << USER_CONTROL_RES1_SHIFT)));
	if (err)
		return err;

	err = mdio_write(np, np->phy_addr,
			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL,
			 (USER_PMD_TX_CTL_XFP_CLKEN |
			  (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) |
			  (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) |
			  USER_PMD_TX_CTL_TSCK_LPWREN));
	if (err)
		return err;

	err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL);
	if (err)
		return err;
	err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL);
	if (err)
		return err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_OPT_DIGITAL_CTRL);
	if (err < 0)
		return err;
	err &= ~USER_ODIG_CTRL_GPIOS;
	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
	if (err)
		return err;

	mdelay(1000);

	return 0;
}

static int mrvl88x2011_act_led(struct niu *np, int val)
{
	int	err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			MRVL88X2011_LED_8_TO_11_CTL);
	if (err < 0)
		return err;

	err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT, MRVL88X2011_LED_CTL_MASK);
	err |= MRVL88X2011_LED(MRVL88X2011_LED_ACT, val);

	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			  MRVL88X2011_LED_8_TO_11_CTL, err);
}

static int mrvl88x2011_led_blink_rate(struct niu *np, int rate)
{
	int err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			MRVL88X2011_LED_BLINK_CTL);
	if (err >= 0) {
		err &= ~MRVL88X2011_LED_BLKRATE_MASK;
		err |= (rate << 4);

		err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
				 MRVL88X2011_LED_BLINK_CTL, err);
	}

	return err;
}

static int xcvr_init_10g_mrvl88x2011(struct niu *np)
{
	int err;

	/* Set LED functions */
	err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS);
	if (err)
		return err;

	/* led activity */
	err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF);
	if (err)
		return err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_GENERAL_CTL);
	if (err < 0)
		return err;

	err |= MRVL88X2011_ENA_XFPREFCLK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			 MRVL88X2011_GENERAL_CTL, err);
	if (err < 0)
		return err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_CTL_1);
	if (err < 0)
		return err;

	if (np->link_config.loopback_mode == LOOPBACK_MAC)
		err |= MRVL88X2011_LOOPBACK;
	else
		err &= ~MRVL88X2011_LOOPBACK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			 MRVL88X2011_PMA_PMD_CTL_1, err);
	if (err < 0)
		return err;

	/* Enable PMD  */
	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			  MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX);
}


static int xcvr_diag_bcm870x(struct niu *np)
{
	u16 analog_stat0, tx_alarm_status;
	int err = 0;

#if 1
	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			MII_STAT1000);
	if (err < 0)
		return err;
	pr_info(PFX "Port %u PMA_PMD(MII_STAT1000) [%04x]\n",
		np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
	if (err < 0)
		return err;
	pr_info(PFX "Port %u USER_DEV3(0x20) [%04x]\n",
		np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			MII_NWAYTEST);
	if (err < 0)
		return err;
	pr_info(PFX "Port %u PHYXS(MII_NWAYTEST) [%04x]\n",
		np->port, err);
#endif

	/* XXX dig this out it might not be so useful XXX */
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);
	if (err < 0)
		return err;
	analog_stat0 = err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	if (err < 0)
		return err;
	tx_alarm_status = err;

	if (analog_stat0 != 0x03fc) {
		if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
			pr_info(PFX "Port %u cable not connected "
				"or bad cable.\n", np->port);
		} else if (analog_stat0 == 0x639c) {
			pr_info(PFX "Port %u optical module is bad "
				"or missing.\n", np->port);
		}
	}

	return 0;
}

static int xcvr_10g_set_lb_bcm870x(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	int err;

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			MII_BMCR);
	if (err < 0)
		return err;

	err &= ~BMCR_LOOPBACK;

	if (lp->loopback_mode == LOOPBACK_MAC)
		err |= BMCR_LOOPBACK;

	err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			 MII_BMCR, err);
	if (err)
		return err;

	return 0;
}

static int xcvr_init_10g_bcm8706(struct niu *np)
{
	int err = 0;
	u64 val;

	if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) &&
	    (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0)
		return err;

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_LED_POLARITY;
	val |= XMAC_CONFIG_FORCE_LED_ON;
	nw64_mac(XMAC_CONFIG, val);

	val = nr64(MIF_CONFIG);
	val |= MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	err = bcm8704_reset(np);
	if (err)
		return err;

	err = xcvr_10g_set_lb_bcm870x(np);
	if (err)
		return err;

	err = bcm8706_init_user_dev3(np);
	if (err)
		return err;

	err = xcvr_diag_bcm870x(np);
	if (err)
		return err;

	return 0;
}

static int xcvr_init_10g_bcm8704(struct niu *np)
{
	int err;

	err = bcm8704_reset(np);
	if (err)
		return err;

	err = bcm8704_init_user_dev3(np);
	if (err)
		return err;

	err = xcvr_10g_set_lb_bcm870x(np);
	if (err)
		return err;

	err = xcvr_diag_bcm870x(np);
	if (err)
		return err;

	return 0;
}

static int xcvr_init_10g(struct niu *np)
{
	int phy_id, err;
	u64 val;

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_LED_POLARITY;
	val |= XMAC_CONFIG_FORCE_LED_ON;
	nw64_mac(XMAC_CONFIG, val);

	/* XXX shared resource, lock parent XXX */
	val = nr64(MIF_CONFIG);
	val |= MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	phy_id = phy_decode(np->parent->port_phy, np->port);
	phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];

	/* handle different phy types */
	switch (phy_id & NIU_PHY_ID_MASK) {
	case NIU_PHY_ID_MRVL88X2011:
		err = xcvr_init_10g_mrvl88x2011(np);
		break;

	default: /* bcom 8704 */
		err = xcvr_init_10g_bcm8704(np);
		break;
	}

	return 0;
}

static int mii_reset(struct niu *np)
{
	int limit, err;

	err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET);
	if (err)
		return err;

	limit = 1000;
	while (--limit >= 0) {
		udelay(500);
		err = mii_read(np, np->phy_addr, MII_BMCR);
		if (err < 0)
			return err;
		if (!(err & BMCR_RESET))
			break;
	}
	if (limit < 0) {
		dev_err(np->device, PFX "Port %u MII would not reset, "
			"bmcr[%04x]\n", np->port, err);
		return -ENODEV;
	}

	return 0;
}

static int xcvr_init_1g_rgmii(struct niu *np)
{
	int err;
	u64 val;
	u16 bmcr, bmsr, estat;

	val = nr64(MIF_CONFIG);
	val &= ~MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	err = mii_reset(np);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	estat = 0;
	if (bmsr & BMSR_ESTATEN) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			return err;
		estat = err;
	}

	bmcr = 0;
	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	if (bmsr & BMSR_ESTATEN) {
		u16 ctrl1000 = 0;

		if (estat & ESTATUS_1000_TFULL)
			ctrl1000 |= ADVERTISE_1000FULL;
		err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
		if (err)
			return err;
	}

	bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX);

	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (err < 0)
		return err;
	bmcr = mii_read(np, np->phy_addr, MII_BMCR);

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;

	return 0;
}

static int mii_init_common(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u16 bmcr, bmsr, adv, estat;
	int err;

	err = mii_reset(np);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	estat = 0;
	if (bmsr & BMSR_ESTATEN) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			return err;
		estat = err;
	}

	bmcr = 0;
	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	if (lp->loopback_mode == LOOPBACK_MAC) {
		bmcr |= BMCR_LOOPBACK;
		if (lp->active_speed == SPEED_1000)
			bmcr |= BMCR_SPEED1000;
		if (lp->active_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 aux;

		aux = (BCM5464R_AUX_CTL_EXT_LB |
		       BCM5464R_AUX_CTL_WRITE_1);
		err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux);
		if (err)
			return err;
	}

	/* XXX configurable XXX */
	/* XXX for now don't advertise half-duplex or asym pause... XXX */
	adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
	if (bmsr & BMSR_10FULL)
		adv |= ADVERTISE_10FULL;
	if (bmsr & BMSR_100FULL)
		adv |= ADVERTISE_100FULL;
	err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv);
	if (err)
		return err;

	if (bmsr & BMSR_ESTATEN) {
		u16 ctrl1000 = 0;

		if (estat & ESTATUS_1000_TFULL)
			ctrl1000 |= ADVERTISE_1000FULL;
		err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
		if (err)
			return err;
	}
	bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);

	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (err < 0)
		return err;
	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
#if 0
	pr_info(PFX "Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
		np->port, bmcr, bmsr);
#endif

	return 0;
}

static int xcvr_init_1g(struct niu *np)
{
	u64 val;

	/* XXX shared resource, lock parent XXX */
	val = nr64(MIF_CONFIG);
	val &= ~MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	return mii_init_common(np);
}

static int niu_xcvr_init(struct niu *np)
{
	const struct niu_phy_ops *ops = np->phy_ops;
	int err;

	err = 0;
	if (ops->xcvr_init)
		err = ops->xcvr_init(np);

	return err;
}

static int niu_serdes_init(struct niu *np)
{
	const struct niu_phy_ops *ops = np->phy_ops;
	int err;

	err = 0;
	if (ops->serdes_init)
		err = ops->serdes_init(np);

	return err;
}

static void niu_init_xif(struct niu *);
static void niu_handle_led(struct niu *, int status);

static int niu_link_status_common(struct niu *np, int link_up)
{
	struct niu_link_config *lp = &np->link_config;
	struct net_device *dev = np->dev;
	unsigned long flags;

	if (!netif_carrier_ok(dev) && link_up) {
		niuinfo(LINK, "%s: Link is up at %s, %s duplex\n",
			dev->name,
			(lp->active_speed == SPEED_10000 ?
			 "10Gb/sec" :
			 (lp->active_speed == SPEED_1000 ?
			  "1Gb/sec" :
			  (lp->active_speed == SPEED_100 ?
			   "100Mbit/sec" : "10Mbit/sec"))),
			(lp->active_duplex == DUPLEX_FULL ?
			 "full" : "half"));

		spin_lock_irqsave(&np->lock, flags);
		niu_init_xif(np);
		niu_handle_led(np, 1);
		spin_unlock_irqrestore(&np->lock, flags);

		netif_carrier_on(dev);
	} else if (netif_carrier_ok(dev) && !link_up) {
		niuwarn(LINK, "%s: Link is down\n", dev->name);
		spin_lock_irqsave(&np->lock, flags);
		niu_handle_led(np, 0);
		spin_unlock_irqrestore(&np->lock, flags);
		netif_carrier_off(dev);
	}

	return 0;
}

static int link_status_10g_mrvl(struct niu *np, int *link_up_p)
{
	int err, link_up, pma_status, pcs_status;

	link_up = 0;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_10G_PMD_STATUS_2);
	if (err < 0)
		goto out;

	/* Check PMA/PMD Register: 1.0001.2 == 1 */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);

	/* Check PMC Register : 3.0001.2 == 1: read twice */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);

	/* Check XGXS Register : 4.0018.[0-3,12] */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR,
			MRVL88X2011_10G_XGXS_LANE_STAT);
	if (err < 0)
		goto out;

	if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC |
		    0x800))
		link_up = (pma_status && pcs_status) ? 1 : 0;

	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;
out:
	mrvl88x2011_act_led(np, (link_up ?
				 MRVL88X2011_LED_CTL_PCS_ACT :
				 MRVL88X2011_LED_CTL_OFF));

	*link_up_p = link_up;
	return err;
}

static int link_status_10g_bcm8706(struct niu *np, int *link_up_p)
{
	int err, link_up;
	link_up = 0;

	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			BCM8704_PMD_RCV_SIGDET);
	if (err < 0)
		goto out;
	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			BCM8704_PCS_10G_R_STATUS);
	if (err < 0)
		goto out;

	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			BCM8704_PHYXS_XGXS_LANE_STAT);
	if (err < 0)
		goto out;
	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
		    PHYXS_XGXS_LANE_STAT_MAGIC |
		    PHYXS_XGXS_LANE_STAT_PATTEST |
		    PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 |
		    PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0)) {
		err = 0;
		np->link_config.active_speed = SPEED_INVALID;
		np->link_config.active_duplex = DUPLEX_INVALID;
		goto out;
	}

	link_up = 1;
	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;

out:
	*link_up_p = link_up;
	if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
		err = 0;
	return err;
}

static int link_status_10g_bcom(struct niu *np, int *link_up_p)
{
	int err, link_up;

	link_up = 0;

	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			BCM8704_PMD_RCV_SIGDET);
	if (err < 0)
		goto out;
	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			BCM8704_PCS_10G_R_STATUS);
	if (err < 0)
		goto out;
	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			BCM8704_PHYXS_XGXS_LANE_STAT);
	if (err < 0)
		goto out;

	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
		    PHYXS_XGXS_LANE_STAT_MAGIC |
		    PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 |
		    PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0)) {
		err = 0;
		goto out;
	}

	link_up = 1;
	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;

out:
	*link_up_p = link_up;
	return err;
}

static int link_status_10g(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	int err = -EINVAL;

	spin_lock_irqsave(&np->lock, flags);

	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
		int phy_id;

		phy_id = phy_decode(np->parent->port_phy, np->port);
		phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];

		/* handle different phy types */
		switch (phy_id & NIU_PHY_ID_MASK) {
		case NIU_PHY_ID_MRVL88X2011:
			err = link_status_10g_mrvl(np, link_up_p);
			break;

		default: /* bcom 8704 */
			err = link_status_10g_bcom(np, link_up_p);
			break;
		}
	}

	spin_unlock_irqrestore(&np->lock, flags);

	return err;
}

static int niu_10g_phy_present(struct niu *np)
{
	u64 sig, mask, val;

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return 0;
	}

	if ((sig & mask) != val)
		return 0;
	return 1;
}

static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	int err = 0;
	int phy_present;
	int phy_present_prev;

	spin_lock_irqsave(&np->lock, flags);

	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
		phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ?
			1 : 0;
		phy_present = niu_10g_phy_present(np);
		if (phy_present != phy_present_prev) {
			/* state change */
			if (phy_present) {
				np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				if (np->phy_ops->xcvr_init)
					err = np->phy_ops->xcvr_init(np);
				if (err) {
					/* debounce */
					np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				}
			} else {
				np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				*link_up_p = 0;
				niuwarn(LINK, "%s: Hotplug PHY Removed\n",
					np->dev->name);
			}
		}
		if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT)
			err = link_status_10g_bcm8706(np, link_up_p);
	}

	spin_unlock_irqrestore(&np->lock, flags);

	return err;
}

static int link_status_1g(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	u16 current_speed, bmsr;
	unsigned long flags;
	u8 current_duplex;
	int err, link_up;

	link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	err = -EINVAL;
	if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
		goto out;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		goto out;

	bmsr = err;
	if (bmsr & BMSR_LSTATUS) {
		u16 adv, lpa, common, estat;

		err = mii_read(np, np->phy_addr, MII_ADVERTISE);
		if (err < 0)
			goto out;
		adv = err;

		err = mii_read(np, np->phy_addr, MII_LPA);
		if (err < 0)
			goto out;
		lpa = err;

		common = adv & lpa;

		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			goto out;
		estat = err;

		link_up = 1;
		if (estat & (ESTATUS_1000_TFULL | ESTATUS_1000_THALF)) {
			current_speed = SPEED_1000;
			if (estat & ESTATUS_1000_TFULL)
				current_duplex = DUPLEX_FULL;
			else
				current_duplex = DUPLEX_HALF;
		} else {
			if (common & ADVERTISE_100BASE4) {
				current_speed = SPEED_100;
				current_duplex = DUPLEX_HALF;
			} else if (common & ADVERTISE_100FULL) {
				current_speed = SPEED_100;
				current_duplex = DUPLEX_FULL;
			} else if (common & ADVERTISE_100HALF) {
				current_speed = SPEED_100;
				current_duplex = DUPLEX_HALF;
			} else if (common & ADVERTISE_10FULL) {
				current_speed = SPEED_10;
				current_duplex = DUPLEX_FULL;
			} else if (common & ADVERTISE_10HALF) {
				current_speed = SPEED_10;
				current_duplex = DUPLEX_HALF;
			} else
				link_up = 0;
		}
	}
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	err = 0;

out:
	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;
	return err;
}

static int niu_link_status(struct niu *np, int *link_up_p)
{
	const struct niu_phy_ops *ops = np->phy_ops;
	int err;

	err = 0;
	if (ops->link_status)
		err = ops->link_status(np, link_up_p);

	return err;
}

static void niu_timer(unsigned long __opaque)
{
	struct niu *np = (struct niu *) __opaque;
	unsigned long off;
	int err, link_up;

	err = niu_link_status(np, &link_up);
	if (!err)
		niu_link_status_common(np, link_up);

	if (netif_carrier_ok(np->dev))
		off = 5 * HZ;
	else
		off = 1 * HZ;
	np->timer.expires = jiffies + off;

	add_timer(&np->timer);
}

static const struct niu_phy_ops phy_ops_10g_serdes = {
	.serdes_init		= serdes_init_10g_serdes,
	.link_status		= link_status_10g_serdes,
};

static const struct niu_phy_ops phy_ops_1g_rgmii = {
	.xcvr_init		= xcvr_init_1g_rgmii,
	.link_status		= link_status_1g_rgmii,
};

static const struct niu_phy_ops phy_ops_10g_fiber_niu = {
	.serdes_init		= serdes_init_niu,
	.xcvr_init		= xcvr_init_10g,
	.link_status		= link_status_10g,
};

static const struct niu_phy_ops phy_ops_10g_fiber = {
	.serdes_init		= serdes_init_10g,
	.xcvr_init		= xcvr_init_10g,
	.link_status		= link_status_10g,
};

static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = {
	.serdes_init		= serdes_init_10g,
	.xcvr_init		= xcvr_init_10g_bcm8706,
	.link_status		= link_status_10g_hotplug,
};

static const struct niu_phy_ops phy_ops_10g_copper = {
	.serdes_init		= serdes_init_10g,
	.link_status		= link_status_10g, /* XXX */
};

static const struct niu_phy_ops phy_ops_1g_fiber = {
	.serdes_init		= serdes_init_1g,
	.xcvr_init		= xcvr_init_1g,
	.link_status		= link_status_1g,
};

static const struct niu_phy_ops phy_ops_1g_copper = {
	.xcvr_init		= xcvr_init_1g,
	.link_status		= link_status_1g,
};

struct niu_phy_template {
	const struct niu_phy_ops	*ops;
	u32				phy_addr_base;
};

static const struct niu_phy_template phy_template_niu = {
	.ops		= &phy_ops_10g_fiber_niu,
	.phy_addr_base	= 16,
};

static const struct niu_phy_template phy_template_10g_fiber = {
	.ops		= &phy_ops_10g_fiber,
	.phy_addr_base	= 8,
};

static const struct niu_phy_template phy_template_10g_fiber_hotplug = {
	.ops		= &phy_ops_10g_fiber_hotplug,
	.phy_addr_base	= 8,
};

static const struct niu_phy_template phy_template_10g_copper = {
	.ops		= &phy_ops_10g_copper,
	.phy_addr_base	= 10,
};

static const struct niu_phy_template phy_template_1g_fiber = {
	.ops		= &phy_ops_1g_fiber,
	.phy_addr_base	= 0,
};

static const struct niu_phy_template phy_template_1g_copper = {
	.ops		= &phy_ops_1g_copper,
	.phy_addr_base	= 0,
};

static const struct niu_phy_template phy_template_1g_rgmii = {
	.ops		= &phy_ops_1g_rgmii,
	.phy_addr_base	= 0,
};

static const struct niu_phy_template phy_template_10g_serdes = {
	.ops		= &phy_ops_10g_serdes,
	.phy_addr_base	= 0,
};

static int niu_atca_port_num[4] = {
	0, 0,  11, 10
};

static int serdes_init_10g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;
	u64 reset_val;

	switch (np->port) {
	case 0:
		reset_val = ENET_SERDES_RESET_0;
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		pll_cfg = ENET_SERDES_0_PLL_CFG;
		break;
	case 1:
		reset_val = ENET_SERDES_RESET_1;
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		pll_cfg = ENET_SERDES_1_PLL_CFG;
		break;

	default:
		return -EINVAL;
	}
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	esr_reset(np);
	nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2);
	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);

	/* Initialize all 4 lanes of the SERDES. */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}


	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		int err;
		err = serdes_init_1g_serdes(np);
		if (!err) {
			np->flags &= ~NIU_FLAGS_10G;
			np->mac_xcvr = MAC_XCVR_PCS;
		} else {
			dev_err(np->device, PFX "Port %u 10G/1G SERDES Link Failed \n",
				np->port);
			return -ENODEV;
		}
	}

	return 0;
}

static int niu_determine_phy_disposition(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	u8 plat_type = parent->plat_type;
	const struct niu_phy_template *tp;
	u32 phy_addr_off = 0;

	if (plat_type == PLAT_TYPE_NIU) {
		tp = &phy_template_niu;
		phy_addr_off += np->port;
	} else {
		switch (np->flags &
			(NIU_FLAGS_10G |
			 NIU_FLAGS_FIBER |
			 NIU_FLAGS_XCVR_SERDES)) {
		case 0:
			/* 1G copper */
			tp = &phy_template_1g_copper;
			if (plat_type == PLAT_TYPE_VF_P0)
				phy_addr_off = 10;
			else if (plat_type == PLAT_TYPE_VF_P1)
				phy_addr_off = 26;

			phy_addr_off += (np->port ^ 0x3);
			break;

		case NIU_FLAGS_10G:
			/* 10G copper */
			tp = &phy_template_1g_copper;
			break;

		case NIU_FLAGS_FIBER:
			/* 1G fiber */
			tp = &phy_template_1g_fiber;
			break;

		case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
			/* 10G fiber */
			tp = &phy_template_10g_fiber;
			if (plat_type == PLAT_TYPE_VF_P0 ||
			    plat_type == PLAT_TYPE_VF_P1)
				phy_addr_off = 8;
			phy_addr_off += np->port;
			if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
				tp = &phy_template_10g_fiber_hotplug;
				if (np->port == 0)
					phy_addr_off = 8;
				if (np->port == 1)
					phy_addr_off = 12;
			}
			break;

		case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
		case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
		case NIU_FLAGS_XCVR_SERDES:
			switch (np->port) {
			case 0:
			case 1:
				tp = &phy_template_10g_serdes;
				break;
			case 2:
			case 3:
				tp = &phy_template_1g_rgmii;
				break;
			default:
				return -EINVAL;
				break;
			}
			phy_addr_off = niu_atca_port_num[np->port];
			break;

		default:
			return -EINVAL;
		}
	}

	np->phy_ops = tp->ops;
	np->phy_addr = tp->phy_addr_base + phy_addr_off;

	return 0;
}

static int niu_init_link(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	int err, ignore;

	if (parent->plat_type == PLAT_TYPE_NIU) {
		err = niu_xcvr_init(np);
		if (err)
			return err;
		msleep(200);
	}
	err = niu_serdes_init(np);
	if (err)
		return err;
	msleep(200);
	err = niu_xcvr_init(np);
	if (!err)
		niu_link_status(np, &ignore);
	return 0;
}

static void niu_set_primary_mac(struct niu *np, unsigned char *addr)
{
	u16 reg0 = addr[4] << 8 | addr[5];
	u16 reg1 = addr[2] << 8 | addr[3];
	u16 reg2 = addr[0] << 8 | addr[1];

	if (np->flags & NIU_FLAGS_XMAC) {
		nw64_mac(XMAC_ADDR0, reg0);
		nw64_mac(XMAC_ADDR1, reg1);
		nw64_mac(XMAC_ADDR2, reg2);
	} else {
		nw64_mac(BMAC_ADDR0, reg0);
		nw64_mac(BMAC_ADDR1, reg1);
		nw64_mac(BMAC_ADDR2, reg2);
	}
}

static int niu_num_alt_addr(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		return XMAC_NUM_ALT_ADDR;
	else
		return BMAC_NUM_ALT_ADDR;
}

static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr)
{
	u16 reg0 = addr[4] << 8 | addr[5];
	u16 reg1 = addr[2] << 8 | addr[3];
	u16 reg2 = addr[0] << 8 | addr[1];

	if (index >= niu_num_alt_addr(np))
		return -EINVAL;

	if (np->flags & NIU_FLAGS_XMAC) {
		nw64_mac(XMAC_ALT_ADDR0(index), reg0);
		nw64_mac(XMAC_ALT_ADDR1(index), reg1);
		nw64_mac(XMAC_ALT_ADDR2(index), reg2);
	} else {
		nw64_mac(BMAC_ALT_ADDR0(index), reg0);
		nw64_mac(BMAC_ALT_ADDR1(index), reg1);
		nw64_mac(BMAC_ALT_ADDR2(index), reg2);
	}

	return 0;
}

static int niu_enable_alt_mac(struct niu *np, int index, int on)
{
	unsigned long reg;
	u64 val, mask;

	if (index >= niu_num_alt_addr(np))
		return -EINVAL;

	if (np->flags & NIU_FLAGS_XMAC) {
		reg = XMAC_ADDR_CMPEN;
		mask = 1 << index;
	} else {
		reg = BMAC_ADDR_CMPEN;
		mask = 1 << (index + 1);
	}

	val = nr64_mac(reg);
	if (on)
		val |= mask;
	else
		val &= ~mask;
	nw64_mac(reg, val);

	return 0;
}

static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg,
				   int num, int mac_pref)
{
	u64 val = nr64_mac(reg);
	val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR);
	val |= num;
	if (mac_pref)
		val |= HOST_INFO_MPR;
	nw64_mac(reg, val);
}

static int __set_rdc_table_num(struct niu *np,
			       int xmac_index, int bmac_index,
			       int rdc_table_num, int mac_pref)
{
	unsigned long reg;
long reg; 2370 2371 if (rdc_table_num & ~HOST_INFO_MACRDCTBLN) 2372 return -EINVAL; 2373 if (np->flags & NIU_FLAGS_XMAC) 2374 reg = XMAC_HOST_INFO(xmac_index); 2375 else 2376 reg = BMAC_HOST_INFO(bmac_index); 2377 __set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref); 2378 return 0; 2379} 2380 2381static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num, 2382 int mac_pref) 2383{ 2384 return __set_rdc_table_num(np, 17, 0, table_num, mac_pref); 2385} 2386 2387static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num, 2388 int mac_pref) 2389{ 2390 return __set_rdc_table_num(np, 16, 8, table_num, mac_pref); 2391} 2392 2393static int niu_set_alt_mac_rdc_table(struct niu *np, int idx, 2394 int table_num, int mac_pref) 2395{ 2396 if (idx >= niu_num_alt_addr(np)) 2397 return -EINVAL; 2398 return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref); 2399} 2400 2401static u64 vlan_entry_set_parity(u64 reg_val) 2402{ 2403 u64 port01_mask; 2404 u64 port23_mask; 2405 2406 port01_mask = 0x00ff; 2407 port23_mask = 0xff00; 2408 2409 if (hweight64(reg_val & port01_mask) & 1) 2410 reg_val |= ENET_VLAN_TBL_PARITY0; 2411 else 2412 reg_val &= ~ENET_VLAN_TBL_PARITY0; 2413 2414 if (hweight64(reg_val & port23_mask) & 1) 2415 reg_val |= ENET_VLAN_TBL_PARITY1; 2416 else 2417 reg_val &= ~ENET_VLAN_TBL_PARITY1; 2418 2419 return reg_val; 2420} 2421 2422static void vlan_tbl_write(struct niu *np, unsigned long index, 2423 int port, int vpr, int rdc_table) 2424{ 2425 u64 reg_val = nr64(ENET_VLAN_TBL(index)); 2426 2427 reg_val &= ~((ENET_VLAN_TBL_VPR | 2428 ENET_VLAN_TBL_VLANRDCTBLN) << 2429 ENET_VLAN_TBL_SHIFT(port)); 2430 if (vpr) 2431 reg_val |= (ENET_VLAN_TBL_VPR << 2432 ENET_VLAN_TBL_SHIFT(port)); 2433 reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port)); 2434 2435 reg_val = vlan_entry_set_parity(reg_val); 2436 2437 nw64(ENET_VLAN_TBL(index), reg_val); 2438} 2439 2440static void vlan_tbl_clear(struct niu *np) 2441{ 2442 int i; 2443 2444 for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) 2445 nw64(ENET_VLAN_TBL(i), 0); 2446} 2447 2448static int tcam_wait_bit(struct niu *np, u64 bit) 2449{ 2450 int limit = 1000; 2451 2452 while (--limit > 0) { 2453 if (nr64(TCAM_CTL) & bit) 2454 break; 2455 udelay(1); 2456 } 2457 if (limit <= 0) 2458 return -ENODEV; 2459 2460 return 0; 2461} 2462 2463static int tcam_flush(struct niu *np, int index) 2464{ 2465 nw64(TCAM_KEY_0, 0x00); 2466 nw64(TCAM_KEY_MASK_0, 0xff); 2467 nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index)); 2468 2469 return tcam_wait_bit(np, TCAM_CTL_STAT); 2470} 2471 2472#if 0 2473static int tcam_read(struct niu *np, int index, 2474 u64 *key, u64 *mask) 2475{ 2476 int err; 2477 2478 nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index)); 2479 err = tcam_wait_bit(np, TCAM_CTL_STAT); 2480 if (!err) { 2481 key[0] = nr64(TCAM_KEY_0); 2482 key[1] = nr64(TCAM_KEY_1); 2483 key[2] = nr64(TCAM_KEY_2); 2484 key[3] = nr64(TCAM_KEY_3); 2485 mask[0] = nr64(TCAM_KEY_MASK_0); 2486 mask[1] = nr64(TCAM_KEY_MASK_1); 2487 mask[2] = nr64(TCAM_KEY_MASK_2); 2488 mask[3] = nr64(TCAM_KEY_MASK_3); 2489 } 2490 return err; 2491} 2492#endif 2493 2494static int tcam_write(struct niu *np, int index, 2495 u64 *key, u64 *mask) 2496{ 2497 nw64(TCAM_KEY_0, key[0]); 2498 nw64(TCAM_KEY_1, key[1]); 2499 nw64(TCAM_KEY_2, key[2]); 2500 nw64(TCAM_KEY_3, key[3]); 2501 nw64(TCAM_KEY_MASK_0, mask[0]); 2502 nw64(TCAM_KEY_MASK_1, mask[1]); 2503 nw64(TCAM_KEY_MASK_2, mask[2]); 2504 nw64(TCAM_KEY_MASK_3, mask[3]); 2505 nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index)); 2506 2507 return
tcam_wait_bit(np, TCAM_CTL_STAT); 2508} 2509 2510#if 0 2511static int tcam_assoc_read(struct niu *np, int index, u64 *data) 2512{ 2513 int err; 2514 2515 nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index)); 2516 err = tcam_wait_bit(np, TCAM_CTL_STAT); 2517 if (!err) 2518 *data = nr64(TCAM_KEY_1); 2519 2520 return err; 2521} 2522#endif 2523 2524static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data) 2525{ 2526 nw64(TCAM_KEY_1, assoc_data); 2527 nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index)); 2528 2529 return tcam_wait_bit(np, TCAM_CTL_STAT); 2530} 2531 2532static void tcam_enable(struct niu *np, int on) 2533{ 2534 u64 val = nr64(FFLP_CFG_1); 2535 2536 if (on) 2537 val &= ~FFLP_CFG_1_TCAM_DIS; 2538 else 2539 val |= FFLP_CFG_1_TCAM_DIS; 2540 nw64(FFLP_CFG_1, val); 2541} 2542 2543static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio) 2544{ 2545 u64 val = nr64(FFLP_CFG_1); 2546 2547 val &= ~(FFLP_CFG_1_FFLPINITDONE | 2548 FFLP_CFG_1_CAMLAT | 2549 FFLP_CFG_1_CAMRATIO); 2550 val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT); 2551 val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT); 2552 nw64(FFLP_CFG_1, val); 2553 2554 val = nr64(FFLP_CFG_1); 2555 val |= FFLP_CFG_1_FFLPINITDONE; 2556 nw64(FFLP_CFG_1, val); 2557} 2558 2559static int tcam_user_eth_class_enable(struct niu *np, unsigned long class, 2560 int on) 2561{ 2562 unsigned long reg; 2563 u64 val; 2564 2565 if (class < CLASS_CODE_ETHERTYPE1 || 2566 class > CLASS_CODE_ETHERTYPE2) 2567 return -EINVAL; 2568 2569 reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1); 2570 val = nr64(reg); 2571 if (on) 2572 val |= L2_CLS_VLD; 2573 else 2574 val &= ~L2_CLS_VLD; 2575 nw64(reg, val); 2576 2577 return 0; 2578} 2579 2580#if 0 2581static int tcam_user_eth_class_set(struct niu *np, unsigned long class, 2582 u64 ether_type) 2583{ 2584 unsigned long reg; 2585 u64 val; 2586 2587 if (class < CLASS_CODE_ETHERTYPE1 || 2588 class > CLASS_CODE_ETHERTYPE2 || 2589 (ether_type & ~(u64)0xffff) != 0) 2590 return -EINVAL; 2591 2592 reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1); 2593 val = nr64(reg); 2594 val &= ~L2_CLS_ETYPE; 2595 val |= (ether_type << L2_CLS_ETYPE_SHIFT); 2596 nw64(reg, val); 2597 2598 return 0; 2599} 2600#endif 2601 2602static int tcam_user_ip_class_enable(struct niu *np, unsigned long class, 2603 int on) 2604{ 2605 unsigned long reg; 2606 u64 val; 2607 2608 if (class < CLASS_CODE_USER_PROG1 || 2609 class > CLASS_CODE_USER_PROG4) 2610 return -EINVAL; 2611 2612 reg = L3_CLS(class - CLASS_CODE_USER_PROG1); 2613 val = nr64(reg); 2614 if (on) 2615 val |= L3_CLS_VALID; 2616 else 2617 val &= ~L3_CLS_VALID; 2618 nw64(reg, val); 2619 2620 return 0; 2621} 2622 2623#if 0 2624static int tcam_user_ip_class_set(struct niu *np, unsigned long class, 2625 int ipv6, u64 protocol_id, 2626 u64 tos_mask, u64 tos_val) 2627{ 2628 unsigned long reg; 2629 u64 val; 2630 2631 if (class < CLASS_CODE_USER_PROG1 || 2632 class > CLASS_CODE_USER_PROG4 || 2633 (protocol_id & ~(u64)0xff) != 0 || 2634 (tos_mask & ~(u64)0xff) != 0 || 2635 (tos_val & ~(u64)0xff) != 0) 2636 return -EINVAL; 2637 2638 reg = L3_CLS(class - CLASS_CODE_USER_PROG1); 2639 val = nr64(reg); 2640 val &= ~(L3_CLS_IPVER | L3_CLS_PID | 2641 L3_CLS_TOSMASK | L3_CLS_TOS); 2642 if (ipv6) 2643 val |= L3_CLS_IPVER; 2644 val |= (protocol_id << L3_CLS_PID_SHIFT); 2645 val |= (tos_mask << L3_CLS_TOSMASK_SHIFT); 2646 val |= (tos_val << L3_CLS_TOS_SHIFT); 2647 nw64(reg, val); 2648 2649 return 0; 2650} 2651#endif 2652 2653static int tcam_early_init(struct niu *np) 2654{ 2655 unsigned long i; 2656 int err; 2657 2658 
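	/* Bring the classifier TCAM up in a known-quiet state: disable
	 * lookups first, program the default CAM access latency/ratio,
	 * then invalidate every user-programmable ethertype and IP class
	 * before any entries are written.
	 */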
tcam_enable(np, 0); 2659 tcam_set_lat_and_ratio(np, 2660 DEFAULT_TCAM_LATENCY, 2661 DEFAULT_TCAM_ACCESS_RATIO); 2662 for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) { 2663 err = tcam_user_eth_class_enable(np, i, 0); 2664 if (err) 2665 return err; 2666 } 2667 for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) { 2668 err = tcam_user_ip_class_enable(np, i, 0); 2669 if (err) 2670 return err; 2671 } 2672 2673 return 0; 2674} 2675 2676static int tcam_flush_all(struct niu *np) 2677{ 2678 unsigned long i; 2679 2680 for (i = 0; i < np->parent->tcam_num_entries; i++) { 2681 int err = tcam_flush(np, i); 2682 if (err) 2683 return err; 2684 } 2685 return 0; 2686} 2687 2688static u64 hash_addr_regval(unsigned long index, unsigned long num_entries) 2689{ 2690 return ((u64)index | (num_entries == 1 ? 2691 HASH_TBL_ADDR_AUTOINC : 0)); 2692} 2693 2694#if 0 2695static int hash_read(struct niu *np, unsigned long partition, 2696 unsigned long index, unsigned long num_entries, 2697 u64 *data) 2698{ 2699 u64 val = hash_addr_regval(index, num_entries); 2700 unsigned long i; 2701 2702 if (partition >= FCRAM_NUM_PARTITIONS || 2703 index + num_entries > FCRAM_SIZE) 2704 return -EINVAL; 2705 2706 nw64(HASH_TBL_ADDR(partition), val); 2707 for (i = 0; i < num_entries; i++) 2708 data[i] = nr64(HASH_TBL_DATA(partition)); 2709 2710 return 0; 2711} 2712#endif 2713 2714static int hash_write(struct niu *np, unsigned long partition, 2715 unsigned long index, unsigned long num_entries, 2716 u64 *data) 2717{ 2718 u64 val = hash_addr_regval(index, num_entries); 2719 unsigned long i; 2720 2721 if (partition >= FCRAM_NUM_PARTITIONS || 2722 index + (num_entries * 8) > FCRAM_SIZE) 2723 return -EINVAL; 2724 2725 nw64(HASH_TBL_ADDR(partition), val); 2726 for (i = 0; i < num_entries; i++) 2727 nw64(HASH_TBL_DATA(partition), data[i]); 2728 2729 return 0; 2730} 2731 2732static void fflp_reset(struct niu *np) 2733{ 2734 u64 val; 2735 2736 nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST); 2737 udelay(10); 2738 nw64(FFLP_CFG_1, 0); 2739 2740 val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE; 2741 nw64(FFLP_CFG_1, val); 2742} 2743 2744static void fflp_set_timings(struct niu *np) 2745{ 2746 u64 val = nr64(FFLP_CFG_1); 2747 2748 val &= ~FFLP_CFG_1_FFLPINITDONE; 2749 val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT); 2750 nw64(FFLP_CFG_1, val); 2751 2752 val = nr64(FFLP_CFG_1); 2753 val |= FFLP_CFG_1_FFLPINITDONE; 2754 nw64(FFLP_CFG_1, val); 2755 2756 val = nr64(FCRAM_REF_TMR); 2757 val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN); 2758 val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT); 2759 val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT); 2760 nw64(FCRAM_REF_TMR, val); 2761} 2762 2763static int fflp_set_partition(struct niu *np, u64 partition, 2764 u64 mask, u64 base, int enable) 2765{ 2766 unsigned long reg; 2767 u64 val; 2768 2769 if (partition >= FCRAM_NUM_PARTITIONS || 2770 (mask & ~(u64)0x1f) != 0 || 2771 (base & ~(u64)0x1f) != 0) 2772 return -EINVAL; 2773 2774 reg = FLW_PRT_SEL(partition); 2775 2776 val = nr64(reg); 2777 val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE); 2778 val |= (mask << FLW_PRT_SEL_MASK_SHIFT); 2779 val |= (base << FLW_PRT_SEL_BASE_SHIFT); 2780 if (enable) 2781 val |= FLW_PRT_SEL_EXT; 2782 nw64(reg, val); 2783 2784 return 0; 2785} 2786 2787static int fflp_disable_all_partitions(struct niu *np) 2788{ 2789 unsigned long i; 2790 2791 for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) { 2792 int err = fflp_set_partition(np, i, 0, 0, 0); 2793 if
(err) 2794 return err; 2795 } 2796 return 0; 2797} 2798 2799static void fflp_llcsnap_enable(struct niu *np, int on) 2800{ 2801 u64 val = nr64(FFLP_CFG_1); 2802 2803 if (on) 2804 val |= FFLP_CFG_1_LLCSNAP; 2805 else 2806 val &= ~FFLP_CFG_1_LLCSNAP; 2807 nw64(FFLP_CFG_1, val); 2808} 2809 2810static void fflp_errors_enable(struct niu *np, int on) 2811{ 2812 u64 val = nr64(FFLP_CFG_1); 2813 2814 if (on) 2815 val &= ~FFLP_CFG_1_ERRORDIS; 2816 else 2817 val |= FFLP_CFG_1_ERRORDIS; 2818 nw64(FFLP_CFG_1, val); 2819} 2820 2821static int fflp_hash_clear(struct niu *np) 2822{ 2823 struct fcram_hash_ipv4 ent; 2824 unsigned long i; 2825 2826 /* IPV4 hash entry with valid bit clear, rest is don't care. */ 2827 memset(&ent, 0, sizeof(ent)); 2828 ent.header = HASH_HEADER_EXT; 2829 2830 for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) { 2831 int err = hash_write(np, 0, i, 1, (u64 *) &ent); 2832 if (err) 2833 return err; 2834 } 2835 return 0; 2836} 2837 2838static int fflp_early_init(struct niu *np) 2839{ 2840 struct niu_parent *parent; 2841 unsigned long flags; 2842 int err; 2843 2844 niu_lock_parent(np, flags); 2845 2846 parent = np->parent; 2847 err = 0; 2848 if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) { 2849 niudbg(PROBE, "fflp_early_init: Initting hw on port %u\n", 2850 np->port); 2851 if (np->parent->plat_type != PLAT_TYPE_NIU) { 2852 fflp_reset(np); 2853 fflp_set_timings(np); 2854 err = fflp_disable_all_partitions(np); 2855 if (err) { 2856 niudbg(PROBE, "fflp_disable_all_partitions " 2857 "failed, err=%d\n", err); 2858 goto out; 2859 } 2860 } 2861 2862 err = tcam_early_init(np); 2863 if (err) { 2864 niudbg(PROBE, "tcam_early_init failed, err=%d\n", 2865 err); 2866 goto out; 2867 } 2868 fflp_llcsnap_enable(np, 1); 2869 fflp_errors_enable(np, 0); 2870 nw64(H1POLY, 0); 2871 nw64(H2POLY, 0); 2872 2873 err = tcam_flush_all(np); 2874 if (err) { 2875 niudbg(PROBE, "tcam_flush_all failed, err=%d\n", 2876 err); 2877 goto out; 2878 } 2879 if (np->parent->plat_type != PLAT_TYPE_NIU) { 2880 err = fflp_hash_clear(np); 2881 if (err) { 2882 niudbg(PROBE, "fflp_hash_clear failed, " 2883 "err=%d\n", err); 2884 goto out; 2885 } 2886 } 2887 2888 vlan_tbl_clear(np); 2889 2890 niudbg(PROBE, "fflp_early_init: Success\n"); 2891 parent->flags |= PARENT_FLGS_CLS_HWINIT; 2892 } 2893out: 2894 niu_unlock_parent(np, flags); 2895 return err; 2896} 2897 2898static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key) 2899{ 2900 if (class_code < CLASS_CODE_USER_PROG1 || 2901 class_code > CLASS_CODE_SCTP_IPV6) 2902 return -EINVAL; 2903 2904 nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key); 2905 return 0; 2906} 2907 2908static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key) 2909{ 2910 if (class_code < CLASS_CODE_USER_PROG1 || 2911 class_code > CLASS_CODE_SCTP_IPV6) 2912 return -EINVAL; 2913 2914 nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key); 2915 return 0; 2916} 2917 2918static void niu_rx_skb_append(struct sk_buff *skb, struct page *page, 2919 u32 offset, u32 size) 2920{ 2921 int i = skb_shinfo(skb)->nr_frags; 2922 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2923 2924 frag->page = page; 2925 frag->page_offset = offset; 2926 frag->size = size; 2927 2928 skb->len += size; 2929 skb->data_len += size; 2930 skb->truesize += size; 2931 2932 skb_shinfo(skb)->nr_frags = i + 1; 2933} 2934 2935static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a) 2936{ 2937 a >>= PAGE_SHIFT; 2938 a ^= (a >> ilog2(MAX_RBR_RING_SIZE)); 2939 2940 return (a & (MAX_RBR_RING_SIZE - 1)); 
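	/* The DMA address, folded down by PAGE_SHIFT and XOR-mixed, picks
	 * one of MAX_RBR_RING_SIZE buckets; niu_find_rxpage() below walks
	 * the bucket's chain, which is threaded through page->mapping.
	 */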
2941} 2942 2943static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr, 2944 struct page ***link) 2945{ 2946 unsigned int h = niu_hash_rxaddr(rp, addr); 2947 struct page *p, **pp; 2948 2949 addr &= PAGE_MASK; 2950 pp = &rp->rxhash[h]; 2951 for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) { 2952 if (p->index == addr) { 2953 *link = pp; 2954 break; 2955 } 2956 } 2957 2958 return p; 2959} 2960 2961static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base) 2962{ 2963 unsigned int h = niu_hash_rxaddr(rp, base); 2964 2965 page->index = base; 2966 page->mapping = (struct address_space *) rp->rxhash[h]; 2967 rp->rxhash[h] = page; 2968} 2969 2970static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp, 2971 gfp_t mask, int start_index) 2972{ 2973 struct page *page; 2974 u64 addr; 2975 int i; 2976 2977 page = alloc_page(mask); 2978 if (!page) 2979 return -ENOMEM; 2980 2981 addr = np->ops->map_page(np->device, page, 0, 2982 PAGE_SIZE, DMA_FROM_DEVICE); 2983 2984 niu_hash_page(rp, page, addr); 2985 if (rp->rbr_blocks_per_page > 1) 2986 atomic_add(rp->rbr_blocks_per_page - 1, 2987 &compound_head(page)->_count); 2988 2989 for (i = 0; i < rp->rbr_blocks_per_page; i++) { 2990 __le32 *rbr = &rp->rbr[start_index + i]; 2991 2992 *rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT); 2993 addr += rp->rbr_block_size; 2994 } 2995 2996 return 0; 2997} 2998 2999static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask) 3000{ 3001 int index = rp->rbr_index; 3002 3003 rp->rbr_pending++; 3004 if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) { 3005 int err = niu_rbr_add_page(np, rp, mask, index); 3006 3007 if (unlikely(err)) { 3008 rp->rbr_pending--; 3009 return; 3010 } 3011 3012 rp->rbr_index += rp->rbr_blocks_per_page; 3013 BUG_ON(rp->rbr_index > rp->rbr_table_size); 3014 if (rp->rbr_index == rp->rbr_table_size) 3015 rp->rbr_index = 0; 3016 3017 if (rp->rbr_pending >= rp->rbr_kick_thresh) { 3018 nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending); 3019 rp->rbr_pending = 0; 3020 } 3021 } 3022} 3023 3024static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp) 3025{ 3026 unsigned int index = rp->rcr_index; 3027 int num_rcr = 0; 3028 3029 rp->rx_dropped++; 3030 while (1) { 3031 struct page *page, **link; 3032 u64 addr, val; 3033 u32 rcr_size; 3034 3035 num_rcr++; 3036 3037 val = le64_to_cpup(&rp->rcr[index]); 3038 addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << 3039 RCR_ENTRY_PKT_BUF_ADDR_SHIFT; 3040 page = niu_find_rxpage(rp, addr, &link); 3041 3042 rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> 3043 RCR_ENTRY_PKTBUFSZ_SHIFT]; 3044 if ((page->index + PAGE_SIZE) - rcr_size == addr) { 3045 *link = (struct page *) page->mapping; 3046 np->ops->unmap_page(np->device, page->index, 3047 PAGE_SIZE, DMA_FROM_DEVICE); 3048 page->index = 0; 3049 page->mapping = NULL; 3050 __free_page(page); 3051 rp->rbr_refill_pending++; 3052 } 3053 3054 index = NEXT_RCR(rp, index); 3055 if (!(val & RCR_ENTRY_MULTI)) 3056 break; 3057 3058 } 3059 rp->rcr_index = index; 3060 3061 return num_rcr; 3062} 3063 3064static int niu_process_rx_pkt(struct niu *np, struct rx_ring_info *rp) 3065{ 3066 unsigned int index = rp->rcr_index; 3067 struct sk_buff *skb; 3068 int len, num_rcr; 3069 3070 skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE); 3071 if (unlikely(!skb)) 3072 return niu_rx_pkt_ignore(np, rp); 3073 3074 num_rcr = 0; 3075 while (1) { 3076 struct page *page, **link; 3077 u32 rcr_size, append_size; 3078 u64 addr, val, off; 3079 3080 num_rcr++; 3081 3082 val = 
le64_to_cpup(&rp->rcr[index]); 3083 3084 len = (val & RCR_ENTRY_L2_LEN) >> 3085 RCR_ENTRY_L2_LEN_SHIFT; 3086 len -= ETH_FCS_LEN; 3087 3088 addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << 3089 RCR_ENTRY_PKT_BUF_ADDR_SHIFT; 3090 page = niu_find_rxpage(rp, addr, &link); 3091 3092 rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> 3093 RCR_ENTRY_PKTBUFSZ_SHIFT]; 3094 3095 off = addr & ~PAGE_MASK; 3096 append_size = rcr_size; 3097 if (num_rcr == 1) { 3098 int ptype; 3099 3100 off += 2; 3101 append_size -= 2; 3102 3103 ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT); 3104 if ((ptype == RCR_PKT_TYPE_TCP || 3105 ptype == RCR_PKT_TYPE_UDP) && 3106 !(val & (RCR_ENTRY_NOPORT | 3107 RCR_ENTRY_ERROR))) 3108 skb->ip_summed = CHECKSUM_UNNECESSARY; 3109 else 3110 skb->ip_summed = CHECKSUM_NONE; 3111 } 3112 if (!(val & RCR_ENTRY_MULTI)) 3113 append_size = len - skb->len; 3114 3115 niu_rx_skb_append(skb, page, off, append_size); 3116 if ((page->index + rp->rbr_block_size) - rcr_size == addr) { 3117 *link = (struct page *) page->mapping; 3118 np->ops->unmap_page(np->device, page->index, 3119 PAGE_SIZE, DMA_FROM_DEVICE); 3120 page->index = 0; 3121 page->mapping = NULL; 3122 rp->rbr_refill_pending++; 3123 } else 3124 get_page(page); 3125 3126 index = NEXT_RCR(rp, index); 3127 if (!(val & RCR_ENTRY_MULTI)) 3128 break; 3129 3130 } 3131 rp->rcr_index = index; 3132 3133 skb_reserve(skb, NET_IP_ALIGN); 3134 __pskb_pull_tail(skb, min(len, NIU_RXPULL_MAX)); 3135 3136 rp->rx_packets++; 3137 rp->rx_bytes += skb->len; 3138 3139 skb->protocol = eth_type_trans(skb, np->dev); 3140 netif_receive_skb(skb); 3141 3142 np->dev->last_rx = jiffies; 3143 3144 return num_rcr; 3145} 3146 3147static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask) 3148{ 3149 int blocks_per_page = rp->rbr_blocks_per_page; 3150 int err, index = rp->rbr_index; 3151 3152 err = 0; 3153 while (index < (rp->rbr_table_size - blocks_per_page)) { 3154 err = niu_rbr_add_page(np, rp, mask, index); 3155 if (err) 3156 break; 3157 3158 index += blocks_per_page; 3159 } 3160 3161 rp->rbr_index = index; 3162 return err; 3163} 3164 3165static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp) 3166{ 3167 int i; 3168 3169 for (i = 0; i < MAX_RBR_RING_SIZE; i++) { 3170 struct page *page; 3171 3172 page = rp->rxhash[i]; 3173 while (page) { 3174 struct page *next = (struct page *) page->mapping; 3175 u64 base = page->index; 3176 3177 np->ops->unmap_page(np->device, base, PAGE_SIZE, 3178 DMA_FROM_DEVICE); 3179 page->index = 0; 3180 page->mapping = NULL; 3181 3182 __free_page(page); 3183 3184 page = next; 3185 } 3186 } 3187 3188 for (i = 0; i < rp->rbr_table_size; i++) 3189 rp->rbr[i] = cpu_to_le32(0); 3190 rp->rbr_index = 0; 3191} 3192 3193static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx) 3194{ 3195 struct tx_buff_info *tb = &rp->tx_buffs[idx]; 3196 struct sk_buff *skb = tb->skb; 3197 struct tx_pkt_hdr *tp; 3198 u64 tx_flags; 3199 int i, len; 3200 3201 tp = (struct tx_pkt_hdr *) skb->data; 3202 tx_flags = le64_to_cpup(&tp->flags); 3203 3204 rp->tx_packets++; 3205 rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) - 3206 ((tx_flags & TXHDR_PAD) / 2)); 3207 3208 len = skb_headlen(skb); 3209 np->ops->unmap_single(np->device, tb->mapping, 3210 len, DMA_TO_DEVICE); 3211 3212 if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK) 3213 rp->mark_pending--; 3214 3215 tb->skb = NULL; 3216 do { 3217 idx = NEXT_TX(rp, idx); 3218 len -= MAX_TX_DESC_LEN; 3219 } while (len > 0); 3220 3221 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 
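		/* Each paged fragment consumed one TX descriptor of its
		 * own; unmap it and advance the consumer index past it.
		 */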
3222 tb = &rp->tx_buffs[idx]; 3223 BUG_ON(tb->skb != NULL); 3224 np->ops->unmap_page(np->device, tb->mapping, 3225 skb_shinfo(skb)->frags[i].size, 3226 DMA_TO_DEVICE); 3227 idx = NEXT_TX(rp, idx); 3228 } 3229 3230 dev_kfree_skb(skb); 3231 3232 return idx; 3233} 3234 3235#define NIU_TX_WAKEUP_THRESH(rp) ((rp)->pending / 4) 3236 3237static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) 3238{ 3239 struct netdev_queue *txq; 3240 u16 pkt_cnt, tmp; 3241 int cons, index; 3242 u64 cs; 3243 3244 index = (rp - np->tx_rings); 3245 txq = netdev_get_tx_queue(np->dev, index); 3246 3247 cs = rp->tx_cs; 3248 if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK)))) 3249 goto out; 3250 3251 tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT; 3252 pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) & 3253 (TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT); 3254 3255 rp->last_pkt_cnt = tmp; 3256 3257 cons = rp->cons; 3258 3259 niudbg(TX_DONE, "%s: niu_tx_work() pkt_cnt[%u] cons[%d]\n", 3260 np->dev->name, pkt_cnt, cons); 3261 3262 while (pkt_cnt--) 3263 cons = release_tx_packet(np, rp, cons); 3264 3265 rp->cons = cons; 3266 smp_mb(); 3267 3268out: 3269 if (unlikely(netif_tx_queue_stopped(txq) && 3270 (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) { 3271 __netif_tx_lock(txq, smp_processor_id()); 3272 if (netif_tx_queue_stopped(txq) && 3273 (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))) 3274 netif_tx_wake_queue(txq); 3275 __netif_tx_unlock(txq); 3276 } 3277} 3278 3279static int niu_rx_work(struct niu *np, struct rx_ring_info *rp, int budget) 3280{ 3281 int qlen, rcr_done = 0, work_done = 0; 3282 struct rxdma_mailbox *mbox = rp->mbox; 3283 u64 stat; 3284 3285#if 1 3286 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel)); 3287 qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN; 3288#else 3289 stat = le64_to_cpup(&mbox->rx_dma_ctl_stat); 3290 qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN); 3291#endif 3292 mbox->rx_dma_ctl_stat = 0; 3293 mbox->rcrstat_a = 0; 3294 3295 niudbg(RX_STATUS, "%s: niu_rx_work(chan[%d]), stat[%llx] qlen=%d\n", 3296 np->dev->name, rp->rx_channel, (unsigned long long) stat, qlen); 3297 3298 rcr_done = work_done = 0; 3299 qlen = min(qlen, budget); 3300 while (work_done < qlen) { 3301 rcr_done += niu_process_rx_pkt(np, rp); 3302 work_done++; 3303 } 3304 3305 if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) { 3306 unsigned int i; 3307 3308 for (i = 0; i < rp->rbr_refill_pending; i++) 3309 niu_rbr_refill(np, rp, GFP_ATOMIC); 3310 rp->rbr_refill_pending = 0; 3311 } 3312 3313 stat = (RX_DMA_CTL_STAT_MEX | 3314 ((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) | 3315 ((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT)); 3316 3317 nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat); 3318 3319 return work_done; 3320} 3321 3322static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget) 3323{ 3324 u64 v0 = lp->v0; 3325 u32 tx_vec = (v0 >> 32); 3326 u32 rx_vec = (v0 & 0xffffffff); 3327 int i, work_done = 0; 3328 3329 niudbg(INTR, "%s: niu_poll_core() v0[%016llx]\n", 3330 np->dev->name, (unsigned long long) v0); 3331 3332 for (i = 0; i < np->num_tx_rings; i++) { 3333 struct tx_ring_info *rp = &np->tx_rings[i]; 3334 if (tx_vec & (1 << rp->tx_channel)) 3335 niu_tx_work(np, rp); 3336 nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0); 3337 } 3338 3339 for (i = 0; i < np->num_rx_rings; i++) { 3340 struct rx_ring_info *rp = &np->rx_rings[i]; 3341 3342 if (rx_vec & (1 << rp->rx_channel)) { 3343 int this_work_done; 3344 3345 this_work_done = niu_rx_work(np, rp, 3346 budget); 3347 3348 budget -= this_work_done; 3349 
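			/* The NAPI budget is shared by every RX ring in this
			 * logical device group, so each ring only sees what
			 * the rings before it left over.
			 */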
work_done += this_work_done; 3350 } 3351 nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0); 3352 } 3353 3354 return work_done; 3355} 3356 3357static int niu_poll(struct napi_struct *napi, int budget) 3358{ 3359 struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi); 3360 struct niu *np = lp->np; 3361 int work_done; 3362 3363 work_done = niu_poll_core(np, lp, budget); 3364 3365 if (work_done < budget) { 3366 netif_rx_complete(np->dev, napi); 3367 niu_ldg_rearm(np, lp, 1); 3368 } 3369 return work_done; 3370} 3371 3372static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp, 3373 u64 stat) 3374{ 3375 dev_err(np->device, PFX "%s: RX channel %u errors ( ", 3376 np->dev->name, rp->rx_channel); 3377 3378 if (stat & RX_DMA_CTL_STAT_RBR_TMOUT) 3379 printk("RBR_TMOUT "); 3380 if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR) 3381 printk("RSP_CNT "); 3382 if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS) 3383 printk("BYTE_EN_BUS "); 3384 if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR) 3385 printk("RSP_DAT "); 3386 if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR) 3387 printk("RCR_ACK "); 3388 if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR) 3389 printk("RCR_SHA_PAR "); 3390 if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR) 3391 printk("RBR_PRE_PAR "); 3392 if (stat & RX_DMA_CTL_STAT_CONFIG_ERR) 3393 printk("CONFIG "); 3394 if (stat & RX_DMA_CTL_STAT_RCRINCON) 3395 printk("RCRINCON "); 3396 if (stat & RX_DMA_CTL_STAT_RCRFULL) 3397 printk("RCRFULL "); 3398 if (stat & RX_DMA_CTL_STAT_RBRFULL) 3399 printk("RBRFULL "); 3400 if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE) 3401 printk("RBRLOGPAGE "); 3402 if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE) 3403 printk("CFIGLOGPAGE "); 3404 if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR) 3405 printk("DC_FIFO "); 3406 3407 printk(")\n"); 3408} 3409 3410static int niu_rx_error(struct niu *np, struct rx_ring_info *rp) 3411{ 3412 u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel)); 3413 int err = 0; 3414 3415 3416 if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL | 3417 RX_DMA_CTL_STAT_PORT_FATAL)) 3418 err = -EINVAL; 3419 3420 if (err) { 3421 dev_err(np->device, PFX "%s: RX channel %u error, stat[%llx]\n", 3422 np->dev->name, rp->rx_channel, 3423 (unsigned long long) stat); 3424 3425 niu_log_rxchan_errors(np, rp, stat); 3426 } 3427 3428 nw64(RX_DMA_CTL_STAT(rp->rx_channel), 3429 stat & RX_DMA_CTL_WRITE_CLEAR_ERRS); 3430 3431 return err; 3432} 3433 3434static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp, 3435 u64 cs) 3436{ 3437 dev_err(np->device, PFX "%s: TX channel %u errors ( ", 3438 np->dev->name, rp->tx_channel); 3439 3440 if (cs & TX_CS_MBOX_ERR) 3441 printk("MBOX "); 3442 if (cs & TX_CS_PKT_SIZE_ERR) 3443 printk("PKT_SIZE "); 3444 if (cs & TX_CS_TX_RING_OFLOW) 3445 printk("TX_RING_OFLOW "); 3446 if (cs & TX_CS_PREF_BUF_PAR_ERR) 3447 printk("PREF_BUF_PAR "); 3448 if (cs & TX_CS_NACK_PREF) 3449 printk("NACK_PREF "); 3450 if (cs & TX_CS_NACK_PKT_RD) 3451 printk("NACK_PKT_RD "); 3452 if (cs & TX_CS_CONF_PART_ERR) 3453 printk("CONF_PART "); 3454 if (cs & TX_CS_PKT_PRT_ERR) 3455 printk("PKT_PTR "); 3456 3457 printk(")\n"); 3458} 3459 3460static int niu_tx_error(struct niu *np, struct tx_ring_info *rp) 3461{ 3462 u64 cs, logh, logl; 3463 3464 cs = nr64(TX_CS(rp->tx_channel)); 3465 logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel)); 3466 logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel)); 3467 3468 dev_err(np->device, PFX "%s: TX channel %u error, " 3469 "cs[%llx] logh[%llx] logl[%llx]\n", 3470 np->dev->name, rp->tx_channel, 3471 (unsigned long long) cs, 3472 (unsigned long long) logh, 3473 (unsigned long long) logl); 3474 3475
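	/* TX channel faults are not recovered in place; after the decoded
	 * error bits are logged, the -ENODEV below makes the slowpath
	 * handler disable interrupts.
	 */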
niu_log_txchan_errors(np, rp, cs); 3476 3477 return -ENODEV; 3478} 3479 3480static int niu_mif_interrupt(struct niu *np) 3481{ 3482 u64 mif_status = nr64(MIF_STATUS); 3483 int phy_mdint = 0; 3484 3485 if (np->flags & NIU_FLAGS_XMAC) { 3486 u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS); 3487 3488 if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT) 3489 phy_mdint = 1; 3490 } 3491 3492 dev_err(np->device, PFX "%s: MIF interrupt, " 3493 "stat[%llx] phy_mdint(%d)\n", 3494 np->dev->name, (unsigned long long) mif_status, phy_mdint); 3495 3496 return -ENODEV; 3497} 3498 3499static void niu_xmac_interrupt(struct niu *np) 3500{ 3501 struct niu_xmac_stats *mp = &np->mac_stats.xmac; 3502 u64 val; 3503 3504 val = nr64_mac(XTXMAC_STATUS); 3505 if (val & XTXMAC_STATUS_FRAME_CNT_EXP) 3506 mp->tx_frames += TXMAC_FRM_CNT_COUNT; 3507 if (val & XTXMAC_STATUS_BYTE_CNT_EXP) 3508 mp->tx_bytes += TXMAC_BYTE_CNT_COUNT; 3509 if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR) 3510 mp->tx_fifo_errors++; 3511 if (val & XTXMAC_STATUS_TXMAC_OFLOW) 3512 mp->tx_overflow_errors++; 3513 if (val & XTXMAC_STATUS_MAX_PSIZE_ERR) 3514 mp->tx_max_pkt_size_errors++; 3515 if (val & XTXMAC_STATUS_TXMAC_UFLOW) 3516 mp->tx_underflow_errors++; 3517 3518 val = nr64_mac(XRXMAC_STATUS); 3519 if (val & XRXMAC_STATUS_LCL_FLT_STATUS) 3520 mp->rx_local_faults++; 3521 if (val & XRXMAC_STATUS_RFLT_DET) 3522 mp->rx_remote_faults++; 3523 if (val & XRXMAC_STATUS_LFLT_CNT_EXP) 3524 mp->rx_link_faults += LINK_FAULT_CNT_COUNT; 3525 if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP) 3526 mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT; 3527 if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP) 3528 mp->rx_frags += RXMAC_FRAG_CNT_COUNT; 3529 if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP) 3530 mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT; 3531 if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP) 3532 mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; 3535 if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP) 3536 mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT; 3537 if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP) 3538 mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT; 3539 if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP) 3540 mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT; 3541 if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP) 3542 mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT; 3543 if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP) 3544 mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT; 3545 if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP) 3546 mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT; 3547 if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP) 3548 mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT; 3549 if (val & XRXMAC_STAT_MSK_RXOCTET_CNT_EXP) 3550 mp->rx_octets += RXMAC_BT_CNT_COUNT; 3551 if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP) 3552 mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT; 3553 if (val & XRXMAC_STATUS_LENERR_CNT_EXP) 3554 mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT; 3555 if (val & XRXMAC_STATUS_CRCERR_CNT_EXP) 3556 mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT; 3557 if (val & XRXMAC_STATUS_RXUFLOW) 3558 mp->rx_underflows++; 3559 if (val & XRXMAC_STATUS_RXOFLOW) 3560 mp->rx_overflows++; 3561 3562 val = nr64_mac(XMAC_FC_STAT); 3563 if (val & XMAC_FC_STAT_TX_MAC_NPAUSE) 3564 mp->pause_off_state++; 3565 if (val & XMAC_FC_STAT_TX_MAC_PAUSE) 3566 mp->pause_on_state++; 3567 if (val & XMAC_FC_STAT_RX_MAC_RPAUSE) 3568 mp->pause_received++; 3569} 3570 3571static void niu_bmac_interrupt(struct niu *np) 3572{ 3573 struct niu_bmac_stats *mp = &np->mac_stats.bmac; 3574 u64 val; 3575 3576 val = nr64_mac(BTXMAC_STATUS); 3577 if (val &
BTXMAC_STATUS_UNDERRUN) 3578 mp->tx_underflow_errors++; 3579 if (val & BTXMAC_STATUS_MAX_PKT_ERR) 3580 mp->tx_max_pkt_size_errors++; 3581 if (val & BTXMAC_STATUS_BYTE_CNT_EXP) 3582 mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT; 3583 if (val & BTXMAC_STATUS_FRAME_CNT_EXP) 3584 mp->tx_frames += BTXMAC_FRM_CNT_COUNT; 3585 3586 val = nr64_mac(BRXMAC_STATUS); 3587 if (val & BRXMAC_STATUS_OVERFLOW) 3588 mp->rx_overflows++; 3589 if (val & BRXMAC_STATUS_FRAME_CNT_EXP) 3590 mp->rx_frames += BRXMAC_FRAME_CNT_COUNT; 3591 if (val & BRXMAC_STATUS_ALIGN_ERR_EXP) 3592 mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT; 3593 if (val & BRXMAC_STATUS_CRC_ERR_EXP) 3594 mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT; 3595 if (val & BRXMAC_STATUS_LEN_ERR_EXP) 3596 mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT; 3597 3598 val = nr64_mac(BMAC_CTRL_STATUS); 3599 if (val & BMAC_CTRL_STATUS_NOPAUSE) 3600 mp->pause_off_state++; 3601 if (val & BMAC_CTRL_STATUS_PAUSE) 3602 mp->pause_on_state++; 3603 if (val & BMAC_CTRL_STATUS_PAUSE_RECV) 3604 mp->pause_received++; 3605} 3606 3607static int niu_mac_interrupt(struct niu *np) 3608{ 3609 if (np->flags & NIU_FLAGS_XMAC) 3610 niu_xmac_interrupt(np); 3611 else 3612 niu_bmac_interrupt(np); 3613 3614 return 0; 3615} 3616 3617static void niu_log_device_error(struct niu *np, u64 stat) 3618{ 3619 dev_err(np->device, PFX "%s: Core device errors ( ", 3620 np->dev->name); 3621 3622 if (stat & SYS_ERR_MASK_META2) 3623 printk("META2 "); 3624 if (stat & SYS_ERR_MASK_META1) 3625 printk("META1 "); 3626 if (stat & SYS_ERR_MASK_PEU) 3627 printk("PEU "); 3628 if (stat & SYS_ERR_MASK_TXC) 3629 printk("TXC "); 3630 if (stat & SYS_ERR_MASK_RDMC) 3631 printk("RDMC "); 3632 if (stat & SYS_ERR_MASK_TDMC) 3633 printk("TDMC "); 3634 if (stat & SYS_ERR_MASK_ZCP) 3635 printk("ZCP "); 3636 if (stat & SYS_ERR_MASK_FFLP) 3637 printk("FFLP "); 3638 if (stat & SYS_ERR_MASK_IPP) 3639 printk("IPP "); 3640 if (stat & SYS_ERR_MASK_MAC) 3641 printk("MAC "); 3642 if (stat & SYS_ERR_MASK_SMX) 3643 printk("SMX "); 3644 3645 printk(")\n"); 3646} 3647 3648static int niu_device_error(struct niu *np) 3649{ 3650 u64 stat = nr64(SYS_ERR_STAT); 3651 3652 dev_err(np->device, PFX "%s: Core device error, stat[%llx]\n", 3653 np->dev->name, (unsigned long long) stat); 3654 3655 niu_log_device_error(np, stat); 3656 3657 return -ENODEV; 3658} 3659 3660static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp, 3661 u64 v0, u64 v1, u64 v2) 3662{ 3663 3664 int i, err = 0; 3665 3666 lp->v0 = v0; 3667 lp->v1 = v1; 3668 lp->v2 = v2; 3669 3670 if (v1 & 0x00000000ffffffffULL) { 3671 u32 rx_vec = (v1 & 0xffffffff); 3672 3673 for (i = 0; i < np->num_rx_rings; i++) { 3674 struct rx_ring_info *rp = &np->rx_rings[i]; 3675 3676 if (rx_vec & (1 << rp->rx_channel)) { 3677 int r = niu_rx_error(np, rp); 3678 if (r) { 3679 err = r; 3680 } else { 3681 if (!v0) 3682 nw64(RX_DMA_CTL_STAT(rp->rx_channel), 3683 RX_DMA_CTL_STAT_MEX); 3684 } 3685 } 3686 } 3687 } 3688 if (v1 & 0x7fffffff00000000ULL) { 3689 u32 tx_vec = (v1 >> 32) & 0x7fffffff; 3690 3691 for (i = 0; i < np->num_tx_rings; i++) { 3692 struct tx_ring_info *rp = &np->tx_rings[i]; 3693 3694 if (tx_vec & (1 << rp->tx_channel)) { 3695 int r = niu_tx_error(np, rp); 3696 if (r) 3697 err = r; 3698 } 3699 } 3700 } 3701 if ((v0 | v1) & 0x8000000000000000ULL) { 3702 int r = niu_mif_interrupt(np); 3703 if (r) 3704 err = r; 3705 } 3706 if (v2) { 3707 if (v2 & 0x01ef) { 3708 int r = niu_mac_interrupt(np); 3709 if (r) 3710 err = r; 3711 } 3712 if (v2 & 0x0210) { 3713 int r = niu_device_error(np); 
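			/* Of the LDSV2 bits, 0x01ef are the MAC logical
			 * devices (handled just above) while 0x0210 are the
			 * shared system-error devices decoded here.
			 */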
3714 if (r) 3715 err = r; 3716 } 3717 } 3718 3719 if (err) 3720 niu_enable_interrupts(np, 0); 3721 3722 return err; 3723} 3724 3725static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp, 3726 int ldn) 3727{ 3728 struct rxdma_mailbox *mbox = rp->mbox; 3729 u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat); 3730 3731 stat_write = (RX_DMA_CTL_STAT_RCRTHRES | 3732 RX_DMA_CTL_STAT_RCRTO); 3733 nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write); 3734 3735 niudbg(INTR, "%s: rxchan_intr stat[%llx]\n", 3736 np->dev->name, (unsigned long long) stat); 3737} 3738 3739static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp, 3740 int ldn) 3741{ 3742 rp->tx_cs = nr64(TX_CS(rp->tx_channel)); 3743 3744 niudbg(INTR, "%s: txchan_intr cs[%llx]\n", 3745 np->dev->name, (unsigned long long) rp->tx_cs); 3746} 3747 3748static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0) 3749{ 3750 struct niu_parent *parent = np->parent; 3751 u32 rx_vec, tx_vec; 3752 int i; 3753 3754 tx_vec = (v0 >> 32); 3755 rx_vec = (v0 & 0xffffffff); 3756 3757 for (i = 0; i < np->num_rx_rings; i++) { 3758 struct rx_ring_info *rp = &np->rx_rings[i]; 3759 int ldn = LDN_RXDMA(rp->rx_channel); 3760 3761 if (parent->ldg_map[ldn] != ldg) 3762 continue; 3763 3764 nw64(LD_IM0(ldn), LD_IM0_MASK); 3765 if (rx_vec & (1 << rp->rx_channel)) 3766 niu_rxchan_intr(np, rp, ldn); 3767 } 3768 3769 for (i = 0; i < np->num_tx_rings; i++) { 3770 struct tx_ring_info *rp = &np->tx_rings[i]; 3771 int ldn = LDN_TXDMA(rp->tx_channel); 3772 3773 if (parent->ldg_map[ldn] != ldg) 3774 continue; 3775 3776 nw64(LD_IM0(ldn), LD_IM0_MASK); 3777 if (tx_vec & (1 << rp->tx_channel)) 3778 niu_txchan_intr(np, rp, ldn); 3779 } 3780} 3781 3782static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp, 3783 u64 v0, u64 v1, u64 v2) 3784{ 3785 if (likely(netif_rx_schedule_prep(np->dev, &lp->napi))) { 3786 lp->v0 = v0; 3787 lp->v1 = v1; 3788 lp->v2 = v2; 3789 __niu_fastpath_interrupt(np, lp->ldg_num, v0); 3790 __netif_rx_schedule(np->dev, &lp->napi); 3791 } 3792} 3793 3794static irqreturn_t niu_interrupt(int irq, void *dev_id) 3795{ 3796 struct niu_ldg *lp = dev_id; 3797 struct niu *np = lp->np; 3798 int ldg = lp->ldg_num; 3799 unsigned long flags; 3800 u64 v0, v1, v2; 3801 3802 if (netif_msg_intr(np)) 3803 printk(KERN_DEBUG PFX "niu_interrupt() ldg[%p](%d) ", 3804 lp, ldg); 3805 3806 spin_lock_irqsave(&np->lock, flags); 3807 3808 v0 = nr64(LDSV0(ldg)); 3809 v1 = nr64(LDSV1(ldg)); 3810 v2 = nr64(LDSV2(ldg)); 3811 3812 if (netif_msg_intr(np)) 3813 printk("v0[%llx] v1[%llx] v2[%llx]\n", 3814 (unsigned long long) v0, 3815 (unsigned long long) v1, 3816 (unsigned long long) v2); 3817 3818 if (unlikely(!v0 && !v1 && !v2)) { 3819 spin_unlock_irqrestore(&np->lock, flags); 3820 return IRQ_NONE; 3821 } 3822 3823 if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) { 3824 int err = niu_slowpath_interrupt(np, lp, v0, v1, v2); 3825 if (err) 3826 goto out; 3827 } 3828 if (likely(v0 & ~((u64)1 << LDN_MIF))) 3829 niu_schedule_napi(np, lp, v0, v1, v2); 3830 else 3831 niu_ldg_rearm(np, lp, 1); 3832out: 3833 spin_unlock_irqrestore(&np->lock, flags); 3834 3835 return IRQ_HANDLED; 3836} 3837 3838static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp) 3839{ 3840 if (rp->mbox) { 3841 np->ops->free_coherent(np->device, 3842 sizeof(struct rxdma_mailbox), 3843 rp->mbox, rp->mbox_dma); 3844 rp->mbox = NULL; 3845 } 3846 if (rp->rcr) { 3847 np->ops->free_coherent(np->device, 3848 MAX_RCR_RING_SIZE * sizeof(__le64), 3849 rp->rcr, 
rp->rcr_dma); 3850 rp->rcr = NULL; 3851 rp->rcr_table_size = 0; 3852 rp->rcr_index = 0; 3853 } 3854 if (rp->rbr) { 3855 niu_rbr_free(np, rp); 3856 3857 np->ops->free_coherent(np->device, 3858 MAX_RBR_RING_SIZE * sizeof(__le32), 3859 rp->rbr, rp->rbr_dma); 3860 rp->rbr = NULL; 3861 rp->rbr_table_size = 0; 3862 rp->rbr_index = 0; 3863 } 3864 kfree(rp->rxhash); 3865 rp->rxhash = NULL; 3866} 3867 3868static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp) 3869{ 3870 if (rp->mbox) { 3871 np->ops->free_coherent(np->device, 3872 sizeof(struct txdma_mailbox), 3873 rp->mbox, rp->mbox_dma); 3874 rp->mbox = NULL; 3875 } 3876 if (rp->descr) { 3877 int i; 3878 3879 for (i = 0; i < MAX_TX_RING_SIZE; i++) { 3880 if (rp->tx_buffs[i].skb) 3881 (void) release_tx_packet(np, rp, i); 3882 } 3883 3884 np->ops->free_coherent(np->device, 3885 MAX_TX_RING_SIZE * sizeof(__le64), 3886 rp->descr, rp->descr_dma); 3887 rp->descr = NULL; 3888 rp->pending = 0; 3889 rp->prod = 0; 3890 rp->cons = 0; 3891 rp->wrap_bit = 0; 3892 } 3893} 3894 3895static void niu_free_channels(struct niu *np) 3896{ 3897 int i; 3898 3899 if (np->rx_rings) { 3900 for (i = 0; i < np->num_rx_rings; i++) { 3901 struct rx_ring_info *rp = &np->rx_rings[i]; 3902 3903 niu_free_rx_ring_info(np, rp); 3904 } 3905 kfree(np->rx_rings); 3906 np->rx_rings = NULL; 3907 np->num_rx_rings = 0; 3908 } 3909 3910 if (np->tx_rings) { 3911 for (i = 0; i < np->num_tx_rings; i++) { 3912 struct tx_ring_info *rp = &np->tx_rings[i]; 3913 3914 niu_free_tx_ring_info(np, rp); 3915 } 3916 kfree(np->tx_rings); 3917 np->tx_rings = NULL; 3918 np->num_tx_rings = 0; 3919 } 3920} 3921 3922static int niu_alloc_rx_ring_info(struct niu *np, 3923 struct rx_ring_info *rp) 3924{ 3925 BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64); 3926 3927 rp->rxhash = kzalloc(MAX_RBR_RING_SIZE * sizeof(struct page *), 3928 GFP_KERNEL); 3929 if (!rp->rxhash) 3930 return -ENOMEM; 3931 3932 rp->mbox = np->ops->alloc_coherent(np->device, 3933 sizeof(struct rxdma_mailbox), 3934 &rp->mbox_dma, GFP_KERNEL); 3935 if (!rp->mbox) 3936 return -ENOMEM; 3937 if ((unsigned long)rp->mbox & (64UL - 1)) { 3938 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " 3939 "RXDMA mailbox %p\n", np->dev->name, rp->mbox); 3940 return -EINVAL; 3941 } 3942 3943 rp->rcr = np->ops->alloc_coherent(np->device, 3944 MAX_RCR_RING_SIZE * sizeof(__le64), 3945 &rp->rcr_dma, GFP_KERNEL); 3946 if (!rp->rcr) 3947 return -ENOMEM; 3948 if ((unsigned long)rp->rcr & (64UL - 1)) { 3949 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " 3950 "RXDMA RCR table %p\n", np->dev->name, rp->rcr); 3951 return -EINVAL; 3952 } 3953 rp->rcr_table_size = MAX_RCR_RING_SIZE; 3954 rp->rcr_index = 0; 3955 3956 rp->rbr = np->ops->alloc_coherent(np->device, 3957 MAX_RBR_RING_SIZE * sizeof(__le32), 3958 &rp->rbr_dma, GFP_KERNEL); 3959 if (!rp->rbr) 3960 return -ENOMEM; 3961 if ((unsigned long)rp->rbr & (64UL - 1)) { 3962 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " 3963 "RXDMA RBR table %p\n", np->dev->name, rp->rbr); 3964 return -EINVAL; 3965 } 3966 rp->rbr_table_size = MAX_RBR_RING_SIZE; 3967 rp->rbr_index = 0; 3968 rp->rbr_pending = 0; 3969 3970 return 0; 3971} 3972 3973static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp) 3974{ 3975 int mtu = np->dev->mtu; 3976 3977 /* These values are recommended by the HW designers for fair 3978 * utilization of DRR amongst the rings. 
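 * The burst size scales with the MTU but is clamped at 4096 bytes so
 * that no single ring can hold the TX controller for too long.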
3979 */ 3980 rp->max_burst = mtu + 32; 3981 if (rp->max_burst > 4096) 3982 rp->max_burst = 4096; 3983} 3984 3985static int niu_alloc_tx_ring_info(struct niu *np, 3986 struct tx_ring_info *rp) 3987{ 3988 BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64); 3989 3990 rp->mbox = np->ops->alloc_coherent(np->device, 3991 sizeof(struct txdma_mailbox), 3992 &rp->mbox_dma, GFP_KERNEL); 3993 if (!rp->mbox) 3994 return -ENOMEM; 3995 if ((unsigned long)rp->mbox & (64UL - 1)) { 3996 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " 3997 "TXDMA mailbox %p\n", np->dev->name, rp->mbox); 3998 return -EINVAL; 3999 } 4000 4001 rp->descr = np->ops->alloc_coherent(np->device, 4002 MAX_TX_RING_SIZE * sizeof(__le64), 4003 &rp->descr_dma, GFP_KERNEL); 4004 if (!rp->descr) 4005 return -ENOMEM; 4006 if ((unsigned long)rp->descr & (64UL - 1)) { 4007 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " 4008 "TXDMA descr table %p\n", np->dev->name, rp->descr); 4009 return -EINVAL; 4010 } 4011 4012 rp->pending = MAX_TX_RING_SIZE; 4013 rp->prod = 0; 4014 rp->cons = 0; 4015 rp->wrap_bit = 0; 4016 4017 /* XXX make these configurable... XXX */ 4018 rp->mark_freq = rp->pending / 4; 4019 4020 niu_set_max_burst(np, rp); 4021 4022 return 0; 4023} 4024 4025static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp) 4026{ 4027 u16 bss; 4028 4029 bss = min(PAGE_SHIFT, 15); 4030 4031 rp->rbr_block_size = 1 << bss; 4032 rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss); 4033 4034 rp->rbr_sizes[0] = 256; 4035 rp->rbr_sizes[1] = 1024; 4036 if (np->dev->mtu > ETH_DATA_LEN) { 4037 switch (PAGE_SIZE) { 4038 case 4 * 1024: 4039 rp->rbr_sizes[2] = 4096; 4040 break; 4041 4042 default: 4043 rp->rbr_sizes[2] = 8192; 4044 break; 4045 } 4046 } else { 4047 rp->rbr_sizes[2] = 2048; 4048 } 4049 rp->rbr_sizes[3] = rp->rbr_block_size; 4050} 4051 4052static int niu_alloc_channels(struct niu *np) 4053{ 4054 struct niu_parent *parent = np->parent; 4055 int first_rx_channel, first_tx_channel; 4056 int i, port, err; 4057 4058 port = np->port; 4059 first_rx_channel = first_tx_channel = 0; 4060 for (i = 0; i < port; i++) { 4061 first_rx_channel += parent->rxchan_per_port[i]; 4062 first_tx_channel += parent->txchan_per_port[i]; 4063 } 4064 4065 np->num_rx_rings = parent->rxchan_per_port[port]; 4066 np->num_tx_rings = parent->txchan_per_port[port]; 4067 4068 np->dev->real_num_tx_queues = np->num_tx_rings; 4069 4070 np->rx_rings = kzalloc(np->num_rx_rings * sizeof(struct rx_ring_info), 4071 GFP_KERNEL); 4072 err = -ENOMEM; 4073 if (!np->rx_rings) 4074 goto out_err; 4075 4076 for (i = 0; i < np->num_rx_rings; i++) { 4077 struct rx_ring_info *rp = &np->rx_rings[i]; 4078 4079 rp->np = np; 4080 rp->rx_channel = first_rx_channel + i; 4081 4082 err = niu_alloc_rx_ring_info(np, rp); 4083 if (err) 4084 goto out_err; 4085 4086 niu_size_rbr(np, rp); 4087 4088 /* XXX better defaults, configurable, etc... 
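	 * (The values below feed the WRED setup in niu_rx_channel_wred_init()
	 * and the RCR coalescing fields of RCRCFIG_B: interrupt once 16
	 * packets are queued or the timeout of 8 expires, whichever is first.)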
XXX */ 4089 rp->nonsyn_window = 64; 4090 rp->nonsyn_threshold = rp->rcr_table_size - 64; 4091 rp->syn_window = 64; 4092 rp->syn_threshold = rp->rcr_table_size - 64; 4093 rp->rcr_pkt_threshold = 16; 4094 rp->rcr_timeout = 8; 4095 rp->rbr_kick_thresh = RBR_REFILL_MIN; 4096 if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page) 4097 rp->rbr_kick_thresh = rp->rbr_blocks_per_page; 4098 4099 err = niu_rbr_fill(np, rp, GFP_KERNEL); 4100 if (err) 4101 goto out_err; 4102 } 4103 4104 np->tx_rings = kzalloc(np->num_tx_rings * sizeof(struct tx_ring_info), 4105 GFP_KERNEL); 4106 err = -ENOMEM; 4107 if (!np->tx_rings) 4108 goto out_err; 4109 4110 for (i = 0; i < np->num_tx_rings; i++) { 4111 struct tx_ring_info *rp = &np->tx_rings[i]; 4112 4113 rp->np = np; 4114 rp->tx_channel = first_tx_channel + i; 4115 4116 err = niu_alloc_tx_ring_info(np, rp); 4117 if (err) 4118 goto out_err; 4119 } 4120 4121 return 0; 4122 4123out_err: 4124 niu_free_channels(np); 4125 return err; 4126} 4127 4128static int niu_tx_cs_sng_poll(struct niu *np, int channel) 4129{ 4130 int limit = 1000; 4131 4132 while (--limit > 0) { 4133 u64 val = nr64(TX_CS(channel)); 4134 if (val & TX_CS_SNG_STATE) 4135 return 0; 4136 } 4137 return -ENODEV; 4138} 4139 4140static int niu_tx_channel_stop(struct niu *np, int channel) 4141{ 4142 u64 val = nr64(TX_CS(channel)); 4143 4144 val |= TX_CS_STOP_N_GO; 4145 nw64(TX_CS(channel), val); 4146 4147 return niu_tx_cs_sng_poll(np, channel); 4148} 4149 4150static int niu_tx_cs_reset_poll(struct niu *np, int channel) 4151{ 4152 int limit = 1000; 4153 4154 while (--limit > 0) { 4155 u64 val = nr64(TX_CS(channel)); 4156 if (!(val & TX_CS_RST)) 4157 return 0; 4158 } 4159 return -ENODEV; 4160} 4161 4162static int niu_tx_channel_reset(struct niu *np, int channel) 4163{ 4164 u64 val = nr64(TX_CS(channel)); 4165 int err; 4166 4167 val |= TX_CS_RST; 4168 nw64(TX_CS(channel), val); 4169 4170 err = niu_tx_cs_reset_poll(np, channel); 4171 if (!err) 4172 nw64(TX_RING_KICK(channel), 0); 4173 4174 return err; 4175} 4176 4177static int niu_tx_channel_lpage_init(struct niu *np, int channel) 4178{ 4179 u64 val; 4180 4181 nw64(TX_LOG_MASK1(channel), 0); 4182 nw64(TX_LOG_VAL1(channel), 0); 4183 nw64(TX_LOG_MASK2(channel), 0); 4184 nw64(TX_LOG_VAL2(channel), 0); 4185 nw64(TX_LOG_PAGE_RELO1(channel), 0); 4186 nw64(TX_LOG_PAGE_RELO2(channel), 0); 4187 nw64(TX_LOG_PAGE_HDL(channel), 0); 4188 4189 val = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT; 4190 val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1); 4191 nw64(TX_LOG_PAGE_VLD(channel), val); 4192 4193 /* XXX TXDMA 32bit mode?
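	 * (As programmed above, both logical pages are left wide open for
	 * this port's function: zero masks, values and relocations, i.e.
	 * flat pass-through addressing.)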
XXX */ 4194 4195 return 0; 4196} 4197 4198static void niu_txc_enable_port(struct niu *np, int on) 4199{ 4200 unsigned long flags; 4201 u64 val, mask; 4202 4203 niu_lock_parent(np, flags); 4204 val = nr64(TXC_CONTROL); 4205 mask = (u64)1 << np->port; 4206 if (on) { 4207 val |= TXC_CONTROL_ENABLE | mask; 4208 } else { 4209 val &= ~mask; 4210 if ((val & ~TXC_CONTROL_ENABLE) == 0) 4211 val &= ~TXC_CONTROL_ENABLE; 4212 } 4213 nw64(TXC_CONTROL, val); 4214 niu_unlock_parent(np, flags); 4215} 4216 4217static void niu_txc_set_imask(struct niu *np, u64 imask) 4218{ 4219 unsigned long flags; 4220 u64 val; 4221 4222 niu_lock_parent(np, flags); 4223 val = nr64(TXC_INT_MASK); 4224 val &= ~TXC_INT_MASK_VAL(np->port); 4225 val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port)); nw64(TXC_INT_MASK, val); 4226 niu_unlock_parent(np, flags); 4227} 4228 4229static void niu_txc_port_dma_enable(struct niu *np, int on) 4230{ 4231 u64 val = 0; 4232 4233 if (on) { 4234 int i; 4235 4236 for (i = 0; i < np->num_tx_rings; i++) 4237 val |= (1 << np->tx_rings[i].tx_channel); 4238 } 4239 nw64(TXC_PORT_DMA(np->port), val); 4240} 4241 4242static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp) 4243{ 4244 int err, channel = rp->tx_channel; 4245 u64 val, ring_len; 4246 4247 err = niu_tx_channel_stop(np, channel); 4248 if (err) 4249 return err; 4250 4251 err = niu_tx_channel_reset(np, channel); 4252 if (err) 4253 return err; 4254 4255 err = niu_tx_channel_lpage_init(np, channel); 4256 if (err) 4257 return err; 4258 4259 nw64(TXC_DMA_MAX(channel), rp->max_burst); 4260 nw64(TX_ENT_MSK(channel), 0); 4261 4262 if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE | 4263 TX_RNG_CFIG_STADDR)) { 4264 dev_err(np->device, PFX "%s: TX ring channel %d " 4265 "DMA addr (%llx) is not aligned.\n", 4266 np->dev->name, channel, 4267 (unsigned long long) rp->descr_dma); 4268 return -EINVAL; 4269 } 4270 4271 /* The length field in TX_RNG_CFIG is measured in 64-byte 4272 * blocks. rp->pending is the number of TX descriptors in 4273 * our ring, 8 bytes each, thus we divide by 8 bytes more 4274 * to get the proper value the chip wants.
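	 * That is, (pending * 8 bytes) / (64 bytes per block) == pending / 8,
	 * which is exactly what ring_len computes below.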
4275 */ 4276 ring_len = (rp->pending / 8); 4277 4278 val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) | 4279 rp->descr_dma); 4280 nw64(TX_RNG_CFIG(channel), val); 4281 4282 if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) || 4283 ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) { 4284 dev_err(np->device, PFX "%s: TX ring channel %d " 4285 "MBOX addr (%llx) has illegal bits.\n", 4286 np->dev->name, channel, 4287 (unsigned long long) rp->mbox_dma); 4288 return -EINVAL; 4289 } 4290 nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32); 4291 nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR); 4292 4293 nw64(TX_CS(channel), 0); 4294 4295 rp->last_pkt_cnt = 0; 4296 4297 return 0; 4298} 4299 4300static void niu_init_rdc_groups(struct niu *np) 4301{ 4302 struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port]; 4303 int i, first_table_num = tp->first_table_num; 4304 4305 for (i = 0; i < tp->num_tables; i++) { 4306 struct rdc_table *tbl = &tp->tables[i]; 4307 int this_table = first_table_num + i; 4308 int slot; 4309 4310 for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) 4311 nw64(RDC_TBL(this_table, slot), 4312 tbl->rxdma_channel[slot]); 4313 } 4314 4315 nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]); 4316} 4317 4318static void niu_init_drr_weight(struct niu *np) 4319{ 4320 int type = phy_decode(np->parent->port_phy, np->port); 4321 u64 val; 4322 4323 switch (type) { 4324 case PORT_TYPE_10G: 4325 val = PT_DRR_WEIGHT_DEFAULT_10G; 4326 break; 4327 4328 case PORT_TYPE_1G: 4329 default: 4330 val = PT_DRR_WEIGHT_DEFAULT_1G; 4331 break; 4332 } 4333 nw64(PT_DRR_WT(np->port), val); 4334} 4335 4336static int niu_init_hostinfo(struct niu *np) 4337{ 4338 struct niu_parent *parent = np->parent; 4339 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; 4340 int i, err, num_alt = niu_num_alt_addr(np); 4341 int first_rdc_table = tp->first_table_num; 4342 4343 err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); 4344 if (err) 4345 return err; 4346 4347 err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); 4348 if (err) 4349 return err; 4350 4351 for (i = 0; i < num_alt; i++) { 4352 err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1); 4353 if (err) 4354 return err; 4355 } 4356 4357 return 0; 4358} 4359 4360static int niu_rx_channel_reset(struct niu *np, int channel) 4361{ 4362 return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel), 4363 RXDMA_CFIG1_RST, 1000, 10, 4364 "RXDMA_CFIG1"); 4365} 4366 4367static int niu_rx_channel_lpage_init(struct niu *np, int channel) 4368{ 4369 u64 val; 4370 4371 nw64(RX_LOG_MASK1(channel), 0); 4372 nw64(RX_LOG_VAL1(channel), 0); 4373 nw64(RX_LOG_MASK2(channel), 0); 4374 nw64(RX_LOG_VAL2(channel), 0); 4375 nw64(RX_LOG_PAGE_RELO1(channel), 0); 4376 nw64(RX_LOG_PAGE_RELO2(channel), 0); 4377 nw64(RX_LOG_PAGE_HDL(channel), 0); 4378 4379 val = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT; 4380 val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1); 4381 nw64(RX_LOG_PAGE_VLD(channel), val); 4382 4383 return 0; 4384} 4385 4386static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp) 4387{ 4388 u64 val; 4389 4390 val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) | 4391 ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) | 4392 ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) | 4393 ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT)); 4394 nw64(RDC_RED_PARA(rp->rx_channel), val); 4395} 4396 4397static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret) 4398{ 4399 u64 val = 0; 4400 4401 switch
(rp->rbr_block_size) { 4402 case 4 * 1024: 4403 val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT); 4404 break; 4405 case 8 * 1024: 4406 val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT); 4407 break; 4408 case 16 * 1024: 4409 val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT); 4410 break; 4411 case 32 * 1024: 4412 val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT); 4413 break; 4414 default: 4415 return -EINVAL; 4416 } 4417 val |= RBR_CFIG_B_VLD2; 4418 switch (rp->rbr_sizes[2]) { 4419 case 2 * 1024: 4420 val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT); 4421 break; 4422 case 4 * 1024: 4423 val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT); 4424 break; 4425 case 8 * 1024: 4426 val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT); 4427 break; 4428 case 16 * 1024: 4429 val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT); 4430 break; 4431 4432 default: 4433 return -EINVAL; 4434 } 4435 val |= RBR_CFIG_B_VLD1; 4436 switch (rp->rbr_sizes[1]) { 4437 case 1 * 1024: 4438 val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT); 4439 break; 4440 case 2 * 1024: 4441 val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT); 4442 break; 4443 case 4 * 1024: 4444 val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT); 4445 break; 4446 case 8 * 1024: 4447 val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT); 4448 break; 4449 4450 default: 4451 return -EINVAL; 4452 } 4453 val |= RBR_CFIG_B_VLD0; 4454 switch (rp->rbr_sizes[0]) { 4455 case 256: 4456 val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT); 4457 break; 4458 case 512: 4459 val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT); 4460 break; 4461 case 1 * 1024: 4462 val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT); 4463 break; 4464 case 2 * 1024: 4465 val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT); 4466 break; 4467 4468 default: 4469 return -EINVAL; 4470 } 4471 4472 *ret = val; 4473 return 0; 4474} 4475 4476static int niu_enable_rx_channel(struct niu *np, int channel, int on) 4477{ 4478 u64 val = nr64(RXDMA_CFIG1(channel)); 4479 int limit; 4480 4481 if (on) 4482 val |= RXDMA_CFIG1_EN; 4483 else 4484 val &= ~RXDMA_CFIG1_EN; 4485 nw64(RXDMA_CFIG1(channel), val); 4486 4487 limit = 1000; 4488 while (--limit > 0) { 4489 if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST) 4490 break; 4491 udelay(10); 4492 } 4493 if (limit <= 0) 4494 return -ENODEV; 4495 return 0; 4496} 4497 4498static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp) 4499{ 4500 int err, channel = rp->rx_channel; 4501 u64 val; 4502 4503 err = niu_rx_channel_reset(np, channel); 4504 if (err) 4505 return err; 4506 4507 err = niu_rx_channel_lpage_init(np, channel); 4508 if (err) 4509 return err; 4510 4511 niu_rx_channel_wred_init(np, rp); 4512 4513 nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY); 4514 nw64(RX_DMA_CTL_STAT(channel), 4515 (RX_DMA_CTL_STAT_MEX | 4516 RX_DMA_CTL_STAT_RCRTHRES | 4517 RX_DMA_CTL_STAT_RCRTO | 4518 RX_DMA_CTL_STAT_RBR_EMPTY)); 4519 nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32); 4520 nw64(RXDMA_CFIG2(channel), (rp->mbox_dma & 0x00000000ffffffc0)); 4521 nw64(RBR_CFIG_A(channel), 4522 ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) | 4523 (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR))); 4524 err = niu_compute_rbr_cfig_b(rp, &val); 4525 if (err) 4526 return err; 4527 nw64(RBR_CFIG_B(channel), val); 4528 nw64(RCRCFIG_A(channel), 4529 ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) | 4530 (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR))); 4531 nw64(RCRCFIG_B(channel), 4532 ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) | 4533 
RCRCFIG_B_ENTOUT | 4534 ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT)); 4535 4536 err = niu_enable_rx_channel(np, channel, 1); 4537 if (err) 4538 return err; 4539 4540 nw64(RBR_KICK(channel), rp->rbr_index); 4541 4542 val = nr64(RX_DMA_CTL_STAT(channel)); 4543 val |= RX_DMA_CTL_STAT_RBR_EMPTY; 4544 nw64(RX_DMA_CTL_STAT(channel), val); 4545 4546 return 0; 4547} 4548 4549static int niu_init_rx_channels(struct niu *np) 4550{ 4551 unsigned long flags; 4552 u64 seed = jiffies_64; 4553 int err, i; 4554 4555 niu_lock_parent(np, flags); 4556 nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider); 4557 nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL)); 4558 niu_unlock_parent(np, flags); 4559 4560 /* XXX RXDMA 32bit mode? XXX */ 4561 4562 niu_init_rdc_groups(np); 4563 niu_init_drr_weight(np); 4564 4565 err = niu_init_hostinfo(np); 4566 if (err) 4567 return err; 4568 4569 for (i = 0; i < np->num_rx_rings; i++) { 4570 struct rx_ring_info *rp = &np->rx_rings[i]; 4571 4572 err = niu_init_one_rx_channel(np, rp); 4573 if (err) 4574 return err; 4575 } 4576 4577 return 0; 4578} 4579 4580static int niu_set_ip_frag_rule(struct niu *np) 4581{ 4582 struct niu_parent *parent = np->parent; 4583 struct niu_classifier *cp = &np->clas; 4584 struct niu_tcam_entry *tp; 4585 int index, err; 4586 4587 /* XXX fix this allocation scheme XXX */ 4588 index = cp->tcam_index; 4589 tp = &parent->tcam[index]; 4590 4591 /* Note that the noport bit is the same in both ipv4 and 4592 * ipv6 format TCAM entries. 4593 */ 4594 memset(tp, 0, sizeof(*tp)); 4595 tp->key[1] = TCAM_V4KEY1_NOPORT; 4596 tp->key_mask[1] = TCAM_V4KEY1_NOPORT; 4597 tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET | 4598 ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT)); 4599 err = tcam_write(np, index, tp->key, tp->key_mask); 4600 if (err) 4601 return err; 4602 err = tcam_assoc_write(np, index, tp->assoc_data); 4603 if (err) 4604 return err; 4605 4606 return 0; 4607} 4608 4609static int niu_init_classifier_hw(struct niu *np) 4610{ 4611 struct niu_parent *parent = np->parent; 4612 struct niu_classifier *cp = &np->clas; 4613 int i, err; 4614 4615 nw64(H1POLY, cp->h1_init); 4616 nw64(H2POLY, cp->h2_init); 4617 4618 err = niu_init_hostinfo(np); 4619 if (err) 4620 return err; 4621 4622 for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) { 4623 struct niu_vlan_rdc *vp = &cp->vlan_mappings[i]; 4624 4625 vlan_tbl_write(np, i, np->port, 4626 vp->vlan_pref, vp->rdc_num); 4627 } 4628 4629 for (i = 0; i < cp->num_alt_mac_mappings; i++) { 4630 struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i]; 4631 4632 err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num, 4633 ap->rdc_num, ap->mac_pref); 4634 if (err) 4635 return err; 4636 } 4637 4638 for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) { 4639 int index = i - CLASS_CODE_USER_PROG1; 4640 4641 err = niu_set_tcam_key(np, i, parent->tcam_key[index]); 4642 if (err) 4643 return err; 4644 err = niu_set_flow_key(np, i, parent->flow_key[index]); 4645 if (err) 4646 return err; 4647 } 4648 4649 err = niu_set_ip_frag_rule(np); 4650 if (err) 4651 return err; 4652 4653 tcam_enable(np, 1); 4654 4655 return 0; 4656} 4657 4658static int niu_zcp_write(struct niu *np, int index, u64 *data) 4659{ 4660 nw64(ZCP_RAM_DATA0, data[0]); 4661 nw64(ZCP_RAM_DATA1, data[1]); 4662 nw64(ZCP_RAM_DATA2, data[2]); 4663 nw64(ZCP_RAM_DATA3, data[3]); 4664 nw64(ZCP_RAM_DATA4, data[4]); 4665 nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL); 4666 nw64(ZCP_RAM_ACC, 4667 (ZCP_RAM_ACC_WRITE | 4668 (0 << ZCP_RAM_ACC_ZFCID_SHIFT) | 4669 
(ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT))); 4670 4671 return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, 4672 1000, 100); 4673} 4674 4675static int niu_zcp_read(struct niu *np, int index, u64 *data) 4676{ 4677 int err; 4678 4679 err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, 4680 1000, 100); 4681 if (err) { 4682 dev_err(np->device, PFX "%s: ZCP read busy won't clear, " 4683 "ZCP_RAM_ACC[%llx]\n", np->dev->name, 4684 (unsigned long long) nr64(ZCP_RAM_ACC)); 4685 return err; 4686 } 4687 4688 nw64(ZCP_RAM_ACC, 4689 (ZCP_RAM_ACC_READ | 4690 (0 << ZCP_RAM_ACC_ZFCID_SHIFT) | 4691 (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT))); 4692 4693 err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, 4694 1000, 100); 4695 if (err) { 4696 dev_err(np->device, PFX "%s: ZCP read busy2 won't clear, " 4697 "ZCP_RAM_ACC[%llx]\n", np->dev->name, 4698 (unsigned long long) nr64(ZCP_RAM_ACC)); 4699 return err; 4700 } 4701 4702 data[0] = nr64(ZCP_RAM_DATA0); 4703 data[1] = nr64(ZCP_RAM_DATA1); 4704 data[2] = nr64(ZCP_RAM_DATA2); 4705 data[3] = nr64(ZCP_RAM_DATA3); 4706 data[4] = nr64(ZCP_RAM_DATA4); 4707 4708 return 0; 4709} 4710 4711static void niu_zcp_cfifo_reset(struct niu *np) 4712{ 4713 u64 val = nr64(RESET_CFIFO); 4714 4715 val |= RESET_CFIFO_RST(np->port); 4716 nw64(RESET_CFIFO, val); 4717 udelay(10); 4718 4719 val &= ~RESET_CFIFO_RST(np->port); 4720 nw64(RESET_CFIFO, val); 4721} 4722 4723static int niu_init_zcp(struct niu *np) 4724{ 4725 u64 data[5], rbuf[5]; 4726 int i, max, err; 4727 4728 if (np->parent->plat_type != PLAT_TYPE_NIU) { 4729 if (np->port == 0 || np->port == 1) 4730 max = ATLAS_P0_P1_CFIFO_ENTRIES; 4731 else 4732 max = ATLAS_P2_P3_CFIFO_ENTRIES; 4733 } else 4734 max = NIU_CFIFO_ENTRIES; 4735 4736 data[0] = 0; 4737 data[1] = 0; 4738 data[2] = 0; 4739 data[3] = 0; 4740 data[4] = 0; 4741 4742 for (i = 0; i < max; i++) { 4743 err = niu_zcp_write(np, i, data); 4744 if (err) 4745 return err; 4746 err = niu_zcp_read(np, i, rbuf); 4747 if (err) 4748 return err; 4749 } 4750 4751 niu_zcp_cfifo_reset(np); 4752 nw64(CFIFO_ECC(np->port), 0); 4753 nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL); 4754 (void) nr64(ZCP_INT_STAT); 4755 nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL); 4756 4757 return 0; 4758} 4759 4760static void niu_ipp_write(struct niu *np, int index, u64 *data) 4761{ 4762 u64 val = nr64_ipp(IPP_CFIG); 4763 4764 nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W); 4765 nw64_ipp(IPP_DFIFO_WR_PTR, index); 4766 nw64_ipp(IPP_DFIFO_WR0, data[0]); 4767 nw64_ipp(IPP_DFIFO_WR1, data[1]); 4768 nw64_ipp(IPP_DFIFO_WR2, data[2]); 4769 nw64_ipp(IPP_DFIFO_WR3, data[3]); 4770 nw64_ipp(IPP_DFIFO_WR4, data[4]); 4771 nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W); 4772} 4773 4774static void niu_ipp_read(struct niu *np, int index, u64 *data) 4775{ 4776 nw64_ipp(IPP_DFIFO_RD_PTR, index); 4777 data[0] = nr64_ipp(IPP_DFIFO_RD0); 4778 data[1] = nr64_ipp(IPP_DFIFO_RD1); 4779 data[2] = nr64_ipp(IPP_DFIFO_RD2); 4780 data[3] = nr64_ipp(IPP_DFIFO_RD3); 4781 data[4] = nr64_ipp(IPP_DFIFO_RD4); 4782} 4783 4784static int niu_ipp_reset(struct niu *np) 4785{ 4786 return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST, 4787 1000, 100, "IPP_CFIG"); 4788} 4789 4790static int niu_init_ipp(struct niu *np) 4791{ 4792 u64 data[5], rbuf[5], val; 4793 int i, max, err; 4794 4795 if (np->parent->plat_type != PLAT_TYPE_NIU) { 4796 if (np->port == 0 || np->port == 1) 4797 max = ATLAS_P0_P1_DFIFO_ENTRIES; 4798 else 4799 max = ATLAS_P2_P3_DFIFO_ENTRIES; 4800 } else 4801 max = NIU_DFIFO_ENTRIES; 
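/* The loop that follows scrubs every IPP DFIFO entry: it writes a
 * zeroed five-word record and immediately reads it back.  As
 * niu_ipp_write() above shows, IPP_CFIG_DFIFO_PIO_W opens a PIO
 * window into the FIFO RAM, IPP_DFIFO_WR_PTR selects the entry, and
 * IPP_DFIFO_WR0..WR4 carry the 5 x 64-bit payload.  Presumably the
 * scrub leaves the RAM in a known state before IPP_CFIG_DFIFO_ECC_EN
 * is set later in this function, so uninitialized contents cannot
 * raise spurious ECC errors on first use.
 */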
4802 4803 data[0] = 0; 4804 data[1] = 0; 4805 data[2] = 0; 4806 data[3] = 0; 4807 data[4] = 0; 4808 4809 for (i = 0; i < max; i++) { 4810 niu_ipp_write(np, i, data); 4811 niu_ipp_read(np, i, rbuf); 4812 } 4813 4814 (void) nr64_ipp(IPP_INT_STAT); 4815 (void) nr64_ipp(IPP_INT_STAT); 4816 4817 err = niu_ipp_reset(np); 4818 if (err) 4819 return err; 4820 4821 (void) nr64_ipp(IPP_PKT_DIS); 4822 (void) nr64_ipp(IPP_BAD_CS_CNT); 4823 (void) nr64_ipp(IPP_ECC); 4824 4825 (void) nr64_ipp(IPP_INT_STAT); 4826 4827 nw64_ipp(IPP_MSK, ~IPP_MSK_ALL); 4828 4829 val = nr64_ipp(IPP_CFIG); 4830 val &= ~IPP_CFIG_IP_MAX_PKT; 4831 val |= (IPP_CFIG_IPP_ENABLE | 4832 IPP_CFIG_DFIFO_ECC_EN | 4833 IPP_CFIG_DROP_BAD_CRC | 4834 IPP_CFIG_CKSUM_EN | 4835 (0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT)); 4836 nw64_ipp(IPP_CFIG, val); 4837 4838 return 0; 4839} 4840 4841static void niu_handle_led(struct niu *np, int status) 4842{ 4843 u64 val; 4844 val = nr64_mac(XMAC_CONFIG); 4845 4846 if ((np->flags & NIU_FLAGS_10G) != 0 && 4847 (np->flags & NIU_FLAGS_FIBER) != 0) { 4848 if (status) { 4849 val |= XMAC_CONFIG_LED_POLARITY; 4850 val &= ~XMAC_CONFIG_FORCE_LED_ON; 4851 } else { 4852 val |= XMAC_CONFIG_FORCE_LED_ON; 4853 val &= ~XMAC_CONFIG_LED_POLARITY; 4854 } 4855 } 4856 4857 nw64_mac(XMAC_CONFIG, val); 4858} 4859 4860static void niu_init_xif_xmac(struct niu *np) 4861{ 4862 struct niu_link_config *lp = &np->link_config; 4863 u64 val; 4864 4865 if (np->flags & NIU_FLAGS_XCVR_SERDES) { 4866 val = nr64(MIF_CONFIG); 4867 val |= MIF_CONFIG_ATCA_GE; 4868 nw64(MIF_CONFIG, val); 4869 } 4870 4871 val = nr64_mac(XMAC_CONFIG); 4872 val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC; 4873 4874 val |= XMAC_CONFIG_TX_OUTPUT_EN; 4875 4876 if (lp->loopback_mode == LOOPBACK_MAC) { 4877 val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC; 4878 val |= XMAC_CONFIG_LOOPBACK; 4879 } else { 4880 val &= ~XMAC_CONFIG_LOOPBACK; 4881 } 4882 4883 if (np->flags & NIU_FLAGS_10G) { 4884 val &= ~XMAC_CONFIG_LFS_DISABLE; 4885 } else { 4886 val |= XMAC_CONFIG_LFS_DISABLE; 4887 if (!(np->flags & NIU_FLAGS_FIBER) && 4888 !(np->flags & NIU_FLAGS_XCVR_SERDES)) 4889 val |= XMAC_CONFIG_1G_PCS_BYPASS; 4890 else 4891 val &= ~XMAC_CONFIG_1G_PCS_BYPASS; 4892 } 4893 4894 val &= ~XMAC_CONFIG_10G_XPCS_BYPASS; 4895 4896 if (lp->active_speed == SPEED_100) 4897 val |= XMAC_CONFIG_SEL_CLK_25MHZ; 4898 else 4899 val &= ~XMAC_CONFIG_SEL_CLK_25MHZ; 4900 4901 nw64_mac(XMAC_CONFIG, val); 4902 4903 val = nr64_mac(XMAC_CONFIG); 4904 val &= ~XMAC_CONFIG_MODE_MASK; 4905 if (np->flags & NIU_FLAGS_10G) { 4906 val |= XMAC_CONFIG_MODE_XGMII; 4907 } else { 4908 if (lp->active_speed == SPEED_100) 4909 val |= XMAC_CONFIG_MODE_MII; 4910 else 4911 val |= XMAC_CONFIG_MODE_GMII; 4912 } 4913 4914 nw64_mac(XMAC_CONFIG, val); 4915} 4916 4917static void niu_init_xif_bmac(struct niu *np) 4918{ 4919 struct niu_link_config *lp = &np->link_config; 4920 u64 val; 4921 4922 val = BMAC_XIF_CONFIG_TX_OUTPUT_EN; 4923 4924 if (lp->loopback_mode == LOOPBACK_MAC) 4925 val |= BMAC_XIF_CONFIG_MII_LOOPBACK; 4926 else 4927 val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK; 4928 4929 if (lp->active_speed == SPEED_1000) 4930 val |= BMAC_XIF_CONFIG_GMII_MODE; 4931 else 4932 val &= ~BMAC_XIF_CONFIG_GMII_MODE; 4933 4934 val &= ~(BMAC_XIF_CONFIG_LINK_LED | 4935 BMAC_XIF_CONFIG_LED_POLARITY); 4936 4937 if (!(np->flags & NIU_FLAGS_10G) && 4938 !(np->flags & NIU_FLAGS_FIBER) && 4939 lp->active_speed == SPEED_100) 4940 val |= BMAC_XIF_CONFIG_25MHZ_CLOCK; 4941 else 4942 val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK; 4943 4944 nw64_mac(BMAC_XIF_CONFIG, val); 4945} 4946 4947static void 
niu_init_xif(struct niu *np) 4948{ 4949 if (np->flags & NIU_FLAGS_XMAC) 4950 niu_init_xif_xmac(np); 4951 else 4952 niu_init_xif_bmac(np); 4953} 4954 4955static void niu_pcs_mii_reset(struct niu *np) 4956{ 4957 int limit = 1000; 4958 u64 val = nr64_pcs(PCS_MII_CTL); 4959 val |= PCS_MII_CTL_RST; 4960 nw64_pcs(PCS_MII_CTL, val); 4961 while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) { 4962 udelay(100); 4963 val = nr64_pcs(PCS_MII_CTL); 4964 } 4965} 4966 4967static void niu_xpcs_reset(struct niu *np) 4968{ 4969 int limit = 1000; 4970 u64 val = nr64_xpcs(XPCS_CONTROL1); 4971 val |= XPCS_CONTROL1_RESET; 4972 nw64_xpcs(XPCS_CONTROL1, val); 4973 while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) { 4974 udelay(100); 4975 val = nr64_xpcs(XPCS_CONTROL1); 4976 } 4977} 4978 4979static int niu_init_pcs(struct niu *np) 4980{ 4981 struct niu_link_config *lp = &np->link_config; 4982 u64 val; 4983 4984 switch (np->flags & (NIU_FLAGS_10G | 4985 NIU_FLAGS_FIBER | 4986 NIU_FLAGS_XCVR_SERDES)) { 4987 case NIU_FLAGS_FIBER: 4988 /* 1G fiber */ 4989 nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE); 4990 nw64_pcs(PCS_DPATH_MODE, 0); 4991 niu_pcs_mii_reset(np); 4992 break; 4993 4994 case NIU_FLAGS_10G: 4995 case NIU_FLAGS_10G | NIU_FLAGS_FIBER: 4996 case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES: 4997 /* 10G SERDES */ 4998 if (!(np->flags & NIU_FLAGS_XMAC)) 4999 return -EINVAL; 5000 5001 /* 10G copper or fiber */ 5002 val = nr64_mac(XMAC_CONFIG); 5003 val &= ~XMAC_CONFIG_10G_XPCS_BYPASS; 5004 nw64_mac(XMAC_CONFIG, val); 5005 5006 niu_xpcs_reset(np); 5007 5008 val = nr64_xpcs(XPCS_CONTROL1); 5009 if (lp->loopback_mode == LOOPBACK_PHY) 5010 val |= XPCS_CONTROL1_LOOPBACK; 5011 else 5012 val &= ~XPCS_CONTROL1_LOOPBACK; 5013 nw64_xpcs(XPCS_CONTROL1, val); 5014 5015 nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0); 5016 (void) nr64_xpcs(XPCS_SYMERR_CNT01); 5017 (void) nr64_xpcs(XPCS_SYMERR_CNT23); 5018 break; 5019 5020 5021 case NIU_FLAGS_XCVR_SERDES: 5022 /* 1G SERDES */ 5023 niu_pcs_mii_reset(np); 5024 nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE); 5025 nw64_pcs(PCS_DPATH_MODE, 0); 5026 break; 5027 5028 case 0: 5029 /* 1G copper */ 5030 case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER: 5031 /* 1G RGMII FIBER */ 5032 nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII); 5033 niu_pcs_mii_reset(np); 5034 break; 5035 5036 default: 5037 return -EINVAL; 5038 } 5039 5040 return 0; 5041} 5042 5043static int niu_reset_tx_xmac(struct niu *np) 5044{ 5045 return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST, 5046 (XTXMAC_SW_RST_REG_RS | 5047 XTXMAC_SW_RST_SOFT_RST), 5048 1000, 100, "XTXMAC_SW_RST"); 5049} 5050 5051static int niu_reset_tx_bmac(struct niu *np) 5052{ 5053 int limit; 5054 5055 nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET); 5056 limit = 1000; 5057 while (--limit >= 0) { 5058 if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET)) 5059 break; 5060 udelay(100); 5061 } 5062 if (limit < 0) { 5063 dev_err(np->device, PFX "Port %u TX BMAC would not reset, " 5064 "BTXMAC_SW_RST[%llx]\n", 5065 np->port, 5066 (unsigned long long) nr64_mac(BTXMAC_SW_RST)); 5067 return -ENODEV; 5068 } 5069 5070 return 0; 5071} 5072 5073static int niu_reset_tx_mac(struct niu *np) 5074{ 5075 if (np->flags & NIU_FLAGS_XMAC) 5076 return niu_reset_tx_xmac(np); 5077 else 5078 return niu_reset_tx_bmac(np); 5079} 5080 5081static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max) 5082{ 5083 u64 val; 5084 5085 val = nr64_mac(XMAC_MIN); 5086 val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE | 5087 XMAC_MIN_RX_MIN_PKT_SIZE); 5088 val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT); 5089 val |= (min 
<< XMAC_MIN_TX_MIN_PKT_SIZE_SHFT); 5090 nw64_mac(XMAC_MIN, val); 5091 5092 nw64_mac(XMAC_MAX, max); 5093 5094 nw64_mac(XTXMAC_STAT_MSK, ~(u64)0); 5095 5096 val = nr64_mac(XMAC_IPG); 5097 if (np->flags & NIU_FLAGS_10G) { 5098 val &= ~XMAC_IPG_IPG_XGMII; 5099 val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT); 5100 } else { 5101 val &= ~XMAC_IPG_IPG_MII_GMII; 5102 val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT); 5103 } 5104 nw64_mac(XMAC_IPG, val); 5105 5106 val = nr64_mac(XMAC_CONFIG); 5107 val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC | 5108 XMAC_CONFIG_STRETCH_MODE | 5109 XMAC_CONFIG_VAR_MIN_IPG_EN | 5110 XMAC_CONFIG_TX_ENABLE); 5111 nw64_mac(XMAC_CONFIG, val); 5112 5113 nw64_mac(TXMAC_FRM_CNT, 0); 5114 nw64_mac(TXMAC_BYTE_CNT, 0); 5115} 5116 5117static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max) 5118{ 5119 u64 val; 5120 5121 nw64_mac(BMAC_MIN_FRAME, min); 5122 nw64_mac(BMAC_MAX_FRAME, max); 5123 5124 nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0); 5125 nw64_mac(BMAC_CTRL_TYPE, 0x8808); 5126 nw64_mac(BMAC_PREAMBLE_SIZE, 7); 5127 5128 val = nr64_mac(BTXMAC_CONFIG); 5129 val &= ~(BTXMAC_CONFIG_FCS_DISABLE | 5130 BTXMAC_CONFIG_ENABLE); 5131 nw64_mac(BTXMAC_CONFIG, val); 5132} 5133 5134static void niu_init_tx_mac(struct niu *np) 5135{ 5136 u64 min, max; 5137 5138 min = 64; 5139 if (np->dev->mtu > ETH_DATA_LEN) 5140 max = 9216; 5141 else 5142 max = 1522; 5143 5144 /* The XMAC_MIN register only accepts values for TX min which 5145 * have the low 3 bits cleared. 5146 */ 5147 BUILD_BUG_ON(min & 0x7); 5148 5149 if (np->flags & NIU_FLAGS_XMAC) 5150 niu_init_tx_xmac(np, min, max); 5151 else 5152 niu_init_tx_bmac(np, min, max); 5153} 5154 5155static int niu_reset_rx_xmac(struct niu *np) 5156{ 5157 int limit; 5158 5159 nw64_mac(XRXMAC_SW_RST, 5160 XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST); 5161 limit = 1000; 5162 while (--limit >= 0) { 5163 if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS | 5164 XRXMAC_SW_RST_SOFT_RST))) 5165 break; 5166 udelay(100); 5167 } 5168 if (limit < 0) { 5169 dev_err(np->device, PFX "Port %u RX XMAC would not reset, " 5170 "XRXMAC_SW_RST[%llx]\n", 5171 np->port, 5172 (unsigned long long) nr64_mac(XRXMAC_SW_RST)); 5173 return -ENODEV; 5174 } 5175 5176 return 0; 5177} 5178 5179static int niu_reset_rx_bmac(struct niu *np) 5180{ 5181 int limit; 5182 5183 nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET); 5184 limit = 1000; 5185 while (--limit >= 0) { 5186 if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET)) 5187 break; 5188 udelay(100); 5189 } 5190 if (limit < 0) { 5191 dev_err(np->device, PFX "Port %u RX BMAC would not reset, " 5192 "BRXMAC_SW_RST[%llx]\n", 5193 np->port, 5194 (unsigned long long) nr64_mac(BRXMAC_SW_RST)); 5195 return -ENODEV; 5196 } 5197 5198 return 0; 5199} 5200 5201static int niu_reset_rx_mac(struct niu *np) 5202{ 5203 if (np->flags & NIU_FLAGS_XMAC) 5204 return niu_reset_rx_xmac(np); 5205 else 5206 return niu_reset_rx_bmac(np); 5207} 5208 5209static void niu_init_rx_xmac(struct niu *np) 5210{ 5211 struct niu_parent *parent = np->parent; 5212 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; 5213 int first_rdc_table = tp->first_table_num; 5214 unsigned long i; 5215 u64 val; 5216 5217 nw64_mac(XMAC_ADD_FILT0, 0); 5218 nw64_mac(XMAC_ADD_FILT1, 0); 5219 nw64_mac(XMAC_ADD_FILT2, 0); 5220 nw64_mac(XMAC_ADD_FILT12_MASK, 0); 5221 nw64_mac(XMAC_ADD_FILT00_MASK, 0); 5222 for (i = 0; i < MAC_NUM_HASH; i++) 5223 nw64_mac(XMAC_HASH_TBL(i), 0); 5224 nw64_mac(XRXMAC_STAT_MSK, ~(u64)0); 5225 niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); 5226 
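/* This call and the niu_set_multicast_mac_rdc_table() call just below
 * mirror niu_init_hostinfo(): frames matching the primary (or the
 * multicast) MAC address are steered to first_rdc_table, the per-port
 * RDC table that niu_init_rdc_groups() filled with RX DMA channel
 * numbers.  Judging from the ap->mac_pref argument used in
 * niu_init_classifier_hw(), the trailing 1 appears to be a
 * MAC-preference flag rather than a count.
 */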
niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); 5227 5228 val = nr64_mac(XMAC_CONFIG); 5229 val &= ~(XMAC_CONFIG_RX_MAC_ENABLE | 5230 XMAC_CONFIG_PROMISCUOUS | 5231 XMAC_CONFIG_PROMISC_GROUP | 5232 XMAC_CONFIG_ERR_CHK_DIS | 5233 XMAC_CONFIG_RX_CRC_CHK_DIS | 5234 XMAC_CONFIG_RESERVED_MULTICAST | 5235 XMAC_CONFIG_RX_CODEV_CHK_DIS | 5236 XMAC_CONFIG_ADDR_FILTER_EN | 5237 XMAC_CONFIG_RCV_PAUSE_ENABLE | 5238 XMAC_CONFIG_STRIP_CRC | 5239 XMAC_CONFIG_PASS_FLOW_CTRL | 5240 XMAC_CONFIG_MAC2IPP_PKT_CNT_EN); 5241 val |= (XMAC_CONFIG_HASH_FILTER_EN); 5242 nw64_mac(XMAC_CONFIG, val); 5243 5244 nw64_mac(RXMAC_BT_CNT, 0); 5245 nw64_mac(RXMAC_BC_FRM_CNT, 0); 5246 nw64_mac(RXMAC_MC_FRM_CNT, 0); 5247 nw64_mac(RXMAC_FRAG_CNT, 0); 5248 nw64_mac(RXMAC_HIST_CNT1, 0); 5249 nw64_mac(RXMAC_HIST_CNT2, 0); 5250 nw64_mac(RXMAC_HIST_CNT3, 0); 5251 nw64_mac(RXMAC_HIST_CNT4, 0); 5252 nw64_mac(RXMAC_HIST_CNT5, 0); 5253 nw64_mac(RXMAC_HIST_CNT6, 0); 5254 nw64_mac(RXMAC_HIST_CNT7, 0); 5255 nw64_mac(RXMAC_MPSZER_CNT, 0); 5256 nw64_mac(RXMAC_CRC_ER_CNT, 0); 5257 nw64_mac(RXMAC_CD_VIO_CNT, 0); 5258 nw64_mac(LINK_FAULT_CNT, 0); 5259} 5260 5261static void niu_init_rx_bmac(struct niu *np) 5262{ 5263 struct niu_parent *parent = np->parent; 5264 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; 5265 int first_rdc_table = tp->first_table_num; 5266 unsigned long i; 5267 u64 val; 5268 5269 nw64_mac(BMAC_ADD_FILT0, 0); 5270 nw64_mac(BMAC_ADD_FILT1, 0); 5271 nw64_mac(BMAC_ADD_FILT2, 0); 5272 nw64_mac(BMAC_ADD_FILT12_MASK, 0); 5273 nw64_mac(BMAC_ADD_FILT00_MASK, 0); 5274 for (i = 0; i < MAC_NUM_HASH; i++) 5275 nw64_mac(BMAC_HASH_TBL(i), 0); 5276 niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); 5277 niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); 5278 nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0); 5279 5280 val = nr64_mac(BRXMAC_CONFIG); 5281 val &= ~(BRXMAC_CONFIG_ENABLE | 5282 BRXMAC_CONFIG_STRIP_PAD | 5283 BRXMAC_CONFIG_STRIP_FCS | 5284 BRXMAC_CONFIG_PROMISC | 5285 BRXMAC_CONFIG_PROMISC_GRP | 5286 BRXMAC_CONFIG_ADDR_FILT_EN | 5287 BRXMAC_CONFIG_DISCARD_DIS); 5288 val |= (BRXMAC_CONFIG_HASH_FILT_EN); 5289 nw64_mac(BRXMAC_CONFIG, val); 5290 5291 val = nr64_mac(BMAC_ADDR_CMPEN); 5292 val |= BMAC_ADDR_CMPEN_EN0; 5293 nw64_mac(BMAC_ADDR_CMPEN, val); 5294} 5295 5296static void niu_init_rx_mac(struct niu *np) 5297{ 5298 niu_set_primary_mac(np, np->dev->dev_addr); 5299 5300 if (np->flags & NIU_FLAGS_XMAC) 5301 niu_init_rx_xmac(np); 5302 else 5303 niu_init_rx_bmac(np); 5304} 5305 5306static void niu_enable_tx_xmac(struct niu *np, int on) 5307{ 5308 u64 val = nr64_mac(XMAC_CONFIG); 5309 5310 if (on) 5311 val |= XMAC_CONFIG_TX_ENABLE; 5312 else 5313 val &= ~XMAC_CONFIG_TX_ENABLE; 5314 nw64_mac(XMAC_CONFIG, val); 5315} 5316 5317static void niu_enable_tx_bmac(struct niu *np, int on) 5318{ 5319 u64 val = nr64_mac(BTXMAC_CONFIG); 5320 5321 if (on) 5322 val |= BTXMAC_CONFIG_ENABLE; 5323 else 5324 val &= ~BTXMAC_CONFIG_ENABLE; 5325 nw64_mac(BTXMAC_CONFIG, val); 5326} 5327 5328static void niu_enable_tx_mac(struct niu *np, int on) 5329{ 5330 if (np->flags & NIU_FLAGS_XMAC) 5331 niu_enable_tx_xmac(np, on); 5332 else 5333 niu_enable_tx_bmac(np, on); 5334} 5335 5336static void niu_enable_rx_xmac(struct niu *np, int on) 5337{ 5338 u64 val = nr64_mac(XMAC_CONFIG); 5339 5340 val &= ~(XMAC_CONFIG_HASH_FILTER_EN | 5341 XMAC_CONFIG_PROMISCUOUS); 5342 5343 if (np->flags & NIU_FLAGS_MCAST) 5344 val |= XMAC_CONFIG_HASH_FILTER_EN; 5345 if (np->flags & NIU_FLAGS_PROMISC) 5346 val |= XMAC_CONFIG_PROMISCUOUS; 5347 5348 if (on) 5349 val |= 
XMAC_CONFIG_RX_MAC_ENABLE; 5350 else 5351 val &= ~XMAC_CONFIG_RX_MAC_ENABLE; 5352 nw64_mac(XMAC_CONFIG, val); 5353} 5354 5355static void niu_enable_rx_bmac(struct niu *np, int on) 5356{ 5357 u64 val = nr64_mac(BRXMAC_CONFIG); 5358 5359 val &= ~(BRXMAC_CONFIG_HASH_FILT_EN | 5360 BRXMAC_CONFIG_PROMISC); 5361 5362 if (np->flags & NIU_FLAGS_MCAST) 5363 val |= BRXMAC_CONFIG_HASH_FILT_EN; 5364 if (np->flags & NIU_FLAGS_PROMISC) 5365 val |= BRXMAC_CONFIG_PROMISC; 5366 5367 if (on) 5368 val |= BRXMAC_CONFIG_ENABLE; 5369 else 5370 val &= ~BRXMAC_CONFIG_ENABLE; 5371 nw64_mac(BRXMAC_CONFIG, val); 5372} 5373 5374static void niu_enable_rx_mac(struct niu *np, int on) 5375{ 5376 if (np->flags & NIU_FLAGS_XMAC) 5377 niu_enable_rx_xmac(np, on); 5378 else 5379 niu_enable_rx_bmac(np, on); 5380} 5381 5382static int niu_init_mac(struct niu *np) 5383{ 5384 int err; 5385 5386 niu_init_xif(np); 5387 err = niu_init_pcs(np); 5388 if (err) 5389 return err; 5390 5391 err = niu_reset_tx_mac(np); 5392 if (err) 5393 return err; 5394 niu_init_tx_mac(np); 5395 err = niu_reset_rx_mac(np); 5396 if (err) 5397 return err; 5398 niu_init_rx_mac(np); 5399 5400 /* This looks hokey but the RX MAC reset we just did will 5401 * undo some of the state we set up in niu_init_tx_mac() so we 5402 * have to call it again. In particular, the RX MAC reset will 5403 * set the XMAC_MAX register back to its default value. 5404 */ 5405 niu_init_tx_mac(np); 5406 niu_enable_tx_mac(np, 1); 5407 5408 niu_enable_rx_mac(np, 1); 5409 5410 return 0; 5411} 5412 5413static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp) 5414{ 5415 (void) niu_tx_channel_stop(np, rp->tx_channel); 5416} 5417 5418static void niu_stop_tx_channels(struct niu *np) 5419{ 5420 int i; 5421 5422 for (i = 0; i < np->num_tx_rings; i++) { 5423 struct tx_ring_info *rp = &np->tx_rings[i]; 5424 5425 niu_stop_one_tx_channel(np, rp); 5426 } 5427} 5428 5429static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp) 5430{ 5431 (void) niu_tx_channel_reset(np, rp->tx_channel); 5432} 5433 5434static void niu_reset_tx_channels(struct niu *np) 5435{ 5436 int i; 5437 5438 for (i = 0; i < np->num_tx_rings; i++) { 5439 struct tx_ring_info *rp = &np->tx_rings[i]; 5440 5441 niu_reset_one_tx_channel(np, rp); 5442 } 5443} 5444 5445static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp) 5446{ 5447 (void) niu_enable_rx_channel(np, rp->rx_channel, 0); 5448} 5449 5450static void niu_stop_rx_channels(struct niu *np) 5451{ 5452 int i; 5453 5454 for (i = 0; i < np->num_rx_rings; i++) { 5455 struct rx_ring_info *rp = &np->rx_rings[i]; 5456 5457 niu_stop_one_rx_channel(np, rp); 5458 } 5459} 5460 5461static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp) 5462{ 5463 int channel = rp->rx_channel; 5464 5465 (void) niu_rx_channel_reset(np, channel); 5466 nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL); 5467 nw64(RX_DMA_CTL_STAT(channel), 0); 5468 (void) niu_enable_rx_channel(np, channel, 0); 5469} 5470 5471static void niu_reset_rx_channels(struct niu *np) 5472{ 5473 int i; 5474 5475 for (i = 0; i < np->num_rx_rings; i++) { 5476 struct rx_ring_info *rp = &np->rx_rings[i]; 5477 5478 niu_reset_one_rx_channel(np, rp); 5479 } 5480} 5481 5482static void niu_disable_ipp(struct niu *np) 5483{ 5484 u64 rd, wr, val; 5485 int limit; 5486 5487 rd = nr64_ipp(IPP_DFIFO_RD_PTR); 5488 wr = nr64_ipp(IPP_DFIFO_WR_PTR); 5489 limit = 100; 5490 while (--limit >= 0 && (rd != wr)) { 5491 rd = nr64_ipp(IPP_DFIFO_RD_PTR); 5492 wr =
nr64_ipp(IPP_DFIFO_WR_PTR); 5493 } 5494 if (limit < 0 && 5495 (rd != 0 && wr != 1)) { 5496 dev_err(np->device, PFX "%s: IPP would not quiesce, " 5497 "rd_ptr[%llx] wr_ptr[%llx]\n", 5498 np->dev->name, 5499 (unsigned long long) nr64_ipp(IPP_DFIFO_RD_PTR), 5500 (unsigned long long) nr64_ipp(IPP_DFIFO_WR_PTR)); 5501 } 5502 5503 val = nr64_ipp(IPP_CFIG); 5504 val &= ~(IPP_CFIG_IPP_ENABLE | 5505 IPP_CFIG_DFIFO_ECC_EN | 5506 IPP_CFIG_DROP_BAD_CRC | 5507 IPP_CFIG_CKSUM_EN); 5508 nw64_ipp(IPP_CFIG, val); 5509 5510 (void) niu_ipp_reset(np); 5511} 5512 5513static int niu_init_hw(struct niu *np) 5514{ 5515 int i, err; 5516 5517 niudbg(IFUP, "%s: Initialize TXC\n", np->dev->name); 5518 niu_txc_enable_port(np, 1); 5519 niu_txc_port_dma_enable(np, 1); 5520 niu_txc_set_imask(np, 0); 5521 5522 niudbg(IFUP, "%s: Initialize TX channels\n", np->dev->name); 5523 for (i = 0; i < np->num_tx_rings; i++) { 5524 struct tx_ring_info *rp = &np->tx_rings[i]; 5525 5526 err = niu_init_one_tx_channel(np, rp); 5527 if (err) 5528 return err; 5529 } 5530 5531 niudbg(IFUP, "%s: Initialize RX channels\n", np->dev->name); 5532 err = niu_init_rx_channels(np); 5533 if (err) 5534 goto out_uninit_tx_channels; 5535 5536 niudbg(IFUP, "%s: Initialize classifier\n", np->dev->name); 5537 err = niu_init_classifier_hw(np); 5538 if (err) 5539 goto out_uninit_rx_channels; 5540 5541 niudbg(IFUP, "%s: Initialize ZCP\n", np->dev->name); 5542 err = niu_init_zcp(np); 5543 if (err) 5544 goto out_uninit_rx_channels; 5545 5546 niudbg(IFUP, "%s: Initialize IPP\n", np->dev->name); 5547 err = niu_init_ipp(np); 5548 if (err) 5549 goto out_uninit_rx_channels; 5550 5551 niudbg(IFUP, "%s: Initialize MAC\n", np->dev->name); 5552 err = niu_init_mac(np); 5553 if (err) 5554 goto out_uninit_ipp; 5555 5556 return 0; 5557 5558out_uninit_ipp: 5559 niudbg(IFUP, "%s: Uninit IPP\n", np->dev->name); 5560 niu_disable_ipp(np); 5561 5562out_uninit_rx_channels: 5563 niudbg(IFUP, "%s: Uninit RX channels\n", np->dev->name); 5564 niu_stop_rx_channels(np); 5565 niu_reset_rx_channels(np); 5566 5567out_uninit_tx_channels: 5568 niudbg(IFUP, "%s: Uninit TX channels\n", np->dev->name); 5569 niu_stop_tx_channels(np); 5570 niu_reset_tx_channels(np); 5571 5572 return err; 5573} 5574 5575static void niu_stop_hw(struct niu *np) 5576{ 5577 niudbg(IFDOWN, "%s: Disable interrupts\n", np->dev->name); 5578 niu_enable_interrupts(np, 0); 5579 5580 niudbg(IFDOWN, "%s: Disable RX MAC\n", np->dev->name); 5581 niu_enable_rx_mac(np, 0); 5582 5583 niudbg(IFDOWN, "%s: Disable IPP\n", np->dev->name); 5584 niu_disable_ipp(np); 5585 5586 niudbg(IFDOWN, "%s: Stop TX channels\n", np->dev->name); 5587 niu_stop_tx_channels(np); 5588 5589 niudbg(IFDOWN, "%s: Stop RX channels\n", np->dev->name); 5590 niu_stop_rx_channels(np); 5591 5592 niudbg(IFDOWN, "%s: Reset TX channels\n", np->dev->name); 5593 niu_reset_tx_channels(np); 5594 5595 niudbg(IFDOWN, "%s: Reset RX channels\n", np->dev->name); 5596 niu_reset_rx_channels(np); 5597} 5598 5599static int niu_request_irq(struct niu *np) 5600{ 5601 int i, j, err; 5602 5603 err = 0; 5604 for (i = 0; i < np->num_ldg; i++) { 5605 struct niu_ldg *lp = &np->ldg[i]; 5606 5607 err = request_irq(lp->irq, niu_interrupt, 5608 IRQF_SHARED | IRQF_SAMPLE_RANDOM, 5609 np->dev->name, lp); 5610 if (err) 5611 goto out_free_irqs; 5612 5613 } 5614 5615 return 0; 5616 5617out_free_irqs: 5618 for (j = 0; j < i; j++) { 5619 struct niu_ldg *lp = &np->ldg[j]; 5620 5621 free_irq(lp->irq, lp); 5622 } 5623 return err; 5624} 5625 5626static void niu_free_irq(struct niu *np) 5627{ 5628 int i; 
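/* niu_free_irq() is the unconditional counterpart of niu_request_irq()
 * above: that function registers one shared handler per logical device
 * group (LDG) and, on failure, unwinds only the 0..i-1 registrations
 * that succeeded.  Here all np->num_ldg handlers are released, so this
 * should only run after a fully successful niu_request_irq().
 */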
5629 5630 for (i = 0; i < np->num_ldg; i++) { 5631 struct niu_ldg *lp = &np->ldg[i]; 5632 5633 free_irq(lp->irq, lp); 5634 } 5635} 5636 5637static void niu_enable_napi(struct niu *np) 5638{ 5639 int i; 5640 5641 for (i = 0; i < np->num_ldg; i++) 5642 napi_enable(&np->ldg[i].napi); 5643} 5644 5645static void niu_disable_napi(struct niu *np) 5646{ 5647 int i; 5648 5649 for (i = 0; i < np->num_ldg; i++) 5650 napi_disable(&np->ldg[i].napi); 5651} 5652 5653static int niu_open(struct net_device *dev) 5654{ 5655 struct niu *np = netdev_priv(dev); 5656 int err; 5657 5658 netif_carrier_off(dev); 5659 5660 err = niu_alloc_channels(np); 5661 if (err) 5662 goto out_err; 5663 5664 err = niu_enable_interrupts(np, 0); 5665 if (err) 5666 goto out_free_channels; 5667 5668 err = niu_request_irq(np); 5669 if (err) 5670 goto out_free_channels; 5671 5672 niu_enable_napi(np); 5673 5674 spin_lock_irq(&np->lock); 5675 5676 err = niu_init_hw(np); 5677 if (!err) { 5678 init_timer(&np->timer); 5679 np->timer.expires = jiffies + HZ; 5680 np->timer.data = (unsigned long) np; 5681 np->timer.function = niu_timer; 5682 5683 err = niu_enable_interrupts(np, 1); 5684 if (err) 5685 niu_stop_hw(np); 5686 } 5687 5688 spin_unlock_irq(&np->lock); 5689 5690 if (err) { 5691 niu_disable_napi(np); 5692 goto out_free_irq; 5693 } 5694 5695 netif_tx_start_all_queues(dev); 5696 5697 if (np->link_config.loopback_mode != LOOPBACK_DISABLED) 5698 netif_carrier_on(dev); 5699 5700 add_timer(&np->timer); 5701 5702 return 0; 5703 5704out_free_irq: 5705 niu_free_irq(np); 5706 5707out_free_channels: 5708 niu_free_channels(np); 5709 5710out_err: 5711 return err; 5712} 5713 5714static void niu_full_shutdown(struct niu *np, struct net_device *dev) 5715{ 5716 cancel_work_sync(&np->reset_task); 5717 5718 niu_disable_napi(np); 5719 netif_tx_stop_all_queues(dev); 5720 5721 del_timer_sync(&np->timer); 5722 5723 spin_lock_irq(&np->lock); 5724 5725 niu_stop_hw(np); 5726 5727 spin_unlock_irq(&np->lock); 5728} 5729 5730static int niu_close(struct net_device *dev) 5731{ 5732 struct niu *np = netdev_priv(dev); 5733 5734 niu_full_shutdown(np, dev); 5735 5736 niu_free_irq(np); 5737 5738 niu_free_channels(np); 5739 5740 niu_handle_led(np, 0); 5741 5742 return 0; 5743} 5744 5745static void niu_sync_xmac_stats(struct niu *np) 5746{ 5747 struct niu_xmac_stats *mp = &np->mac_stats.xmac; 5748 5749 mp->tx_frames += nr64_mac(TXMAC_FRM_CNT); 5750 mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT); 5751 5752 mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT); 5753 mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT); 5754 mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT); 5755 mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT); 5756 mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT); 5757 mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1); 5758 mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2); 5759 mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3); 5760 mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4); 5761 mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5); 5762 mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6); 5763 mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7); 5764 mp->rx_octets += nr64_mac(RXMAC_BT_CNT); 5765 mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT); 5766 mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT); 5767 mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT); 5768} 5769 5770static void niu_sync_bmac_stats(struct niu *np) 5771{ 5772 struct niu_bmac_stats *mp = &np->mac_stats.bmac; 5773 5774 mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT); 5775 mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT); 5776 5777 mp->rx_frames += 
nr64_mac(BRXMAC_FRAME_CNT); 5778 mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT); 5779 mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT); 5780 mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT); 5781} 5782 5783static void niu_sync_mac_stats(struct niu *np) 5784{ 5785 if (np->flags & NIU_FLAGS_XMAC) 5786 niu_sync_xmac_stats(np); 5787 else 5788 niu_sync_bmac_stats(np); 5789} 5790 5791static void niu_get_rx_stats(struct niu *np) 5792{ 5793 unsigned long pkts, dropped, errors, bytes; 5794 int i; 5795 5796 pkts = dropped = errors = bytes = 0; 5797 for (i = 0; i < np->num_rx_rings; i++) { 5798 struct rx_ring_info *rp = &np->rx_rings[i]; 5799 5800 pkts += rp->rx_packets; 5801 bytes += rp->rx_bytes; 5802 dropped += rp->rx_dropped; 5803 errors += rp->rx_errors; 5804 } 5805 np->net_stats.rx_packets = pkts; 5806 np->net_stats.rx_bytes = bytes; 5807 np->net_stats.rx_dropped = dropped; 5808 np->net_stats.rx_errors = errors; 5809} 5810 5811static void niu_get_tx_stats(struct niu *np) 5812{ 5813 unsigned long pkts, errors, bytes; 5814 int i; 5815 5816 pkts = errors = bytes = 0; 5817 for (i = 0; i < np->num_tx_rings; i++) { 5818 struct tx_ring_info *rp = &np->tx_rings[i]; 5819 5820 pkts += rp->tx_packets; 5821 bytes += rp->tx_bytes; 5822 errors += rp->tx_errors; 5823 } 5824 np->net_stats.tx_packets = pkts; 5825 np->net_stats.tx_bytes = bytes; 5826 np->net_stats.tx_errors = errors; 5827} 5828 5829static struct net_device_stats *niu_get_stats(struct net_device *dev) 5830{ 5831 struct niu *np = netdev_priv(dev); 5832 5833 niu_get_rx_stats(np); 5834 niu_get_tx_stats(np); 5835 5836 return &np->net_stats; 5837} 5838 5839static void niu_load_hash_xmac(struct niu *np, u16 *hash) 5840{ 5841 int i; 5842 5843 for (i = 0; i < 16; i++) 5844 nw64_mac(XMAC_HASH_TBL(i), hash[i]); 5845} 5846 5847static void niu_load_hash_bmac(struct niu *np, u16 *hash) 5848{ 5849 int i; 5850 5851 for (i = 0; i < 16; i++) 5852 nw64_mac(BMAC_HASH_TBL(i), hash[i]); 5853} 5854 5855static void niu_load_hash(struct niu *np, u16 *hash) 5856{ 5857 if (np->flags & NIU_FLAGS_XMAC) 5858 niu_load_hash_xmac(np, hash); 5859 else 5860 niu_load_hash_bmac(np, hash); 5861} 5862 5863static void niu_set_rx_mode(struct net_device *dev) 5864{ 5865 struct niu *np = netdev_priv(dev); 5866 int i, alt_cnt, err; 5867 struct dev_addr_list *addr; 5868 unsigned long flags; 5869 u16 hash[16] = { 0, }; 5870 5871 spin_lock_irqsave(&np->lock, flags); 5872 niu_enable_rx_mac(np, 0); 5873 5874 np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC); 5875 if (dev->flags & IFF_PROMISC) 5876 np->flags |= NIU_FLAGS_PROMISC; 5877 if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 0)) 5878 np->flags |= NIU_FLAGS_MCAST; 5879 5880 alt_cnt = dev->uc_count; 5881 if (alt_cnt > niu_num_alt_addr(np)) { 5882 alt_cnt = 0; 5883 np->flags |= NIU_FLAGS_PROMISC; 5884 } 5885 5886 if (alt_cnt) { 5887 int index = 0; 5888 5889 for (addr = dev->uc_list; addr; addr = addr->next) { 5890 err = niu_set_alt_mac(np, index, 5891 addr->da_addr); 5892 if (err) 5893 printk(KERN_WARNING PFX "%s: Error %d " 5894 "adding alt mac %d\n", 5895 dev->name, err, index); 5896 err = niu_enable_alt_mac(np, index, 1); 5897 if (err) 5898 printk(KERN_WARNING PFX "%s: Error %d " 5899 "enabling alt mac %d\n", 5900 dev->name, err, index); 5901 5902 index++; 5903 } 5904 } else { 5905 int alt_start; 5906 if (np->flags & NIU_FLAGS_XMAC) 5907 alt_start = 0; 5908 else 5909 alt_start = 1; 5910 for (i = alt_start; i < niu_num_alt_addr(np); i++) { 5911 err = niu_enable_alt_mac(np, i, 0); 5912 if (err) 5913 
printk(KERN_WARNING PFX "%s: Error %d " 5914 "disabling alt mac %d\n", 5915 dev->name, err, i); 5916 } 5917 } 5918 if (dev->flags & IFF_ALLMULTI) { 5919 for (i = 0; i < 16; i++) 5920 hash[i] = 0xffff; 5921 } else if (dev->mc_count > 0) { 5922 for (addr = dev->mc_list; addr; addr = addr->next) { 5923 u32 crc = ether_crc_le(ETH_ALEN, addr->da_addr); 5924 5925 crc >>= 24; 5926 hash[crc >> 4] |= (1 << (15 - (crc & 0xf))); 5927 } 5928 } 5929 5930 if (np->flags & NIU_FLAGS_MCAST) 5931 niu_load_hash(np, hash); 5932 5933 niu_enable_rx_mac(np, 1); 5934 spin_unlock_irqrestore(&np->lock, flags); 5935} 5936 5937static int niu_set_mac_addr(struct net_device *dev, void *p) 5938{ 5939 struct niu *np = netdev_priv(dev); 5940 struct sockaddr *addr = p; 5941 unsigned long flags; 5942 5943 if (!is_valid_ether_addr(addr->sa_data)) 5944 return -EINVAL; 5945 5946 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 5947 5948 if (!netif_running(dev)) 5949 return 0; 5950 5951 spin_lock_irqsave(&np->lock, flags); 5952 niu_enable_rx_mac(np, 0); 5953 niu_set_primary_mac(np, dev->dev_addr); 5954 niu_enable_rx_mac(np, 1); 5955 spin_unlock_irqrestore(&np->lock, flags); 5956 5957 return 0; 5958} 5959 5960static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 5961{ 5962 return -EOPNOTSUPP; 5963} 5964 5965static void niu_netif_stop(struct niu *np) 5966{ 5967 np->dev->trans_start = jiffies; /* prevent tx timeout */ 5968 5969 niu_disable_napi(np); 5970 5971 netif_tx_disable(np->dev); 5972} 5973 5974static void niu_netif_start(struct niu *np) 5975{ 5976 /* NOTE: unconditional netif_wake_queue is only appropriate 5977 * so long as all callers are assured to have free tx slots 5978 * (such as after niu_init_hw). 5979 */ 5980 netif_tx_wake_all_queues(np->dev); 5981 5982 niu_enable_napi(np); 5983 5984 niu_enable_interrupts(np, 1); 5985} 5986 5987static void niu_reset_task(struct work_struct *work) 5988{ 5989 struct niu *np = container_of(work, struct niu, reset_task); 5990 unsigned long flags; 5991 int err; 5992 5993 spin_lock_irqsave(&np->lock, flags); 5994 if (!netif_running(np->dev)) { 5995 spin_unlock_irqrestore(&np->lock, flags); 5996 return; 5997 } 5998 5999 spin_unlock_irqrestore(&np->lock, flags); 6000 6001 del_timer_sync(&np->timer); 6002 6003 niu_netif_stop(np); 6004 6005 spin_lock_irqsave(&np->lock, flags); 6006 6007 niu_stop_hw(np); 6008 6009 err = niu_init_hw(np); 6010 if (!err) { 6011 np->timer.expires = jiffies + HZ; 6012 add_timer(&np->timer); 6013 niu_netif_start(np); 6014 } 6015 6016 spin_unlock_irqrestore(&np->lock, flags); 6017} 6018 6019static void niu_tx_timeout(struct net_device *dev) 6020{ 6021 struct niu *np = netdev_priv(dev); 6022 6023 dev_err(np->device, PFX "%s: Transmit timed out, resetting\n", 6024 dev->name); 6025 6026 schedule_work(&np->reset_task); 6027} 6028 6029static void niu_set_txd(struct tx_ring_info *rp, int index, 6030 u64 mapping, u64 len, u64 mark, 6031 u64 n_frags) 6032{ 6033 __le64 *desc = &rp->descr[index]; 6034 6035 *desc = cpu_to_le64(mark | 6036 (n_frags << TX_DESC_NUM_PTR_SHIFT) | 6037 (len << TX_DESC_TR_LEN_SHIFT) | 6038 (mapping & TX_DESC_SAD)); 6039} 6040 6041static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr, 6042 u64 pad_bytes, u64 len) 6043{ 6044 u16 eth_proto, eth_proto_inner; 6045 u64 csum_bits, l3off, ihl, ret; 6046 u8 ip_proto; 6047 int ipv6; 6048 6049 eth_proto = be16_to_cpu(ehdr->h_proto); 6050 eth_proto_inner = eth_proto; 6051 if (eth_proto == ETH_P_8021Q) { 6052 struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr; 6053 __be16 val 
= vp->h_vlan_encapsulated_proto; 6054 6055 eth_proto_inner = be16_to_cpu(val); 6056 } 6057 6058 ipv6 = ihl = 0; 6059 switch (skb->protocol) { 6060 case __constant_htons(ETH_P_IP): 6061 ip_proto = ip_hdr(skb)->protocol; 6062 ihl = ip_hdr(skb)->ihl; 6063 break; 6064 case __constant_htons(ETH_P_IPV6): 6065 ip_proto = ipv6_hdr(skb)->nexthdr; 6066 ihl = (40 >> 2); 6067 ipv6 = 1; 6068 break; 6069 default: 6070 ip_proto = ihl = 0; 6071 break; 6072 } 6073 6074 csum_bits = TXHDR_CSUM_NONE; 6075 if (skb->ip_summed == CHECKSUM_PARTIAL) { 6076 u64 start, stuff; 6077 6078 csum_bits = (ip_proto == IPPROTO_TCP ? 6079 TXHDR_CSUM_TCP : 6080 (ip_proto == IPPROTO_UDP ? 6081 TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP)); 6082 6083 start = skb_transport_offset(skb) - 6084 (pad_bytes + sizeof(struct tx_pkt_hdr)); 6085 stuff = start + skb->csum_offset; 6086 6087 csum_bits |= (start / 2) << TXHDR_L4START_SHIFT; 6088 csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT; 6089 } 6090 6091 l3off = skb_network_offset(skb) - 6092 (pad_bytes + sizeof(struct tx_pkt_hdr)); 6093 6094 ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) | 6095 (len << TXHDR_LEN_SHIFT) | 6096 ((l3off / 2) << TXHDR_L3START_SHIFT) | 6097 (ihl << TXHDR_IHL_SHIFT) | 6098 ((eth_proto_inner < 1536) ? TXHDR_LLC : 0) | 6099 ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) | 6100 (ipv6 ? TXHDR_IP_VER : 0) | 6101 csum_bits); 6102 6103 return ret; 6104} 6105 6106static int niu_start_xmit(struct sk_buff *skb, struct net_device *dev) 6107{ 6108 struct niu *np = netdev_priv(dev); 6109 unsigned long align, headroom; 6110 struct netdev_queue *txq; 6111 struct tx_ring_info *rp; 6112 struct tx_pkt_hdr *tp; 6113 unsigned int len, nfg; 6114 struct ethhdr *ehdr; 6115 int prod, i, tlen; 6116 u64 mapping, mrk; 6117 6118 i = skb_get_queue_mapping(skb); 6119 rp = &np->tx_rings[i]; 6120 txq = netdev_get_tx_queue(dev, i); 6121 6122 if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) { 6123 netif_tx_stop_queue(txq); 6124 dev_err(np->device, PFX "%s: BUG! 
Tx ring full when " 6125 "queue awake!\n", dev->name); 6126 rp->tx_errors++; 6127 return NETDEV_TX_BUSY; 6128 } 6129 6130 if (skb->len < ETH_ZLEN) { 6131 unsigned int pad_bytes = ETH_ZLEN - skb->len; 6132 6133 if (skb_pad(skb, pad_bytes)) 6134 goto out; 6135 skb_put(skb, pad_bytes); 6136 } 6137 6138 len = sizeof(struct tx_pkt_hdr) + 15; 6139 if (skb_headroom(skb) < len) { 6140 struct sk_buff *skb_new; 6141 6142 skb_new = skb_realloc_headroom(skb, len); 6143 if (!skb_new) { 6144 rp->tx_errors++; 6145 goto out_drop; 6146 } 6147 kfree_skb(skb); 6148 skb = skb_new; 6149 } else 6150 skb_orphan(skb); 6151 6152 align = ((unsigned long) skb->data & (16 - 1)); 6153 headroom = align + sizeof(struct tx_pkt_hdr); 6154 6155 ehdr = (struct ethhdr *) skb->data; 6156 tp = (struct tx_pkt_hdr *) skb_push(skb, headroom); 6157 6158 len = skb->len - sizeof(struct tx_pkt_hdr); 6159 tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len)); 6160 tp->resv = 0; 6161 6162 len = skb_headlen(skb); 6163 mapping = np->ops->map_single(np->device, skb->data, 6164 len, DMA_TO_DEVICE); 6165 6166 prod = rp->prod; 6167 6168 rp->tx_buffs[prod].skb = skb; 6169 rp->tx_buffs[prod].mapping = mapping; 6170 6171 mrk = TX_DESC_SOP; 6172 if (++rp->mark_counter == rp->mark_freq) { 6173 rp->mark_counter = 0; 6174 mrk |= TX_DESC_MARK; 6175 rp->mark_pending++; 6176 } 6177 6178 tlen = len; 6179 nfg = skb_shinfo(skb)->nr_frags; 6180 while (tlen > 0) { 6181 tlen -= MAX_TX_DESC_LEN; 6182 nfg++; 6183 } 6184 6185 while (len > 0) { 6186 unsigned int this_len = len; 6187 6188 if (this_len > MAX_TX_DESC_LEN) 6189 this_len = MAX_TX_DESC_LEN; 6190 6191 niu_set_txd(rp, prod, mapping, this_len, mrk, nfg); 6192 mrk = nfg = 0; 6193 6194 prod = NEXT_TX(rp, prod); 6195 mapping += this_len; 6196 len -= this_len; 6197 } 6198 6199 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 6200 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 6201 6202 len = frag->size; 6203 mapping = np->ops->map_page(np->device, frag->page, 6204 frag->page_offset, len, 6205 DMA_TO_DEVICE); 6206 6207 rp->tx_buffs[prod].skb = NULL; 6208 rp->tx_buffs[prod].mapping = mapping; 6209 6210 niu_set_txd(rp, prod, mapping, len, 0, 0); 6211 6212 prod = NEXT_TX(rp, prod); 6213 } 6214 6215 if (prod < rp->prod) 6216 rp->wrap_bit ^= TX_RING_KICK_WRAP; 6217 rp->prod = prod; 6218 6219 nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3)); 6220 6221 if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) { 6222 netif_tx_stop_queue(txq); 6223 if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)) 6224 netif_tx_wake_queue(txq); 6225 } 6226 6227 dev->trans_start = jiffies; 6228 6229out: 6230 return NETDEV_TX_OK; 6231 6232out_drop: 6233 rp->tx_errors++; 6234 kfree_skb(skb); 6235 goto out; 6236} 6237 6238static int niu_change_mtu(struct net_device *dev, int new_mtu) 6239{ 6240 struct niu *np = netdev_priv(dev); 6241 int err, orig_jumbo, new_jumbo; 6242 6243 if (new_mtu < 68 || new_mtu > NIU_MAX_MTU) 6244 return -EINVAL; 6245 6246 orig_jumbo = (dev->mtu > ETH_DATA_LEN); 6247 new_jumbo = (new_mtu > ETH_DATA_LEN); 6248 6249 dev->mtu = new_mtu; 6250 6251 if (!netif_running(dev) || 6252 (orig_jumbo == new_jumbo)) 6253 return 0; 6254 6255 niu_full_shutdown(np, dev); 6256 6257 niu_free_channels(np); 6258 6259 niu_enable_napi(np); 6260 6261 err = niu_alloc_channels(np); 6262 if (err) 6263 return err; 6264 6265 spin_lock_irq(&np->lock); 6266 6267 err = niu_init_hw(np); 6268 if (!err) { 6269 init_timer(&np->timer); 6270 np->timer.expires = jiffies + HZ; 6271 np->timer.data = (unsigned long) np; 6272 
np->timer.function = niu_timer; 6273 6274 err = niu_enable_interrupts(np, 1); 6275 if (err) 6276 niu_stop_hw(np); 6277 } 6278 6279 spin_unlock_irq(&np->lock); 6280 6281 if (!err) { 6282 netif_tx_start_all_queues(dev); 6283 if (np->link_config.loopback_mode != LOOPBACK_DISABLED) 6284 netif_carrier_on(dev); 6285 6286 add_timer(&np->timer); 6287 } 6288 6289 return err; 6290} 6291 6292static void niu_get_drvinfo(struct net_device *dev, 6293 struct ethtool_drvinfo *info) 6294{ 6295 struct niu *np = netdev_priv(dev); 6296 struct niu_vpd *vpd = &np->vpd; 6297 6298 strcpy(info->driver, DRV_MODULE_NAME); 6299 strcpy(info->version, DRV_MODULE_VERSION); 6300 sprintf(info->fw_version, "%d.%d", 6301 vpd->fcode_major, vpd->fcode_minor); 6302 if (np->parent->plat_type != PLAT_TYPE_NIU) 6303 strcpy(info->bus_info, pci_name(np->pdev)); 6304} 6305 6306static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 6307{ 6308 struct niu *np = netdev_priv(dev); 6309 struct niu_link_config *lp; 6310 6311 lp = &np->link_config; 6312 6313 memset(cmd, 0, sizeof(*cmd)); 6314 cmd->phy_address = np->phy_addr; 6315 cmd->supported = lp->supported; 6316 cmd->advertising = lp->advertising; 6317 cmd->autoneg = lp->autoneg; 6318 cmd->speed = lp->active_speed; 6319 cmd->duplex = lp->active_duplex; 6320 6321 return 0; 6322} 6323 6324static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 6325{ 6326 return -EINVAL; 6327} 6328 6329static u32 niu_get_msglevel(struct net_device *dev) 6330{ 6331 struct niu *np = netdev_priv(dev); 6332 return np->msg_enable; 6333} 6334 6335static void niu_set_msglevel(struct net_device *dev, u32 value) 6336{ 6337 struct niu *np = netdev_priv(dev); 6338 np->msg_enable = value; 6339} 6340 6341static int niu_get_eeprom_len(struct net_device *dev) 6342{ 6343 struct niu *np = netdev_priv(dev); 6344 6345 return np->eeprom_len; 6346} 6347 6348static int niu_get_eeprom(struct net_device *dev, 6349 struct ethtool_eeprom *eeprom, u8 *data) 6350{ 6351 struct niu *np = netdev_priv(dev); 6352 u32 offset, len, val; 6353 6354 offset = eeprom->offset; 6355 len = eeprom->len; 6356 6357 if (offset + len < offset) 6358 return -EINVAL; 6359 if (offset >= np->eeprom_len) 6360 return -EINVAL; 6361 if (offset + len > np->eeprom_len) 6362 len = eeprom->len = np->eeprom_len - offset; 6363 6364 if (offset & 3) { 6365 u32 b_offset, b_count; 6366 6367 b_offset = offset & 3; 6368 b_count = 4 - b_offset; 6369 if (b_count > len) 6370 b_count = len; 6371 6372 val = nr64(ESPC_NCR((offset - b_offset) / 4)); 6373 memcpy(data, ((char *)&val) + b_offset, b_count); 6374 data += b_count; 6375 len -= b_count; 6376 offset += b_count; 6377 } 6378 while (len >= 4) { 6379 val = nr64(ESPC_NCR(offset / 4)); 6380 memcpy(data, &val, 4); 6381 data += 4; 6382 len -= 4; 6383 offset += 4; 6384 } 6385 if (len) { 6386 val = nr64(ESPC_NCR(offset / 4)); 6387 memcpy(data, &val, len); 6388 } 6389 return 0; 6390} 6391 6392static int niu_ethflow_to_class(int flow_type, u64 *class) 6393{ 6394 switch (flow_type) { 6395 case TCP_V4_FLOW: 6396 *class = CLASS_CODE_TCP_IPV4; 6397 break; 6398 case UDP_V4_FLOW: 6399 *class = CLASS_CODE_UDP_IPV4; 6400 break; 6401 case AH_ESP_V4_FLOW: 6402 *class = CLASS_CODE_AH_ESP_IPV4; 6403 break; 6404 case SCTP_V4_FLOW: 6405 *class = CLASS_CODE_SCTP_IPV4; 6406 break; 6407 case TCP_V6_FLOW: 6408 *class = CLASS_CODE_TCP_IPV6; 6409 break; 6410 case UDP_V6_FLOW: 6411 *class = CLASS_CODE_UDP_IPV6; 6412 break; 6413 case AH_ESP_V6_FLOW: 6414 *class = CLASS_CODE_AH_ESP_IPV6; 6415 break; 6416 case 
SCTP_V6_FLOW: 6417 *class = CLASS_CODE_SCTP_IPV6; 6418 break; 6419 default: 6420 return 0; 6421 } 6422 6423 return 1; 6424} 6425 6426static u64 niu_flowkey_to_ethflow(u64 flow_key) 6427{ 6428 u64 ethflow = 0; 6429 6430 if (flow_key & FLOW_KEY_PORT) 6431 ethflow |= RXH_DEV_PORT; 6432 if (flow_key & FLOW_KEY_L2DA) 6433 ethflow |= RXH_L2DA; 6434 if (flow_key & FLOW_KEY_VLAN) 6435 ethflow |= RXH_VLAN; 6436 if (flow_key & FLOW_KEY_IPSA) 6437 ethflow |= RXH_IP_SRC; 6438 if (flow_key & FLOW_KEY_IPDA) 6439 ethflow |= RXH_IP_DST; 6440 if (flow_key & FLOW_KEY_PROTO) 6441 ethflow |= RXH_L3_PROTO; 6442 if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT)) 6443 ethflow |= RXH_L4_B_0_1; 6444 if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT)) 6445 ethflow |= RXH_L4_B_2_3; 6446 6447 return ethflow; 6448 6449} 6450 6451static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key) 6452{ 6453 u64 key = 0; 6454 6455 if (ethflow & RXH_DEV_PORT) 6456 key |= FLOW_KEY_PORT; 6457 if (ethflow & RXH_L2DA) 6458 key |= FLOW_KEY_L2DA; 6459 if (ethflow & RXH_VLAN) 6460 key |= FLOW_KEY_VLAN; 6461 if (ethflow & RXH_IP_SRC) 6462 key |= FLOW_KEY_IPSA; 6463 if (ethflow & RXH_IP_DST) 6464 key |= FLOW_KEY_IPDA; 6465 if (ethflow & RXH_L3_PROTO) 6466 key |= FLOW_KEY_PROTO; 6467 if (ethflow & RXH_L4_B_0_1) 6468 key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT); 6469 if (ethflow & RXH_L4_B_2_3) 6470 key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT); 6471 6472 *flow_key = key; 6473 6474 return 1; 6475 6476} 6477 6478static int niu_get_hash_opts(struct net_device *dev, struct ethtool_rxnfc *cmd) 6479{ 6480 struct niu *np = netdev_priv(dev); 6481 u64 class; 6482 6483 cmd->data = 0; 6484 6485 if (!niu_ethflow_to_class(cmd->flow_type, &class)) 6486 return -EINVAL; 6487 6488 if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] & 6489 TCAM_KEY_DISC) 6490 cmd->data = RXH_DISCARD; 6491 else 6492 6493 cmd->data = niu_flowkey_to_ethflow(np->parent->flow_key[class - 6494 CLASS_CODE_USER_PROG1]); 6495 return 0; 6496} 6497 6498static int niu_set_hash_opts(struct net_device *dev, struct ethtool_rxnfc *cmd) 6499{ 6500 struct niu *np = netdev_priv(dev); 6501 u64 class; 6502 u64 flow_key = 0; 6503 unsigned long flags; 6504 6505 if (!niu_ethflow_to_class(cmd->flow_type, &class)) 6506 return -EINVAL; 6507 6508 if (class < CLASS_CODE_USER_PROG1 || 6509 class > CLASS_CODE_SCTP_IPV6) 6510 return -EINVAL; 6511 6512 if (cmd->data & RXH_DISCARD) { 6513 niu_lock_parent(np, flags); 6514 flow_key = np->parent->tcam_key[class - 6515 CLASS_CODE_USER_PROG1]; 6516 flow_key |= TCAM_KEY_DISC; 6517 nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key); 6518 np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key; 6519 niu_unlock_parent(np, flags); 6520 return 0; 6521 } else { 6522 /* Discard was set before, but is not set now */ 6523 if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] & 6524 TCAM_KEY_DISC) { 6525 niu_lock_parent(np, flags); 6526 flow_key = np->parent->tcam_key[class - 6527 CLASS_CODE_USER_PROG1]; 6528 flow_key &= ~TCAM_KEY_DISC; 6529 nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), 6530 flow_key); 6531 np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = 6532 flow_key; 6533 niu_unlock_parent(np, flags); 6534 } 6535 } 6536 6537 if (!niu_ethflow_to_flowkey(cmd->data, &flow_key)) 6538 return -EINVAL; 6539 6540 niu_lock_parent(np, flags); 6541 nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key); 6542 np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key; 6543 niu_unlock_parent(np, flags); 6544 6545 return 0; 
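/* A worked example of the translation above: a request for
 * RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 on TCP_V4_FLOW maps, via
 * niu_ethflow_to_class() and niu_ethflow_to_flowkey(), to
 * FLOW_KEY_IPSA | FLOW_KEY_IPDA |
 * (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT), which is written to
 * FLOW_KEY(CLASS_CODE_TCP_IPV4 - CLASS_CODE_USER_PROG1) and cached in
 * the matching parent->flow_key[] slot.
 */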
6546} 6547 6548static const struct { 6549 const char string[ETH_GSTRING_LEN]; 6550} niu_xmac_stat_keys[] = { 6551 { "tx_frames" }, 6552 { "tx_bytes" }, 6553 { "tx_fifo_errors" }, 6554 { "tx_overflow_errors" }, 6555 { "tx_max_pkt_size_errors" }, 6556 { "tx_underflow_errors" }, 6557 { "rx_local_faults" }, 6558 { "rx_remote_faults" }, 6559 { "rx_link_faults" }, 6560 { "rx_align_errors" }, 6561 { "rx_frags" }, 6562 { "rx_mcasts" }, 6563 { "rx_bcasts" }, 6564 { "rx_hist_cnt1" }, 6565 { "rx_hist_cnt2" }, 6566 { "rx_hist_cnt3" }, 6567 { "rx_hist_cnt4" }, 6568 { "rx_hist_cnt5" }, 6569 { "rx_hist_cnt6" }, 6570 { "rx_hist_cnt7" }, 6571 { "rx_octets" }, 6572 { "rx_code_violations" }, 6573 { "rx_len_errors" }, 6574 { "rx_crc_errors" }, 6575 { "rx_underflows" }, 6576 { "rx_overflows" }, 6577 { "pause_off_state" }, 6578 { "pause_on_state" }, 6579 { "pause_received" }, 6580}; 6581 6582#define NUM_XMAC_STAT_KEYS ARRAY_SIZE(niu_xmac_stat_keys) 6583 6584static const struct { 6585 const char string[ETH_GSTRING_LEN]; 6586} niu_bmac_stat_keys[] = { 6587 { "tx_underflow_errors" }, 6588 { "tx_max_pkt_size_errors" }, 6589 { "tx_bytes" }, 6590 { "tx_frames" }, 6591 { "rx_overflows" }, 6592 { "rx_frames" }, 6593 { "rx_align_errors" }, 6594 { "rx_crc_errors" }, 6595 { "rx_len_errors" }, 6596 { "pause_off_state" }, 6597 { "pause_on_state" }, 6598 { "pause_received" }, 6599}; 6600 6601#define NUM_BMAC_STAT_KEYS ARRAY_SIZE(niu_bmac_stat_keys) 6602 6603static const struct { 6604 const char string[ETH_GSTRING_LEN]; 6605} niu_rxchan_stat_keys[] = { 6606 { "rx_channel" }, 6607 { "rx_packets" }, 6608 { "rx_bytes" }, 6609 { "rx_dropped" }, 6610 { "rx_errors" }, 6611}; 6612 6613#define NUM_RXCHAN_STAT_KEYS ARRAY_SIZE(niu_rxchan_stat_keys) 6614 6615static const struct { 6616 const char string[ETH_GSTRING_LEN]; 6617} niu_txchan_stat_keys[] = { 6618 { "tx_channel" }, 6619 { "tx_packets" }, 6620 { "tx_bytes" }, 6621 { "tx_errors" }, 6622}; 6623 6624#define NUM_TXCHAN_STAT_KEYS ARRAY_SIZE(niu_txchan_stat_keys) 6625 6626static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data) 6627{ 6628 struct niu *np = netdev_priv(dev); 6629 int i; 6630 6631 if (stringset != ETH_SS_STATS) 6632 return; 6633 6634 if (np->flags & NIU_FLAGS_XMAC) { 6635 memcpy(data, niu_xmac_stat_keys, 6636 sizeof(niu_xmac_stat_keys)); 6637 data += sizeof(niu_xmac_stat_keys); 6638 } else { 6639 memcpy(data, niu_bmac_stat_keys, 6640 sizeof(niu_bmac_stat_keys)); 6641 data += sizeof(niu_bmac_stat_keys); 6642 } 6643 for (i = 0; i < np->num_rx_rings; i++) { 6644 memcpy(data, niu_rxchan_stat_keys, 6645 sizeof(niu_rxchan_stat_keys)); 6646 data += sizeof(niu_rxchan_stat_keys); 6647 } 6648 for (i = 0; i < np->num_tx_rings; i++) { 6649 memcpy(data, niu_txchan_stat_keys, 6650 sizeof(niu_txchan_stat_keys)); 6651 data += sizeof(niu_txchan_stat_keys); 6652 } 6653} 6654 6655static int niu_get_stats_count(struct net_device *dev) 6656{ 6657 struct niu *np = netdev_priv(dev); 6658 6659 return ((np->flags & NIU_FLAGS_XMAC ? 
6660 NUM_XMAC_STAT_KEYS : 6661 NUM_BMAC_STAT_KEYS) + 6662 (np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) + 6663 (np->num_tx_rings * NUM_TXCHAN_STAT_KEYS)); 6664} 6665 6666static void niu_get_ethtool_stats(struct net_device *dev, 6667 struct ethtool_stats *stats, u64 *data) 6668{ 6669 struct niu *np = netdev_priv(dev); 6670 int i; 6671 6672 niu_sync_mac_stats(np); 6673 if (np->flags & NIU_FLAGS_XMAC) { 6674 memcpy(data, &np->mac_stats.xmac, 6675 sizeof(struct niu_xmac_stats)); 6676 data += (sizeof(struct niu_xmac_stats) / sizeof(u64)); 6677 } else { 6678 memcpy(data, &np->mac_stats.bmac, 6679 sizeof(struct niu_bmac_stats)); 6680 data += (sizeof(struct niu_bmac_stats) / sizeof(u64)); 6681 } 6682 for (i = 0; i < np->num_rx_rings; i++) { 6683 struct rx_ring_info *rp = &np->rx_rings[i]; 6684 6685 data[0] = rp->rx_channel; 6686 data[1] = rp->rx_packets; 6687 data[2] = rp->rx_bytes; 6688 data[3] = rp->rx_dropped; 6689 data[4] = rp->rx_errors; 6690 data += 5; 6691 } 6692 for (i = 0; i < np->num_tx_rings; i++) { 6693 struct tx_ring_info *rp = &np->tx_rings[i]; 6694 6695 data[0] = rp->tx_channel; 6696 data[1] = rp->tx_packets; 6697 data[2] = rp->tx_bytes; 6698 data[3] = rp->tx_errors; 6699 data += 4; 6700 } 6701} 6702 6703static u64 niu_led_state_save(struct niu *np) 6704{ 6705 if (np->flags & NIU_FLAGS_XMAC) 6706 return nr64_mac(XMAC_CONFIG); 6707 else 6708 return nr64_mac(BMAC_XIF_CONFIG); 6709} 6710 6711static void niu_led_state_restore(struct niu *np, u64 val) 6712{ 6713 if (np->flags & NIU_FLAGS_XMAC) 6714 nw64_mac(XMAC_CONFIG, val); 6715 else 6716 nw64_mac(BMAC_XIF_CONFIG, val); 6717} 6718 6719static void niu_force_led(struct niu *np, int on) 6720{ 6721 u64 val, reg, bit; 6722 6723 if (np->flags & NIU_FLAGS_XMAC) { 6724 reg = XMAC_CONFIG; 6725 bit = XMAC_CONFIG_FORCE_LED_ON; 6726 } else { 6727 reg = BMAC_XIF_CONFIG; 6728 bit = BMAC_XIF_CONFIG_LINK_LED; 6729 } 6730 6731 val = nr64_mac(reg); 6732 if (on) 6733 val |= bit; 6734 else 6735 val &= ~bit; 6736 nw64_mac(reg, val); 6737} 6738 6739static int niu_phys_id(struct net_device *dev, u32 data) 6740{ 6741 struct niu *np = netdev_priv(dev); 6742 u64 orig_led_state; 6743 int i; 6744 6745 if (!netif_running(dev)) 6746 return -EAGAIN; 6747 6748 if (data == 0) 6749 data = 2; 6750 6751 orig_led_state = niu_led_state_save(np); 6752 for (i = 0; i < (data * 2); i++) { 6753 int on = ((i % 2) == 0); 6754 6755 niu_force_led(np, on); 6756 6757 if (msleep_interruptible(500)) 6758 break; 6759 } 6760 niu_led_state_restore(np, orig_led_state); 6761 6762 return 0; 6763} 6764 6765static const struct ethtool_ops niu_ethtool_ops = { 6766 .get_drvinfo = niu_get_drvinfo, 6767 .get_link = ethtool_op_get_link, 6768 .get_msglevel = niu_get_msglevel, 6769 .set_msglevel = niu_set_msglevel, 6770 .get_eeprom_len = niu_get_eeprom_len, 6771 .get_eeprom = niu_get_eeprom, 6772 .get_settings = niu_get_settings, 6773 .set_settings = niu_set_settings, 6774 .get_strings = niu_get_strings, 6775 .get_stats_count = niu_get_stats_count, 6776 .get_ethtool_stats = niu_get_ethtool_stats, 6777 .phys_id = niu_phys_id, 6778 .get_rxhash = niu_get_hash_opts, 6779 .set_rxhash = niu_set_hash_opts, 6780}; 6781 6782static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent, 6783 int ldg, int ldn) 6784{ 6785 if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) 6786 return -EINVAL; 6787 if (ldn < 0 || ldn > LDN_MAX) 6788 return -EINVAL; 6789 6790 parent->ldg_map[ldn] = ldg; 6791 6792 if (np->parent->plat_type == PLAT_TYPE_NIU) { 6793 /* On N2 NIU, the ldn-->ldg assignments are setup and fixed by 6794 * 
the firmware, and we're not supposed to change them. 6795 * Validate the mapping, because if it's wrong we probably 6796 * won't get any interrupts and that's painful to debug. 6797 */ 6798 if (nr64(LDG_NUM(ldn)) != ldg) { 6799 dev_err(np->device, PFX "Port %u, mis-matched " 6800 "LDG assignment " 6801 "for ldn %d, should be %d is %llu\n", 6802 np->port, ldn, ldg, 6803 (unsigned long long) nr64(LDG_NUM(ldn))); 6804 return -EINVAL; 6805 } 6806 } else 6807 nw64(LDG_NUM(ldn), ldg); 6808 6809 return 0; 6810} 6811 6812static int niu_set_ldg_timer_res(struct niu *np, int res) 6813{ 6814 if (res < 0 || res > LDG_TIMER_RES_VAL) 6815 return -EINVAL; 6816 6817 6818 nw64(LDG_TIMER_RES, res); 6819 6820 return 0; 6821} 6822 6823static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector) 6824{ 6825 if ((ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) || 6826 (func < 0 || func > 3) || 6827 (vector < 0 || vector > 0x1f)) 6828 return -EINVAL; 6829 6830 nw64(SID(ldg), (func << SID_FUNC_SHIFT) | vector); 6831 6832 return 0; 6833} 6834 6835static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr) 6836{ 6837 u64 frame, frame_base = (ESPC_PIO_STAT_READ_START | 6838 (addr << ESPC_PIO_STAT_ADDR_SHIFT)); 6839 int limit; 6840 6841 if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT)) 6842 return -EINVAL; 6843 6844 frame = frame_base; 6845 nw64(ESPC_PIO_STAT, frame); 6846 limit = 64; 6847 do { 6848 udelay(5); 6849 frame = nr64(ESPC_PIO_STAT); 6850 if (frame & ESPC_PIO_STAT_READ_END) 6851 break; 6852 } while (limit--); 6853 if (!(frame & ESPC_PIO_STAT_READ_END)) { 6854 dev_err(np->device, PFX "EEPROM read timeout frame[%llx]\n", 6855 (unsigned long long) frame); 6856 return -ENODEV; 6857 } 6858 6859 frame = frame_base; 6860 nw64(ESPC_PIO_STAT, frame); 6861 limit = 64; 6862 do { 6863 udelay(5); 6864 frame = nr64(ESPC_PIO_STAT); 6865 if (frame & ESPC_PIO_STAT_READ_END) 6866 break; 6867 } while (limit--); 6868 if (!(frame & ESPC_PIO_STAT_READ_END)) { 6869 dev_err(np->device, PFX "EEPROM read timeout frame[%llx]\n", 6870 (unsigned long long) frame); 6871 return -ENODEV; 6872 } 6873 6874 frame = nr64(ESPC_PIO_STAT); 6875 return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT; 6876} 6877 6878static int __devinit niu_pci_eeprom_read16(struct niu *np, u32 off) 6879{ 6880 int err = niu_pci_eeprom_read(np, off); 6881 u16 val; 6882 6883 if (err < 0) 6884 return err; 6885 val = (err << 8); 6886 err = niu_pci_eeprom_read(np, off + 1); 6887 if (err < 0) 6888 return err; 6889 val |= (err & 0xff); 6890 6891 return val; 6892} 6893 6894static int __devinit niu_pci_eeprom_read16_swp(struct niu *np, u32 off) 6895{ 6896 int err = niu_pci_eeprom_read(np, off); 6897 u16 val; 6898 6899 if (err < 0) 6900 return err; 6901 6902 val = (err & 0xff); 6903 err = niu_pci_eeprom_read(np, off + 1); 6904 if (err < 0) 6905 return err; 6906 6907 val |= (err & 0xff) << 8; 6908 6909 return val; 6910} 6911 6912static int __devinit niu_pci_vpd_get_propname(struct niu *np, 6913 u32 off, 6914 char *namebuf, 6915 int namebuf_len) 6916{ 6917 int i; 6918 6919 for (i = 0; i < namebuf_len; i++) { 6920 int err = niu_pci_eeprom_read(np, off + i); 6921 if (err < 0) 6922 return err; 6923 *namebuf++ = err; 6924 if (!err) 6925 break; 6926 } 6927 if (i >= namebuf_len) 6928 return -EINVAL; 6929 6930 return i + 1; 6931} 6932 6933static void __devinit niu_vpd_parse_version(struct niu *np) 6934{ 6935 struct niu_vpd *vpd = &np->vpd; 6936 int len = strlen(vpd->version) + 1; 6937 const char *s = vpd->version; 6938 int i; 6939 6940 for (i = 0; i < 
len - 5; i++) { 6941 if (!strncmp(s + i, "FCode ", 5)) 6942 break; 6943 } 6944 if (i >= len - 5) 6945 return; 6946 6947 s += i + 5; 6948 sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor); 6949 6950 niudbg(PROBE, "VPD_SCAN: FCODE major(%d) minor(%d)\n", 6951 vpd->fcode_major, vpd->fcode_minor); 6952 if (vpd->fcode_major > NIU_VPD_MIN_MAJOR || 6953 (vpd->fcode_major == NIU_VPD_MIN_MAJOR && 6954 vpd->fcode_minor >= NIU_VPD_MIN_MINOR)) 6955 np->flags |= NIU_FLAGS_VPD_VALID; 6956} 6957 6958/* ESPC_PIO_EN_ENABLE must be set */ 6959static int __devinit niu_pci_vpd_scan_props(struct niu *np, 6960 u32 start, u32 end) 6961{ 6962 unsigned int found_mask = 0; 6963#define FOUND_MASK_MODEL 0x00000001 6964#define FOUND_MASK_BMODEL 0x00000002 6965#define FOUND_MASK_VERS 0x00000004 6966#define FOUND_MASK_MAC 0x00000008 6967#define FOUND_MASK_NMAC 0x00000010 6968#define FOUND_MASK_PHY 0x00000020 6969#define FOUND_MASK_ALL 0x0000003f 6970 6971 niudbg(PROBE, "VPD_SCAN: start[%x] end[%x]\n", 6972 start, end); 6973 while (start < end) { 6974 int len, err, instance, type, prop_len; 6975 char namebuf[64]; 6976 u8 *prop_buf; 6977 int max_len; 6978 6979 if (found_mask == FOUND_MASK_ALL) { 6980 niu_vpd_parse_version(np); 6981 return 1; 6982 } 6983 6984 err = niu_pci_eeprom_read(np, start + 2); 6985 if (err < 0) 6986 return err; 6987 len = err; 6988 start += 3; 6989 6990 instance = niu_pci_eeprom_read(np, start); 6991 type = niu_pci_eeprom_read(np, start + 3); 6992 prop_len = niu_pci_eeprom_read(np, start + 4); 6993 err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64); 6994 if (err < 0) 6995 return err; 6996 6997 prop_buf = NULL; 6998 max_len = 0; 6999 if (!strcmp(namebuf, "model")) { 7000 prop_buf = np->vpd.model; 7001 max_len = NIU_VPD_MODEL_MAX; 7002 found_mask |= FOUND_MASK_MODEL; 7003 } else if (!strcmp(namebuf, "board-model")) { 7004 prop_buf = np->vpd.board_model; 7005 max_len = NIU_VPD_BD_MODEL_MAX; 7006 found_mask |= FOUND_MASK_BMODEL; 7007 } else if (!strcmp(namebuf, "version")) { 7008 prop_buf = np->vpd.version; 7009 max_len = NIU_VPD_VERSION_MAX; 7010 found_mask |= FOUND_MASK_VERS; 7011 } else if (!strcmp(namebuf, "local-mac-address")) { 7012 prop_buf = np->vpd.local_mac; 7013 max_len = ETH_ALEN; 7014 found_mask |= FOUND_MASK_MAC; 7015 } else if (!strcmp(namebuf, "num-mac-addresses")) { 7016 prop_buf = &np->vpd.mac_num; 7017 max_len = 1; 7018 found_mask |= FOUND_MASK_NMAC; 7019 } else if (!strcmp(namebuf, "phy-type")) { 7020 prop_buf = np->vpd.phy_type; 7021 max_len = NIU_VPD_PHY_TYPE_MAX; 7022 found_mask |= FOUND_MASK_PHY; 7023 } 7024 7025 if (max_len && prop_len > max_len) { 7026 dev_err(np->device, PFX "Property '%s' length (%d) is " 7027 "too long.\n", namebuf, prop_len); 7028 return -EINVAL; 7029 } 7030 7031 if (prop_buf) { 7032 u32 off = start + 5 + err; 7033 int i; 7034 7035 niudbg(PROBE, "VPD_SCAN: Reading in property [%s] " 7036 "len[%d]\n", namebuf, prop_len); 7037 for (i = 0; i < prop_len; i++) 7038 *prop_buf++ = niu_pci_eeprom_read(np, off + i); 7039 } 7040 7041 start += len; 7042 } 7043 7044 return 0; 7045} 7046 7047/* ESPC_PIO_EN_ENABLE must be set */ 7048static void __devinit niu_pci_vpd_fetch(struct niu *np, u32 start) 7049{ 7050 u32 offset; 7051 int err; 7052 7053 err = niu_pci_eeprom_read16_swp(np, start + 1); 7054 if (err < 0) 7055 return; 7056 7057 offset = err + 3; 7058 7059 while (start + offset < ESPC_EEPROM_SIZE) { 7060 u32 here = start + offset; 7061 u32 end; 7062 7063 err = niu_pci_eeprom_read(np, here); 7064 if (err != 0x90) 7065 return; 7066 7067 err = 
niu_pci_eeprom_read16_swp(np, here + 1); 7068 if (err < 0) 7069 return; 7070 7071 here = start + offset + 3; 7072 end = start + offset + err; 7073 7074 offset += err; 7075 7076 err = niu_pci_vpd_scan_props(np, here, end); 7077 if (err < 0 || err == 1) 7078 return; 7079 } 7080} 7081 7082/* ESPC_PIO_EN_ENABLE must be set */ 7083static u32 __devinit niu_pci_vpd_offset(struct niu *np) 7084{ 7085 u32 start = 0, end = ESPC_EEPROM_SIZE, ret; 7086 int err; 7087 7088 while (start < end) { 7089 ret = start; 7090 7091 /* ROM header signature? */ 7092 err = niu_pci_eeprom_read16(np, start + 0); 7093 if (err != 0x55aa) 7094 return 0; 7095 7096 /* Apply offset to PCI data structure. */ 7097 err = niu_pci_eeprom_read16(np, start + 23); 7098 if (err < 0) 7099 return 0; 7100 start += err; 7101 7102 /* Check for "PCIR" signature. */ 7103 err = niu_pci_eeprom_read16(np, start + 0); 7104 if (err != 0x5043) 7105 return 0; 7106 err = niu_pci_eeprom_read16(np, start + 2); 7107 if (err != 0x4952) 7108 return 0; 7109 7110 /* Check for OBP image type. */ 7111 err = niu_pci_eeprom_read(np, start + 20); 7112 if (err < 0) 7113 return 0; 7114 if (err != 0x01) { 7115 err = niu_pci_eeprom_read(np, ret + 2); 7116 if (err < 0) 7117 return 0; 7118 7119 start = ret + (err * 512); 7120 continue; 7121 } 7122 7123 err = niu_pci_eeprom_read16_swp(np, start + 8); 7124 if (err < 0) 7125 return err; 7126 ret += err; 7127 7128 err = niu_pci_eeprom_read(np, ret + 0); 7129 if (err != 0x82) 7130 return 0; 7131 7132 return ret; 7133 } 7134 7135 return 0; 7136} 7137 7138static int __devinit niu_phy_type_prop_decode(struct niu *np, 7139 const char *phy_prop) 7140{ 7141 if (!strcmp(phy_prop, "mif")) { 7142 /* 1G copper, MII */ 7143 np->flags &= ~(NIU_FLAGS_FIBER | 7144 NIU_FLAGS_10G); 7145 np->mac_xcvr = MAC_XCVR_MII; 7146 } else if (!strcmp(phy_prop, "xgf")) { 7147 /* 10G fiber, XPCS */ 7148 np->flags |= (NIU_FLAGS_10G | 7149 NIU_FLAGS_FIBER); 7150 np->mac_xcvr = MAC_XCVR_XPCS; 7151 } else if (!strcmp(phy_prop, "pcs")) { 7152 /* 1G fiber, PCS */ 7153 np->flags &= ~NIU_FLAGS_10G; 7154 np->flags |= NIU_FLAGS_FIBER; 7155 np->mac_xcvr = MAC_XCVR_PCS; 7156 } else if (!strcmp(phy_prop, "xgc")) { 7157 /* 10G copper, XPCS */ 7158 np->flags |= NIU_FLAGS_10G; 7159 np->flags &= ~NIU_FLAGS_FIBER; 7160 np->mac_xcvr = MAC_XCVR_XPCS; 7161 } else { 7162 return -EINVAL; 7163 } 7164 return 0; 7165} 7166 7167static int niu_pci_vpd_get_nports(struct niu *np) 7168{ 7169 int ports = 0; 7170 7171 if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) || 7172 (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) || 7173 (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) || 7174 (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) || 7175 (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) { 7176 ports = 4; 7177 } else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) || 7178 (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) || 7179 (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) || 7180 (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) { 7181 ports = 2; 7182 } 7183 7184 return ports; 7185} 7186 7187static void __devinit niu_pci_vpd_validate(struct niu *np) 7188{ 7189 struct net_device *dev = np->dev; 7190 struct niu_vpd *vpd = &np->vpd; 7191 u8 val8; 7192 7193 if (!is_valid_ether_addr(&vpd->local_mac[0])) { 7194 dev_err(np->device, PFX "VPD MAC invalid, " 7195 "falling back to SPROM.\n"); 7196 7197 np->flags &= ~NIU_FLAGS_VPD_VALID; 7198 return; 7199 } 7200 7201 if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || 7202 !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { 7203 np->flags |= NIU_FLAGS_10G; 7204 
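		/* A reader's note on the flag juggling below: on these
		 * ATCA boards the net effect is that the first two ports
		 * are treated as 10G serdes and ports 2/3 as 1G fiber;
		 * MAC_XCVR_XPCS is then kept only for the ports that
		 * still carry NIU_FLAGS_10G.
		 */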
np->flags &= ~NIU_FLAGS_FIBER; 7205 np->flags |= NIU_FLAGS_XCVR_SERDES; 7206 np->mac_xcvr = MAC_XCVR_PCS; 7207 if (np->port > 1) { 7208 np->flags |= NIU_FLAGS_FIBER; 7209 np->flags &= ~NIU_FLAGS_10G; 7210 } 7211 if (np->flags & NIU_FLAGS_10G) 7212 np->mac_xcvr = MAC_XCVR_XPCS; 7213 } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { 7214 np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | 7215 NIU_FLAGS_HOTPLUG_PHY); 7216 } else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { 7217 dev_err(np->device, PFX "Illegal phy string [%s].\n", 7218 np->vpd.phy_type); 7219 dev_err(np->device, PFX "Falling back to SPROM.\n"); 7220 np->flags &= ~NIU_FLAGS_VPD_VALID; 7221 return; 7222 } 7223 7224 memcpy(dev->perm_addr, vpd->local_mac, ETH_ALEN); 7225 7226 val8 = dev->perm_addr[5]; 7227 dev->perm_addr[5] += np->port; 7228 if (dev->perm_addr[5] < val8) 7229 dev->perm_addr[4]++; 7230 7231 memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); 7232} 7233 7234static int __devinit niu_pci_probe_sprom(struct niu *np) 7235{ 7236 struct net_device *dev = np->dev; 7237 int len, i; 7238 u64 val, sum; 7239 u8 val8; 7240 7241 val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ); 7242 val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT; 7243 len = val / 4; 7244 7245 np->eeprom_len = len; 7246 7247 niudbg(PROBE, "SPROM: Image size %llu\n", (unsigned long long) val); 7248 7249 sum = 0; 7250 for (i = 0; i < len; i++) { 7251 val = nr64(ESPC_NCR(i)); 7252 sum += (val >> 0) & 0xff; 7253 sum += (val >> 8) & 0xff; 7254 sum += (val >> 16) & 0xff; 7255 sum += (val >> 24) & 0xff; 7256 } 7257 niudbg(PROBE, "SPROM: Checksum %x\n", (int)(sum & 0xff)); 7258 if ((sum & 0xff) != 0xab) { 7259 dev_err(np->device, PFX "Bad SPROM checksum " 7260 "(%x, should be 0xab)\n", (int) (sum & 0xff)); 7261 return -EINVAL; 7262 } 7263 7264 val = nr64(ESPC_PHY_TYPE); 7265 switch (np->port) { 7266 case 0: 7267 val8 = (val & ESPC_PHY_TYPE_PORT0) >> 7268 ESPC_PHY_TYPE_PORT0_SHIFT; 7269 break; 7270 case 1: 7271 val8 = (val & ESPC_PHY_TYPE_PORT1) >> 7272 ESPC_PHY_TYPE_PORT1_SHIFT; 7273 break; 7274 case 2: 7275 val8 = (val & ESPC_PHY_TYPE_PORT2) >> 7276 ESPC_PHY_TYPE_PORT2_SHIFT; 7277 break; 7278 case 3: 7279 val8 = (val & ESPC_PHY_TYPE_PORT3) >> 7280 ESPC_PHY_TYPE_PORT3_SHIFT; 7281 break; 7282 default: 7283 dev_err(np->device, PFX "Bogus port number %u\n", 7284 np->port); 7285 return -EINVAL; 7286 } 7287 niudbg(PROBE, "SPROM: PHY type %x\n", val8); 7288 7289 switch (val8) { 7290 case ESPC_PHY_TYPE_1G_COPPER: 7291 /* 1G copper, MII */ 7292 np->flags &= ~(NIU_FLAGS_FIBER | 7293 NIU_FLAGS_10G); 7294 np->mac_xcvr = MAC_XCVR_MII; 7295 break; 7296 7297 case ESPC_PHY_TYPE_1G_FIBER: 7298 /* 1G fiber, PCS */ 7299 np->flags &= ~NIU_FLAGS_10G; 7300 np->flags |= NIU_FLAGS_FIBER; 7301 np->mac_xcvr = MAC_XCVR_PCS; 7302 break; 7303 7304 case ESPC_PHY_TYPE_10G_COPPER: 7305 /* 10G copper, XPCS */ 7306 np->flags |= NIU_FLAGS_10G; 7307 np->flags &= ~NIU_FLAGS_FIBER; 7308 np->mac_xcvr = MAC_XCVR_XPCS; 7309 break; 7310 7311 case ESPC_PHY_TYPE_10G_FIBER: 7312 /* 10G fiber, XPCS */ 7313 np->flags |= (NIU_FLAGS_10G | 7314 NIU_FLAGS_FIBER); 7315 np->mac_xcvr = MAC_XCVR_XPCS; 7316 break; 7317 7318 default: 7319 dev_err(np->device, PFX "Bogus SPROM phy type %u\n", val8); 7320 return -EINVAL; 7321 } 7322 7323 val = nr64(ESPC_MAC_ADDR0); 7324 niudbg(PROBE, "SPROM: MAC_ADDR0[%08llx]\n", 7325 (unsigned long long) val); 7326 dev->perm_addr[0] = (val >> 0) & 0xff; 7327 dev->perm_addr[1] = (val >> 8) & 0xff; 7328 dev->perm_addr[2] = (val >> 16) & 0xff; 7329 dev->perm_addr[3] = (val >> 24) & 0xff; 
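	/* ESPC_MAC_ADDR0 carries bytes 0-3 of the station address in its
	 * low 32 bits, least significant byte first; ESPC_MAC_ADDR1, read
	 * next, supplies the final two bytes the same way.
	 */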
7330 7331 val = nr64(ESPC_MAC_ADDR1); 7332 niudbg(PROBE, "SPROM: MAC_ADDR1[%08llx]\n", 7333 (unsigned long long) val); 7334 dev->perm_addr[4] = (val >> 0) & 0xff; 7335 dev->perm_addr[5] = (val >> 8) & 0xff; 7336 7337 if (!is_valid_ether_addr(&dev->perm_addr[0])) { 7338 dev_err(np->device, PFX "SPROM MAC address invalid\n"); 7339 dev_err(np->device, PFX "[ \n"); 7340 for (i = 0; i < 6; i++) 7341 printk("%02x ", dev->perm_addr[i]); 7342 printk("]\n"); 7343 return -EINVAL; 7344 } 7345 7346 val8 = dev->perm_addr[5]; 7347 dev->perm_addr[5] += np->port; 7348 if (dev->perm_addr[5] < val8) 7349 dev->perm_addr[4]++; 7350 7351 memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); 7352 7353 val = nr64(ESPC_MOD_STR_LEN); 7354 niudbg(PROBE, "SPROM: MOD_STR_LEN[%llu]\n", 7355 (unsigned long long) val); 7356 if (val >= 8 * 4) 7357 return -EINVAL; 7358 7359 for (i = 0; i < val; i += 4) { 7360 u64 tmp = nr64(ESPC_NCR(5 + (i / 4))); 7361 7362 np->vpd.model[i + 3] = (tmp >> 0) & 0xff; 7363 np->vpd.model[i + 2] = (tmp >> 8) & 0xff; 7364 np->vpd.model[i + 1] = (tmp >> 16) & 0xff; 7365 np->vpd.model[i + 0] = (tmp >> 24) & 0xff; 7366 } 7367 np->vpd.model[val] = '\0'; 7368 7369 val = nr64(ESPC_BD_MOD_STR_LEN); 7370 niudbg(PROBE, "SPROM: BD_MOD_STR_LEN[%llu]\n", 7371 (unsigned long long) val); 7372 if (val >= 4 * 4) 7373 return -EINVAL; 7374 7375 for (i = 0; i < val; i += 4) { 7376 u64 tmp = nr64(ESPC_NCR(14 + (i / 4))); 7377 7378 np->vpd.board_model[i + 3] = (tmp >> 0) & 0xff; 7379 np->vpd.board_model[i + 2] = (tmp >> 8) & 0xff; 7380 np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff; 7381 np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff; 7382 } 7383 np->vpd.board_model[val] = '\0'; 7384 7385 np->vpd.mac_num = 7386 nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL; 7387 niudbg(PROBE, "SPROM: NUM_PORTS_MACS[%d]\n", 7388 np->vpd.mac_num); 7389 7390 return 0; 7391} 7392 7393static int __devinit niu_get_and_validate_port(struct niu *np) 7394{ 7395 struct niu_parent *parent = np->parent; 7396 7397 if (np->port <= 1) 7398 np->flags |= NIU_FLAGS_XMAC; 7399 7400 if (!parent->num_ports) { 7401 if (parent->plat_type == PLAT_TYPE_NIU) { 7402 parent->num_ports = 2; 7403 } else { 7404 parent->num_ports = niu_pci_vpd_get_nports(np); 7405 if (!parent->num_ports) { 7406 /* Fall back to SPROM as last resort. 7407 * This will fail on most cards. 7408 */ 7409 parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) & 7410 ESPC_NUM_PORTS_MACS_VAL; 7411 7412 /* All of the current probing methods fail on 7413 * Maramba on-board parts. 7414 */ 7415 if (!parent->num_ports) 7416 parent->num_ports = 4; 7417 } 7418 } 7419 } 7420 7421 niudbg(PROBE, "niu_get_and_validate_port: port[%d] num_ports[%d]\n", 7422 np->port, parent->num_ports); 7423 if (np->port >= parent->num_ports) 7424 return -ENODEV; 7425 7426 return 0; 7427} 7428 7429static int __devinit phy_record(struct niu_parent *parent, 7430 struct phy_probe_info *p, 7431 int dev_id_1, int dev_id_2, u8 phy_port, 7432 int type) 7433{ 7434 u32 id = (dev_id_1 << 16) | dev_id_2; 7435 u8 idx; 7436 7437 if (dev_id_1 < 0 || dev_id_2 < 0) 7438 return 0; 7439 if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) { 7440 if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) && 7441 ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011) && 7442 ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8706)) 7443 return 0; 7444 } else { 7445 if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R) 7446 return 0; 7447 } 7448 7449 pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n", 7450 parent->index, id, 7451 (type == PHY_TYPE_PMA_PMD ? 
7452 "PMA/PMD" : 7453 (type == PHY_TYPE_PCS ? 7454 "PCS" : "MII")), 7455 phy_port); 7456 7457 if (p->cur[type] >= NIU_MAX_PORTS) { 7458 printk(KERN_ERR PFX "Too many PHY ports.\n"); 7459 return -EINVAL; 7460 } 7461 idx = p->cur[type]; 7462 p->phy_id[type][idx] = id; 7463 p->phy_port[type][idx] = phy_port; 7464 p->cur[type] = idx + 1; 7465 return 0; 7466} 7467 7468static int __devinit port_has_10g(struct phy_probe_info *p, int port) 7469{ 7470 int i; 7471 7472 for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) { 7473 if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port) 7474 return 1; 7475 } 7476 for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) { 7477 if (p->phy_port[PHY_TYPE_PCS][i] == port) 7478 return 1; 7479 } 7480 7481 return 0; 7482} 7483 7484static int __devinit count_10g_ports(struct phy_probe_info *p, int *lowest) 7485{ 7486 int port, cnt; 7487 7488 cnt = 0; 7489 *lowest = 32; 7490 for (port = 8; port < 32; port++) { 7491 if (port_has_10g(p, port)) { 7492 if (!cnt) 7493 *lowest = port; 7494 cnt++; 7495 } 7496 } 7497 7498 return cnt; 7499} 7500 7501static int __devinit count_1g_ports(struct phy_probe_info *p, int *lowest) 7502{ 7503 *lowest = 32; 7504 if (p->cur[PHY_TYPE_MII]) 7505 *lowest = p->phy_port[PHY_TYPE_MII][0]; 7506 7507 return p->cur[PHY_TYPE_MII]; 7508} 7509 7510static void __devinit niu_n2_divide_channels(struct niu_parent *parent) 7511{ 7512 int num_ports = parent->num_ports; 7513 int i; 7514 7515 for (i = 0; i < num_ports; i++) { 7516 parent->rxchan_per_port[i] = (16 / num_ports); 7517 parent->txchan_per_port[i] = (16 / num_ports); 7518 7519 pr_info(PFX "niu%d: Port %u [%u RX chans] " 7520 "[%u TX chans]\n", 7521 parent->index, i, 7522 parent->rxchan_per_port[i], 7523 parent->txchan_per_port[i]); 7524 } 7525} 7526 7527static void __devinit niu_divide_channels(struct niu_parent *parent, 7528 int num_10g, int num_1g) 7529{ 7530 int num_ports = parent->num_ports; 7531 int rx_chans_per_10g, rx_chans_per_1g; 7532 int tx_chans_per_10g, tx_chans_per_1g; 7533 int i, tot_rx, tot_tx; 7534 7535 if (!num_10g || !num_1g) { 7536 rx_chans_per_10g = rx_chans_per_1g = 7537 (NIU_NUM_RXCHAN / num_ports); 7538 tx_chans_per_10g = tx_chans_per_1g = 7539 (NIU_NUM_TXCHAN / num_ports); 7540 } else { 7541 rx_chans_per_1g = NIU_NUM_RXCHAN / 8; 7542 rx_chans_per_10g = (NIU_NUM_RXCHAN - 7543 (rx_chans_per_1g * num_1g)) / 7544 num_10g; 7545 7546 tx_chans_per_1g = NIU_NUM_TXCHAN / 6; 7547 tx_chans_per_10g = (NIU_NUM_TXCHAN - 7548 (tx_chans_per_1g * num_1g)) / 7549 num_10g; 7550 } 7551 7552 tot_rx = tot_tx = 0; 7553 for (i = 0; i < num_ports; i++) { 7554 int type = phy_decode(parent->port_phy, i); 7555 7556 if (type == PORT_TYPE_10G) { 7557 parent->rxchan_per_port[i] = rx_chans_per_10g; 7558 parent->txchan_per_port[i] = tx_chans_per_10g; 7559 } else { 7560 parent->rxchan_per_port[i] = rx_chans_per_1g; 7561 parent->txchan_per_port[i] = tx_chans_per_1g; 7562 } 7563 pr_info(PFX "niu%d: Port %u [%u RX chans] " 7564 "[%u TX chans]\n", 7565 parent->index, i, 7566 parent->rxchan_per_port[i], 7567 parent->txchan_per_port[i]); 7568 tot_rx += parent->rxchan_per_port[i]; 7569 tot_tx += parent->txchan_per_port[i]; 7570 } 7571 7572 if (tot_rx > NIU_NUM_RXCHAN) { 7573 printk(KERN_ERR PFX "niu%d: Too many RX channels (%d), " 7574 "resetting to one per port.\n", 7575 parent->index, tot_rx); 7576 for (i = 0; i < num_ports; i++) 7577 parent->rxchan_per_port[i] = 1; 7578 } 7579 if (tot_tx > NIU_NUM_TXCHAN) { 7580 printk(KERN_ERR PFX "niu%d: Too many TX channels (%d), " 7581 "resetting to one per port.\n", 7582 parent->index, tot_tx); 
7583 for (i = 0; i < num_ports; i++) 7584 parent->txchan_per_port[i] = 1; 7585 } 7586 if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) { 7587 printk(KERN_WARNING PFX "niu%d: Driver bug, wasted channels, " 7588 "RX[%d] TX[%d]\n", 7589 parent->index, tot_rx, tot_tx); 7590 } 7591} 7592 7593static void __devinit niu_divide_rdc_groups(struct niu_parent *parent, 7594 int num_10g, int num_1g) 7595{ 7596 int i, num_ports = parent->num_ports; 7597 int rdc_group, rdc_groups_per_port; 7598 int rdc_channel_base; 7599 7600 rdc_group = 0; 7601 rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports; 7602 7603 rdc_channel_base = 0; 7604 7605 for (i = 0; i < num_ports; i++) { 7606 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i]; 7607 int grp, num_channels = parent->rxchan_per_port[i]; 7608 int this_channel_offset; 7609 7610 tp->first_table_num = rdc_group; 7611 tp->num_tables = rdc_groups_per_port; 7612 this_channel_offset = 0; 7613 for (grp = 0; grp < tp->num_tables; grp++) { 7614 struct rdc_table *rt = &tp->tables[grp]; 7615 int slot; 7616 7617 pr_info(PFX "niu%d: Port %d RDC tbl(%d) [ ", 7618 parent->index, i, tp->first_table_num + grp); 7619 for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) { 7620 rt->rxdma_channel[slot] = 7621 rdc_channel_base + this_channel_offset; 7622 7623 printk("%d ", rt->rxdma_channel[slot]); 7624 7625 if (++this_channel_offset == num_channels) 7626 this_channel_offset = 0; 7627 } 7628 printk("]\n"); 7629 } 7630 7631 parent->rdc_default[i] = rdc_channel_base; 7632 7633 rdc_channel_base += num_channels; 7634 rdc_group += rdc_groups_per_port; 7635 } 7636} 7637 7638static int __devinit fill_phy_probe_info(struct niu *np, 7639 struct niu_parent *parent, 7640 struct phy_probe_info *info) 7641{ 7642 unsigned long flags; 7643 int port, err; 7644 7645 memset(info, 0, sizeof(*info)); 7646 7647 /* Port 0 to 7 are reserved for onboard Serdes, probe the rest. 
*/ 7648 niu_lock_parent(np, flags); 7649 err = 0; 7650 for (port = 8; port < 32; port++) { 7651 int dev_id_1, dev_id_2; 7652 7653 dev_id_1 = mdio_read(np, port, 7654 NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1); 7655 dev_id_2 = mdio_read(np, port, 7656 NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2); 7657 err = phy_record(parent, info, dev_id_1, dev_id_2, port, 7658 PHY_TYPE_PMA_PMD); 7659 if (err) 7660 break; 7661 dev_id_1 = mdio_read(np, port, 7662 NIU_PCS_DEV_ADDR, MII_PHYSID1); 7663 dev_id_2 = mdio_read(np, port, 7664 NIU_PCS_DEV_ADDR, MII_PHYSID2); 7665 err = phy_record(parent, info, dev_id_1, dev_id_2, port, 7666 PHY_TYPE_PCS); 7667 if (err) 7668 break; 7669 dev_id_1 = mii_read(np, port, MII_PHYSID1); 7670 dev_id_2 = mii_read(np, port, MII_PHYSID2); 7671 err = phy_record(parent, info, dev_id_1, dev_id_2, port, 7672 PHY_TYPE_MII); 7673 if (err) 7674 break; 7675 } 7676 niu_unlock_parent(np, flags); 7677 7678 return err; 7679} 7680 7681static int __devinit walk_phys(struct niu *np, struct niu_parent *parent) 7682{ 7683 struct phy_probe_info *info = &parent->phy_probe_info; 7684 int lowest_10g, lowest_1g; 7685 int num_10g, num_1g; 7686 u32 val; 7687 int err; 7688 7689 if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || 7690 !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { 7691 num_10g = 0; 7692 num_1g = 2; 7693 parent->plat_type = PLAT_TYPE_ATCA_CP3220; 7694 parent->num_ports = 4; 7695 val = (phy_encode(PORT_TYPE_1G, 0) | 7696 phy_encode(PORT_TYPE_1G, 1) | 7697 phy_encode(PORT_TYPE_1G, 2) | 7698 phy_encode(PORT_TYPE_1G, 3)); 7699 } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { 7700 num_10g = 2; 7701 num_1g = 0; 7702 parent->num_ports = 2; 7703 val = (phy_encode(PORT_TYPE_10G, 0) | 7704 phy_encode(PORT_TYPE_10G, 1)); 7705 } else { 7706 err = fill_phy_probe_info(np, parent, info); 7707 if (err) 7708 return err; 7709 7710 num_10g = count_10g_ports(info, &lowest_10g); 7711 num_1g = count_1g_ports(info, &lowest_1g); 7712 7713 switch ((num_10g << 4) | num_1g) { 7714 case 0x24: 7715 if (lowest_1g == 10) 7716 parent->plat_type = PLAT_TYPE_VF_P0; 7717 else if (lowest_1g == 26) 7718 parent->plat_type = PLAT_TYPE_VF_P1; 7719 else 7720 goto unknown_vg_1g_port; 7721 7722 /* fallthru */ 7723 case 0x22: 7724 val = (phy_encode(PORT_TYPE_10G, 0) | 7725 phy_encode(PORT_TYPE_10G, 1) | 7726 phy_encode(PORT_TYPE_1G, 2) | 7727 phy_encode(PORT_TYPE_1G, 3)); 7728 break; 7729 7730 case 0x20: 7731 val = (phy_encode(PORT_TYPE_10G, 0) | 7732 phy_encode(PORT_TYPE_10G, 1)); 7733 break; 7734 7735 case 0x10: 7736 val = phy_encode(PORT_TYPE_10G, np->port); 7737 break; 7738 7739 case 0x14: 7740 if (lowest_1g == 10) 7741 parent->plat_type = PLAT_TYPE_VF_P0; 7742 else if (lowest_1g == 26) 7743 parent->plat_type = PLAT_TYPE_VF_P1; 7744 else 7745 goto unknown_vg_1g_port; 7746 7747 /* fallthru */ 7748 case 0x13: 7749 if ((lowest_10g & 0x7) == 0) 7750 val = (phy_encode(PORT_TYPE_10G, 0) | 7751 phy_encode(PORT_TYPE_1G, 1) | 7752 phy_encode(PORT_TYPE_1G, 2) | 7753 phy_encode(PORT_TYPE_1G, 3)); 7754 else 7755 val = (phy_encode(PORT_TYPE_1G, 0) | 7756 phy_encode(PORT_TYPE_10G, 1) | 7757 phy_encode(PORT_TYPE_1G, 2) | 7758 phy_encode(PORT_TYPE_1G, 3)); 7759 break; 7760 7761 case 0x04: 7762 if (lowest_1g == 10) 7763 parent->plat_type = PLAT_TYPE_VF_P0; 7764 else if (lowest_1g == 26) 7765 parent->plat_type = PLAT_TYPE_VF_P1; 7766 else 7767 goto unknown_vg_1g_port; 7768 7769 val = (phy_encode(PORT_TYPE_1G, 0) | 7770 phy_encode(PORT_TYPE_1G, 1) | 7771 phy_encode(PORT_TYPE_1G, 2) | 7772 phy_encode(PORT_TYPE_1G, 3)); 7773 break; 7774 7775 default: 7776 
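			/* The switch key is (num_10g << 4) | num_1g, so
			 * e.g. 0x22 means two 10G plus two 1G ports; any
			 * combination not matched above is a board layout
			 * we do not know how to program.
			 */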
printk(KERN_ERR PFX "Unsupported port config " 7777 "10G[%d] 1G[%d]\n", 7778 num_10g, num_1g); 7779 return -EINVAL; 7780 } 7781 } 7782 7783 parent->port_phy = val; 7784 7785 if (parent->plat_type == PLAT_TYPE_NIU) 7786 niu_n2_divide_channels(parent); 7787 else 7788 niu_divide_channels(parent, num_10g, num_1g); 7789 7790 niu_divide_rdc_groups(parent, num_10g, num_1g); 7791 7792 return 0; 7793 7794unknown_vg_1g_port: 7795 printk(KERN_ERR PFX "Cannot identify platform type, 1gport=%d\n", 7796 lowest_1g); 7797 return -EINVAL; 7798} 7799 7800static int __devinit niu_probe_ports(struct niu *np) 7801{ 7802 struct niu_parent *parent = np->parent; 7803 int err, i; 7804 7805 niudbg(PROBE, "niu_probe_ports(): port_phy[%08x]\n", 7806 parent->port_phy); 7807 7808 if (parent->port_phy == PORT_PHY_UNKNOWN) { 7809 err = walk_phys(np, parent); 7810 if (err) 7811 return err; 7812 7813 niu_set_ldg_timer_res(np, 2); 7814 for (i = 0; i <= LDN_MAX; i++) 7815 niu_ldn_irq_enable(np, i, 0); 7816 } 7817 7818 if (parent->port_phy == PORT_PHY_INVALID) 7819 return -EINVAL; 7820 7821 return 0; 7822} 7823 7824static int __devinit niu_classifier_swstate_init(struct niu *np) 7825{ 7826 struct niu_classifier *cp = &np->clas; 7827 7828 niudbg(PROBE, "niu_classifier_swstate_init: num_tcam(%d)\n", 7829 np->parent->tcam_num_entries); 7830 7831 cp->tcam_index = (u16) np->port; 7832 cp->h1_init = 0xffffffff; 7833 cp->h2_init = 0xffff; 7834 7835 return fflp_early_init(np); 7836} 7837 7838static void __devinit niu_link_config_init(struct niu *np) 7839{ 7840 struct niu_link_config *lp = &np->link_config; 7841 7842 lp->advertising = (ADVERTISED_10baseT_Half | 7843 ADVERTISED_10baseT_Full | 7844 ADVERTISED_100baseT_Half | 7845 ADVERTISED_100baseT_Full | 7846 ADVERTISED_1000baseT_Half | 7847 ADVERTISED_1000baseT_Full | 7848 ADVERTISED_10000baseT_Full | 7849 ADVERTISED_Autoneg); 7850 lp->speed = lp->active_speed = SPEED_INVALID; 7851 lp->duplex = lp->active_duplex = DUPLEX_INVALID; 7852#if 0 7853 lp->loopback_mode = LOOPBACK_MAC; 7854 lp->active_speed = SPEED_10000; 7855 lp->active_duplex = DUPLEX_FULL; 7856#else 7857 lp->loopback_mode = LOOPBACK_DISABLED; 7858#endif 7859} 7860 7861static int __devinit niu_init_mac_ipp_pcs_base(struct niu *np) 7862{ 7863 switch (np->port) { 7864 case 0: 7865 np->mac_regs = np->regs + XMAC_PORT0_OFF; 7866 np->ipp_off = 0x00000; 7867 np->pcs_off = 0x04000; 7868 np->xpcs_off = 0x02000; 7869 break; 7870 7871 case 1: 7872 np->mac_regs = np->regs + XMAC_PORT1_OFF; 7873 np->ipp_off = 0x08000; 7874 np->pcs_off = 0x0a000; 7875 np->xpcs_off = 0x08000; 7876 break; 7877 7878 case 2: 7879 np->mac_regs = np->regs + BMAC_PORT2_OFF; 7880 np->ipp_off = 0x04000; 7881 np->pcs_off = 0x0e000; 7882 np->xpcs_off = ~0UL; 7883 break; 7884 7885 case 3: 7886 np->mac_regs = np->regs + BMAC_PORT3_OFF; 7887 np->ipp_off = 0x0c000; 7888 np->pcs_off = 0x12000; 7889 np->xpcs_off = ~0UL; 7890 break; 7891 7892 default: 7893 dev_err(np->device, PFX "Port %u is invalid, cannot " 7894 "compute MAC block offset.\n", np->port); 7895 return -EINVAL; 7896 } 7897 7898 return 0; 7899} 7900 7901static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map) 7902{ 7903 struct msix_entry msi_vec[NIU_NUM_LDG]; 7904 struct niu_parent *parent = np->parent; 7905 struct pci_dev *pdev = np->pdev; 7906 int i, num_irqs, err; 7907 u8 first_ldg; 7908 7909 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port; 7910 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++) 7911 ldg_num_map[i] = first_ldg + i; 7912 7913 num_irqs = 
(parent->rxchan_per_port[np->port] + 7914 parent->txchan_per_port[np->port] + 7915 (np->port == 0 ? 3 : 1)); 7916 BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports)); 7917 7918retry: 7919 for (i = 0; i < num_irqs; i++) { 7920 msi_vec[i].vector = 0; 7921 msi_vec[i].entry = i; 7922 } 7923 7924 err = pci_enable_msix(pdev, msi_vec, num_irqs); 7925 if (err < 0) { 7926 np->flags &= ~NIU_FLAGS_MSIX; 7927 return; 7928 } 7929 if (err > 0) { 7930 num_irqs = err; 7931 goto retry; 7932 } 7933 7934 np->flags |= NIU_FLAGS_MSIX; 7935 for (i = 0; i < num_irqs; i++) 7936 np->ldg[i].irq = msi_vec[i].vector; 7937 np->num_ldg = num_irqs; 7938} 7939 7940static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map) 7941{ 7942#ifdef CONFIG_SPARC64 7943 struct of_device *op = np->op; 7944 const u32 *int_prop; 7945 int i; 7946 7947 int_prop = of_get_property(op->node, "interrupts", NULL); 7948 if (!int_prop) 7949 return -ENODEV; 7950 7951 for (i = 0; i < op->num_irqs; i++) { 7952 ldg_num_map[i] = int_prop[i]; 7953 np->ldg[i].irq = op->irqs[i]; 7954 } 7955 7956 np->num_ldg = op->num_irqs; 7957 7958 return 0; 7959#else 7960 return -EINVAL; 7961#endif 7962} 7963 7964static int __devinit niu_ldg_init(struct niu *np) 7965{ 7966 struct niu_parent *parent = np->parent; 7967 u8 ldg_num_map[NIU_NUM_LDG]; 7968 int first_chan, num_chan; 7969 int i, err, ldg_rotor; 7970 u8 port; 7971 7972 np->num_ldg = 1; 7973 np->ldg[0].irq = np->dev->irq; 7974 if (parent->plat_type == PLAT_TYPE_NIU) { 7975 err = niu_n2_irq_init(np, ldg_num_map); 7976 if (err) 7977 return err; 7978 } else 7979 niu_try_msix(np, ldg_num_map); 7980 7981 port = np->port; 7982 for (i = 0; i < np->num_ldg; i++) { 7983 struct niu_ldg *lp = &np->ldg[i]; 7984 7985 netif_napi_add(np->dev, &lp->napi, niu_poll, 64); 7986 7987 lp->np = np; 7988 lp->ldg_num = ldg_num_map[i]; 7989 lp->timer = 2; /* XXX */ 7990 7991 /* On N2 NIU the firmware has setup the SID mappings so they go 7992 * to the correct values that will route the LDG to the proper 7993 * interrupt in the NCU interrupt table. 7994 */ 7995 if (np->parent->plat_type != PLAT_TYPE_NIU) { 7996 err = niu_set_ldg_sid(np, lp->ldg_num, port, i); 7997 if (err) 7998 return err; 7999 } 8000 } 8001 8002 /* We adopt the LDG assignment ordering used by the N2 NIU 8003 * 'interrupt' properties because that simplifies a lot of 8004 * things. 
This ordering is: 8005 * 8006 * MAC 8007 * MIF (if port zero) 8008 * SYSERR (if port zero) 8009 * RX channels 8010 * TX channels 8011 */ 8012 8013 ldg_rotor = 0; 8014 8015 err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor], 8016 LDN_MAC(port)); 8017 if (err) 8018 return err; 8019 8020 ldg_rotor++; 8021 if (ldg_rotor == np->num_ldg) 8022 ldg_rotor = 0; 8023 8024 if (port == 0) { 8025 err = niu_ldg_assign_ldn(np, parent, 8026 ldg_num_map[ldg_rotor], 8027 LDN_MIF); 8028 if (err) 8029 return err; 8030 8031 ldg_rotor++; 8032 if (ldg_rotor == np->num_ldg) 8033 ldg_rotor = 0; 8034 8035 err = niu_ldg_assign_ldn(np, parent, 8036 ldg_num_map[ldg_rotor], 8037 LDN_DEVICE_ERROR); 8038 if (err) 8039 return err; 8040 8041 ldg_rotor++; 8042 if (ldg_rotor == np->num_ldg) 8043 ldg_rotor = 0; 8044 8045 } 8046 8047 first_chan = 0; 8048 for (i = 0; i < port; i++) 8049 first_chan += parent->rxchan_per_port[i]; 8050 num_chan = parent->rxchan_per_port[port]; 8051 8052 for (i = first_chan; i < (first_chan + num_chan); i++) { 8053 err = niu_ldg_assign_ldn(np, parent, 8054 ldg_num_map[ldg_rotor], 8055 LDN_RXDMA(i)); 8056 if (err) 8057 return err; 8058 ldg_rotor++; 8059 if (ldg_rotor == np->num_ldg) 8060 ldg_rotor = 0; 8061 } 8062 8063 first_chan = 0; 8064 for (i = 0; i < port; i++) 8065 first_chan += parent->txchan_per_port[i]; 8066 num_chan = parent->txchan_per_port[port]; 8067 for (i = first_chan; i < (first_chan + num_chan); i++) { 8068 err = niu_ldg_assign_ldn(np, parent, 8069 ldg_num_map[ldg_rotor], 8070 LDN_TXDMA(i)); 8071 if (err) 8072 return err; 8073 ldg_rotor++; 8074 if (ldg_rotor == np->num_ldg) 8075 ldg_rotor = 0; 8076 } 8077 8078 return 0; 8079} 8080 8081static void __devexit niu_ldg_free(struct niu *np) 8082{ 8083 if (np->flags & NIU_FLAGS_MSIX) 8084 pci_disable_msix(np->pdev); 8085} 8086 8087static int __devinit niu_get_of_props(struct niu *np) 8088{ 8089#ifdef CONFIG_SPARC64 8090 struct net_device *dev = np->dev; 8091 struct device_node *dp; 8092 const char *phy_type; 8093 const u8 *mac_addr; 8094 const char *model; 8095 int prop_len; 8096 8097 if (np->parent->plat_type == PLAT_TYPE_NIU) 8098 dp = np->op->node; 8099 else 8100 dp = pci_device_to_OF_node(np->pdev); 8101 8102 phy_type = of_get_property(dp, "phy-type", &prop_len); 8103 if (!phy_type) { 8104 dev_err(np->device, PFX "%s: OF node lacks " 8105 "phy-type property\n", 8106 dp->full_name); 8107 return -EINVAL; 8108 } 8109 8110 if (!strcmp(phy_type, "none")) 8111 return -ENODEV; 8112 8113 strcpy(np->vpd.phy_type, phy_type); 8114 8115 if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { 8116 dev_err(np->device, PFX "%s: Illegal phy string [%s].\n", 8117 dp->full_name, np->vpd.phy_type); 8118 return -EINVAL; 8119 } 8120 8121 mac_addr = of_get_property(dp, "local-mac-address", &prop_len); 8122 if (!mac_addr) { 8123 dev_err(np->device, PFX "%s: OF node lacks " 8124 "local-mac-address property\n", 8125 dp->full_name); 8126 return -EINVAL; 8127 } 8128 if (prop_len != dev->addr_len) { 8129 dev_err(np->device, PFX "%s: OF MAC address prop len (%d) " 8130 "is wrong.\n", 8131 dp->full_name, prop_len); 8132 } 8133 memcpy(dev->perm_addr, mac_addr, dev->addr_len); 8134 if (!is_valid_ether_addr(&dev->perm_addr[0])) { 8135 int i; 8136 8137 dev_err(np->device, PFX "%s: OF MAC address is invalid\n", 8138 dp->full_name); 8139 dev_err(np->device, PFX "%s: [ \n", 8140 dp->full_name); 8141 for (i = 0; i < 6; i++) 8142 printk("%02x ", dev->perm_addr[i]); 8143 printk("]\n"); 8144 return -EINVAL; 8145 } 8146 8147 memcpy(dev->dev_addr, dev->perm_addr,
dev->addr_len); 8148 8149 model = of_get_property(dp, "model", &prop_len); 8150 8151 if (model) 8152 strcpy(np->vpd.model, model); 8153 8154 return 0; 8155#else 8156 return -EINVAL; 8157#endif 8158} 8159 8160static int __devinit niu_get_invariants(struct niu *np) 8161{ 8162 int err, have_props; 8163 u32 offset; 8164 8165 err = niu_get_of_props(np); 8166 if (err == -ENODEV) 8167 return err; 8168 8169 have_props = !err; 8170 8171 err = niu_init_mac_ipp_pcs_base(np); 8172 if (err) 8173 return err; 8174 8175 if (have_props) { 8176 err = niu_get_and_validate_port(np); 8177 if (err) 8178 return err; 8179 8180 } else { 8181 if (np->parent->plat_type == PLAT_TYPE_NIU) 8182 return -EINVAL; 8183 8184 nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE); 8185 offset = niu_pci_vpd_offset(np); 8186 niudbg(PROBE, "niu_get_invariants: VPD offset [%08x]\n", 8187 offset); 8188 if (offset) 8189 niu_pci_vpd_fetch(np, offset); 8190 nw64(ESPC_PIO_EN, 0); 8191 8192 if (np->flags & NIU_FLAGS_VPD_VALID) { 8193 niu_pci_vpd_validate(np); 8194 err = niu_get_and_validate_port(np); 8195 if (err) 8196 return err; 8197 } 8198 8199 if (!(np->flags & NIU_FLAGS_VPD_VALID)) { 8200 err = niu_get_and_validate_port(np); 8201 if (err) 8202 return err; 8203 err = niu_pci_probe_sprom(np); 8204 if (err) 8205 return err; 8206 } 8207 } 8208 8209 err = niu_probe_ports(np); 8210 if (err) 8211 return err; 8212 8213 niu_ldg_init(np); 8214 8215 niu_classifier_swstate_init(np); 8216 niu_link_config_init(np); 8217 8218 err = niu_determine_phy_disposition(np); 8219 if (!err) 8220 err = niu_init_link(np); 8221 8222 return err; 8223} 8224 8225static LIST_HEAD(niu_parent_list); 8226static DEFINE_MUTEX(niu_parent_lock); 8227static int niu_parent_index; 8228 8229static ssize_t show_port_phy(struct device *dev, 8230 struct device_attribute *attr, char *buf) 8231{ 8232 struct platform_device *plat_dev = to_platform_device(dev); 8233 struct niu_parent *p = plat_dev->dev.platform_data; 8234 u32 port_phy = p->port_phy; 8235 char *orig_buf = buf; 8236 int i; 8237 8238 if (port_phy == PORT_PHY_UNKNOWN || 8239 port_phy == PORT_PHY_INVALID) 8240 return 0; 8241 8242 for (i = 0; i < p->num_ports; i++) { 8243 const char *type_str; 8244 int type; 8245 8246 type = phy_decode(port_phy, i); 8247 if (type == PORT_TYPE_10G) 8248 type_str = "10G"; 8249 else 8250 type_str = "1G"; 8251 buf += sprintf(buf, 8252 (i == 0) ? "%s" : " %s", 8253 type_str); 8254 } 8255 buf += sprintf(buf, "\n"); 8256 return buf - orig_buf; 8257} 8258 8259static ssize_t show_plat_type(struct device *dev, 8260 struct device_attribute *attr, char *buf) 8261{ 8262 struct platform_device *plat_dev = to_platform_device(dev); 8263 struct niu_parent *p = plat_dev->dev.platform_data; 8264 const char *type_str; 8265 8266 switch (p->plat_type) { 8267 case PLAT_TYPE_ATLAS: 8268 type_str = "atlas"; 8269 break; 8270 case PLAT_TYPE_NIU: 8271 type_str = "niu"; 8272 break; 8273 case PLAT_TYPE_VF_P0: 8274 type_str = "vf_p0"; 8275 break; 8276 case PLAT_TYPE_VF_P1: 8277 type_str = "vf_p1"; 8278 break; 8279 default: 8280 type_str = "unknown"; 8281 break; 8282 } 8283 8284 return sprintf(buf, "%s\n", type_str); 8285} 8286 8287static ssize_t __show_chan_per_port(struct device *dev, 8288 struct device_attribute *attr, char *buf, 8289 int rx) 8290{ 8291 struct platform_device *plat_dev = to_platform_device(dev); 8292 struct niu_parent *p = plat_dev->dev.platform_data; 8293 char *orig_buf = buf; 8294 u8 *arr; 8295 int i; 8296 8297 arr = (rx ? 
p->rxchan_per_port : p->txchan_per_port); 8298 8299 for (i = 0; i < p->num_ports; i++) { 8300 buf += sprintf(buf, 8301 (i == 0) ? "%d" : " %d", 8302 arr[i]); 8303 } 8304 buf += sprintf(buf, "\n"); 8305 8306 return buf - orig_buf; 8307} 8308 8309static ssize_t show_rxchan_per_port(struct device *dev, 8310 struct device_attribute *attr, char *buf) 8311{ 8312 return __show_chan_per_port(dev, attr, buf, 1); 8313} 8314 8315static ssize_t show_txchan_per_port(struct device *dev, 8316 struct device_attribute *attr, char *buf) 8317{ 8318 return __show_chan_per_port(dev, attr, buf, 0); 8319} 8320 8321static ssize_t show_num_ports(struct device *dev, 8322 struct device_attribute *attr, char *buf) 8323{ 8324 struct platform_device *plat_dev = to_platform_device(dev); 8325 struct niu_parent *p = plat_dev->dev.platform_data; 8326 8327 return sprintf(buf, "%d\n", p->num_ports); 8328} 8329 8330static struct device_attribute niu_parent_attributes[] = { 8331 __ATTR(port_phy, S_IRUGO, show_port_phy, NULL), 8332 __ATTR(plat_type, S_IRUGO, show_plat_type, NULL), 8333 __ATTR(rxchan_per_port, S_IRUGO, show_rxchan_per_port, NULL), 8334 __ATTR(txchan_per_port, S_IRUGO, show_txchan_per_port, NULL), 8335 __ATTR(num_ports, S_IRUGO, show_num_ports, NULL), 8336 {} 8337}; 8338 8339static struct niu_parent * __devinit niu_new_parent(struct niu *np, 8340 union niu_parent_id *id, 8341 u8 ptype) 8342{ 8343 struct platform_device *plat_dev; 8344 struct niu_parent *p; 8345 int i; 8346 8347 niudbg(PROBE, "niu_new_parent: Creating new parent.\n"); 8348 8349 plat_dev = platform_device_register_simple("niu", niu_parent_index, 8350 NULL, 0); 8351 if (IS_ERR(plat_dev)) 8352 return NULL; 8353 8354 for (i = 0; attr_name(niu_parent_attributes[i]); i++) { 8355 int err = device_create_file(&plat_dev->dev, 8356 &niu_parent_attributes[i]); 8357 if (err) 8358 goto fail_unregister; 8359 } 8360 8361 p = kzalloc(sizeof(*p), GFP_KERNEL); 8362 if (!p) 8363 goto fail_unregister; 8364 8365 p->index = niu_parent_index++; 8366 8367 plat_dev->dev.platform_data = p; 8368 p->plat_dev = plat_dev; 8369 8370 memcpy(&p->id, id, sizeof(*id)); 8371 p->plat_type = ptype; 8372 INIT_LIST_HEAD(&p->list); 8373 atomic_set(&p->refcnt, 0); 8374 list_add(&p->list, &niu_parent_list); 8375 spin_lock_init(&p->lock); 8376 8377 p->rxdma_clock_divider = 7500; 8378 8379 p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES; 8380 if (p->plat_type == PLAT_TYPE_NIU) 8381 p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES; 8382 8383 for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) { 8384 int index = i - CLASS_CODE_USER_PROG1; 8385 8386 p->tcam_key[index] = TCAM_KEY_TSEL; 8387 p->flow_key[index] = (FLOW_KEY_IPSA | 8388 FLOW_KEY_IPDA | 8389 FLOW_KEY_PROTO | 8390 (FLOW_KEY_L4_BYTE12 << 8391 FLOW_KEY_L4_0_SHIFT) | 8392 (FLOW_KEY_L4_BYTE12 << 8393 FLOW_KEY_L4_1_SHIFT)); 8394 } 8395 8396 for (i = 0; i < LDN_MAX + 1; i++) 8397 p->ldg_map[i] = LDG_INVALID; 8398 8399 return p; 8400 8401fail_unregister: 8402 platform_device_unregister(plat_dev); 8403 return NULL; 8404} 8405 8406static struct niu_parent * __devinit niu_get_parent(struct niu *np, 8407 union niu_parent_id *id, 8408 u8 ptype) 8409{ 8410 struct niu_parent *p, *tmp; 8411 int port = np->port; 8412 8413 niudbg(PROBE, "niu_get_parent: platform_type[%u] port[%u]\n", 8414 ptype, port); 8415 8416 mutex_lock(&niu_parent_lock); 8417 p = NULL; 8418 list_for_each_entry(tmp, &niu_parent_list, list) { 8419 if (!memcmp(id, &tmp->id, sizeof(*id))) { 8420 p = tmp; 8421 break; 8422 } 8423 } 8424 if (!p) 8425 p = niu_new_parent(np, id, ptype); 8426
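	/* Whether we found an existing parent or created a fresh one,
	 * publish this port as a "portN" symlink under the parent's
	 * platform device and take a reference; niu_put_parent()
	 * undoes both at teardown.
	 */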
8427 if (p) { 8428 char port_name[6]; 8429 int err; 8430 8431 sprintf(port_name, "port%d", port); 8432 err = sysfs_create_link(&p->plat_dev->dev.kobj, 8433 &np->device->kobj, 8434 port_name); 8435 if (!err) { 8436 p->ports[port] = np; 8437 atomic_inc(&p->refcnt); 8438 } 8439 } 8440 mutex_unlock(&niu_parent_lock); 8441 8442 return p; 8443} 8444 8445static void niu_put_parent(struct niu *np) 8446{ 8447 struct niu_parent *p = np->parent; 8448 u8 port = np->port; 8449 char port_name[6]; 8450 8451 BUG_ON(!p || p->ports[port] != np); 8452 8453 niudbg(PROBE, "niu_put_parent: port[%u]\n", port); 8454 8455 sprintf(port_name, "port%d", port); 8456 8457 mutex_lock(&niu_parent_lock); 8458 8459 sysfs_remove_link(&p->plat_dev->dev.kobj, port_name); 8460 8461 p->ports[port] = NULL; 8462 np->parent = NULL; 8463 8464 if (atomic_dec_and_test(&p->refcnt)) { 8465 list_del(&p->list); 8466 platform_device_unregister(p->plat_dev); 8467 } 8468 8469 mutex_unlock(&niu_parent_lock); 8470} 8471 8472static void *niu_pci_alloc_coherent(struct device *dev, size_t size, 8473 u64 *handle, gfp_t flag) 8474{ 8475 dma_addr_t dh; 8476 void *ret; 8477 8478 ret = dma_alloc_coherent(dev, size, &dh, flag); 8479 if (ret) 8480 *handle = dh; 8481 return ret; 8482} 8483 8484static void niu_pci_free_coherent(struct device *dev, size_t size, 8485 void *cpu_addr, u64 handle) 8486{ 8487 dma_free_coherent(dev, size, cpu_addr, handle); 8488} 8489 8490static u64 niu_pci_map_page(struct device *dev, struct page *page, 8491 unsigned long offset, size_t size, 8492 enum dma_data_direction direction) 8493{ 8494 return dma_map_page(dev, page, offset, size, direction); 8495} 8496 8497static void niu_pci_unmap_page(struct device *dev, u64 dma_address, 8498 size_t size, enum dma_data_direction direction) 8499{ 8500 return dma_unmap_page(dev, dma_address, size, direction); 8501} 8502 8503static u64 niu_pci_map_single(struct device *dev, void *cpu_addr, 8504 size_t size, 8505 enum dma_data_direction direction) 8506{ 8507 return dma_map_single(dev, cpu_addr, size, direction); 8508} 8509 8510static void niu_pci_unmap_single(struct device *dev, u64 dma_address, 8511 size_t size, 8512 enum dma_data_direction direction) 8513{ 8514 dma_unmap_single(dev, dma_address, size, direction); 8515} 8516 8517static const struct niu_ops niu_pci_ops = { 8518 .alloc_coherent = niu_pci_alloc_coherent, 8519 .free_coherent = niu_pci_free_coherent, 8520 .map_page = niu_pci_map_page, 8521 .unmap_page = niu_pci_unmap_page, 8522 .map_single = niu_pci_map_single, 8523 .unmap_single = niu_pci_unmap_single, 8524}; 8525 8526static void __devinit niu_driver_version(void) 8527{ 8528 static int niu_version_printed; 8529 8530 if (niu_version_printed++ == 0) 8531 pr_info("%s", version); 8532} 8533 8534static struct net_device * __devinit niu_alloc_and_init( 8535 struct device *gen_dev, struct pci_dev *pdev, 8536 struct of_device *op, const struct niu_ops *ops, 8537 u8 port) 8538{ 8539 struct net_device *dev; 8540 struct niu *np; 8541 8542 dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN); 8543 if (!dev) { 8544 dev_err(gen_dev, PFX "Etherdev alloc failed, aborting.\n"); 8545 return NULL; 8546 } 8547 8548 SET_NETDEV_DEV(dev, gen_dev); 8549 8550 np = netdev_priv(dev); 8551 np->dev = dev; 8552 np->pdev = pdev; 8553 np->op = op; 8554 np->device = gen_dev; 8555 np->ops = ops; 8556 8557 np->msg_enable = niu_debug; 8558 8559 spin_lock_init(&np->lock); 8560 INIT_WORK(&np->reset_task, niu_reset_task); 8561 8562 np->port = port; 8563 8564 return dev; 8565} 8566 8567static void __devinit 
niu_assign_netdev_ops(struct net_device *dev) 8568{ 8569 dev->open = niu_open; 8570 dev->stop = niu_close; 8571 dev->get_stats = niu_get_stats; 8572 dev->set_multicast_list = niu_set_rx_mode; 8573 dev->set_mac_address = niu_set_mac_addr; 8574 dev->do_ioctl = niu_ioctl; 8575 dev->tx_timeout = niu_tx_timeout; 8576 dev->hard_start_xmit = niu_start_xmit; 8577 dev->ethtool_ops = &niu_ethtool_ops; 8578 dev->watchdog_timeo = NIU_TX_TIMEOUT; 8579 dev->change_mtu = niu_change_mtu; 8580} 8581 8582static void __devinit niu_device_announce(struct niu *np) 8583{ 8584 struct net_device *dev = np->dev; 8585 DECLARE_MAC_BUF(mac); 8586 8587 pr_info("%s: NIU Ethernet %s\n", 8588 dev->name, print_mac(mac, dev->dev_addr)); 8589 8590 if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) { 8591 pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n", 8592 dev->name, 8593 (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"), 8594 (np->flags & NIU_FLAGS_10G ? "10G" : "1G"), 8595 (np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"), 8596 (np->mac_xcvr == MAC_XCVR_MII ? "MII" : 8597 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")), 8598 np->vpd.phy_type); 8599 } else { 8600 pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n", 8601 dev->name, 8602 (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"), 8603 (np->flags & NIU_FLAGS_10G ? "10G" : "1G"), 8604 (np->flags & NIU_FLAGS_FIBER ? "FIBER" : "COPPER"), 8605 (np->mac_xcvr == MAC_XCVR_MII ? "MII" : 8606 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")), 8607 np->vpd.phy_type); 8608 } 8609} 8610 8611static int __devinit niu_pci_init_one(struct pci_dev *pdev, 8612 const struct pci_device_id *ent) 8613{ 8614 unsigned long niureg_base, niureg_len; 8615 union niu_parent_id parent_id; 8616 struct net_device *dev; 8617 struct niu *np; 8618 int err, pos; 8619 u64 dma_mask; 8620 u16 val16; 8621 8622 niu_driver_version(); 8623 8624 err = pci_enable_device(pdev); 8625 if (err) { 8626 dev_err(&pdev->dev, PFX "Cannot enable PCI device, " 8627 "aborting.\n"); 8628 return err; 8629 } 8630 8631 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) || 8632 !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 8633 dev_err(&pdev->dev, PFX "Cannot find proper PCI device " 8634 "base addresses, aborting.\n"); 8635 err = -ENODEV; 8636 goto err_out_disable_pdev; 8637 } 8638 8639 err = pci_request_regions(pdev, DRV_MODULE_NAME); 8640 if (err) { 8641 dev_err(&pdev->dev, PFX "Cannot obtain PCI resources, " 8642 "aborting.\n"); 8643 goto err_out_disable_pdev; 8644 } 8645 8646 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 8647 if (pos <= 0) { 8648 dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, " 8649 "aborting.\n"); 8650 goto err_out_free_res; 8651 } 8652 8653 dev = niu_alloc_and_init(&pdev->dev, pdev, NULL, 8654 &niu_pci_ops, PCI_FUNC(pdev->devfn)); 8655 if (!dev) { 8656 err = -ENOMEM; 8657 goto err_out_free_res; 8658 } 8659 np = netdev_priv(dev); 8660 8661 memset(&parent_id, 0, sizeof(parent_id)); 8662 parent_id.pci.domain = pci_domain_nr(pdev->bus); 8663 parent_id.pci.bus = pdev->bus->number; 8664 parent_id.pci.device = PCI_SLOT(pdev->devfn); 8665 8666 np->parent = niu_get_parent(np, &parent_id, 8667 PLAT_TYPE_ATLAS); 8668 if (!np->parent) { 8669 err = -ENOMEM; 8670 goto err_out_free_dev; 8671 } 8672 8673 pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16); 8674 val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN; 8675 val16 |= (PCI_EXP_DEVCTL_CERE | 8676 PCI_EXP_DEVCTL_NFERE | 8677 PCI_EXP_DEVCTL_FERE | 8678 PCI_EXP_DEVCTL_URRE | 8679 PCI_EXP_DEVCTL_RELAX_EN); 8680 
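	/* Write DEVCTL back with no-snoop cleared and error reporting
	 * (correctable through unsupported-request) plus relaxed
	 * ordering enabled.
	 */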
pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16); 8681 8682 dma_mask = DMA_44BIT_MASK; 8683 err = pci_set_dma_mask(pdev, dma_mask); 8684 if (!err) { 8685 dev->features |= NETIF_F_HIGHDMA; 8686 err = pci_set_consistent_dma_mask(pdev, dma_mask); 8687 if (err) { 8688 dev_err(&pdev->dev, PFX "Unable to obtain 44 bit " 8689 "DMA for consistent allocations, " 8690 "aborting.\n"); 8691 goto err_out_release_parent; 8692 } 8693 } 8694 if (err || dma_mask == DMA_32BIT_MASK) { 8695 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 8696 if (err) { 8697 dev_err(&pdev->dev, PFX "No usable DMA configuration, " 8698 "aborting.\n"); 8699 goto err_out_release_parent; 8700 } 8701 } 8702 8703 dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM); 8704 8705 niureg_base = pci_resource_start(pdev, 0); 8706 niureg_len = pci_resource_len(pdev, 0); 8707 8708 np->regs = ioremap_nocache(niureg_base, niureg_len); 8709 if (!np->regs) { 8710 dev_err(&pdev->dev, PFX "Cannot map device registers, " 8711 "aborting.\n"); 8712 err = -ENOMEM; 8713 goto err_out_release_parent; 8714 } 8715 8716 pci_set_master(pdev); 8717 pci_save_state(pdev); 8718 8719 dev->irq = pdev->irq; 8720 8721 niu_assign_netdev_ops(dev); 8722 8723 err = niu_get_invariants(np); 8724 if (err) { 8725 if (err != -ENODEV) 8726 dev_err(&pdev->dev, PFX "Problem fetching invariants " 8727 "of chip, aborting.\n"); 8728 goto err_out_iounmap; 8729 } 8730 8731 err = register_netdev(dev); 8732 if (err) { 8733 dev_err(&pdev->dev, PFX "Cannot register net device, " 8734 "aborting.\n"); 8735 goto err_out_iounmap; 8736 } 8737 8738 pci_set_drvdata(pdev, dev); 8739 8740 niu_device_announce(np); 8741 8742 return 0; 8743 8744err_out_iounmap: 8745 if (np->regs) { 8746 iounmap(np->regs); 8747 np->regs = NULL; 8748 } 8749 8750err_out_release_parent: 8751 niu_put_parent(np); 8752 8753err_out_free_dev: 8754 free_netdev(dev); 8755 8756err_out_free_res: 8757 pci_release_regions(pdev); 8758 8759err_out_disable_pdev: 8760 pci_disable_device(pdev); 8761 pci_set_drvdata(pdev, NULL); 8762 8763 return err; 8764} 8765 8766static void __devexit niu_pci_remove_one(struct pci_dev *pdev) 8767{ 8768 struct net_device *dev = pci_get_drvdata(pdev); 8769 8770 if (dev) { 8771 struct niu *np = netdev_priv(dev); 8772 8773 unregister_netdev(dev); 8774 if (np->regs) { 8775 iounmap(np->regs); 8776 np->regs = NULL; 8777 } 8778 8779 niu_ldg_free(np); 8780 8781 niu_put_parent(np); 8782 8783 free_netdev(dev); 8784 pci_release_regions(pdev); 8785 pci_disable_device(pdev); 8786 pci_set_drvdata(pdev, NULL); 8787 } 8788} 8789 8790static int niu_suspend(struct pci_dev *pdev, pm_message_t state) 8791{ 8792 struct net_device *dev = pci_get_drvdata(pdev); 8793 struct niu *np = netdev_priv(dev); 8794 unsigned long flags; 8795 8796 if (!netif_running(dev)) 8797 return 0; 8798 8799 flush_scheduled_work(); 8800 niu_netif_stop(np); 8801 8802 del_timer_sync(&np->timer); 8803 8804 spin_lock_irqsave(&np->lock, flags); 8805 niu_enable_interrupts(np, 0); 8806 spin_unlock_irqrestore(&np->lock, flags); 8807 8808 netif_device_detach(dev); 8809 8810 spin_lock_irqsave(&np->lock, flags); 8811 niu_stop_hw(np); 8812 spin_unlock_irqrestore(&np->lock, flags); 8813 8814 pci_save_state(pdev); 8815 8816 return 0; 8817} 8818 8819static int niu_resume(struct pci_dev *pdev) 8820{ 8821 struct net_device *dev = pci_get_drvdata(pdev); 8822 struct niu *np = netdev_priv(dev); 8823 unsigned long flags; 8824 int err; 8825 8826 if (!netif_running(dev)) 8827 return 0; 8828 8829 pci_restore_state(pdev); 8830 8831 netif_device_attach(dev); 8832 8833 
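	/* Reprogram the chip and restart the timer under np->lock so a
	 * stray interrupt cannot observe the device half-restarted.
	 */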
spin_lock_irqsave(&np->lock, flags); 8834 8835 err = niu_init_hw(np); 8836 if (!err) { 8837 np->timer.expires = jiffies + HZ; 8838 add_timer(&np->timer); 8839 niu_netif_start(np); 8840 } 8841 8842 spin_unlock_irqrestore(&np->lock, flags); 8843 8844 return err; 8845} 8846 8847static struct pci_driver niu_pci_driver = { 8848 .name = DRV_MODULE_NAME, 8849 .id_table = niu_pci_tbl, 8850 .probe = niu_pci_init_one, 8851 .remove = __devexit_p(niu_pci_remove_one), 8852 .suspend = niu_suspend, 8853 .resume = niu_resume, 8854}; 8855 8856#ifdef CONFIG_SPARC64 8857static void *niu_phys_alloc_coherent(struct device *dev, size_t size, 8858 u64 *dma_addr, gfp_t flag) 8859{ 8860 unsigned long order = get_order(size); 8861 unsigned long page = __get_free_pages(flag, order); 8862 8863 if (page == 0UL) 8864 return NULL; 8865 memset((char *)page, 0, PAGE_SIZE << order); 8866 *dma_addr = __pa(page); 8867 8868 return (void *) page; 8869} 8870 8871static void niu_phys_free_coherent(struct device *dev, size_t size, 8872 void *cpu_addr, u64 handle) 8873{ 8874 unsigned long order = get_order(size); 8875 8876 free_pages((unsigned long) cpu_addr, order); 8877} 8878 8879static u64 niu_phys_map_page(struct device *dev, struct page *page, 8880 unsigned long offset, size_t size, 8881 enum dma_data_direction direction) 8882{ 8883 return page_to_phys(page) + offset; 8884} 8885 8886static void niu_phys_unmap_page(struct device *dev, u64 dma_address, 8887 size_t size, enum dma_data_direction direction) 8888{ 8889 /* Nothing to do. */ 8890} 8891 8892static u64 niu_phys_map_single(struct device *dev, void *cpu_addr, 8893 size_t size, 8894 enum dma_data_direction direction) 8895{ 8896 return __pa(cpu_addr); 8897} 8898 8899static void niu_phys_unmap_single(struct device *dev, u64 dma_address, 8900 size_t size, 8901 enum dma_data_direction direction) 8902{ 8903 /* Nothing to do. 
*/ 8904} 8905 8906static const struct niu_ops niu_phys_ops = { 8907 .alloc_coherent = niu_phys_alloc_coherent, 8908 .free_coherent = niu_phys_free_coherent, 8909 .map_page = niu_phys_map_page, 8910 .unmap_page = niu_phys_unmap_page, 8911 .map_single = niu_phys_map_single, 8912 .unmap_single = niu_phys_unmap_single, 8913}; 8914 8915static unsigned long res_size(struct resource *r) 8916{ 8917 return r->end - r->start + 1UL; 8918} 8919 8920static int __devinit niu_of_probe(struct of_device *op, 8921 const struct of_device_id *match) 8922{ 8923 union niu_parent_id parent_id; 8924 struct net_device *dev; 8925 struct niu *np; 8926 const u32 *reg; 8927 int err; 8928 8929 niu_driver_version(); 8930 8931 reg = of_get_property(op->node, "reg", NULL); 8932 if (!reg) { 8933 dev_err(&op->dev, PFX "%s: No 'reg' property, aborting.\n", 8934 op->node->full_name); 8935 return -ENODEV; 8936 } 8937 8938 dev = niu_alloc_and_init(&op->dev, NULL, op, 8939 &niu_phys_ops, reg[0] & 0x1); 8940 if (!dev) { 8941 err = -ENOMEM; 8942 goto err_out; 8943 } 8944 np = netdev_priv(dev); 8945 8946 memset(&parent_id, 0, sizeof(parent_id)); 8947 parent_id.of = of_get_parent(op->node); 8948 8949 np->parent = niu_get_parent(np, &parent_id, 8950 PLAT_TYPE_NIU); 8951 if (!np->parent) { 8952 err = -ENOMEM; 8953 goto err_out_free_dev; 8954 } 8955 8956 dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM); 8957 8958 np->regs = of_ioremap(&op->resource[1], 0, 8959 res_size(&op->resource[1]), 8960 "niu regs"); 8961 if (!np->regs) { 8962 dev_err(&op->dev, PFX "Cannot map device registers, " 8963 "aborting.\n"); 8964 err = -ENOMEM; 8965 goto err_out_release_parent; 8966 } 8967 8968 np->vir_regs_1 = of_ioremap(&op->resource[2], 0, 8969 res_size(&op->resource[2]), 8970 "niu vregs-1"); 8971 if (!np->vir_regs_1) { 8972 dev_err(&op->dev, PFX "Cannot map device vir registers 1, " 8973 "aborting.\n"); 8974 err = -ENOMEM; 8975 goto err_out_iounmap; 8976 } 8977 8978 np->vir_regs_2 = of_ioremap(&op->resource[3], 0, 8979 res_size(&op->resource[3]), 8980 "niu vregs-2"); 8981 if (!np->vir_regs_2) { 8982 dev_err(&op->dev, PFX "Cannot map device vir registers 2, " 8983 "aborting.\n"); 8984 err = -ENOMEM; 8985 goto err_out_iounmap; 8986 } 8987 8988 niu_assign_netdev_ops(dev); 8989 8990 err = niu_get_invariants(np); 8991 if (err) { 8992 if (err != -ENODEV) 8993 dev_err(&op->dev, PFX "Problem fetching invariants " 8994 "of chip, aborting.\n"); 8995 goto err_out_iounmap; 8996 } 8997 8998 err = register_netdev(dev); 8999 if (err) { 9000 dev_err(&op->dev, PFX "Cannot register net device, " 9001 "aborting.\n"); 9002 goto err_out_iounmap; 9003 } 9004 9005 dev_set_drvdata(&op->dev, dev); 9006 9007 niu_device_announce(np); 9008 9009 return 0; 9010 9011err_out_iounmap: 9012 if (np->vir_regs_1) { 9013 of_iounmap(&op->resource[2], np->vir_regs_1, 9014 res_size(&op->resource[2])); 9015 np->vir_regs_1 = NULL; 9016 } 9017 9018 if (np->vir_regs_2) { 9019 of_iounmap(&op->resource[3], np->vir_regs_2, 9020 res_size(&op->resource[3])); 9021 np->vir_regs_2 = NULL; 9022 } 9023 9024 if (np->regs) { 9025 of_iounmap(&op->resource[1], np->regs, 9026 res_size(&op->resource[1])); 9027 np->regs = NULL; 9028 } 9029 9030err_out_release_parent: 9031 niu_put_parent(np); 9032 9033err_out_free_dev: 9034 free_netdev(dev); 9035 9036err_out: 9037 return err; 9038} 9039 9040static int __devexit niu_of_remove(struct of_device *op) 9041{ 9042 struct net_device *dev = dev_get_drvdata(&op->dev); 9043 9044 if (dev) { 9045 struct niu *np = netdev_priv(dev); 9046 9047 unregister_netdev(dev); 9048 9049 if 
(np->vir_regs_1) { 9050 of_iounmap(&op->resource[2], np->vir_regs_1, 9051 res_size(&op->resource[2])); 9052 np->vir_regs_1 = NULL; 9053 } 9054 9055 if (np->vir_regs_2) { 9056 of_iounmap(&op->resource[3], np->vir_regs_2, 9057 res_size(&op->resource[3])); 9058 np->vir_regs_2 = NULL; 9059 } 9060 9061 if (np->regs) { 9062 of_iounmap(&op->resource[1], np->regs, 9063 res_size(&op->resource[1])); 9064 np->regs = NULL; 9065 } 9066 9067 niu_ldg_free(np); 9068 9069 niu_put_parent(np); 9070 9071 free_netdev(dev); 9072 dev_set_drvdata(&op->dev, NULL); 9073 } 9074 return 0; 9075} 9076 9077static struct of_device_id niu_match[] = { 9078 { 9079 .name = "network", 9080 .compatible = "SUNW,niusl", 9081 }, 9082 {}, 9083}; 9084MODULE_DEVICE_TABLE(of, niu_match); 9085 9086static struct of_platform_driver niu_of_driver = { 9087 .name = "niu", 9088 .match_table = niu_match, 9089 .probe = niu_of_probe, 9090 .remove = __devexit_p(niu_of_remove), 9091}; 9092 9093#endif /* CONFIG_SPARC64 */ 9094 9095static int __init niu_init(void) 9096{ 9097 int err = 0; 9098 9099 BUILD_BUG_ON(PAGE_SIZE < 4 * 1024); 9100 9101 niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT); 9102 9103#ifdef CONFIG_SPARC64 9104 err = of_register_driver(&niu_of_driver, &of_bus_type); 9105#endif 9106 9107 if (!err) { 9108 err = pci_register_driver(&niu_pci_driver); 9109#ifdef CONFIG_SPARC64 9110 if (err) 9111 of_unregister_driver(&niu_of_driver); 9112#endif 9113 } 9114 9115 return err; 9116} 9117 9118static void __exit niu_exit(void) 9119{ 9120 pci_unregister_driver(&niu_pci_driver); 9121#ifdef CONFIG_SPARC64 9122 of_unregister_driver(&niu_of_driver); 9123#endif 9124} 9125 9126module_init(niu_init); 9127module_exit(niu_exit);
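
/* For reference: each Neptune parent registers as platform device
 * /sys/devices/platform/niu.N, carrying the read-only attributes
 * created in niu_new_parent().  On a hypothetical dual 10G card the
 * attributes might read (values illustrative, not taken from real
 * hardware):
 *
 *	# cat /sys/devices/platform/niu.0/port_phy
 *	10G 10G
 *	# cat /sys/devices/platform/niu.0/rxchan_per_port
 *	8 8
 *	# cat /sys/devices/platform/niu.0/num_ports
 *	2
 */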