Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.24-rc8 8121 lines 183 kB view raw
1/* niu.c: Neptune ethernet driver. 2 * 3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net) 4 */ 5 6#include <linux/module.h> 7#include <linux/init.h> 8#include <linux/pci.h> 9#include <linux/dma-mapping.h> 10#include <linux/netdevice.h> 11#include <linux/ethtool.h> 12#include <linux/etherdevice.h> 13#include <linux/platform_device.h> 14#include <linux/delay.h> 15#include <linux/bitops.h> 16#include <linux/mii.h> 17#include <linux/if_ether.h> 18#include <linux/if_vlan.h> 19#include <linux/ip.h> 20#include <linux/in.h> 21#include <linux/ipv6.h> 22#include <linux/log2.h> 23#include <linux/jiffies.h> 24#include <linux/crc32.h> 25 26#include <linux/io.h> 27 28#ifdef CONFIG_SPARC64 29#include <linux/of_device.h> 30#endif 31 32#include "niu.h" 33 34#define DRV_MODULE_NAME "niu" 35#define PFX DRV_MODULE_NAME ": " 36#define DRV_MODULE_VERSION "0.6" 37#define DRV_MODULE_RELDATE "January 5, 2008" 38 39static char version[] __devinitdata = 40 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 41 42MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); 43MODULE_DESCRIPTION("NIU ethernet driver"); 44MODULE_LICENSE("GPL"); 45MODULE_VERSION(DRV_MODULE_VERSION); 46 47#ifndef DMA_44BIT_MASK 48#define DMA_44BIT_MASK 0x00000fffffffffffULL 49#endif 50 51#ifndef readq 52static u64 readq(void __iomem *reg) 53{ 54 return (((u64)readl(reg + 0x4UL) << 32) | 55 (u64)readl(reg)); 56} 57 58static void writeq(u64 val, void __iomem *reg) 59{ 60 writel(val & 0xffffffff, reg); 61 writel(val >> 32, reg + 0x4UL); 62} 63#endif 64 65static struct pci_device_id niu_pci_tbl[] = { 66 {PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)}, 67 {} 68}; 69 70MODULE_DEVICE_TABLE(pci, niu_pci_tbl); 71 72#define NIU_TX_TIMEOUT (5 * HZ) 73 74#define nr64(reg) readq(np->regs + (reg)) 75#define nw64(reg, val) writeq((val), np->regs + (reg)) 76 77#define nr64_mac(reg) readq(np->mac_regs + (reg)) 78#define nw64_mac(reg, val) writeq((val), np->mac_regs + (reg)) 79 80#define nr64_ipp(reg) readq(np->regs + np->ipp_off + (reg)) 81#define nw64_ipp(reg, val) writeq((val), np->regs + np->ipp_off + (reg)) 82 83#define nr64_pcs(reg) readq(np->regs + np->pcs_off + (reg)) 84#define nw64_pcs(reg, val) writeq((val), np->regs + np->pcs_off + (reg)) 85 86#define nr64_xpcs(reg) readq(np->regs + np->xpcs_off + (reg)) 87#define nw64_xpcs(reg, val) writeq((val), np->regs + np->xpcs_off + (reg)) 88 89#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) 90 91static int niu_debug; 92static int debug = -1; 93module_param(debug, int, 0); 94MODULE_PARM_DESC(debug, "NIU debug level"); 95 96#define niudbg(TYPE, f, a...) \ 97do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \ 98 printk(KERN_DEBUG PFX f, ## a); \ 99} while (0) 100 101#define niuinfo(TYPE, f, a...) \ 102do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \ 103 printk(KERN_INFO PFX f, ## a); \ 104} while (0) 105 106#define niuwarn(TYPE, f, a...) \ 107do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \ 108 printk(KERN_WARNING PFX f, ## a); \ 109} while (0) 110 111#define niu_lock_parent(np, flags) \ 112 spin_lock_irqsave(&np->parent->lock, flags) 113#define niu_unlock_parent(np, flags) \ 114 spin_unlock_irqrestore(&np->parent->lock, flags) 115 116static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg, 117 u64 bits, int limit, int delay) 118{ 119 while (--limit >= 0) { 120 u64 val = nr64_mac(reg); 121 122 if (!(val & bits)) 123 break; 124 udelay(delay); 125 } 126 if (limit < 0) 127 return -ENODEV; 128 return 0; 129} 130 131static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg, 132 u64 bits, int limit, int delay, 133 const char *reg_name) 134{ 135 int err; 136 137 nw64_mac(reg, bits); 138 err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay); 139 if (err) 140 dev_err(np->device, PFX "%s: bits (%llx) of register %s " 141 "would not clear, val[%llx]\n", 142 np->dev->name, (unsigned long long) bits, reg_name, 143 (unsigned long long) nr64_mac(reg)); 144 return err; 145} 146 147#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \ 148({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ 149 __niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \ 150}) 151 152static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg, 153 u64 bits, int limit, int delay) 154{ 155 while (--limit >= 0) { 156 u64 val = nr64_ipp(reg); 157 158 if (!(val & bits)) 159 break; 160 udelay(delay); 161 } 162 if (limit < 0) 163 return -ENODEV; 164 return 0; 165} 166 167static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg, 168 u64 bits, int limit, int delay, 169 const char *reg_name) 170{ 171 int err; 172 u64 val; 173 174 val = nr64_ipp(reg); 175 val |= bits; 176 nw64_ipp(reg, val); 177 178 err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay); 179 if (err) 180 dev_err(np->device, PFX "%s: bits (%llx) of register %s " 181 "would not clear, val[%llx]\n", 182 np->dev->name, (unsigned long long) bits, reg_name, 183 (unsigned long long) nr64_ipp(reg)); 184 return err; 185} 186 187#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \ 188({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ 189 __niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \ 190}) 191 192static int __niu_wait_bits_clear(struct niu *np, unsigned long reg, 193 u64 bits, int limit, int delay) 194{ 195 while (--limit >= 0) { 196 u64 val = nr64(reg); 197 198 if (!(val & bits)) 199 break; 200 udelay(delay); 201 } 202 if (limit < 0) 203 return -ENODEV; 204 return 0; 205} 206 207#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \ 208({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ 209 __niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \ 210}) 211 212static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg, 213 u64 bits, int limit, int delay, 214 const char *reg_name) 215{ 216 int err; 217 218 nw64(reg, bits); 219 err = __niu_wait_bits_clear(np, reg, bits, limit, delay); 220 if (err) 221 dev_err(np->device, PFX "%s: bits (%llx) of register %s " 222 "would not clear, val[%llx]\n", 223 np->dev->name, (unsigned long long) bits, reg_name, 224 (unsigned long long) nr64(reg)); 225 return err; 226} 227 228#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \ 229({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ 230 __niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \ 231}) 232 233static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on) 234{ 235 u64 val = (u64) lp->timer; 236 237 if (on) 238 val |= LDG_IMGMT_ARM; 239 240 nw64(LDG_IMGMT(lp->ldg_num), val); 241} 242 243static int niu_ldn_irq_enable(struct niu *np, int ldn, int on) 244{ 245 unsigned long mask_reg, bits; 246 u64 val; 247 248 if (ldn < 0 || ldn > LDN_MAX) 249 return -EINVAL; 250 251 if (ldn < 64) { 252 mask_reg = LD_IM0(ldn); 253 bits = LD_IM0_MASK; 254 } else { 255 mask_reg = LD_IM1(ldn - 64); 256 bits = LD_IM1_MASK; 257 } 258 259 val = nr64(mask_reg); 260 if (on) 261 val &= ~bits; 262 else 263 val |= bits; 264 nw64(mask_reg, val); 265 266 return 0; 267} 268 269static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on) 270{ 271 struct niu_parent *parent = np->parent; 272 int i; 273 274 for (i = 0; i <= LDN_MAX; i++) { 275 int err; 276 277 if (parent->ldg_map[i] != lp->ldg_num) 278 continue; 279 280 err = niu_ldn_irq_enable(np, i, on); 281 if (err) 282 return err; 283 } 284 return 0; 285} 286 287static int niu_enable_interrupts(struct niu *np, int on) 288{ 289 int i; 290 291 for (i = 0; i < np->num_ldg; i++) { 292 struct niu_ldg *lp = &np->ldg[i]; 293 int err; 294 295 err = niu_enable_ldn_in_ldg(np, lp, on); 296 if (err) 297 return err; 298 } 299 for (i = 0; i < np->num_ldg; i++) 300 niu_ldg_rearm(np, &np->ldg[i], on); 301 302 return 0; 303} 304 305static u32 phy_encode(u32 type, int port) 306{ 307 return (type << (port * 2)); 308} 309 310static u32 phy_decode(u32 val, int port) 311{ 312 return (val >> (port * 2)) & PORT_TYPE_MASK; 313} 314 315static int mdio_wait(struct niu *np) 316{ 317 int limit = 1000; 318 u64 val; 319 320 while (--limit > 0) { 321 val = nr64(MIF_FRAME_OUTPUT); 322 if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1) 323 return val & MIF_FRAME_OUTPUT_DATA; 324 325 udelay(10); 326 } 327 328 return -ENODEV; 329} 330 331static int mdio_read(struct niu *np, int port, int dev, int reg) 332{ 333 int err; 334 335 nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg)); 336 err = mdio_wait(np); 337 if (err < 0) 338 return err; 339 340 nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev)); 341 return mdio_wait(np); 342} 343 344static int mdio_write(struct niu *np, int port, int dev, int reg, int data) 345{ 346 int err; 347 348 nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg)); 349 err = mdio_wait(np); 350 if (err < 0) 351 return err; 352 353 nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data)); 354 err = mdio_wait(np); 355 if (err < 0) 356 return err; 357 358 return 0; 359} 360 361static int mii_read(struct niu *np, int port, int reg) 362{ 363 nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg)); 364 return mdio_wait(np); 365} 366 367static int mii_write(struct niu *np, int port, int reg, int data) 368{ 369 int err; 370 371 nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data)); 372 err = mdio_wait(np); 373 if (err < 0) 374 return err; 375 376 return 0; 377} 378 379static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val) 380{ 381 int err; 382 383 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 384 ESR2_TI_PLL_TX_CFG_L(channel), 385 val & 0xffff); 386 if (!err) 387 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 388 ESR2_TI_PLL_TX_CFG_H(channel), 389 val >> 16); 390 return err; 391} 392 393static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val) 394{ 395 int err; 396 397 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 398 ESR2_TI_PLL_RX_CFG_L(channel), 399 val & 0xffff); 400 if (!err) 401 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 402 ESR2_TI_PLL_RX_CFG_H(channel), 403 val >> 16); 404 return err; 405} 406 407/* Mode is always 10G fiber. */ 408static int serdes_init_niu(struct niu *np) 409{ 410 struct niu_link_config *lp = &np->link_config; 411 u32 tx_cfg, rx_cfg; 412 unsigned long i; 413 414 tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV); 415 rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT | 416 PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH | 417 PLL_RX_CFG_EQ_LP_ADAPTIVE); 418 419 if (lp->loopback_mode == LOOPBACK_PHY) { 420 u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS; 421 422 mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, 423 ESR2_TI_PLL_TEST_CFG_L, test_cfg); 424 425 tx_cfg |= PLL_TX_CFG_ENTEST; 426 rx_cfg |= PLL_RX_CFG_ENTEST; 427 } 428 429 /* Initialize all 4 lanes of the SERDES. */ 430 for (i = 0; i < 4; i++) { 431 int err = esr2_set_tx_cfg(np, i, tx_cfg); 432 if (err) 433 return err; 434 } 435 436 for (i = 0; i < 4; i++) { 437 int err = esr2_set_rx_cfg(np, i, rx_cfg); 438 if (err) 439 return err; 440 } 441 442 return 0; 443} 444 445static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val) 446{ 447 int err; 448 449 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan)); 450 if (err >= 0) { 451 *val = (err & 0xffff); 452 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, 453 ESR_RXTX_CTRL_H(chan)); 454 if (err >= 0) 455 *val |= ((err & 0xffff) << 16); 456 err = 0; 457 } 458 return err; 459} 460 461static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val) 462{ 463 int err; 464 465 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, 466 ESR_GLUE_CTRL0_L(chan)); 467 if (err >= 0) { 468 *val = (err & 0xffff); 469 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, 470 ESR_GLUE_CTRL0_H(chan)); 471 if (err >= 0) { 472 *val |= ((err & 0xffff) << 16); 473 err = 0; 474 } 475 } 476 return err; 477} 478 479static int esr_read_reset(struct niu *np, u32 *val) 480{ 481 int err; 482 483 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, 484 ESR_RXTX_RESET_CTRL_L); 485 if (err >= 0) { 486 *val = (err & 0xffff); 487 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, 488 ESR_RXTX_RESET_CTRL_H); 489 if (err >= 0) { 490 *val |= ((err & 0xffff) << 16); 491 err = 0; 492 } 493 } 494 return err; 495} 496 497static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val) 498{ 499 int err; 500 501 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, 502 ESR_RXTX_CTRL_L(chan), val & 0xffff); 503 if (!err) 504 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, 505 ESR_RXTX_CTRL_H(chan), (val >> 16)); 506 return err; 507} 508 509static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val) 510{ 511 int err; 512 513 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, 514 ESR_GLUE_CTRL0_L(chan), val & 0xffff); 515 if (!err) 516 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, 517 ESR_GLUE_CTRL0_H(chan), (val >> 16)); 518 return err; 519} 520 521static int esr_reset(struct niu *np) 522{ 523 u32 reset; 524 int err; 525 526 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, 527 ESR_RXTX_RESET_CTRL_L, 0x0000); 528 if (err) 529 return err; 530 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, 531 ESR_RXTX_RESET_CTRL_H, 0xffff); 532 if (err) 533 return err; 534 udelay(200); 535 536 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, 537 ESR_RXTX_RESET_CTRL_L, 0xffff); 538 if (err) 539 return err; 540 udelay(200); 541 542 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, 543 ESR_RXTX_RESET_CTRL_H, 0x0000); 544 if (err) 545 return err; 546 udelay(200); 547 548 err = esr_read_reset(np, &reset); 549 if (err) 550 return err; 551 if (reset != 0) { 552 dev_err(np->device, PFX "Port %u ESR_RESET " 553 "did not clear [%08x]\n", 554 np->port, reset); 555 return -ENODEV; 556 } 557 558 return 0; 559} 560 561static int serdes_init_10g(struct niu *np) 562{ 563 struct niu_link_config *lp = &np->link_config; 564 unsigned long ctrl_reg, test_cfg_reg, i; 565 u64 ctrl_val, test_cfg_val, sig, mask, val; 566 int err; 567 568 switch (np->port) { 569 case 0: 570 ctrl_reg = ENET_SERDES_0_CTRL_CFG; 571 test_cfg_reg = ENET_SERDES_0_TEST_CFG; 572 break; 573 case 1: 574 ctrl_reg = ENET_SERDES_1_CTRL_CFG; 575 test_cfg_reg = ENET_SERDES_1_TEST_CFG; 576 break; 577 578 default: 579 return -EINVAL; 580 } 581 ctrl_val = (ENET_SERDES_CTRL_SDET_0 | 582 ENET_SERDES_CTRL_SDET_1 | 583 ENET_SERDES_CTRL_SDET_2 | 584 ENET_SERDES_CTRL_SDET_3 | 585 (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) | 586 (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) | 587 (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) | 588 (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) | 589 (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) | 590 (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) | 591 (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) | 592 (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT)); 593 test_cfg_val = 0; 594 595 if (lp->loopback_mode == LOOPBACK_PHY) { 596 test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK << 597 ENET_SERDES_TEST_MD_0_SHIFT) | 598 (ENET_TEST_MD_PAD_LOOPBACK << 599 ENET_SERDES_TEST_MD_1_SHIFT) | 600 (ENET_TEST_MD_PAD_LOOPBACK << 601 ENET_SERDES_TEST_MD_2_SHIFT) | 602 (ENET_TEST_MD_PAD_LOOPBACK << 603 ENET_SERDES_TEST_MD_3_SHIFT)); 604 } 605 606 nw64(ctrl_reg, ctrl_val); 607 nw64(test_cfg_reg, test_cfg_val); 608 609 /* Initialize all 4 lanes of the SERDES. */ 610 for (i = 0; i < 4; i++) { 611 u32 rxtx_ctrl, glue0; 612 613 err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl); 614 if (err) 615 return err; 616 err = esr_read_glue0(np, i, &glue0); 617 if (err) 618 return err; 619 620 rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO); 621 rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH | 622 (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT)); 623 624 glue0 &= ~(ESR_GLUE_CTRL0_SRATE | 625 ESR_GLUE_CTRL0_THCNT | 626 ESR_GLUE_CTRL0_BLTIME); 627 glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB | 628 (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) | 629 (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) | 630 (BLTIME_300_CYCLES << 631 ESR_GLUE_CTRL0_BLTIME_SHIFT)); 632 633 err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl); 634 if (err) 635 return err; 636 err = esr_write_glue0(np, i, glue0); 637 if (err) 638 return err; 639 } 640 641 err = esr_reset(np); 642 if (err) 643 return err; 644 645 sig = nr64(ESR_INT_SIGNALS); 646 switch (np->port) { 647 case 0: 648 mask = ESR_INT_SIGNALS_P0_BITS; 649 val = (ESR_INT_SRDY0_P0 | 650 ESR_INT_DET0_P0 | 651 ESR_INT_XSRDY_P0 | 652 ESR_INT_XDP_P0_CH3 | 653 ESR_INT_XDP_P0_CH2 | 654 ESR_INT_XDP_P0_CH1 | 655 ESR_INT_XDP_P0_CH0); 656 break; 657 658 case 1: 659 mask = ESR_INT_SIGNALS_P1_BITS; 660 val = (ESR_INT_SRDY0_P1 | 661 ESR_INT_DET0_P1 | 662 ESR_INT_XSRDY_P1 | 663 ESR_INT_XDP_P1_CH3 | 664 ESR_INT_XDP_P1_CH2 | 665 ESR_INT_XDP_P1_CH1 | 666 ESR_INT_XDP_P1_CH0); 667 break; 668 669 default: 670 return -EINVAL; 671 } 672 673 if ((sig & mask) != val) { 674 dev_err(np->device, PFX "Port %u signal bits [%08x] are not " 675 "[%08x]\n", np->port, (int) (sig & mask), (int) val); 676 return -ENODEV; 677 } 678 679 return 0; 680} 681 682static int serdes_init_1g(struct niu *np) 683{ 684 u64 val; 685 686 val = nr64(ENET_SERDES_1_PLL_CFG); 687 val &= ~ENET_SERDES_PLL_FBDIV2; 688 switch (np->port) { 689 case 0: 690 val |= ENET_SERDES_PLL_HRATE0; 691 break; 692 case 1: 693 val |= ENET_SERDES_PLL_HRATE1; 694 break; 695 case 2: 696 val |= ENET_SERDES_PLL_HRATE2; 697 break; 698 case 3: 699 val |= ENET_SERDES_PLL_HRATE3; 700 break; 701 default: 702 return -EINVAL; 703 } 704 nw64(ENET_SERDES_1_PLL_CFG, val); 705 706 return 0; 707} 708 709static int bcm8704_reset(struct niu *np) 710{ 711 int err, limit; 712 713 err = mdio_read(np, np->phy_addr, 714 BCM8704_PHYXS_DEV_ADDR, MII_BMCR); 715 if (err < 0) 716 return err; 717 err |= BMCR_RESET; 718 err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, 719 MII_BMCR, err); 720 if (err) 721 return err; 722 723 limit = 1000; 724 while (--limit >= 0) { 725 err = mdio_read(np, np->phy_addr, 726 BCM8704_PHYXS_DEV_ADDR, MII_BMCR); 727 if (err < 0) 728 return err; 729 if (!(err & BMCR_RESET)) 730 break; 731 } 732 if (limit < 0) { 733 dev_err(np->device, PFX "Port %u PHY will not reset " 734 "(bmcr=%04x)\n", np->port, (err & 0xffff)); 735 return -ENODEV; 736 } 737 return 0; 738} 739 740/* When written, certain PHY registers need to be read back twice 741 * in order for the bits to settle properly. 742 */ 743static int bcm8704_user_dev3_readback(struct niu *np, int reg) 744{ 745 int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg); 746 if (err < 0) 747 return err; 748 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg); 749 if (err < 0) 750 return err; 751 return 0; 752} 753 754static int bcm8704_init_user_dev3(struct niu *np) 755{ 756 int err; 757 758 err = mdio_write(np, np->phy_addr, 759 BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL, 760 (USER_CONTROL_OPTXRST_LVL | 761 USER_CONTROL_OPBIASFLT_LVL | 762 USER_CONTROL_OBTMPFLT_LVL | 763 USER_CONTROL_OPPRFLT_LVL | 764 USER_CONTROL_OPTXFLT_LVL | 765 USER_CONTROL_OPRXLOS_LVL | 766 USER_CONTROL_OPRXFLT_LVL | 767 USER_CONTROL_OPTXON_LVL | 768 (0x3f << USER_CONTROL_RES1_SHIFT))); 769 if (err) 770 return err; 771 772 err = mdio_write(np, np->phy_addr, 773 BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL, 774 (USER_PMD_TX_CTL_XFP_CLKEN | 775 (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) | 776 (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) | 777 USER_PMD_TX_CTL_TSCK_LPWREN)); 778 if (err) 779 return err; 780 781 err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL); 782 if (err) 783 return err; 784 err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL); 785 if (err) 786 return err; 787 788 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 789 BCM8704_USER_OPT_DIGITAL_CTRL); 790 if (err < 0) 791 return err; 792 err &= ~USER_ODIG_CTRL_GPIOS; 793 err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT); 794 err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 795 BCM8704_USER_OPT_DIGITAL_CTRL, err); 796 if (err) 797 return err; 798 799 mdelay(1000); 800 801 return 0; 802} 803 804static int mrvl88x2011_act_led(struct niu *np, int val) 805{ 806 int err; 807 808 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, 809 MRVL88X2011_LED_8_TO_11_CTL); 810 if (err < 0) 811 return err; 812 813 err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT,MRVL88X2011_LED_CTL_MASK); 814 err |= MRVL88X2011_LED(MRVL88X2011_LED_ACT,val); 815 816 return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, 817 MRVL88X2011_LED_8_TO_11_CTL, err); 818} 819 820static int mrvl88x2011_led_blink_rate(struct niu *np, int rate) 821{ 822 int err; 823 824 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, 825 MRVL88X2011_LED_BLINK_CTL); 826 if (err >= 0) { 827 err &= ~MRVL88X2011_LED_BLKRATE_MASK; 828 err |= (rate << 4); 829 830 err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, 831 MRVL88X2011_LED_BLINK_CTL, err); 832 } 833 834 return err; 835} 836 837static int xcvr_init_10g_mrvl88x2011(struct niu *np) 838{ 839 int err; 840 841 /* Set LED functions */ 842 err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS); 843 if (err) 844 return err; 845 846 /* led activity */ 847 err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF); 848 if (err) 849 return err; 850 851 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, 852 MRVL88X2011_GENERAL_CTL); 853 if (err < 0) 854 return err; 855 856 err |= MRVL88X2011_ENA_XFPREFCLK; 857 858 err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, 859 MRVL88X2011_GENERAL_CTL, err); 860 if (err < 0) 861 return err; 862 863 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, 864 MRVL88X2011_PMA_PMD_CTL_1); 865 if (err < 0) 866 return err; 867 868 if (np->link_config.loopback_mode == LOOPBACK_MAC) 869 err |= MRVL88X2011_LOOPBACK; 870 else 871 err &= ~MRVL88X2011_LOOPBACK; 872 873 err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, 874 MRVL88X2011_PMA_PMD_CTL_1, err); 875 if (err < 0) 876 return err; 877 878 /* Enable PMD */ 879 return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, 880 MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX); 881} 882 883static int xcvr_init_10g_bcm8704(struct niu *np) 884{ 885 struct niu_link_config *lp = &np->link_config; 886 u16 analog_stat0, tx_alarm_status; 887 int err; 888 889 err = bcm8704_reset(np); 890 if (err) 891 return err; 892 893 err = bcm8704_init_user_dev3(np); 894 if (err) 895 return err; 896 897 err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, 898 MII_BMCR); 899 if (err < 0) 900 return err; 901 err &= ~BMCR_LOOPBACK; 902 903 if (lp->loopback_mode == LOOPBACK_MAC) 904 err |= BMCR_LOOPBACK; 905 906 err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, 907 MII_BMCR, err); 908 if (err) 909 return err; 910 911#if 1 912 err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, 913 MII_STAT1000); 914 if (err < 0) 915 return err; 916 pr_info(PFX "Port %u PMA_PMD(MII_STAT1000) [%04x]\n", 917 np->port, err); 918 919 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20); 920 if (err < 0) 921 return err; 922 pr_info(PFX "Port %u USER_DEV3(0x20) [%04x]\n", 923 np->port, err); 924 925 err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, 926 MII_NWAYTEST); 927 if (err < 0) 928 return err; 929 pr_info(PFX "Port %u PHYXS(MII_NWAYTEST) [%04x]\n", 930 np->port, err); 931#endif 932 933 /* XXX dig this out it might not be so useful XXX */ 934 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 935 BCM8704_USER_ANALOG_STATUS0); 936 if (err < 0) 937 return err; 938 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 939 BCM8704_USER_ANALOG_STATUS0); 940 if (err < 0) 941 return err; 942 analog_stat0 = err; 943 944 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 945 BCM8704_USER_TX_ALARM_STATUS); 946 if (err < 0) 947 return err; 948 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 949 BCM8704_USER_TX_ALARM_STATUS); 950 if (err < 0) 951 return err; 952 tx_alarm_status = err; 953 954 if (analog_stat0 != 0x03fc) { 955 if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) { 956 pr_info(PFX "Port %u cable not connected " 957 "or bad cable.\n", np->port); 958 } else if (analog_stat0 == 0x639c) { 959 pr_info(PFX "Port %u optical module is bad " 960 "or missing.\n", np->port); 961 } 962 } 963 964 return 0; 965} 966 967static int xcvr_init_10g(struct niu *np) 968{ 969 int phy_id, err; 970 u64 val; 971 972 val = nr64_mac(XMAC_CONFIG); 973 val &= ~XMAC_CONFIG_LED_POLARITY; 974 val |= XMAC_CONFIG_FORCE_LED_ON; 975 nw64_mac(XMAC_CONFIG, val); 976 977 /* XXX shared resource, lock parent XXX */ 978 val = nr64(MIF_CONFIG); 979 val |= MIF_CONFIG_INDIRECT_MODE; 980 nw64(MIF_CONFIG, val); 981 982 phy_id = phy_decode(np->parent->port_phy, np->port); 983 phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port]; 984 985 /* handle different phy types */ 986 switch (phy_id & NIU_PHY_ID_MASK) { 987 case NIU_PHY_ID_MRVL88X2011: 988 err = xcvr_init_10g_mrvl88x2011(np); 989 break; 990 991 default: /* bcom 8704 */ 992 err = xcvr_init_10g_bcm8704(np); 993 break; 994 } 995 996 return 0; 997} 998 999static int mii_reset(struct niu *np) 1000{ 1001 int limit, err; 1002 1003 err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET); 1004 if (err) 1005 return err; 1006 1007 limit = 1000; 1008 while (--limit >= 0) { 1009 udelay(500); 1010 err = mii_read(np, np->phy_addr, MII_BMCR); 1011 if (err < 0) 1012 return err; 1013 if (!(err & BMCR_RESET)) 1014 break; 1015 } 1016 if (limit < 0) { 1017 dev_err(np->device, PFX "Port %u MII would not reset, " 1018 "bmcr[%04x]\n", np->port, err); 1019 return -ENODEV; 1020 } 1021 1022 return 0; 1023} 1024 1025static int mii_init_common(struct niu *np) 1026{ 1027 struct niu_link_config *lp = &np->link_config; 1028 u16 bmcr, bmsr, adv, estat; 1029 int err; 1030 1031 err = mii_reset(np); 1032 if (err) 1033 return err; 1034 1035 err = mii_read(np, np->phy_addr, MII_BMSR); 1036 if (err < 0) 1037 return err; 1038 bmsr = err; 1039 1040 estat = 0; 1041 if (bmsr & BMSR_ESTATEN) { 1042 err = mii_read(np, np->phy_addr, MII_ESTATUS); 1043 if (err < 0) 1044 return err; 1045 estat = err; 1046 } 1047 1048 bmcr = 0; 1049 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); 1050 if (err) 1051 return err; 1052 1053 if (lp->loopback_mode == LOOPBACK_MAC) { 1054 bmcr |= BMCR_LOOPBACK; 1055 if (lp->active_speed == SPEED_1000) 1056 bmcr |= BMCR_SPEED1000; 1057 if (lp->active_duplex == DUPLEX_FULL) 1058 bmcr |= BMCR_FULLDPLX; 1059 } 1060 1061 if (lp->loopback_mode == LOOPBACK_PHY) { 1062 u16 aux; 1063 1064 aux = (BCM5464R_AUX_CTL_EXT_LB | 1065 BCM5464R_AUX_CTL_WRITE_1); 1066 err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux); 1067 if (err) 1068 return err; 1069 } 1070 1071 /* XXX configurable XXX */ 1072 /* XXX for now don't advertise half-duplex or asym pause... XXX */ 1073 adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP; 1074 if (bmsr & BMSR_10FULL) 1075 adv |= ADVERTISE_10FULL; 1076 if (bmsr & BMSR_100FULL) 1077 adv |= ADVERTISE_100FULL; 1078 err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv); 1079 if (err) 1080 return err; 1081 1082 if (bmsr & BMSR_ESTATEN) { 1083 u16 ctrl1000 = 0; 1084 1085 if (estat & ESTATUS_1000_TFULL) 1086 ctrl1000 |= ADVERTISE_1000FULL; 1087 err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000); 1088 if (err) 1089 return err; 1090 } 1091 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 1092 1093 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); 1094 if (err) 1095 return err; 1096 1097 err = mii_read(np, np->phy_addr, MII_BMCR); 1098 if (err < 0) 1099 return err; 1100 err = mii_read(np, np->phy_addr, MII_BMSR); 1101 if (err < 0) 1102 return err; 1103#if 0 1104 pr_info(PFX "Port %u after MII init bmcr[%04x] bmsr[%04x]\n", 1105 np->port, bmcr, bmsr); 1106#endif 1107 1108 return 0; 1109} 1110 1111static int xcvr_init_1g(struct niu *np) 1112{ 1113 u64 val; 1114 1115 /* XXX shared resource, lock parent XXX */ 1116 val = nr64(MIF_CONFIG); 1117 val &= ~MIF_CONFIG_INDIRECT_MODE; 1118 nw64(MIF_CONFIG, val); 1119 1120 return mii_init_common(np); 1121} 1122 1123static int niu_xcvr_init(struct niu *np) 1124{ 1125 const struct niu_phy_ops *ops = np->phy_ops; 1126 int err; 1127 1128 err = 0; 1129 if (ops->xcvr_init) 1130 err = ops->xcvr_init(np); 1131 1132 return err; 1133} 1134 1135static int niu_serdes_init(struct niu *np) 1136{ 1137 const struct niu_phy_ops *ops = np->phy_ops; 1138 int err; 1139 1140 err = 0; 1141 if (ops->serdes_init) 1142 err = ops->serdes_init(np); 1143 1144 return err; 1145} 1146 1147static void niu_init_xif(struct niu *); 1148static void niu_handle_led(struct niu *, int status); 1149 1150static int niu_link_status_common(struct niu *np, int link_up) 1151{ 1152 struct niu_link_config *lp = &np->link_config; 1153 struct net_device *dev = np->dev; 1154 unsigned long flags; 1155 1156 if (!netif_carrier_ok(dev) && link_up) { 1157 niuinfo(LINK, "%s: Link is up at %s, %s duplex\n", 1158 dev->name, 1159 (lp->active_speed == SPEED_10000 ? 1160 "10Gb/sec" : 1161 (lp->active_speed == SPEED_1000 ? 1162 "1Gb/sec" : 1163 (lp->active_speed == SPEED_100 ? 1164 "100Mbit/sec" : "10Mbit/sec"))), 1165 (lp->active_duplex == DUPLEX_FULL ? 1166 "full" : "half")); 1167 1168 spin_lock_irqsave(&np->lock, flags); 1169 niu_init_xif(np); 1170 niu_handle_led(np, 1); 1171 spin_unlock_irqrestore(&np->lock, flags); 1172 1173 netif_carrier_on(dev); 1174 } else if (netif_carrier_ok(dev) && !link_up) { 1175 niuwarn(LINK, "%s: Link is down\n", dev->name); 1176 spin_lock_irqsave(&np->lock, flags); 1177 niu_handle_led(np, 0); 1178 spin_unlock_irqrestore(&np->lock, flags); 1179 netif_carrier_off(dev); 1180 } 1181 1182 return 0; 1183} 1184 1185static int link_status_10g_mrvl(struct niu *np, int *link_up_p) 1186{ 1187 int err, link_up, pma_status, pcs_status; 1188 1189 link_up = 0; 1190 1191 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, 1192 MRVL88X2011_10G_PMD_STATUS_2); 1193 if (err < 0) 1194 goto out; 1195 1196 /* Check PMA/PMD Register: 1.0001.2 == 1 */ 1197 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, 1198 MRVL88X2011_PMA_PMD_STATUS_1); 1199 if (err < 0) 1200 goto out; 1201 1202 pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0); 1203 1204 /* Check PMC Register : 3.0001.2 == 1: read twice */ 1205 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, 1206 MRVL88X2011_PMA_PMD_STATUS_1); 1207 if (err < 0) 1208 goto out; 1209 1210 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, 1211 MRVL88X2011_PMA_PMD_STATUS_1); 1212 if (err < 0) 1213 goto out; 1214 1215 pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0); 1216 1217 /* Check XGXS Register : 4.0018.[0-3,12] */ 1218 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR, 1219 MRVL88X2011_10G_XGXS_LANE_STAT); 1220 if (err < 0) 1221 goto out; 1222 1223 if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 | 1224 PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 | 1225 PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC | 1226 0x800)) 1227 link_up = (pma_status && pcs_status) ? 1 : 0; 1228 1229 np->link_config.active_speed = SPEED_10000; 1230 np->link_config.active_duplex = DUPLEX_FULL; 1231 err = 0; 1232out: 1233 mrvl88x2011_act_led(np, (link_up ? 1234 MRVL88X2011_LED_CTL_PCS_ACT : 1235 MRVL88X2011_LED_CTL_OFF)); 1236 1237 *link_up_p = link_up; 1238 return err; 1239} 1240 1241static int link_status_10g_bcom(struct niu *np, int *link_up_p) 1242{ 1243 int err, link_up; 1244 1245 link_up = 0; 1246 1247 err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, 1248 BCM8704_PMD_RCV_SIGDET); 1249 if (err < 0) 1250 goto out; 1251 if (!(err & PMD_RCV_SIGDET_GLOBAL)) { 1252 err = 0; 1253 goto out; 1254 } 1255 1256 err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, 1257 BCM8704_PCS_10G_R_STATUS); 1258 if (err < 0) 1259 goto out; 1260 if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) { 1261 err = 0; 1262 goto out; 1263 } 1264 1265 err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, 1266 BCM8704_PHYXS_XGXS_LANE_STAT); 1267 if (err < 0) 1268 goto out; 1269 1270 if (err != (PHYXS_XGXS_LANE_STAT_ALINGED | 1271 PHYXS_XGXS_LANE_STAT_MAGIC | 1272 PHYXS_XGXS_LANE_STAT_LANE3 | 1273 PHYXS_XGXS_LANE_STAT_LANE2 | 1274 PHYXS_XGXS_LANE_STAT_LANE1 | 1275 PHYXS_XGXS_LANE_STAT_LANE0)) { 1276 err = 0; 1277 goto out; 1278 } 1279 1280 link_up = 1; 1281 np->link_config.active_speed = SPEED_10000; 1282 np->link_config.active_duplex = DUPLEX_FULL; 1283 err = 0; 1284 1285out: 1286 *link_up_p = link_up; 1287 return err; 1288} 1289 1290static int link_status_10g(struct niu *np, int *link_up_p) 1291{ 1292 unsigned long flags; 1293 int err = -EINVAL; 1294 1295 spin_lock_irqsave(&np->lock, flags); 1296 1297 if (np->link_config.loopback_mode == LOOPBACK_DISABLED) { 1298 int phy_id; 1299 1300 phy_id = phy_decode(np->parent->port_phy, np->port); 1301 phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port]; 1302 1303 /* handle different phy types */ 1304 switch (phy_id & NIU_PHY_ID_MASK) { 1305 case NIU_PHY_ID_MRVL88X2011: 1306 err = link_status_10g_mrvl(np, link_up_p); 1307 break; 1308 1309 default: /* bcom 8704 */ 1310 err = link_status_10g_bcom(np, link_up_p); 1311 break; 1312 } 1313 } 1314 1315 spin_unlock_irqrestore(&np->lock, flags); 1316 1317 return err; 1318} 1319 1320static int link_status_1g(struct niu *np, int *link_up_p) 1321{ 1322 u16 current_speed, bmsr; 1323 unsigned long flags; 1324 u8 current_duplex; 1325 int err, link_up; 1326 1327 link_up = 0; 1328 current_speed = SPEED_INVALID; 1329 current_duplex = DUPLEX_INVALID; 1330 1331 spin_lock_irqsave(&np->lock, flags); 1332 1333 err = -EINVAL; 1334 if (np->link_config.loopback_mode != LOOPBACK_DISABLED) 1335 goto out; 1336 1337 err = mii_read(np, np->phy_addr, MII_BMSR); 1338 if (err < 0) 1339 goto out; 1340 1341 bmsr = err; 1342 if (bmsr & BMSR_LSTATUS) { 1343 u16 adv, lpa, common, estat; 1344 1345 err = mii_read(np, np->phy_addr, MII_ADVERTISE); 1346 if (err < 0) 1347 goto out; 1348 adv = err; 1349 1350 err = mii_read(np, np->phy_addr, MII_LPA); 1351 if (err < 0) 1352 goto out; 1353 lpa = err; 1354 1355 common = adv & lpa; 1356 1357 err = mii_read(np, np->phy_addr, MII_ESTATUS); 1358 if (err < 0) 1359 goto out; 1360 estat = err; 1361 1362 link_up = 1; 1363 if (estat & (ESTATUS_1000_TFULL | ESTATUS_1000_THALF)) { 1364 current_speed = SPEED_1000; 1365 if (estat & ESTATUS_1000_TFULL) 1366 current_duplex = DUPLEX_FULL; 1367 else 1368 current_duplex = DUPLEX_HALF; 1369 } else { 1370 if (common & ADVERTISE_100BASE4) { 1371 current_speed = SPEED_100; 1372 current_duplex = DUPLEX_HALF; 1373 } else if (common & ADVERTISE_100FULL) { 1374 current_speed = SPEED_100; 1375 current_duplex = DUPLEX_FULL; 1376 } else if (common & ADVERTISE_100HALF) { 1377 current_speed = SPEED_100; 1378 current_duplex = DUPLEX_HALF; 1379 } else if (common & ADVERTISE_10FULL) { 1380 current_speed = SPEED_10; 1381 current_duplex = DUPLEX_FULL; 1382 } else if (common & ADVERTISE_10HALF) { 1383 current_speed = SPEED_10; 1384 current_duplex = DUPLEX_HALF; 1385 } else 1386 link_up = 0; 1387 } 1388 } 1389 err = 0; 1390 1391out: 1392 spin_unlock_irqrestore(&np->lock, flags); 1393 1394 *link_up_p = link_up; 1395 return err; 1396} 1397 1398static int niu_link_status(struct niu *np, int *link_up_p) 1399{ 1400 const struct niu_phy_ops *ops = np->phy_ops; 1401 int err; 1402 1403 err = 0; 1404 if (ops->link_status) 1405 err = ops->link_status(np, link_up_p); 1406 1407 return err; 1408} 1409 1410static void niu_timer(unsigned long __opaque) 1411{ 1412 struct niu *np = (struct niu *) __opaque; 1413 unsigned long off; 1414 int err, link_up; 1415 1416 err = niu_link_status(np, &link_up); 1417 if (!err) 1418 niu_link_status_common(np, link_up); 1419 1420 if (netif_carrier_ok(np->dev)) 1421 off = 5 * HZ; 1422 else 1423 off = 1 * HZ; 1424 np->timer.expires = jiffies + off; 1425 1426 add_timer(&np->timer); 1427} 1428 1429static const struct niu_phy_ops phy_ops_10g_fiber_niu = { 1430 .serdes_init = serdes_init_niu, 1431 .xcvr_init = xcvr_init_10g, 1432 .link_status = link_status_10g, 1433}; 1434 1435static const struct niu_phy_ops phy_ops_10g_fiber = { 1436 .serdes_init = serdes_init_10g, 1437 .xcvr_init = xcvr_init_10g, 1438 .link_status = link_status_10g, 1439}; 1440 1441static const struct niu_phy_ops phy_ops_10g_copper = { 1442 .serdes_init = serdes_init_10g, 1443 .link_status = link_status_10g, /* XXX */ 1444}; 1445 1446static const struct niu_phy_ops phy_ops_1g_fiber = { 1447 .serdes_init = serdes_init_1g, 1448 .xcvr_init = xcvr_init_1g, 1449 .link_status = link_status_1g, 1450}; 1451 1452static const struct niu_phy_ops phy_ops_1g_copper = { 1453 .xcvr_init = xcvr_init_1g, 1454 .link_status = link_status_1g, 1455}; 1456 1457struct niu_phy_template { 1458 const struct niu_phy_ops *ops; 1459 u32 phy_addr_base; 1460}; 1461 1462static const struct niu_phy_template phy_template_niu = { 1463 .ops = &phy_ops_10g_fiber_niu, 1464 .phy_addr_base = 16, 1465}; 1466 1467static const struct niu_phy_template phy_template_10g_fiber = { 1468 .ops = &phy_ops_10g_fiber, 1469 .phy_addr_base = 8, 1470}; 1471 1472static const struct niu_phy_template phy_template_10g_copper = { 1473 .ops = &phy_ops_10g_copper, 1474 .phy_addr_base = 10, 1475}; 1476 1477static const struct niu_phy_template phy_template_1g_fiber = { 1478 .ops = &phy_ops_1g_fiber, 1479 .phy_addr_base = 0, 1480}; 1481 1482static const struct niu_phy_template phy_template_1g_copper = { 1483 .ops = &phy_ops_1g_copper, 1484 .phy_addr_base = 0, 1485}; 1486 1487static int niu_determine_phy_disposition(struct niu *np) 1488{ 1489 struct niu_parent *parent = np->parent; 1490 u8 plat_type = parent->plat_type; 1491 const struct niu_phy_template *tp; 1492 u32 phy_addr_off = 0; 1493 1494 if (plat_type == PLAT_TYPE_NIU) { 1495 tp = &phy_template_niu; 1496 phy_addr_off += np->port; 1497 } else { 1498 switch (np->flags & (NIU_FLAGS_10G | NIU_FLAGS_FIBER)) { 1499 case 0: 1500 /* 1G copper */ 1501 tp = &phy_template_1g_copper; 1502 if (plat_type == PLAT_TYPE_VF_P0) 1503 phy_addr_off = 10; 1504 else if (plat_type == PLAT_TYPE_VF_P1) 1505 phy_addr_off = 26; 1506 1507 phy_addr_off += (np->port ^ 0x3); 1508 break; 1509 1510 case NIU_FLAGS_10G: 1511 /* 10G copper */ 1512 tp = &phy_template_1g_copper; 1513 break; 1514 1515 case NIU_FLAGS_FIBER: 1516 /* 1G fiber */ 1517 tp = &phy_template_1g_fiber; 1518 break; 1519 1520 case NIU_FLAGS_10G | NIU_FLAGS_FIBER: 1521 /* 10G fiber */ 1522 tp = &phy_template_10g_fiber; 1523 if (plat_type == PLAT_TYPE_VF_P0 || 1524 plat_type == PLAT_TYPE_VF_P1) 1525 phy_addr_off = 8; 1526 phy_addr_off += np->port; 1527 break; 1528 1529 default: 1530 return -EINVAL; 1531 } 1532 } 1533 1534 np->phy_ops = tp->ops; 1535 np->phy_addr = tp->phy_addr_base + phy_addr_off; 1536 1537 return 0; 1538} 1539 1540static int niu_init_link(struct niu *np) 1541{ 1542 struct niu_parent *parent = np->parent; 1543 int err, ignore; 1544 1545 if (parent->plat_type == PLAT_TYPE_NIU) { 1546 err = niu_xcvr_init(np); 1547 if (err) 1548 return err; 1549 msleep(200); 1550 } 1551 err = niu_serdes_init(np); 1552 if (err) 1553 return err; 1554 msleep(200); 1555 err = niu_xcvr_init(np); 1556 if (!err) 1557 niu_link_status(np, &ignore); 1558 return 0; 1559} 1560 1561static void niu_set_primary_mac(struct niu *np, unsigned char *addr) 1562{ 1563 u16 reg0 = addr[4] << 8 | addr[5]; 1564 u16 reg1 = addr[2] << 8 | addr[3]; 1565 u16 reg2 = addr[0] << 8 | addr[1]; 1566 1567 if (np->flags & NIU_FLAGS_XMAC) { 1568 nw64_mac(XMAC_ADDR0, reg0); 1569 nw64_mac(XMAC_ADDR1, reg1); 1570 nw64_mac(XMAC_ADDR2, reg2); 1571 } else { 1572 nw64_mac(BMAC_ADDR0, reg0); 1573 nw64_mac(BMAC_ADDR1, reg1); 1574 nw64_mac(BMAC_ADDR2, reg2); 1575 } 1576} 1577 1578static int niu_num_alt_addr(struct niu *np) 1579{ 1580 if (np->flags & NIU_FLAGS_XMAC) 1581 return XMAC_NUM_ALT_ADDR; 1582 else 1583 return BMAC_NUM_ALT_ADDR; 1584} 1585 1586static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr) 1587{ 1588 u16 reg0 = addr[4] << 8 | addr[5]; 1589 u16 reg1 = addr[2] << 8 | addr[3]; 1590 u16 reg2 = addr[0] << 8 | addr[1]; 1591 1592 if (index >= niu_num_alt_addr(np)) 1593 return -EINVAL; 1594 1595 if (np->flags & NIU_FLAGS_XMAC) { 1596 nw64_mac(XMAC_ALT_ADDR0(index), reg0); 1597 nw64_mac(XMAC_ALT_ADDR1(index), reg1); 1598 nw64_mac(XMAC_ALT_ADDR2(index), reg2); 1599 } else { 1600 nw64_mac(BMAC_ALT_ADDR0(index), reg0); 1601 nw64_mac(BMAC_ALT_ADDR1(index), reg1); 1602 nw64_mac(BMAC_ALT_ADDR2(index), reg2); 1603 } 1604 1605 return 0; 1606} 1607 1608static int niu_enable_alt_mac(struct niu *np, int index, int on) 1609{ 1610 unsigned long reg; 1611 u64 val, mask; 1612 1613 if (index >= niu_num_alt_addr(np)) 1614 return -EINVAL; 1615 1616 if (np->flags & NIU_FLAGS_XMAC) 1617 reg = XMAC_ADDR_CMPEN; 1618 else 1619 reg = BMAC_ADDR_CMPEN; 1620 1621 mask = 1 << index; 1622 1623 val = nr64_mac(reg); 1624 if (on) 1625 val |= mask; 1626 else 1627 val &= ~mask; 1628 nw64_mac(reg, val); 1629 1630 return 0; 1631} 1632 1633static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg, 1634 int num, int mac_pref) 1635{ 1636 u64 val = nr64_mac(reg); 1637 val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR); 1638 val |= num; 1639 if (mac_pref) 1640 val |= HOST_INFO_MPR; 1641 nw64_mac(reg, val); 1642} 1643 1644static int __set_rdc_table_num(struct niu *np, 1645 int xmac_index, int bmac_index, 1646 int rdc_table_num, int mac_pref) 1647{ 1648 unsigned long reg; 1649 1650 if (rdc_table_num & ~HOST_INFO_MACRDCTBLN) 1651 return -EINVAL; 1652 if (np->flags & NIU_FLAGS_XMAC) 1653 reg = XMAC_HOST_INFO(xmac_index); 1654 else 1655 reg = BMAC_HOST_INFO(bmac_index); 1656 __set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref); 1657 return 0; 1658} 1659 1660static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num, 1661 int mac_pref) 1662{ 1663 return __set_rdc_table_num(np, 17, 0, table_num, mac_pref); 1664} 1665 1666static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num, 1667 int mac_pref) 1668{ 1669 return __set_rdc_table_num(np, 16, 8, table_num, mac_pref); 1670} 1671 1672static int niu_set_alt_mac_rdc_table(struct niu *np, int idx, 1673 int table_num, int mac_pref) 1674{ 1675 if (idx >= niu_num_alt_addr(np)) 1676 return -EINVAL; 1677 return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref); 1678} 1679 1680static u64 vlan_entry_set_parity(u64 reg_val) 1681{ 1682 u64 port01_mask; 1683 u64 port23_mask; 1684 1685 port01_mask = 0x00ff; 1686 port23_mask = 0xff00; 1687 1688 if (hweight64(reg_val & port01_mask) & 1) 1689 reg_val |= ENET_VLAN_TBL_PARITY0; 1690 else 1691 reg_val &= ~ENET_VLAN_TBL_PARITY0; 1692 1693 if (hweight64(reg_val & port23_mask) & 1) 1694 reg_val |= ENET_VLAN_TBL_PARITY1; 1695 else 1696 reg_val &= ~ENET_VLAN_TBL_PARITY1; 1697 1698 return reg_val; 1699} 1700 1701static void vlan_tbl_write(struct niu *np, unsigned long index, 1702 int port, int vpr, int rdc_table) 1703{ 1704 u64 reg_val = nr64(ENET_VLAN_TBL(index)); 1705 1706 reg_val &= ~((ENET_VLAN_TBL_VPR | 1707 ENET_VLAN_TBL_VLANRDCTBLN) << 1708 ENET_VLAN_TBL_SHIFT(port)); 1709 if (vpr) 1710 reg_val |= (ENET_VLAN_TBL_VPR << 1711 ENET_VLAN_TBL_SHIFT(port)); 1712 reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port)); 1713 1714 reg_val = vlan_entry_set_parity(reg_val); 1715 1716 nw64(ENET_VLAN_TBL(index), reg_val); 1717} 1718 1719static void vlan_tbl_clear(struct niu *np) 1720{ 1721 int i; 1722 1723 for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) 1724 nw64(ENET_VLAN_TBL(i), 0); 1725} 1726 1727static int tcam_wait_bit(struct niu *np, u64 bit) 1728{ 1729 int limit = 1000; 1730 1731 while (--limit > 0) { 1732 if (nr64(TCAM_CTL) & bit) 1733 break; 1734 udelay(1); 1735 } 1736 if (limit < 0) 1737 return -ENODEV; 1738 1739 return 0; 1740} 1741 1742static int tcam_flush(struct niu *np, int index) 1743{ 1744 nw64(TCAM_KEY_0, 0x00); 1745 nw64(TCAM_KEY_MASK_0, 0xff); 1746 nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index)); 1747 1748 return tcam_wait_bit(np, TCAM_CTL_STAT); 1749} 1750 1751#if 0 1752static int tcam_read(struct niu *np, int index, 1753 u64 *key, u64 *mask) 1754{ 1755 int err; 1756 1757 nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index)); 1758 err = tcam_wait_bit(np, TCAM_CTL_STAT); 1759 if (!err) { 1760 key[0] = nr64(TCAM_KEY_0); 1761 key[1] = nr64(TCAM_KEY_1); 1762 key[2] = nr64(TCAM_KEY_2); 1763 key[3] = nr64(TCAM_KEY_3); 1764 mask[0] = nr64(TCAM_KEY_MASK_0); 1765 mask[1] = nr64(TCAM_KEY_MASK_1); 1766 mask[2] = nr64(TCAM_KEY_MASK_2); 1767 mask[3] = nr64(TCAM_KEY_MASK_3); 1768 } 1769 return err; 1770} 1771#endif 1772 1773static int tcam_write(struct niu *np, int index, 1774 u64 *key, u64 *mask) 1775{ 1776 nw64(TCAM_KEY_0, key[0]); 1777 nw64(TCAM_KEY_1, key[1]); 1778 nw64(TCAM_KEY_2, key[2]); 1779 nw64(TCAM_KEY_3, key[3]); 1780 nw64(TCAM_KEY_MASK_0, mask[0]); 1781 nw64(TCAM_KEY_MASK_1, mask[1]); 1782 nw64(TCAM_KEY_MASK_2, mask[2]); 1783 nw64(TCAM_KEY_MASK_3, mask[3]); 1784 nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index)); 1785 1786 return tcam_wait_bit(np, TCAM_CTL_STAT); 1787} 1788 1789#if 0 1790static int tcam_assoc_read(struct niu *np, int index, u64 *data) 1791{ 1792 int err; 1793 1794 nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index)); 1795 err = tcam_wait_bit(np, TCAM_CTL_STAT); 1796 if (!err) 1797 *data = nr64(TCAM_KEY_1); 1798 1799 return err; 1800} 1801#endif 1802 1803static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data) 1804{ 1805 nw64(TCAM_KEY_1, assoc_data); 1806 nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index)); 1807 1808 return tcam_wait_bit(np, TCAM_CTL_STAT); 1809} 1810 1811static void tcam_enable(struct niu *np, int on) 1812{ 1813 u64 val = nr64(FFLP_CFG_1); 1814 1815 if (on) 1816 val &= ~FFLP_CFG_1_TCAM_DIS; 1817 else 1818 val |= FFLP_CFG_1_TCAM_DIS; 1819 nw64(FFLP_CFG_1, val); 1820} 1821 1822static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio) 1823{ 1824 u64 val = nr64(FFLP_CFG_1); 1825 1826 val &= ~(FFLP_CFG_1_FFLPINITDONE | 1827 FFLP_CFG_1_CAMLAT | 1828 FFLP_CFG_1_CAMRATIO); 1829 val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT); 1830 val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT); 1831 nw64(FFLP_CFG_1, val); 1832 1833 val = nr64(FFLP_CFG_1); 1834 val |= FFLP_CFG_1_FFLPINITDONE; 1835 nw64(FFLP_CFG_1, val); 1836} 1837 1838static int tcam_user_eth_class_enable(struct niu *np, unsigned long class, 1839 int on) 1840{ 1841 unsigned long reg; 1842 u64 val; 1843 1844 if (class < CLASS_CODE_ETHERTYPE1 || 1845 class > CLASS_CODE_ETHERTYPE2) 1846 return -EINVAL; 1847 1848 reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1); 1849 val = nr64(reg); 1850 if (on) 1851 val |= L2_CLS_VLD; 1852 else 1853 val &= ~L2_CLS_VLD; 1854 nw64(reg, val); 1855 1856 return 0; 1857} 1858 1859#if 0 1860static int tcam_user_eth_class_set(struct niu *np, unsigned long class, 1861 u64 ether_type) 1862{ 1863 unsigned long reg; 1864 u64 val; 1865 1866 if (class < CLASS_CODE_ETHERTYPE1 || 1867 class > CLASS_CODE_ETHERTYPE2 || 1868 (ether_type & ~(u64)0xffff) != 0) 1869 return -EINVAL; 1870 1871 reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1); 1872 val = nr64(reg); 1873 val &= ~L2_CLS_ETYPE; 1874 val |= (ether_type << L2_CLS_ETYPE_SHIFT); 1875 nw64(reg, val); 1876 1877 return 0; 1878} 1879#endif 1880 1881static int tcam_user_ip_class_enable(struct niu *np, unsigned long class, 1882 int on) 1883{ 1884 unsigned long reg; 1885 u64 val; 1886 1887 if (class < CLASS_CODE_USER_PROG1 || 1888 class > CLASS_CODE_USER_PROG4) 1889 return -EINVAL; 1890 1891 reg = L3_CLS(class - CLASS_CODE_USER_PROG1); 1892 val = nr64(reg); 1893 if (on) 1894 val |= L3_CLS_VALID; 1895 else 1896 val &= ~L3_CLS_VALID; 1897 nw64(reg, val); 1898 1899 return 0; 1900} 1901 1902#if 0 1903static int tcam_user_ip_class_set(struct niu *np, unsigned long class, 1904 int ipv6, u64 protocol_id, 1905 u64 tos_mask, u64 tos_val) 1906{ 1907 unsigned long reg; 1908 u64 val; 1909 1910 if (class < CLASS_CODE_USER_PROG1 || 1911 class > CLASS_CODE_USER_PROG4 || 1912 (protocol_id & ~(u64)0xff) != 0 || 1913 (tos_mask & ~(u64)0xff) != 0 || 1914 (tos_val & ~(u64)0xff) != 0) 1915 return -EINVAL; 1916 1917 reg = L3_CLS(class - CLASS_CODE_USER_PROG1); 1918 val = nr64(reg); 1919 val &= ~(L3_CLS_IPVER | L3_CLS_PID | 1920 L3_CLS_TOSMASK | L3_CLS_TOS); 1921 if (ipv6) 1922 val |= L3_CLS_IPVER; 1923 val |= (protocol_id << L3_CLS_PID_SHIFT); 1924 val |= (tos_mask << L3_CLS_TOSMASK_SHIFT); 1925 val |= (tos_val << L3_CLS_TOS_SHIFT); 1926 nw64(reg, val); 1927 1928 return 0; 1929} 1930#endif 1931 1932static int tcam_early_init(struct niu *np) 1933{ 1934 unsigned long i; 1935 int err; 1936 1937 tcam_enable(np, 0); 1938 tcam_set_lat_and_ratio(np, 1939 DEFAULT_TCAM_LATENCY, 1940 DEFAULT_TCAM_ACCESS_RATIO); 1941 for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) { 1942 err = tcam_user_eth_class_enable(np, i, 0); 1943 if (err) 1944 return err; 1945 } 1946 for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) { 1947 err = tcam_user_ip_class_enable(np, i, 0); 1948 if (err) 1949 return err; 1950 } 1951 1952 return 0; 1953} 1954 1955static int tcam_flush_all(struct niu *np) 1956{ 1957 unsigned long i; 1958 1959 for (i = 0; i < np->parent->tcam_num_entries; i++) { 1960 int err = tcam_flush(np, i); 1961 if (err) 1962 return err; 1963 } 1964 return 0; 1965} 1966 1967static u64 hash_addr_regval(unsigned long index, unsigned long num_entries) 1968{ 1969 return ((u64)index | (num_entries == 1 ? 1970 HASH_TBL_ADDR_AUTOINC : 0)); 1971} 1972 1973#if 0 1974static int hash_read(struct niu *np, unsigned long partition, 1975 unsigned long index, unsigned long num_entries, 1976 u64 *data) 1977{ 1978 u64 val = hash_addr_regval(index, num_entries); 1979 unsigned long i; 1980 1981 if (partition >= FCRAM_NUM_PARTITIONS || 1982 index + num_entries > FCRAM_SIZE) 1983 return -EINVAL; 1984 1985 nw64(HASH_TBL_ADDR(partition), val); 1986 for (i = 0; i < num_entries; i++) 1987 data[i] = nr64(HASH_TBL_DATA(partition)); 1988 1989 return 0; 1990} 1991#endif 1992 1993static int hash_write(struct niu *np, unsigned long partition, 1994 unsigned long index, unsigned long num_entries, 1995 u64 *data) 1996{ 1997 u64 val = hash_addr_regval(index, num_entries); 1998 unsigned long i; 1999 2000 if (partition >= FCRAM_NUM_PARTITIONS || 2001 index + (num_entries * 8) > FCRAM_SIZE) 2002 return -EINVAL; 2003 2004 nw64(HASH_TBL_ADDR(partition), val); 2005 for (i = 0; i < num_entries; i++) 2006 nw64(HASH_TBL_DATA(partition), data[i]); 2007 2008 return 0; 2009} 2010 2011static void fflp_reset(struct niu *np) 2012{ 2013 u64 val; 2014 2015 nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST); 2016 udelay(10); 2017 nw64(FFLP_CFG_1, 0); 2018 2019 val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE; 2020 nw64(FFLP_CFG_1, val); 2021} 2022 2023static void fflp_set_timings(struct niu *np) 2024{ 2025 u64 val = nr64(FFLP_CFG_1); 2026 2027 val &= ~FFLP_CFG_1_FFLPINITDONE; 2028 val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT); 2029 nw64(FFLP_CFG_1, val); 2030 2031 val = nr64(FFLP_CFG_1); 2032 val |= FFLP_CFG_1_FFLPINITDONE; 2033 nw64(FFLP_CFG_1, val); 2034 2035 val = nr64(FCRAM_REF_TMR); 2036 val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN); 2037 val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT); 2038 val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT); 2039 nw64(FCRAM_REF_TMR, val); 2040} 2041 2042static int fflp_set_partition(struct niu *np, u64 partition, 2043 u64 mask, u64 base, int enable) 2044{ 2045 unsigned long reg; 2046 u64 val; 2047 2048 if (partition >= FCRAM_NUM_PARTITIONS || 2049 (mask & ~(u64)0x1f) != 0 || 2050 (base & ~(u64)0x1f) != 0) 2051 return -EINVAL; 2052 2053 reg = FLW_PRT_SEL(partition); 2054 2055 val = nr64(reg); 2056 val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE); 2057 val |= (mask << FLW_PRT_SEL_MASK_SHIFT); 2058 val |= (base << FLW_PRT_SEL_BASE_SHIFT); 2059 if (enable) 2060 val |= FLW_PRT_SEL_EXT; 2061 nw64(reg, val); 2062 2063 return 0; 2064} 2065 2066static int fflp_disable_all_partitions(struct niu *np) 2067{ 2068 unsigned long i; 2069 2070 for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) { 2071 int err = fflp_set_partition(np, 0, 0, 0, 0); 2072 if (err) 2073 return err; 2074 } 2075 return 0; 2076} 2077 2078static void fflp_llcsnap_enable(struct niu *np, int on) 2079{ 2080 u64 val = nr64(FFLP_CFG_1); 2081 2082 if (on) 2083 val |= FFLP_CFG_1_LLCSNAP; 2084 else 2085 val &= ~FFLP_CFG_1_LLCSNAP; 2086 nw64(FFLP_CFG_1, val); 2087} 2088 2089static void fflp_errors_enable(struct niu *np, int on) 2090{ 2091 u64 val = nr64(FFLP_CFG_1); 2092 2093 if (on) 2094 val &= ~FFLP_CFG_1_ERRORDIS; 2095 else 2096 val |= FFLP_CFG_1_ERRORDIS; 2097 nw64(FFLP_CFG_1, val); 2098} 2099 2100static int fflp_hash_clear(struct niu *np) 2101{ 2102 struct fcram_hash_ipv4 ent; 2103 unsigned long i; 2104 2105 /* IPV4 hash entry with valid bit clear, rest is don't care. */ 2106 memset(&ent, 0, sizeof(ent)); 2107 ent.header = HASH_HEADER_EXT; 2108 2109 for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) { 2110 int err = hash_write(np, 0, i, 1, (u64 *) &ent); 2111 if (err) 2112 return err; 2113 } 2114 return 0; 2115} 2116 2117static int fflp_early_init(struct niu *np) 2118{ 2119 struct niu_parent *parent; 2120 unsigned long flags; 2121 int err; 2122 2123 niu_lock_parent(np, flags); 2124 2125 parent = np->parent; 2126 err = 0; 2127 if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) { 2128 niudbg(PROBE, "fflp_early_init: Initting hw on port %u\n", 2129 np->port); 2130 if (np->parent->plat_type != PLAT_TYPE_NIU) { 2131 fflp_reset(np); 2132 fflp_set_timings(np); 2133 err = fflp_disable_all_partitions(np); 2134 if (err) { 2135 niudbg(PROBE, "fflp_disable_all_partitions " 2136 "failed, err=%d\n", err); 2137 goto out; 2138 } 2139 } 2140 2141 err = tcam_early_init(np); 2142 if (err) { 2143 niudbg(PROBE, "tcam_early_init failed, err=%d\n", 2144 err); 2145 goto out; 2146 } 2147 fflp_llcsnap_enable(np, 1); 2148 fflp_errors_enable(np, 0); 2149 nw64(H1POLY, 0); 2150 nw64(H2POLY, 0); 2151 2152 err = tcam_flush_all(np); 2153 if (err) { 2154 niudbg(PROBE, "tcam_flush_all failed, err=%d\n", 2155 err); 2156 goto out; 2157 } 2158 if (np->parent->plat_type != PLAT_TYPE_NIU) { 2159 err = fflp_hash_clear(np); 2160 if (err) { 2161 niudbg(PROBE, "fflp_hash_clear failed, " 2162 "err=%d\n", err); 2163 goto out; 2164 } 2165 } 2166 2167 vlan_tbl_clear(np); 2168 2169 niudbg(PROBE, "fflp_early_init: Success\n"); 2170 parent->flags |= PARENT_FLGS_CLS_HWINIT; 2171 } 2172out: 2173 niu_unlock_parent(np, flags); 2174 return err; 2175} 2176 2177static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key) 2178{ 2179 if (class_code < CLASS_CODE_USER_PROG1 || 2180 class_code > CLASS_CODE_SCTP_IPV6) 2181 return -EINVAL; 2182 2183 nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key); 2184 return 0; 2185} 2186 2187static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key) 2188{ 2189 if (class_code < CLASS_CODE_USER_PROG1 || 2190 class_code > CLASS_CODE_SCTP_IPV6) 2191 return -EINVAL; 2192 2193 nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key); 2194 return 0; 2195} 2196 2197static void niu_rx_skb_append(struct sk_buff *skb, struct page *page, 2198 u32 offset, u32 size) 2199{ 2200 int i = skb_shinfo(skb)->nr_frags; 2201 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2202 2203 frag->page = page; 2204 frag->page_offset = offset; 2205 frag->size = size; 2206 2207 skb->len += size; 2208 skb->data_len += size; 2209 skb->truesize += size; 2210 2211 skb_shinfo(skb)->nr_frags = i + 1; 2212} 2213 2214static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a) 2215{ 2216 a >>= PAGE_SHIFT; 2217 a ^= (a >> ilog2(MAX_RBR_RING_SIZE)); 2218 2219 return (a & (MAX_RBR_RING_SIZE - 1)); 2220} 2221 2222static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr, 2223 struct page ***link) 2224{ 2225 unsigned int h = niu_hash_rxaddr(rp, addr); 2226 struct page *p, **pp; 2227 2228 addr &= PAGE_MASK; 2229 pp = &rp->rxhash[h]; 2230 for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) { 2231 if (p->index == addr) { 2232 *link = pp; 2233 break; 2234 } 2235 } 2236 2237 return p; 2238} 2239 2240static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base) 2241{ 2242 unsigned int h = niu_hash_rxaddr(rp, base); 2243 2244 page->index = base; 2245 page->mapping = (struct address_space *) rp->rxhash[h]; 2246 rp->rxhash[h] = page; 2247} 2248 2249static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp, 2250 gfp_t mask, int start_index) 2251{ 2252 struct page *page; 2253 u64 addr; 2254 int i; 2255 2256 page = alloc_page(mask); 2257 if (!page) 2258 return -ENOMEM; 2259 2260 addr = np->ops->map_page(np->device, page, 0, 2261 PAGE_SIZE, DMA_FROM_DEVICE); 2262 2263 niu_hash_page(rp, page, addr); 2264 if (rp->rbr_blocks_per_page > 1) 2265 atomic_add(rp->rbr_blocks_per_page - 1, 2266 &compound_head(page)->_count); 2267 2268 for (i = 0; i < rp->rbr_blocks_per_page; i++) { 2269 __le32 *rbr = &rp->rbr[start_index + i]; 2270 2271 *rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT); 2272 addr += rp->rbr_block_size; 2273 } 2274 2275 return 0; 2276} 2277 2278static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask) 2279{ 2280 int index = rp->rbr_index; 2281 2282 rp->rbr_pending++; 2283 if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) { 2284 int err = niu_rbr_add_page(np, rp, mask, index); 2285 2286 if (unlikely(err)) { 2287 rp->rbr_pending--; 2288 return; 2289 } 2290 2291 rp->rbr_index += rp->rbr_blocks_per_page; 2292 BUG_ON(rp->rbr_index > rp->rbr_table_size); 2293 if (rp->rbr_index == rp->rbr_table_size) 2294 rp->rbr_index = 0; 2295 2296 if (rp->rbr_pending >= rp->rbr_kick_thresh) { 2297 nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending); 2298 rp->rbr_pending = 0; 2299 } 2300 } 2301} 2302 2303static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp) 2304{ 2305 unsigned int index = rp->rcr_index; 2306 int num_rcr = 0; 2307 2308 rp->rx_dropped++; 2309 while (1) { 2310 struct page *page, **link; 2311 u64 addr, val; 2312 u32 rcr_size; 2313 2314 num_rcr++; 2315 2316 val = le64_to_cpup(&rp->rcr[index]); 2317 addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << 2318 RCR_ENTRY_PKT_BUF_ADDR_SHIFT; 2319 page = niu_find_rxpage(rp, addr, &link); 2320 2321 rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> 2322 RCR_ENTRY_PKTBUFSZ_SHIFT]; 2323 if ((page->index + PAGE_SIZE) - rcr_size == addr) { 2324 *link = (struct page *) page->mapping; 2325 np->ops->unmap_page(np->device, page->index, 2326 PAGE_SIZE, DMA_FROM_DEVICE); 2327 page->index = 0; 2328 page->mapping = NULL; 2329 __free_page(page); 2330 rp->rbr_refill_pending++; 2331 } 2332 2333 index = NEXT_RCR(rp, index); 2334 if (!(val & RCR_ENTRY_MULTI)) 2335 break; 2336 2337 } 2338 rp->rcr_index = index; 2339 2340 return num_rcr; 2341} 2342 2343static int niu_process_rx_pkt(struct niu *np, struct rx_ring_info *rp) 2344{ 2345 unsigned int index = rp->rcr_index; 2346 struct sk_buff *skb; 2347 int len, num_rcr; 2348 2349 skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE); 2350 if (unlikely(!skb)) 2351 return niu_rx_pkt_ignore(np, rp); 2352 2353 num_rcr = 0; 2354 while (1) { 2355 struct page *page, **link; 2356 u32 rcr_size, append_size; 2357 u64 addr, val, off; 2358 2359 num_rcr++; 2360 2361 val = le64_to_cpup(&rp->rcr[index]); 2362 2363 len = (val & RCR_ENTRY_L2_LEN) >> 2364 RCR_ENTRY_L2_LEN_SHIFT; 2365 len -= ETH_FCS_LEN; 2366 2367 addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << 2368 RCR_ENTRY_PKT_BUF_ADDR_SHIFT; 2369 page = niu_find_rxpage(rp, addr, &link); 2370 2371 rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> 2372 RCR_ENTRY_PKTBUFSZ_SHIFT]; 2373 2374 off = addr & ~PAGE_MASK; 2375 append_size = rcr_size; 2376 if (num_rcr == 1) { 2377 int ptype; 2378 2379 off += 2; 2380 append_size -= 2; 2381 2382 ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT); 2383 if ((ptype == RCR_PKT_TYPE_TCP || 2384 ptype == RCR_PKT_TYPE_UDP) && 2385 !(val & (RCR_ENTRY_NOPORT | 2386 RCR_ENTRY_ERROR))) 2387 skb->ip_summed = CHECKSUM_UNNECESSARY; 2388 else 2389 skb->ip_summed = CHECKSUM_NONE; 2390 } 2391 if (!(val & RCR_ENTRY_MULTI)) 2392 append_size = len - skb->len; 2393 2394 niu_rx_skb_append(skb, page, off, append_size); 2395 if ((page->index + rp->rbr_block_size) - rcr_size == addr) { 2396 *link = (struct page *) page->mapping; 2397 np->ops->unmap_page(np->device, page->index, 2398 PAGE_SIZE, DMA_FROM_DEVICE); 2399 page->index = 0; 2400 page->mapping = NULL; 2401 rp->rbr_refill_pending++; 2402 } else 2403 get_page(page); 2404 2405 index = NEXT_RCR(rp, index); 2406 if (!(val & RCR_ENTRY_MULTI)) 2407 break; 2408 2409 } 2410 rp->rcr_index = index; 2411 2412 skb_reserve(skb, NET_IP_ALIGN); 2413 __pskb_pull_tail(skb, min(len, NIU_RXPULL_MAX)); 2414 2415 rp->rx_packets++; 2416 rp->rx_bytes += skb->len; 2417 2418 skb->protocol = eth_type_trans(skb, np->dev); 2419 netif_receive_skb(skb); 2420 2421 np->dev->last_rx = jiffies; 2422 2423 return num_rcr; 2424} 2425 2426static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask) 2427{ 2428 int blocks_per_page = rp->rbr_blocks_per_page; 2429 int err, index = rp->rbr_index; 2430 2431 err = 0; 2432 while (index < (rp->rbr_table_size - blocks_per_page)) { 2433 err = niu_rbr_add_page(np, rp, mask, index); 2434 if (err) 2435 break; 2436 2437 index += blocks_per_page; 2438 } 2439 2440 rp->rbr_index = index; 2441 return err; 2442} 2443 2444static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp) 2445{ 2446 int i; 2447 2448 for (i = 0; i < MAX_RBR_RING_SIZE; i++) { 2449 struct page *page; 2450 2451 page = rp->rxhash[i]; 2452 while (page) { 2453 struct page *next = (struct page *) page->mapping; 2454 u64 base = page->index; 2455 2456 np->ops->unmap_page(np->device, base, PAGE_SIZE, 2457 DMA_FROM_DEVICE); 2458 page->index = 0; 2459 page->mapping = NULL; 2460 2461 __free_page(page); 2462 2463 page = next; 2464 } 2465 } 2466 2467 for (i = 0; i < rp->rbr_table_size; i++) 2468 rp->rbr[i] = cpu_to_le32(0); 2469 rp->rbr_index = 0; 2470} 2471 2472static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx) 2473{ 2474 struct tx_buff_info *tb = &rp->tx_buffs[idx]; 2475 struct sk_buff *skb = tb->skb; 2476 struct tx_pkt_hdr *tp; 2477 u64 tx_flags; 2478 int i, len; 2479 2480 tp = (struct tx_pkt_hdr *) skb->data; 2481 tx_flags = le64_to_cpup(&tp->flags); 2482 2483 rp->tx_packets++; 2484 rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) - 2485 ((tx_flags & TXHDR_PAD) / 2)); 2486 2487 len = skb_headlen(skb); 2488 np->ops->unmap_single(np->device, tb->mapping, 2489 len, DMA_TO_DEVICE); 2490 2491 if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK) 2492 rp->mark_pending--; 2493 2494 tb->skb = NULL; 2495 do { 2496 idx = NEXT_TX(rp, idx); 2497 len -= MAX_TX_DESC_LEN; 2498 } while (len > 0); 2499 2500 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2501 tb = &rp->tx_buffs[idx]; 2502 BUG_ON(tb->skb != NULL); 2503 np->ops->unmap_page(np->device, tb->mapping, 2504 skb_shinfo(skb)->frags[i].size, 2505 DMA_TO_DEVICE); 2506 idx = NEXT_TX(rp, idx); 2507 } 2508 2509 dev_kfree_skb(skb); 2510 2511 return idx; 2512} 2513 2514#define NIU_TX_WAKEUP_THRESH(rp) ((rp)->pending / 4) 2515 2516static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) 2517{ 2518 u16 pkt_cnt, tmp; 2519 int cons; 2520 u64 cs; 2521 2522 cs = rp->tx_cs; 2523 if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK)))) 2524 goto out; 2525 2526 tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT; 2527 pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) & 2528 (TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT); 2529 2530 rp->last_pkt_cnt = tmp; 2531 2532 cons = rp->cons; 2533 2534 niudbg(TX_DONE, "%s: niu_tx_work() pkt_cnt[%u] cons[%d]\n", 2535 np->dev->name, pkt_cnt, cons); 2536 2537 while (pkt_cnt--) 2538 cons = release_tx_packet(np, rp, cons); 2539 2540 rp->cons = cons; 2541 smp_mb(); 2542 2543out: 2544 if (unlikely(netif_queue_stopped(np->dev) && 2545 (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) { 2546 netif_tx_lock(np->dev); 2547 if (netif_queue_stopped(np->dev) && 2548 (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))) 2549 netif_wake_queue(np->dev); 2550 netif_tx_unlock(np->dev); 2551 } 2552} 2553 2554static int niu_rx_work(struct niu *np, struct rx_ring_info *rp, int budget) 2555{ 2556 int qlen, rcr_done = 0, work_done = 0; 2557 struct rxdma_mailbox *mbox = rp->mbox; 2558 u64 stat; 2559 2560#if 1 2561 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel)); 2562 qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN; 2563#else 2564 stat = le64_to_cpup(&mbox->rx_dma_ctl_stat); 2565 qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN); 2566#endif 2567 mbox->rx_dma_ctl_stat = 0; 2568 mbox->rcrstat_a = 0; 2569 2570 niudbg(RX_STATUS, "%s: niu_rx_work(chan[%d]), stat[%llx] qlen=%d\n", 2571 np->dev->name, rp->rx_channel, (unsigned long long) stat, qlen); 2572 2573 rcr_done = work_done = 0; 2574 qlen = min(qlen, budget); 2575 while (work_done < qlen) { 2576 rcr_done += niu_process_rx_pkt(np, rp); 2577 work_done++; 2578 } 2579 2580 if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) { 2581 unsigned int i; 2582 2583 for (i = 0; i < rp->rbr_refill_pending; i++) 2584 niu_rbr_refill(np, rp, GFP_ATOMIC); 2585 rp->rbr_refill_pending = 0; 2586 } 2587 2588 stat = (RX_DMA_CTL_STAT_MEX | 2589 ((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) | 2590 ((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT)); 2591 2592 nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat); 2593 2594 return work_done; 2595} 2596 2597static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget) 2598{ 2599 u64 v0 = lp->v0; 2600 u32 tx_vec = (v0 >> 32); 2601 u32 rx_vec = (v0 & 0xffffffff); 2602 int i, work_done = 0; 2603 2604 niudbg(INTR, "%s: niu_poll_core() v0[%016llx]\n", 2605 np->dev->name, (unsigned long long) v0); 2606 2607 for (i = 0; i < np->num_tx_rings; i++) { 2608 struct tx_ring_info *rp = &np->tx_rings[i]; 2609 if (tx_vec & (1 << rp->tx_channel)) 2610 niu_tx_work(np, rp); 2611 nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0); 2612 } 2613 2614 for (i = 0; i < np->num_rx_rings; i++) { 2615 struct rx_ring_info *rp = &np->rx_rings[i]; 2616 2617 if (rx_vec & (1 << rp->rx_channel)) { 2618 int this_work_done; 2619 2620 this_work_done = niu_rx_work(np, rp, 2621 budget); 2622 2623 budget -= this_work_done; 2624 work_done += this_work_done; 2625 } 2626 nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0); 2627 } 2628 2629 return work_done; 2630} 2631 2632static int niu_poll(struct napi_struct *napi, int budget) 2633{ 2634 struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi); 2635 struct niu *np = lp->np; 2636 int work_done; 2637 2638 work_done = niu_poll_core(np, lp, budget); 2639 2640 if (work_done < budget) { 2641 netif_rx_complete(np->dev, napi); 2642 niu_ldg_rearm(np, lp, 1); 2643 } 2644 return work_done; 2645} 2646 2647static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp, 2648 u64 stat) 2649{ 2650 dev_err(np->device, PFX "%s: RX channel %u errors ( ", 2651 np->dev->name, rp->rx_channel); 2652 2653 if (stat & RX_DMA_CTL_STAT_RBR_TMOUT) 2654 printk("RBR_TMOUT "); 2655 if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR) 2656 printk("RSP_CNT "); 2657 if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS) 2658 printk("BYTE_EN_BUS "); 2659 if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR) 2660 printk("RSP_DAT "); 2661 if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR) 2662 printk("RCR_ACK "); 2663 if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR) 2664 printk("RCR_SHA_PAR "); 2665 if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR) 2666 printk("RBR_PRE_PAR "); 2667 if (stat & RX_DMA_CTL_STAT_CONFIG_ERR) 2668 printk("CONFIG "); 2669 if (stat & RX_DMA_CTL_STAT_RCRINCON) 2670 printk("RCRINCON "); 2671 if (stat & RX_DMA_CTL_STAT_RCRFULL) 2672 printk("RCRFULL "); 2673 if (stat & RX_DMA_CTL_STAT_RBRFULL) 2674 printk("RBRFULL "); 2675 if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE) 2676 printk("RBRLOGPAGE "); 2677 if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE) 2678 printk("CFIGLOGPAGE "); 2679 if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR) 2680 printk("DC_FIDO "); 2681 2682 printk(")\n"); 2683} 2684 2685static int niu_rx_error(struct niu *np, struct rx_ring_info *rp) 2686{ 2687 u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel)); 2688 int err = 0; 2689 2690 2691 if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL | 2692 RX_DMA_CTL_STAT_PORT_FATAL)) 2693 err = -EINVAL; 2694 2695 if (err) { 2696 dev_err(np->device, PFX "%s: RX channel %u error, stat[%llx]\n", 2697 np->dev->name, rp->rx_channel, 2698 (unsigned long long) stat); 2699 2700 niu_log_rxchan_errors(np, rp, stat); 2701 } 2702 2703 nw64(RX_DMA_CTL_STAT(rp->rx_channel), 2704 stat & RX_DMA_CTL_WRITE_CLEAR_ERRS); 2705 2706 return err; 2707} 2708 2709static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp, 2710 u64 cs) 2711{ 2712 dev_err(np->device, PFX "%s: TX channel %u errors ( ", 2713 np->dev->name, rp->tx_channel); 2714 2715 if (cs & TX_CS_MBOX_ERR) 2716 printk("MBOX "); 2717 if (cs & TX_CS_PKT_SIZE_ERR) 2718 printk("PKT_SIZE "); 2719 if (cs & TX_CS_TX_RING_OFLOW) 2720 printk("TX_RING_OFLOW "); 2721 if (cs & TX_CS_PREF_BUF_PAR_ERR) 2722 printk("PREF_BUF_PAR "); 2723 if (cs & TX_CS_NACK_PREF) 2724 printk("NACK_PREF "); 2725 if (cs & TX_CS_NACK_PKT_RD) 2726 printk("NACK_PKT_RD "); 2727 if (cs & TX_CS_CONF_PART_ERR) 2728 printk("CONF_PART "); 2729 if (cs & TX_CS_PKT_PRT_ERR) 2730 printk("PKT_PTR "); 2731 2732 printk(")\n"); 2733} 2734 2735static int niu_tx_error(struct niu *np, struct tx_ring_info *rp) 2736{ 2737 u64 cs, logh, logl; 2738 2739 cs = nr64(TX_CS(rp->tx_channel)); 2740 logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel)); 2741 logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel)); 2742 2743 dev_err(np->device, PFX "%s: TX channel %u error, " 2744 "cs[%llx] logh[%llx] logl[%llx]\n", 2745 np->dev->name, rp->tx_channel, 2746 (unsigned long long) cs, 2747 (unsigned long long) logh, 2748 (unsigned long long) logl); 2749 2750 niu_log_txchan_errors(np, rp, cs); 2751 2752 return -ENODEV; 2753} 2754 2755static int niu_mif_interrupt(struct niu *np) 2756{ 2757 u64 mif_status = nr64(MIF_STATUS); 2758 int phy_mdint = 0; 2759 2760 if (np->flags & NIU_FLAGS_XMAC) { 2761 u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS); 2762 2763 if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT) 2764 phy_mdint = 1; 2765 } 2766 2767 dev_err(np->device, PFX "%s: MIF interrupt, " 2768 "stat[%llx] phy_mdint(%d)\n", 2769 np->dev->name, (unsigned long long) mif_status, phy_mdint); 2770 2771 return -ENODEV; 2772} 2773 2774static void niu_xmac_interrupt(struct niu *np) 2775{ 2776 struct niu_xmac_stats *mp = &np->mac_stats.xmac; 2777 u64 val; 2778 2779 val = nr64_mac(XTXMAC_STATUS); 2780 if (val & XTXMAC_STATUS_FRAME_CNT_EXP) 2781 mp->tx_frames += TXMAC_FRM_CNT_COUNT; 2782 if (val & XTXMAC_STATUS_BYTE_CNT_EXP) 2783 mp->tx_bytes += TXMAC_BYTE_CNT_COUNT; 2784 if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR) 2785 mp->tx_fifo_errors++; 2786 if (val & XTXMAC_STATUS_TXMAC_OFLOW) 2787 mp->tx_overflow_errors++; 2788 if (val & XTXMAC_STATUS_MAX_PSIZE_ERR) 2789 mp->tx_max_pkt_size_errors++; 2790 if (val & XTXMAC_STATUS_TXMAC_UFLOW) 2791 mp->tx_underflow_errors++; 2792 2793 val = nr64_mac(XRXMAC_STATUS); 2794 if (val & XRXMAC_STATUS_LCL_FLT_STATUS) 2795 mp->rx_local_faults++; 2796 if (val & XRXMAC_STATUS_RFLT_DET) 2797 mp->rx_remote_faults++; 2798 if (val & XRXMAC_STATUS_LFLT_CNT_EXP) 2799 mp->rx_link_faults += LINK_FAULT_CNT_COUNT; 2800 if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP) 2801 mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT; 2802 if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP) 2803 mp->rx_frags += RXMAC_FRAG_CNT_COUNT; 2804 if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP) 2805 mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT; 2806 if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP) 2807 mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; 2808 if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP) 2809 mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; 2810 if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP) 2811 mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT; 2812 if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP) 2813 mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT; 2814 if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP) 2815 mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT; 2816 if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP) 2817 mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT; 2818 if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP) 2819 mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT; 2820 if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP) 2821 mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT; 2822 if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP) 2823 mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT; 2824 if (val & XRXMAC_STAT_MSK_RXOCTET_CNT_EXP) 2825 mp->rx_octets += RXMAC_BT_CNT_COUNT; 2826 if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP) 2827 mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT; 2828 if (val & XRXMAC_STATUS_LENERR_CNT_EXP) 2829 mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT; 2830 if (val & XRXMAC_STATUS_CRCERR_CNT_EXP) 2831 mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT; 2832 if (val & XRXMAC_STATUS_RXUFLOW) 2833 mp->rx_underflows++; 2834 if (val & XRXMAC_STATUS_RXOFLOW) 2835 mp->rx_overflows++; 2836 2837 val = nr64_mac(XMAC_FC_STAT); 2838 if (val & XMAC_FC_STAT_TX_MAC_NPAUSE) 2839 mp->pause_off_state++; 2840 if (val & XMAC_FC_STAT_TX_MAC_PAUSE) 2841 mp->pause_on_state++; 2842 if (val & XMAC_FC_STAT_RX_MAC_RPAUSE) 2843 mp->pause_received++; 2844} 2845 2846static void niu_bmac_interrupt(struct niu *np) 2847{ 2848 struct niu_bmac_stats *mp = &np->mac_stats.bmac; 2849 u64 val; 2850 2851 val = nr64_mac(BTXMAC_STATUS); 2852 if (val & BTXMAC_STATUS_UNDERRUN) 2853 mp->tx_underflow_errors++; 2854 if (val & BTXMAC_STATUS_MAX_PKT_ERR) 2855 mp->tx_max_pkt_size_errors++; 2856 if (val & BTXMAC_STATUS_BYTE_CNT_EXP) 2857 mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT; 2858 if (val & BTXMAC_STATUS_FRAME_CNT_EXP) 2859 mp->tx_frames += BTXMAC_FRM_CNT_COUNT; 2860 2861 val = nr64_mac(BRXMAC_STATUS); 2862 if (val & BRXMAC_STATUS_OVERFLOW) 2863 mp->rx_overflows++; 2864 if (val & BRXMAC_STATUS_FRAME_CNT_EXP) 2865 mp->rx_frames += BRXMAC_FRAME_CNT_COUNT; 2866 if (val & BRXMAC_STATUS_ALIGN_ERR_EXP) 2867 mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT; 2868 if (val & BRXMAC_STATUS_CRC_ERR_EXP) 2869 mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT; 2870 if (val & BRXMAC_STATUS_LEN_ERR_EXP) 2871 mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT; 2872 2873 val = nr64_mac(BMAC_CTRL_STATUS); 2874 if (val & BMAC_CTRL_STATUS_NOPAUSE) 2875 mp->pause_off_state++; 2876 if (val & BMAC_CTRL_STATUS_PAUSE) 2877 mp->pause_on_state++; 2878 if (val & BMAC_CTRL_STATUS_PAUSE_RECV) 2879 mp->pause_received++; 2880} 2881 2882static int niu_mac_interrupt(struct niu *np) 2883{ 2884 if (np->flags & NIU_FLAGS_XMAC) 2885 niu_xmac_interrupt(np); 2886 else 2887 niu_bmac_interrupt(np); 2888 2889 return 0; 2890} 2891 2892static void niu_log_device_error(struct niu *np, u64 stat) 2893{ 2894 dev_err(np->device, PFX "%s: Core device errors ( ", 2895 np->dev->name); 2896 2897 if (stat & SYS_ERR_MASK_META2) 2898 printk("META2 "); 2899 if (stat & SYS_ERR_MASK_META1) 2900 printk("META1 "); 2901 if (stat & SYS_ERR_MASK_PEU) 2902 printk("PEU "); 2903 if (stat & SYS_ERR_MASK_TXC) 2904 printk("TXC "); 2905 if (stat & SYS_ERR_MASK_RDMC) 2906 printk("RDMC "); 2907 if (stat & SYS_ERR_MASK_TDMC) 2908 printk("TDMC "); 2909 if (stat & SYS_ERR_MASK_ZCP) 2910 printk("ZCP "); 2911 if (stat & SYS_ERR_MASK_FFLP) 2912 printk("FFLP "); 2913 if (stat & SYS_ERR_MASK_IPP) 2914 printk("IPP "); 2915 if (stat & SYS_ERR_MASK_MAC) 2916 printk("MAC "); 2917 if (stat & SYS_ERR_MASK_SMX) 2918 printk("SMX "); 2919 2920 printk(")\n"); 2921} 2922 2923static int niu_device_error(struct niu *np) 2924{ 2925 u64 stat = nr64(SYS_ERR_STAT); 2926 2927 dev_err(np->device, PFX "%s: Core device error, stat[%llx]\n", 2928 np->dev->name, (unsigned long long) stat); 2929 2930 niu_log_device_error(np, stat); 2931 2932 return -ENODEV; 2933} 2934 2935static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp, 2936 u64 v0, u64 v1, u64 v2) 2937{ 2938 2939 int i, err = 0; 2940 2941 lp->v0 = v0; 2942 lp->v1 = v1; 2943 lp->v2 = v2; 2944 2945 if (v1 & 0x00000000ffffffffULL) { 2946 u32 rx_vec = (v1 & 0xffffffff); 2947 2948 for (i = 0; i < np->num_rx_rings; i++) { 2949 struct rx_ring_info *rp = &np->rx_rings[i]; 2950 2951 if (rx_vec & (1 << rp->rx_channel)) { 2952 int r = niu_rx_error(np, rp); 2953 if (r) { 2954 err = r; 2955 } else { 2956 if (!v0) 2957 nw64(RX_DMA_CTL_STAT(rp->rx_channel), 2958 RX_DMA_CTL_STAT_MEX); 2959 } 2960 } 2961 } 2962 } 2963 if (v1 & 0x7fffffff00000000ULL) { 2964 u32 tx_vec = (v1 >> 32) & 0x7fffffff; 2965 2966 for (i = 0; i < np->num_tx_rings; i++) { 2967 struct tx_ring_info *rp = &np->tx_rings[i]; 2968 2969 if (tx_vec & (1 << rp->tx_channel)) { 2970 int r = niu_tx_error(np, rp); 2971 if (r) 2972 err = r; 2973 } 2974 } 2975 } 2976 if ((v0 | v1) & 0x8000000000000000ULL) { 2977 int r = niu_mif_interrupt(np); 2978 if (r) 2979 err = r; 2980 } 2981 if (v2) { 2982 if (v2 & 0x01ef) { 2983 int r = niu_mac_interrupt(np); 2984 if (r) 2985 err = r; 2986 } 2987 if (v2 & 0x0210) { 2988 int r = niu_device_error(np); 2989 if (r) 2990 err = r; 2991 } 2992 } 2993 2994 if (err) 2995 niu_enable_interrupts(np, 0); 2996 2997 return err; 2998} 2999 3000static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp, 3001 int ldn) 3002{ 3003 struct rxdma_mailbox *mbox = rp->mbox; 3004 u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat); 3005 3006 stat_write = (RX_DMA_CTL_STAT_RCRTHRES | 3007 RX_DMA_CTL_STAT_RCRTO); 3008 nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write); 3009 3010 niudbg(INTR, "%s: rxchan_intr stat[%llx]\n", 3011 np->dev->name, (unsigned long long) stat); 3012} 3013 3014static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp, 3015 int ldn) 3016{ 3017 rp->tx_cs = nr64(TX_CS(rp->tx_channel)); 3018 3019 niudbg(INTR, "%s: txchan_intr cs[%llx]\n", 3020 np->dev->name, (unsigned long long) rp->tx_cs); 3021} 3022 3023static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0) 3024{ 3025 struct niu_parent *parent = np->parent; 3026 u32 rx_vec, tx_vec; 3027 int i; 3028 3029 tx_vec = (v0 >> 32); 3030 rx_vec = (v0 & 0xffffffff); 3031 3032 for (i = 0; i < np->num_rx_rings; i++) { 3033 struct rx_ring_info *rp = &np->rx_rings[i]; 3034 int ldn = LDN_RXDMA(rp->rx_channel); 3035 3036 if (parent->ldg_map[ldn] != ldg) 3037 continue; 3038 3039 nw64(LD_IM0(ldn), LD_IM0_MASK); 3040 if (rx_vec & (1 << rp->rx_channel)) 3041 niu_rxchan_intr(np, rp, ldn); 3042 } 3043 3044 for (i = 0; i < np->num_tx_rings; i++) { 3045 struct tx_ring_info *rp = &np->tx_rings[i]; 3046 int ldn = LDN_TXDMA(rp->tx_channel); 3047 3048 if (parent->ldg_map[ldn] != ldg) 3049 continue; 3050 3051 nw64(LD_IM0(ldn), LD_IM0_MASK); 3052 if (tx_vec & (1 << rp->tx_channel)) 3053 niu_txchan_intr(np, rp, ldn); 3054 } 3055} 3056 3057static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp, 3058 u64 v0, u64 v1, u64 v2) 3059{ 3060 if (likely(netif_rx_schedule_prep(np->dev, &lp->napi))) { 3061 lp->v0 = v0; 3062 lp->v1 = v1; 3063 lp->v2 = v2; 3064 __niu_fastpath_interrupt(np, lp->ldg_num, v0); 3065 __netif_rx_schedule(np->dev, &lp->napi); 3066 } 3067} 3068 3069static irqreturn_t niu_interrupt(int irq, void *dev_id) 3070{ 3071 struct niu_ldg *lp = dev_id; 3072 struct niu *np = lp->np; 3073 int ldg = lp->ldg_num; 3074 unsigned long flags; 3075 u64 v0, v1, v2; 3076 3077 if (netif_msg_intr(np)) 3078 printk(KERN_DEBUG PFX "niu_interrupt() ldg[%p](%d) ", 3079 lp, ldg); 3080 3081 spin_lock_irqsave(&np->lock, flags); 3082 3083 v0 = nr64(LDSV0(ldg)); 3084 v1 = nr64(LDSV1(ldg)); 3085 v2 = nr64(LDSV2(ldg)); 3086 3087 if (netif_msg_intr(np)) 3088 printk("v0[%llx] v1[%llx] v2[%llx]\n", 3089 (unsigned long long) v0, 3090 (unsigned long long) v1, 3091 (unsigned long long) v2); 3092 3093 if (unlikely(!v0 && !v1 && !v2)) { 3094 spin_unlock_irqrestore(&np->lock, flags); 3095 return IRQ_NONE; 3096 } 3097 3098 if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) { 3099 int err = niu_slowpath_interrupt(np, lp, v0, v1, v2); 3100 if (err) 3101 goto out; 3102 } 3103 if (likely(v0 & ~((u64)1 << LDN_MIF))) 3104 niu_schedule_napi(np, lp, v0, v1, v2); 3105 else 3106 niu_ldg_rearm(np, lp, 1); 3107out: 3108 spin_unlock_irqrestore(&np->lock, flags); 3109 3110 return IRQ_HANDLED; 3111} 3112 3113static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp) 3114{ 3115 if (rp->mbox) { 3116 np->ops->free_coherent(np->device, 3117 sizeof(struct rxdma_mailbox), 3118 rp->mbox, rp->mbox_dma); 3119 rp->mbox = NULL; 3120 } 3121 if (rp->rcr) { 3122 np->ops->free_coherent(np->device, 3123 MAX_RCR_RING_SIZE * sizeof(__le64), 3124 rp->rcr, rp->rcr_dma); 3125 rp->rcr = NULL; 3126 rp->rcr_table_size = 0; 3127 rp->rcr_index = 0; 3128 } 3129 if (rp->rbr) { 3130 niu_rbr_free(np, rp); 3131 3132 np->ops->free_coherent(np->device, 3133 MAX_RBR_RING_SIZE * sizeof(__le32), 3134 rp->rbr, rp->rbr_dma); 3135 rp->rbr = NULL; 3136 rp->rbr_table_size = 0; 3137 rp->rbr_index = 0; 3138 } 3139 kfree(rp->rxhash); 3140 rp->rxhash = NULL; 3141} 3142 3143static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp) 3144{ 3145 if (rp->mbox) { 3146 np->ops->free_coherent(np->device, 3147 sizeof(struct txdma_mailbox), 3148 rp->mbox, rp->mbox_dma); 3149 rp->mbox = NULL; 3150 } 3151 if (rp->descr) { 3152 int i; 3153 3154 for (i = 0; i < MAX_TX_RING_SIZE; i++) { 3155 if (rp->tx_buffs[i].skb) 3156 (void) release_tx_packet(np, rp, i); 3157 } 3158 3159 np->ops->free_coherent(np->device, 3160 MAX_TX_RING_SIZE * sizeof(__le64), 3161 rp->descr, rp->descr_dma); 3162 rp->descr = NULL; 3163 rp->pending = 0; 3164 rp->prod = 0; 3165 rp->cons = 0; 3166 rp->wrap_bit = 0; 3167 } 3168} 3169 3170static void niu_free_channels(struct niu *np) 3171{ 3172 int i; 3173 3174 if (np->rx_rings) { 3175 for (i = 0; i < np->num_rx_rings; i++) { 3176 struct rx_ring_info *rp = &np->rx_rings[i]; 3177 3178 niu_free_rx_ring_info(np, rp); 3179 } 3180 kfree(np->rx_rings); 3181 np->rx_rings = NULL; 3182 np->num_rx_rings = 0; 3183 } 3184 3185 if (np->tx_rings) { 3186 for (i = 0; i < np->num_tx_rings; i++) { 3187 struct tx_ring_info *rp = &np->tx_rings[i]; 3188 3189 niu_free_tx_ring_info(np, rp); 3190 } 3191 kfree(np->tx_rings); 3192 np->tx_rings = NULL; 3193 np->num_tx_rings = 0; 3194 } 3195} 3196 3197static int niu_alloc_rx_ring_info(struct niu *np, 3198 struct rx_ring_info *rp) 3199{ 3200 BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64); 3201 3202 rp->rxhash = kzalloc(MAX_RBR_RING_SIZE * sizeof(struct page *), 3203 GFP_KERNEL); 3204 if (!rp->rxhash) 3205 return -ENOMEM; 3206 3207 rp->mbox = np->ops->alloc_coherent(np->device, 3208 sizeof(struct rxdma_mailbox), 3209 &rp->mbox_dma, GFP_KERNEL); 3210 if (!rp->mbox) 3211 return -ENOMEM; 3212 if ((unsigned long)rp->mbox & (64UL - 1)) { 3213 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " 3214 "RXDMA mailbox %p\n", np->dev->name, rp->mbox); 3215 return -EINVAL; 3216 } 3217 3218 rp->rcr = np->ops->alloc_coherent(np->device, 3219 MAX_RCR_RING_SIZE * sizeof(__le64), 3220 &rp->rcr_dma, GFP_KERNEL); 3221 if (!rp->rcr) 3222 return -ENOMEM; 3223 if ((unsigned long)rp->rcr & (64UL - 1)) { 3224 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " 3225 "RXDMA RCR table %p\n", np->dev->name, rp->rcr); 3226 return -EINVAL; 3227 } 3228 rp->rcr_table_size = MAX_RCR_RING_SIZE; 3229 rp->rcr_index = 0; 3230 3231 rp->rbr = np->ops->alloc_coherent(np->device, 3232 MAX_RBR_RING_SIZE * sizeof(__le32), 3233 &rp->rbr_dma, GFP_KERNEL); 3234 if (!rp->rbr) 3235 return -ENOMEM; 3236 if ((unsigned long)rp->rbr & (64UL - 1)) { 3237 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " 3238 "RXDMA RBR table %p\n", np->dev->name, rp->rbr); 3239 return -EINVAL; 3240 } 3241 rp->rbr_table_size = MAX_RBR_RING_SIZE; 3242 rp->rbr_index = 0; 3243 rp->rbr_pending = 0; 3244 3245 return 0; 3246} 3247 3248static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp) 3249{ 3250 int mtu = np->dev->mtu; 3251 3252 /* These values are recommended by the HW designers for fair 3253 * utilization of DRR amongst the rings. 3254 */ 3255 rp->max_burst = mtu + 32; 3256 if (rp->max_burst > 4096) 3257 rp->max_burst = 4096; 3258} 3259 3260static int niu_alloc_tx_ring_info(struct niu *np, 3261 struct tx_ring_info *rp) 3262{ 3263 BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64); 3264 3265 rp->mbox = np->ops->alloc_coherent(np->device, 3266 sizeof(struct txdma_mailbox), 3267 &rp->mbox_dma, GFP_KERNEL); 3268 if (!rp->mbox) 3269 return -ENOMEM; 3270 if ((unsigned long)rp->mbox & (64UL - 1)) { 3271 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " 3272 "TXDMA mailbox %p\n", np->dev->name, rp->mbox); 3273 return -EINVAL; 3274 } 3275 3276 rp->descr = np->ops->alloc_coherent(np->device, 3277 MAX_TX_RING_SIZE * sizeof(__le64), 3278 &rp->descr_dma, GFP_KERNEL); 3279 if (!rp->descr) 3280 return -ENOMEM; 3281 if ((unsigned long)rp->descr & (64UL - 1)) { 3282 dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " 3283 "TXDMA descr table %p\n", np->dev->name, rp->descr); 3284 return -EINVAL; 3285 } 3286 3287 rp->pending = MAX_TX_RING_SIZE; 3288 rp->prod = 0; 3289 rp->cons = 0; 3290 rp->wrap_bit = 0; 3291 3292 /* XXX make these configurable... XXX */ 3293 rp->mark_freq = rp->pending / 4; 3294 3295 niu_set_max_burst(np, rp); 3296 3297 return 0; 3298} 3299 3300static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp) 3301{ 3302 u16 bss; 3303 3304 bss = min(PAGE_SHIFT, 15); 3305 3306 rp->rbr_block_size = 1 << bss; 3307 rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss); 3308 3309 rp->rbr_sizes[0] = 256; 3310 rp->rbr_sizes[1] = 1024; 3311 if (np->dev->mtu > ETH_DATA_LEN) { 3312 switch (PAGE_SIZE) { 3313 case 4 * 1024: 3314 rp->rbr_sizes[2] = 4096; 3315 break; 3316 3317 default: 3318 rp->rbr_sizes[2] = 8192; 3319 break; 3320 } 3321 } else { 3322 rp->rbr_sizes[2] = 2048; 3323 } 3324 rp->rbr_sizes[3] = rp->rbr_block_size; 3325} 3326 3327static int niu_alloc_channels(struct niu *np) 3328{ 3329 struct niu_parent *parent = np->parent; 3330 int first_rx_channel, first_tx_channel; 3331 int i, port, err; 3332 3333 port = np->port; 3334 first_rx_channel = first_tx_channel = 0; 3335 for (i = 0; i < port; i++) { 3336 first_rx_channel += parent->rxchan_per_port[i]; 3337 first_tx_channel += parent->txchan_per_port[i]; 3338 } 3339 3340 np->num_rx_rings = parent->rxchan_per_port[port]; 3341 np->num_tx_rings = parent->txchan_per_port[port]; 3342 3343 np->rx_rings = kzalloc(np->num_rx_rings * sizeof(struct rx_ring_info), 3344 GFP_KERNEL); 3345 err = -ENOMEM; 3346 if (!np->rx_rings) 3347 goto out_err; 3348 3349 for (i = 0; i < np->num_rx_rings; i++) { 3350 struct rx_ring_info *rp = &np->rx_rings[i]; 3351 3352 rp->np = np; 3353 rp->rx_channel = first_rx_channel + i; 3354 3355 err = niu_alloc_rx_ring_info(np, rp); 3356 if (err) 3357 goto out_err; 3358 3359 niu_size_rbr(np, rp); 3360 3361 /* XXX better defaults, configurable, etc... XXX */ 3362 rp->nonsyn_window = 64; 3363 rp->nonsyn_threshold = rp->rcr_table_size - 64; 3364 rp->syn_window = 64; 3365 rp->syn_threshold = rp->rcr_table_size - 64; 3366 rp->rcr_pkt_threshold = 16; 3367 rp->rcr_timeout = 8; 3368 rp->rbr_kick_thresh = RBR_REFILL_MIN; 3369 if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page) 3370 rp->rbr_kick_thresh = rp->rbr_blocks_per_page; 3371 3372 err = niu_rbr_fill(np, rp, GFP_KERNEL); 3373 if (err) 3374 return err; 3375 } 3376 3377 np->tx_rings = kzalloc(np->num_tx_rings * sizeof(struct tx_ring_info), 3378 GFP_KERNEL); 3379 err = -ENOMEM; 3380 if (!np->tx_rings) 3381 goto out_err; 3382 3383 for (i = 0; i < np->num_tx_rings; i++) { 3384 struct tx_ring_info *rp = &np->tx_rings[i]; 3385 3386 rp->np = np; 3387 rp->tx_channel = first_tx_channel + i; 3388 3389 err = niu_alloc_tx_ring_info(np, rp); 3390 if (err) 3391 goto out_err; 3392 } 3393 3394 return 0; 3395 3396out_err: 3397 niu_free_channels(np); 3398 return err; 3399} 3400 3401static int niu_tx_cs_sng_poll(struct niu *np, int channel) 3402{ 3403 int limit = 1000; 3404 3405 while (--limit > 0) { 3406 u64 val = nr64(TX_CS(channel)); 3407 if (val & TX_CS_SNG_STATE) 3408 return 0; 3409 } 3410 return -ENODEV; 3411} 3412 3413static int niu_tx_channel_stop(struct niu *np, int channel) 3414{ 3415 u64 val = nr64(TX_CS(channel)); 3416 3417 val |= TX_CS_STOP_N_GO; 3418 nw64(TX_CS(channel), val); 3419 3420 return niu_tx_cs_sng_poll(np, channel); 3421} 3422 3423static int niu_tx_cs_reset_poll(struct niu *np, int channel) 3424{ 3425 int limit = 1000; 3426 3427 while (--limit > 0) { 3428 u64 val = nr64(TX_CS(channel)); 3429 if (!(val & TX_CS_RST)) 3430 return 0; 3431 } 3432 return -ENODEV; 3433} 3434 3435static int niu_tx_channel_reset(struct niu *np, int channel) 3436{ 3437 u64 val = nr64(TX_CS(channel)); 3438 int err; 3439 3440 val |= TX_CS_RST; 3441 nw64(TX_CS(channel), val); 3442 3443 err = niu_tx_cs_reset_poll(np, channel); 3444 if (!err) 3445 nw64(TX_RING_KICK(channel), 0); 3446 3447 return err; 3448} 3449 3450static int niu_tx_channel_lpage_init(struct niu *np, int channel) 3451{ 3452 u64 val; 3453 3454 nw64(TX_LOG_MASK1(channel), 0); 3455 nw64(TX_LOG_VAL1(channel), 0); 3456 nw64(TX_LOG_MASK2(channel), 0); 3457 nw64(TX_LOG_VAL2(channel), 0); 3458 nw64(TX_LOG_PAGE_RELO1(channel), 0); 3459 nw64(TX_LOG_PAGE_RELO2(channel), 0); 3460 nw64(TX_LOG_PAGE_HDL(channel), 0); 3461 3462 val = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT; 3463 val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1); 3464 nw64(TX_LOG_PAGE_VLD(channel), val); 3465 3466 /* XXX TXDMA 32bit mode? XXX */ 3467 3468 return 0; 3469} 3470 3471static void niu_txc_enable_port(struct niu *np, int on) 3472{ 3473 unsigned long flags; 3474 u64 val, mask; 3475 3476 niu_lock_parent(np, flags); 3477 val = nr64(TXC_CONTROL); 3478 mask = (u64)1 << np->port; 3479 if (on) { 3480 val |= TXC_CONTROL_ENABLE | mask; 3481 } else { 3482 val &= ~mask; 3483 if ((val & ~TXC_CONTROL_ENABLE) == 0) 3484 val &= ~TXC_CONTROL_ENABLE; 3485 } 3486 nw64(TXC_CONTROL, val); 3487 niu_unlock_parent(np, flags); 3488} 3489 3490static void niu_txc_set_imask(struct niu *np, u64 imask) 3491{ 3492 unsigned long flags; 3493 u64 val; 3494 3495 niu_lock_parent(np, flags); 3496 val = nr64(TXC_INT_MASK); 3497 val &= ~TXC_INT_MASK_VAL(np->port); 3498 val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port)); 3499 niu_unlock_parent(np, flags); 3500} 3501 3502static void niu_txc_port_dma_enable(struct niu *np, int on) 3503{ 3504 u64 val = 0; 3505 3506 if (on) { 3507 int i; 3508 3509 for (i = 0; i < np->num_tx_rings; i++) 3510 val |= (1 << np->tx_rings[i].tx_channel); 3511 } 3512 nw64(TXC_PORT_DMA(np->port), val); 3513} 3514 3515static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp) 3516{ 3517 int err, channel = rp->tx_channel; 3518 u64 val, ring_len; 3519 3520 err = niu_tx_channel_stop(np, channel); 3521 if (err) 3522 return err; 3523 3524 err = niu_tx_channel_reset(np, channel); 3525 if (err) 3526 return err; 3527 3528 err = niu_tx_channel_lpage_init(np, channel); 3529 if (err) 3530 return err; 3531 3532 nw64(TXC_DMA_MAX(channel), rp->max_burst); 3533 nw64(TX_ENT_MSK(channel), 0); 3534 3535 if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE | 3536 TX_RNG_CFIG_STADDR)) { 3537 dev_err(np->device, PFX "%s: TX ring channel %d " 3538 "DMA addr (%llx) is not aligned.\n", 3539 np->dev->name, channel, 3540 (unsigned long long) rp->descr_dma); 3541 return -EINVAL; 3542 } 3543 3544 /* The length field in TX_RNG_CFIG is measured in 64-byte 3545 * blocks. rp->pending is the number of TX descriptors in 3546 * our ring, 8 bytes each, thus we divide by 8 bytes more 3547 * to get the proper value the chip wants. 3548 */ 3549 ring_len = (rp->pending / 8); 3550 3551 val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) | 3552 rp->descr_dma); 3553 nw64(TX_RNG_CFIG(channel), val); 3554 3555 if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) || 3556 ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) { 3557 dev_err(np->device, PFX "%s: TX ring channel %d " 3558 "MBOX addr (%llx) is has illegal bits.\n", 3559 np->dev->name, channel, 3560 (unsigned long long) rp->mbox_dma); 3561 return -EINVAL; 3562 } 3563 nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32); 3564 nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR); 3565 3566 nw64(TX_CS(channel), 0); 3567 3568 rp->last_pkt_cnt = 0; 3569 3570 return 0; 3571} 3572 3573static void niu_init_rdc_groups(struct niu *np) 3574{ 3575 struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port]; 3576 int i, first_table_num = tp->first_table_num; 3577 3578 for (i = 0; i < tp->num_tables; i++) { 3579 struct rdc_table *tbl = &tp->tables[i]; 3580 int this_table = first_table_num + i; 3581 int slot; 3582 3583 for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) 3584 nw64(RDC_TBL(this_table, slot), 3585 tbl->rxdma_channel[slot]); 3586 } 3587 3588 nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]); 3589} 3590 3591static void niu_init_drr_weight(struct niu *np) 3592{ 3593 int type = phy_decode(np->parent->port_phy, np->port); 3594 u64 val; 3595 3596 switch (type) { 3597 case PORT_TYPE_10G: 3598 val = PT_DRR_WEIGHT_DEFAULT_10G; 3599 break; 3600 3601 case PORT_TYPE_1G: 3602 default: 3603 val = PT_DRR_WEIGHT_DEFAULT_1G; 3604 break; 3605 } 3606 nw64(PT_DRR_WT(np->port), val); 3607} 3608 3609static int niu_init_hostinfo(struct niu *np) 3610{ 3611 struct niu_parent *parent = np->parent; 3612 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; 3613 int i, err, num_alt = niu_num_alt_addr(np); 3614 int first_rdc_table = tp->first_table_num; 3615 3616 err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); 3617 if (err) 3618 return err; 3619 3620 err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); 3621 if (err) 3622 return err; 3623 3624 for (i = 0; i < num_alt; i++) { 3625 err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1); 3626 if (err) 3627 return err; 3628 } 3629 3630 return 0; 3631} 3632 3633static int niu_rx_channel_reset(struct niu *np, int channel) 3634{ 3635 return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel), 3636 RXDMA_CFIG1_RST, 1000, 10, 3637 "RXDMA_CFIG1"); 3638} 3639 3640static int niu_rx_channel_lpage_init(struct niu *np, int channel) 3641{ 3642 u64 val; 3643 3644 nw64(RX_LOG_MASK1(channel), 0); 3645 nw64(RX_LOG_VAL1(channel), 0); 3646 nw64(RX_LOG_MASK2(channel), 0); 3647 nw64(RX_LOG_VAL2(channel), 0); 3648 nw64(RX_LOG_PAGE_RELO1(channel), 0); 3649 nw64(RX_LOG_PAGE_RELO2(channel), 0); 3650 nw64(RX_LOG_PAGE_HDL(channel), 0); 3651 3652 val = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT; 3653 val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1); 3654 nw64(RX_LOG_PAGE_VLD(channel), val); 3655 3656 return 0; 3657} 3658 3659static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp) 3660{ 3661 u64 val; 3662 3663 val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) | 3664 ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) | 3665 ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) | 3666 ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT)); 3667 nw64(RDC_RED_PARA(rp->rx_channel), val); 3668} 3669 3670static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret) 3671{ 3672 u64 val = 0; 3673 3674 switch (rp->rbr_block_size) { 3675 case 4 * 1024: 3676 val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT); 3677 break; 3678 case 8 * 1024: 3679 val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT); 3680 break; 3681 case 16 * 1024: 3682 val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT); 3683 break; 3684 case 32 * 1024: 3685 val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT); 3686 break; 3687 default: 3688 return -EINVAL; 3689 } 3690 val |= RBR_CFIG_B_VLD2; 3691 switch (rp->rbr_sizes[2]) { 3692 case 2 * 1024: 3693 val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT); 3694 break; 3695 case 4 * 1024: 3696 val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT); 3697 break; 3698 case 8 * 1024: 3699 val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT); 3700 break; 3701 case 16 * 1024: 3702 val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT); 3703 break; 3704 3705 default: 3706 return -EINVAL; 3707 } 3708 val |= RBR_CFIG_B_VLD1; 3709 switch (rp->rbr_sizes[1]) { 3710 case 1 * 1024: 3711 val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT); 3712 break; 3713 case 2 * 1024: 3714 val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT); 3715 break; 3716 case 4 * 1024: 3717 val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT); 3718 break; 3719 case 8 * 1024: 3720 val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT); 3721 break; 3722 3723 default: 3724 return -EINVAL; 3725 } 3726 val |= RBR_CFIG_B_VLD0; 3727 switch (rp->rbr_sizes[0]) { 3728 case 256: 3729 val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT); 3730 break; 3731 case 512: 3732 val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT); 3733 break; 3734 case 1 * 1024: 3735 val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT); 3736 break; 3737 case 2 * 1024: 3738 val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT); 3739 break; 3740 3741 default: 3742 return -EINVAL; 3743 } 3744 3745 *ret = val; 3746 return 0; 3747} 3748 3749static int niu_enable_rx_channel(struct niu *np, int channel, int on) 3750{ 3751 u64 val = nr64(RXDMA_CFIG1(channel)); 3752 int limit; 3753 3754 if (on) 3755 val |= RXDMA_CFIG1_EN; 3756 else 3757 val &= ~RXDMA_CFIG1_EN; 3758 nw64(RXDMA_CFIG1(channel), val); 3759 3760 limit = 1000; 3761 while (--limit > 0) { 3762 if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST) 3763 break; 3764 udelay(10); 3765 } 3766 if (limit <= 0) 3767 return -ENODEV; 3768 return 0; 3769} 3770 3771static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp) 3772{ 3773 int err, channel = rp->rx_channel; 3774 u64 val; 3775 3776 err = niu_rx_channel_reset(np, channel); 3777 if (err) 3778 return err; 3779 3780 err = niu_rx_channel_lpage_init(np, channel); 3781 if (err) 3782 return err; 3783 3784 niu_rx_channel_wred_init(np, rp); 3785 3786 nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY); 3787 nw64(RX_DMA_CTL_STAT(channel), 3788 (RX_DMA_CTL_STAT_MEX | 3789 RX_DMA_CTL_STAT_RCRTHRES | 3790 RX_DMA_CTL_STAT_RCRTO | 3791 RX_DMA_CTL_STAT_RBR_EMPTY)); 3792 nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32); 3793 nw64(RXDMA_CFIG2(channel), (rp->mbox_dma & 0x00000000ffffffc0)); 3794 nw64(RBR_CFIG_A(channel), 3795 ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) | 3796 (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR))); 3797 err = niu_compute_rbr_cfig_b(rp, &val); 3798 if (err) 3799 return err; 3800 nw64(RBR_CFIG_B(channel), val); 3801 nw64(RCRCFIG_A(channel), 3802 ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) | 3803 (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR))); 3804 nw64(RCRCFIG_B(channel), 3805 ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) | 3806 RCRCFIG_B_ENTOUT | 3807 ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT)); 3808 3809 err = niu_enable_rx_channel(np, channel, 1); 3810 if (err) 3811 return err; 3812 3813 nw64(RBR_KICK(channel), rp->rbr_index); 3814 3815 val = nr64(RX_DMA_CTL_STAT(channel)); 3816 val |= RX_DMA_CTL_STAT_RBR_EMPTY; 3817 nw64(RX_DMA_CTL_STAT(channel), val); 3818 3819 return 0; 3820} 3821 3822static int niu_init_rx_channels(struct niu *np) 3823{ 3824 unsigned long flags; 3825 u64 seed = jiffies_64; 3826 int err, i; 3827 3828 niu_lock_parent(np, flags); 3829 nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider); 3830 nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL)); 3831 niu_unlock_parent(np, flags); 3832 3833 /* XXX RXDMA 32bit mode? XXX */ 3834 3835 niu_init_rdc_groups(np); 3836 niu_init_drr_weight(np); 3837 3838 err = niu_init_hostinfo(np); 3839 if (err) 3840 return err; 3841 3842 for (i = 0; i < np->num_rx_rings; i++) { 3843 struct rx_ring_info *rp = &np->rx_rings[i]; 3844 3845 err = niu_init_one_rx_channel(np, rp); 3846 if (err) 3847 return err; 3848 } 3849 3850 return 0; 3851} 3852 3853static int niu_set_ip_frag_rule(struct niu *np) 3854{ 3855 struct niu_parent *parent = np->parent; 3856 struct niu_classifier *cp = &np->clas; 3857 struct niu_tcam_entry *tp; 3858 int index, err; 3859 3860 /* XXX fix this allocation scheme XXX */ 3861 index = cp->tcam_index; 3862 tp = &parent->tcam[index]; 3863 3864 /* Note that the noport bit is the same in both ipv4 and 3865 * ipv6 format TCAM entries. 3866 */ 3867 memset(tp, 0, sizeof(*tp)); 3868 tp->key[1] = TCAM_V4KEY1_NOPORT; 3869 tp->key_mask[1] = TCAM_V4KEY1_NOPORT; 3870 tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET | 3871 ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT)); 3872 err = tcam_write(np, index, tp->key, tp->key_mask); 3873 if (err) 3874 return err; 3875 err = tcam_assoc_write(np, index, tp->assoc_data); 3876 if (err) 3877 return err; 3878 3879 return 0; 3880} 3881 3882static int niu_init_classifier_hw(struct niu *np) 3883{ 3884 struct niu_parent *parent = np->parent; 3885 struct niu_classifier *cp = &np->clas; 3886 int i, err; 3887 3888 nw64(H1POLY, cp->h1_init); 3889 nw64(H2POLY, cp->h2_init); 3890 3891 err = niu_init_hostinfo(np); 3892 if (err) 3893 return err; 3894 3895 for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) { 3896 struct niu_vlan_rdc *vp = &cp->vlan_mappings[i]; 3897 3898 vlan_tbl_write(np, i, np->port, 3899 vp->vlan_pref, vp->rdc_num); 3900 } 3901 3902 for (i = 0; i < cp->num_alt_mac_mappings; i++) { 3903 struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i]; 3904 3905 err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num, 3906 ap->rdc_num, ap->mac_pref); 3907 if (err) 3908 return err; 3909 } 3910 3911 for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) { 3912 int index = i - CLASS_CODE_USER_PROG1; 3913 3914 err = niu_set_tcam_key(np, i, parent->tcam_key[index]); 3915 if (err) 3916 return err; 3917 err = niu_set_flow_key(np, i, parent->flow_key[index]); 3918 if (err) 3919 return err; 3920 } 3921 3922 err = niu_set_ip_frag_rule(np); 3923 if (err) 3924 return err; 3925 3926 tcam_enable(np, 1); 3927 3928 return 0; 3929} 3930 3931static int niu_zcp_write(struct niu *np, int index, u64 *data) 3932{ 3933 nw64(ZCP_RAM_DATA0, data[0]); 3934 nw64(ZCP_RAM_DATA1, data[1]); 3935 nw64(ZCP_RAM_DATA2, data[2]); 3936 nw64(ZCP_RAM_DATA3, data[3]); 3937 nw64(ZCP_RAM_DATA4, data[4]); 3938 nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL); 3939 nw64(ZCP_RAM_ACC, 3940 (ZCP_RAM_ACC_WRITE | 3941 (0 << ZCP_RAM_ACC_ZFCID_SHIFT) | 3942 (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT))); 3943 3944 return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, 3945 1000, 100); 3946} 3947 3948static int niu_zcp_read(struct niu *np, int index, u64 *data) 3949{ 3950 int err; 3951 3952 err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, 3953 1000, 100); 3954 if (err) { 3955 dev_err(np->device, PFX "%s: ZCP read busy won't clear, " 3956 "ZCP_RAM_ACC[%llx]\n", np->dev->name, 3957 (unsigned long long) nr64(ZCP_RAM_ACC)); 3958 return err; 3959 } 3960 3961 nw64(ZCP_RAM_ACC, 3962 (ZCP_RAM_ACC_READ | 3963 (0 << ZCP_RAM_ACC_ZFCID_SHIFT) | 3964 (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT))); 3965 3966 err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, 3967 1000, 100); 3968 if (err) { 3969 dev_err(np->device, PFX "%s: ZCP read busy2 won't clear, " 3970 "ZCP_RAM_ACC[%llx]\n", np->dev->name, 3971 (unsigned long long) nr64(ZCP_RAM_ACC)); 3972 return err; 3973 } 3974 3975 data[0] = nr64(ZCP_RAM_DATA0); 3976 data[1] = nr64(ZCP_RAM_DATA1); 3977 data[2] = nr64(ZCP_RAM_DATA2); 3978 data[3] = nr64(ZCP_RAM_DATA3); 3979 data[4] = nr64(ZCP_RAM_DATA4); 3980 3981 return 0; 3982} 3983 3984static void niu_zcp_cfifo_reset(struct niu *np) 3985{ 3986 u64 val = nr64(RESET_CFIFO); 3987 3988 val |= RESET_CFIFO_RST(np->port); 3989 nw64(RESET_CFIFO, val); 3990 udelay(10); 3991 3992 val &= ~RESET_CFIFO_RST(np->port); 3993 nw64(RESET_CFIFO, val); 3994} 3995 3996static int niu_init_zcp(struct niu *np) 3997{ 3998 u64 data[5], rbuf[5]; 3999 int i, max, err; 4000 4001 if (np->parent->plat_type != PLAT_TYPE_NIU) { 4002 if (np->port == 0 || np->port == 1) 4003 max = ATLAS_P0_P1_CFIFO_ENTRIES; 4004 else 4005 max = ATLAS_P2_P3_CFIFO_ENTRIES; 4006 } else 4007 max = NIU_CFIFO_ENTRIES; 4008 4009 data[0] = 0; 4010 data[1] = 0; 4011 data[2] = 0; 4012 data[3] = 0; 4013 data[4] = 0; 4014 4015 for (i = 0; i < max; i++) { 4016 err = niu_zcp_write(np, i, data); 4017 if (err) 4018 return err; 4019 err = niu_zcp_read(np, i, rbuf); 4020 if (err) 4021 return err; 4022 } 4023 4024 niu_zcp_cfifo_reset(np); 4025 nw64(CFIFO_ECC(np->port), 0); 4026 nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL); 4027 (void) nr64(ZCP_INT_STAT); 4028 nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL); 4029 4030 return 0; 4031} 4032 4033static void niu_ipp_write(struct niu *np, int index, u64 *data) 4034{ 4035 u64 val = nr64_ipp(IPP_CFIG); 4036 4037 nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W); 4038 nw64_ipp(IPP_DFIFO_WR_PTR, index); 4039 nw64_ipp(IPP_DFIFO_WR0, data[0]); 4040 nw64_ipp(IPP_DFIFO_WR1, data[1]); 4041 nw64_ipp(IPP_DFIFO_WR2, data[2]); 4042 nw64_ipp(IPP_DFIFO_WR3, data[3]); 4043 nw64_ipp(IPP_DFIFO_WR4, data[4]); 4044 nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W); 4045} 4046 4047static void niu_ipp_read(struct niu *np, int index, u64 *data) 4048{ 4049 nw64_ipp(IPP_DFIFO_RD_PTR, index); 4050 data[0] = nr64_ipp(IPP_DFIFO_RD0); 4051 data[1] = nr64_ipp(IPP_DFIFO_RD1); 4052 data[2] = nr64_ipp(IPP_DFIFO_RD2); 4053 data[3] = nr64_ipp(IPP_DFIFO_RD3); 4054 data[4] = nr64_ipp(IPP_DFIFO_RD4); 4055} 4056 4057static int niu_ipp_reset(struct niu *np) 4058{ 4059 return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST, 4060 1000, 100, "IPP_CFIG"); 4061} 4062 4063static int niu_init_ipp(struct niu *np) 4064{ 4065 u64 data[5], rbuf[5], val; 4066 int i, max, err; 4067 4068 if (np->parent->plat_type != PLAT_TYPE_NIU) { 4069 if (np->port == 0 || np->port == 1) 4070 max = ATLAS_P0_P1_DFIFO_ENTRIES; 4071 else 4072 max = ATLAS_P2_P3_DFIFO_ENTRIES; 4073 } else 4074 max = NIU_DFIFO_ENTRIES; 4075 4076 data[0] = 0; 4077 data[1] = 0; 4078 data[2] = 0; 4079 data[3] = 0; 4080 data[4] = 0; 4081 4082 for (i = 0; i < max; i++) { 4083 niu_ipp_write(np, i, data); 4084 niu_ipp_read(np, i, rbuf); 4085 } 4086 4087 (void) nr64_ipp(IPP_INT_STAT); 4088 (void) nr64_ipp(IPP_INT_STAT); 4089 4090 err = niu_ipp_reset(np); 4091 if (err) 4092 return err; 4093 4094 (void) nr64_ipp(IPP_PKT_DIS); 4095 (void) nr64_ipp(IPP_BAD_CS_CNT); 4096 (void) nr64_ipp(IPP_ECC); 4097 4098 (void) nr64_ipp(IPP_INT_STAT); 4099 4100 nw64_ipp(IPP_MSK, ~IPP_MSK_ALL); 4101 4102 val = nr64_ipp(IPP_CFIG); 4103 val &= ~IPP_CFIG_IP_MAX_PKT; 4104 val |= (IPP_CFIG_IPP_ENABLE | 4105 IPP_CFIG_DFIFO_ECC_EN | 4106 IPP_CFIG_DROP_BAD_CRC | 4107 IPP_CFIG_CKSUM_EN | 4108 (0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT)); 4109 nw64_ipp(IPP_CFIG, val); 4110 4111 return 0; 4112} 4113 4114static void niu_handle_led(struct niu *np, int status) 4115{ 4116 u64 val; 4117 val = nr64_mac(XMAC_CONFIG); 4118 4119 if ((np->flags & NIU_FLAGS_10G) != 0 && 4120 (np->flags & NIU_FLAGS_FIBER) != 0) { 4121 if (status) { 4122 val |= XMAC_CONFIG_LED_POLARITY; 4123 val &= ~XMAC_CONFIG_FORCE_LED_ON; 4124 } else { 4125 val |= XMAC_CONFIG_FORCE_LED_ON; 4126 val &= ~XMAC_CONFIG_LED_POLARITY; 4127 } 4128 } 4129 4130 nw64_mac(XMAC_CONFIG, val); 4131} 4132 4133static void niu_init_xif_xmac(struct niu *np) 4134{ 4135 struct niu_link_config *lp = &np->link_config; 4136 u64 val; 4137 4138 val = nr64_mac(XMAC_CONFIG); 4139 val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC; 4140 4141 val |= XMAC_CONFIG_TX_OUTPUT_EN; 4142 4143 if (lp->loopback_mode == LOOPBACK_MAC) { 4144 val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC; 4145 val |= XMAC_CONFIG_LOOPBACK; 4146 } else { 4147 val &= ~XMAC_CONFIG_LOOPBACK; 4148 } 4149 4150 if (np->flags & NIU_FLAGS_10G) { 4151 val &= ~XMAC_CONFIG_LFS_DISABLE; 4152 } else { 4153 val |= XMAC_CONFIG_LFS_DISABLE; 4154 if (!(np->flags & NIU_FLAGS_FIBER)) 4155 val |= XMAC_CONFIG_1G_PCS_BYPASS; 4156 else 4157 val &= ~XMAC_CONFIG_1G_PCS_BYPASS; 4158 } 4159 4160 val &= ~XMAC_CONFIG_10G_XPCS_BYPASS; 4161 4162 if (lp->active_speed == SPEED_100) 4163 val |= XMAC_CONFIG_SEL_CLK_25MHZ; 4164 else 4165 val &= ~XMAC_CONFIG_SEL_CLK_25MHZ; 4166 4167 nw64_mac(XMAC_CONFIG, val); 4168 4169 val = nr64_mac(XMAC_CONFIG); 4170 val &= ~XMAC_CONFIG_MODE_MASK; 4171 if (np->flags & NIU_FLAGS_10G) { 4172 val |= XMAC_CONFIG_MODE_XGMII; 4173 } else { 4174 if (lp->active_speed == SPEED_100) 4175 val |= XMAC_CONFIG_MODE_MII; 4176 else 4177 val |= XMAC_CONFIG_MODE_GMII; 4178 } 4179 4180 nw64_mac(XMAC_CONFIG, val); 4181} 4182 4183static void niu_init_xif_bmac(struct niu *np) 4184{ 4185 struct niu_link_config *lp = &np->link_config; 4186 u64 val; 4187 4188 val = BMAC_XIF_CONFIG_TX_OUTPUT_EN; 4189 4190 if (lp->loopback_mode == LOOPBACK_MAC) 4191 val |= BMAC_XIF_CONFIG_MII_LOOPBACK; 4192 else 4193 val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK; 4194 4195 if (lp->active_speed == SPEED_1000) 4196 val |= BMAC_XIF_CONFIG_GMII_MODE; 4197 else 4198 val &= ~BMAC_XIF_CONFIG_GMII_MODE; 4199 4200 val &= ~(BMAC_XIF_CONFIG_LINK_LED | 4201 BMAC_XIF_CONFIG_LED_POLARITY); 4202 4203 if (!(np->flags & NIU_FLAGS_10G) && 4204 !(np->flags & NIU_FLAGS_FIBER) && 4205 lp->active_speed == SPEED_100) 4206 val |= BMAC_XIF_CONFIG_25MHZ_CLOCK; 4207 else 4208 val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK; 4209 4210 nw64_mac(BMAC_XIF_CONFIG, val); 4211} 4212 4213static void niu_init_xif(struct niu *np) 4214{ 4215 if (np->flags & NIU_FLAGS_XMAC) 4216 niu_init_xif_xmac(np); 4217 else 4218 niu_init_xif_bmac(np); 4219} 4220 4221static void niu_pcs_mii_reset(struct niu *np) 4222{ 4223 u64 val = nr64_pcs(PCS_MII_CTL); 4224 val |= PCS_MII_CTL_RST; 4225 nw64_pcs(PCS_MII_CTL, val); 4226} 4227 4228static void niu_xpcs_reset(struct niu *np) 4229{ 4230 u64 val = nr64_xpcs(XPCS_CONTROL1); 4231 val |= XPCS_CONTROL1_RESET; 4232 nw64_xpcs(XPCS_CONTROL1, val); 4233} 4234 4235static int niu_init_pcs(struct niu *np) 4236{ 4237 struct niu_link_config *lp = &np->link_config; 4238 u64 val; 4239 4240 switch (np->flags & (NIU_FLAGS_10G | NIU_FLAGS_FIBER)) { 4241 case NIU_FLAGS_FIBER: 4242 /* 1G fiber */ 4243 nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE); 4244 nw64_pcs(PCS_DPATH_MODE, 0); 4245 niu_pcs_mii_reset(np); 4246 break; 4247 4248 case NIU_FLAGS_10G: 4249 case NIU_FLAGS_10G | NIU_FLAGS_FIBER: 4250 if (!(np->flags & NIU_FLAGS_XMAC)) 4251 return -EINVAL; 4252 4253 /* 10G copper or fiber */ 4254 val = nr64_mac(XMAC_CONFIG); 4255 val &= ~XMAC_CONFIG_10G_XPCS_BYPASS; 4256 nw64_mac(XMAC_CONFIG, val); 4257 4258 niu_xpcs_reset(np); 4259 4260 val = nr64_xpcs(XPCS_CONTROL1); 4261 if (lp->loopback_mode == LOOPBACK_PHY) 4262 val |= XPCS_CONTROL1_LOOPBACK; 4263 else 4264 val &= ~XPCS_CONTROL1_LOOPBACK; 4265 nw64_xpcs(XPCS_CONTROL1, val); 4266 4267 nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0); 4268 (void) nr64_xpcs(XPCS_SYMERR_CNT01); 4269 (void) nr64_xpcs(XPCS_SYMERR_CNT23); 4270 break; 4271 4272 case 0: 4273 /* 1G copper */ 4274 nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII); 4275 niu_pcs_mii_reset(np); 4276 break; 4277 4278 default: 4279 return -EINVAL; 4280 } 4281 4282 return 0; 4283} 4284 4285static int niu_reset_tx_xmac(struct niu *np) 4286{ 4287 return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST, 4288 (XTXMAC_SW_RST_REG_RS | 4289 XTXMAC_SW_RST_SOFT_RST), 4290 1000, 100, "XTXMAC_SW_RST"); 4291} 4292 4293static int niu_reset_tx_bmac(struct niu *np) 4294{ 4295 int limit; 4296 4297 nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET); 4298 limit = 1000; 4299 while (--limit >= 0) { 4300 if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET)) 4301 break; 4302 udelay(100); 4303 } 4304 if (limit < 0) { 4305 dev_err(np->device, PFX "Port %u TX BMAC would not reset, " 4306 "BTXMAC_SW_RST[%llx]\n", 4307 np->port, 4308 (unsigned long long) nr64_mac(BTXMAC_SW_RST)); 4309 return -ENODEV; 4310 } 4311 4312 return 0; 4313} 4314 4315static int niu_reset_tx_mac(struct niu *np) 4316{ 4317 if (np->flags & NIU_FLAGS_XMAC) 4318 return niu_reset_tx_xmac(np); 4319 else 4320 return niu_reset_tx_bmac(np); 4321} 4322 4323static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max) 4324{ 4325 u64 val; 4326 4327 val = nr64_mac(XMAC_MIN); 4328 val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE | 4329 XMAC_MIN_RX_MIN_PKT_SIZE); 4330 val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT); 4331 val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT); 4332 nw64_mac(XMAC_MIN, val); 4333 4334 nw64_mac(XMAC_MAX, max); 4335 4336 nw64_mac(XTXMAC_STAT_MSK, ~(u64)0); 4337 4338 val = nr64_mac(XMAC_IPG); 4339 if (np->flags & NIU_FLAGS_10G) { 4340 val &= ~XMAC_IPG_IPG_XGMII; 4341 val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT); 4342 } else { 4343 val &= ~XMAC_IPG_IPG_MII_GMII; 4344 val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT); 4345 } 4346 nw64_mac(XMAC_IPG, val); 4347 4348 val = nr64_mac(XMAC_CONFIG); 4349 val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC | 4350 XMAC_CONFIG_STRETCH_MODE | 4351 XMAC_CONFIG_VAR_MIN_IPG_EN | 4352 XMAC_CONFIG_TX_ENABLE); 4353 nw64_mac(XMAC_CONFIG, val); 4354 4355 nw64_mac(TXMAC_FRM_CNT, 0); 4356 nw64_mac(TXMAC_BYTE_CNT, 0); 4357} 4358 4359static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max) 4360{ 4361 u64 val; 4362 4363 nw64_mac(BMAC_MIN_FRAME, min); 4364 nw64_mac(BMAC_MAX_FRAME, max); 4365 4366 nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0); 4367 nw64_mac(BMAC_CTRL_TYPE, 0x8808); 4368 nw64_mac(BMAC_PREAMBLE_SIZE, 7); 4369 4370 val = nr64_mac(BTXMAC_CONFIG); 4371 val &= ~(BTXMAC_CONFIG_FCS_DISABLE | 4372 BTXMAC_CONFIG_ENABLE); 4373 nw64_mac(BTXMAC_CONFIG, val); 4374} 4375 4376static void niu_init_tx_mac(struct niu *np) 4377{ 4378 u64 min, max; 4379 4380 min = 64; 4381 if (np->dev->mtu > ETH_DATA_LEN) 4382 max = 9216; 4383 else 4384 max = 1522; 4385 4386 /* The XMAC_MIN register only accepts values for TX min which 4387 * have the low 3 bits cleared. 4388 */ 4389 BUILD_BUG_ON(min & 0x7); 4390 4391 if (np->flags & NIU_FLAGS_XMAC) 4392 niu_init_tx_xmac(np, min, max); 4393 else 4394 niu_init_tx_bmac(np, min, max); 4395} 4396 4397static int niu_reset_rx_xmac(struct niu *np) 4398{ 4399 int limit; 4400 4401 nw64_mac(XRXMAC_SW_RST, 4402 XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST); 4403 limit = 1000; 4404 while (--limit >= 0) { 4405 if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS | 4406 XRXMAC_SW_RST_SOFT_RST))) 4407 break; 4408 udelay(100); 4409 } 4410 if (limit < 0) { 4411 dev_err(np->device, PFX "Port %u RX XMAC would not reset, " 4412 "XRXMAC_SW_RST[%llx]\n", 4413 np->port, 4414 (unsigned long long) nr64_mac(XRXMAC_SW_RST)); 4415 return -ENODEV; 4416 } 4417 4418 return 0; 4419} 4420 4421static int niu_reset_rx_bmac(struct niu *np) 4422{ 4423 int limit; 4424 4425 nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET); 4426 limit = 1000; 4427 while (--limit >= 0) { 4428 if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET)) 4429 break; 4430 udelay(100); 4431 } 4432 if (limit < 0) { 4433 dev_err(np->device, PFX "Port %u RX BMAC would not reset, " 4434 "BRXMAC_SW_RST[%llx]\n", 4435 np->port, 4436 (unsigned long long) nr64_mac(BRXMAC_SW_RST)); 4437 return -ENODEV; 4438 } 4439 4440 return 0; 4441} 4442 4443static int niu_reset_rx_mac(struct niu *np) 4444{ 4445 if (np->flags & NIU_FLAGS_XMAC) 4446 return niu_reset_rx_xmac(np); 4447 else 4448 return niu_reset_rx_bmac(np); 4449} 4450 4451static void niu_init_rx_xmac(struct niu *np) 4452{ 4453 struct niu_parent *parent = np->parent; 4454 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; 4455 int first_rdc_table = tp->first_table_num; 4456 unsigned long i; 4457 u64 val; 4458 4459 nw64_mac(XMAC_ADD_FILT0, 0); 4460 nw64_mac(XMAC_ADD_FILT1, 0); 4461 nw64_mac(XMAC_ADD_FILT2, 0); 4462 nw64_mac(XMAC_ADD_FILT12_MASK, 0); 4463 nw64_mac(XMAC_ADD_FILT00_MASK, 0); 4464 for (i = 0; i < MAC_NUM_HASH; i++) 4465 nw64_mac(XMAC_HASH_TBL(i), 0); 4466 nw64_mac(XRXMAC_STAT_MSK, ~(u64)0); 4467 niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); 4468 niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); 4469 4470 val = nr64_mac(XMAC_CONFIG); 4471 val &= ~(XMAC_CONFIG_RX_MAC_ENABLE | 4472 XMAC_CONFIG_PROMISCUOUS | 4473 XMAC_CONFIG_PROMISC_GROUP | 4474 XMAC_CONFIG_ERR_CHK_DIS | 4475 XMAC_CONFIG_RX_CRC_CHK_DIS | 4476 XMAC_CONFIG_RESERVED_MULTICAST | 4477 XMAC_CONFIG_RX_CODEV_CHK_DIS | 4478 XMAC_CONFIG_ADDR_FILTER_EN | 4479 XMAC_CONFIG_RCV_PAUSE_ENABLE | 4480 XMAC_CONFIG_STRIP_CRC | 4481 XMAC_CONFIG_PASS_FLOW_CTRL | 4482 XMAC_CONFIG_MAC2IPP_PKT_CNT_EN); 4483 val |= (XMAC_CONFIG_HASH_FILTER_EN); 4484 nw64_mac(XMAC_CONFIG, val); 4485 4486 nw64_mac(RXMAC_BT_CNT, 0); 4487 nw64_mac(RXMAC_BC_FRM_CNT, 0); 4488 nw64_mac(RXMAC_MC_FRM_CNT, 0); 4489 nw64_mac(RXMAC_FRAG_CNT, 0); 4490 nw64_mac(RXMAC_HIST_CNT1, 0); 4491 nw64_mac(RXMAC_HIST_CNT2, 0); 4492 nw64_mac(RXMAC_HIST_CNT3, 0); 4493 nw64_mac(RXMAC_HIST_CNT4, 0); 4494 nw64_mac(RXMAC_HIST_CNT5, 0); 4495 nw64_mac(RXMAC_HIST_CNT6, 0); 4496 nw64_mac(RXMAC_HIST_CNT7, 0); 4497 nw64_mac(RXMAC_MPSZER_CNT, 0); 4498 nw64_mac(RXMAC_CRC_ER_CNT, 0); 4499 nw64_mac(RXMAC_CD_VIO_CNT, 0); 4500 nw64_mac(LINK_FAULT_CNT, 0); 4501} 4502 4503static void niu_init_rx_bmac(struct niu *np) 4504{ 4505 struct niu_parent *parent = np->parent; 4506 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; 4507 int first_rdc_table = tp->first_table_num; 4508 unsigned long i; 4509 u64 val; 4510 4511 nw64_mac(BMAC_ADD_FILT0, 0); 4512 nw64_mac(BMAC_ADD_FILT1, 0); 4513 nw64_mac(BMAC_ADD_FILT2, 0); 4514 nw64_mac(BMAC_ADD_FILT12_MASK, 0); 4515 nw64_mac(BMAC_ADD_FILT00_MASK, 0); 4516 for (i = 0; i < MAC_NUM_HASH; i++) 4517 nw64_mac(BMAC_HASH_TBL(i), 0); 4518 niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); 4519 niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); 4520 nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0); 4521 4522 val = nr64_mac(BRXMAC_CONFIG); 4523 val &= ~(BRXMAC_CONFIG_ENABLE | 4524 BRXMAC_CONFIG_STRIP_PAD | 4525 BRXMAC_CONFIG_STRIP_FCS | 4526 BRXMAC_CONFIG_PROMISC | 4527 BRXMAC_CONFIG_PROMISC_GRP | 4528 BRXMAC_CONFIG_ADDR_FILT_EN | 4529 BRXMAC_CONFIG_DISCARD_DIS); 4530 val |= (BRXMAC_CONFIG_HASH_FILT_EN); 4531 nw64_mac(BRXMAC_CONFIG, val); 4532 4533 val = nr64_mac(BMAC_ADDR_CMPEN); 4534 val |= BMAC_ADDR_CMPEN_EN0; 4535 nw64_mac(BMAC_ADDR_CMPEN, val); 4536} 4537 4538static void niu_init_rx_mac(struct niu *np) 4539{ 4540 niu_set_primary_mac(np, np->dev->dev_addr); 4541 4542 if (np->flags & NIU_FLAGS_XMAC) 4543 niu_init_rx_xmac(np); 4544 else 4545 niu_init_rx_bmac(np); 4546} 4547 4548static void niu_enable_tx_xmac(struct niu *np, int on) 4549{ 4550 u64 val = nr64_mac(XMAC_CONFIG); 4551 4552 if (on) 4553 val |= XMAC_CONFIG_TX_ENABLE; 4554 else 4555 val &= ~XMAC_CONFIG_TX_ENABLE; 4556 nw64_mac(XMAC_CONFIG, val); 4557} 4558 4559static void niu_enable_tx_bmac(struct niu *np, int on) 4560{ 4561 u64 val = nr64_mac(BTXMAC_CONFIG); 4562 4563 if (on) 4564 val |= BTXMAC_CONFIG_ENABLE; 4565 else 4566 val &= ~BTXMAC_CONFIG_ENABLE; 4567 nw64_mac(BTXMAC_CONFIG, val); 4568} 4569 4570static void niu_enable_tx_mac(struct niu *np, int on) 4571{ 4572 if (np->flags & NIU_FLAGS_XMAC) 4573 niu_enable_tx_xmac(np, on); 4574 else 4575 niu_enable_tx_bmac(np, on); 4576} 4577 4578static void niu_enable_rx_xmac(struct niu *np, int on) 4579{ 4580 u64 val = nr64_mac(XMAC_CONFIG); 4581 4582 val &= ~(XMAC_CONFIG_HASH_FILTER_EN | 4583 XMAC_CONFIG_PROMISCUOUS); 4584 4585 if (np->flags & NIU_FLAGS_MCAST) 4586 val |= XMAC_CONFIG_HASH_FILTER_EN; 4587 if (np->flags & NIU_FLAGS_PROMISC) 4588 val |= XMAC_CONFIG_PROMISCUOUS; 4589 4590 if (on) 4591 val |= XMAC_CONFIG_RX_MAC_ENABLE; 4592 else 4593 val &= ~XMAC_CONFIG_RX_MAC_ENABLE; 4594 nw64_mac(XMAC_CONFIG, val); 4595} 4596 4597static void niu_enable_rx_bmac(struct niu *np, int on) 4598{ 4599 u64 val = nr64_mac(BRXMAC_CONFIG); 4600 4601 val &= ~(BRXMAC_CONFIG_HASH_FILT_EN | 4602 BRXMAC_CONFIG_PROMISC); 4603 4604 if (np->flags & NIU_FLAGS_MCAST) 4605 val |= BRXMAC_CONFIG_HASH_FILT_EN; 4606 if (np->flags & NIU_FLAGS_PROMISC) 4607 val |= BRXMAC_CONFIG_PROMISC; 4608 4609 if (on) 4610 val |= BRXMAC_CONFIG_ENABLE; 4611 else 4612 val &= ~BRXMAC_CONFIG_ENABLE; 4613 nw64_mac(BRXMAC_CONFIG, val); 4614} 4615 4616static void niu_enable_rx_mac(struct niu *np, int on) 4617{ 4618 if (np->flags & NIU_FLAGS_XMAC) 4619 niu_enable_rx_xmac(np, on); 4620 else 4621 niu_enable_rx_bmac(np, on); 4622} 4623 4624static int niu_init_mac(struct niu *np) 4625{ 4626 int err; 4627 4628 niu_init_xif(np); 4629 err = niu_init_pcs(np); 4630 if (err) 4631 return err; 4632 4633 err = niu_reset_tx_mac(np); 4634 if (err) 4635 return err; 4636 niu_init_tx_mac(np); 4637 err = niu_reset_rx_mac(np); 4638 if (err) 4639 return err; 4640 niu_init_rx_mac(np); 4641 4642 /* This looks hookey but the RX MAC reset we just did will 4643 * undo some of the state we setup in niu_init_tx_mac() so we 4644 * have to call it again. In particular, the RX MAC reset will 4645 * set the XMAC_MAX register back to it's default value. 4646 */ 4647 niu_init_tx_mac(np); 4648 niu_enable_tx_mac(np, 1); 4649 4650 niu_enable_rx_mac(np, 1); 4651 4652 return 0; 4653} 4654 4655static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp) 4656{ 4657 (void) niu_tx_channel_stop(np, rp->tx_channel); 4658} 4659 4660static void niu_stop_tx_channels(struct niu *np) 4661{ 4662 int i; 4663 4664 for (i = 0; i < np->num_tx_rings; i++) { 4665 struct tx_ring_info *rp = &np->tx_rings[i]; 4666 4667 niu_stop_one_tx_channel(np, rp); 4668 } 4669} 4670 4671static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp) 4672{ 4673 (void) niu_tx_channel_reset(np, rp->tx_channel); 4674} 4675 4676static void niu_reset_tx_channels(struct niu *np) 4677{ 4678 int i; 4679 4680 for (i = 0; i < np->num_tx_rings; i++) { 4681 struct tx_ring_info *rp = &np->tx_rings[i]; 4682 4683 niu_reset_one_tx_channel(np, rp); 4684 } 4685} 4686 4687static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp) 4688{ 4689 (void) niu_enable_rx_channel(np, rp->rx_channel, 0); 4690} 4691 4692static void niu_stop_rx_channels(struct niu *np) 4693{ 4694 int i; 4695 4696 for (i = 0; i < np->num_rx_rings; i++) { 4697 struct rx_ring_info *rp = &np->rx_rings[i]; 4698 4699 niu_stop_one_rx_channel(np, rp); 4700 } 4701} 4702 4703static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp) 4704{ 4705 int channel = rp->rx_channel; 4706 4707 (void) niu_rx_channel_reset(np, channel); 4708 nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL); 4709 nw64(RX_DMA_CTL_STAT(channel), 0); 4710 (void) niu_enable_rx_channel(np, channel, 0); 4711} 4712 4713static void niu_reset_rx_channels(struct niu *np) 4714{ 4715 int i; 4716 4717 for (i = 0; i < np->num_rx_rings; i++) { 4718 struct rx_ring_info *rp = &np->rx_rings[i]; 4719 4720 niu_reset_one_rx_channel(np, rp); 4721 } 4722} 4723 4724static void niu_disable_ipp(struct niu *np) 4725{ 4726 u64 rd, wr, val; 4727 int limit; 4728 4729 rd = nr64_ipp(IPP_DFIFO_RD_PTR); 4730 wr = nr64_ipp(IPP_DFIFO_WR_PTR); 4731 limit = 100; 4732 while (--limit >= 0 && (rd != wr)) { 4733 rd = nr64_ipp(IPP_DFIFO_RD_PTR); 4734 wr = nr64_ipp(IPP_DFIFO_WR_PTR); 4735 } 4736 if (limit < 0 && 4737 (rd != 0 && wr != 1)) { 4738 dev_err(np->device, PFX "%s: IPP would not quiesce, " 4739 "rd_ptr[%llx] wr_ptr[%llx]\n", 4740 np->dev->name, 4741 (unsigned long long) nr64_ipp(IPP_DFIFO_RD_PTR), 4742 (unsigned long long) nr64_ipp(IPP_DFIFO_WR_PTR)); 4743 } 4744 4745 val = nr64_ipp(IPP_CFIG); 4746 val &= ~(IPP_CFIG_IPP_ENABLE | 4747 IPP_CFIG_DFIFO_ECC_EN | 4748 IPP_CFIG_DROP_BAD_CRC | 4749 IPP_CFIG_CKSUM_EN); 4750 nw64_ipp(IPP_CFIG, val); 4751 4752 (void) niu_ipp_reset(np); 4753} 4754 4755static int niu_init_hw(struct niu *np) 4756{ 4757 int i, err; 4758 4759 niudbg(IFUP, "%s: Initialize TXC\n", np->dev->name); 4760 niu_txc_enable_port(np, 1); 4761 niu_txc_port_dma_enable(np, 1); 4762 niu_txc_set_imask(np, 0); 4763 4764 niudbg(IFUP, "%s: Initialize TX channels\n", np->dev->name); 4765 for (i = 0; i < np->num_tx_rings; i++) { 4766 struct tx_ring_info *rp = &np->tx_rings[i]; 4767 4768 err = niu_init_one_tx_channel(np, rp); 4769 if (err) 4770 return err; 4771 } 4772 4773 niudbg(IFUP, "%s: Initialize RX channels\n", np->dev->name); 4774 err = niu_init_rx_channels(np); 4775 if (err) 4776 goto out_uninit_tx_channels; 4777 4778 niudbg(IFUP, "%s: Initialize classifier\n", np->dev->name); 4779 err = niu_init_classifier_hw(np); 4780 if (err) 4781 goto out_uninit_rx_channels; 4782 4783 niudbg(IFUP, "%s: Initialize ZCP\n", np->dev->name); 4784 err = niu_init_zcp(np); 4785 if (err) 4786 goto out_uninit_rx_channels; 4787 4788 niudbg(IFUP, "%s: Initialize IPP\n", np->dev->name); 4789 err = niu_init_ipp(np); 4790 if (err) 4791 goto out_uninit_rx_channels; 4792 4793 niudbg(IFUP, "%s: Initialize MAC\n", np->dev->name); 4794 err = niu_init_mac(np); 4795 if (err) 4796 goto out_uninit_ipp; 4797 4798 return 0; 4799 4800out_uninit_ipp: 4801 niudbg(IFUP, "%s: Uninit IPP\n", np->dev->name); 4802 niu_disable_ipp(np); 4803 4804out_uninit_rx_channels: 4805 niudbg(IFUP, "%s: Uninit RX channels\n", np->dev->name); 4806 niu_stop_rx_channels(np); 4807 niu_reset_rx_channels(np); 4808 4809out_uninit_tx_channels: 4810 niudbg(IFUP, "%s: Uninit TX channels\n", np->dev->name); 4811 niu_stop_tx_channels(np); 4812 niu_reset_tx_channels(np); 4813 4814 return err; 4815} 4816 4817static void niu_stop_hw(struct niu *np) 4818{ 4819 niudbg(IFDOWN, "%s: Disable interrupts\n", np->dev->name); 4820 niu_enable_interrupts(np, 0); 4821 4822 niudbg(IFDOWN, "%s: Disable RX MAC\n", np->dev->name); 4823 niu_enable_rx_mac(np, 0); 4824 4825 niudbg(IFDOWN, "%s: Disable IPP\n", np->dev->name); 4826 niu_disable_ipp(np); 4827 4828 niudbg(IFDOWN, "%s: Stop TX channels\n", np->dev->name); 4829 niu_stop_tx_channels(np); 4830 4831 niudbg(IFDOWN, "%s: Stop RX channels\n", np->dev->name); 4832 niu_stop_rx_channels(np); 4833 4834 niudbg(IFDOWN, "%s: Reset TX channels\n", np->dev->name); 4835 niu_reset_tx_channels(np); 4836 4837 niudbg(IFDOWN, "%s: Reset RX channels\n", np->dev->name); 4838 niu_reset_rx_channels(np); 4839} 4840 4841static int niu_request_irq(struct niu *np) 4842{ 4843 int i, j, err; 4844 4845 err = 0; 4846 for (i = 0; i < np->num_ldg; i++) { 4847 struct niu_ldg *lp = &np->ldg[i]; 4848 4849 err = request_irq(lp->irq, niu_interrupt, 4850 IRQF_SHARED | IRQF_SAMPLE_RANDOM, 4851 np->dev->name, lp); 4852 if (err) 4853 goto out_free_irqs; 4854 4855 } 4856 4857 return 0; 4858 4859out_free_irqs: 4860 for (j = 0; j < i; j++) { 4861 struct niu_ldg *lp = &np->ldg[j]; 4862 4863 free_irq(lp->irq, lp); 4864 } 4865 return err; 4866} 4867 4868static void niu_free_irq(struct niu *np) 4869{ 4870 int i; 4871 4872 for (i = 0; i < np->num_ldg; i++) { 4873 struct niu_ldg *lp = &np->ldg[i]; 4874 4875 free_irq(lp->irq, lp); 4876 } 4877} 4878 4879static void niu_enable_napi(struct niu *np) 4880{ 4881 int i; 4882 4883 for (i = 0; i < np->num_ldg; i++) 4884 napi_enable(&np->ldg[i].napi); 4885} 4886 4887static void niu_disable_napi(struct niu *np) 4888{ 4889 int i; 4890 4891 for (i = 0; i < np->num_ldg; i++) 4892 napi_disable(&np->ldg[i].napi); 4893} 4894 4895static int niu_open(struct net_device *dev) 4896{ 4897 struct niu *np = netdev_priv(dev); 4898 int err; 4899 4900 netif_carrier_off(dev); 4901 4902 err = niu_alloc_channels(np); 4903 if (err) 4904 goto out_err; 4905 4906 err = niu_enable_interrupts(np, 0); 4907 if (err) 4908 goto out_free_channels; 4909 4910 err = niu_request_irq(np); 4911 if (err) 4912 goto out_free_channels; 4913 4914 niu_enable_napi(np); 4915 4916 spin_lock_irq(&np->lock); 4917 4918 err = niu_init_hw(np); 4919 if (!err) { 4920 init_timer(&np->timer); 4921 np->timer.expires = jiffies + HZ; 4922 np->timer.data = (unsigned long) np; 4923 np->timer.function = niu_timer; 4924 4925 err = niu_enable_interrupts(np, 1); 4926 if (err) 4927 niu_stop_hw(np); 4928 } 4929 4930 spin_unlock_irq(&np->lock); 4931 4932 if (err) { 4933 niu_disable_napi(np); 4934 goto out_free_irq; 4935 } 4936 4937 netif_start_queue(dev); 4938 4939 if (np->link_config.loopback_mode != LOOPBACK_DISABLED) 4940 netif_carrier_on(dev); 4941 4942 add_timer(&np->timer); 4943 4944 return 0; 4945 4946out_free_irq: 4947 niu_free_irq(np); 4948 4949out_free_channels: 4950 niu_free_channels(np); 4951 4952out_err: 4953 return err; 4954} 4955 4956static void niu_full_shutdown(struct niu *np, struct net_device *dev) 4957{ 4958 cancel_work_sync(&np->reset_task); 4959 4960 niu_disable_napi(np); 4961 netif_stop_queue(dev); 4962 4963 del_timer_sync(&np->timer); 4964 4965 spin_lock_irq(&np->lock); 4966 4967 niu_stop_hw(np); 4968 4969 spin_unlock_irq(&np->lock); 4970} 4971 4972static int niu_close(struct net_device *dev) 4973{ 4974 struct niu *np = netdev_priv(dev); 4975 4976 niu_full_shutdown(np, dev); 4977 4978 niu_free_irq(np); 4979 4980 niu_free_channels(np); 4981 4982 niu_handle_led(np, 0); 4983 4984 return 0; 4985} 4986 4987static void niu_sync_xmac_stats(struct niu *np) 4988{ 4989 struct niu_xmac_stats *mp = &np->mac_stats.xmac; 4990 4991 mp->tx_frames += nr64_mac(TXMAC_FRM_CNT); 4992 mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT); 4993 4994 mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT); 4995 mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT); 4996 mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT); 4997 mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT); 4998 mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT); 4999 mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1); 5000 mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2); 5001 mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3); 5002 mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4); 5003 mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5); 5004 mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6); 5005 mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7); 5006 mp->rx_octets += nr64_mac(RXMAC_BT_CNT); 5007 mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT); 5008 mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT); 5009 mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT); 5010} 5011 5012static void niu_sync_bmac_stats(struct niu *np) 5013{ 5014 struct niu_bmac_stats *mp = &np->mac_stats.bmac; 5015 5016 mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT); 5017 mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT); 5018 5019 mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT); 5020 mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT); 5021 mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT); 5022 mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT); 5023} 5024 5025static void niu_sync_mac_stats(struct niu *np) 5026{ 5027 if (np->flags & NIU_FLAGS_XMAC) 5028 niu_sync_xmac_stats(np); 5029 else 5030 niu_sync_bmac_stats(np); 5031} 5032 5033static void niu_get_rx_stats(struct niu *np) 5034{ 5035 unsigned long pkts, dropped, errors, bytes; 5036 int i; 5037 5038 pkts = dropped = errors = bytes = 0; 5039 for (i = 0; i < np->num_rx_rings; i++) { 5040 struct rx_ring_info *rp = &np->rx_rings[i]; 5041 5042 pkts += rp->rx_packets; 5043 bytes += rp->rx_bytes; 5044 dropped += rp->rx_dropped; 5045 errors += rp->rx_errors; 5046 } 5047 np->net_stats.rx_packets = pkts; 5048 np->net_stats.rx_bytes = bytes; 5049 np->net_stats.rx_dropped = dropped; 5050 np->net_stats.rx_errors = errors; 5051} 5052 5053static void niu_get_tx_stats(struct niu *np) 5054{ 5055 unsigned long pkts, errors, bytes; 5056 int i; 5057 5058 pkts = errors = bytes = 0; 5059 for (i = 0; i < np->num_tx_rings; i++) { 5060 struct tx_ring_info *rp = &np->tx_rings[i]; 5061 5062 pkts += rp->tx_packets; 5063 bytes += rp->tx_bytes; 5064 errors += rp->tx_errors; 5065 } 5066 np->net_stats.tx_packets = pkts; 5067 np->net_stats.tx_bytes = bytes; 5068 np->net_stats.tx_errors = errors; 5069} 5070 5071static struct net_device_stats *niu_get_stats(struct net_device *dev) 5072{ 5073 struct niu *np = netdev_priv(dev); 5074 5075 niu_get_rx_stats(np); 5076 niu_get_tx_stats(np); 5077 5078 return &np->net_stats; 5079} 5080 5081static void niu_load_hash_xmac(struct niu *np, u16 *hash) 5082{ 5083 int i; 5084 5085 for (i = 0; i < 16; i++) 5086 nw64_mac(XMAC_HASH_TBL(i), hash[i]); 5087} 5088 5089static void niu_load_hash_bmac(struct niu *np, u16 *hash) 5090{ 5091 int i; 5092 5093 for (i = 0; i < 16; i++) 5094 nw64_mac(BMAC_HASH_TBL(i), hash[i]); 5095} 5096 5097static void niu_load_hash(struct niu *np, u16 *hash) 5098{ 5099 if (np->flags & NIU_FLAGS_XMAC) 5100 niu_load_hash_xmac(np, hash); 5101 else 5102 niu_load_hash_bmac(np, hash); 5103} 5104 5105static void niu_set_rx_mode(struct net_device *dev) 5106{ 5107 struct niu *np = netdev_priv(dev); 5108 int i, alt_cnt, err; 5109 struct dev_addr_list *addr; 5110 unsigned long flags; 5111 u16 hash[16] = { 0, }; 5112 5113 spin_lock_irqsave(&np->lock, flags); 5114 niu_enable_rx_mac(np, 0); 5115 5116 np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC); 5117 if (dev->flags & IFF_PROMISC) 5118 np->flags |= NIU_FLAGS_PROMISC; 5119 if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 0)) 5120 np->flags |= NIU_FLAGS_MCAST; 5121 5122 alt_cnt = dev->uc_count; 5123 if (alt_cnt > niu_num_alt_addr(np)) { 5124 alt_cnt = 0; 5125 np->flags |= NIU_FLAGS_PROMISC; 5126 } 5127 5128 if (alt_cnt) { 5129 int index = 0; 5130 5131 for (addr = dev->uc_list; addr; addr = addr->next) { 5132 err = niu_set_alt_mac(np, index, 5133 addr->da_addr); 5134 if (err) 5135 printk(KERN_WARNING PFX "%s: Error %d " 5136 "adding alt mac %d\n", 5137 dev->name, err, index); 5138 err = niu_enable_alt_mac(np, index, 1); 5139 if (err) 5140 printk(KERN_WARNING PFX "%s: Error %d " 5141 "enabling alt mac %d\n", 5142 dev->name, err, index); 5143 5144 index++; 5145 } 5146 } else { 5147 for (i = 0; i < niu_num_alt_addr(np); i++) { 5148 err = niu_enable_alt_mac(np, i, 0); 5149 if (err) 5150 printk(KERN_WARNING PFX "%s: Error %d " 5151 "disabling alt mac %d\n", 5152 dev->name, err, i); 5153 } 5154 } 5155 if (dev->flags & IFF_ALLMULTI) { 5156 for (i = 0; i < 16; i++) 5157 hash[i] = 0xffff; 5158 } else if (dev->mc_count > 0) { 5159 for (addr = dev->mc_list; addr; addr = addr->next) { 5160 u32 crc = ether_crc_le(ETH_ALEN, addr->da_addr); 5161 5162 crc >>= 24; 5163 hash[crc >> 4] |= (1 << (15 - (crc & 0xf))); 5164 } 5165 } 5166 5167 if (np->flags & NIU_FLAGS_MCAST) 5168 niu_load_hash(np, hash); 5169 5170 niu_enable_rx_mac(np, 1); 5171 spin_unlock_irqrestore(&np->lock, flags); 5172} 5173 5174static int niu_set_mac_addr(struct net_device *dev, void *p) 5175{ 5176 struct niu *np = netdev_priv(dev); 5177 struct sockaddr *addr = p; 5178 unsigned long flags; 5179 5180 if (!is_valid_ether_addr(addr->sa_data)) 5181 return -EINVAL; 5182 5183 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 5184 5185 if (!netif_running(dev)) 5186 return 0; 5187 5188 spin_lock_irqsave(&np->lock, flags); 5189 niu_enable_rx_mac(np, 0); 5190 niu_set_primary_mac(np, dev->dev_addr); 5191 niu_enable_rx_mac(np, 1); 5192 spin_unlock_irqrestore(&np->lock, flags); 5193 5194 return 0; 5195} 5196 5197static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 5198{ 5199 return -EOPNOTSUPP; 5200} 5201 5202static void niu_netif_stop(struct niu *np) 5203{ 5204 np->dev->trans_start = jiffies; /* prevent tx timeout */ 5205 5206 niu_disable_napi(np); 5207 5208 netif_tx_disable(np->dev); 5209} 5210 5211static void niu_netif_start(struct niu *np) 5212{ 5213 /* NOTE: unconditional netif_wake_queue is only appropriate 5214 * so long as all callers are assured to have free tx slots 5215 * (such as after niu_init_hw). 5216 */ 5217 netif_wake_queue(np->dev); 5218 5219 niu_enable_napi(np); 5220 5221 niu_enable_interrupts(np, 1); 5222} 5223 5224static void niu_reset_task(struct work_struct *work) 5225{ 5226 struct niu *np = container_of(work, struct niu, reset_task); 5227 unsigned long flags; 5228 int err; 5229 5230 spin_lock_irqsave(&np->lock, flags); 5231 if (!netif_running(np->dev)) { 5232 spin_unlock_irqrestore(&np->lock, flags); 5233 return; 5234 } 5235 5236 spin_unlock_irqrestore(&np->lock, flags); 5237 5238 del_timer_sync(&np->timer); 5239 5240 niu_netif_stop(np); 5241 5242 spin_lock_irqsave(&np->lock, flags); 5243 5244 niu_stop_hw(np); 5245 5246 err = niu_init_hw(np); 5247 if (!err) { 5248 np->timer.expires = jiffies + HZ; 5249 add_timer(&np->timer); 5250 niu_netif_start(np); 5251 } 5252 5253 spin_unlock_irqrestore(&np->lock, flags); 5254} 5255 5256static void niu_tx_timeout(struct net_device *dev) 5257{ 5258 struct niu *np = netdev_priv(dev); 5259 5260 dev_err(np->device, PFX "%s: Transmit timed out, resetting\n", 5261 dev->name); 5262 5263 schedule_work(&np->reset_task); 5264} 5265 5266static void niu_set_txd(struct tx_ring_info *rp, int index, 5267 u64 mapping, u64 len, u64 mark, 5268 u64 n_frags) 5269{ 5270 __le64 *desc = &rp->descr[index]; 5271 5272 *desc = cpu_to_le64(mark | 5273 (n_frags << TX_DESC_NUM_PTR_SHIFT) | 5274 (len << TX_DESC_TR_LEN_SHIFT) | 5275 (mapping & TX_DESC_SAD)); 5276} 5277 5278static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr, 5279 u64 pad_bytes, u64 len) 5280{ 5281 u16 eth_proto, eth_proto_inner; 5282 u64 csum_bits, l3off, ihl, ret; 5283 u8 ip_proto; 5284 int ipv6; 5285 5286 eth_proto = be16_to_cpu(ehdr->h_proto); 5287 eth_proto_inner = eth_proto; 5288 if (eth_proto == ETH_P_8021Q) { 5289 struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr; 5290 __be16 val = vp->h_vlan_encapsulated_proto; 5291 5292 eth_proto_inner = be16_to_cpu(val); 5293 } 5294 5295 ipv6 = ihl = 0; 5296 switch (skb->protocol) { 5297 case __constant_htons(ETH_P_IP): 5298 ip_proto = ip_hdr(skb)->protocol; 5299 ihl = ip_hdr(skb)->ihl; 5300 break; 5301 case __constant_htons(ETH_P_IPV6): 5302 ip_proto = ipv6_hdr(skb)->nexthdr; 5303 ihl = (40 >> 2); 5304 ipv6 = 1; 5305 break; 5306 default: 5307 ip_proto = ihl = 0; 5308 break; 5309 } 5310 5311 csum_bits = TXHDR_CSUM_NONE; 5312 if (skb->ip_summed == CHECKSUM_PARTIAL) { 5313 u64 start, stuff; 5314 5315 csum_bits = (ip_proto == IPPROTO_TCP ? 5316 TXHDR_CSUM_TCP : 5317 (ip_proto == IPPROTO_UDP ? 5318 TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP)); 5319 5320 start = skb_transport_offset(skb) - 5321 (pad_bytes + sizeof(struct tx_pkt_hdr)); 5322 stuff = start + skb->csum_offset; 5323 5324 csum_bits |= (start / 2) << TXHDR_L4START_SHIFT; 5325 csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT; 5326 } 5327 5328 l3off = skb_network_offset(skb) - 5329 (pad_bytes + sizeof(struct tx_pkt_hdr)); 5330 5331 ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) | 5332 (len << TXHDR_LEN_SHIFT) | 5333 ((l3off / 2) << TXHDR_L3START_SHIFT) | 5334 (ihl << TXHDR_IHL_SHIFT) | 5335 ((eth_proto_inner < 1536) ? TXHDR_LLC : 0) | 5336 ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) | 5337 (ipv6 ? TXHDR_IP_VER : 0) | 5338 csum_bits); 5339 5340 return ret; 5341} 5342 5343static struct tx_ring_info *tx_ring_select(struct niu *np, struct sk_buff *skb) 5344{ 5345 return &np->tx_rings[0]; 5346} 5347 5348static int niu_start_xmit(struct sk_buff *skb, struct net_device *dev) 5349{ 5350 struct niu *np = netdev_priv(dev); 5351 unsigned long align, headroom; 5352 struct tx_ring_info *rp; 5353 struct tx_pkt_hdr *tp; 5354 unsigned int len, nfg; 5355 struct ethhdr *ehdr; 5356 int prod, i, tlen; 5357 u64 mapping, mrk; 5358 5359 rp = tx_ring_select(np, skb); 5360 5361 if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) { 5362 netif_stop_queue(dev); 5363 dev_err(np->device, PFX "%s: BUG! Tx ring full when " 5364 "queue awake!\n", dev->name); 5365 rp->tx_errors++; 5366 return NETDEV_TX_BUSY; 5367 } 5368 5369 if (skb->len < ETH_ZLEN) { 5370 unsigned int pad_bytes = ETH_ZLEN - skb->len; 5371 5372 if (skb_pad(skb, pad_bytes)) 5373 goto out; 5374 skb_put(skb, pad_bytes); 5375 } 5376 5377 len = sizeof(struct tx_pkt_hdr) + 15; 5378 if (skb_headroom(skb) < len) { 5379 struct sk_buff *skb_new; 5380 5381 skb_new = skb_realloc_headroom(skb, len); 5382 if (!skb_new) { 5383 rp->tx_errors++; 5384 goto out_drop; 5385 } 5386 kfree_skb(skb); 5387 skb = skb_new; 5388 } else 5389 skb_orphan(skb); 5390 5391 align = ((unsigned long) skb->data & (16 - 1)); 5392 headroom = align + sizeof(struct tx_pkt_hdr); 5393 5394 ehdr = (struct ethhdr *) skb->data; 5395 tp = (struct tx_pkt_hdr *) skb_push(skb, headroom); 5396 5397 len = skb->len - sizeof(struct tx_pkt_hdr); 5398 tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len)); 5399 tp->resv = 0; 5400 5401 len = skb_headlen(skb); 5402 mapping = np->ops->map_single(np->device, skb->data, 5403 len, DMA_TO_DEVICE); 5404 5405 prod = rp->prod; 5406 5407 rp->tx_buffs[prod].skb = skb; 5408 rp->tx_buffs[prod].mapping = mapping; 5409 5410 mrk = TX_DESC_SOP; 5411 if (++rp->mark_counter == rp->mark_freq) { 5412 rp->mark_counter = 0; 5413 mrk |= TX_DESC_MARK; 5414 rp->mark_pending++; 5415 } 5416 5417 tlen = len; 5418 nfg = skb_shinfo(skb)->nr_frags; 5419 while (tlen > 0) { 5420 tlen -= MAX_TX_DESC_LEN; 5421 nfg++; 5422 } 5423 5424 while (len > 0) { 5425 unsigned int this_len = len; 5426 5427 if (this_len > MAX_TX_DESC_LEN) 5428 this_len = MAX_TX_DESC_LEN; 5429 5430 niu_set_txd(rp, prod, mapping, this_len, mrk, nfg); 5431 mrk = nfg = 0; 5432 5433 prod = NEXT_TX(rp, prod); 5434 mapping += this_len; 5435 len -= this_len; 5436 } 5437 5438 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 5439 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 5440 5441 len = frag->size; 5442 mapping = np->ops->map_page(np->device, frag->page, 5443 frag->page_offset, len, 5444 DMA_TO_DEVICE); 5445 5446 rp->tx_buffs[prod].skb = NULL; 5447 rp->tx_buffs[prod].mapping = mapping; 5448 5449 niu_set_txd(rp, prod, mapping, len, 0, 0); 5450 5451 prod = NEXT_TX(rp, prod); 5452 } 5453 5454 if (prod < rp->prod) 5455 rp->wrap_bit ^= TX_RING_KICK_WRAP; 5456 rp->prod = prod; 5457 5458 nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3)); 5459 5460 if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) { 5461 netif_stop_queue(dev); 5462 if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)) 5463 netif_wake_queue(dev); 5464 } 5465 5466 dev->trans_start = jiffies; 5467 5468out: 5469 return NETDEV_TX_OK; 5470 5471out_drop: 5472 rp->tx_errors++; 5473 kfree_skb(skb); 5474 goto out; 5475} 5476 5477static int niu_change_mtu(struct net_device *dev, int new_mtu) 5478{ 5479 struct niu *np = netdev_priv(dev); 5480 int err, orig_jumbo, new_jumbo; 5481 5482 if (new_mtu < 68 || new_mtu > NIU_MAX_MTU) 5483 return -EINVAL; 5484 5485 orig_jumbo = (dev->mtu > ETH_DATA_LEN); 5486 new_jumbo = (new_mtu > ETH_DATA_LEN); 5487 5488 dev->mtu = new_mtu; 5489 5490 if (!netif_running(dev) || 5491 (orig_jumbo == new_jumbo)) 5492 return 0; 5493 5494 niu_full_shutdown(np, dev); 5495 5496 niu_free_channels(np); 5497 5498 niu_enable_napi(np); 5499 5500 err = niu_alloc_channels(np); 5501 if (err) 5502 return err; 5503 5504 spin_lock_irq(&np->lock); 5505 5506 err = niu_init_hw(np); 5507 if (!err) { 5508 init_timer(&np->timer); 5509 np->timer.expires = jiffies + HZ; 5510 np->timer.data = (unsigned long) np; 5511 np->timer.function = niu_timer; 5512 5513 err = niu_enable_interrupts(np, 1); 5514 if (err) 5515 niu_stop_hw(np); 5516 } 5517 5518 spin_unlock_irq(&np->lock); 5519 5520 if (!err) { 5521 netif_start_queue(dev); 5522 if (np->link_config.loopback_mode != LOOPBACK_DISABLED) 5523 netif_carrier_on(dev); 5524 5525 add_timer(&np->timer); 5526 } 5527 5528 return err; 5529} 5530 5531static void niu_get_drvinfo(struct net_device *dev, 5532 struct ethtool_drvinfo *info) 5533{ 5534 struct niu *np = netdev_priv(dev); 5535 struct niu_vpd *vpd = &np->vpd; 5536 5537 strcpy(info->driver, DRV_MODULE_NAME); 5538 strcpy(info->version, DRV_MODULE_VERSION); 5539 sprintf(info->fw_version, "%d.%d", 5540 vpd->fcode_major, vpd->fcode_minor); 5541 if (np->parent->plat_type != PLAT_TYPE_NIU) 5542 strcpy(info->bus_info, pci_name(np->pdev)); 5543} 5544 5545static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 5546{ 5547 struct niu *np = netdev_priv(dev); 5548 struct niu_link_config *lp; 5549 5550 lp = &np->link_config; 5551 5552 memset(cmd, 0, sizeof(*cmd)); 5553 cmd->phy_address = np->phy_addr; 5554 cmd->supported = lp->supported; 5555 cmd->advertising = lp->advertising; 5556 cmd->autoneg = lp->autoneg; 5557 cmd->speed = lp->active_speed; 5558 cmd->duplex = lp->active_duplex; 5559 5560 return 0; 5561} 5562 5563static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 5564{ 5565 return -EINVAL; 5566} 5567 5568static u32 niu_get_msglevel(struct net_device *dev) 5569{ 5570 struct niu *np = netdev_priv(dev); 5571 return np->msg_enable; 5572} 5573 5574static void niu_set_msglevel(struct net_device *dev, u32 value) 5575{ 5576 struct niu *np = netdev_priv(dev); 5577 np->msg_enable = value; 5578} 5579 5580static int niu_get_eeprom_len(struct net_device *dev) 5581{ 5582 struct niu *np = netdev_priv(dev); 5583 5584 return np->eeprom_len; 5585} 5586 5587static int niu_get_eeprom(struct net_device *dev, 5588 struct ethtool_eeprom *eeprom, u8 *data) 5589{ 5590 struct niu *np = netdev_priv(dev); 5591 u32 offset, len, val; 5592 5593 offset = eeprom->offset; 5594 len = eeprom->len; 5595 5596 if (offset + len < offset) 5597 return -EINVAL; 5598 if (offset >= np->eeprom_len) 5599 return -EINVAL; 5600 if (offset + len > np->eeprom_len) 5601 len = eeprom->len = np->eeprom_len - offset; 5602 5603 if (offset & 3) { 5604 u32 b_offset, b_count; 5605 5606 b_offset = offset & 3; 5607 b_count = 4 - b_offset; 5608 if (b_count > len) 5609 b_count = len; 5610 5611 val = nr64(ESPC_NCR((offset - b_offset) / 4)); 5612 memcpy(data, ((char *)&val) + b_offset, b_count); 5613 data += b_count; 5614 len -= b_count; 5615 offset += b_count; 5616 } 5617 while (len >= 4) { 5618 val = nr64(ESPC_NCR(offset / 4)); 5619 memcpy(data, &val, 4); 5620 data += 4; 5621 len -= 4; 5622 offset += 4; 5623 } 5624 if (len) { 5625 val = nr64(ESPC_NCR(offset / 4)); 5626 memcpy(data, &val, len); 5627 } 5628 return 0; 5629} 5630 5631static const struct { 5632 const char string[ETH_GSTRING_LEN]; 5633} niu_xmac_stat_keys[] = { 5634 { "tx_frames" }, 5635 { "tx_bytes" }, 5636 { "tx_fifo_errors" }, 5637 { "tx_overflow_errors" }, 5638 { "tx_max_pkt_size_errors" }, 5639 { "tx_underflow_errors" }, 5640 { "rx_local_faults" }, 5641 { "rx_remote_faults" }, 5642 { "rx_link_faults" }, 5643 { "rx_align_errors" }, 5644 { "rx_frags" }, 5645 { "rx_mcasts" }, 5646 { "rx_bcasts" }, 5647 { "rx_hist_cnt1" }, 5648 { "rx_hist_cnt2" }, 5649 { "rx_hist_cnt3" }, 5650 { "rx_hist_cnt4" }, 5651 { "rx_hist_cnt5" }, 5652 { "rx_hist_cnt6" }, 5653 { "rx_hist_cnt7" }, 5654 { "rx_octets" }, 5655 { "rx_code_violations" }, 5656 { "rx_len_errors" }, 5657 { "rx_crc_errors" }, 5658 { "rx_underflows" }, 5659 { "rx_overflows" }, 5660 { "pause_off_state" }, 5661 { "pause_on_state" }, 5662 { "pause_received" }, 5663}; 5664 5665#define NUM_XMAC_STAT_KEYS ARRAY_SIZE(niu_xmac_stat_keys) 5666 5667static const struct { 5668 const char string[ETH_GSTRING_LEN]; 5669} niu_bmac_stat_keys[] = { 5670 { "tx_underflow_errors" }, 5671 { "tx_max_pkt_size_errors" }, 5672 { "tx_bytes" }, 5673 { "tx_frames" }, 5674 { "rx_overflows" }, 5675 { "rx_frames" }, 5676 { "rx_align_errors" }, 5677 { "rx_crc_errors" }, 5678 { "rx_len_errors" }, 5679 { "pause_off_state" }, 5680 { "pause_on_state" }, 5681 { "pause_received" }, 5682}; 5683 5684#define NUM_BMAC_STAT_KEYS ARRAY_SIZE(niu_bmac_stat_keys) 5685 5686static const struct { 5687 const char string[ETH_GSTRING_LEN]; 5688} niu_rxchan_stat_keys[] = { 5689 { "rx_channel" }, 5690 { "rx_packets" }, 5691 { "rx_bytes" }, 5692 { "rx_dropped" }, 5693 { "rx_errors" }, 5694}; 5695 5696#define NUM_RXCHAN_STAT_KEYS ARRAY_SIZE(niu_rxchan_stat_keys) 5697 5698static const struct { 5699 const char string[ETH_GSTRING_LEN]; 5700} niu_txchan_stat_keys[] = { 5701 { "tx_channel" }, 5702 { "tx_packets" }, 5703 { "tx_bytes" }, 5704 { "tx_errors" }, 5705}; 5706 5707#define NUM_TXCHAN_STAT_KEYS ARRAY_SIZE(niu_txchan_stat_keys) 5708 5709static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data) 5710{ 5711 struct niu *np = netdev_priv(dev); 5712 int i; 5713 5714 if (stringset != ETH_SS_STATS) 5715 return; 5716 5717 if (np->flags & NIU_FLAGS_XMAC) { 5718 memcpy(data, niu_xmac_stat_keys, 5719 sizeof(niu_xmac_stat_keys)); 5720 data += sizeof(niu_xmac_stat_keys); 5721 } else { 5722 memcpy(data, niu_bmac_stat_keys, 5723 sizeof(niu_bmac_stat_keys)); 5724 data += sizeof(niu_bmac_stat_keys); 5725 } 5726 for (i = 0; i < np->num_rx_rings; i++) { 5727 memcpy(data, niu_rxchan_stat_keys, 5728 sizeof(niu_rxchan_stat_keys)); 5729 data += sizeof(niu_rxchan_stat_keys); 5730 } 5731 for (i = 0; i < np->num_tx_rings; i++) { 5732 memcpy(data, niu_txchan_stat_keys, 5733 sizeof(niu_txchan_stat_keys)); 5734 data += sizeof(niu_txchan_stat_keys); 5735 } 5736} 5737 5738static int niu_get_stats_count(struct net_device *dev) 5739{ 5740 struct niu *np = netdev_priv(dev); 5741 5742 return ((np->flags & NIU_FLAGS_XMAC ? 5743 NUM_XMAC_STAT_KEYS : 5744 NUM_BMAC_STAT_KEYS) + 5745 (np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) + 5746 (np->num_tx_rings * NUM_TXCHAN_STAT_KEYS)); 5747} 5748 5749static void niu_get_ethtool_stats(struct net_device *dev, 5750 struct ethtool_stats *stats, u64 *data) 5751{ 5752 struct niu *np = netdev_priv(dev); 5753 int i; 5754 5755 niu_sync_mac_stats(np); 5756 if (np->flags & NIU_FLAGS_XMAC) { 5757 memcpy(data, &np->mac_stats.xmac, 5758 sizeof(struct niu_xmac_stats)); 5759 data += (sizeof(struct niu_xmac_stats) / sizeof(u64)); 5760 } else { 5761 memcpy(data, &np->mac_stats.bmac, 5762 sizeof(struct niu_bmac_stats)); 5763 data += (sizeof(struct niu_bmac_stats) / sizeof(u64)); 5764 } 5765 for (i = 0; i < np->num_rx_rings; i++) { 5766 struct rx_ring_info *rp = &np->rx_rings[i]; 5767 5768 data[0] = rp->rx_channel; 5769 data[1] = rp->rx_packets; 5770 data[2] = rp->rx_bytes; 5771 data[3] = rp->rx_dropped; 5772 data[4] = rp->rx_errors; 5773 data += 5; 5774 } 5775 for (i = 0; i < np->num_tx_rings; i++) { 5776 struct tx_ring_info *rp = &np->tx_rings[i]; 5777 5778 data[0] = rp->tx_channel; 5779 data[1] = rp->tx_packets; 5780 data[2] = rp->tx_bytes; 5781 data[3] = rp->tx_errors; 5782 data += 4; 5783 } 5784} 5785 5786static u64 niu_led_state_save(struct niu *np) 5787{ 5788 if (np->flags & NIU_FLAGS_XMAC) 5789 return nr64_mac(XMAC_CONFIG); 5790 else 5791 return nr64_mac(BMAC_XIF_CONFIG); 5792} 5793 5794static void niu_led_state_restore(struct niu *np, u64 val) 5795{ 5796 if (np->flags & NIU_FLAGS_XMAC) 5797 nw64_mac(XMAC_CONFIG, val); 5798 else 5799 nw64_mac(BMAC_XIF_CONFIG, val); 5800} 5801 5802static void niu_force_led(struct niu *np, int on) 5803{ 5804 u64 val, reg, bit; 5805 5806 if (np->flags & NIU_FLAGS_XMAC) { 5807 reg = XMAC_CONFIG; 5808 bit = XMAC_CONFIG_FORCE_LED_ON; 5809 } else { 5810 reg = BMAC_XIF_CONFIG; 5811 bit = BMAC_XIF_CONFIG_LINK_LED; 5812 } 5813 5814 val = nr64_mac(reg); 5815 if (on) 5816 val |= bit; 5817 else 5818 val &= ~bit; 5819 nw64_mac(reg, val); 5820} 5821 5822static int niu_phys_id(struct net_device *dev, u32 data) 5823{ 5824 struct niu *np = netdev_priv(dev); 5825 u64 orig_led_state; 5826 int i; 5827 5828 if (!netif_running(dev)) 5829 return -EAGAIN; 5830 5831 if (data == 0) 5832 data = 2; 5833 5834 orig_led_state = niu_led_state_save(np); 5835 for (i = 0; i < (data * 2); i++) { 5836 int on = ((i % 2) == 0); 5837 5838 niu_force_led(np, on); 5839 5840 if (msleep_interruptible(500)) 5841 break; 5842 } 5843 niu_led_state_restore(np, orig_led_state); 5844 5845 return 0; 5846} 5847 5848static const struct ethtool_ops niu_ethtool_ops = { 5849 .get_drvinfo = niu_get_drvinfo, 5850 .get_link = ethtool_op_get_link, 5851 .get_msglevel = niu_get_msglevel, 5852 .set_msglevel = niu_set_msglevel, 5853 .get_eeprom_len = niu_get_eeprom_len, 5854 .get_eeprom = niu_get_eeprom, 5855 .get_settings = niu_get_settings, 5856 .set_settings = niu_set_settings, 5857 .get_strings = niu_get_strings, 5858 .get_stats_count = niu_get_stats_count, 5859 .get_ethtool_stats = niu_get_ethtool_stats, 5860 .phys_id = niu_phys_id, 5861}; 5862 5863static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent, 5864 int ldg, int ldn) 5865{ 5866 if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) 5867 return -EINVAL; 5868 if (ldn < 0 || ldn > LDN_MAX) 5869 return -EINVAL; 5870 5871 parent->ldg_map[ldn] = ldg; 5872 5873 if (np->parent->plat_type == PLAT_TYPE_NIU) { 5874 /* On N2 NIU, the ldn-->ldg assignments are setup and fixed by 5875 * the firmware, and we're not supposed to change them. 5876 * Validate the mapping, because if it's wrong we probably 5877 * won't get any interrupts and that's painful to debug. 5878 */ 5879 if (nr64(LDG_NUM(ldn)) != ldg) { 5880 dev_err(np->device, PFX "Port %u, mis-matched " 5881 "LDG assignment " 5882 "for ldn %d, should be %d is %llu\n", 5883 np->port, ldn, ldg, 5884 (unsigned long long) nr64(LDG_NUM(ldn))); 5885 return -EINVAL; 5886 } 5887 } else 5888 nw64(LDG_NUM(ldn), ldg); 5889 5890 return 0; 5891} 5892 5893static int niu_set_ldg_timer_res(struct niu *np, int res) 5894{ 5895 if (res < 0 || res > LDG_TIMER_RES_VAL) 5896 return -EINVAL; 5897 5898 5899 nw64(LDG_TIMER_RES, res); 5900 5901 return 0; 5902} 5903 5904static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector) 5905{ 5906 if ((ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) || 5907 (func < 0 || func > 3) || 5908 (vector < 0 || vector > 0x1f)) 5909 return -EINVAL; 5910 5911 nw64(SID(ldg), (func << SID_FUNC_SHIFT) | vector); 5912 5913 return 0; 5914} 5915 5916static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr) 5917{ 5918 u64 frame, frame_base = (ESPC_PIO_STAT_READ_START | 5919 (addr << ESPC_PIO_STAT_ADDR_SHIFT)); 5920 int limit; 5921 5922 if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT)) 5923 return -EINVAL; 5924 5925 frame = frame_base; 5926 nw64(ESPC_PIO_STAT, frame); 5927 limit = 64; 5928 do { 5929 udelay(5); 5930 frame = nr64(ESPC_PIO_STAT); 5931 if (frame & ESPC_PIO_STAT_READ_END) 5932 break; 5933 } while (limit--); 5934 if (!(frame & ESPC_PIO_STAT_READ_END)) { 5935 dev_err(np->device, PFX "EEPROM read timeout frame[%llx]\n", 5936 (unsigned long long) frame); 5937 return -ENODEV; 5938 } 5939 5940 frame = frame_base; 5941 nw64(ESPC_PIO_STAT, frame); 5942 limit = 64; 5943 do { 5944 udelay(5); 5945 frame = nr64(ESPC_PIO_STAT); 5946 if (frame & ESPC_PIO_STAT_READ_END) 5947 break; 5948 } while (limit--); 5949 if (!(frame & ESPC_PIO_STAT_READ_END)) { 5950 dev_err(np->device, PFX "EEPROM read timeout frame[%llx]\n", 5951 (unsigned long long) frame); 5952 return -ENODEV; 5953 } 5954 5955 frame = nr64(ESPC_PIO_STAT); 5956 return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT; 5957} 5958 5959static int __devinit niu_pci_eeprom_read16(struct niu *np, u32 off) 5960{ 5961 int err = niu_pci_eeprom_read(np, off); 5962 u16 val; 5963 5964 if (err < 0) 5965 return err; 5966 val = (err << 8); 5967 err = niu_pci_eeprom_read(np, off + 1); 5968 if (err < 0) 5969 return err; 5970 val |= (err & 0xff); 5971 5972 return val; 5973} 5974 5975static int __devinit niu_pci_eeprom_read16_swp(struct niu *np, u32 off) 5976{ 5977 int err = niu_pci_eeprom_read(np, off); 5978 u16 val; 5979 5980 if (err < 0) 5981 return err; 5982 5983 val = (err & 0xff); 5984 err = niu_pci_eeprom_read(np, off + 1); 5985 if (err < 0) 5986 return err; 5987 5988 val |= (err & 0xff) << 8; 5989 5990 return val; 5991} 5992 5993static int __devinit niu_pci_vpd_get_propname(struct niu *np, 5994 u32 off, 5995 char *namebuf, 5996 int namebuf_len) 5997{ 5998 int i; 5999 6000 for (i = 0; i < namebuf_len; i++) { 6001 int err = niu_pci_eeprom_read(np, off + i); 6002 if (err < 0) 6003 return err; 6004 *namebuf++ = err; 6005 if (!err) 6006 break; 6007 } 6008 if (i >= namebuf_len) 6009 return -EINVAL; 6010 6011 return i + 1; 6012} 6013 6014static void __devinit niu_vpd_parse_version(struct niu *np) 6015{ 6016 struct niu_vpd *vpd = &np->vpd; 6017 int len = strlen(vpd->version) + 1; 6018 const char *s = vpd->version; 6019 int i; 6020 6021 for (i = 0; i < len - 5; i++) { 6022 if (!strncmp(s + i, "FCode ", 5)) 6023 break; 6024 } 6025 if (i >= len - 5) 6026 return; 6027 6028 s += i + 5; 6029 sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor); 6030 6031 niudbg(PROBE, "VPD_SCAN: FCODE major(%d) minor(%d)\n", 6032 vpd->fcode_major, vpd->fcode_minor); 6033 if (vpd->fcode_major > NIU_VPD_MIN_MAJOR || 6034 (vpd->fcode_major == NIU_VPD_MIN_MAJOR && 6035 vpd->fcode_minor >= NIU_VPD_MIN_MINOR)) 6036 np->flags |= NIU_FLAGS_VPD_VALID; 6037} 6038 6039/* ESPC_PIO_EN_ENABLE must be set */ 6040static int __devinit niu_pci_vpd_scan_props(struct niu *np, 6041 u32 start, u32 end) 6042{ 6043 unsigned int found_mask = 0; 6044#define FOUND_MASK_MODEL 0x00000001 6045#define FOUND_MASK_BMODEL 0x00000002 6046#define FOUND_MASK_VERS 0x00000004 6047#define FOUND_MASK_MAC 0x00000008 6048#define FOUND_MASK_NMAC 0x00000010 6049#define FOUND_MASK_PHY 0x00000020 6050#define FOUND_MASK_ALL 0x0000003f 6051 6052 niudbg(PROBE, "VPD_SCAN: start[%x] end[%x]\n", 6053 start, end); 6054 while (start < end) { 6055 int len, err, instance, type, prop_len; 6056 char namebuf[64]; 6057 u8 *prop_buf; 6058 int max_len; 6059 6060 if (found_mask == FOUND_MASK_ALL) { 6061 niu_vpd_parse_version(np); 6062 return 1; 6063 } 6064 6065 err = niu_pci_eeprom_read(np, start + 2); 6066 if (err < 0) 6067 return err; 6068 len = err; 6069 start += 3; 6070 6071 instance = niu_pci_eeprom_read(np, start); 6072 type = niu_pci_eeprom_read(np, start + 3); 6073 prop_len = niu_pci_eeprom_read(np, start + 4); 6074 err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64); 6075 if (err < 0) 6076 return err; 6077 6078 prop_buf = NULL; 6079 max_len = 0; 6080 if (!strcmp(namebuf, "model")) { 6081 prop_buf = np->vpd.model; 6082 max_len = NIU_VPD_MODEL_MAX; 6083 found_mask |= FOUND_MASK_MODEL; 6084 } else if (!strcmp(namebuf, "board-model")) { 6085 prop_buf = np->vpd.board_model; 6086 max_len = NIU_VPD_BD_MODEL_MAX; 6087 found_mask |= FOUND_MASK_BMODEL; 6088 } else if (!strcmp(namebuf, "version")) { 6089 prop_buf = np->vpd.version; 6090 max_len = NIU_VPD_VERSION_MAX; 6091 found_mask |= FOUND_MASK_VERS; 6092 } else if (!strcmp(namebuf, "local-mac-address")) { 6093 prop_buf = np->vpd.local_mac; 6094 max_len = ETH_ALEN; 6095 found_mask |= FOUND_MASK_MAC; 6096 } else if (!strcmp(namebuf, "num-mac-addresses")) { 6097 prop_buf = &np->vpd.mac_num; 6098 max_len = 1; 6099 found_mask |= FOUND_MASK_NMAC; 6100 } else if (!strcmp(namebuf, "phy-type")) { 6101 prop_buf = np->vpd.phy_type; 6102 max_len = NIU_VPD_PHY_TYPE_MAX; 6103 found_mask |= FOUND_MASK_PHY; 6104 } 6105 6106 if (max_len && prop_len > max_len) { 6107 dev_err(np->device, PFX "Property '%s' length (%d) is " 6108 "too long.\n", namebuf, prop_len); 6109 return -EINVAL; 6110 } 6111 6112 if (prop_buf) { 6113 u32 off = start + 5 + err; 6114 int i; 6115 6116 niudbg(PROBE, "VPD_SCAN: Reading in property [%s] " 6117 "len[%d]\n", namebuf, prop_len); 6118 for (i = 0; i < prop_len; i++) 6119 *prop_buf++ = niu_pci_eeprom_read(np, off + i); 6120 } 6121 6122 start += len; 6123 } 6124 6125 return 0; 6126} 6127 6128/* ESPC_PIO_EN_ENABLE must be set */ 6129static void __devinit niu_pci_vpd_fetch(struct niu *np, u32 start) 6130{ 6131 u32 offset; 6132 int err; 6133 6134 err = niu_pci_eeprom_read16_swp(np, start + 1); 6135 if (err < 0) 6136 return; 6137 6138 offset = err + 3; 6139 6140 while (start + offset < ESPC_EEPROM_SIZE) { 6141 u32 here = start + offset; 6142 u32 end; 6143 6144 err = niu_pci_eeprom_read(np, here); 6145 if (err != 0x90) 6146 return; 6147 6148 err = niu_pci_eeprom_read16_swp(np, here + 1); 6149 if (err < 0) 6150 return; 6151 6152 here = start + offset + 3; 6153 end = start + offset + err; 6154 6155 offset += err; 6156 6157 err = niu_pci_vpd_scan_props(np, here, end); 6158 if (err < 0 || err == 1) 6159 return; 6160 } 6161} 6162 6163/* ESPC_PIO_EN_ENABLE must be set */ 6164static u32 __devinit niu_pci_vpd_offset(struct niu *np) 6165{ 6166 u32 start = 0, end = ESPC_EEPROM_SIZE, ret; 6167 int err; 6168 6169 while (start < end) { 6170 ret = start; 6171 6172 /* ROM header signature? */ 6173 err = niu_pci_eeprom_read16(np, start + 0); 6174 if (err != 0x55aa) 6175 return 0; 6176 6177 /* Apply offset to PCI data structure. */ 6178 err = niu_pci_eeprom_read16(np, start + 23); 6179 if (err < 0) 6180 return 0; 6181 start += err; 6182 6183 /* Check for "PCIR" signature. */ 6184 err = niu_pci_eeprom_read16(np, start + 0); 6185 if (err != 0x5043) 6186 return 0; 6187 err = niu_pci_eeprom_read16(np, start + 2); 6188 if (err != 0x4952) 6189 return 0; 6190 6191 /* Check for OBP image type. */ 6192 err = niu_pci_eeprom_read(np, start + 20); 6193 if (err < 0) 6194 return 0; 6195 if (err != 0x01) { 6196 err = niu_pci_eeprom_read(np, ret + 2); 6197 if (err < 0) 6198 return 0; 6199 6200 start = ret + (err * 512); 6201 continue; 6202 } 6203 6204 err = niu_pci_eeprom_read16_swp(np, start + 8); 6205 if (err < 0) 6206 return err; 6207 ret += err; 6208 6209 err = niu_pci_eeprom_read(np, ret + 0); 6210 if (err != 0x82) 6211 return 0; 6212 6213 return ret; 6214 } 6215 6216 return 0; 6217} 6218 6219static int __devinit niu_phy_type_prop_decode(struct niu *np, 6220 const char *phy_prop) 6221{ 6222 if (!strcmp(phy_prop, "mif")) { 6223 /* 1G copper, MII */ 6224 np->flags &= ~(NIU_FLAGS_FIBER | 6225 NIU_FLAGS_10G); 6226 np->mac_xcvr = MAC_XCVR_MII; 6227 } else if (!strcmp(phy_prop, "xgf")) { 6228 /* 10G fiber, XPCS */ 6229 np->flags |= (NIU_FLAGS_10G | 6230 NIU_FLAGS_FIBER); 6231 np->mac_xcvr = MAC_XCVR_XPCS; 6232 } else if (!strcmp(phy_prop, "pcs")) { 6233 /* 1G fiber, PCS */ 6234 np->flags &= ~NIU_FLAGS_10G; 6235 np->flags |= NIU_FLAGS_FIBER; 6236 np->mac_xcvr = MAC_XCVR_PCS; 6237 } else if (!strcmp(phy_prop, "xgc")) { 6238 /* 10G copper, XPCS */ 6239 np->flags |= NIU_FLAGS_10G; 6240 np->flags &= ~NIU_FLAGS_FIBER; 6241 np->mac_xcvr = MAC_XCVR_XPCS; 6242 } else { 6243 return -EINVAL; 6244 } 6245 return 0; 6246} 6247 6248static void __devinit niu_pci_vpd_validate(struct niu *np) 6249{ 6250 struct net_device *dev = np->dev; 6251 struct niu_vpd *vpd = &np->vpd; 6252 u8 val8; 6253 6254 if (!is_valid_ether_addr(&vpd->local_mac[0])) { 6255 dev_err(np->device, PFX "VPD MAC invalid, " 6256 "falling back to SPROM.\n"); 6257 6258 np->flags &= ~NIU_FLAGS_VPD_VALID; 6259 return; 6260 } 6261 6262 if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { 6263 dev_err(np->device, PFX "Illegal phy string [%s].\n", 6264 np->vpd.phy_type); 6265 dev_err(np->device, PFX "Falling back to SPROM.\n"); 6266 np->flags &= ~NIU_FLAGS_VPD_VALID; 6267 return; 6268 } 6269 6270 memcpy(dev->perm_addr, vpd->local_mac, ETH_ALEN); 6271 6272 val8 = dev->perm_addr[5]; 6273 dev->perm_addr[5] += np->port; 6274 if (dev->perm_addr[5] < val8) 6275 dev->perm_addr[4]++; 6276 6277 memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); 6278} 6279 6280static int __devinit niu_pci_probe_sprom(struct niu *np) 6281{ 6282 struct net_device *dev = np->dev; 6283 int len, i; 6284 u64 val, sum; 6285 u8 val8; 6286 6287 val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ); 6288 val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT; 6289 len = val / 4; 6290 6291 np->eeprom_len = len; 6292 6293 niudbg(PROBE, "SPROM: Image size %llu\n", (unsigned long long) val); 6294 6295 sum = 0; 6296 for (i = 0; i < len; i++) { 6297 val = nr64(ESPC_NCR(i)); 6298 sum += (val >> 0) & 0xff; 6299 sum += (val >> 8) & 0xff; 6300 sum += (val >> 16) & 0xff; 6301 sum += (val >> 24) & 0xff; 6302 } 6303 niudbg(PROBE, "SPROM: Checksum %x\n", (int)(sum & 0xff)); 6304 if ((sum & 0xff) != 0xab) { 6305 dev_err(np->device, PFX "Bad SPROM checksum " 6306 "(%x, should be 0xab)\n", (int) (sum & 0xff)); 6307 return -EINVAL; 6308 } 6309 6310 val = nr64(ESPC_PHY_TYPE); 6311 switch (np->port) { 6312 case 0: 6313 val8 = (val & ESPC_PHY_TYPE_PORT0) >> 6314 ESPC_PHY_TYPE_PORT0_SHIFT; 6315 break; 6316 case 1: 6317 val8 = (val & ESPC_PHY_TYPE_PORT1) >> 6318 ESPC_PHY_TYPE_PORT1_SHIFT; 6319 break; 6320 case 2: 6321 val8 = (val & ESPC_PHY_TYPE_PORT2) >> 6322 ESPC_PHY_TYPE_PORT2_SHIFT; 6323 break; 6324 case 3: 6325 val8 = (val & ESPC_PHY_TYPE_PORT3) >> 6326 ESPC_PHY_TYPE_PORT3_SHIFT; 6327 break; 6328 default: 6329 dev_err(np->device, PFX "Bogus port number %u\n", 6330 np->port); 6331 return -EINVAL; 6332 } 6333 niudbg(PROBE, "SPROM: PHY type %x\n", val8); 6334 6335 switch (val8) { 6336 case ESPC_PHY_TYPE_1G_COPPER: 6337 /* 1G copper, MII */ 6338 np->flags &= ~(NIU_FLAGS_FIBER | 6339 NIU_FLAGS_10G); 6340 np->mac_xcvr = MAC_XCVR_MII; 6341 break; 6342 6343 case ESPC_PHY_TYPE_1G_FIBER: 6344 /* 1G fiber, PCS */ 6345 np->flags &= ~NIU_FLAGS_10G; 6346 np->flags |= NIU_FLAGS_FIBER; 6347 np->mac_xcvr = MAC_XCVR_PCS; 6348 break; 6349 6350 case ESPC_PHY_TYPE_10G_COPPER: 6351 /* 10G copper, XPCS */ 6352 np->flags |= NIU_FLAGS_10G; 6353 np->flags &= ~NIU_FLAGS_FIBER; 6354 np->mac_xcvr = MAC_XCVR_XPCS; 6355 break; 6356 6357 case ESPC_PHY_TYPE_10G_FIBER: 6358 /* 10G fiber, XPCS */ 6359 np->flags |= (NIU_FLAGS_10G | 6360 NIU_FLAGS_FIBER); 6361 np->mac_xcvr = MAC_XCVR_XPCS; 6362 break; 6363 6364 default: 6365 dev_err(np->device, PFX "Bogus SPROM phy type %u\n", val8); 6366 return -EINVAL; 6367 } 6368 6369 val = nr64(ESPC_MAC_ADDR0); 6370 niudbg(PROBE, "SPROM: MAC_ADDR0[%08llx]\n", 6371 (unsigned long long) val); 6372 dev->perm_addr[0] = (val >> 0) & 0xff; 6373 dev->perm_addr[1] = (val >> 8) & 0xff; 6374 dev->perm_addr[2] = (val >> 16) & 0xff; 6375 dev->perm_addr[3] = (val >> 24) & 0xff; 6376 6377 val = nr64(ESPC_MAC_ADDR1); 6378 niudbg(PROBE, "SPROM: MAC_ADDR1[%08llx]\n", 6379 (unsigned long long) val); 6380 dev->perm_addr[4] = (val >> 0) & 0xff; 6381 dev->perm_addr[5] = (val >> 8) & 0xff; 6382 6383 if (!is_valid_ether_addr(&dev->perm_addr[0])) { 6384 dev_err(np->device, PFX "SPROM MAC address invalid\n"); 6385 dev_err(np->device, PFX "[ \n"); 6386 for (i = 0; i < 6; i++) 6387 printk("%02x ", dev->perm_addr[i]); 6388 printk("]\n"); 6389 return -EINVAL; 6390 } 6391 6392 val8 = dev->perm_addr[5]; 6393 dev->perm_addr[5] += np->port; 6394 if (dev->perm_addr[5] < val8) 6395 dev->perm_addr[4]++; 6396 6397 memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); 6398 6399 val = nr64(ESPC_MOD_STR_LEN); 6400 niudbg(PROBE, "SPROM: MOD_STR_LEN[%llu]\n", 6401 (unsigned long long) val); 6402 if (val >= 8 * 4) 6403 return -EINVAL; 6404 6405 for (i = 0; i < val; i += 4) { 6406 u64 tmp = nr64(ESPC_NCR(5 + (i / 4))); 6407 6408 np->vpd.model[i + 3] = (tmp >> 0) & 0xff; 6409 np->vpd.model[i + 2] = (tmp >> 8) & 0xff; 6410 np->vpd.model[i + 1] = (tmp >> 16) & 0xff; 6411 np->vpd.model[i + 0] = (tmp >> 24) & 0xff; 6412 } 6413 np->vpd.model[val] = '\0'; 6414 6415 val = nr64(ESPC_BD_MOD_STR_LEN); 6416 niudbg(PROBE, "SPROM: BD_MOD_STR_LEN[%llu]\n", 6417 (unsigned long long) val); 6418 if (val >= 4 * 4) 6419 return -EINVAL; 6420 6421 for (i = 0; i < val; i += 4) { 6422 u64 tmp = nr64(ESPC_NCR(14 + (i / 4))); 6423 6424 np->vpd.board_model[i + 3] = (tmp >> 0) & 0xff; 6425 np->vpd.board_model[i + 2] = (tmp >> 8) & 0xff; 6426 np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff; 6427 np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff; 6428 } 6429 np->vpd.board_model[val] = '\0'; 6430 6431 np->vpd.mac_num = 6432 nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL; 6433 niudbg(PROBE, "SPROM: NUM_PORTS_MACS[%d]\n", 6434 np->vpd.mac_num); 6435 6436 return 0; 6437} 6438 6439static int __devinit niu_get_and_validate_port(struct niu *np) 6440{ 6441 struct niu_parent *parent = np->parent; 6442 6443 if (np->port <= 1) 6444 np->flags |= NIU_FLAGS_XMAC; 6445 6446 if (!parent->num_ports) { 6447 if (parent->plat_type == PLAT_TYPE_NIU) { 6448 parent->num_ports = 2; 6449 } else { 6450 parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) & 6451 ESPC_NUM_PORTS_MACS_VAL; 6452 6453 if (!parent->num_ports) 6454 parent->num_ports = 4; 6455 } 6456 } 6457 6458 niudbg(PROBE, "niu_get_and_validate_port: port[%d] num_ports[%d]\n", 6459 np->port, parent->num_ports); 6460 if (np->port >= parent->num_ports) 6461 return -ENODEV; 6462 6463 return 0; 6464} 6465 6466static int __devinit phy_record(struct niu_parent *parent, 6467 struct phy_probe_info *p, 6468 int dev_id_1, int dev_id_2, u8 phy_port, 6469 int type) 6470{ 6471 u32 id = (dev_id_1 << 16) | dev_id_2; 6472 u8 idx; 6473 6474 if (dev_id_1 < 0 || dev_id_2 < 0) 6475 return 0; 6476 if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) { 6477 if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) && 6478 ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011)) 6479 return 0; 6480 } else { 6481 if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R) 6482 return 0; 6483 } 6484 6485 pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n", 6486 parent->index, id, 6487 (type == PHY_TYPE_PMA_PMD ? 6488 "PMA/PMD" : 6489 (type == PHY_TYPE_PCS ? 6490 "PCS" : "MII")), 6491 phy_port); 6492 6493 if (p->cur[type] >= NIU_MAX_PORTS) { 6494 printk(KERN_ERR PFX "Too many PHY ports.\n"); 6495 return -EINVAL; 6496 } 6497 idx = p->cur[type]; 6498 p->phy_id[type][idx] = id; 6499 p->phy_port[type][idx] = phy_port; 6500 p->cur[type] = idx + 1; 6501 return 0; 6502} 6503 6504static int __devinit port_has_10g(struct phy_probe_info *p, int port) 6505{ 6506 int i; 6507 6508 for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) { 6509 if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port) 6510 return 1; 6511 } 6512 for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) { 6513 if (p->phy_port[PHY_TYPE_PCS][i] == port) 6514 return 1; 6515 } 6516 6517 return 0; 6518} 6519 6520static int __devinit count_10g_ports(struct phy_probe_info *p, int *lowest) 6521{ 6522 int port, cnt; 6523 6524 cnt = 0; 6525 *lowest = 32; 6526 for (port = 8; port < 32; port++) { 6527 if (port_has_10g(p, port)) { 6528 if (!cnt) 6529 *lowest = port; 6530 cnt++; 6531 } 6532 } 6533 6534 return cnt; 6535} 6536 6537static int __devinit count_1g_ports(struct phy_probe_info *p, int *lowest) 6538{ 6539 *lowest = 32; 6540 if (p->cur[PHY_TYPE_MII]) 6541 *lowest = p->phy_port[PHY_TYPE_MII][0]; 6542 6543 return p->cur[PHY_TYPE_MII]; 6544} 6545 6546static void __devinit niu_n2_divide_channels(struct niu_parent *parent) 6547{ 6548 int num_ports = parent->num_ports; 6549 int i; 6550 6551 for (i = 0; i < num_ports; i++) { 6552 parent->rxchan_per_port[i] = (16 / num_ports); 6553 parent->txchan_per_port[i] = (16 / num_ports); 6554 6555 pr_info(PFX "niu%d: Port %u [%u RX chans] " 6556 "[%u TX chans]\n", 6557 parent->index, i, 6558 parent->rxchan_per_port[i], 6559 parent->txchan_per_port[i]); 6560 } 6561} 6562 6563static void __devinit niu_divide_channels(struct niu_parent *parent, 6564 int num_10g, int num_1g) 6565{ 6566 int num_ports = parent->num_ports; 6567 int rx_chans_per_10g, rx_chans_per_1g; 6568 int tx_chans_per_10g, tx_chans_per_1g; 6569 int i, tot_rx, tot_tx; 6570 6571 if (!num_10g || !num_1g) { 6572 rx_chans_per_10g = rx_chans_per_1g = 6573 (NIU_NUM_RXCHAN / num_ports); 6574 tx_chans_per_10g = tx_chans_per_1g = 6575 (NIU_NUM_TXCHAN / num_ports); 6576 } else { 6577 rx_chans_per_1g = NIU_NUM_RXCHAN / 8; 6578 rx_chans_per_10g = (NIU_NUM_RXCHAN - 6579 (rx_chans_per_1g * num_1g)) / 6580 num_10g; 6581 6582 tx_chans_per_1g = NIU_NUM_TXCHAN / 6; 6583 tx_chans_per_10g = (NIU_NUM_TXCHAN - 6584 (tx_chans_per_1g * num_1g)) / 6585 num_10g; 6586 } 6587 6588 tot_rx = tot_tx = 0; 6589 for (i = 0; i < num_ports; i++) { 6590 int type = phy_decode(parent->port_phy, i); 6591 6592 if (type == PORT_TYPE_10G) { 6593 parent->rxchan_per_port[i] = rx_chans_per_10g; 6594 parent->txchan_per_port[i] = tx_chans_per_10g; 6595 } else { 6596 parent->rxchan_per_port[i] = rx_chans_per_1g; 6597 parent->txchan_per_port[i] = tx_chans_per_1g; 6598 } 6599 pr_info(PFX "niu%d: Port %u [%u RX chans] " 6600 "[%u TX chans]\n", 6601 parent->index, i, 6602 parent->rxchan_per_port[i], 6603 parent->txchan_per_port[i]); 6604 tot_rx += parent->rxchan_per_port[i]; 6605 tot_tx += parent->txchan_per_port[i]; 6606 } 6607 6608 if (tot_rx > NIU_NUM_RXCHAN) { 6609 printk(KERN_ERR PFX "niu%d: Too many RX channels (%d), " 6610 "resetting to one per port.\n", 6611 parent->index, tot_rx); 6612 for (i = 0; i < num_ports; i++) 6613 parent->rxchan_per_port[i] = 1; 6614 } 6615 if (tot_tx > NIU_NUM_TXCHAN) { 6616 printk(KERN_ERR PFX "niu%d: Too many TX channels (%d), " 6617 "resetting to one per port.\n", 6618 parent->index, tot_tx); 6619 for (i = 0; i < num_ports; i++) 6620 parent->txchan_per_port[i] = 1; 6621 } 6622 if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) { 6623 printk(KERN_WARNING PFX "niu%d: Driver bug, wasted channels, " 6624 "RX[%d] TX[%d]\n", 6625 parent->index, tot_rx, tot_tx); 6626 } 6627} 6628 6629static void __devinit niu_divide_rdc_groups(struct niu_parent *parent, 6630 int num_10g, int num_1g) 6631{ 6632 int i, num_ports = parent->num_ports; 6633 int rdc_group, rdc_groups_per_port; 6634 int rdc_channel_base; 6635 6636 rdc_group = 0; 6637 rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports; 6638 6639 rdc_channel_base = 0; 6640 6641 for (i = 0; i < num_ports; i++) { 6642 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i]; 6643 int grp, num_channels = parent->rxchan_per_port[i]; 6644 int this_channel_offset; 6645 6646 tp->first_table_num = rdc_group; 6647 tp->num_tables = rdc_groups_per_port; 6648 this_channel_offset = 0; 6649 for (grp = 0; grp < tp->num_tables; grp++) { 6650 struct rdc_table *rt = &tp->tables[grp]; 6651 int slot; 6652 6653 pr_info(PFX "niu%d: Port %d RDC tbl(%d) [ ", 6654 parent->index, i, tp->first_table_num + grp); 6655 for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) { 6656 rt->rxdma_channel[slot] = 6657 rdc_channel_base + this_channel_offset; 6658 6659 printk("%d ", rt->rxdma_channel[slot]); 6660 6661 if (++this_channel_offset == num_channels) 6662 this_channel_offset = 0; 6663 } 6664 printk("]\n"); 6665 } 6666 6667 parent->rdc_default[i] = rdc_channel_base; 6668 6669 rdc_channel_base += num_channels; 6670 rdc_group += rdc_groups_per_port; 6671 } 6672} 6673 6674static int __devinit fill_phy_probe_info(struct niu *np, 6675 struct niu_parent *parent, 6676 struct phy_probe_info *info) 6677{ 6678 unsigned long flags; 6679 int port, err; 6680 6681 memset(info, 0, sizeof(*info)); 6682 6683 /* Port 0 to 7 are reserved for onboard Serdes, probe the rest. */ 6684 niu_lock_parent(np, flags); 6685 err = 0; 6686 for (port = 8; port < 32; port++) { 6687 int dev_id_1, dev_id_2; 6688 6689 dev_id_1 = mdio_read(np, port, 6690 NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1); 6691 dev_id_2 = mdio_read(np, port, 6692 NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2); 6693 err = phy_record(parent, info, dev_id_1, dev_id_2, port, 6694 PHY_TYPE_PMA_PMD); 6695 if (err) 6696 break; 6697 dev_id_1 = mdio_read(np, port, 6698 NIU_PCS_DEV_ADDR, MII_PHYSID1); 6699 dev_id_2 = mdio_read(np, port, 6700 NIU_PCS_DEV_ADDR, MII_PHYSID2); 6701 err = phy_record(parent, info, dev_id_1, dev_id_2, port, 6702 PHY_TYPE_PCS); 6703 if (err) 6704 break; 6705 dev_id_1 = mii_read(np, port, MII_PHYSID1); 6706 dev_id_2 = mii_read(np, port, MII_PHYSID2); 6707 err = phy_record(parent, info, dev_id_1, dev_id_2, port, 6708 PHY_TYPE_MII); 6709 if (err) 6710 break; 6711 } 6712 niu_unlock_parent(np, flags); 6713 6714 return err; 6715} 6716 6717static int __devinit walk_phys(struct niu *np, struct niu_parent *parent) 6718{ 6719 struct phy_probe_info *info = &parent->phy_probe_info; 6720 int lowest_10g, lowest_1g; 6721 int num_10g, num_1g; 6722 u32 val; 6723 int err; 6724 6725 err = fill_phy_probe_info(np, parent, info); 6726 if (err) 6727 return err; 6728 6729 num_10g = count_10g_ports(info, &lowest_10g); 6730 num_1g = count_1g_ports(info, &lowest_1g); 6731 6732 switch ((num_10g << 4) | num_1g) { 6733 case 0x24: 6734 if (lowest_1g == 10) 6735 parent->plat_type = PLAT_TYPE_VF_P0; 6736 else if (lowest_1g == 26) 6737 parent->plat_type = PLAT_TYPE_VF_P1; 6738 else 6739 goto unknown_vg_1g_port; 6740 6741 /* fallthru */ 6742 case 0x22: 6743 val = (phy_encode(PORT_TYPE_10G, 0) | 6744 phy_encode(PORT_TYPE_10G, 1) | 6745 phy_encode(PORT_TYPE_1G, 2) | 6746 phy_encode(PORT_TYPE_1G, 3)); 6747 break; 6748 6749 case 0x20: 6750 val = (phy_encode(PORT_TYPE_10G, 0) | 6751 phy_encode(PORT_TYPE_10G, 1)); 6752 break; 6753 6754 case 0x10: 6755 val = phy_encode(PORT_TYPE_10G, np->port); 6756 break; 6757 6758 case 0x14: 6759 if (lowest_1g == 10) 6760 parent->plat_type = PLAT_TYPE_VF_P0; 6761 else if (lowest_1g == 26) 6762 parent->plat_type = PLAT_TYPE_VF_P1; 6763 else 6764 goto unknown_vg_1g_port; 6765 6766 /* fallthru */ 6767 case 0x13: 6768 if ((lowest_10g & 0x7) == 0) 6769 val = (phy_encode(PORT_TYPE_10G, 0) | 6770 phy_encode(PORT_TYPE_1G, 1) | 6771 phy_encode(PORT_TYPE_1G, 2) | 6772 phy_encode(PORT_TYPE_1G, 3)); 6773 else 6774 val = (phy_encode(PORT_TYPE_1G, 0) | 6775 phy_encode(PORT_TYPE_10G, 1) | 6776 phy_encode(PORT_TYPE_1G, 2) | 6777 phy_encode(PORT_TYPE_1G, 3)); 6778 break; 6779 6780 case 0x04: 6781 if (lowest_1g == 10) 6782 parent->plat_type = PLAT_TYPE_VF_P0; 6783 else if (lowest_1g == 26) 6784 parent->plat_type = PLAT_TYPE_VF_P1; 6785 else 6786 goto unknown_vg_1g_port; 6787 6788 val = (phy_encode(PORT_TYPE_1G, 0) | 6789 phy_encode(PORT_TYPE_1G, 1) | 6790 phy_encode(PORT_TYPE_1G, 2) | 6791 phy_encode(PORT_TYPE_1G, 3)); 6792 break; 6793 6794 default: 6795 printk(KERN_ERR PFX "Unsupported port config " 6796 "10G[%d] 1G[%d]\n", 6797 num_10g, num_1g); 6798 return -EINVAL; 6799 } 6800 6801 parent->port_phy = val; 6802 6803 if (parent->plat_type == PLAT_TYPE_NIU) 6804 niu_n2_divide_channels(parent); 6805 else 6806 niu_divide_channels(parent, num_10g, num_1g); 6807 6808 niu_divide_rdc_groups(parent, num_10g, num_1g); 6809 6810 return 0; 6811 6812unknown_vg_1g_port: 6813 printk(KERN_ERR PFX "Cannot identify platform type, 1gport=%d\n", 6814 lowest_1g); 6815 return -EINVAL; 6816} 6817 6818static int __devinit niu_probe_ports(struct niu *np) 6819{ 6820 struct niu_parent *parent = np->parent; 6821 int err, i; 6822 6823 niudbg(PROBE, "niu_probe_ports(): port_phy[%08x]\n", 6824 parent->port_phy); 6825 6826 if (parent->port_phy == PORT_PHY_UNKNOWN) { 6827 err = walk_phys(np, parent); 6828 if (err) 6829 return err; 6830 6831 niu_set_ldg_timer_res(np, 2); 6832 for (i = 0; i <= LDN_MAX; i++) 6833 niu_ldn_irq_enable(np, i, 0); 6834 } 6835 6836 if (parent->port_phy == PORT_PHY_INVALID) 6837 return -EINVAL; 6838 6839 return 0; 6840} 6841 6842static int __devinit niu_classifier_swstate_init(struct niu *np) 6843{ 6844 struct niu_classifier *cp = &np->clas; 6845 6846 niudbg(PROBE, "niu_classifier_swstate_init: num_tcam(%d)\n", 6847 np->parent->tcam_num_entries); 6848 6849 cp->tcam_index = (u16) np->port; 6850 cp->h1_init = 0xffffffff; 6851 cp->h2_init = 0xffff; 6852 6853 return fflp_early_init(np); 6854} 6855 6856static void __devinit niu_link_config_init(struct niu *np) 6857{ 6858 struct niu_link_config *lp = &np->link_config; 6859 6860 lp->advertising = (ADVERTISED_10baseT_Half | 6861 ADVERTISED_10baseT_Full | 6862 ADVERTISED_100baseT_Half | 6863 ADVERTISED_100baseT_Full | 6864 ADVERTISED_1000baseT_Half | 6865 ADVERTISED_1000baseT_Full | 6866 ADVERTISED_10000baseT_Full | 6867 ADVERTISED_Autoneg); 6868 lp->speed = lp->active_speed = SPEED_INVALID; 6869 lp->duplex = lp->active_duplex = DUPLEX_INVALID; 6870#if 0 6871 lp->loopback_mode = LOOPBACK_MAC; 6872 lp->active_speed = SPEED_10000; 6873 lp->active_duplex = DUPLEX_FULL; 6874#else 6875 lp->loopback_mode = LOOPBACK_DISABLED; 6876#endif 6877} 6878 6879static int __devinit niu_init_mac_ipp_pcs_base(struct niu *np) 6880{ 6881 switch (np->port) { 6882 case 0: 6883 np->mac_regs = np->regs + XMAC_PORT0_OFF; 6884 np->ipp_off = 0x00000; 6885 np->pcs_off = 0x04000; 6886 np->xpcs_off = 0x02000; 6887 break; 6888 6889 case 1: 6890 np->mac_regs = np->regs + XMAC_PORT1_OFF; 6891 np->ipp_off = 0x08000; 6892 np->pcs_off = 0x0a000; 6893 np->xpcs_off = 0x08000; 6894 break; 6895 6896 case 2: 6897 np->mac_regs = np->regs + BMAC_PORT2_OFF; 6898 np->ipp_off = 0x04000; 6899 np->pcs_off = 0x0e000; 6900 np->xpcs_off = ~0UL; 6901 break; 6902 6903 case 3: 6904 np->mac_regs = np->regs + BMAC_PORT3_OFF; 6905 np->ipp_off = 0x0c000; 6906 np->pcs_off = 0x12000; 6907 np->xpcs_off = ~0UL; 6908 break; 6909 6910 default: 6911 dev_err(np->device, PFX "Port %u is invalid, cannot " 6912 "compute MAC block offset.\n", np->port); 6913 return -EINVAL; 6914 } 6915 6916 return 0; 6917} 6918 6919static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map) 6920{ 6921 struct msix_entry msi_vec[NIU_NUM_LDG]; 6922 struct niu_parent *parent = np->parent; 6923 struct pci_dev *pdev = np->pdev; 6924 int i, num_irqs, err; 6925 u8 first_ldg; 6926 6927 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port; 6928 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++) 6929 ldg_num_map[i] = first_ldg + i; 6930 6931 num_irqs = (parent->rxchan_per_port[np->port] + 6932 parent->txchan_per_port[np->port] + 6933 (np->port == 0 ? 3 : 1)); 6934 BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports)); 6935 6936retry: 6937 for (i = 0; i < num_irqs; i++) { 6938 msi_vec[i].vector = 0; 6939 msi_vec[i].entry = i; 6940 } 6941 6942 err = pci_enable_msix(pdev, msi_vec, num_irqs); 6943 if (err < 0) { 6944 np->flags &= ~NIU_FLAGS_MSIX; 6945 return; 6946 } 6947 if (err > 0) { 6948 num_irqs = err; 6949 goto retry; 6950 } 6951 6952 np->flags |= NIU_FLAGS_MSIX; 6953 for (i = 0; i < num_irqs; i++) 6954 np->ldg[i].irq = msi_vec[i].vector; 6955 np->num_ldg = num_irqs; 6956} 6957 6958static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map) 6959{ 6960#ifdef CONFIG_SPARC64 6961 struct of_device *op = np->op; 6962 const u32 *int_prop; 6963 int i; 6964 6965 int_prop = of_get_property(op->node, "interrupts", NULL); 6966 if (!int_prop) 6967 return -ENODEV; 6968 6969 for (i = 0; i < op->num_irqs; i++) { 6970 ldg_num_map[i] = int_prop[i]; 6971 np->ldg[i].irq = op->irqs[i]; 6972 } 6973 6974 np->num_ldg = op->num_irqs; 6975 6976 return 0; 6977#else 6978 return -EINVAL; 6979#endif 6980} 6981 6982static int __devinit niu_ldg_init(struct niu *np) 6983{ 6984 struct niu_parent *parent = np->parent; 6985 u8 ldg_num_map[NIU_NUM_LDG]; 6986 int first_chan, num_chan; 6987 int i, err, ldg_rotor; 6988 u8 port; 6989 6990 np->num_ldg = 1; 6991 np->ldg[0].irq = np->dev->irq; 6992 if (parent->plat_type == PLAT_TYPE_NIU) { 6993 err = niu_n2_irq_init(np, ldg_num_map); 6994 if (err) 6995 return err; 6996 } else 6997 niu_try_msix(np, ldg_num_map); 6998 6999 port = np->port; 7000 for (i = 0; i < np->num_ldg; i++) { 7001 struct niu_ldg *lp = &np->ldg[i]; 7002 7003 netif_napi_add(np->dev, &lp->napi, niu_poll, 64); 7004 7005 lp->np = np; 7006 lp->ldg_num = ldg_num_map[i]; 7007 lp->timer = 2; /* XXX */ 7008 7009 /* On N2 NIU the firmware has setup the SID mappings so they go 7010 * to the correct values that will route the LDG to the proper 7011 * interrupt in the NCU interrupt table. 7012 */ 7013 if (np->parent->plat_type != PLAT_TYPE_NIU) { 7014 err = niu_set_ldg_sid(np, lp->ldg_num, port, i); 7015 if (err) 7016 return err; 7017 } 7018 } 7019 7020 /* We adopt the LDG assignment ordering used by the N2 NIU 7021 * 'interrupt' properties because that simplifies a lot of 7022 * things. This ordering is: 7023 * 7024 * MAC 7025 * MIF (if port zero) 7026 * SYSERR (if port zero) 7027 * RX channels 7028 * TX channels 7029 */ 7030 7031 ldg_rotor = 0; 7032 7033 err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor], 7034 LDN_MAC(port)); 7035 if (err) 7036 return err; 7037 7038 ldg_rotor++; 7039 if (ldg_rotor == np->num_ldg) 7040 ldg_rotor = 0; 7041 7042 if (port == 0) { 7043 err = niu_ldg_assign_ldn(np, parent, 7044 ldg_num_map[ldg_rotor], 7045 LDN_MIF); 7046 if (err) 7047 return err; 7048 7049 ldg_rotor++; 7050 if (ldg_rotor == np->num_ldg) 7051 ldg_rotor = 0; 7052 7053 err = niu_ldg_assign_ldn(np, parent, 7054 ldg_num_map[ldg_rotor], 7055 LDN_DEVICE_ERROR); 7056 if (err) 7057 return err; 7058 7059 ldg_rotor++; 7060 if (ldg_rotor == np->num_ldg) 7061 ldg_rotor = 0; 7062 7063 } 7064 7065 first_chan = 0; 7066 for (i = 0; i < port; i++) 7067 first_chan += parent->rxchan_per_port[port]; 7068 num_chan = parent->rxchan_per_port[port]; 7069 7070 for (i = first_chan; i < (first_chan + num_chan); i++) { 7071 err = niu_ldg_assign_ldn(np, parent, 7072 ldg_num_map[ldg_rotor], 7073 LDN_RXDMA(i)); 7074 if (err) 7075 return err; 7076 ldg_rotor++; 7077 if (ldg_rotor == np->num_ldg) 7078 ldg_rotor = 0; 7079 } 7080 7081 first_chan = 0; 7082 for (i = 0; i < port; i++) 7083 first_chan += parent->txchan_per_port[port]; 7084 num_chan = parent->txchan_per_port[port]; 7085 for (i = first_chan; i < (first_chan + num_chan); i++) { 7086 err = niu_ldg_assign_ldn(np, parent, 7087 ldg_num_map[ldg_rotor], 7088 LDN_TXDMA(i)); 7089 if (err) 7090 return err; 7091 ldg_rotor++; 7092 if (ldg_rotor == np->num_ldg) 7093 ldg_rotor = 0; 7094 } 7095 7096 return 0; 7097} 7098 7099static void __devexit niu_ldg_free(struct niu *np) 7100{ 7101 if (np->flags & NIU_FLAGS_MSIX) 7102 pci_disable_msix(np->pdev); 7103} 7104 7105static int __devinit niu_get_of_props(struct niu *np) 7106{ 7107#ifdef CONFIG_SPARC64 7108 struct net_device *dev = np->dev; 7109 struct device_node *dp; 7110 const char *phy_type; 7111 const u8 *mac_addr; 7112 int prop_len; 7113 7114 if (np->parent->plat_type == PLAT_TYPE_NIU) 7115 dp = np->op->node; 7116 else 7117 dp = pci_device_to_OF_node(np->pdev); 7118 7119 phy_type = of_get_property(dp, "phy-type", &prop_len); 7120 if (!phy_type) { 7121 dev_err(np->device, PFX "%s: OF node lacks " 7122 "phy-type property\n", 7123 dp->full_name); 7124 return -EINVAL; 7125 } 7126 7127 if (!strcmp(phy_type, "none")) 7128 return -ENODEV; 7129 7130 strcpy(np->vpd.phy_type, phy_type); 7131 7132 if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { 7133 dev_err(np->device, PFX "%s: Illegal phy string [%s].\n", 7134 dp->full_name, np->vpd.phy_type); 7135 return -EINVAL; 7136 } 7137 7138 mac_addr = of_get_property(dp, "local-mac-address", &prop_len); 7139 if (!mac_addr) { 7140 dev_err(np->device, PFX "%s: OF node lacks " 7141 "local-mac-address property\n", 7142 dp->full_name); 7143 return -EINVAL; 7144 } 7145 if (prop_len != dev->addr_len) { 7146 dev_err(np->device, PFX "%s: OF MAC address prop len (%d) " 7147 "is wrong.\n", 7148 dp->full_name, prop_len); 7149 } 7150 memcpy(dev->perm_addr, mac_addr, dev->addr_len); 7151 if (!is_valid_ether_addr(&dev->perm_addr[0])) { 7152 int i; 7153 7154 dev_err(np->device, PFX "%s: OF MAC address is invalid\n", 7155 dp->full_name); 7156 dev_err(np->device, PFX "%s: [ \n", 7157 dp->full_name); 7158 for (i = 0; i < 6; i++) 7159 printk("%02x ", dev->perm_addr[i]); 7160 printk("]\n"); 7161 return -EINVAL; 7162 } 7163 7164 memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); 7165 7166 return 0; 7167#else 7168 return -EINVAL; 7169#endif 7170} 7171 7172static int __devinit niu_get_invariants(struct niu *np) 7173{ 7174 int err, have_props; 7175 u32 offset; 7176 7177 err = niu_get_of_props(np); 7178 if (err == -ENODEV) 7179 return err; 7180 7181 have_props = !err; 7182 7183 err = niu_get_and_validate_port(np); 7184 if (err) 7185 return err; 7186 7187 err = niu_init_mac_ipp_pcs_base(np); 7188 if (err) 7189 return err; 7190 7191 if (!have_props) { 7192 if (np->parent->plat_type == PLAT_TYPE_NIU) 7193 return -EINVAL; 7194 7195 nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE); 7196 offset = niu_pci_vpd_offset(np); 7197 niudbg(PROBE, "niu_get_invariants: VPD offset [%08x]\n", 7198 offset); 7199 if (offset) 7200 niu_pci_vpd_fetch(np, offset); 7201 nw64(ESPC_PIO_EN, 0); 7202 7203 if (np->flags & NIU_FLAGS_VPD_VALID) 7204 niu_pci_vpd_validate(np); 7205 7206 if (!(np->flags & NIU_FLAGS_VPD_VALID)) { 7207 err = niu_pci_probe_sprom(np); 7208 if (err) 7209 return err; 7210 } 7211 } 7212 7213 err = niu_probe_ports(np); 7214 if (err) 7215 return err; 7216 7217 niu_ldg_init(np); 7218 7219 niu_classifier_swstate_init(np); 7220 niu_link_config_init(np); 7221 7222 err = niu_determine_phy_disposition(np); 7223 if (!err) 7224 err = niu_init_link(np); 7225 7226 return err; 7227} 7228 7229static LIST_HEAD(niu_parent_list); 7230static DEFINE_MUTEX(niu_parent_lock); 7231static int niu_parent_index; 7232 7233static ssize_t show_port_phy(struct device *dev, 7234 struct device_attribute *attr, char *buf) 7235{ 7236 struct platform_device *plat_dev = to_platform_device(dev); 7237 struct niu_parent *p = plat_dev->dev.platform_data; 7238 u32 port_phy = p->port_phy; 7239 char *orig_buf = buf; 7240 int i; 7241 7242 if (port_phy == PORT_PHY_UNKNOWN || 7243 port_phy == PORT_PHY_INVALID) 7244 return 0; 7245 7246 for (i = 0; i < p->num_ports; i++) { 7247 const char *type_str; 7248 int type; 7249 7250 type = phy_decode(port_phy, i); 7251 if (type == PORT_TYPE_10G) 7252 type_str = "10G"; 7253 else 7254 type_str = "1G"; 7255 buf += sprintf(buf, 7256 (i == 0) ? "%s" : " %s", 7257 type_str); 7258 } 7259 buf += sprintf(buf, "\n"); 7260 return buf - orig_buf; 7261} 7262 7263static ssize_t show_plat_type(struct device *dev, 7264 struct device_attribute *attr, char *buf) 7265{ 7266 struct platform_device *plat_dev = to_platform_device(dev); 7267 struct niu_parent *p = plat_dev->dev.platform_data; 7268 const char *type_str; 7269 7270 switch (p->plat_type) { 7271 case PLAT_TYPE_ATLAS: 7272 type_str = "atlas"; 7273 break; 7274 case PLAT_TYPE_NIU: 7275 type_str = "niu"; 7276 break; 7277 case PLAT_TYPE_VF_P0: 7278 type_str = "vf_p0"; 7279 break; 7280 case PLAT_TYPE_VF_P1: 7281 type_str = "vf_p1"; 7282 break; 7283 default: 7284 type_str = "unknown"; 7285 break; 7286 } 7287 7288 return sprintf(buf, "%s\n", type_str); 7289} 7290 7291static ssize_t __show_chan_per_port(struct device *dev, 7292 struct device_attribute *attr, char *buf, 7293 int rx) 7294{ 7295 struct platform_device *plat_dev = to_platform_device(dev); 7296 struct niu_parent *p = plat_dev->dev.platform_data; 7297 char *orig_buf = buf; 7298 u8 *arr; 7299 int i; 7300 7301 arr = (rx ? p->rxchan_per_port : p->txchan_per_port); 7302 7303 for (i = 0; i < p->num_ports; i++) { 7304 buf += sprintf(buf, 7305 (i == 0) ? "%d" : " %d", 7306 arr[i]); 7307 } 7308 buf += sprintf(buf, "\n"); 7309 7310 return buf - orig_buf; 7311} 7312 7313static ssize_t show_rxchan_per_port(struct device *dev, 7314 struct device_attribute *attr, char *buf) 7315{ 7316 return __show_chan_per_port(dev, attr, buf, 1); 7317} 7318 7319static ssize_t show_txchan_per_port(struct device *dev, 7320 struct device_attribute *attr, char *buf) 7321{ 7322 return __show_chan_per_port(dev, attr, buf, 1); 7323} 7324 7325static ssize_t show_num_ports(struct device *dev, 7326 struct device_attribute *attr, char *buf) 7327{ 7328 struct platform_device *plat_dev = to_platform_device(dev); 7329 struct niu_parent *p = plat_dev->dev.platform_data; 7330 7331 return sprintf(buf, "%d\n", p->num_ports); 7332} 7333 7334static struct device_attribute niu_parent_attributes[] = { 7335 __ATTR(port_phy, S_IRUGO, show_port_phy, NULL), 7336 __ATTR(plat_type, S_IRUGO, show_plat_type, NULL), 7337 __ATTR(rxchan_per_port, S_IRUGO, show_rxchan_per_port, NULL), 7338 __ATTR(txchan_per_port, S_IRUGO, show_txchan_per_port, NULL), 7339 __ATTR(num_ports, S_IRUGO, show_num_ports, NULL), 7340 {} 7341}; 7342 7343static struct niu_parent * __devinit niu_new_parent(struct niu *np, 7344 union niu_parent_id *id, 7345 u8 ptype) 7346{ 7347 struct platform_device *plat_dev; 7348 struct niu_parent *p; 7349 int i; 7350 7351 niudbg(PROBE, "niu_new_parent: Creating new parent.\n"); 7352 7353 plat_dev = platform_device_register_simple("niu", niu_parent_index, 7354 NULL, 0); 7355 if (!plat_dev) 7356 return NULL; 7357 7358 for (i = 0; attr_name(niu_parent_attributes[i]); i++) { 7359 int err = device_create_file(&plat_dev->dev, 7360 &niu_parent_attributes[i]); 7361 if (err) 7362 goto fail_unregister; 7363 } 7364 7365 p = kzalloc(sizeof(*p), GFP_KERNEL); 7366 if (!p) 7367 goto fail_unregister; 7368 7369 p->index = niu_parent_index++; 7370 7371 plat_dev->dev.platform_data = p; 7372 p->plat_dev = plat_dev; 7373 7374 memcpy(&p->id, id, sizeof(*id)); 7375 p->plat_type = ptype; 7376 INIT_LIST_HEAD(&p->list); 7377 atomic_set(&p->refcnt, 0); 7378 list_add(&p->list, &niu_parent_list); 7379 spin_lock_init(&p->lock); 7380 7381 p->rxdma_clock_divider = 7500; 7382 7383 p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES; 7384 if (p->plat_type == PLAT_TYPE_NIU) 7385 p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES; 7386 7387 for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) { 7388 int index = i - CLASS_CODE_USER_PROG1; 7389 7390 p->tcam_key[index] = TCAM_KEY_TSEL; 7391 p->flow_key[index] = (FLOW_KEY_IPSA | 7392 FLOW_KEY_IPDA | 7393 FLOW_KEY_PROTO | 7394 (FLOW_KEY_L4_BYTE12 << 7395 FLOW_KEY_L4_0_SHIFT) | 7396 (FLOW_KEY_L4_BYTE12 << 7397 FLOW_KEY_L4_1_SHIFT)); 7398 } 7399 7400 for (i = 0; i < LDN_MAX + 1; i++) 7401 p->ldg_map[i] = LDG_INVALID; 7402 7403 return p; 7404 7405fail_unregister: 7406 platform_device_unregister(plat_dev); 7407 return NULL; 7408} 7409 7410static struct niu_parent * __devinit niu_get_parent(struct niu *np, 7411 union niu_parent_id *id, 7412 u8 ptype) 7413{ 7414 struct niu_parent *p, *tmp; 7415 int port = np->port; 7416 7417 niudbg(PROBE, "niu_get_parent: platform_type[%u] port[%u]\n", 7418 ptype, port); 7419 7420 mutex_lock(&niu_parent_lock); 7421 p = NULL; 7422 list_for_each_entry(tmp, &niu_parent_list, list) { 7423 if (!memcmp(id, &tmp->id, sizeof(*id))) { 7424 p = tmp; 7425 break; 7426 } 7427 } 7428 if (!p) 7429 p = niu_new_parent(np, id, ptype); 7430 7431 if (p) { 7432 char port_name[6]; 7433 int err; 7434 7435 sprintf(port_name, "port%d", port); 7436 err = sysfs_create_link(&p->plat_dev->dev.kobj, 7437 &np->device->kobj, 7438 port_name); 7439 if (!err) { 7440 p->ports[port] = np; 7441 atomic_inc(&p->refcnt); 7442 } 7443 } 7444 mutex_unlock(&niu_parent_lock); 7445 7446 return p; 7447} 7448 7449static void niu_put_parent(struct niu *np) 7450{ 7451 struct niu_parent *p = np->parent; 7452 u8 port = np->port; 7453 char port_name[6]; 7454 7455 BUG_ON(!p || p->ports[port] != np); 7456 7457 niudbg(PROBE, "niu_put_parent: port[%u]\n", port); 7458 7459 sprintf(port_name, "port%d", port); 7460 7461 mutex_lock(&niu_parent_lock); 7462 7463 sysfs_remove_link(&p->plat_dev->dev.kobj, port_name); 7464 7465 p->ports[port] = NULL; 7466 np->parent = NULL; 7467 7468 if (atomic_dec_and_test(&p->refcnt)) { 7469 list_del(&p->list); 7470 platform_device_unregister(p->plat_dev); 7471 } 7472 7473 mutex_unlock(&niu_parent_lock); 7474} 7475 7476static void *niu_pci_alloc_coherent(struct device *dev, size_t size, 7477 u64 *handle, gfp_t flag) 7478{ 7479 dma_addr_t dh; 7480 void *ret; 7481 7482 ret = dma_alloc_coherent(dev, size, &dh, flag); 7483 if (ret) 7484 *handle = dh; 7485 return ret; 7486} 7487 7488static void niu_pci_free_coherent(struct device *dev, size_t size, 7489 void *cpu_addr, u64 handle) 7490{ 7491 dma_free_coherent(dev, size, cpu_addr, handle); 7492} 7493 7494static u64 niu_pci_map_page(struct device *dev, struct page *page, 7495 unsigned long offset, size_t size, 7496 enum dma_data_direction direction) 7497{ 7498 return dma_map_page(dev, page, offset, size, direction); 7499} 7500 7501static void niu_pci_unmap_page(struct device *dev, u64 dma_address, 7502 size_t size, enum dma_data_direction direction) 7503{ 7504 return dma_unmap_page(dev, dma_address, size, direction); 7505} 7506 7507static u64 niu_pci_map_single(struct device *dev, void *cpu_addr, 7508 size_t size, 7509 enum dma_data_direction direction) 7510{ 7511 return dma_map_single(dev, cpu_addr, size, direction); 7512} 7513 7514static void niu_pci_unmap_single(struct device *dev, u64 dma_address, 7515 size_t size, 7516 enum dma_data_direction direction) 7517{ 7518 dma_unmap_single(dev, dma_address, size, direction); 7519} 7520 7521static const struct niu_ops niu_pci_ops = { 7522 .alloc_coherent = niu_pci_alloc_coherent, 7523 .free_coherent = niu_pci_free_coherent, 7524 .map_page = niu_pci_map_page, 7525 .unmap_page = niu_pci_unmap_page, 7526 .map_single = niu_pci_map_single, 7527 .unmap_single = niu_pci_unmap_single, 7528}; 7529 7530static void __devinit niu_driver_version(void) 7531{ 7532 static int niu_version_printed; 7533 7534 if (niu_version_printed++ == 0) 7535 pr_info("%s", version); 7536} 7537 7538static struct net_device * __devinit niu_alloc_and_init( 7539 struct device *gen_dev, struct pci_dev *pdev, 7540 struct of_device *op, const struct niu_ops *ops, 7541 u8 port) 7542{ 7543 struct net_device *dev = alloc_etherdev(sizeof(struct niu)); 7544 struct niu *np; 7545 7546 if (!dev) { 7547 dev_err(gen_dev, PFX "Etherdev alloc failed, aborting.\n"); 7548 return NULL; 7549 } 7550 7551 SET_NETDEV_DEV(dev, gen_dev); 7552 7553 np = netdev_priv(dev); 7554 np->dev = dev; 7555 np->pdev = pdev; 7556 np->op = op; 7557 np->device = gen_dev; 7558 np->ops = ops; 7559 7560 np->msg_enable = niu_debug; 7561 7562 spin_lock_init(&np->lock); 7563 INIT_WORK(&np->reset_task, niu_reset_task); 7564 7565 np->port = port; 7566 7567 return dev; 7568} 7569 7570static void __devinit niu_assign_netdev_ops(struct net_device *dev) 7571{ 7572 dev->open = niu_open; 7573 dev->stop = niu_close; 7574 dev->get_stats = niu_get_stats; 7575 dev->set_multicast_list = niu_set_rx_mode; 7576 dev->set_mac_address = niu_set_mac_addr; 7577 dev->do_ioctl = niu_ioctl; 7578 dev->tx_timeout = niu_tx_timeout; 7579 dev->hard_start_xmit = niu_start_xmit; 7580 dev->ethtool_ops = &niu_ethtool_ops; 7581 dev->watchdog_timeo = NIU_TX_TIMEOUT; 7582 dev->change_mtu = niu_change_mtu; 7583} 7584 7585static void __devinit niu_device_announce(struct niu *np) 7586{ 7587 struct net_device *dev = np->dev; 7588 int i; 7589 7590 pr_info("%s: NIU Ethernet ", dev->name); 7591 for (i = 0; i < 6; i++) 7592 printk("%2.2x%c", dev->dev_addr[i], 7593 i == 5 ? '\n' : ':'); 7594 7595 pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n", 7596 dev->name, 7597 (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"), 7598 (np->flags & NIU_FLAGS_10G ? "10G" : "1G"), 7599 (np->flags & NIU_FLAGS_FIBER ? "FIBER" : "COPPER"), 7600 (np->mac_xcvr == MAC_XCVR_MII ? "MII" : 7601 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")), 7602 np->vpd.phy_type); 7603} 7604 7605static int __devinit niu_pci_init_one(struct pci_dev *pdev, 7606 const struct pci_device_id *ent) 7607{ 7608 unsigned long niureg_base, niureg_len; 7609 union niu_parent_id parent_id; 7610 struct net_device *dev; 7611 struct niu *np; 7612 int err, pos; 7613 u64 dma_mask; 7614 u16 val16; 7615 7616 niu_driver_version(); 7617 7618 err = pci_enable_device(pdev); 7619 if (err) { 7620 dev_err(&pdev->dev, PFX "Cannot enable PCI device, " 7621 "aborting.\n"); 7622 return err; 7623 } 7624 7625 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) || 7626 !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 7627 dev_err(&pdev->dev, PFX "Cannot find proper PCI device " 7628 "base addresses, aborting.\n"); 7629 err = -ENODEV; 7630 goto err_out_disable_pdev; 7631 } 7632 7633 err = pci_request_regions(pdev, DRV_MODULE_NAME); 7634 if (err) { 7635 dev_err(&pdev->dev, PFX "Cannot obtain PCI resources, " 7636 "aborting.\n"); 7637 goto err_out_disable_pdev; 7638 } 7639 7640 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 7641 if (pos <= 0) { 7642 dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, " 7643 "aborting.\n"); 7644 goto err_out_free_res; 7645 } 7646 7647 dev = niu_alloc_and_init(&pdev->dev, pdev, NULL, 7648 &niu_pci_ops, PCI_FUNC(pdev->devfn)); 7649 if (!dev) { 7650 err = -ENOMEM; 7651 goto err_out_free_res; 7652 } 7653 np = netdev_priv(dev); 7654 7655 memset(&parent_id, 0, sizeof(parent_id)); 7656 parent_id.pci.domain = pci_domain_nr(pdev->bus); 7657 parent_id.pci.bus = pdev->bus->number; 7658 parent_id.pci.device = PCI_SLOT(pdev->devfn); 7659 7660 np->parent = niu_get_parent(np, &parent_id, 7661 PLAT_TYPE_ATLAS); 7662 if (!np->parent) { 7663 err = -ENOMEM; 7664 goto err_out_free_dev; 7665 } 7666 7667 pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16); 7668 val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN; 7669 val16 |= (PCI_EXP_DEVCTL_CERE | 7670 PCI_EXP_DEVCTL_NFERE | 7671 PCI_EXP_DEVCTL_FERE | 7672 PCI_EXP_DEVCTL_URRE | 7673 PCI_EXP_DEVCTL_RELAX_EN); 7674 pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16); 7675 7676 dma_mask = DMA_44BIT_MASK; 7677 err = pci_set_dma_mask(pdev, dma_mask); 7678 if (!err) { 7679 dev->features |= NETIF_F_HIGHDMA; 7680 err = pci_set_consistent_dma_mask(pdev, dma_mask); 7681 if (err) { 7682 dev_err(&pdev->dev, PFX "Unable to obtain 44 bit " 7683 "DMA for consistent allocations, " 7684 "aborting.\n"); 7685 goto err_out_release_parent; 7686 } 7687 } 7688 if (err || dma_mask == DMA_32BIT_MASK) { 7689 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 7690 if (err) { 7691 dev_err(&pdev->dev, PFX "No usable DMA configuration, " 7692 "aborting.\n"); 7693 goto err_out_release_parent; 7694 } 7695 } 7696 7697 dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM); 7698 7699 niureg_base = pci_resource_start(pdev, 0); 7700 niureg_len = pci_resource_len(pdev, 0); 7701 7702 np->regs = ioremap_nocache(niureg_base, niureg_len); 7703 if (!np->regs) { 7704 dev_err(&pdev->dev, PFX "Cannot map device registers, " 7705 "aborting.\n"); 7706 err = -ENOMEM; 7707 goto err_out_release_parent; 7708 } 7709 7710 pci_set_master(pdev); 7711 pci_save_state(pdev); 7712 7713 dev->irq = pdev->irq; 7714 7715 niu_assign_netdev_ops(dev); 7716 7717 err = niu_get_invariants(np); 7718 if (err) { 7719 if (err != -ENODEV) 7720 dev_err(&pdev->dev, PFX "Problem fetching invariants " 7721 "of chip, aborting.\n"); 7722 goto err_out_iounmap; 7723 } 7724 7725 err = register_netdev(dev); 7726 if (err) { 7727 dev_err(&pdev->dev, PFX "Cannot register net device, " 7728 "aborting.\n"); 7729 goto err_out_iounmap; 7730 } 7731 7732 pci_set_drvdata(pdev, dev); 7733 7734 niu_device_announce(np); 7735 7736 return 0; 7737 7738err_out_iounmap: 7739 if (np->regs) { 7740 iounmap(np->regs); 7741 np->regs = NULL; 7742 } 7743 7744err_out_release_parent: 7745 niu_put_parent(np); 7746 7747err_out_free_dev: 7748 free_netdev(dev); 7749 7750err_out_free_res: 7751 pci_release_regions(pdev); 7752 7753err_out_disable_pdev: 7754 pci_disable_device(pdev); 7755 pci_set_drvdata(pdev, NULL); 7756 7757 return err; 7758} 7759 7760static void __devexit niu_pci_remove_one(struct pci_dev *pdev) 7761{ 7762 struct net_device *dev = pci_get_drvdata(pdev); 7763 7764 if (dev) { 7765 struct niu *np = netdev_priv(dev); 7766 7767 unregister_netdev(dev); 7768 if (np->regs) { 7769 iounmap(np->regs); 7770 np->regs = NULL; 7771 } 7772 7773 niu_ldg_free(np); 7774 7775 niu_put_parent(np); 7776 7777 free_netdev(dev); 7778 pci_release_regions(pdev); 7779 pci_disable_device(pdev); 7780 pci_set_drvdata(pdev, NULL); 7781 } 7782} 7783 7784static int niu_suspend(struct pci_dev *pdev, pm_message_t state) 7785{ 7786 struct net_device *dev = pci_get_drvdata(pdev); 7787 struct niu *np = netdev_priv(dev); 7788 unsigned long flags; 7789 7790 if (!netif_running(dev)) 7791 return 0; 7792 7793 flush_scheduled_work(); 7794 niu_netif_stop(np); 7795 7796 del_timer_sync(&np->timer); 7797 7798 spin_lock_irqsave(&np->lock, flags); 7799 niu_enable_interrupts(np, 0); 7800 spin_unlock_irqrestore(&np->lock, flags); 7801 7802 netif_device_detach(dev); 7803 7804 spin_lock_irqsave(&np->lock, flags); 7805 niu_stop_hw(np); 7806 spin_unlock_irqrestore(&np->lock, flags); 7807 7808 pci_save_state(pdev); 7809 7810 return 0; 7811} 7812 7813static int niu_resume(struct pci_dev *pdev) 7814{ 7815 struct net_device *dev = pci_get_drvdata(pdev); 7816 struct niu *np = netdev_priv(dev); 7817 unsigned long flags; 7818 int err; 7819 7820 if (!netif_running(dev)) 7821 return 0; 7822 7823 pci_restore_state(pdev); 7824 7825 netif_device_attach(dev); 7826 7827 spin_lock_irqsave(&np->lock, flags); 7828 7829 err = niu_init_hw(np); 7830 if (!err) { 7831 np->timer.expires = jiffies + HZ; 7832 add_timer(&np->timer); 7833 niu_netif_start(np); 7834 } 7835 7836 spin_unlock_irqrestore(&np->lock, flags); 7837 7838 return err; 7839} 7840 7841static struct pci_driver niu_pci_driver = { 7842 .name = DRV_MODULE_NAME, 7843 .id_table = niu_pci_tbl, 7844 .probe = niu_pci_init_one, 7845 .remove = __devexit_p(niu_pci_remove_one), 7846 .suspend = niu_suspend, 7847 .resume = niu_resume, 7848}; 7849 7850#ifdef CONFIG_SPARC64 7851static void *niu_phys_alloc_coherent(struct device *dev, size_t size, 7852 u64 *dma_addr, gfp_t flag) 7853{ 7854 unsigned long order = get_order(size); 7855 unsigned long page = __get_free_pages(flag, order); 7856 7857 if (page == 0UL) 7858 return NULL; 7859 memset((char *)page, 0, PAGE_SIZE << order); 7860 *dma_addr = __pa(page); 7861 7862 return (void *) page; 7863} 7864 7865static void niu_phys_free_coherent(struct device *dev, size_t size, 7866 void *cpu_addr, u64 handle) 7867{ 7868 unsigned long order = get_order(size); 7869 7870 free_pages((unsigned long) cpu_addr, order); 7871} 7872 7873static u64 niu_phys_map_page(struct device *dev, struct page *page, 7874 unsigned long offset, size_t size, 7875 enum dma_data_direction direction) 7876{ 7877 return page_to_phys(page) + offset; 7878} 7879 7880static void niu_phys_unmap_page(struct device *dev, u64 dma_address, 7881 size_t size, enum dma_data_direction direction) 7882{ 7883 /* Nothing to do. */ 7884} 7885 7886static u64 niu_phys_map_single(struct device *dev, void *cpu_addr, 7887 size_t size, 7888 enum dma_data_direction direction) 7889{ 7890 return __pa(cpu_addr); 7891} 7892 7893static void niu_phys_unmap_single(struct device *dev, u64 dma_address, 7894 size_t size, 7895 enum dma_data_direction direction) 7896{ 7897 /* Nothing to do. */ 7898} 7899 7900static const struct niu_ops niu_phys_ops = { 7901 .alloc_coherent = niu_phys_alloc_coherent, 7902 .free_coherent = niu_phys_free_coherent, 7903 .map_page = niu_phys_map_page, 7904 .unmap_page = niu_phys_unmap_page, 7905 .map_single = niu_phys_map_single, 7906 .unmap_single = niu_phys_unmap_single, 7907}; 7908 7909static unsigned long res_size(struct resource *r) 7910{ 7911 return r->end - r->start + 1UL; 7912} 7913 7914static int __devinit niu_of_probe(struct of_device *op, 7915 const struct of_device_id *match) 7916{ 7917 union niu_parent_id parent_id; 7918 struct net_device *dev; 7919 struct niu *np; 7920 const u32 *reg; 7921 int err; 7922 7923 niu_driver_version(); 7924 7925 reg = of_get_property(op->node, "reg", NULL); 7926 if (!reg) { 7927 dev_err(&op->dev, PFX "%s: No 'reg' property, aborting.\n", 7928 op->node->full_name); 7929 return -ENODEV; 7930 } 7931 7932 dev = niu_alloc_and_init(&op->dev, NULL, op, 7933 &niu_phys_ops, reg[0] & 0x1); 7934 if (!dev) { 7935 err = -ENOMEM; 7936 goto err_out; 7937 } 7938 np = netdev_priv(dev); 7939 7940 memset(&parent_id, 0, sizeof(parent_id)); 7941 parent_id.of = of_get_parent(op->node); 7942 7943 np->parent = niu_get_parent(np, &parent_id, 7944 PLAT_TYPE_NIU); 7945 if (!np->parent) { 7946 err = -ENOMEM; 7947 goto err_out_free_dev; 7948 } 7949 7950 dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM); 7951 7952 np->regs = of_ioremap(&op->resource[1], 0, 7953 res_size(&op->resource[1]), 7954 "niu regs"); 7955 if (!np->regs) { 7956 dev_err(&op->dev, PFX "Cannot map device registers, " 7957 "aborting.\n"); 7958 err = -ENOMEM; 7959 goto err_out_release_parent; 7960 } 7961 7962 np->vir_regs_1 = of_ioremap(&op->resource[2], 0, 7963 res_size(&op->resource[2]), 7964 "niu vregs-1"); 7965 if (!np->vir_regs_1) { 7966 dev_err(&op->dev, PFX "Cannot map device vir registers 1, " 7967 "aborting.\n"); 7968 err = -ENOMEM; 7969 goto err_out_iounmap; 7970 } 7971 7972 np->vir_regs_2 = of_ioremap(&op->resource[3], 0, 7973 res_size(&op->resource[3]), 7974 "niu vregs-2"); 7975 if (!np->vir_regs_2) { 7976 dev_err(&op->dev, PFX "Cannot map device vir registers 2, " 7977 "aborting.\n"); 7978 err = -ENOMEM; 7979 goto err_out_iounmap; 7980 } 7981 7982 niu_assign_netdev_ops(dev); 7983 7984 err = niu_get_invariants(np); 7985 if (err) { 7986 if (err != -ENODEV) 7987 dev_err(&op->dev, PFX "Problem fetching invariants " 7988 "of chip, aborting.\n"); 7989 goto err_out_iounmap; 7990 } 7991 7992 err = register_netdev(dev); 7993 if (err) { 7994 dev_err(&op->dev, PFX "Cannot register net device, " 7995 "aborting.\n"); 7996 goto err_out_iounmap; 7997 } 7998 7999 dev_set_drvdata(&op->dev, dev); 8000 8001 niu_device_announce(np); 8002 8003 return 0; 8004 8005err_out_iounmap: 8006 if (np->vir_regs_1) { 8007 of_iounmap(&op->resource[2], np->vir_regs_1, 8008 res_size(&op->resource[2])); 8009 np->vir_regs_1 = NULL; 8010 } 8011 8012 if (np->vir_regs_2) { 8013 of_iounmap(&op->resource[3], np->vir_regs_2, 8014 res_size(&op->resource[3])); 8015 np->vir_regs_2 = NULL; 8016 } 8017 8018 if (np->regs) { 8019 of_iounmap(&op->resource[1], np->regs, 8020 res_size(&op->resource[1])); 8021 np->regs = NULL; 8022 } 8023 8024err_out_release_parent: 8025 niu_put_parent(np); 8026 8027err_out_free_dev: 8028 free_netdev(dev); 8029 8030err_out: 8031 return err; 8032} 8033 8034static int __devexit niu_of_remove(struct of_device *op) 8035{ 8036 struct net_device *dev = dev_get_drvdata(&op->dev); 8037 8038 if (dev) { 8039 struct niu *np = netdev_priv(dev); 8040 8041 unregister_netdev(dev); 8042 8043 if (np->vir_regs_1) { 8044 of_iounmap(&op->resource[2], np->vir_regs_1, 8045 res_size(&op->resource[2])); 8046 np->vir_regs_1 = NULL; 8047 } 8048 8049 if (np->vir_regs_2) { 8050 of_iounmap(&op->resource[3], np->vir_regs_2, 8051 res_size(&op->resource[3])); 8052 np->vir_regs_2 = NULL; 8053 } 8054 8055 if (np->regs) { 8056 of_iounmap(&op->resource[1], np->regs, 8057 res_size(&op->resource[1])); 8058 np->regs = NULL; 8059 } 8060 8061 niu_ldg_free(np); 8062 8063 niu_put_parent(np); 8064 8065 free_netdev(dev); 8066 dev_set_drvdata(&op->dev, NULL); 8067 } 8068 return 0; 8069} 8070 8071static struct of_device_id niu_match[] = { 8072 { 8073 .name = "network", 8074 .compatible = "SUNW,niusl", 8075 }, 8076 {}, 8077}; 8078MODULE_DEVICE_TABLE(of, niu_match); 8079 8080static struct of_platform_driver niu_of_driver = { 8081 .name = "niu", 8082 .match_table = niu_match, 8083 .probe = niu_of_probe, 8084 .remove = __devexit_p(niu_of_remove), 8085}; 8086 8087#endif /* CONFIG_SPARC64 */ 8088 8089static int __init niu_init(void) 8090{ 8091 int err = 0; 8092 8093 BUILD_BUG_ON(PAGE_SIZE < 4 * 1024); 8094 8095 niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT); 8096 8097#ifdef CONFIG_SPARC64 8098 err = of_register_driver(&niu_of_driver, &of_bus_type); 8099#endif 8100 8101 if (!err) { 8102 err = pci_register_driver(&niu_pci_driver); 8103#ifdef CONFIG_SPARC64 8104 if (err) 8105 of_unregister_driver(&niu_of_driver); 8106#endif 8107 } 8108 8109 return err; 8110} 8111 8112static void __exit niu_exit(void) 8113{ 8114 pci_unregister_driver(&niu_pci_driver); 8115#ifdef CONFIG_SPARC64 8116 of_unregister_driver(&niu_of_driver); 8117#endif 8118} 8119 8120module_init(niu_init); 8121module_exit(niu_exit);