Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.32-rc1 3036 lines 68 kB view raw
1/* 2 * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver 3 * 4 * Copyright 2008 JMicron Technology Corporation 5 * http://www.jmicron.com/ 6 * 7 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org> 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License as published by 11 * the Free Software Foundation; either version 2 of the License. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; if not, write to the Free Software 20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 21 * 22 */ 23 24#include <linux/module.h> 25#include <linux/kernel.h> 26#include <linux/pci.h> 27#include <linux/netdevice.h> 28#include <linux/etherdevice.h> 29#include <linux/ethtool.h> 30#include <linux/mii.h> 31#include <linux/crc32.h> 32#include <linux/delay.h> 33#include <linux/spinlock.h> 34#include <linux/in.h> 35#include <linux/ip.h> 36#include <linux/ipv6.h> 37#include <linux/tcp.h> 38#include <linux/udp.h> 39#include <linux/if_vlan.h> 40#include <net/ip6_checksum.h> 41#include "jme.h" 42 43static int force_pseudohp = -1; 44static int no_pseudohp = -1; 45static int no_extplug = -1; 46module_param(force_pseudohp, int, 0); 47MODULE_PARM_DESC(force_pseudohp, 48 "Enable pseudo hot-plug feature manually by driver instead of BIOS."); 49module_param(no_pseudohp, int, 0); 50MODULE_PARM_DESC(no_pseudohp, "Disable pseudo hot-plug feature."); 51module_param(no_extplug, int, 0); 52MODULE_PARM_DESC(no_extplug, 53 "Do not use external plug signal for pseudo hot-plug."); 54 55static int 56jme_mdio_read(struct net_device *netdev, int phy, int reg) 57{ 58 struct jme_adapter *jme = netdev_priv(netdev); 59 
int i, val, again = (reg == MII_BMSR) ? 1 : 0; 60 61read_again: 62 jwrite32(jme, JME_SMI, SMI_OP_REQ | 63 smi_phy_addr(phy) | 64 smi_reg_addr(reg)); 65 66 wmb(); 67 for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) { 68 udelay(20); 69 val = jread32(jme, JME_SMI); 70 if ((val & SMI_OP_REQ) == 0) 71 break; 72 } 73 74 if (i == 0) { 75 jeprintk(jme->pdev, "phy(%d) read timeout : %d\n", phy, reg); 76 return 0; 77 } 78 79 if (again--) 80 goto read_again; 81 82 return (val & SMI_DATA_MASK) >> SMI_DATA_SHIFT; 83} 84 85static void 86jme_mdio_write(struct net_device *netdev, 87 int phy, int reg, int val) 88{ 89 struct jme_adapter *jme = netdev_priv(netdev); 90 int i; 91 92 jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ | 93 ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) | 94 smi_phy_addr(phy) | smi_reg_addr(reg)); 95 96 wmb(); 97 for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) { 98 udelay(20); 99 if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0) 100 break; 101 } 102 103 if (i == 0) 104 jeprintk(jme->pdev, "phy(%d) write timeout : %d\n", phy, reg); 105 106 return; 107} 108 109static inline void 110jme_reset_phy_processor(struct jme_adapter *jme) 111{ 112 u32 val; 113 114 jme_mdio_write(jme->dev, 115 jme->mii_if.phy_id, 116 MII_ADVERTISE, ADVERTISE_ALL | 117 ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 118 119 if (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) 120 jme_mdio_write(jme->dev, 121 jme->mii_if.phy_id, 122 MII_CTRL1000, 123 ADVERTISE_1000FULL | ADVERTISE_1000HALF); 124 125 val = jme_mdio_read(jme->dev, 126 jme->mii_if.phy_id, 127 MII_BMCR); 128 129 jme_mdio_write(jme->dev, 130 jme->mii_if.phy_id, 131 MII_BMCR, val | BMCR_RESET); 132 133 return; 134} 135 136static void 137jme_setup_wakeup_frame(struct jme_adapter *jme, 138 u32 *mask, u32 crc, int fnr) 139{ 140 int i; 141 142 /* 143 * Setup CRC pattern 144 */ 145 jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL)); 146 wmb(); 147 jwrite32(jme, JME_WFODP, crc); 148 wmb(); 149 150 /* 151 * Setup Mask 152 */ 153 for (i = 
0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) { 154 jwrite32(jme, JME_WFOI, 155 ((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) | 156 (fnr & WFOI_FRAME_SEL)); 157 wmb(); 158 jwrite32(jme, JME_WFODP, mask[i]); 159 wmb(); 160 } 161} 162 163static inline void 164jme_reset_mac_processor(struct jme_adapter *jme) 165{ 166 u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0}; 167 u32 crc = 0xCDCDCDCD; 168 u32 gpreg0; 169 int i; 170 171 jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST); 172 udelay(2); 173 jwrite32(jme, JME_GHC, jme->reg_ghc); 174 175 jwrite32(jme, JME_RXDBA_LO, 0x00000000); 176 jwrite32(jme, JME_RXDBA_HI, 0x00000000); 177 jwrite32(jme, JME_RXQDC, 0x00000000); 178 jwrite32(jme, JME_RXNDA, 0x00000000); 179 jwrite32(jme, JME_TXDBA_LO, 0x00000000); 180 jwrite32(jme, JME_TXDBA_HI, 0x00000000); 181 jwrite32(jme, JME_TXQDC, 0x00000000); 182 jwrite32(jme, JME_TXNDA, 0x00000000); 183 184 jwrite32(jme, JME_RXMCHT_LO, 0x00000000); 185 jwrite32(jme, JME_RXMCHT_HI, 0x00000000); 186 for (i = 0 ; i < WAKEUP_FRAME_NR ; ++i) 187 jme_setup_wakeup_frame(jme, mask, crc, i); 188 if (jme->fpgaver) 189 gpreg0 = GPREG0_DEFAULT | GPREG0_LNKINTPOLL; 190 else 191 gpreg0 = GPREG0_DEFAULT; 192 jwrite32(jme, JME_GPREG0, gpreg0); 193 jwrite32(jme, JME_GPREG1, GPREG1_DEFAULT); 194} 195 196static inline void 197jme_reset_ghc_speed(struct jme_adapter *jme) 198{ 199 jme->reg_ghc &= ~(GHC_SPEED_1000M | GHC_DPX); 200 jwrite32(jme, JME_GHC, jme->reg_ghc); 201} 202 203static inline void 204jme_clear_pm(struct jme_adapter *jme) 205{ 206 jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs); 207 pci_set_power_state(jme->pdev, PCI_D0); 208 pci_enable_wake(jme->pdev, PCI_D0, false); 209} 210 211static int 212jme_reload_eeprom(struct jme_adapter *jme) 213{ 214 u32 val; 215 int i; 216 217 val = jread32(jme, JME_SMBCSR); 218 219 if (val & SMBCSR_EEPROMD) { 220 val |= SMBCSR_CNACK; 221 jwrite32(jme, JME_SMBCSR, val); 222 val |= SMBCSR_RELOAD; 223 jwrite32(jme, JME_SMBCSR, val); 224 mdelay(12); 225 226 for (i = 
JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i) { 227 mdelay(1); 228 if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0) 229 break; 230 } 231 232 if (i == 0) { 233 jeprintk(jme->pdev, "eeprom reload timeout\n"); 234 return -EIO; 235 } 236 } 237 238 return 0; 239} 240 241static void 242jme_load_macaddr(struct net_device *netdev) 243{ 244 struct jme_adapter *jme = netdev_priv(netdev); 245 unsigned char macaddr[6]; 246 u32 val; 247 248 spin_lock_bh(&jme->macaddr_lock); 249 val = jread32(jme, JME_RXUMA_LO); 250 macaddr[0] = (val >> 0) & 0xFF; 251 macaddr[1] = (val >> 8) & 0xFF; 252 macaddr[2] = (val >> 16) & 0xFF; 253 macaddr[3] = (val >> 24) & 0xFF; 254 val = jread32(jme, JME_RXUMA_HI); 255 macaddr[4] = (val >> 0) & 0xFF; 256 macaddr[5] = (val >> 8) & 0xFF; 257 memcpy(netdev->dev_addr, macaddr, 6); 258 spin_unlock_bh(&jme->macaddr_lock); 259} 260 261static inline void 262jme_set_rx_pcc(struct jme_adapter *jme, int p) 263{ 264 switch (p) { 265 case PCC_OFF: 266 jwrite32(jme, JME_PCCRX0, 267 ((PCC_OFF_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | 268 ((PCC_OFF_CNT << PCCRX_SHIFT) & PCCRX_MASK)); 269 break; 270 case PCC_P1: 271 jwrite32(jme, JME_PCCRX0, 272 ((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | 273 ((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK)); 274 break; 275 case PCC_P2: 276 jwrite32(jme, JME_PCCRX0, 277 ((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | 278 ((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK)); 279 break; 280 case PCC_P3: 281 jwrite32(jme, JME_PCCRX0, 282 ((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | 283 ((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK)); 284 break; 285 default: 286 break; 287 } 288 wmb(); 289 290 if (!(test_bit(JME_FLAG_POLL, &jme->flags))) 291 msg_rx_status(jme, "Switched to PCC_P%d\n", p); 292} 293 294static void 295jme_start_irq(struct jme_adapter *jme) 296{ 297 register struct dynpcc_info *dpi = &(jme->dpi); 298 299 jme_set_rx_pcc(jme, PCC_P1); 300 dpi->cur = PCC_P1; 301 dpi->attempt = PCC_P1; 302 dpi->cnt = 0; 303 304 jwrite32(jme, JME_PCCTX, 305 
((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) | 306 ((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) | 307 PCCTXQ0_EN 308 ); 309 310 /* 311 * Enable Interrupts 312 */ 313 jwrite32(jme, JME_IENS, INTR_ENABLE); 314} 315 316static inline void 317jme_stop_irq(struct jme_adapter *jme) 318{ 319 /* 320 * Disable Interrupts 321 */ 322 jwrite32f(jme, JME_IENC, INTR_ENABLE); 323} 324 325static u32 326jme_linkstat_from_phy(struct jme_adapter *jme) 327{ 328 u32 phylink, bmsr; 329 330 phylink = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 17); 331 bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR); 332 if (bmsr & BMSR_ANCOMP) 333 phylink |= PHY_LINK_AUTONEG_COMPLETE; 334 335 return phylink; 336} 337 338static inline void 339jme_set_phyfifoa(struct jme_adapter *jme) 340{ 341 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004); 342} 343 344static inline void 345jme_set_phyfifob(struct jme_adapter *jme) 346{ 347 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000); 348} 349 350static int 351jme_check_link(struct net_device *netdev, int testonly) 352{ 353 struct jme_adapter *jme = netdev_priv(netdev); 354 u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr, gpreg1; 355 char linkmsg[64]; 356 int rc = 0; 357 358 linkmsg[0] = '\0'; 359 360 if (jme->fpgaver) 361 phylink = jme_linkstat_from_phy(jme); 362 else 363 phylink = jread32(jme, JME_PHY_LINK); 364 365 if (phylink & PHY_LINK_UP) { 366 if (!(phylink & PHY_LINK_AUTONEG_COMPLETE)) { 367 /* 368 * If we did not enable AN 369 * Speed/Duplex Info should be obtained from SMI 370 */ 371 phylink = PHY_LINK_UP; 372 373 bmcr = jme_mdio_read(jme->dev, 374 jme->mii_if.phy_id, 375 MII_BMCR); 376 377 phylink |= ((bmcr & BMCR_SPEED1000) && 378 (bmcr & BMCR_SPEED100) == 0) ? 379 PHY_LINK_SPEED_1000M : 380 (bmcr & BMCR_SPEED100) ? 381 PHY_LINK_SPEED_100M : 382 PHY_LINK_SPEED_10M; 383 384 phylink |= (bmcr & BMCR_FULLDPLX) ? 
385 PHY_LINK_DUPLEX : 0; 386 387 strcat(linkmsg, "Forced: "); 388 } else { 389 /* 390 * Keep polling for speed/duplex resolve complete 391 */ 392 while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) && 393 --cnt) { 394 395 udelay(1); 396 397 if (jme->fpgaver) 398 phylink = jme_linkstat_from_phy(jme); 399 else 400 phylink = jread32(jme, JME_PHY_LINK); 401 } 402 if (!cnt) 403 jeprintk(jme->pdev, 404 "Waiting speed resolve timeout.\n"); 405 406 strcat(linkmsg, "ANed: "); 407 } 408 409 if (jme->phylink == phylink) { 410 rc = 1; 411 goto out; 412 } 413 if (testonly) 414 goto out; 415 416 jme->phylink = phylink; 417 418 ghc = jme->reg_ghc & ~(GHC_SPEED | GHC_DPX | 419 GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE | 420 GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY); 421 switch (phylink & PHY_LINK_SPEED_MASK) { 422 case PHY_LINK_SPEED_10M: 423 ghc |= GHC_SPEED_10M | 424 GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE; 425 strcat(linkmsg, "10 Mbps, "); 426 break; 427 case PHY_LINK_SPEED_100M: 428 ghc |= GHC_SPEED_100M | 429 GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE; 430 strcat(linkmsg, "100 Mbps, "); 431 break; 432 case PHY_LINK_SPEED_1000M: 433 ghc |= GHC_SPEED_1000M | 434 GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY; 435 strcat(linkmsg, "1000 Mbps, "); 436 break; 437 default: 438 break; 439 } 440 441 if (phylink & PHY_LINK_DUPLEX) { 442 jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT); 443 ghc |= GHC_DPX; 444 } else { 445 jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT | 446 TXMCS_BACKOFF | 447 TXMCS_CARRIERSENSE | 448 TXMCS_COLLISION); 449 jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN | 450 ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) | 451 TXTRHD_TXREN | 452 ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL)); 453 } 454 455 gpreg1 = GPREG1_DEFAULT; 456 if (is_buggy250(jme->pdev->device, jme->chiprev)) { 457 if (!(phylink & PHY_LINK_DUPLEX)) 458 gpreg1 |= GPREG1_HALFMODEPATCH; 459 switch (phylink & PHY_LINK_SPEED_MASK) { 460 case PHY_LINK_SPEED_10M: 461 jme_set_phyfifoa(jme); 462 gpreg1 |= GPREG1_RSSPATCH; 463 break; 464 case PHY_LINK_SPEED_100M: 465 
jme_set_phyfifob(jme); 466 gpreg1 |= GPREG1_RSSPATCH; 467 break; 468 case PHY_LINK_SPEED_1000M: 469 jme_set_phyfifoa(jme); 470 break; 471 default: 472 break; 473 } 474 } 475 476 jwrite32(jme, JME_GPREG1, gpreg1); 477 jwrite32(jme, JME_GHC, ghc); 478 jme->reg_ghc = ghc; 479 480 strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ? 481 "Full-Duplex, " : 482 "Half-Duplex, "); 483 strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ? 484 "MDI-X" : 485 "MDI"); 486 msg_link(jme, "Link is up at %s.\n", linkmsg); 487 netif_carrier_on(netdev); 488 } else { 489 if (testonly) 490 goto out; 491 492 msg_link(jme, "Link is down.\n"); 493 jme->phylink = 0; 494 netif_carrier_off(netdev); 495 } 496 497out: 498 return rc; 499} 500 501static int 502jme_setup_tx_resources(struct jme_adapter *jme) 503{ 504 struct jme_ring *txring = &(jme->txring[0]); 505 506 txring->alloc = dma_alloc_coherent(&(jme->pdev->dev), 507 TX_RING_ALLOC_SIZE(jme->tx_ring_size), 508 &(txring->dmaalloc), 509 GFP_ATOMIC); 510 511 if (!txring->alloc) 512 goto err_set_null; 513 514 /* 515 * 16 Bytes align 516 */ 517 txring->desc = (void *)ALIGN((unsigned long)(txring->alloc), 518 RING_DESC_ALIGN); 519 txring->dma = ALIGN(txring->dmaalloc, RING_DESC_ALIGN); 520 txring->next_to_use = 0; 521 atomic_set(&txring->next_to_clean, 0); 522 atomic_set(&txring->nr_free, jme->tx_ring_size); 523 524 txring->bufinf = kmalloc(sizeof(struct jme_buffer_info) * 525 jme->tx_ring_size, GFP_ATOMIC); 526 if (unlikely(!(txring->bufinf))) 527 goto err_free_txring; 528 529 /* 530 * Initialize Transmit Descriptors 531 */ 532 memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size)); 533 memset(txring->bufinf, 0, 534 sizeof(struct jme_buffer_info) * jme->tx_ring_size); 535 536 return 0; 537 538err_free_txring: 539 dma_free_coherent(&(jme->pdev->dev), 540 TX_RING_ALLOC_SIZE(jme->tx_ring_size), 541 txring->alloc, 542 txring->dmaalloc); 543 544err_set_null: 545 txring->desc = NULL; 546 txring->dmaalloc = 0; 547 txring->dma = 0; 548 txring->bufinf = 
NULL; 549 550 return -ENOMEM; 551} 552 553static void 554jme_free_tx_resources(struct jme_adapter *jme) 555{ 556 int i; 557 struct jme_ring *txring = &(jme->txring[0]); 558 struct jme_buffer_info *txbi; 559 560 if (txring->alloc) { 561 if (txring->bufinf) { 562 for (i = 0 ; i < jme->tx_ring_size ; ++i) { 563 txbi = txring->bufinf + i; 564 if (txbi->skb) { 565 dev_kfree_skb(txbi->skb); 566 txbi->skb = NULL; 567 } 568 txbi->mapping = 0; 569 txbi->len = 0; 570 txbi->nr_desc = 0; 571 txbi->start_xmit = 0; 572 } 573 kfree(txring->bufinf); 574 } 575 576 dma_free_coherent(&(jme->pdev->dev), 577 TX_RING_ALLOC_SIZE(jme->tx_ring_size), 578 txring->alloc, 579 txring->dmaalloc); 580 581 txring->alloc = NULL; 582 txring->desc = NULL; 583 txring->dmaalloc = 0; 584 txring->dma = 0; 585 txring->bufinf = NULL; 586 } 587 txring->next_to_use = 0; 588 atomic_set(&txring->next_to_clean, 0); 589 atomic_set(&txring->nr_free, 0); 590} 591 592static inline void 593jme_enable_tx_engine(struct jme_adapter *jme) 594{ 595 /* 596 * Select Queue 0 597 */ 598 jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0); 599 wmb(); 600 601 /* 602 * Setup TX Queue 0 DMA Bass Address 603 */ 604 jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL); 605 jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32); 606 jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL); 607 608 /* 609 * Setup TX Descptor Count 610 */ 611 jwrite32(jme, JME_TXQDC, jme->tx_ring_size); 612 613 /* 614 * Enable TX Engine 615 */ 616 wmb(); 617 jwrite32(jme, JME_TXCS, jme->reg_txcs | 618 TXCS_SELECT_QUEUE0 | 619 TXCS_ENABLE); 620 621} 622 623static inline void 624jme_restart_tx_engine(struct jme_adapter *jme) 625{ 626 /* 627 * Restart TX Engine 628 */ 629 jwrite32(jme, JME_TXCS, jme->reg_txcs | 630 TXCS_SELECT_QUEUE0 | 631 TXCS_ENABLE); 632} 633 634static inline void 635jme_disable_tx_engine(struct jme_adapter *jme) 636{ 637 int i; 638 u32 val; 639 640 /* 641 * Disable TX Engine 642 */ 643 
jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0); 644 wmb(); 645 646 val = jread32(jme, JME_TXCS); 647 for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) { 648 mdelay(1); 649 val = jread32(jme, JME_TXCS); 650 rmb(); 651 } 652 653 if (!i) 654 jeprintk(jme->pdev, "Disable TX engine timeout.\n"); 655} 656 657static void 658jme_set_clean_rxdesc(struct jme_adapter *jme, int i) 659{ 660 struct jme_ring *rxring = &(jme->rxring[0]); 661 register struct rxdesc *rxdesc = rxring->desc; 662 struct jme_buffer_info *rxbi = rxring->bufinf; 663 rxdesc += i; 664 rxbi += i; 665 666 rxdesc->dw[0] = 0; 667 rxdesc->dw[1] = 0; 668 rxdesc->desc1.bufaddrh = cpu_to_le32((__u64)rxbi->mapping >> 32); 669 rxdesc->desc1.bufaddrl = cpu_to_le32( 670 (__u64)rxbi->mapping & 0xFFFFFFFFUL); 671 rxdesc->desc1.datalen = cpu_to_le16(rxbi->len); 672 if (jme->dev->features & NETIF_F_HIGHDMA) 673 rxdesc->desc1.flags = RXFLAG_64BIT; 674 wmb(); 675 rxdesc->desc1.flags |= RXFLAG_OWN | RXFLAG_INT; 676} 677 678static int 679jme_make_new_rx_buf(struct jme_adapter *jme, int i) 680{ 681 struct jme_ring *rxring = &(jme->rxring[0]); 682 struct jme_buffer_info *rxbi = rxring->bufinf + i; 683 struct sk_buff *skb; 684 685 skb = netdev_alloc_skb(jme->dev, 686 jme->dev->mtu + RX_EXTRA_LEN); 687 if (unlikely(!skb)) 688 return -ENOMEM; 689 690 rxbi->skb = skb; 691 rxbi->len = skb_tailroom(skb); 692 rxbi->mapping = pci_map_page(jme->pdev, 693 virt_to_page(skb->data), 694 offset_in_page(skb->data), 695 rxbi->len, 696 PCI_DMA_FROMDEVICE); 697 698 return 0; 699} 700 701static void 702jme_free_rx_buf(struct jme_adapter *jme, int i) 703{ 704 struct jme_ring *rxring = &(jme->rxring[0]); 705 struct jme_buffer_info *rxbi = rxring->bufinf; 706 rxbi += i; 707 708 if (rxbi->skb) { 709 pci_unmap_page(jme->pdev, 710 rxbi->mapping, 711 rxbi->len, 712 PCI_DMA_FROMDEVICE); 713 dev_kfree_skb(rxbi->skb); 714 rxbi->skb = NULL; 715 rxbi->mapping = 0; 716 rxbi->len = 0; 717 } 718} 719 720static void 
721jme_free_rx_resources(struct jme_adapter *jme) 722{ 723 int i; 724 struct jme_ring *rxring = &(jme->rxring[0]); 725 726 if (rxring->alloc) { 727 if (rxring->bufinf) { 728 for (i = 0 ; i < jme->rx_ring_size ; ++i) 729 jme_free_rx_buf(jme, i); 730 kfree(rxring->bufinf); 731 } 732 733 dma_free_coherent(&(jme->pdev->dev), 734 RX_RING_ALLOC_SIZE(jme->rx_ring_size), 735 rxring->alloc, 736 rxring->dmaalloc); 737 rxring->alloc = NULL; 738 rxring->desc = NULL; 739 rxring->dmaalloc = 0; 740 rxring->dma = 0; 741 rxring->bufinf = NULL; 742 } 743 rxring->next_to_use = 0; 744 atomic_set(&rxring->next_to_clean, 0); 745} 746 747static int 748jme_setup_rx_resources(struct jme_adapter *jme) 749{ 750 int i; 751 struct jme_ring *rxring = &(jme->rxring[0]); 752 753 rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev), 754 RX_RING_ALLOC_SIZE(jme->rx_ring_size), 755 &(rxring->dmaalloc), 756 GFP_ATOMIC); 757 if (!rxring->alloc) 758 goto err_set_null; 759 760 /* 761 * 16 Bytes align 762 */ 763 rxring->desc = (void *)ALIGN((unsigned long)(rxring->alloc), 764 RING_DESC_ALIGN); 765 rxring->dma = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN); 766 rxring->next_to_use = 0; 767 atomic_set(&rxring->next_to_clean, 0); 768 769 rxring->bufinf = kmalloc(sizeof(struct jme_buffer_info) * 770 jme->rx_ring_size, GFP_ATOMIC); 771 if (unlikely(!(rxring->bufinf))) 772 goto err_free_rxring; 773 774 /* 775 * Initiallize Receive Descriptors 776 */ 777 memset(rxring->bufinf, 0, 778 sizeof(struct jme_buffer_info) * jme->rx_ring_size); 779 for (i = 0 ; i < jme->rx_ring_size ; ++i) { 780 if (unlikely(jme_make_new_rx_buf(jme, i))) { 781 jme_free_rx_resources(jme); 782 return -ENOMEM; 783 } 784 785 jme_set_clean_rxdesc(jme, i); 786 } 787 788 return 0; 789 790err_free_rxring: 791 dma_free_coherent(&(jme->pdev->dev), 792 RX_RING_ALLOC_SIZE(jme->rx_ring_size), 793 rxring->alloc, 794 rxring->dmaalloc); 795err_set_null: 796 rxring->desc = NULL; 797 rxring->dmaalloc = 0; 798 rxring->dma = 0; 799 rxring->bufinf = NULL; 800 
801 return -ENOMEM; 802} 803 804static inline void 805jme_enable_rx_engine(struct jme_adapter *jme) 806{ 807 /* 808 * Select Queue 0 809 */ 810 jwrite32(jme, JME_RXCS, jme->reg_rxcs | 811 RXCS_QUEUESEL_Q0); 812 wmb(); 813 814 /* 815 * Setup RX DMA Bass Address 816 */ 817 jwrite32(jme, JME_RXDBA_LO, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL); 818 jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32); 819 jwrite32(jme, JME_RXNDA, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL); 820 821 /* 822 * Setup RX Descriptor Count 823 */ 824 jwrite32(jme, JME_RXQDC, jme->rx_ring_size); 825 826 /* 827 * Setup Unicast Filter 828 */ 829 jme_set_multi(jme->dev); 830 831 /* 832 * Enable RX Engine 833 */ 834 wmb(); 835 jwrite32(jme, JME_RXCS, jme->reg_rxcs | 836 RXCS_QUEUESEL_Q0 | 837 RXCS_ENABLE | 838 RXCS_QST); 839} 840 841static inline void 842jme_restart_rx_engine(struct jme_adapter *jme) 843{ 844 /* 845 * Start RX Engine 846 */ 847 jwrite32(jme, JME_RXCS, jme->reg_rxcs | 848 RXCS_QUEUESEL_Q0 | 849 RXCS_ENABLE | 850 RXCS_QST); 851} 852 853static inline void 854jme_disable_rx_engine(struct jme_adapter *jme) 855{ 856 int i; 857 u32 val; 858 859 /* 860 * Disable RX Engine 861 */ 862 jwrite32(jme, JME_RXCS, jme->reg_rxcs); 863 wmb(); 864 865 val = jread32(jme, JME_RXCS); 866 for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) { 867 mdelay(1); 868 val = jread32(jme, JME_RXCS); 869 rmb(); 870 } 871 872 if (!i) 873 jeprintk(jme->pdev, "Disable RX engine timeout.\n"); 874 875} 876 877static int 878jme_rxsum_ok(struct jme_adapter *jme, u16 flags) 879{ 880 if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4))) 881 return false; 882 883 if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_TCPON | RXWBFLAG_TCPCS)) 884 == RXWBFLAG_TCPON)) { 885 if (flags & RXWBFLAG_IPV4) 886 msg_rx_err(jme, "TCP Checksum error\n"); 887 return false; 888 } 889 890 if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS)) 891 == RXWBFLAG_UDPON)) { 892 if (flags & 
RXWBFLAG_IPV4) 893 msg_rx_err(jme, "UDP Checksum error.\n"); 894 return false; 895 } 896 897 if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS)) 898 == RXWBFLAG_IPV4)) { 899 msg_rx_err(jme, "IPv4 Checksum error.\n"); 900 return false; 901 } 902 903 return true; 904} 905 906static void 907jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx) 908{ 909 struct jme_ring *rxring = &(jme->rxring[0]); 910 struct rxdesc *rxdesc = rxring->desc; 911 struct jme_buffer_info *rxbi = rxring->bufinf; 912 struct sk_buff *skb; 913 int framesize; 914 915 rxdesc += idx; 916 rxbi += idx; 917 918 skb = rxbi->skb; 919 pci_dma_sync_single_for_cpu(jme->pdev, 920 rxbi->mapping, 921 rxbi->len, 922 PCI_DMA_FROMDEVICE); 923 924 if (unlikely(jme_make_new_rx_buf(jme, idx))) { 925 pci_dma_sync_single_for_device(jme->pdev, 926 rxbi->mapping, 927 rxbi->len, 928 PCI_DMA_FROMDEVICE); 929 930 ++(NET_STAT(jme).rx_dropped); 931 } else { 932 framesize = le16_to_cpu(rxdesc->descwb.framesize) 933 - RX_PREPAD_SIZE; 934 935 skb_reserve(skb, RX_PREPAD_SIZE); 936 skb_put(skb, framesize); 937 skb->protocol = eth_type_trans(skb, jme->dev); 938 939 if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags))) 940 skb->ip_summed = CHECKSUM_UNNECESSARY; 941 else 942 skb->ip_summed = CHECKSUM_NONE; 943 944 if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) { 945 if (jme->vlgrp) { 946 jme->jme_vlan_rx(skb, jme->vlgrp, 947 le16_to_cpu(rxdesc->descwb.vlan)); 948 NET_STAT(jme).rx_bytes += 4; 949 } 950 } else { 951 jme->jme_rx(skb); 952 } 953 954 if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_DEST)) == 955 cpu_to_le16(RXWBFLAG_DEST_MUL)) 956 ++(NET_STAT(jme).multicast); 957 958 NET_STAT(jme).rx_bytes += framesize; 959 ++(NET_STAT(jme).rx_packets); 960 } 961 962 jme_set_clean_rxdesc(jme, idx); 963 964} 965 966static int 967jme_process_receive(struct jme_adapter *jme, int limit) 968{ 969 struct jme_ring *rxring = &(jme->rxring[0]); 970 struct rxdesc *rxdesc = rxring->desc; 971 int i, j, ccnt, desccnt, mask = 
jme->rx_ring_mask; 972 973 if (unlikely(!atomic_dec_and_test(&jme->rx_cleaning))) 974 goto out_inc; 975 976 if (unlikely(atomic_read(&jme->link_changing) != 1)) 977 goto out_inc; 978 979 if (unlikely(!netif_carrier_ok(jme->dev))) 980 goto out_inc; 981 982 i = atomic_read(&rxring->next_to_clean); 983 while (limit > 0) { 984 rxdesc = rxring->desc; 985 rxdesc += i; 986 987 if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) || 988 !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL)) 989 goto out; 990 --limit; 991 992 desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT; 993 994 if (unlikely(desccnt > 1 || 995 rxdesc->descwb.errstat & RXWBERR_ALLERR)) { 996 997 if (rxdesc->descwb.errstat & RXWBERR_CRCERR) 998 ++(NET_STAT(jme).rx_crc_errors); 999 else if (rxdesc->descwb.errstat & RXWBERR_OVERUN) 1000 ++(NET_STAT(jme).rx_fifo_errors); 1001 else 1002 ++(NET_STAT(jme).rx_errors); 1003 1004 if (desccnt > 1) 1005 limit -= desccnt - 1; 1006 1007 for (j = i, ccnt = desccnt ; ccnt-- ; ) { 1008 jme_set_clean_rxdesc(jme, j); 1009 j = (j + 1) & (mask); 1010 } 1011 1012 } else { 1013 jme_alloc_and_feed_skb(jme, i); 1014 } 1015 1016 i = (i + desccnt) & (mask); 1017 } 1018 1019out: 1020 atomic_set(&rxring->next_to_clean, i); 1021 1022out_inc: 1023 atomic_inc(&jme->rx_cleaning); 1024 1025 return limit > 0 ? 
limit : 0; 1026 1027} 1028 1029static void 1030jme_attempt_pcc(struct dynpcc_info *dpi, int atmp) 1031{ 1032 if (likely(atmp == dpi->cur)) { 1033 dpi->cnt = 0; 1034 return; 1035 } 1036 1037 if (dpi->attempt == atmp) { 1038 ++(dpi->cnt); 1039 } else { 1040 dpi->attempt = atmp; 1041 dpi->cnt = 0; 1042 } 1043 1044} 1045 1046static void 1047jme_dynamic_pcc(struct jme_adapter *jme) 1048{ 1049 register struct dynpcc_info *dpi = &(jme->dpi); 1050 1051 if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD) 1052 jme_attempt_pcc(dpi, PCC_P3); 1053 else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD 1054 || dpi->intr_cnt > PCC_INTR_THRESHOLD) 1055 jme_attempt_pcc(dpi, PCC_P2); 1056 else 1057 jme_attempt_pcc(dpi, PCC_P1); 1058 1059 if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) { 1060 if (dpi->attempt < dpi->cur) 1061 tasklet_schedule(&jme->rxclean_task); 1062 jme_set_rx_pcc(jme, dpi->attempt); 1063 dpi->cur = dpi->attempt; 1064 dpi->cnt = 0; 1065 } 1066} 1067 1068static void 1069jme_start_pcc_timer(struct jme_adapter *jme) 1070{ 1071 struct dynpcc_info *dpi = &(jme->dpi); 1072 dpi->last_bytes = NET_STAT(jme).rx_bytes; 1073 dpi->last_pkts = NET_STAT(jme).rx_packets; 1074 dpi->intr_cnt = 0; 1075 jwrite32(jme, JME_TMCSR, 1076 TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT)); 1077} 1078 1079static inline void 1080jme_stop_pcc_timer(struct jme_adapter *jme) 1081{ 1082 jwrite32(jme, JME_TMCSR, 0); 1083} 1084 1085static void 1086jme_shutdown_nic(struct jme_adapter *jme) 1087{ 1088 u32 phylink; 1089 1090 phylink = jme_linkstat_from_phy(jme); 1091 1092 if (!(phylink & PHY_LINK_UP)) { 1093 /* 1094 * Disable all interrupt before issue timer 1095 */ 1096 jme_stop_irq(jme); 1097 jwrite32(jme, JME_TIMER2, TMCSR_EN | 0xFFFFFE); 1098 } 1099} 1100 1101static void 1102jme_pcc_tasklet(unsigned long arg) 1103{ 1104 struct jme_adapter *jme = (struct jme_adapter *)arg; 1105 struct net_device *netdev = jme->dev; 1106 1107 if 
(unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) { 1108 jme_shutdown_nic(jme); 1109 return; 1110 } 1111 1112 if (unlikely(!netif_carrier_ok(netdev) || 1113 (atomic_read(&jme->link_changing) != 1) 1114 )) { 1115 jme_stop_pcc_timer(jme); 1116 return; 1117 } 1118 1119 if (!(test_bit(JME_FLAG_POLL, &jme->flags))) 1120 jme_dynamic_pcc(jme); 1121 1122 jme_start_pcc_timer(jme); 1123} 1124 1125static inline void 1126jme_polling_mode(struct jme_adapter *jme) 1127{ 1128 jme_set_rx_pcc(jme, PCC_OFF); 1129} 1130 1131static inline void 1132jme_interrupt_mode(struct jme_adapter *jme) 1133{ 1134 jme_set_rx_pcc(jme, PCC_P1); 1135} 1136 1137static inline int 1138jme_pseudo_hotplug_enabled(struct jme_adapter *jme) 1139{ 1140 u32 apmc; 1141 apmc = jread32(jme, JME_APMC); 1142 return apmc & JME_APMC_PSEUDO_HP_EN; 1143} 1144 1145static void 1146jme_start_shutdown_timer(struct jme_adapter *jme) 1147{ 1148 u32 apmc; 1149 1150 apmc = jread32(jme, JME_APMC) | JME_APMC_PCIE_SD_EN; 1151 apmc &= ~JME_APMC_EPIEN_CTRL; 1152 if (!no_extplug) { 1153 jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_EN); 1154 wmb(); 1155 } 1156 jwrite32f(jme, JME_APMC, apmc); 1157 1158 jwrite32f(jme, JME_TIMER2, 0); 1159 set_bit(JME_FLAG_SHUTDOWN, &jme->flags); 1160 jwrite32(jme, JME_TMCSR, 1161 TMCSR_EN | ((0xFFFFFF - APMC_PHP_SHUTDOWN_DELAY) & TMCSR_CNT)); 1162} 1163 1164static void 1165jme_stop_shutdown_timer(struct jme_adapter *jme) 1166{ 1167 u32 apmc; 1168 1169 jwrite32f(jme, JME_TMCSR, 0); 1170 jwrite32f(jme, JME_TIMER2, 0); 1171 clear_bit(JME_FLAG_SHUTDOWN, &jme->flags); 1172 1173 apmc = jread32(jme, JME_APMC); 1174 apmc &= ~(JME_APMC_PCIE_SD_EN | JME_APMC_EPIEN_CTRL); 1175 jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_DIS); 1176 wmb(); 1177 jwrite32f(jme, JME_APMC, apmc); 1178} 1179 1180static void 1181jme_link_change_tasklet(unsigned long arg) 1182{ 1183 struct jme_adapter *jme = (struct jme_adapter *)arg; 1184 struct net_device *netdev = jme->dev; 1185 int rc; 1186 1187 while 
(!atomic_dec_and_test(&jme->link_changing)) { 1188 atomic_inc(&jme->link_changing); 1189 msg_intr(jme, "Get link change lock failed.\n"); 1190 while (atomic_read(&jme->link_changing) != 1) 1191 msg_intr(jme, "Waiting link change lock.\n"); 1192 } 1193 1194 if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu) 1195 goto out; 1196 1197 jme->old_mtu = netdev->mtu; 1198 netif_stop_queue(netdev); 1199 if (jme_pseudo_hotplug_enabled(jme)) 1200 jme_stop_shutdown_timer(jme); 1201 1202 jme_stop_pcc_timer(jme); 1203 tasklet_disable(&jme->txclean_task); 1204 tasklet_disable(&jme->rxclean_task); 1205 tasklet_disable(&jme->rxempty_task); 1206 1207 if (netif_carrier_ok(netdev)) { 1208 jme_reset_ghc_speed(jme); 1209 jme_disable_rx_engine(jme); 1210 jme_disable_tx_engine(jme); 1211 jme_reset_mac_processor(jme); 1212 jme_free_rx_resources(jme); 1213 jme_free_tx_resources(jme); 1214 1215 if (test_bit(JME_FLAG_POLL, &jme->flags)) 1216 jme_polling_mode(jme); 1217 1218 netif_carrier_off(netdev); 1219 } 1220 1221 jme_check_link(netdev, 0); 1222 if (netif_carrier_ok(netdev)) { 1223 rc = jme_setup_rx_resources(jme); 1224 if (rc) { 1225 jeprintk(jme->pdev, "Allocating resources for RX error" 1226 ", Device STOPPED!\n"); 1227 goto out_enable_tasklet; 1228 } 1229 1230 rc = jme_setup_tx_resources(jme); 1231 if (rc) { 1232 jeprintk(jme->pdev, "Allocating resources for TX error" 1233 ", Device STOPPED!\n"); 1234 goto err_out_free_rx_resources; 1235 } 1236 1237 jme_enable_rx_engine(jme); 1238 jme_enable_tx_engine(jme); 1239 1240 netif_start_queue(netdev); 1241 1242 if (test_bit(JME_FLAG_POLL, &jme->flags)) 1243 jme_interrupt_mode(jme); 1244 1245 jme_start_pcc_timer(jme); 1246 } else if (jme_pseudo_hotplug_enabled(jme)) { 1247 jme_start_shutdown_timer(jme); 1248 } 1249 1250 goto out_enable_tasklet; 1251 1252err_out_free_rx_resources: 1253 jme_free_rx_resources(jme); 1254out_enable_tasklet: 1255 tasklet_enable(&jme->txclean_task); 1256 tasklet_hi_enable(&jme->rxclean_task); 1257 
tasklet_hi_enable(&jme->rxempty_task); 1258out: 1259 atomic_inc(&jme->link_changing); 1260} 1261 1262static void 1263jme_rx_clean_tasklet(unsigned long arg) 1264{ 1265 struct jme_adapter *jme = (struct jme_adapter *)arg; 1266 struct dynpcc_info *dpi = &(jme->dpi); 1267 1268 jme_process_receive(jme, jme->rx_ring_size); 1269 ++(dpi->intr_cnt); 1270 1271} 1272 1273static int 1274jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget)) 1275{ 1276 struct jme_adapter *jme = jme_napi_priv(holder); 1277 int rest; 1278 1279 rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget)); 1280 1281 while (atomic_read(&jme->rx_empty) > 0) { 1282 atomic_dec(&jme->rx_empty); 1283 ++(NET_STAT(jme).rx_dropped); 1284 jme_restart_rx_engine(jme); 1285 } 1286 atomic_inc(&jme->rx_empty); 1287 1288 if (rest) { 1289 JME_RX_COMPLETE(netdev, holder); 1290 jme_interrupt_mode(jme); 1291 } 1292 1293 JME_NAPI_WEIGHT_SET(budget, rest); 1294 return JME_NAPI_WEIGHT_VAL(budget) - rest; 1295} 1296 1297static void 1298jme_rx_empty_tasklet(unsigned long arg) 1299{ 1300 struct jme_adapter *jme = (struct jme_adapter *)arg; 1301 1302 if (unlikely(atomic_read(&jme->link_changing) != 1)) 1303 return; 1304 1305 if (unlikely(!netif_carrier_ok(jme->dev))) 1306 return; 1307 1308 msg_rx_status(jme, "RX Queue Full!\n"); 1309 1310 jme_rx_clean_tasklet(arg); 1311 1312 while (atomic_read(&jme->rx_empty) > 0) { 1313 atomic_dec(&jme->rx_empty); 1314 ++(NET_STAT(jme).rx_dropped); 1315 jme_restart_rx_engine(jme); 1316 } 1317 atomic_inc(&jme->rx_empty); 1318} 1319 1320static void 1321jme_wake_queue_if_stopped(struct jme_adapter *jme) 1322{ 1323 struct jme_ring *txring = &(jme->txring[0]); 1324 1325 smp_wmb(); 1326 if (unlikely(netif_queue_stopped(jme->dev) && 1327 atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) { 1328 msg_tx_done(jme, "TX Queue Waked.\n"); 1329 netif_wake_queue(jme->dev); 1330 } 1331 1332} 1333 1334static void 1335jme_tx_clean_tasklet(unsigned long arg) 1336{ 1337 struct jme_adapter *jme = 
		(struct jme_adapter *)arg;
	struct jme_ring *txring = &(jme->txring[0]);
	struct txdesc *txdesc = txring->desc;
	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
	int i, j, cnt = 0, max, err, mask;

	tx_dbg(jme, "Into txclean.\n");

	/* tx_cleaning acts as a re-entrancy gate: only one cleaner runs. */
	if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning)))
		goto out;

	if (unlikely(atomic_read(&jme->link_changing) != 1))
		goto out;

	if (unlikely(!netif_carrier_ok(jme->dev)))
		goto out;

	/* max = number of descriptors currently in flight. */
	max = jme->tx_ring_size - atomic_read(&txring->nr_free);
	mask = jme->tx_ring_mask;

	for (i = atomic_read(&txring->next_to_clean) ; cnt < max ; ) {

		ctxbi = txbi + i;

		/* Only reclaim entries the NIC has written back (OWN clear). */
		if (likely(ctxbi->skb &&
		!(txdesc[i].descwb.flags & TXWBFLAG_OWN))) {

			tx_dbg(jme, "txclean: %d+%d@%lu\n",
					i, ctxbi->nr_desc, jiffies);

			err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;

			/* Unmap the fragment descriptors (j starts at 1:
			 * slot 0 of the group is the header descriptor
			 * with no mapping of its own). */
			for (j = 1 ; j < ctxbi->nr_desc ; ++j) {
				ttxbi = txbi + ((i + j) & (mask));
				txdesc[(i + j) & (mask)].dw[0] = 0;

				pci_unmap_page(jme->pdev,
						 ttxbi->mapping,
						 ttxbi->len,
						 PCI_DMA_TODEVICE);

				ttxbi->mapping = 0;
				ttxbi->len = 0;
			}

			dev_kfree_skb(ctxbi->skb);

			cnt += ctxbi->nr_desc;

			if (unlikely(err)) {
				++(NET_STAT(jme).tx_carrier_errors);
			} else {
				++(NET_STAT(jme).tx_packets);
				NET_STAT(jme).tx_bytes += ctxbi->len;
			}

			ctxbi->skb = NULL;
			ctxbi->len = 0;
			ctxbi->start_xmit = 0;

		} else {
			break;
		}

		i = (i + ctxbi->nr_desc) & mask;

		ctxbi->nr_desc = 0;
	}

	tx_dbg(jme, "txclean: done %d@%lu.\n", i, jiffies);
	atomic_set(&txring->next_to_clean, i);
	atomic_add(cnt, &txring->nr_free);

	jme_wake_queue_if_stopped(jme);

out:
	atomic_inc(&jme->tx_cleaning);
}

/*
 * Common interrupt dispatcher (used by both INTx and MSI paths).
 * Masks all interrupts on entry, acknowledges and hands each event
 * class to its tasklet, and re-enables interrupts on exit.
 */
static void
jme_intr_msi(struct jme_adapter *jme, u32 intrstat)
{
	/*
	 * Disable interrupt
	 */
	jwrite32f(jme, JME_IENC, INTR_ENABLE);

	if (intrstat & (INTR_LINKCH | INTR_SWINTR)) {
		/*
		 * Link change event is critical
		 * all other events are ignored
		 */
		jwrite32(jme, JME_IEVE, intrstat);
		tasklet_schedule(&jme->linkch_task);
		goto out_reenable;
	}

	if (intrstat & INTR_TMINTR) {
		jwrite32(jme, JME_IEVE, INTR_TMINTR);
		tasklet_schedule(&jme->pcc_task);
	}

	if (intrstat & (INTR_PCCTXTO | INTR_PCCTX)) {
		jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | INTR_TX0);
		tasklet_schedule(&jme->txclean_task);
	}

	if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
		/* Acknowledge RX coalescing/empty events plus raw RX0. */
		jwrite32(jme, JME_IEVE, (intrstat & (INTR_PCCRX0TO |
						     INTR_PCCRX0 |
						     INTR_RX0EMP)) |
					INTR_RX0);
	}

	if (test_bit(JME_FLAG_POLL, &jme->flags)) {
		/* NAPI mode: record empty events, then schedule polling. */
		if (intrstat & INTR_RX0EMP)
			atomic_inc(&jme->rx_empty);

		if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
			if (likely(JME_RX_SCHEDULE_PREP(jme))) {
				jme_polling_mode(jme);
				JME_RX_SCHEDULE(jme);
			}
		}
	} else {
		/* Tasklet mode: empty-ring events take the high-prio path. */
		if (intrstat & INTR_RX0EMP) {
			atomic_inc(&jme->rx_empty);
			tasklet_hi_schedule(&jme->rxempty_task);
		} else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0)) {
			tasklet_hi_schedule(&jme->rxclean_task);
		}
	}

out_reenable:
	/*
	 * Re-enable interrupt
	 */
	jwrite32f(jme, JME_IENS, INTR_ENABLE);
}

/*
 * Shared (INTx) interrupt handler: validates the interrupt really
 * belongs to this device before dispatching.
 */
static irqreturn_t
jme_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 intrstat;

	intrstat = jread32(jme, JME_IEVE);

	/*
	 * Check if it's really an interrupt for us
	 */
	if (unlikely((intrstat & INTR_ENABLE) == 0))
		return IRQ_NONE;

	/*
	 * Check if the device still exist
	 */
	if (unlikely(intrstat == ~((typeof(intrstat))0)))
		return IRQ_NONE;

	jme_intr_msi(jme, intrstat);

	return IRQ_HANDLED;
}

/*
 * MSI interrupt handler: MSI is never shared, so no ownership checks.
 */
static irqreturn_t
jme_msi(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 intrstat;

	intrstat = jread32(jme, JME_IEVE);

	jme_intr_msi(jme, intrstat);

	return IRQ_HANDLED;
}

/*
 * Kick the link timer so the link-change path re-evaluates soon
 * (fires a software timer interrupt rather than touching the PHY).
 */
static void
jme_reset_link(struct jme_adapter *jme)
{
	jwrite32(jme, JME_TMCSR, TMCSR_SWIT);
}

/*
 * Restart PHY auto-negotiation under the PHY lock.
 */
static void
jme_restart_an(struct jme_adapter *jme)
{
	u32 bmcr;

	spin_lock_bh(&jme->phy_lock);
	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
	bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
	spin_unlock_bh(&jme->phy_lock);
}

/*
 * Request the device IRQ, preferring MSI; falls back to shared INTx.
 * On MSI failure after request_irq() error, MSI is disabled again
 * (continues below).
 */
static int
jme_request_irq(struct jme_adapter *jme)
{
	int rc;
	struct net_device *netdev = jme->dev;
	irq_handler_t handler = jme_intr;
	int irq_flags = IRQF_SHARED;

	if (!pci_enable_msi(jme->pdev)) {
		set_bit(JME_FLAG_MSI, &jme->flags);
		handler = jme_msi;
		irq_flags = 0;
	}

	rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name,
			netdev);
	if (rc) {
		jeprintk(jme->pdev,
			"Unable to request %s interrupt (return: %d)\n",
			test_bit(JME_FLAG_MSI, &jme->flags) ?
				"MSI" : "INTx",
			rc);

		if (test_bit(JME_FLAG_MSI, &jme->flags)) {
			pci_disable_msi(jme->pdev);
			clear_bit(JME_FLAG_MSI, &jme->flags);
		}
	} else {
		netdev->irq = jme->pdev->irq;
	}

	return rc;
}

/*
 * Release the IRQ and tear down MSI if it was in use, restoring
 * netdev->irq to the (possibly changed) PCI line IRQ.
 */
static void
jme_free_irq(struct jme_adapter *jme)
{
	free_irq(jme->pdev->irq, jme->dev);
	if (test_bit(JME_FLAG_MSI, &jme->flags)) {
		pci_disable_msi(jme->pdev);
		clear_bit(JME_FLAG_MSI, &jme->flags);
		jme->dev->irq = jme->pdev->irq;
	}
}

/*
 * ndo_open: bring the interface up — clear power management state,
 * enable NAPI and the deferred-work tasklets, grab the IRQ, then
 * (re)program the PHY and trigger a link check.
 */
static int
jme_open(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc;

	jme_clear_pm(jme);
	JME_NAPI_ENABLE(jme);

	tasklet_enable(&jme->linkch_task);
	tasklet_enable(&jme->txclean_task);
	tasklet_hi_enable(&jme->rxclean_task);
	tasklet_hi_enable(&jme->rxempty_task);

	rc = jme_request_irq(jme);
	if (rc)
		goto err_out;

	jme_start_irq(jme);

	/* Re-apply user ethtool settings if any were saved. */
	if (test_bit(JME_FLAG_SSET, &jme->flags))
		jme_set_settings(netdev, &jme->old_ecmd);
	else
		jme_reset_phy_processor(jme);

	jme_reset_link(jme);

	return 0;

err_out:
	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	return rc;
}

#ifdef CONFIG_PM
/*
 * Force the PHY to 100M half-duplex (used on the suspend path so
 * Wake-on-LAN operates at a low, safe speed).
 */
static void
jme_set_100m_half(struct jme_adapter *jme)
{
	u32 bmcr, tmp;

	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
	tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
		       BMCR_SPEED1000 | BMCR_FULLDPLX);
	tmp |= BMCR_SPEED100;

	/* Avoid a redundant (and disruptive) MDIO write. */
	if (bmcr != tmp)
		jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp);

	if (jme->fpgaver)
		jwrite32(jme, JME_GHC, GHC_SPEED_100M | GHC_LINK_POLL);
	else
		jwrite32(jme, JME_GHC, GHC_SPEED_100M);
}

#define JME_WAIT_LINK_TIME 2000 /* 2000ms */
/*
 * Busy-wait (mdelay) until the PHY reports link-up or the ~2s budget
 * is consumed. Suspend-path only, so blocking delays are tolerated.
 */
static void
jme_wait_link(struct jme_adapter *jme)
{
	u32 phylink, to = JME_WAIT_LINK_TIME;

	mdelay(1000);
	phylink = jme_linkstat_from_phy(jme);
	while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) {
		mdelay(10);
		phylink = jme_linkstat_from_phy(jme);
	}
}
#endif

/*
 * Power the PHY down via BMCR.
 */
static inline void
jme_phy_off(struct jme_adapter *jme)
{
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
}

/*
 * ndo_stop: quiesce everything in the reverse order of jme_open —
 * stop the queue/IRQ, disable NAPI and tasklets, halt both DMA
 * engines, free ring resources and power the PHY off.
 */
static int
jme_close(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);

	jme_stop_irq(jme);
	jme_free_irq(jme);

	JME_NAPI_DISABLE(jme);

	tasklet_disable(&jme->linkch_task);
	tasklet_disable(&jme->txclean_task);
	tasklet_disable(&jme->rxclean_task);
	tasklet_disable(&jme->rxempty_task);

	jme_reset_ghc_speed(jme);
	jme_disable_rx_engine(jme);
	jme_disable_tx_engine(jme);
	jme_reset_mac_processor(jme);
	jme_free_rx_resources(jme);
	jme_free_tx_resources(jme);
	jme->phylink = 0;
	jme_phy_off(jme);

	return 0;
}

/*
 * Reserve a contiguous group of TX descriptors for 'skb'
 * (nr_frags + 2: one header slot plus one for the linear part).
 * Returns the start index, or -1 if the ring lacks space.
 */
static int
jme_alloc_txdesc(struct jme_adapter *jme,
			struct sk_buff *skb)
{
	struct jme_ring *txring = &(jme->txring[0]);
	int idx, nr_alloc, mask = jme->tx_ring_mask;

	idx = txring->next_to_use;
	nr_alloc = skb_shinfo(skb)->nr_frags + 2;

	if (unlikely(atomic_read(&txring->nr_free) < nr_alloc))
		return -1;

	atomic_sub(nr_alloc, &txring->nr_free);

	txring->next_to_use = (txring->next_to_use + nr_alloc) & mask;

	return idx;
}

/*
 * DMA-map one page of TX payload and fill the type-2 descriptor
 * with its address/length; records the mapping in 'txbi' so
 * jme_tx_clean_tasklet() can unmap it later. 'hidma' selects the
 * 64-bit-address flag (continues below).
 */
static void
jme_fill_tx_map(struct pci_dev *pdev,
		struct txdesc *txdesc,
		struct jme_buffer_info *txbi,
		struct page *page,
		u32 page_offset,
		u32 len,
		u8 hidma)
{
	dma_addr_t dmaaddr;

	dmaaddr = pci_map_page(pdev,
				page,
				page_offset,
				len,
				PCI_DMA_TODEVICE);

	pci_dma_sync_single_for_device(pdev,
				       dmaaddr,
				       len,
				       PCI_DMA_TODEVICE);

	txdesc->dw[0] = 0;
	txdesc->dw[1] = 0;
	/* Split the 64-bit DMA address across the high/low fields. */
	txdesc->desc2.flags	= TXFLAG_OWN;
	txdesc->desc2.flags	|= (hidma) ? TXFLAG_64BIT : 0;
	txdesc->desc2.datalen	= cpu_to_le16(len);
	txdesc->desc2.bufaddrh	= cpu_to_le32((__u64)dmaaddr >> 32);
	txdesc->desc2.bufaddrl	= cpu_to_le32(
					(__u64)dmaaddr & 0xFFFFFFFFUL);

	txbi->mapping = dmaaddr;
	txbi->len = len;
}

/*
 * Map the whole skb into the descriptor group reserved at 'idx':
 * fragments go to slots idx+2..., the linear head to slot idx+1
 * (slot idx itself is the header descriptor filled by the caller).
 */
static void
jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
{
	struct jme_ring *txring = &(jme->txring[0]);
	struct txdesc *txdesc = txring->desc, *ctxdesc;
	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
	u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
	int i, nr_frags = skb_shinfo(skb)->nr_frags;
	int mask = jme->tx_ring_mask;
	struct skb_frag_struct *frag;
	u32 len;

	for (i = 0 ; i < nr_frags ; ++i) {
		frag = &skb_shinfo(skb)->frags[i];
		ctxdesc = txdesc + ((idx + i + 2) & (mask));
		ctxbi = txbi + ((idx + i + 2) & (mask));

		jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, frag->page,
				 frag->page_offset, frag->size, hidma);
	}

	/* For a nonlinear skb only the head portion lives in skb->data. */
	len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
	ctxdesc = txdesc + ((idx + 1) & (mask));
	ctxbi = txbi + ((idx + 1) & (mask));
	jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
			offset_in_page(skb->data), len, hidma);

}

/*
 * TSO packets with a cloned header must be unshared before the header
 * checksum fields are rewritten. Frees the skb and returns -1 if
 * expansion fails; 0 otherwise.
 */
static int
jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
{
	if (unlikely(skb_shinfo(skb)->gso_size &&
			skb_header_cloned(skb) &&
			pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) {
		dev_kfree_skb(skb);
		return -1;
	}

	return 0;
}

/*
 * Configure TSO for the descriptor: store the MSS and prime the TCP
 * pseudo-header checksum (IPv4 or IPv6) for hardware segmentation.
 * Returns 0 when TSO applies, 1 when the caller should fall back to
 * plain checksum offload.
 */
static int
jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
{
	*mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT);
	if (*mss) {
		*flags |= TXFLAG_LSEN;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);

			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								iph->daddr, 0,
								IPPROTO_TCP,
								0);
		} else {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			tcp_hdr(skb)->check = ~csum_ipv6_magic(&ip6h->saddr,
								&ip6h->daddr, 0,
								IPPROTO_TCP,
								0);
		}

		return 0;
	}

	return 1;
}

/*
 * Set the TCP/UDP hardware-checksum flag for packets requesting
 * CHECKSUM_PARTIAL; unsupported upper protocols are logged only.
 */
static void
jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 ip_proto;

		switch (skb->protocol) {
		case htons(ETH_P_IP):
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			ip_proto = 0;
			break;
		}

		switch (ip_proto) {
		case IPPROTO_TCP:
			*flags |= TXFLAG_TCPCS;
			break;
		case IPPROTO_UDP:
			*flags |= TXFLAG_UDPCS;
			break;
		default:
			msg_tx_err(jme, "Error upper layer protocol.\n");
			break;
		}
	}
}

/*
 * Propagate a VLAN tag from the skb into the TX descriptor.
 */
static inline void
jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags)
{
	if (vlan_tx_tag_present(skb)) {
		*flags |= TXFLAG_TAGON;
		*vlan = cpu_to_le16(vlan_tx_tag_get(skb));
	}
}

/*
 * Fill the header descriptor at 'idx' for 'skb': TSO/checksum/VLAN
 * flags, buffer mappings, and finally the OWN flag. The two wmb()
 * calls order the descriptor contents vs. the OWN handoff, and the
 * OWN handoff vs. the bookkeeping used by the TX cleaner.
 */
static int
jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
{
	struct jme_ring *txring = &(jme->txring[0]);
	struct txdesc *txdesc;
	struct jme_buffer_info *txbi;
	u8 flags;

	txdesc = (struct txdesc *)txring->desc + idx;
	txbi = txring->bufinf + idx;

	txdesc->dw[0] = 0;
	txdesc->dw[1] = 0;
	txdesc->dw[2] = 0;
	txdesc->dw[3] = 0;
	txdesc->desc1.pktsize = cpu_to_le16(skb->len);
	/*
	 * Set OWN bit at final.
	 * When kernel transmit faster than NIC.
	 * And NIC trying to send this descriptor before we tell
	 * it to start sending this TX queue.
	 * Other fields are already filled correctly.
	 */
	wmb();
	flags = TXFLAG_OWN | TXFLAG_INT;
	/*
	 * Set checksum flags while not tso
	 */
	if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
		jme_tx_csum(jme, skb, &flags);
	jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
	jme_map_tx_skb(jme, skb, idx);
	txdesc->desc1.flags = flags;
	/*
	 * Set tx buffer info after telling NIC to send
	 * For better tx_clean timing
	 */
	wmb();
	txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2;
	txbi->skb = skb;
	txbi->len = skb->len;
	txbi->start_xmit = jiffies;
	/* 0 means "not transmitted" to the cleaner; avoid a jiffies
	 * value that happens to be 0. */
	if (!txbi->start_xmit)
		txbi->start_xmit = (0UL-1);

	return 0;
}

/*
 * Stop the TX queue when the ring cannot hold another worst-case
 * packet (MAX_SKB_FRAGS+2 slots), with an immediate re-wake if the
 * cleaner freed space in the meantime. Also stops the queue when the
 * oldest in-flight packet has exceeded TX_TIMEOUT.
 */
static void
jme_stop_queue_if_full(struct jme_adapter *jme)
{
	struct jme_ring *txring = &(jme->txring[0]);
	struct jme_buffer_info *txbi = txring->bufinf;
	int idx = atomic_read(&txring->next_to_clean);

	txbi += idx;

	smp_wmb();
	if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
		netif_stop_queue(jme->dev);
		msg_tx_queued(jme, "TX Queue Paused.\n");
		smp_wmb();
		if (atomic_read(&txring->nr_free)
			>= (jme->tx_wake_threshold)) {
			netif_wake_queue(jme->dev);
			msg_tx_queued(jme, "TX Queue Fast Waked.\n");
		}
	}

	if (unlikely(txbi->start_xmit &&
			(jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
			txbi->skb)) {
		netif_stop_queue(jme->dev);
		msg_tx_queued(jme, "TX Queue Stopped %d@%lu.\n", idx, jiffies);
	}
}

/*
 * This function is already protected by netif_tx_lock()
 */

/*
 * ndo_start_xmit: reserve descriptors, build them, and kick the TX
 * engine via JME_TXCS. Returns NETDEV_TX_BUSY only in the "ring full
 * while queue awake" case, which is logged as a bug.
 */
static netdev_tx_t
jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int idx;

	if (unlikely(jme_expand_header(jme, skb))) {
		++(NET_STAT(jme).tx_dropped);
		return NETDEV_TX_OK;
	}

	idx = jme_alloc_txdesc(jme, skb);

	if (unlikely(idx < 0)) {
		netif_stop_queue(netdev);
		msg_tx_err(jme, "BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}

	jme_fill_tx_desc(jme, skb, idx);

	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_QUEUE0S |
				TXCS_ENABLE);

	tx_dbg(jme, "xmit: %d+%d@%lu\n", idx,
			skb_shinfo(skb)->nr_frags + 2,
			jiffies);
	jme_stop_queue_if_full(jme);

	return NETDEV_TX_OK;
}

/*
 * ndo_set_mac_address: refuse while the interface is running, then
 * write the new address into the unicast-match registers (bytes 0-3
 * packed little-endian into RXUMA_LO, bytes 4-5 into RXUMA_HI).
 */
static int
jme_set_macaddr(struct net_device *netdev, void *p)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	struct sockaddr *addr = p;
	u32 val;

	if (netif_running(netdev))
		return -EBUSY;

	spin_lock_bh(&jme->macaddr_lock);
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	val = (addr->sa_data[3] & 0xff) << 24 |
	      (addr->sa_data[2] & 0xff) << 16 |
	      (addr->sa_data[1] & 0xff) <<  8 |
	      (addr->sa_data[0] & 0xff);
	jwrite32(jme, JME_RXUMA_LO, val);
	val = (addr->sa_data[5] & 0xff) << 8 |
	      (addr->sa_data[4] & 0xff);
	jwrite32(jme, JME_RXUMA_HI, val);
	spin_unlock_bh(&jme->macaddr_lock);

	return 0;
}

static void
jme_set_multi(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 mc_hash[2] = {};
	int i;

	spin_lock_bh(&jme->rxmcs_lock);

	jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME;

	if (netdev->flags & IFF_PROMISC) {
		jme->reg_rxmcs |= RXMCS_ALLFRAME;
	} else if (netdev->flags & IFF_ALLMULTI) {
		jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
	} else if (netdev->flags & IFF_MULTICAST) {
		struct dev_mc_list *mclist;
		int bit_nr;

		/* Build a 64-bit multicast hash filter from the top 6
		 * CRC bits of each list entry. */
		jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
		for (i = 0, mclist = netdev->mc_list;
		     mclist && i < netdev->mc_count;
		     ++i, mclist = mclist->next) {

			bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
			mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
		}

		jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]);
		jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
	}

	/* Ensure the hash table is written before enabling filtering. */
	wmb();
	jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);

	spin_unlock_bh(&jme->rxmcs_lock);
}

/*
 * ndo_change_mtu: validate the range, retune the RX FIFO threshold
 * for jumbo frames, and drop HW checksum/TSO above 1900 bytes (the
 * hardware offloads only work up to that size per this driver's
 * feature toggles below).
 */
static int
jme_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	if (new_mtu == jme->old_mtu)
		return 0;

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
		((new_mtu) < IPV6_MIN_MTU))
		return -EINVAL;

	if (new_mtu > 4000) {
		jme->reg_rxcs &= ~RXCS_FIFOTHNP;
		jme->reg_rxcs |= RXCS_FIFOTHNP_64QW;
		jme_restart_rx_engine(jme);
	} else {
		jme->reg_rxcs &= ~RXCS_FIFOTHNP;
		jme->reg_rxcs |= RXCS_FIFOTHNP_128QW;
		jme_restart_rx_engine(jme);
	}

	if (new_mtu > 1900) {
		netdev->features &= ~(NETIF_F_HW_CSUM |
				NETIF_F_TSO |
				NETIF_F_TSO6);
	} else {
		/* Restore offloads only if the user had them enabled. */
		if (test_bit(JME_FLAG_TXCSUM, &jme->flags))
			netdev->features |= NETIF_F_HW_CSUM;
		if (test_bit(JME_FLAG_TSO, &jme->flags))
			netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
	}

	netdev->mtu = new_mtu;
	jme_reset_link(jme);

	return 0;
}

/*
 * ndo_tx_timeout: recover by resetting the PHY processor, reapplying
 * saved ethtool settings, and forcing a link re-check.
 */
static void
jme_tx_timeout(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	jme->phylink = 0;
	jme_reset_phy_processor(jme);
	if (test_bit(JME_FLAG_SSET, &jme->flags))
		jme_set_settings(netdev, &jme->old_ecmd);

	/*
	 * Force to Reset the link again
	 */
	jme_reset_link(jme);
}

/*
 * ndo_vlan_rx_register: remember the VLAN group for RX acceleration.
 */
static void
jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	jme->vlgrp = grp;
}

/*
 * ethtool get_drvinfo: report driver name, version, and PCI address.
 */
static void
jme_get_drvinfo(struct net_device *netdev,
		     struct ethtool_drvinfo *info)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(jme->pdev));
}

/* ethtool get_regs_len: size of the register dump below. */
static int
jme_get_regs_len(struct net_device *netdev)
{
	return JME_REG_LEN;
}

/*
 * Copy 'len' bytes of MMIO register space starting at 'reg'
 * into 'p' as 32-bit reads.
 */
static void
mmapio_memcpy(struct jme_adapter *jme, u32 *p, u32 reg, int len)
{
	int i;

	for (i = 0 ; i < len ; i += 4)
		p[i >> 2] = jread32(jme, reg + i);
}

/*
 * Dump 'reg_nr' 16-bit PHY registers (via MDIO) into 'p'.
 */
static void
mdio_memcpy(struct jme_adapter *jme, u32 *p, int reg_nr)
{
	int i;
	u16 *p16 = (u16 *)p;

	for (i = 0 ; i < reg_nr ; ++i)
		p16[i] = jme_mdio_read(jme->dev, jme->mii_if.phy_id, i);
}

/*
 * ethtool get_regs: dump each register bank at 0x100-byte strides
 * (MAC, PHY, MISC, RSS), then the MDIO PHY registers (continues
 * below).
 */
static void
jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 *p32 = (u32 *)p;

	memset(p, 0xFF, JME_REG_LEN);

	regs->version = 1;
	mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN);

	p32 += 0x100 >> 2;
	mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN);

	p32 += 0x100 >> 2;
	mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN);

	p32 += 0x100 >> 2;
	mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN);

	p32 += 0x100 >> 2;
	mdio_memcpy(jme, p32, JME_PHY_REG_NR);
}

/*
 * ethtool get_coalesce: report the fixed TX coalescing parameters
 * and the current adaptive-RX (dynamic PCC) level; in NAPI polling
 * mode adaptive RX coalescing is reported as off.
 */
static int
jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	ecmd->tx_coalesce_usecs = PCC_TX_TO;
	ecmd->tx_max_coalesced_frames = PCC_TX_CNT;

	if (test_bit(JME_FLAG_POLL, &jme->flags)) {
		ecmd->use_adaptive_rx_coalesce = false;
		ecmd->rx_coalesce_usecs = 0;
		ecmd->rx_max_coalesced_frames = 0;
		return 0;
	}

	ecmd->use_adaptive_rx_coalesce = true;

	switch (jme->dpi.cur) {
	case PCC_P1:
		ecmd->rx_coalesce_usecs = PCC_P1_TO;
		ecmd->rx_max_coalesced_frames = PCC_P1_CNT;
		break;
	case PCC_P2:
		ecmd->rx_coalesce_usecs = PCC_P2_TO;
		ecmd->rx_max_coalesced_frames = PCC_P2_CNT;
		break;
	case PCC_P3:
		ecmd->rx_coalesce_usecs = PCC_P3_TO;
		ecmd->rx_max_coalesced_frames = PCC_P3_CNT;
		break;
	default:
		break;
	}

	return 0;
}

/*
 * ethtool set_coalesce: toggle between dynamic-PCC interrupt mode
 * (adaptive on -> clear JME_FLAG_POLL, use netif_rx) and NAPI polling
 * mode (adaptive off -> set JME_FLAG_POLL, use netif_receive_skb).
 * Only allowed while the interface is down.
 */
static int
jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	struct dynpcc_info *dpi = &(jme->dpi);

	if (netif_running(netdev))
		return -EBUSY;

	if (ecmd->use_adaptive_rx_coalesce
	&& test_bit(JME_FLAG_POLL, &jme->flags)) {
		clear_bit(JME_FLAG_POLL, &jme->flags);
		jme->jme_rx = netif_rx;
		jme->jme_vlan_rx = vlan_hwaccel_rx;
		dpi->cur		= PCC_P1;
		dpi->attempt		= PCC_P1;
		dpi->cnt		= 0;
		jme_set_rx_pcc(jme, PCC_P1);
		jme_interrupt_mode(jme);
	} else if (!(ecmd->use_adaptive_rx_coalesce)
	&& !(test_bit(JME_FLAG_POLL, &jme->flags))) {
		set_bit(JME_FLAG_POLL, &jme->flags);
		jme->jme_rx = netif_receive_skb;
		jme->jme_vlan_rx = vlan_hwaccel_receive_skb;
		jme_interrupt_mode(jme);
	}

	return 0;
}

/*
 * ethtool get_pauseparam: TX/RX pause from the cached register
 * shadows; 'autoneg' reflects whether pause is advertised on the PHY.
 */
static void
jme_get_pauseparam(struct net_device *netdev,
		   struct ethtool_pauseparam *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 val;

	ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0;
	ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0;

	spin_lock_bh(&jme->phy_lock);
	val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
	spin_unlock_bh(&jme->phy_lock);

	ecmd->autoneg =
		(val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
}

/*
 * ethtool set_pauseparam: each of the three settings is written only
 * when it actually changed (XOR of current vs. requested state).
 */
static int
jme_set_pauseparam(struct net_device *netdev,
		   struct ethtool_pauseparam *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 val;

	if (((jme->reg_txpfc & TXPFC_PF_EN) != 0) ^
		(ecmd->tx_pause != 0)) {

		if (ecmd->tx_pause)
			jme->reg_txpfc |= TXPFC_PF_EN;
		else
			jme->reg_txpfc &= ~TXPFC_PF_EN;

		jwrite32(jme, JME_TXPFC, jme->reg_txpfc);
	}

	spin_lock_bh(&jme->rxmcs_lock);
	if (((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) ^
		(ecmd->rx_pause != 0)) {

		if (ecmd->rx_pause)
			jme->reg_rxmcs |= RXMCS_FLOWCTRL;
		else
			jme->reg_rxmcs &= ~RXMCS_FLOWCTRL;

		jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
	}
	spin_unlock_bh(&jme->rxmcs_lock);

	spin_lock_bh(&jme->phy_lock);
	val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
	if (((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) ^
		(ecmd->autoneg != 0)) {

		if (ecmd->autoneg)
			val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		else
			val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		jme_mdio_write(jme->dev, jme->mii_if.phy_id,
				MII_ADVERTISE, val);
	}
	spin_unlock_bh(&jme->phy_lock);

	return 0;
}

/*
 * ethtool get_wol: the device supports magic-packet and link-change
 * wakeups; report which are currently enabled in the PMCS shadow.
 */
static void
jme_get_wol(struct net_device *netdev,
	    struct ethtool_wolinfo *wol)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	wol->supported = WAKE_MAGIC | WAKE_PHY;

	wol->wolopts = 0;

	if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
		wol->wolopts |= WAKE_PHY;

	if (jme->reg_pmcs & PMCS_MFEN)
		wol->wolopts |= WAKE_MAGIC;

}

/*
 * ethtool set_wol: reject unsupported wake sources, then program
 * PMCS with link-change (LFEN|LREN) and/or magic-frame (MFEN) bits.
 */
static int
jme_set_wol(struct net_device *netdev,
	    struct ethtool_wolinfo *wol)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_MAGICSECURE |
			    WAKE_UCAST |
			    WAKE_MCAST |
			    WAKE_BCAST |
			    WAKE_ARP))
		return -EOPNOTSUPP;

	jme->reg_pmcs = 0;

	if (wol->wolopts & WAKE_PHY)
		jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN;

	if (wol->wolopts & WAKE_MAGIC)
		jme->reg_pmcs |= PMCS_MFEN;

	jwrite32(jme, JME_PMCS, jme->reg_pmcs);

	return 0;
}

/*
 * ethtool get_settings: delegate to the generic MII helper under
 * the PHY lock.
 */
static int
jme_get_settings(struct net_device *netdev,
		     struct ethtool_cmd *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc;

	spin_lock_bh(&jme->phy_lock);
	rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
	spin_unlock_bh(&jme->phy_lock);
	return rc;
}

/*
 * ethtool set_settings: 1000M requires autoneg. On success the
 * settings are saved (JME_FLAG_SSET) so jme_open()/tx_timeout can
 * re-apply them; a forced duplex change also triggers a link reset.
 */
static int
jme_set_settings(struct net_device *netdev,
		     struct ethtool_cmd *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc, fdc = 0;

	if (ecmd->speed == SPEED_1000 && ecmd->autoneg != AUTONEG_ENABLE)
		return -EINVAL;

	if (jme->mii_if.force_media &&
	ecmd->autoneg != AUTONEG_ENABLE &&
	(jme->mii_if.full_duplex != ecmd->duplex))
		fdc = 1;

	spin_lock_bh(&jme->phy_lock);
	rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
	spin_unlock_bh(&jme->phy_lock);

	if (!rc && fdc)
		jme_reset_link(jme);

	if (!rc) {
		set_bit(JME_FLAG_SSET, &jme->flags);
		jme->old_ecmd = *ecmd;
	}

	return rc;
}

/* ethtool get_link: read link-up directly from the PHY_LINK register. */
static u32
jme_get_link(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
}

static u32
jme_get_msglevel(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	return jme->msg_enable;
}

/* ethtool set_msglevel: store the driver message-enable bitmask. */
static void
jme_set_msglevel(struct net_device *netdev, u32 value)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	jme->msg_enable = value;
}

/* ethtool get_rx_csum: RX checksum state from the RXMCS shadow. */
static u32
jme_get_rx_csum(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	return jme->reg_rxmcs & RXMCS_CHECKSUM;
}

/* ethtool set_rx_csum: toggle HW RX checksumming in RXMCS. */
static int
jme_set_rx_csum(struct net_device *netdev, u32 on)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	spin_lock_bh(&jme->rxmcs_lock);
	if (on)
		jme->reg_rxmcs |= RXMCS_CHECKSUM;
	else
		jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
	jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
	spin_unlock_bh(&jme->rxmcs_lock);

	return 0;
}

/*
 * ethtool set_tx_csum: remember the user's choice in JME_FLAG_TXCSUM;
 * the feature bit is only set while the MTU permits HW checksumming
 * (<= 1900, see jme_change_mtu()).
 */
static int
jme_set_tx_csum(struct net_device *netdev, u32 on)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	if (on) {
		set_bit(JME_FLAG_TXCSUM, &jme->flags);
		if (netdev->mtu <= 1900)
			netdev->features |= NETIF_F_HW_CSUM;
	} else {
		clear_bit(JME_FLAG_TXCSUM, &jme->flags);
		netdev->features &= ~NETIF_F_HW_CSUM;
	}

	return 0;
}

/*
 * ethtool set_tso: same MTU-gated pattern as set_tx_csum, for
 * TSO/TSO6.
 */
static int
jme_set_tso(struct net_device *netdev, u32 on)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	if (on) {
		set_bit(JME_FLAG_TSO, &jme->flags);
		if (netdev->mtu <= 1900)
			netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
	} else {
		clear_bit(JME_FLAG_TSO, &jme->flags);
		netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
	}

	return 0;
}

/* ethtool nway_reset: restart PHY auto-negotiation. */
static int
jme_nway_reset(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	jme_restart_an(jme);
	return 0;
}

/*
 * Read one byte from the SMBus-attached EEPROM at 'addr'.
 * Polls busy/command-done with 1ms sleeps up to JME_SMB_BUSY_TIMEOUT;
 * returns 0xFF on timeout (indistinguishable from a real 0xFF byte).
 */
static u8
jme_smb_read(struct jme_adapter *jme, unsigned int addr)
{
	u32 val;
	int to;

	val = jread32(jme, JME_SMBCSR);
	to = JME_SMB_BUSY_TIMEOUT;
	while ((val & SMBCSR_BUSY) && --to) {
		msleep(1);
		val = jread32(jme, JME_SMBCSR);
	}
	if (!to) {
		msg_hw(jme, "SMB Bus Busy.\n");
		return 0xFF;
	}

	jwrite32(jme, JME_SMBINTF,
		((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
		SMBINTF_HWRWN_READ |
		SMBINTF_HWCMD);

	val = jread32(jme, JME_SMBINTF);
	to = JME_SMB_BUSY_TIMEOUT;
	while ((val & SMBINTF_HWCMD) && --to) {
		msleep(1);
		val = jread32(jme, JME_SMBINTF);
	}
	if (!to) {
		msg_hw(jme, "SMB Bus Busy.\n");
		return 0xFF;
	}

	return (val & SMBINTF_HWDATR) >> SMBINTF_HWDATR_SHIFT;
}

/*
 * Write one byte to the SMBus EEPROM at 'addr'; same polling scheme
 * as jme_smb_read(). The trailing mdelay(2) allows the EEPROM write
 * cycle to settle before the next operation.
 */
static void
jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
{
	u32 val;
	int to;

	val = jread32(jme, JME_SMBCSR);
	to = JME_SMB_BUSY_TIMEOUT;
	while ((val & SMBCSR_BUSY) && --to) {
		msleep(1);
		val = jread32(jme, JME_SMBCSR);
	}
	if (!to) {
		msg_hw(jme, "SMB Bus Busy.\n");
		return;
	}

	jwrite32(jme, JME_SMBINTF,
		((data << SMBINTF_HWDATW_SHIFT) & SMBINTF_HWDATW) |
		((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
		SMBINTF_HWRWN_WRITE |
		SMBINTF_HWCMD);

	val = jread32(jme, JME_SMBINTF);
	to = JME_SMB_BUSY_TIMEOUT;
	while ((val & SMBINTF_HWCMD) && --to) {
		msleep(1);
		val = jread32(jme, JME_SMBINTF);
	}
	if (!to) {
		msg_hw(jme, "SMB Bus Busy.\n");
		return;
	}

	mdelay(2);
}

/*
 * ethtool get_eeprom_len: JME_SMB_LEN when an EEPROM is detected
 * (SMBCSR_EEPROMD), 0 otherwise.
 */
static int
jme_get_eeprom_len(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 val;
	val = jread32(jme, JME_SMBCSR);
	return (val & SMBCSR_EEPROMD) ? JME_SMB_LEN : 0;
}

/*
 * ethtool get_eeprom: byte-wise SMBus reads into 'data'.
 */
static int
jme_get_eeprom(struct net_device *netdev,
		struct ethtool_eeprom *eeprom, u8 *data)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i, offset = eeprom->offset, len = eeprom->len;

	/*
	 * ethtool will check the boundary for us
	 */
	eeprom->magic = JME_EEPROM_MAGIC;
	for (i = 0 ; i < len ; ++i)
		data[i] = jme_smb_read(jme, i + offset);

	return 0;
}

/*
 * ethtool set_eeprom: byte-wise SMBus writes; requires the matching
 * magic value to guard against accidental writes.
 */
static int
jme_set_eeprom(struct net_device *netdev,
		struct ethtool_eeprom *eeprom, u8 *data)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i, offset = eeprom->offset, len = eeprom->len;

	if (eeprom->magic != JME_EEPROM_MAGIC)
		return -EINVAL;

	/*
	 * ethtool will check the boundary for us
	 */
	for (i = 0 ; i < len ; ++i)
		jme_smb_write(jme, i + offset, data[i]);

	return 0;
}

static const struct ethtool_ops jme_ethtool_ops = {
	.get_drvinfo            = jme_get_drvinfo,
	.get_regs_len		= jme_get_regs_len,
	.get_regs		= jme_get_regs,
	.get_coalesce		= jme_get_coalesce,
	.set_coalesce		= jme_set_coalesce,
	.get_pauseparam		= jme_get_pauseparam,
	.set_pauseparam		= jme_set_pauseparam,
	.get_wol		= jme_get_wol,
	.set_wol		= jme_set_wol,
	.get_settings		= jme_get_settings,
	.set_settings		= jme_set_settings,
	.get_link		= jme_get_link,
	.get_msglevel           = jme_get_msglevel,
	.set_msglevel           = jme_set_msglevel,
	.get_rx_csum		= jme_get_rx_csum,
	.set_rx_csum		= jme_set_rx_csum,
	.set_tx_csum		= jme_set_tx_csum,
	.set_tso		= jme_set_tso,
	.set_sg			= ethtool_op_set_sg,
	.nway_reset             = jme_nway_reset,
	.get_eeprom_len		= jme_get_eeprom_len,
	.get_eeprom		= jme_get_eeprom,
	.set_eeprom		= jme_set_eeprom,
};

/*
 * Negotiate the widest usable DMA mask: 64-bit, then 40-bit (JMC250
 * only), then 32-bit. Returns 1 when high DMA is available, 0 for
 * 32-bit-only, -1 on failure (continues below).
 */
static int
jme_pci_dma64(struct pci_dev *pdev)
{
	if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
	    !pci_set_dma_mask(pdev,
			      DMA_BIT_MASK(64)))
		if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
			return 1;

	/* 40-bit fallback — also JMC250-only; JMC260 goes straight to 32. */
	if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(40)))
		if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)))
			return 1;

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
		if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
			return 0;

	return -1;	/* no usable DMA mask at all */
}

/*
 * jme_phy_init - set bit 0x1000 in PHY register 26.
 * NOTE(review): vendor-specific PHY tweak; exact semantics of reg 26
 * are not documented here — skipped on FPGA variants by the caller.
 */
static inline void
jme_phy_init(struct jme_adapter *jme)
{
	u16 reg26;

	reg26 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 26);
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, reg26 | 0x1000);
}

/*
 * jme_check_hw_ver - cache FPGA version and chip revision.
 *
 * Decodes JME_CHIPMODE into jme->fpgaver (non-zero on FPGA prototypes;
 * probe uses this to select PHY handling) and jme->chiprev.
 */
static inline void
jme_check_hw_ver(struct jme_adapter *jme)
{
	u32 chipmode;

	chipmode = jread32(jme, JME_CHIPMODE);

	jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
	jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT;
}

/* net_device operation table for this driver. */
static const struct net_device_ops jme_netdev_ops = {
	.ndo_open		= jme_open,
	.ndo_stop		= jme_close,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_start_xmit		= jme_start_xmit,
	.ndo_set_mac_address	= jme_set_macaddr,
	.ndo_set_multicast_list	= jme_set_multi,
	.ndo_change_mtu		= jme_change_mtu,
	.ndo_tx_timeout		= jme_tx_timeout,
	.ndo_vlan_rx_register	= jme_vlan_rx_register,
};

/*
 * jme_init_one - PCI probe: bring up one JMC250/JMC260 device.
 *
 * Enables the PCI device, negotiates DMA masks, maps BAR0, initializes
 * the adapter structure/tasklets, detects the PHY, reloads the MAC
 * address from EEPROM and registers the net_device.  Errors unwind via
 * the goto ladder at the end of the function.  (Spans several chunks.)
 */
static int __devinit
jme_init_one(struct pci_dev *pdev,
	     const struct pci_device_id *ent)
{
	int rc = 0, using_dac, i;
	struct net_device *netdev;
	struct jme_adapter *jme;
	u16 bmcr, bmsr;
	u32 apmc;

	/*
	 * set up PCI device basics
	 */
	rc = pci_enable_device(pdev);
	if (rc) {
		jeprintk(pdev, "Cannot enable PCI device.\n");
		goto err_out;
	}

	/* <0: no DMA mask fit; >0: 64/40-bit DAC available. */
	using_dac = jme_pci_dma64(pdev);
	if (using_dac < 0) {
		jeprintk(pdev, "Cannot set PCI DMA Mask.\n");
		rc = -EIO;
		goto
err_out_disable_pdev;
	}

	/* BAR0 must be a memory-mapped region for the register window. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		jeprintk(pdev, "No PCI resource region found.\n");
		rc = -ENOMEM;
		goto err_out_disable_pdev;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		jeprintk(pdev, "Cannot obtain PCI resource region.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/*
	 * alloc and init net device
	 */
	netdev = alloc_etherdev(sizeof(*jme));
	if (!netdev) {
		jeprintk(pdev, "Cannot allocate netdev structure.\n");
		rc = -ENOMEM;
		goto err_out_release_regions;
	}
	netdev->netdev_ops = &jme_netdev_ops;
	netdev->ethtool_ops = &jme_ethtool_ops;
	netdev->watchdog_timeo = TX_TIMEOUT;
	netdev->features = NETIF_F_HW_CSUM |
				NETIF_F_SG |
				NETIF_F_TSO |
				NETIF_F_TSO6 |
				NETIF_F_HW_VLAN_TX |
				NETIF_F_HW_VLAN_RX;
	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);

	/*
	 * init adapter info
	 */
	jme = netdev_priv(netdev);
	jme->pdev = pdev;
	jme->dev = netdev;
	/* Default (non-NAPI) receive paths; swapped when NAPI polling. */
	jme->jme_rx = netif_rx;
	jme->jme_vlan_rx = vlan_hwaccel_rx;
	jme->old_mtu = netdev->mtu = 1500;
	jme->phylink = 0;
	/* Ring sizes are powers of two so index wrapping is a mask. */
	jme->tx_ring_size = 1 << 10;
	jme->tx_ring_mask = jme->tx_ring_size - 1;
	jme->tx_wake_threshold = 1 << 9;
	jme->rx_ring_size = 1 << 9;
	jme->rx_ring_mask = jme->rx_ring_size - 1;
	jme->msg_enable = JME_DEF_MSG_ENABLE;
	jme->regs = ioremap(pci_resource_start(pdev, 0),
			     pci_resource_len(pdev, 0));
	if (!(jme->regs)) {
		jeprintk(pdev, "Mapping PCI resource region error.\n");
		rc = -ENOMEM;
		goto err_out_free_netdev;
	}

	/*
	 * Pseudo hot-plug: module parameters override whatever the BIOS
	 * left in the APMC register (see no_pseudohp/force_pseudohp).
	 */
	if (no_pseudohp) {
		apmc = jread32(jme, JME_APMC) & ~JME_APMC_PSEUDO_HP_EN;
		jwrite32(jme, JME_APMC, apmc);
	} else if (force_pseudohp) {
		apmc = jread32(jme, JME_APMC) |
JME_APMC_PSEUDO_HP_EN; 2752 jwrite32(jme, JME_APMC, apmc); 2753 } 2754 2755 NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2) 2756 2757 spin_lock_init(&jme->phy_lock); 2758 spin_lock_init(&jme->macaddr_lock); 2759 spin_lock_init(&jme->rxmcs_lock); 2760 2761 atomic_set(&jme->link_changing, 1); 2762 atomic_set(&jme->rx_cleaning, 1); 2763 atomic_set(&jme->tx_cleaning, 1); 2764 atomic_set(&jme->rx_empty, 1); 2765 2766 tasklet_init(&jme->pcc_task, 2767 &jme_pcc_tasklet, 2768 (unsigned long) jme); 2769 tasklet_init(&jme->linkch_task, 2770 &jme_link_change_tasklet, 2771 (unsigned long) jme); 2772 tasklet_init(&jme->txclean_task, 2773 &jme_tx_clean_tasklet, 2774 (unsigned long) jme); 2775 tasklet_init(&jme->rxclean_task, 2776 &jme_rx_clean_tasklet, 2777 (unsigned long) jme); 2778 tasklet_init(&jme->rxempty_task, 2779 &jme_rx_empty_tasklet, 2780 (unsigned long) jme); 2781 tasklet_disable_nosync(&jme->linkch_task); 2782 tasklet_disable_nosync(&jme->txclean_task); 2783 tasklet_disable_nosync(&jme->rxclean_task); 2784 tasklet_disable_nosync(&jme->rxempty_task); 2785 jme->dpi.cur = PCC_P1; 2786 2787 jme->reg_ghc = 0; 2788 jme->reg_rxcs = RXCS_DEFAULT; 2789 jme->reg_rxmcs = RXMCS_DEFAULT; 2790 jme->reg_txpfc = 0; 2791 jme->reg_pmcs = PMCS_MFEN; 2792 set_bit(JME_FLAG_TXCSUM, &jme->flags); 2793 set_bit(JME_FLAG_TSO, &jme->flags); 2794 2795 /* 2796 * Get Max Read Req Size from PCI Config Space 2797 */ 2798 pci_read_config_byte(pdev, PCI_DCSR_MRRS, &jme->mrrs); 2799 jme->mrrs &= PCI_DCSR_MRRS_MASK; 2800 switch (jme->mrrs) { 2801 case MRRS_128B: 2802 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B; 2803 break; 2804 case MRRS_256B: 2805 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B; 2806 break; 2807 default: 2808 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B; 2809 break; 2810 }; 2811 2812 /* 2813 * Must check before reset_mac_processor 2814 */ 2815 jme_check_hw_ver(jme); 2816 jme->mii_if.dev = netdev; 2817 if (jme->fpgaver) { 2818 jme->mii_if.phy_id = 0; 2819 
		/*
		 * FPGA builds have no fixed PHY address: scan addresses
		 * 1..31 for anything that answers with sane BMCR/BMSR.
		 */
		for (i = 1 ; i < 32 ; ++i) {
			bmcr = jme_mdio_read(netdev, i, MII_BMCR);
			bmsr = jme_mdio_read(netdev, i, MII_BMSR);
			/* 0xFFFF == no device; all-zero regs == no PHY. */
			if (bmcr != 0xFFFFU && (bmcr != 0 || bmsr != 0)) {
				jme->mii_if.phy_id = i;
				break;
			}
		}

		if (!jme->mii_if.phy_id) {
			rc = -EIO;
			jeprintk(pdev, "Can not find phy_id.\n");
			goto err_out_unmap;
		}

		/* FPGA: poll link state instead of relying on interrupts. */
		jme->reg_ghc |= GHC_LINK_POLL;
	} else {
		jme->mii_if.phy_id = 1;	/* production silicon: fixed addr */
	}
	if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
		jme->mii_if.supports_gmii = true;	/* gigabit part */
	else
		jme->mii_if.supports_gmii = false;	/* JMC260 is 10/100 */
	jme->mii_if.mdio_read = jme_mdio_read;
	jme->mii_if.mdio_write = jme_mdio_write;

	jme_clear_pm(jme);
	jme_set_phyfifoa(jme);
	pci_read_config_byte(pdev, PCI_REVISION_ID, &jme->rev);
	if (!jme->fpgaver)
		jme_phy_init(jme);
	/* PHY stays powered down until the interface is opened. */
	jme_phy_off(jme);

	/*
	 * Reset MAC processor and reload EEPROM for MAC Address
	 */
	jme_reset_mac_processor(jme);
	rc = jme_reload_eeprom(jme);
	if (rc) {
		jeprintk(pdev,
			"Reload eeprom for reading MAC Address error.\n");
		goto err_out_unmap;
	}
	jme_load_macaddr(netdev);

	/*
	 * Tell stack that we are not ready to work until open()
	 */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	/*
	 * Register netdev
	 */
	rc = register_netdev(netdev);
	if (rc) {
		jeprintk(pdev, "Cannot register net device.\n");
		goto err_out_unmap;
	}

	/* One-line probe banner: model, FPGA tag, versions, MAC. */
	msg_probe(jme, "%s%s ver:%x rev:%x macaddr:%pM\n",
		(jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
			"JMC250 Gigabit Ethernet" :
		(jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
			"JMC260 Fast Ethernet" : "Unknown",
		(jme->fpgaver != 0) ? " (FPGA)" : "",
		(jme->fpgaver != 0) ?
			jme->fpgaver : jme->chiprev,
		jme->rev, netdev->dev_addr);

	return 0;

	/* Unwind in reverse order of acquisition. */
err_out_unmap:
	iounmap(jme->regs);
err_out_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out:
	return rc;
}

/*
 * jme_remove_one - PCI remove: tear down in reverse of jme_init_one().
 */
static void __devexit
jme_remove_one(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	unregister_netdev(netdev);
	iounmap(jme->regs);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

}

#ifdef CONFIG_PM
/*
 * jme_suspend - legacy PCI suspend hook.
 *
 * Quiesces the device: detaches the netdev, stops IRQs and datapath
 * tasklets, and if the link was up shuts down both DMA engines and
 * frees ring resources.  When any wake-on-LAN condition is armed
 * (jme->reg_pmcs), drops the link to 100M half-duplex and programs the
 * PMCS register before entering D3.  (Continues on the next chunk.)
 */
static int
jme_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	/* Gate the link-change path while we tear things down. */
	atomic_dec(&jme->link_changing);

	netif_device_detach(netdev);
	netif_stop_queue(netdev);
	jme_stop_irq(jme);

	tasklet_disable(&jme->txclean_task);
	tasklet_disable(&jme->rxclean_task);
	tasklet_disable(&jme->rxempty_task);

	if (netif_carrier_ok(netdev)) {
		if (test_bit(JME_FLAG_POLL, &jme->flags))
			jme_polling_mode(jme);

		jme_stop_pcc_timer(jme);
		jme_reset_ghc_speed(jme);
		jme_disable_rx_engine(jme);
		jme_disable_tx_engine(jme);
		jme_reset_mac_processor(jme);
		jme_free_rx_resources(jme);
		jme_free_tx_resources(jme);
		netif_carrier_off(netdev);
		jme->phylink = 0;
	}

	tasklet_enable(&jme->txclean_task);
	tasklet_hi_enable(&jme->rxclean_task);
	tasklet_hi_enable(&jme->rxempty_task);

	pci_save_state(pdev);
	if (jme->reg_pmcs) {
		/* WoL armed: keep PHY up at 100M/half for wake events. */
		jme_set_100m_half(jme);

		if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
			jme_wait_link(jme);

		jwrite32(jme, JME_PMCS, jme->reg_pmcs);

		pci_enable_wake(pdev, PCI_D3cold, true);
	} else {
		/* No wake condition: power the PHY down entirely. */
		jme_phy_off(jme);
	}
	pci_set_power_state(pdev, PCI_D3cold);

	return 0;
}

/*
 * jme_resume - legacy PCI resume hook.
 *
 * Restores PCI state, re-applies either the user's saved ethtool
 * settings (JME_FLAG_SSET) or a fresh PHY reset, re-enables IRQs,
 * reattaches the netdev and kicks a link re-check.
 */
static int
jme_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	jme_clear_pm(jme);
	pci_restore_state(pdev);

	if (test_bit(JME_FLAG_SSET, &jme->flags))
		jme_set_settings(netdev, &jme->old_ecmd);
	else
		jme_reset_phy_processor(jme);

	jme_start_irq(jme);
	netif_device_attach(netdev);

	/* Re-open the gate closed in jme_suspend(). */
	atomic_inc(&jme->link_changing);

	jme_reset_link(jme);

	return 0;
}
#endif

/* PCI IDs this driver binds to. */
static struct pci_device_id jme_pci_tbl[] = {
	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) },
	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) },
	{ }
};

static struct pci_driver jme_driver = {
	.name           = DRV_NAME,
	.id_table       = jme_pci_tbl,
	.probe          = jme_init_one,
	.remove         = __devexit_p(jme_remove_one),
#ifdef CONFIG_PM
	.suspend        = jme_suspend,
	.resume         = jme_resume,
#endif /* CONFIG_PM */
};

/* Module entry: announce the driver and register with the PCI core. */
static int __init
jme_init_module(void)
{
	printk(KERN_INFO PFX "JMicron JMC2XX ethernet "
	       "driver version %s\n", DRV_VERSION);
	return pci_register_driver(&jme_driver);
}

static void __exit
jme_cleanup_module(void)
{
	pci_unregister_driver(&jme_driver);
}

module_init(jme_init_module);
module_exit(jme_cleanup_module);

MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, jme_pci_tbl);