Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.34-rc1 3032 lines 68 kB view raw
1/* 2 * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver 3 * 4 * Copyright 2008 JMicron Technology Corporation 5 * http://www.jmicron.com/ 6 * 7 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org> 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License as published by 11 * the Free Software Foundation; either version 2 of the License. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; if not, write to the Free Software 20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 21 * 22 */ 23 24#include <linux/module.h> 25#include <linux/kernel.h> 26#include <linux/pci.h> 27#include <linux/netdevice.h> 28#include <linux/etherdevice.h> 29#include <linux/ethtool.h> 30#include <linux/mii.h> 31#include <linux/crc32.h> 32#include <linux/delay.h> 33#include <linux/spinlock.h> 34#include <linux/in.h> 35#include <linux/ip.h> 36#include <linux/ipv6.h> 37#include <linux/tcp.h> 38#include <linux/udp.h> 39#include <linux/if_vlan.h> 40#include <net/ip6_checksum.h> 41#include "jme.h" 42 43static int force_pseudohp = -1; 44static int no_pseudohp = -1; 45static int no_extplug = -1; 46module_param(force_pseudohp, int, 0); 47MODULE_PARM_DESC(force_pseudohp, 48 "Enable pseudo hot-plug feature manually by driver instead of BIOS."); 49module_param(no_pseudohp, int, 0); 50MODULE_PARM_DESC(no_pseudohp, "Disable pseudo hot-plug feature."); 51module_param(no_extplug, int, 0); 52MODULE_PARM_DESC(no_extplug, 53 "Do not use external plug signal for pseudo hot-plug."); 54 55static int 56jme_mdio_read(struct net_device *netdev, int phy, int reg) 57{ 58 struct jme_adapter *jme = netdev_priv(netdev); 59 
int i, val, again = (reg == MII_BMSR) ? 1 : 0; 60 61read_again: 62 jwrite32(jme, JME_SMI, SMI_OP_REQ | 63 smi_phy_addr(phy) | 64 smi_reg_addr(reg)); 65 66 wmb(); 67 for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) { 68 udelay(20); 69 val = jread32(jme, JME_SMI); 70 if ((val & SMI_OP_REQ) == 0) 71 break; 72 } 73 74 if (i == 0) { 75 jeprintk(jme->pdev, "phy(%d) read timeout : %d\n", phy, reg); 76 return 0; 77 } 78 79 if (again--) 80 goto read_again; 81 82 return (val & SMI_DATA_MASK) >> SMI_DATA_SHIFT; 83} 84 85static void 86jme_mdio_write(struct net_device *netdev, 87 int phy, int reg, int val) 88{ 89 struct jme_adapter *jme = netdev_priv(netdev); 90 int i; 91 92 jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ | 93 ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) | 94 smi_phy_addr(phy) | smi_reg_addr(reg)); 95 96 wmb(); 97 for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) { 98 udelay(20); 99 if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0) 100 break; 101 } 102 103 if (i == 0) 104 jeprintk(jme->pdev, "phy(%d) write timeout : %d\n", phy, reg); 105 106 return; 107} 108 109static inline void 110jme_reset_phy_processor(struct jme_adapter *jme) 111{ 112 u32 val; 113 114 jme_mdio_write(jme->dev, 115 jme->mii_if.phy_id, 116 MII_ADVERTISE, ADVERTISE_ALL | 117 ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 118 119 if (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) 120 jme_mdio_write(jme->dev, 121 jme->mii_if.phy_id, 122 MII_CTRL1000, 123 ADVERTISE_1000FULL | ADVERTISE_1000HALF); 124 125 val = jme_mdio_read(jme->dev, 126 jme->mii_if.phy_id, 127 MII_BMCR); 128 129 jme_mdio_write(jme->dev, 130 jme->mii_if.phy_id, 131 MII_BMCR, val | BMCR_RESET); 132 133 return; 134} 135 136static void 137jme_setup_wakeup_frame(struct jme_adapter *jme, 138 u32 *mask, u32 crc, int fnr) 139{ 140 int i; 141 142 /* 143 * Setup CRC pattern 144 */ 145 jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL)); 146 wmb(); 147 jwrite32(jme, JME_WFODP, crc); 148 wmb(); 149 150 /* 151 * Setup Mask 152 */ 153 for (i = 
0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) { 154 jwrite32(jme, JME_WFOI, 155 ((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) | 156 (fnr & WFOI_FRAME_SEL)); 157 wmb(); 158 jwrite32(jme, JME_WFODP, mask[i]); 159 wmb(); 160 } 161} 162 163static inline void 164jme_reset_mac_processor(struct jme_adapter *jme) 165{ 166 u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0}; 167 u32 crc = 0xCDCDCDCD; 168 u32 gpreg0; 169 int i; 170 171 jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST); 172 udelay(2); 173 jwrite32(jme, JME_GHC, jme->reg_ghc); 174 175 jwrite32(jme, JME_RXDBA_LO, 0x00000000); 176 jwrite32(jme, JME_RXDBA_HI, 0x00000000); 177 jwrite32(jme, JME_RXQDC, 0x00000000); 178 jwrite32(jme, JME_RXNDA, 0x00000000); 179 jwrite32(jme, JME_TXDBA_LO, 0x00000000); 180 jwrite32(jme, JME_TXDBA_HI, 0x00000000); 181 jwrite32(jme, JME_TXQDC, 0x00000000); 182 jwrite32(jme, JME_TXNDA, 0x00000000); 183 184 jwrite32(jme, JME_RXMCHT_LO, 0x00000000); 185 jwrite32(jme, JME_RXMCHT_HI, 0x00000000); 186 for (i = 0 ; i < WAKEUP_FRAME_NR ; ++i) 187 jme_setup_wakeup_frame(jme, mask, crc, i); 188 if (jme->fpgaver) 189 gpreg0 = GPREG0_DEFAULT | GPREG0_LNKINTPOLL; 190 else 191 gpreg0 = GPREG0_DEFAULT; 192 jwrite32(jme, JME_GPREG0, gpreg0); 193 jwrite32(jme, JME_GPREG1, GPREG1_DEFAULT); 194} 195 196static inline void 197jme_reset_ghc_speed(struct jme_adapter *jme) 198{ 199 jme->reg_ghc &= ~(GHC_SPEED_1000M | GHC_DPX); 200 jwrite32(jme, JME_GHC, jme->reg_ghc); 201} 202 203static inline void 204jme_clear_pm(struct jme_adapter *jme) 205{ 206 jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs); 207 pci_set_power_state(jme->pdev, PCI_D0); 208 pci_enable_wake(jme->pdev, PCI_D0, false); 209} 210 211static int 212jme_reload_eeprom(struct jme_adapter *jme) 213{ 214 u32 val; 215 int i; 216 217 val = jread32(jme, JME_SMBCSR); 218 219 if (val & SMBCSR_EEPROMD) { 220 val |= SMBCSR_CNACK; 221 jwrite32(jme, JME_SMBCSR, val); 222 val |= SMBCSR_RELOAD; 223 jwrite32(jme, JME_SMBCSR, val); 224 mdelay(12); 225 226 for (i = 
JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i) { 227 mdelay(1); 228 if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0) 229 break; 230 } 231 232 if (i == 0) { 233 jeprintk(jme->pdev, "eeprom reload timeout\n"); 234 return -EIO; 235 } 236 } 237 238 return 0; 239} 240 241static void 242jme_load_macaddr(struct net_device *netdev) 243{ 244 struct jme_adapter *jme = netdev_priv(netdev); 245 unsigned char macaddr[6]; 246 u32 val; 247 248 spin_lock_bh(&jme->macaddr_lock); 249 val = jread32(jme, JME_RXUMA_LO); 250 macaddr[0] = (val >> 0) & 0xFF; 251 macaddr[1] = (val >> 8) & 0xFF; 252 macaddr[2] = (val >> 16) & 0xFF; 253 macaddr[3] = (val >> 24) & 0xFF; 254 val = jread32(jme, JME_RXUMA_HI); 255 macaddr[4] = (val >> 0) & 0xFF; 256 macaddr[5] = (val >> 8) & 0xFF; 257 memcpy(netdev->dev_addr, macaddr, 6); 258 spin_unlock_bh(&jme->macaddr_lock); 259} 260 261static inline void 262jme_set_rx_pcc(struct jme_adapter *jme, int p) 263{ 264 switch (p) { 265 case PCC_OFF: 266 jwrite32(jme, JME_PCCRX0, 267 ((PCC_OFF_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | 268 ((PCC_OFF_CNT << PCCRX_SHIFT) & PCCRX_MASK)); 269 break; 270 case PCC_P1: 271 jwrite32(jme, JME_PCCRX0, 272 ((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | 273 ((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK)); 274 break; 275 case PCC_P2: 276 jwrite32(jme, JME_PCCRX0, 277 ((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | 278 ((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK)); 279 break; 280 case PCC_P3: 281 jwrite32(jme, JME_PCCRX0, 282 ((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | 283 ((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK)); 284 break; 285 default: 286 break; 287 } 288 wmb(); 289 290 if (!(test_bit(JME_FLAG_POLL, &jme->flags))) 291 netif_info(jme, rx_status, jme->dev, "Switched to PCC_P%d\n", p); 292} 293 294static void 295jme_start_irq(struct jme_adapter *jme) 296{ 297 register struct dynpcc_info *dpi = &(jme->dpi); 298 299 jme_set_rx_pcc(jme, PCC_P1); 300 dpi->cur = PCC_P1; 301 dpi->attempt = PCC_P1; 302 dpi->cnt = 0; 303 304 jwrite32(jme, 
JME_PCCTX, 305 ((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) | 306 ((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) | 307 PCCTXQ0_EN 308 ); 309 310 /* 311 * Enable Interrupts 312 */ 313 jwrite32(jme, JME_IENS, INTR_ENABLE); 314} 315 316static inline void 317jme_stop_irq(struct jme_adapter *jme) 318{ 319 /* 320 * Disable Interrupts 321 */ 322 jwrite32f(jme, JME_IENC, INTR_ENABLE); 323} 324 325static u32 326jme_linkstat_from_phy(struct jme_adapter *jme) 327{ 328 u32 phylink, bmsr; 329 330 phylink = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 17); 331 bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR); 332 if (bmsr & BMSR_ANCOMP) 333 phylink |= PHY_LINK_AUTONEG_COMPLETE; 334 335 return phylink; 336} 337 338static inline void 339jme_set_phyfifoa(struct jme_adapter *jme) 340{ 341 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004); 342} 343 344static inline void 345jme_set_phyfifob(struct jme_adapter *jme) 346{ 347 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000); 348} 349 350static int 351jme_check_link(struct net_device *netdev, int testonly) 352{ 353 struct jme_adapter *jme = netdev_priv(netdev); 354 u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr, gpreg1; 355 char linkmsg[64]; 356 int rc = 0; 357 358 linkmsg[0] = '\0'; 359 360 if (jme->fpgaver) 361 phylink = jme_linkstat_from_phy(jme); 362 else 363 phylink = jread32(jme, JME_PHY_LINK); 364 365 if (phylink & PHY_LINK_UP) { 366 if (!(phylink & PHY_LINK_AUTONEG_COMPLETE)) { 367 /* 368 * If we did not enable AN 369 * Speed/Duplex Info should be obtained from SMI 370 */ 371 phylink = PHY_LINK_UP; 372 373 bmcr = jme_mdio_read(jme->dev, 374 jme->mii_if.phy_id, 375 MII_BMCR); 376 377 phylink |= ((bmcr & BMCR_SPEED1000) && 378 (bmcr & BMCR_SPEED100) == 0) ? 379 PHY_LINK_SPEED_1000M : 380 (bmcr & BMCR_SPEED100) ? 381 PHY_LINK_SPEED_100M : 382 PHY_LINK_SPEED_10M; 383 384 phylink |= (bmcr & BMCR_FULLDPLX) ? 
385 PHY_LINK_DUPLEX : 0; 386 387 strcat(linkmsg, "Forced: "); 388 } else { 389 /* 390 * Keep polling for speed/duplex resolve complete 391 */ 392 while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) && 393 --cnt) { 394 395 udelay(1); 396 397 if (jme->fpgaver) 398 phylink = jme_linkstat_from_phy(jme); 399 else 400 phylink = jread32(jme, JME_PHY_LINK); 401 } 402 if (!cnt) 403 jeprintk(jme->pdev, 404 "Waiting speed resolve timeout.\n"); 405 406 strcat(linkmsg, "ANed: "); 407 } 408 409 if (jme->phylink == phylink) { 410 rc = 1; 411 goto out; 412 } 413 if (testonly) 414 goto out; 415 416 jme->phylink = phylink; 417 418 ghc = jme->reg_ghc & ~(GHC_SPEED | GHC_DPX | 419 GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE | 420 GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY); 421 switch (phylink & PHY_LINK_SPEED_MASK) { 422 case PHY_LINK_SPEED_10M: 423 ghc |= GHC_SPEED_10M | 424 GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE; 425 strcat(linkmsg, "10 Mbps, "); 426 break; 427 case PHY_LINK_SPEED_100M: 428 ghc |= GHC_SPEED_100M | 429 GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE; 430 strcat(linkmsg, "100 Mbps, "); 431 break; 432 case PHY_LINK_SPEED_1000M: 433 ghc |= GHC_SPEED_1000M | 434 GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY; 435 strcat(linkmsg, "1000 Mbps, "); 436 break; 437 default: 438 break; 439 } 440 441 if (phylink & PHY_LINK_DUPLEX) { 442 jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT); 443 ghc |= GHC_DPX; 444 } else { 445 jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT | 446 TXMCS_BACKOFF | 447 TXMCS_CARRIERSENSE | 448 TXMCS_COLLISION); 449 jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN | 450 ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) | 451 TXTRHD_TXREN | 452 ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL)); 453 } 454 455 gpreg1 = GPREG1_DEFAULT; 456 if (is_buggy250(jme->pdev->device, jme->chiprev)) { 457 if (!(phylink & PHY_LINK_DUPLEX)) 458 gpreg1 |= GPREG1_HALFMODEPATCH; 459 switch (phylink & PHY_LINK_SPEED_MASK) { 460 case PHY_LINK_SPEED_10M: 461 jme_set_phyfifoa(jme); 462 gpreg1 |= GPREG1_RSSPATCH; 463 break; 464 case PHY_LINK_SPEED_100M: 465 
jme_set_phyfifob(jme); 466 gpreg1 |= GPREG1_RSSPATCH; 467 break; 468 case PHY_LINK_SPEED_1000M: 469 jme_set_phyfifoa(jme); 470 break; 471 default: 472 break; 473 } 474 } 475 476 jwrite32(jme, JME_GPREG1, gpreg1); 477 jwrite32(jme, JME_GHC, ghc); 478 jme->reg_ghc = ghc; 479 480 strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ? 481 "Full-Duplex, " : 482 "Half-Duplex, "); 483 strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ? 484 "MDI-X" : 485 "MDI"); 486 netif_info(jme, link, jme->dev, "Link is up at %s.\n", linkmsg); 487 netif_carrier_on(netdev); 488 } else { 489 if (testonly) 490 goto out; 491 492 netif_info(jme, link, jme->dev, "Link is down.\n"); 493 jme->phylink = 0; 494 netif_carrier_off(netdev); 495 } 496 497out: 498 return rc; 499} 500 501static int 502jme_setup_tx_resources(struct jme_adapter *jme) 503{ 504 struct jme_ring *txring = &(jme->txring[0]); 505 506 txring->alloc = dma_alloc_coherent(&(jme->pdev->dev), 507 TX_RING_ALLOC_SIZE(jme->tx_ring_size), 508 &(txring->dmaalloc), 509 GFP_ATOMIC); 510 511 if (!txring->alloc) 512 goto err_set_null; 513 514 /* 515 * 16 Bytes align 516 */ 517 txring->desc = (void *)ALIGN((unsigned long)(txring->alloc), 518 RING_DESC_ALIGN); 519 txring->dma = ALIGN(txring->dmaalloc, RING_DESC_ALIGN); 520 txring->next_to_use = 0; 521 atomic_set(&txring->next_to_clean, 0); 522 atomic_set(&txring->nr_free, jme->tx_ring_size); 523 524 txring->bufinf = kmalloc(sizeof(struct jme_buffer_info) * 525 jme->tx_ring_size, GFP_ATOMIC); 526 if (unlikely(!(txring->bufinf))) 527 goto err_free_txring; 528 529 /* 530 * Initialize Transmit Descriptors 531 */ 532 memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size)); 533 memset(txring->bufinf, 0, 534 sizeof(struct jme_buffer_info) * jme->tx_ring_size); 535 536 return 0; 537 538err_free_txring: 539 dma_free_coherent(&(jme->pdev->dev), 540 TX_RING_ALLOC_SIZE(jme->tx_ring_size), 541 txring->alloc, 542 txring->dmaalloc); 543 544err_set_null: 545 txring->desc = NULL; 546 txring->dmaalloc = 0; 547 
txring->dma = 0; 548 txring->bufinf = NULL; 549 550 return -ENOMEM; 551} 552 553static void 554jme_free_tx_resources(struct jme_adapter *jme) 555{ 556 int i; 557 struct jme_ring *txring = &(jme->txring[0]); 558 struct jme_buffer_info *txbi; 559 560 if (txring->alloc) { 561 if (txring->bufinf) { 562 for (i = 0 ; i < jme->tx_ring_size ; ++i) { 563 txbi = txring->bufinf + i; 564 if (txbi->skb) { 565 dev_kfree_skb(txbi->skb); 566 txbi->skb = NULL; 567 } 568 txbi->mapping = 0; 569 txbi->len = 0; 570 txbi->nr_desc = 0; 571 txbi->start_xmit = 0; 572 } 573 kfree(txring->bufinf); 574 } 575 576 dma_free_coherent(&(jme->pdev->dev), 577 TX_RING_ALLOC_SIZE(jme->tx_ring_size), 578 txring->alloc, 579 txring->dmaalloc); 580 581 txring->alloc = NULL; 582 txring->desc = NULL; 583 txring->dmaalloc = 0; 584 txring->dma = 0; 585 txring->bufinf = NULL; 586 } 587 txring->next_to_use = 0; 588 atomic_set(&txring->next_to_clean, 0); 589 atomic_set(&txring->nr_free, 0); 590} 591 592static inline void 593jme_enable_tx_engine(struct jme_adapter *jme) 594{ 595 /* 596 * Select Queue 0 597 */ 598 jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0); 599 wmb(); 600 601 /* 602 * Setup TX Queue 0 DMA Bass Address 603 */ 604 jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL); 605 jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32); 606 jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL); 607 608 /* 609 * Setup TX Descptor Count 610 */ 611 jwrite32(jme, JME_TXQDC, jme->tx_ring_size); 612 613 /* 614 * Enable TX Engine 615 */ 616 wmb(); 617 jwrite32(jme, JME_TXCS, jme->reg_txcs | 618 TXCS_SELECT_QUEUE0 | 619 TXCS_ENABLE); 620 621} 622 623static inline void 624jme_restart_tx_engine(struct jme_adapter *jme) 625{ 626 /* 627 * Restart TX Engine 628 */ 629 jwrite32(jme, JME_TXCS, jme->reg_txcs | 630 TXCS_SELECT_QUEUE0 | 631 TXCS_ENABLE); 632} 633 634static inline void 635jme_disable_tx_engine(struct jme_adapter *jme) 636{ 637 int i; 638 u32 val; 639 640 /* 
641 * Disable TX Engine 642 */ 643 jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0); 644 wmb(); 645 646 val = jread32(jme, JME_TXCS); 647 for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) { 648 mdelay(1); 649 val = jread32(jme, JME_TXCS); 650 rmb(); 651 } 652 653 if (!i) 654 jeprintk(jme->pdev, "Disable TX engine timeout.\n"); 655} 656 657static void 658jme_set_clean_rxdesc(struct jme_adapter *jme, int i) 659{ 660 struct jme_ring *rxring = &(jme->rxring[0]); 661 register struct rxdesc *rxdesc = rxring->desc; 662 struct jme_buffer_info *rxbi = rxring->bufinf; 663 rxdesc += i; 664 rxbi += i; 665 666 rxdesc->dw[0] = 0; 667 rxdesc->dw[1] = 0; 668 rxdesc->desc1.bufaddrh = cpu_to_le32((__u64)rxbi->mapping >> 32); 669 rxdesc->desc1.bufaddrl = cpu_to_le32( 670 (__u64)rxbi->mapping & 0xFFFFFFFFUL); 671 rxdesc->desc1.datalen = cpu_to_le16(rxbi->len); 672 if (jme->dev->features & NETIF_F_HIGHDMA) 673 rxdesc->desc1.flags = RXFLAG_64BIT; 674 wmb(); 675 rxdesc->desc1.flags |= RXFLAG_OWN | RXFLAG_INT; 676} 677 678static int 679jme_make_new_rx_buf(struct jme_adapter *jme, int i) 680{ 681 struct jme_ring *rxring = &(jme->rxring[0]); 682 struct jme_buffer_info *rxbi = rxring->bufinf + i; 683 struct sk_buff *skb; 684 685 skb = netdev_alloc_skb(jme->dev, 686 jme->dev->mtu + RX_EXTRA_LEN); 687 if (unlikely(!skb)) 688 return -ENOMEM; 689 690 rxbi->skb = skb; 691 rxbi->len = skb_tailroom(skb); 692 rxbi->mapping = pci_map_page(jme->pdev, 693 virt_to_page(skb->data), 694 offset_in_page(skb->data), 695 rxbi->len, 696 PCI_DMA_FROMDEVICE); 697 698 return 0; 699} 700 701static void 702jme_free_rx_buf(struct jme_adapter *jme, int i) 703{ 704 struct jme_ring *rxring = &(jme->rxring[0]); 705 struct jme_buffer_info *rxbi = rxring->bufinf; 706 rxbi += i; 707 708 if (rxbi->skb) { 709 pci_unmap_page(jme->pdev, 710 rxbi->mapping, 711 rxbi->len, 712 PCI_DMA_FROMDEVICE); 713 dev_kfree_skb(rxbi->skb); 714 rxbi->skb = NULL; 715 rxbi->mapping = 0; 716 rxbi->len = 0; 717 } 
718} 719 720static void 721jme_free_rx_resources(struct jme_adapter *jme) 722{ 723 int i; 724 struct jme_ring *rxring = &(jme->rxring[0]); 725 726 if (rxring->alloc) { 727 if (rxring->bufinf) { 728 for (i = 0 ; i < jme->rx_ring_size ; ++i) 729 jme_free_rx_buf(jme, i); 730 kfree(rxring->bufinf); 731 } 732 733 dma_free_coherent(&(jme->pdev->dev), 734 RX_RING_ALLOC_SIZE(jme->rx_ring_size), 735 rxring->alloc, 736 rxring->dmaalloc); 737 rxring->alloc = NULL; 738 rxring->desc = NULL; 739 rxring->dmaalloc = 0; 740 rxring->dma = 0; 741 rxring->bufinf = NULL; 742 } 743 rxring->next_to_use = 0; 744 atomic_set(&rxring->next_to_clean, 0); 745} 746 747static int 748jme_setup_rx_resources(struct jme_adapter *jme) 749{ 750 int i; 751 struct jme_ring *rxring = &(jme->rxring[0]); 752 753 rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev), 754 RX_RING_ALLOC_SIZE(jme->rx_ring_size), 755 &(rxring->dmaalloc), 756 GFP_ATOMIC); 757 if (!rxring->alloc) 758 goto err_set_null; 759 760 /* 761 * 16 Bytes align 762 */ 763 rxring->desc = (void *)ALIGN((unsigned long)(rxring->alloc), 764 RING_DESC_ALIGN); 765 rxring->dma = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN); 766 rxring->next_to_use = 0; 767 atomic_set(&rxring->next_to_clean, 0); 768 769 rxring->bufinf = kmalloc(sizeof(struct jme_buffer_info) * 770 jme->rx_ring_size, GFP_ATOMIC); 771 if (unlikely(!(rxring->bufinf))) 772 goto err_free_rxring; 773 774 /* 775 * Initiallize Receive Descriptors 776 */ 777 memset(rxring->bufinf, 0, 778 sizeof(struct jme_buffer_info) * jme->rx_ring_size); 779 for (i = 0 ; i < jme->rx_ring_size ; ++i) { 780 if (unlikely(jme_make_new_rx_buf(jme, i))) { 781 jme_free_rx_resources(jme); 782 return -ENOMEM; 783 } 784 785 jme_set_clean_rxdesc(jme, i); 786 } 787 788 return 0; 789 790err_free_rxring: 791 dma_free_coherent(&(jme->pdev->dev), 792 RX_RING_ALLOC_SIZE(jme->rx_ring_size), 793 rxring->alloc, 794 rxring->dmaalloc); 795err_set_null: 796 rxring->desc = NULL; 797 rxring->dmaalloc = 0; 798 rxring->dma = 0; 799 
rxring->bufinf = NULL; 800 801 return -ENOMEM; 802} 803 804static inline void 805jme_enable_rx_engine(struct jme_adapter *jme) 806{ 807 /* 808 * Select Queue 0 809 */ 810 jwrite32(jme, JME_RXCS, jme->reg_rxcs | 811 RXCS_QUEUESEL_Q0); 812 wmb(); 813 814 /* 815 * Setup RX DMA Bass Address 816 */ 817 jwrite32(jme, JME_RXDBA_LO, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL); 818 jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32); 819 jwrite32(jme, JME_RXNDA, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL); 820 821 /* 822 * Setup RX Descriptor Count 823 */ 824 jwrite32(jme, JME_RXQDC, jme->rx_ring_size); 825 826 /* 827 * Setup Unicast Filter 828 */ 829 jme_set_multi(jme->dev); 830 831 /* 832 * Enable RX Engine 833 */ 834 wmb(); 835 jwrite32(jme, JME_RXCS, jme->reg_rxcs | 836 RXCS_QUEUESEL_Q0 | 837 RXCS_ENABLE | 838 RXCS_QST); 839} 840 841static inline void 842jme_restart_rx_engine(struct jme_adapter *jme) 843{ 844 /* 845 * Start RX Engine 846 */ 847 jwrite32(jme, JME_RXCS, jme->reg_rxcs | 848 RXCS_QUEUESEL_Q0 | 849 RXCS_ENABLE | 850 RXCS_QST); 851} 852 853static inline void 854jme_disable_rx_engine(struct jme_adapter *jme) 855{ 856 int i; 857 u32 val; 858 859 /* 860 * Disable RX Engine 861 */ 862 jwrite32(jme, JME_RXCS, jme->reg_rxcs); 863 wmb(); 864 865 val = jread32(jme, JME_RXCS); 866 for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) { 867 mdelay(1); 868 val = jread32(jme, JME_RXCS); 869 rmb(); 870 } 871 872 if (!i) 873 jeprintk(jme->pdev, "Disable RX engine timeout.\n"); 874 875} 876 877static int 878jme_rxsum_ok(struct jme_adapter *jme, u16 flags) 879{ 880 if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4))) 881 return false; 882 883 if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_TCPON | RXWBFLAG_TCPCS)) 884 == RXWBFLAG_TCPON)) { 885 if (flags & RXWBFLAG_IPV4) 886 netif_err(jme, rx_err, jme->dev, "TCP Checksum error\n"); 887 return false; 888 } 889 890 if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS)) 891 
== RXWBFLAG_UDPON)) { 892 if (flags & RXWBFLAG_IPV4) 893 netif_err(jme, rx_err, jme->dev, "UDP Checksum error.\n"); 894 return false; 895 } 896 897 if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS)) 898 == RXWBFLAG_IPV4)) { 899 netif_err(jme, rx_err, jme->dev, "IPv4 Checksum error.\n"); 900 return false; 901 } 902 903 return true; 904} 905 906static void 907jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx) 908{ 909 struct jme_ring *rxring = &(jme->rxring[0]); 910 struct rxdesc *rxdesc = rxring->desc; 911 struct jme_buffer_info *rxbi = rxring->bufinf; 912 struct sk_buff *skb; 913 int framesize; 914 915 rxdesc += idx; 916 rxbi += idx; 917 918 skb = rxbi->skb; 919 pci_dma_sync_single_for_cpu(jme->pdev, 920 rxbi->mapping, 921 rxbi->len, 922 PCI_DMA_FROMDEVICE); 923 924 if (unlikely(jme_make_new_rx_buf(jme, idx))) { 925 pci_dma_sync_single_for_device(jme->pdev, 926 rxbi->mapping, 927 rxbi->len, 928 PCI_DMA_FROMDEVICE); 929 930 ++(NET_STAT(jme).rx_dropped); 931 } else { 932 framesize = le16_to_cpu(rxdesc->descwb.framesize) 933 - RX_PREPAD_SIZE; 934 935 skb_reserve(skb, RX_PREPAD_SIZE); 936 skb_put(skb, framesize); 937 skb->protocol = eth_type_trans(skb, jme->dev); 938 939 if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags))) 940 skb->ip_summed = CHECKSUM_UNNECESSARY; 941 else 942 skb->ip_summed = CHECKSUM_NONE; 943 944 if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) { 945 if (jme->vlgrp) { 946 jme->jme_vlan_rx(skb, jme->vlgrp, 947 le16_to_cpu(rxdesc->descwb.vlan)); 948 NET_STAT(jme).rx_bytes += 4; 949 } 950 } else { 951 jme->jme_rx(skb); 952 } 953 954 if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_DEST)) == 955 cpu_to_le16(RXWBFLAG_DEST_MUL)) 956 ++(NET_STAT(jme).multicast); 957 958 NET_STAT(jme).rx_bytes += framesize; 959 ++(NET_STAT(jme).rx_packets); 960 } 961 962 jme_set_clean_rxdesc(jme, idx); 963 964} 965 966static int 967jme_process_receive(struct jme_adapter *jme, int limit) 968{ 969 struct jme_ring *rxring = &(jme->rxring[0]); 970 struct 
rxdesc *rxdesc = rxring->desc; 971 int i, j, ccnt, desccnt, mask = jme->rx_ring_mask; 972 973 if (unlikely(!atomic_dec_and_test(&jme->rx_cleaning))) 974 goto out_inc; 975 976 if (unlikely(atomic_read(&jme->link_changing) != 1)) 977 goto out_inc; 978 979 if (unlikely(!netif_carrier_ok(jme->dev))) 980 goto out_inc; 981 982 i = atomic_read(&rxring->next_to_clean); 983 while (limit > 0) { 984 rxdesc = rxring->desc; 985 rxdesc += i; 986 987 if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) || 988 !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL)) 989 goto out; 990 --limit; 991 992 desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT; 993 994 if (unlikely(desccnt > 1 || 995 rxdesc->descwb.errstat & RXWBERR_ALLERR)) { 996 997 if (rxdesc->descwb.errstat & RXWBERR_CRCERR) 998 ++(NET_STAT(jme).rx_crc_errors); 999 else if (rxdesc->descwb.errstat & RXWBERR_OVERUN) 1000 ++(NET_STAT(jme).rx_fifo_errors); 1001 else 1002 ++(NET_STAT(jme).rx_errors); 1003 1004 if (desccnt > 1) 1005 limit -= desccnt - 1; 1006 1007 for (j = i, ccnt = desccnt ; ccnt-- ; ) { 1008 jme_set_clean_rxdesc(jme, j); 1009 j = (j + 1) & (mask); 1010 } 1011 1012 } else { 1013 jme_alloc_and_feed_skb(jme, i); 1014 } 1015 1016 i = (i + desccnt) & (mask); 1017 } 1018 1019out: 1020 atomic_set(&rxring->next_to_clean, i); 1021 1022out_inc: 1023 atomic_inc(&jme->rx_cleaning); 1024 1025 return limit > 0 ? 
limit : 0; 1026 1027} 1028 1029static void 1030jme_attempt_pcc(struct dynpcc_info *dpi, int atmp) 1031{ 1032 if (likely(atmp == dpi->cur)) { 1033 dpi->cnt = 0; 1034 return; 1035 } 1036 1037 if (dpi->attempt == atmp) { 1038 ++(dpi->cnt); 1039 } else { 1040 dpi->attempt = atmp; 1041 dpi->cnt = 0; 1042 } 1043 1044} 1045 1046static void 1047jme_dynamic_pcc(struct jme_adapter *jme) 1048{ 1049 register struct dynpcc_info *dpi = &(jme->dpi); 1050 1051 if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD) 1052 jme_attempt_pcc(dpi, PCC_P3); 1053 else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD || 1054 dpi->intr_cnt > PCC_INTR_THRESHOLD) 1055 jme_attempt_pcc(dpi, PCC_P2); 1056 else 1057 jme_attempt_pcc(dpi, PCC_P1); 1058 1059 if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) { 1060 if (dpi->attempt < dpi->cur) 1061 tasklet_schedule(&jme->rxclean_task); 1062 jme_set_rx_pcc(jme, dpi->attempt); 1063 dpi->cur = dpi->attempt; 1064 dpi->cnt = 0; 1065 } 1066} 1067 1068static void 1069jme_start_pcc_timer(struct jme_adapter *jme) 1070{ 1071 struct dynpcc_info *dpi = &(jme->dpi); 1072 dpi->last_bytes = NET_STAT(jme).rx_bytes; 1073 dpi->last_pkts = NET_STAT(jme).rx_packets; 1074 dpi->intr_cnt = 0; 1075 jwrite32(jme, JME_TMCSR, 1076 TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT)); 1077} 1078 1079static inline void 1080jme_stop_pcc_timer(struct jme_adapter *jme) 1081{ 1082 jwrite32(jme, JME_TMCSR, 0); 1083} 1084 1085static void 1086jme_shutdown_nic(struct jme_adapter *jme) 1087{ 1088 u32 phylink; 1089 1090 phylink = jme_linkstat_from_phy(jme); 1091 1092 if (!(phylink & PHY_LINK_UP)) { 1093 /* 1094 * Disable all interrupt before issue timer 1095 */ 1096 jme_stop_irq(jme); 1097 jwrite32(jme, JME_TIMER2, TMCSR_EN | 0xFFFFFE); 1098 } 1099} 1100 1101static void 1102jme_pcc_tasklet(unsigned long arg) 1103{ 1104 struct jme_adapter *jme = (struct jme_adapter *)arg; 1105 struct net_device *netdev = jme->dev; 1106 1107 if 
(unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) { 1108 jme_shutdown_nic(jme); 1109 return; 1110 } 1111 1112 if (unlikely(!netif_carrier_ok(netdev) || 1113 (atomic_read(&jme->link_changing) != 1) 1114 )) { 1115 jme_stop_pcc_timer(jme); 1116 return; 1117 } 1118 1119 if (!(test_bit(JME_FLAG_POLL, &jme->flags))) 1120 jme_dynamic_pcc(jme); 1121 1122 jme_start_pcc_timer(jme); 1123} 1124 1125static inline void 1126jme_polling_mode(struct jme_adapter *jme) 1127{ 1128 jme_set_rx_pcc(jme, PCC_OFF); 1129} 1130 1131static inline void 1132jme_interrupt_mode(struct jme_adapter *jme) 1133{ 1134 jme_set_rx_pcc(jme, PCC_P1); 1135} 1136 1137static inline int 1138jme_pseudo_hotplug_enabled(struct jme_adapter *jme) 1139{ 1140 u32 apmc; 1141 apmc = jread32(jme, JME_APMC); 1142 return apmc & JME_APMC_PSEUDO_HP_EN; 1143} 1144 1145static void 1146jme_start_shutdown_timer(struct jme_adapter *jme) 1147{ 1148 u32 apmc; 1149 1150 apmc = jread32(jme, JME_APMC) | JME_APMC_PCIE_SD_EN; 1151 apmc &= ~JME_APMC_EPIEN_CTRL; 1152 if (!no_extplug) { 1153 jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_EN); 1154 wmb(); 1155 } 1156 jwrite32f(jme, JME_APMC, apmc); 1157 1158 jwrite32f(jme, JME_TIMER2, 0); 1159 set_bit(JME_FLAG_SHUTDOWN, &jme->flags); 1160 jwrite32(jme, JME_TMCSR, 1161 TMCSR_EN | ((0xFFFFFF - APMC_PHP_SHUTDOWN_DELAY) & TMCSR_CNT)); 1162} 1163 1164static void 1165jme_stop_shutdown_timer(struct jme_adapter *jme) 1166{ 1167 u32 apmc; 1168 1169 jwrite32f(jme, JME_TMCSR, 0); 1170 jwrite32f(jme, JME_TIMER2, 0); 1171 clear_bit(JME_FLAG_SHUTDOWN, &jme->flags); 1172 1173 apmc = jread32(jme, JME_APMC); 1174 apmc &= ~(JME_APMC_PCIE_SD_EN | JME_APMC_EPIEN_CTRL); 1175 jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_DIS); 1176 wmb(); 1177 jwrite32f(jme, JME_APMC, apmc); 1178} 1179 1180static void 1181jme_link_change_tasklet(unsigned long arg) 1182{ 1183 struct jme_adapter *jme = (struct jme_adapter *)arg; 1184 struct net_device *netdev = jme->dev; 1185 int rc; 1186 1187 while 
(!atomic_dec_and_test(&jme->link_changing)) { 1188 atomic_inc(&jme->link_changing); 1189 netif_info(jme, intr, jme->dev, "Get link change lock failed.\n"); 1190 while (atomic_read(&jme->link_changing) != 1) 1191 netif_info(jme, intr, jme->dev, "Waiting link change lock.\n"); 1192 } 1193 1194 if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu) 1195 goto out; 1196 1197 jme->old_mtu = netdev->mtu; 1198 netif_stop_queue(netdev); 1199 if (jme_pseudo_hotplug_enabled(jme)) 1200 jme_stop_shutdown_timer(jme); 1201 1202 jme_stop_pcc_timer(jme); 1203 tasklet_disable(&jme->txclean_task); 1204 tasklet_disable(&jme->rxclean_task); 1205 tasklet_disable(&jme->rxempty_task); 1206 1207 if (netif_carrier_ok(netdev)) { 1208 jme_reset_ghc_speed(jme); 1209 jme_disable_rx_engine(jme); 1210 jme_disable_tx_engine(jme); 1211 jme_reset_mac_processor(jme); 1212 jme_free_rx_resources(jme); 1213 jme_free_tx_resources(jme); 1214 1215 if (test_bit(JME_FLAG_POLL, &jme->flags)) 1216 jme_polling_mode(jme); 1217 1218 netif_carrier_off(netdev); 1219 } 1220 1221 jme_check_link(netdev, 0); 1222 if (netif_carrier_ok(netdev)) { 1223 rc = jme_setup_rx_resources(jme); 1224 if (rc) { 1225 jeprintk(jme->pdev, "Allocating resources for RX error" 1226 ", Device STOPPED!\n"); 1227 goto out_enable_tasklet; 1228 } 1229 1230 rc = jme_setup_tx_resources(jme); 1231 if (rc) { 1232 jeprintk(jme->pdev, "Allocating resources for TX error" 1233 ", Device STOPPED!\n"); 1234 goto err_out_free_rx_resources; 1235 } 1236 1237 jme_enable_rx_engine(jme); 1238 jme_enable_tx_engine(jme); 1239 1240 netif_start_queue(netdev); 1241 1242 if (test_bit(JME_FLAG_POLL, &jme->flags)) 1243 jme_interrupt_mode(jme); 1244 1245 jme_start_pcc_timer(jme); 1246 } else if (jme_pseudo_hotplug_enabled(jme)) { 1247 jme_start_shutdown_timer(jme); 1248 } 1249 1250 goto out_enable_tasklet; 1251 1252err_out_free_rx_resources: 1253 jme_free_rx_resources(jme); 1254out_enable_tasklet: 1255 tasklet_enable(&jme->txclean_task); 1256 
	/*
	 * Tail of jme_link_change_tasklet() (head is above this chunk):
	 * re-enable the RX bottom halves that were disabled while the
	 * engines were reconfigured, then release the link_changing
	 * "lock" (an atomic used as a mutex; 1 == unlocked).
	 */
	tasklet_hi_enable(&jme->rxclean_task);
	tasklet_hi_enable(&jme->rxempty_task);
out:
	atomic_inc(&jme->link_changing);
}

/*
 * RX bottom half for interrupt (non-NAPI) mode: drain up to a full
 * ring of received frames and count the event for the dynamic
 * PCC (packet completion coalescing) tuner.
 */
static void
jme_rx_clean_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	struct dynpcc_info *dpi = &(jme->dpi);

	jme_process_receive(jme, jme->rx_ring_size);
	++(dpi->intr_cnt);

}

/*
 * NAPI poll callback.  Processes up to @budget frames; if the RX
 * engine stalled on an empty ring meanwhile (rx_empty was raised by
 * the ISR), restarts it and counts a drop.  When fewer frames than
 * the budget arrived, completes NAPI and re-enters interrupt mode.
 * Returns the number of frames processed, per the NAPI contract.
 */
static int
jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
{
	struct jme_adapter *jme = jme_napi_priv(holder);
	int rest;

	rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget));

	while (atomic_read(&jme->rx_empty) > 0) {
		atomic_dec(&jme->rx_empty);
		++(NET_STAT(jme).rx_dropped);
		jme_restart_rx_engine(jme);
	}
	atomic_inc(&jme->rx_empty);

	if (rest) {
		/* Budget not exhausted: leave polling mode. */
		JME_RX_COMPLETE(netdev, holder);
		jme_interrupt_mode(jme);
	}

	JME_NAPI_WEIGHT_SET(budget, rest);
	return JME_NAPI_WEIGHT_VAL(budget) - rest;
}

/*
 * Bottom half scheduled when the ISR saw INTR_RX0EMP (RX ring ran
 * dry).  Skipped while a link change is in progress or the carrier
 * is down; otherwise drains the ring and restarts the RX engine.
 */
static void
jme_rx_empty_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;

	if (unlikely(atomic_read(&jme->link_changing) != 1))
		return;

	if (unlikely(!netif_carrier_ok(jme->dev)))
		return;

	netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n");

	jme_rx_clean_tasklet(arg);

	while (atomic_read(&jme->rx_empty) > 0) {
		atomic_dec(&jme->rx_empty);
		++(NET_STAT(jme).rx_dropped);
		jme_restart_rx_engine(jme);
	}
	atomic_inc(&jme->rx_empty);
}

/*
 * Restart the TX queue once enough descriptors have been reclaimed
 * to cross the wake threshold.  The smp_wmb() orders the free-count
 * update (done by the caller) against the queue-state test.
 */
static void
jme_wake_queue_if_stopped(struct jme_adapter *jme)
{
	struct jme_ring *txring = &(jme->txring[0]);

	smp_wmb();
	if (unlikely(netif_queue_stopped(jme->dev) &&
	atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) {
		netif_info(jme, tx_done, jme->dev, "TX Queue Waked.\n");
		netif_wake_queue(jme->dev);
	}

}

/*
 * TX completion bottom half: walk the ring from next_to_clean,
 * reclaiming every packet whose first descriptor has been handed
 * back by hardware (TXWBFLAG_OWN cleared).  Unmaps fragment pages,
 * frees the skb, updates stats, then advances the ring indices and
 * possibly wakes the queue.  tx_cleaning is an atomic used as a
 * try-lock so at most one cleaner runs at a time.
 */
static void
jme_tx_clean_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	struct jme_ring *txring = &(jme->txring[0]);
	struct txdesc *txdesc = txring->desc;
	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
	int i, j, cnt = 0, max, err, mask;

	tx_dbg(jme, "Into txclean.\n");

	if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning)))
		goto out;

	if (unlikely(atomic_read(&jme->link_changing) != 1))
		goto out;

	if (unlikely(!netif_carrier_ok(jme->dev)))
		goto out;

	/* At most the number of descriptors currently in flight. */
	max = jme->tx_ring_size - atomic_read(&txring->nr_free);
	mask = jme->tx_ring_mask;

	for (i = atomic_read(&txring->next_to_clean) ; cnt < max ; ) {

		ctxbi = txbi + i;

		if (likely(ctxbi->skb &&
		!(txdesc[i].descwb.flags & TXWBFLAG_OWN))) {

			tx_dbg(jme, "txclean: %d+%d@%lu\n",
					i, ctxbi->nr_desc, jiffies);

			err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;

			/*
			 * Descriptors 1..nr_desc-1 carry the mapped
			 * pages; slot 0 is the header descriptor and
			 * has no mapping of its own.
			 */
			for (j = 1 ; j < ctxbi->nr_desc ; ++j) {
				ttxbi = txbi + ((i + j) & (mask));
				txdesc[(i + j) & (mask)].dw[0] = 0;

				pci_unmap_page(jme->pdev,
						ttxbi->mapping,
						ttxbi->len,
						PCI_DMA_TODEVICE);

				ttxbi->mapping = 0;
				ttxbi->len = 0;
			}

			dev_kfree_skb(ctxbi->skb);

			cnt += ctxbi->nr_desc;

			if (unlikely(err)) {
				++(NET_STAT(jme).tx_carrier_errors);
			} else {
				++(NET_STAT(jme).tx_packets);
				NET_STAT(jme).tx_bytes += ctxbi->len;
			}

			ctxbi->skb = NULL;
			ctxbi->len = 0;
			ctxbi->start_xmit = 0;

		} else {
			break;
		}

		i = (i + ctxbi->nr_desc) & mask;

		ctxbi->nr_desc = 0;
	}

	tx_dbg(jme, "txclean: done %d@%lu.\n", i, jiffies);
	atomic_set(&txring->next_to_clean, i);
	atomic_add(cnt, &txring->nr_free);

	jme_wake_queue_if_stopped(jme);

out:
	atomic_inc(&jme->tx_cleaning);
}

static void
jme_intr_msi(struct jme_adapter *jme, u32 intrstat)
{
	/*
	 * Common interrupt dispatch (shared by INTx and MSI paths).
	 * Masks all sources first, acknowledges each handled event in
	 * JME_IEVE, defers the work to tasklets/NAPI, then unmasks.
	 */

	/*
	 * Disable interrupt
	 */
	jwrite32f(jme, JME_IENC, INTR_ENABLE);

	if (intrstat & (INTR_LINKCH | INTR_SWINTR)) {
		/*
		 * Link change event is critical
		 * all other events are ignored
		 */
		jwrite32(jme, JME_IEVE, intrstat);
		tasklet_schedule(&jme->linkch_task);
		goto out_reenable;
	}

	if (intrstat & INTR_TMINTR) {
		/* PCC timer tick: retune the coalescing parameters. */
		jwrite32(jme, JME_IEVE, INTR_TMINTR);
		tasklet_schedule(&jme->pcc_task);
	}

	if (intrstat & (INTR_PCCTXTO | INTR_PCCTX)) {
		jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | INTR_TX0);
		tasklet_schedule(&jme->txclean_task);
	}

	if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
		/* Acknowledge only the RX events that were raised. */
		jwrite32(jme, JME_IEVE, (intrstat & (INTR_PCCRX0TO |
						     INTR_PCCRX0 |
						     INTR_RX0EMP)) |
					INTR_RX0);
	}

	if (test_bit(JME_FLAG_POLL, &jme->flags)) {
		/* NAPI mode: hand RX work to the poll loop. */
		if (intrstat & INTR_RX0EMP)
			atomic_inc(&jme->rx_empty);

		if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
			if (likely(JME_RX_SCHEDULE_PREP(jme))) {
				jme_polling_mode(jme);
				JME_RX_SCHEDULE(jme);
			}
		}
	} else {
		/* Interrupt mode: hand RX work to the tasklets. */
		if (intrstat & INTR_RX0EMP) {
			atomic_inc(&jme->rx_empty);
			tasklet_hi_schedule(&jme->rxempty_task);
		} else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0)) {
			tasklet_hi_schedule(&jme->rxclean_task);
		}
	}

out_reenable:
	/*
	 * Re-enable interrupt
	 */
	jwrite32f(jme, JME_IENS, INTR_ENABLE);
}

/*
 * Legacy (shared INTx) interrupt handler: verify the interrupt is
 * ours and the device is still present before dispatching.
 */
static irqreturn_t
jme_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 intrstat;

	intrstat = jread32(jme, JME_IEVE);

	/*
	 * Check if it's really an interrupt for us
	 */
	if (unlikely((intrstat & INTR_ENABLE) == 0))
		return IRQ_NONE;

	/*
	 * Check if the device still exist
	 * (all-ones readback means the device was removed/surprise-
	 * unplugged and the bus returns 0xFFFFFFFF)
	 */
	if (unlikely(intrstat == ~((typeof(intrstat))0)))
		return IRQ_NONE;

	jme_intr_msi(jme, intrstat);

	return IRQ_HANDLED;
}

/*
 * MSI interrupt handler: the vector is exclusively ours, so no
 * spurious-interrupt checks are needed.
 */
static irqreturn_t
jme_msi(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 intrstat;

	intrstat = jread32(jme, JME_IEVE);

	jme_intr_msi(jme, intrstat);

	return IRQ_HANDLED;
}

/*
 * Trigger a software interrupt via the timer register so the link
 * state machine re-runs in the linkch tasklet.
 */
static void
jme_reset_link(struct jme_adapter *jme)
{
	jwrite32(jme, JME_TMCSR, TMCSR_SWIT);
}

/* Restart PHY autonegotiation (phy_lock serializes MDIO access). */
static void
jme_restart_an(struct jme_adapter *jme)
{
	u32 bmcr;

	spin_lock_bh(&jme->phy_lock);
	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
	bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
	spin_unlock_bh(&jme->phy_lock);
}

/*
 * Install the interrupt handler, preferring MSI (exclusive, no
 * IRQF_SHARED) and falling back to shared INTx.
 */
static int
jme_request_irq(struct jme_adapter *jme)
{
	int rc;
	struct net_device *netdev = jme->dev;
	irq_handler_t handler = jme_intr;
	int irq_flags = IRQF_SHARED;

	if (!pci_enable_msi(jme->pdev)) {
		set_bit(JME_FLAG_MSI, &jme->flags);
		handler = jme_msi;
		irq_flags = 0;
	}

	rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name,
			netdev);
	if (rc) {
		jeprintk(jme->pdev,
			"Unable to request %s interrupt (return: %d)\n",
			test_bit(JME_FLAG_MSI, &jme->flags) ?
"MSI" : "INTx", 1555 rc); 1556 1557 if (test_bit(JME_FLAG_MSI, &jme->flags)) { 1558 pci_disable_msi(jme->pdev); 1559 clear_bit(JME_FLAG_MSI, &jme->flags); 1560 } 1561 } else { 1562 netdev->irq = jme->pdev->irq; 1563 } 1564 1565 return rc; 1566} 1567 1568static void 1569jme_free_irq(struct jme_adapter *jme) 1570{ 1571 free_irq(jme->pdev->irq, jme->dev); 1572 if (test_bit(JME_FLAG_MSI, &jme->flags)) { 1573 pci_disable_msi(jme->pdev); 1574 clear_bit(JME_FLAG_MSI, &jme->flags); 1575 jme->dev->irq = jme->pdev->irq; 1576 } 1577} 1578 1579static int 1580jme_open(struct net_device *netdev) 1581{ 1582 struct jme_adapter *jme = netdev_priv(netdev); 1583 int rc; 1584 1585 jme_clear_pm(jme); 1586 JME_NAPI_ENABLE(jme); 1587 1588 tasklet_enable(&jme->linkch_task); 1589 tasklet_enable(&jme->txclean_task); 1590 tasklet_hi_enable(&jme->rxclean_task); 1591 tasklet_hi_enable(&jme->rxempty_task); 1592 1593 rc = jme_request_irq(jme); 1594 if (rc) 1595 goto err_out; 1596 1597 jme_start_irq(jme); 1598 1599 if (test_bit(JME_FLAG_SSET, &jme->flags)) 1600 jme_set_settings(netdev, &jme->old_ecmd); 1601 else 1602 jme_reset_phy_processor(jme); 1603 1604 jme_reset_link(jme); 1605 1606 return 0; 1607 1608err_out: 1609 netif_stop_queue(netdev); 1610 netif_carrier_off(netdev); 1611 return rc; 1612} 1613 1614#ifdef CONFIG_PM 1615static void 1616jme_set_100m_half(struct jme_adapter *jme) 1617{ 1618 u32 bmcr, tmp; 1619 1620 bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); 1621 tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | 1622 BMCR_SPEED1000 | BMCR_FULLDPLX); 1623 tmp |= BMCR_SPEED100; 1624 1625 if (bmcr != tmp) 1626 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp); 1627 1628 if (jme->fpgaver) 1629 jwrite32(jme, JME_GHC, GHC_SPEED_100M | GHC_LINK_POLL); 1630 else 1631 jwrite32(jme, JME_GHC, GHC_SPEED_100M); 1632} 1633 1634#define JME_WAIT_LINK_TIME 2000 /* 2000ms */ 1635static void 1636jme_wait_link(struct jme_adapter *jme) 1637{ 1638 u32 phylink, to = JME_WAIT_LINK_TIME; 
1639 1640 mdelay(1000); 1641 phylink = jme_linkstat_from_phy(jme); 1642 while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) { 1643 mdelay(10); 1644 phylink = jme_linkstat_from_phy(jme); 1645 } 1646} 1647#endif 1648 1649static inline void 1650jme_phy_off(struct jme_adapter *jme) 1651{ 1652 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN); 1653} 1654 1655static int 1656jme_close(struct net_device *netdev) 1657{ 1658 struct jme_adapter *jme = netdev_priv(netdev); 1659 1660 netif_stop_queue(netdev); 1661 netif_carrier_off(netdev); 1662 1663 jme_stop_irq(jme); 1664 jme_free_irq(jme); 1665 1666 JME_NAPI_DISABLE(jme); 1667 1668 tasklet_disable(&jme->linkch_task); 1669 tasklet_disable(&jme->txclean_task); 1670 tasklet_disable(&jme->rxclean_task); 1671 tasklet_disable(&jme->rxempty_task); 1672 1673 jme_reset_ghc_speed(jme); 1674 jme_disable_rx_engine(jme); 1675 jme_disable_tx_engine(jme); 1676 jme_reset_mac_processor(jme); 1677 jme_free_rx_resources(jme); 1678 jme_free_tx_resources(jme); 1679 jme->phylink = 0; 1680 jme_phy_off(jme); 1681 1682 return 0; 1683} 1684 1685static int 1686jme_alloc_txdesc(struct jme_adapter *jme, 1687 struct sk_buff *skb) 1688{ 1689 struct jme_ring *txring = &(jme->txring[0]); 1690 int idx, nr_alloc, mask = jme->tx_ring_mask; 1691 1692 idx = txring->next_to_use; 1693 nr_alloc = skb_shinfo(skb)->nr_frags + 2; 1694 1695 if (unlikely(atomic_read(&txring->nr_free) < nr_alloc)) 1696 return -1; 1697 1698 atomic_sub(nr_alloc, &txring->nr_free); 1699 1700 txring->next_to_use = (txring->next_to_use + nr_alloc) & mask; 1701 1702 return idx; 1703} 1704 1705static void 1706jme_fill_tx_map(struct pci_dev *pdev, 1707 struct txdesc *txdesc, 1708 struct jme_buffer_info *txbi, 1709 struct page *page, 1710 u32 page_offset, 1711 u32 len, 1712 u8 hidma) 1713{ 1714 dma_addr_t dmaaddr; 1715 1716 dmaaddr = pci_map_page(pdev, 1717 page, 1718 page_offset, 1719 len, 1720 PCI_DMA_TODEVICE); 1721 1722 pci_dma_sync_single_for_device(pdev, 1723 dmaaddr, 1724 
				       len,
				       PCI_DMA_TODEVICE);

	txdesc->dw[0] = 0;
	txdesc->dw[1] = 0;
	txdesc->desc2.flags	= TXFLAG_OWN;
	/* Upper 32 address bits are only valid with 64-bit DMA. */
	txdesc->desc2.flags	|= (hidma) ? TXFLAG_64BIT : 0;
	txdesc->desc2.datalen	= cpu_to_le16(len);
	txdesc->desc2.bufaddrh	= cpu_to_le32((__u64)dmaaddr >> 32);
	txdesc->desc2.bufaddrl	= cpu_to_le32(
					(__u64)dmaaddr & 0xFFFFFFFFUL);

	txbi->mapping = dmaaddr;
	txbi->len = len;
}

/*
 * Map an skb's buffers into the TX ring starting at @idx: slot idx
 * is the header descriptor (filled by jme_fill_tx_desc), slot idx+1
 * holds the linear data, and the page fragments go in idx+2 onward.
 */
static void
jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
{
	struct jme_ring *txring = &(jme->txring[0]);
	struct txdesc *txdesc = txring->desc, *ctxdesc;
	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
	u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
	int i, nr_frags = skb_shinfo(skb)->nr_frags;
	int mask = jme->tx_ring_mask;
	struct skb_frag_struct *frag;
	u32 len;

	for (i = 0 ; i < nr_frags ; ++i) {
		frag = &skb_shinfo(skb)->frags[i];
		ctxdesc = txdesc + ((idx + i + 2) & (mask));
		ctxbi = txbi + ((idx + i + 2) & (mask));

		jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, frag->page,
				 frag->page_offset, frag->size, hidma);
	}

	/* Linear part: headlen when fragmented, whole skb otherwise. */
	len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
	ctxdesc = txdesc + ((idx + 1) & (mask));
	ctxbi = txbi + ((idx + 1) & (mask));
	jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
			offset_in_page(skb->data), len, hidma);

}

/*
 * GSO skbs with a cloned (shared) header must get a private copy
 * before we edit the TCP/IP headers for TSO.  Frees the skb and
 * returns -1 on allocation failure, 0 otherwise.
 */
static int
jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
{
	if (unlikely(skb_shinfo(skb)->gso_size &&
			skb_header_cloned(skb) &&
			pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) {
		dev_kfree_skb(skb);
		return -1;
	}

	return 0;
}

/*
 * Prepare a TSO send: store the MSS in the descriptor and prime the
 * TCP pseudo-header checksum (hardware fills in the rest).  Returns
 * 0 when TSO applies, 1 when the caller should fall back to plain
 * checksum offload.
 */
static int
jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
{
	*mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT);
	if (*mss) {
		*flags |= TXFLAG_LSEN;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);

			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								iph->daddr, 0,
								IPPROTO_TCP,
								0);
		} else {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			tcp_hdr(skb)->check = ~csum_ipv6_magic(&ip6h->saddr,
								&ip6h->daddr, 0,
								IPPROTO_TCP,
								0);
		}

		return 0;
	}

	return 1;
}

/*
 * Request hardware TCP/UDP checksum insertion for CHECKSUM_PARTIAL
 * skbs by setting the matching descriptor flag.
 */
static void
jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 ip_proto;

		switch (skb->protocol) {
		case htons(ETH_P_IP):
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			ip_proto = 0;
			break;
		}

		switch (ip_proto) {
		case IPPROTO_TCP:
			*flags |= TXFLAG_TCPCS;
			break;
		case IPPROTO_UDP:
			*flags |= TXFLAG_UDPCS;
			break;
		default:
			netif_err(jme, tx_err, jme->dev, "Error upper layer protocol.\n");
			break;
		}
	}
}

/* Request hardware VLAN tag insertion when the skb carries a tag. */
static inline void
jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags)
{
	if
	    (vlan_tx_tag_present(skb)) {
		*flags |= TXFLAG_TAGON;
		*vlan = cpu_to_le16(vlan_tx_tag_get(skb));
	}
}

/*
 * Fill the header (type-1) descriptor at @idx for an skb whose data
 * descriptors are written by jme_map_tx_skb().  The wmb() pair is
 * load-bearing: all descriptor fields must be visible before the
 * OWN flag is set, and the flag must be set before the bookkeeping
 * that tx-clean reads.
 */
static int
jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
{
	struct jme_ring *txring = &(jme->txring[0]);
	struct txdesc *txdesc;
	struct jme_buffer_info *txbi;
	u8 flags;

	txdesc = (struct txdesc *)txring->desc + idx;
	txbi = txring->bufinf + idx;

	txdesc->dw[0] = 0;
	txdesc->dw[1] = 0;
	txdesc->dw[2] = 0;
	txdesc->dw[3] = 0;
	txdesc->desc1.pktsize = cpu_to_le16(skb->len);
	/*
	 * Set OWN bit at final.
	 * When kernel transmit faster than NIC.
	 * And NIC trying to send this descriptor before we tell
	 * it to start sending this TX queue.
	 * Other fields are already filled correctly.
	 */
	wmb();
	flags = TXFLAG_OWN | TXFLAG_INT;
	/*
	 * Set checksum flags while not tso
	 */
	if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
		jme_tx_csum(jme, skb, &flags);
	jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
	jme_map_tx_skb(jme, skb, idx);
	txdesc->desc1.flags = flags;
	/*
	 * Set tx buffer info after telling NIC to send
	 * For better tx_clean timing
	 */
	wmb();
	txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2;
	txbi->skb = skb;
	txbi->len = skb->len;
	txbi->start_xmit = jiffies;
	/* start_xmit == 0 means "unused"; avoid the ambiguity. */
	if (!txbi->start_xmit)
		txbi->start_xmit = (0UL-1);

	return 0;
}

/*
 * Pause the TX queue when fewer than a worst-case skb's worth of
 * descriptors remain (re-waking immediately if a racing tx-clean
 * already freed enough), and also stop it when the oldest pending
 * packet has been stuck longer than TX_TIMEOUT so the watchdog can
 * fire.
 */
static void
jme_stop_queue_if_full(struct jme_adapter *jme)
{
	struct jme_ring *txring = &(jme->txring[0]);
	struct jme_buffer_info *txbi = txring->bufinf;
	int idx = atomic_read(&txring->next_to_clean);

	txbi += idx;

	smp_wmb();
	if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
		netif_stop_queue(jme->dev);
		netif_info(jme, tx_queued, jme->dev, "TX Queue Paused.\n");
		smp_wmb();
		if (atomic_read(&txring->nr_free)
			>= (jme->tx_wake_threshold)) {
			netif_wake_queue(jme->dev);
			netif_info(jme, tx_queued, jme->dev, "TX Queue Fast Waked.\n");
		}
	}

	if (unlikely(txbi->start_xmit &&
			(jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
			txbi->skb)) {
		netif_stop_queue(jme->dev);
		netif_info(jme, tx_queued, jme->dev, "TX Queue Stopped %d@%lu.\n", idx, jiffies);
	}
}

/*
 * This function is already protected by netif_tx_lock()
 */

static netdev_tx_t
jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int idx;

	/* ndo_start_xmit: reserve descriptors, fill them, kick DMA. */
	if (unlikely(jme_expand_header(jme, skb))) {
		++(NET_STAT(jme).tx_dropped);
		return NETDEV_TX_OK;
	}

	idx = jme_alloc_txdesc(jme, skb);

	if (unlikely(idx < 0)) {
		/* Should not happen: the queue is stopped before full. */
		netif_stop_queue(netdev);
		netif_err(jme, tx_err, jme->dev, "BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}

	jme_fill_tx_desc(jme, skb, idx);

	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_QUEUE0S |
				TXCS_ENABLE);

	tx_dbg(jme, "xmit: %d+%d@%lu\n", idx,
			skb_shinfo(skb)->nr_frags + 2,
			jiffies);
	jme_stop_queue_if_full(jme);

	return NETDEV_TX_OK;
}

/*
 * ndo_set_mac_address: refuse while the interface is running, then
 * store the new address and program the unicast match registers
 * (bytes 0-3 in RXUMA_LO, bytes 4-5 in RXUMA_HI).
 */
static int
jme_set_macaddr(struct net_device *netdev, void *p)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	struct sockaddr *addr = p;
	u32 val;

	if (netif_running(netdev))
		return -EBUSY;

	spin_lock_bh(&jme->macaddr_lock);
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	val = (addr->sa_data[3] & 0xff) << 24 |
	      (addr->sa_data[2] & 0xff) << 16 |
	      (addr->sa_data[1] & 0xff) <<  8 |
	      (addr->sa_data[0] & 0xff);
	jwrite32(jme, JME_RXUMA_LO, val);
	val = (addr->sa_data[5] & 0xff) << 8 |
	      (addr->sa_data[4] & 0xff);
	jwrite32(jme, JME_RXUMA_HI, val);
spin_unlock_bh(&jme->macaddr_lock); 1991 1992 return 0; 1993} 1994 1995static void 1996jme_set_multi(struct net_device *netdev) 1997{ 1998 struct jme_adapter *jme = netdev_priv(netdev); 1999 u32 mc_hash[2] = {}; 2000 2001 spin_lock_bh(&jme->rxmcs_lock); 2002 2003 jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME; 2004 2005 if (netdev->flags & IFF_PROMISC) { 2006 jme->reg_rxmcs |= RXMCS_ALLFRAME; 2007 } else if (netdev->flags & IFF_ALLMULTI) { 2008 jme->reg_rxmcs |= RXMCS_ALLMULFRAME; 2009 } else if (netdev->flags & IFF_MULTICAST) { 2010 struct dev_mc_list *mclist; 2011 int bit_nr; 2012 2013 jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED; 2014 netdev_for_each_mc_addr(mclist, netdev) { 2015 bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F; 2016 mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F); 2017 } 2018 2019 jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]); 2020 jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]); 2021 } 2022 2023 wmb(); 2024 jwrite32(jme, JME_RXMCS, jme->reg_rxmcs); 2025 2026 spin_unlock_bh(&jme->rxmcs_lock); 2027} 2028 2029static int 2030jme_change_mtu(struct net_device *netdev, int new_mtu) 2031{ 2032 struct jme_adapter *jme = netdev_priv(netdev); 2033 2034 if (new_mtu == jme->old_mtu) 2035 return 0; 2036 2037 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) || 2038 ((new_mtu) < IPV6_MIN_MTU)) 2039 return -EINVAL; 2040 2041 if (new_mtu > 4000) { 2042 jme->reg_rxcs &= ~RXCS_FIFOTHNP; 2043 jme->reg_rxcs |= RXCS_FIFOTHNP_64QW; 2044 jme_restart_rx_engine(jme); 2045 } else { 2046 jme->reg_rxcs &= ~RXCS_FIFOTHNP; 2047 jme->reg_rxcs |= RXCS_FIFOTHNP_128QW; 2048 jme_restart_rx_engine(jme); 2049 } 2050 2051 if (new_mtu > 1900) { 2052 netdev->features &= ~(NETIF_F_HW_CSUM | 2053 NETIF_F_TSO | 2054 NETIF_F_TSO6); 2055 } else { 2056 if (test_bit(JME_FLAG_TXCSUM, &jme->flags)) 2057 netdev->features |= NETIF_F_HW_CSUM; 2058 if (test_bit(JME_FLAG_TSO, &jme->flags)) 2059 netdev->features |= NETIF_F_TSO | NETIF_F_TSO6; 2060 } 2061 2062 netdev->mtu = new_mtu; 2063 
jme_reset_link(jme); 2064 2065 return 0; 2066} 2067 2068static void 2069jme_tx_timeout(struct net_device *netdev) 2070{ 2071 struct jme_adapter *jme = netdev_priv(netdev); 2072 2073 jme->phylink = 0; 2074 jme_reset_phy_processor(jme); 2075 if (test_bit(JME_FLAG_SSET, &jme->flags)) 2076 jme_set_settings(netdev, &jme->old_ecmd); 2077 2078 /* 2079 * Force to Reset the link again 2080 */ 2081 jme_reset_link(jme); 2082} 2083 2084static void 2085jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) 2086{ 2087 struct jme_adapter *jme = netdev_priv(netdev); 2088 2089 jme->vlgrp = grp; 2090} 2091 2092static void 2093jme_get_drvinfo(struct net_device *netdev, 2094 struct ethtool_drvinfo *info) 2095{ 2096 struct jme_adapter *jme = netdev_priv(netdev); 2097 2098 strcpy(info->driver, DRV_NAME); 2099 strcpy(info->version, DRV_VERSION); 2100 strcpy(info->bus_info, pci_name(jme->pdev)); 2101} 2102 2103static int 2104jme_get_regs_len(struct net_device *netdev) 2105{ 2106 return JME_REG_LEN; 2107} 2108 2109static void 2110mmapio_memcpy(struct jme_adapter *jme, u32 *p, u32 reg, int len) 2111{ 2112 int i; 2113 2114 for (i = 0 ; i < len ; i += 4) 2115 p[i >> 2] = jread32(jme, reg + i); 2116} 2117 2118static void 2119mdio_memcpy(struct jme_adapter *jme, u32 *p, int reg_nr) 2120{ 2121 int i; 2122 u16 *p16 = (u16 *)p; 2123 2124 for (i = 0 ; i < reg_nr ; ++i) 2125 p16[i] = jme_mdio_read(jme->dev, jme->mii_if.phy_id, i); 2126} 2127 2128static void 2129jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) 2130{ 2131 struct jme_adapter *jme = netdev_priv(netdev); 2132 u32 *p32 = (u32 *)p; 2133 2134 memset(p, 0xFF, JME_REG_LEN); 2135 2136 regs->version = 1; 2137 mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN); 2138 2139 p32 += 0x100 >> 2; 2140 mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN); 2141 2142 p32 += 0x100 >> 2; 2143 mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN); 2144 2145 p32 += 0x100 >> 2; 2146 mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN); 2147 
	p32 += 0x100 >> 2;
	mdio_memcpy(jme, p32, JME_PHY_REG_NR);
}

/*
 * ethtool get_coalesce: TX coalescing is fixed; RX reflects the
 * current mode — NAPI polling reports no coalescing, otherwise the
 * active dynamic-PCC level (P1..P3) is reported.
 */
static int
jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	ecmd->tx_coalesce_usecs = PCC_TX_TO;
	ecmd->tx_max_coalesced_frames = PCC_TX_CNT;

	if (test_bit(JME_FLAG_POLL, &jme->flags)) {
		ecmd->use_adaptive_rx_coalesce = false;
		ecmd->rx_coalesce_usecs = 0;
		ecmd->rx_max_coalesced_frames = 0;
		return 0;
	}

	ecmd->use_adaptive_rx_coalesce = true;

	switch (jme->dpi.cur) {
	case PCC_P1:
		ecmd->rx_coalesce_usecs = PCC_P1_TO;
		ecmd->rx_max_coalesced_frames = PCC_P1_CNT;
		break;
	case PCC_P2:
		ecmd->rx_coalesce_usecs = PCC_P2_TO;
		ecmd->rx_max_coalesced_frames = PCC_P2_CNT;
		break;
	case PCC_P3:
		ecmd->rx_coalesce_usecs = PCC_P3_TO;
		ecmd->rx_max_coalesced_frames = PCC_P3_CNT;
		break;
	default:
		break;
	}

	return 0;
}

/*
 * ethtool set_coalesce: only the use_adaptive_rx_coalesce knob is
 * honored, switching between dynamic-PCC interrupt mode (adaptive
 * on, JME_FLAG_POLL cleared, netif_rx delivery) and NAPI polling
 * (adaptive off, JME_FLAG_POLL set, netif_receive_skb delivery).
 * Refused while the interface is up.
 */
static int
jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	struct dynpcc_info *dpi = &(jme->dpi);

	if (netif_running(netdev))
		return -EBUSY;

	if (ecmd->use_adaptive_rx_coalesce &&
	    test_bit(JME_FLAG_POLL, &jme->flags)) {
		clear_bit(JME_FLAG_POLL, &jme->flags);
		jme->jme_rx = netif_rx;
		jme->jme_vlan_rx = vlan_hwaccel_rx;
		dpi->cur		= PCC_P1;
		dpi->attempt		= PCC_P1;
		dpi->cnt		= 0;
		jme_set_rx_pcc(jme, PCC_P1);
		jme_interrupt_mode(jme);
	} else if (!(ecmd->use_adaptive_rx_coalesce) &&
		   !(test_bit(JME_FLAG_POLL, &jme->flags))) {
		set_bit(JME_FLAG_POLL, &jme->flags);
		jme->jme_rx = netif_receive_skb;
		jme->jme_vlan_rx = vlan_hwaccel_receive_skb;
		jme_interrupt_mode(jme);
	}

	return 0;
}

/*
 * ethtool get_pauseparam: TX/RX pause from the cached register
 * shadows; "autoneg" reports whether pause is being advertised.
 */
static void
jme_get_pauseparam(struct net_device *netdev,
		   struct ethtool_pauseparam *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 val;

	ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0;
	ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0;

	spin_lock_bh(&jme->phy_lock);
	val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
	spin_unlock_bh(&jme->phy_lock);

	ecmd->autoneg =
		(val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
}

/*
 * ethtool set_pauseparam: write each of the three pause controls
 * only when it actually changed (XOR of current vs. requested).
 */
static int
jme_set_pauseparam(struct net_device *netdev,
		   struct ethtool_pauseparam *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 val;

	if (((jme->reg_txpfc & TXPFC_PF_EN) != 0) ^
		(ecmd->tx_pause != 0)) {

		if (ecmd->tx_pause)
			jme->reg_txpfc |= TXPFC_PF_EN;
		else
			jme->reg_txpfc &= ~TXPFC_PF_EN;

		jwrite32(jme, JME_TXPFC, jme->reg_txpfc);
	}

	spin_lock_bh(&jme->rxmcs_lock);
	if (((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) ^
		(ecmd->rx_pause != 0)) {

		if (ecmd->rx_pause)
			jme->reg_rxmcs |= RXMCS_FLOWCTRL;
		else
			jme->reg_rxmcs &= ~RXMCS_FLOWCTRL;

		jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
	}
	spin_unlock_bh(&jme->rxmcs_lock);

	spin_lock_bh(&jme->phy_lock);
	val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
	if (((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) ^
		(ecmd->autoneg != 0)) {

		if (ecmd->autoneg)
			val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		else
			val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		jme_mdio_write(jme->dev, jme->mii_if.phy_id,
				MII_ADVERTISE, val);
	}
	spin_unlock_bh(&jme->phy_lock);

	return 0;
}

/*
 * ethtool get_wol: report supported wake methods (magic packet and
 * link change) and which are currently enabled in the PM shadow.
 */
static void
jme_get_wol(struct net_device *netdev,
	    struct ethtool_wolinfo *wol)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	wol->supported = WAKE_MAGIC | WAKE_PHY;

	wol->wolopts
	= 0;

	if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
		wol->wolopts |= WAKE_PHY;

	if (jme->reg_pmcs & PMCS_MFEN)
		wol->wolopts |= WAKE_MAGIC;

}

/*
 * ethtool set_wol: accept only WAKE_PHY (link fall/rise events) and
 * WAKE_MAGIC; everything else is unsupported by the hardware.
 */
static int
jme_set_wol(struct net_device *netdev,
	    struct ethtool_wolinfo *wol)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_MAGICSECURE |
			    WAKE_UCAST |
			    WAKE_MCAST |
			    WAKE_BCAST |
			    WAKE_ARP))
		return -EOPNOTSUPP;

	jme->reg_pmcs = 0;

	if (wol->wolopts & WAKE_PHY)
		jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN;

	if (wol->wolopts & WAKE_MAGIC)
		jme->reg_pmcs |= PMCS_MFEN;

	jwrite32(jme, JME_PMCS, jme->reg_pmcs);

	return 0;
}

/* ethtool get_settings: delegate to the generic MII helper. */
static int
jme_get_settings(struct net_device *netdev,
		     struct ethtool_cmd *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc;

	spin_lock_bh(&jme->phy_lock);
	rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
	spin_unlock_bh(&jme->phy_lock);
	return rc;
}

/*
 * ethtool set_settings: apply via the MII helper; 1000M requires
 * autoneg.  On success the settings are remembered (JME_FLAG_SSET)
 * so jme_open()/watchdog can restore them, and the link is bounced
 * when a forced-duplex change was made.
 */
static int
jme_set_settings(struct net_device *netdev,
		     struct ethtool_cmd *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc, fdc = 0;

	if (ecmd->speed == SPEED_1000 && ecmd->autoneg != AUTONEG_ENABLE)
		return -EINVAL;

	if (jme->mii_if.force_media &&
	ecmd->autoneg != AUTONEG_ENABLE &&
	(jme->mii_if.full_duplex != ecmd->duplex))
		fdc = 1;

	spin_lock_bh(&jme->phy_lock);
	rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
	spin_unlock_bh(&jme->phy_lock);

	if (!rc && fdc)
		jme_reset_link(jme);

	if (!rc) {
		set_bit(JME_FLAG_SSET, &jme->flags);
		jme->old_ecmd = *ecmd;
	}

	return rc;
}

/* ethtool get_link: read link-up straight from the PHY_LINK register. */
static u32
jme_get_link(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
}

/* ethtool message level accessors. */
static u32
jme_get_msglevel(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	return jme->msg_enable;
}

static void
jme_set_msglevel(struct net_device *netdev, u32 value)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	jme->msg_enable = value;
}

/* ethtool RX checksum offload accessors (RXMCS_CHECKSUM shadow). */
static u32
jme_get_rx_csum(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	return jme->reg_rxmcs & RXMCS_CHECKSUM;
}

static int
jme_set_rx_csum(struct net_device *netdev, u32 on)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	spin_lock_bh(&jme->rxmcs_lock);
	if (on)
		jme->reg_rxmcs |= RXMCS_CHECKSUM;
	else
		jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
	jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
	spin_unlock_bh(&jme->rxmcs_lock);

	return 0;
}

/*
 * ethtool TX checksum offload: remember the user's choice in a flag
 * but only enable the feature below the 1900-byte MTU hardware
 * limit (see jme_change_mtu()).
 */
static int
jme_set_tx_csum(struct net_device *netdev, u32 on)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	if (on) {
		set_bit(JME_FLAG_TXCSUM, &jme->flags);
		if (netdev->mtu <= 1900)
			netdev->features |= NETIF_F_HW_CSUM;
	} else {
		clear_bit(JME_FLAG_TXCSUM, &jme->flags);
		netdev->features &= ~NETIF_F_HW_CSUM;
	}

	return 0;
}

/* ethtool TSO toggle: same MTU-gated pattern as TX checksum. */
static int
jme_set_tso(struct net_device *netdev, u32 on)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	if (on) {
		set_bit(JME_FLAG_TSO, &jme->flags);
		if (netdev->mtu <= 1900)
			netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
	} else {
		clear_bit(JME_FLAG_TSO, &jme->flags);
		netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
	}

	return 0;
}

/* ethtool nway_reset: restart PHY autonegotiation. */
static int
jme_nway_reset(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	jme_restart_an(jme);
	return 0;
}

/*
 * Read one byte from the SMBus EEPROM at @addr.  Polls the bus-busy
 * and command-done bits with 1ms sleeps; returns 0xFF on timeout.
 */
static u8
jme_smb_read(struct jme_adapter *jme, unsigned int addr)
{
	u32 val;
	int to;

	val = jread32(jme, JME_SMBCSR);
	to = JME_SMB_BUSY_TIMEOUT;
	while ((val & SMBCSR_BUSY) && --to) {
		msleep(1);
		val = jread32(jme, JME_SMBCSR);
	}
	if (!to) {
		netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
		return 0xFF;
	}

	jwrite32(jme, JME_SMBINTF,
		((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
		SMBINTF_HWRWN_READ |
		SMBINTF_HWCMD);

	/* Wait for the hardware command bit to self-clear. */
	val = jread32(jme, JME_SMBINTF);
	to = JME_SMB_BUSY_TIMEOUT;
	while ((val & SMBINTF_HWCMD) && --to) {
		msleep(1);
		val = jread32(jme, JME_SMBINTF);
	}
	if (!to) {
		netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
		return 0xFF;
	}

	return (val & SMBINTF_HWDATR) >> SMBINTF_HWDATR_SHIFT;
}

/*
 * Write one byte to the SMBus EEPROM at @addr.  Same busy/command
 * polling as jme_smb_read(); the trailing mdelay(2) allows the
 * EEPROM's internal write cycle to finish.
 */
static void
jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
{
	u32 val;
	int to;

	val = jread32(jme, JME_SMBCSR);
	to = JME_SMB_BUSY_TIMEOUT;
	while ((val & SMBCSR_BUSY) && --to) {
		msleep(1);
		val = jread32(jme, JME_SMBCSR);
	}
	if (!to) {
		netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
		return;
	}

	jwrite32(jme, JME_SMBINTF,
		((data << SMBINTF_HWDATW_SHIFT) & SMBINTF_HWDATW) |
		((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
		SMBINTF_HWRWN_WRITE |
		SMBINTF_HWCMD);

	val = jread32(jme, JME_SMBINTF);
	to = JME_SMB_BUSY_TIMEOUT;
	while ((val & SMBINTF_HWCMD) && --to) {
		msleep(1);
		val = jread32(jme, JME_SMBINTF);
	}
	if (!to) {
		netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
		return;
	}

	mdelay(2);
}

/*
 * ethtool get_eeprom_len: report the EEPROM size only when the
 * SMBCSR "EEPROM detected" bit is set, 0 otherwise.
 */
static int
jme_get_eeprom_len(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 val;
	val = jread32(jme, JME_SMBCSR);
	return (val & SMBCSR_EEPROMD) ? JME_SMB_LEN : 0;
}

/* ethtool get_eeprom: byte-wise read through the SMBus interface. */
static int
jme_get_eeprom(struct net_device *netdev,
		struct ethtool_eeprom *eeprom, u8 *data)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i, offset = eeprom->offset, len = eeprom->len;

	/*
	 * ethtool will check the boundary for us
	 */
	eeprom->magic = JME_EEPROM_MAGIC;
	for (i = 0 ; i < len ; ++i)
		data[i] = jme_smb_read(jme, i + offset);

	return 0;
}

/* ethtool set_eeprom: byte-wise write, guarded by the magic value. */
static int
jme_set_eeprom(struct net_device *netdev,
		struct ethtool_eeprom *eeprom, u8 *data)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i, offset = eeprom->offset, len = eeprom->len;

	if (eeprom->magic != JME_EEPROM_MAGIC)
		return -EINVAL;

	/*
	 * ethtool will check the boundary for us
	 */
	for (i = 0 ; i < len ; ++i)
		jme_smb_write(jme, i + offset, data[i]);

	return 0;
}

static const struct ethtool_ops jme_ethtool_ops = {
	.get_drvinfo            = jme_get_drvinfo,
	.get_regs_len		= jme_get_regs_len,
	.get_regs		= jme_get_regs,
	.get_coalesce		= jme_get_coalesce,
	.set_coalesce		= jme_set_coalesce,
	.get_pauseparam		= jme_get_pauseparam,
	.set_pauseparam		= jme_set_pauseparam,
	.get_wol		= jme_get_wol,
	.set_wol		= jme_set_wol,
	.get_settings		= jme_get_settings,
	.set_settings		= jme_set_settings,
	.get_link		= jme_get_link,
	.get_msglevel           = jme_get_msglevel,
	.set_msglevel           = jme_set_msglevel,
	.get_rx_csum		= jme_get_rx_csum,
	.set_rx_csum		= jme_set_rx_csum,
	.set_tx_csum		= jme_set_tx_csum,
	.set_tso		= jme_set_tso,
	.set_sg			= ethtool_op_set_sg,
	.nway_reset             = jme_nway_reset,
	.get_eeprom_len		= jme_get_eeprom_len,
	.get_eeprom		= jme_get_eeprom,
	.set_eeprom		= jme_set_eeprom,
};

/*
 * Pick the widest usable DMA mask: 64-bit, then 40-bit (JMC250
 * only), then 32-bit.  Returns 1 when high (>32-bit) DMA is in use,
 * 0 for 32-bit, -1 when no mask could be set.
 */
static int
jme_pci_dma64(struct pci_dev *pdev)
{
	if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
	    !pci_set_dma_mask(pdev,
			      DMA_BIT_MASK(64)))
		if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
			return 1;

	/* 40-bit fallback, still JMC250-only. */
	if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(40)))
		if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)))
			return 1;

	/* Plain 32-bit DMA; works for every chip variant. */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
		if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
			return 0;

	/* No usable DMA mask at all. */
	return -1;
}

/*
 * jme_phy_init - apply a PHY register tweak on non-FPGA parts.
 *
 * Sets bit 12 in (vendor-specific) PHY register 26.  NOTE(review):
 * the register's meaning is not documented here — presumably an
 * errata/initialization workaround from the vendor; confirm against
 * JMicron documentation before touching.
 */
static inline void
jme_phy_init(struct jme_adapter *jme)
{
	u16 reg26;

	reg26 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 26);
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, reg26 | 0x1000);
}

/*
 * jme_check_hw_ver - latch FPGA version and chip revision from the
 * CHIPMODE register into the adapter structure.  Must run before
 * jme_reset_mac_processor() (see the probe routine).
 */
static inline void
jme_check_hw_ver(struct jme_adapter *jme)
{
	u32 chipmode;

	chipmode = jread32(jme, JME_CHIPMODE);

	jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
	jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT;
}

/* net_device callbacks implemented by this driver. */
static const struct net_device_ops jme_netdev_ops = {
	.ndo_open		= jme_open,
	.ndo_stop		= jme_close,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_start_xmit		= jme_start_xmit,
	.ndo_set_mac_address	= jme_set_macaddr,
	.ndo_set_multicast_list	= jme_set_multi,
	.ndo_change_mtu		= jme_change_mtu,
	.ndo_tx_timeout		= jme_tx_timeout,
	.ndo_vlan_rx_register	= jme_vlan_rx_register,
};

/*
 * jme_init_one - PCI probe: bring up one JMC250/JMC260 device.
 * @pdev: PCI device handed to us by the PCI core
 * @ent:  matching entry from jme_pci_tbl
 *
 * Enables the device, sets the DMA mask, maps BAR0, allocates and
 * initializes the net_device/adapter state, probes the PHY, reloads
 * the MAC address from EEPROM and registers the netdev.  Unwinds via
 * goto labels on failure.  Returns 0 or a negative errno.
 */
static int __devinit
jme_init_one(struct pci_dev *pdev,
	     const struct pci_device_id *ent)
{
	int rc = 0, using_dac, i;
	struct net_device *netdev;
	struct jme_adapter *jme;
	u16 bmcr, bmsr;
	u32 apmc;

	/*
	 * set up PCI device basics
	 */
	rc = pci_enable_device(pdev);
	if (rc) {
		jeprintk(pdev, "Cannot enable PCI device.\n");
		goto err_out;
	}

	using_dac = jme_pci_dma64(pdev);
	if (using_dac < 0) {
		jeprintk(pdev, "Cannot set PCI DMA Mask.\n");
		rc = -EIO;
		goto
			err_out_disable_pdev;
	}

	/* BAR0 must be a memory-mapped register window. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		jeprintk(pdev, "No PCI resource region found.\n");
		rc = -ENOMEM;
		goto err_out_disable_pdev;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		jeprintk(pdev, "Cannot obtain PCI resource region.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/*
	 * alloc and init net device
	 */
	netdev = alloc_etherdev(sizeof(*jme));
	if (!netdev) {
		jeprintk(pdev, "Cannot allocate netdev structure.\n");
		rc = -ENOMEM;
		goto err_out_release_regions;
	}
	netdev->netdev_ops = &jme_netdev_ops;
	netdev->ethtool_ops		= &jme_ethtool_ops;
	netdev->watchdog_timeo		= TX_TIMEOUT;
	netdev->features		=	NETIF_F_HW_CSUM |
						NETIF_F_SG |
						NETIF_F_TSO |
						NETIF_F_TSO6 |
						NETIF_F_HW_VLAN_TX |
						NETIF_F_HW_VLAN_RX;
	if (using_dac)
		/* >32-bit DMA mask accepted: allow high-memory buffers. */
		netdev->features	|=	NETIF_F_HIGHDMA;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);

	/*
	 * init adapter info
	 */
	jme = netdev_priv(netdev);
	jme->pdev = pdev;
	jme->dev = netdev;
	jme->jme_rx = netif_rx;
	jme->jme_vlan_rx = vlan_hwaccel_rx;
	jme->old_mtu = netdev->mtu = 1500;
	jme->phylink = 0;
	/* Ring sizes are powers of two so masks can replace modulo. */
	jme->tx_ring_size = 1 << 10;
	jme->tx_ring_mask = jme->tx_ring_size - 1;
	jme->tx_wake_threshold = 1 << 9;
	jme->rx_ring_size = 1 << 9;
	jme->rx_ring_mask = jme->rx_ring_size - 1;
	jme->msg_enable = JME_DEF_MSG_ENABLE;
	jme->regs = ioremap(pci_resource_start(pdev, 0),
			     pci_resource_len(pdev, 0));
	if (!(jme->regs)) {
		jeprintk(pdev, "Mapping PCI resource region error.\n");
		rc = -ENOMEM;
		goto err_out_free_netdev;
	}

	/* Pseudo hot-plug policy, overridable via module parameters. */
	if (no_pseudohp) {
		apmc = jread32(jme, JME_APMC) & ~JME_APMC_PSEUDO_HP_EN;
		jwrite32(jme, JME_APMC, apmc);
	} else if (force_pseudohp) {
		apmc = jread32(jme, JME_APMC) |
JME_APMC_PSEUDO_HP_EN; 2748 jwrite32(jme, JME_APMC, apmc); 2749 } 2750 2751 NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2) 2752 2753 spin_lock_init(&jme->phy_lock); 2754 spin_lock_init(&jme->macaddr_lock); 2755 spin_lock_init(&jme->rxmcs_lock); 2756 2757 atomic_set(&jme->link_changing, 1); 2758 atomic_set(&jme->rx_cleaning, 1); 2759 atomic_set(&jme->tx_cleaning, 1); 2760 atomic_set(&jme->rx_empty, 1); 2761 2762 tasklet_init(&jme->pcc_task, 2763 jme_pcc_tasklet, 2764 (unsigned long) jme); 2765 tasklet_init(&jme->linkch_task, 2766 jme_link_change_tasklet, 2767 (unsigned long) jme); 2768 tasklet_init(&jme->txclean_task, 2769 jme_tx_clean_tasklet, 2770 (unsigned long) jme); 2771 tasklet_init(&jme->rxclean_task, 2772 jme_rx_clean_tasklet, 2773 (unsigned long) jme); 2774 tasklet_init(&jme->rxempty_task, 2775 jme_rx_empty_tasklet, 2776 (unsigned long) jme); 2777 tasklet_disable_nosync(&jme->linkch_task); 2778 tasklet_disable_nosync(&jme->txclean_task); 2779 tasklet_disable_nosync(&jme->rxclean_task); 2780 tasklet_disable_nosync(&jme->rxempty_task); 2781 jme->dpi.cur = PCC_P1; 2782 2783 jme->reg_ghc = 0; 2784 jme->reg_rxcs = RXCS_DEFAULT; 2785 jme->reg_rxmcs = RXMCS_DEFAULT; 2786 jme->reg_txpfc = 0; 2787 jme->reg_pmcs = PMCS_MFEN; 2788 set_bit(JME_FLAG_TXCSUM, &jme->flags); 2789 set_bit(JME_FLAG_TSO, &jme->flags); 2790 2791 /* 2792 * Get Max Read Req Size from PCI Config Space 2793 */ 2794 pci_read_config_byte(pdev, PCI_DCSR_MRRS, &jme->mrrs); 2795 jme->mrrs &= PCI_DCSR_MRRS_MASK; 2796 switch (jme->mrrs) { 2797 case MRRS_128B: 2798 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B; 2799 break; 2800 case MRRS_256B: 2801 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B; 2802 break; 2803 default: 2804 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B; 2805 break; 2806 }; 2807 2808 /* 2809 * Must check before reset_mac_processor 2810 */ 2811 jme_check_hw_ver(jme); 2812 jme->mii_if.dev = netdev; 2813 if (jme->fpgaver) { 2814 jme->mii_if.phy_id = 0; 2815 for 
		    (i = 1 ; i < 32 ; ++i) {
			bmcr = jme_mdio_read(netdev, i, MII_BMCR);
			bmsr = jme_mdio_read(netdev, i, MII_BMSR);
			/* 0xFFFF means no device; all-zero means no PHY. */
			if (bmcr != 0xFFFFU && (bmcr != 0 || bmsr != 0)) {
				jme->mii_if.phy_id = i;
				break;
			}
		}

		if (!jme->mii_if.phy_id) {
			rc = -EIO;
			jeprintk(pdev, "Can not find phy_id.\n");
			goto err_out_unmap;
		}

		jme->reg_ghc |= GHC_LINK_POLL;
	} else {
		/* Production silicon: the PHY is always at address 1. */
		jme->mii_if.phy_id = 1;
	}
	/* Only the JMC250 (gigabit) part speaks GMII. */
	if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
		jme->mii_if.supports_gmii = true;
	else
		jme->mii_if.supports_gmii = false;
	jme->mii_if.mdio_read = jme_mdio_read;
	jme->mii_if.mdio_write = jme_mdio_write;

	jme_clear_pm(jme);
	jme_set_phyfifoa(jme);
	pci_read_config_byte(pdev, PCI_REVISION_ID, &jme->rev);
	if (!jme->fpgaver)
		jme_phy_init(jme);
	/* Keep the PHY powered down until the interface is opened. */
	jme_phy_off(jme);

	/*
	 * Reset MAC processor and reload EEPROM for MAC Address
	 */
	jme_reset_mac_processor(jme);
	rc = jme_reload_eeprom(jme);
	if (rc) {
		jeprintk(pdev,
			"Reload eeprom for reading MAC Address error.\n");
		goto err_out_unmap;
	}
	jme_load_macaddr(netdev);

	/*
	 * Tell stack that we are not ready to work until open()
	 */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	/*
	 * Register netdev
	 */
	rc = register_netdev(netdev);
	if (rc) {
		jeprintk(pdev, "Cannot register net device.\n");
		goto err_out_unmap;
	}

	netif_info(jme, probe, jme->dev, "%s%s ver:%x rev:%x macaddr:%pM\n",
		   (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
		   "JMC250 Gigabit Ethernet" :
		   (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
		   "JMC260 Fast Ethernet" : "Unknown",
		   (jme->fpgaver != 0) ? " (FPGA)" : "",
		   (jme->fpgaver != 0) ?
		   jme->fpgaver : jme->chiprev,
		   jme->rev, netdev->dev_addr);

	return 0;

	/* Error unwind: reverse order of the acquisitions above. */
err_out_unmap:
	iounmap(jme->regs);
err_out_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out:
	return rc;
}

/*
 * jme_remove_one - PCI remove: tear down one device.
 *
 * Mirrors jme_init_one(): unregister the netdev first so no new
 * traffic can arrive, then release the mapping, netdev, PCI regions
 * and the device itself.
 */
static void __devexit
jme_remove_one(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	unregister_netdev(netdev);
	iounmap(jme->regs);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

}

#ifdef CONFIG_PM
/*
 * jme_suspend - legacy PM suspend hook.
 * @pdev:  PCI device being suspended
 * @state: target system sleep state (unused beyond the PCI core)
 *
 * Quiesces the data path (IRQs, tasklets, engines), frees the rings
 * if the link was up, then either arms Wake-on-LAN (when reg_pmcs is
 * non-zero) or powers the PHY off, and drops to D3cold.
 */
static int
jme_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	/* Gate the link-change path while we tear things down. */
	atomic_dec(&jme->link_changing);

	netif_device_detach(netdev);
	netif_stop_queue(netdev);
	jme_stop_irq(jme);

	tasklet_disable(&jme->txclean_task);
	tasklet_disable(&jme->rxclean_task);
	tasklet_disable(&jme->rxempty_task);

	if (netif_carrier_ok(netdev)) {
		if (test_bit(JME_FLAG_POLL, &jme->flags))
			jme_polling_mode(jme);

		jme_stop_pcc_timer(jme);
		jme_reset_ghc_speed(jme);
		jme_disable_rx_engine(jme);
		jme_disable_tx_engine(jme);
		jme_reset_mac_processor(jme);
		jme_free_rx_resources(jme);
		jme_free_tx_resources(jme);
		netif_carrier_off(netdev);
		jme->phylink = 0;
	}

	tasklet_enable(&jme->txclean_task);
	tasklet_hi_enable(&jme->rxclean_task);
	tasklet_hi_enable(&jme->rxempty_task);

	pci_save_state(pdev);
	if (jme->reg_pmcs) {
		/* WoL requested: drop to 100M-half and arm wake events. */
		jme_set_100m_half(jme);

		if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
			jme_wait_link(jme);

		jwrite32(jme, JME_PMCS, jme->reg_pmcs);

		pci_enable_wake(pdev, PCI_D3cold, true);
	} else {
		/* No wake events wanted: save power, PHY fully off. */
		jme_phy_off(jme);
	}
	pci_set_power_state(pdev, PCI_D3cold);

	return 0;
}

/*
 * jme_resume - legacy PM resume hook.
 *
 * Restores PCI state, re-applies the user's ethtool settings (or
 * resets the PHY if none were stored), re-enables IRQs and kicks a
 * link re-check.
 */
static int
jme_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	jme_clear_pm(jme);
	pci_restore_state(pdev);

	if (test_bit(JME_FLAG_SSET, &jme->flags))
		jme_set_settings(netdev, &jme->old_ecmd);
	else
		jme_reset_phy_processor(jme);

	jme_start_irq(jme);
	netif_device_attach(netdev);

	/* Re-open the link-change path gated in jme_suspend(). */
	atomic_inc(&jme->link_changing);

	jme_reset_link(jme);

	return 0;
}
#endif

/* PCI IDs this driver binds to. */
static DEFINE_PCI_DEVICE_TABLE(jme_pci_tbl) = {
	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) },
	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) },
	{ }
};

static struct pci_driver jme_driver = {
	.name           = DRV_NAME,
	.id_table       = jme_pci_tbl,
	.probe          = jme_init_one,
	.remove         = __devexit_p(jme_remove_one),
#ifdef CONFIG_PM
	.suspend        = jme_suspend,
	.resume         = jme_resume,
#endif /* CONFIG_PM */
};

/* Module entry: announce ourselves and register with the PCI core. */
static int __init
jme_init_module(void)
{
	printk(KERN_INFO PFX "JMicron JMC2XX ethernet "
	       "driver version %s\n", DRV_VERSION);
	return pci_register_driver(&jme_driver);
}

/* Module exit: unregister; the PCI core calls remove() per device. */
static void __exit
jme_cleanup_module(void)
{
	pci_unregister_driver(&jme_driver);
}

module_init(jme_init_module);
module_exit(jme_cleanup_module);

MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, jme_pci_tbl);