Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
atl1.c at v2.6.26-rc8 (3724 lines, 103 kB)
/*
 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
 * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
 * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com>
 *
 * Derived from Intel e1000 driver
 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 *
 * Contact Information:
 * Xiong Huang <xiong_huang@attansic.com>
 * Attansic Technology Corp. 3F 147, Xianzheng 9th Road, Zhubei,
 * Xinzhu 302, TAIWAN, REPUBLIC OF CHINA
 *
 * Chris Snook <csnook@redhat.com>
 * Jay Cliburn <jcliburn@gmail.com>
 *
 * This version is adapted from the Attansic reference driver for
 * inclusion in the Linux kernel.  It is currently under heavy development.
 * A very incomplete list of things that need to be dealt with:
 *
 * TODO:
 * Add more ethtool functions.
 * Fix abstruse irq enable/disable condition described here:
 *	http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2
 *
 * NEEDS TESTING:
 * VLAN
 * multicast
 * promiscuous mode
 * interrupt coalescing
 * SMP torture testing
 */

#include <asm/atomic.h>
#include <asm/byteorder.h>

#include <linux/compiler.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/hardirq.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/irqflags.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/pm.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tcp.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include <net/checksum.h>

#include "atl1.h"

/* Temporary hack for merging atl1 and atl2 */
#include "atlx.c"

/*
 * This is the only thing that needs to be changed to adjust the
 * maximum number of ports that the driver can manage.
 */
#define ATL1_MAX_NIC 4

#define OPTION_UNSET	-1
#define OPTION_DISABLED	0
#define OPTION_ENABLED	1

#define ATL1_PARAM_INIT { [0 ... ATL1_MAX_NIC] = OPTION_UNSET }

/*
 * Interrupt Moderate Timer in units of 2 us
 *
 * Valid Range: 10-65535
 *
 * Default Value: 100 (200us)
 */
static int __devinitdata int_mod_timer[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
static int num_int_mod_timer;
module_param_array_named(int_mod_timer, int_mod_timer, int,
	&num_int_mod_timer, 0);
MODULE_PARM_DESC(int_mod_timer, "Interrupt moderator timer");
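/*
 * Added worked example (not in the upstream file): the hardware counts
 * this timer in 2 us ticks, so a stored value of 100 yields 100 * 2 us
 * = 200 us between moderated interrupts.  As a hypothetical usage
 * example, loading the module with
 *
 *	modprobe atl1 int_mod_timer=50
 *
 * would request 50 * 2 us = 100 us for the first board, a value that
 * atl1_validate_option() below accepts because it lies within
 * MIN_INT_MOD_CNT (50) and MAX_INT_MOD_CNT (65000).
 */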
#define DEFAULT_INT_MOD_CNT	100	/* 200us */
#define MAX_INT_MOD_CNT		65000
#define MIN_INT_MOD_CNT		50

struct atl1_option {
	enum { enable_option, range_option, list_option } type;
	char *name;
	char *err;
	int def;
	union {
		struct {	/* range_option info */
			int min;
			int max;
		} r;
		struct {	/* list_option info */
			int nr;
			struct atl1_opt_list {
				int i;
				char *str;
			} *p;
		} l;
	} arg;
};

static int __devinit atl1_validate_option(int *value, struct atl1_option *opt,
	struct pci_dev *pdev)
{
	if (*value == OPTION_UNSET) {
		*value = opt->def;
		return 0;
	}

	switch (opt->type) {
	case enable_option:
		switch (*value) {
		case OPTION_ENABLED:
			dev_info(&pdev->dev, "%s enabled\n", opt->name);
			return 0;
		case OPTION_DISABLED:
			dev_info(&pdev->dev, "%s disabled\n", opt->name);
			return 0;
		}
		break;
	case range_option:
		if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
			dev_info(&pdev->dev, "%s set to %i\n", opt->name,
				*value);
			return 0;
		}
		break;
	case list_option:{
			int i;
			struct atl1_opt_list *ent;

			for (i = 0; i < opt->arg.l.nr; i++) {
				ent = &opt->arg.l.p[i];
				if (*value == ent->i) {
					if (ent->str[0] != '\0')
						dev_info(&pdev->dev, "%s\n",
							ent->str);
					return 0;
				}
			}
		}
		break;

	default:
		break;
	}

	dev_info(&pdev->dev, "invalid %s specified (%i) %s\n",
		opt->name, *value, opt->err);
	*value = opt->def;
	return -1;
}

/*
 * atl1_check_options - Range Checking for Command Line Parameters
 * @adapter: board private structure
 *
 * This routine checks all command line parameters for valid user
 * input. If an invalid value is given, or if no user-specified
 * value exists, a default value is used. The final value is stored
 * in a variable in the adapter structure.
 */
void __devinit atl1_check_options(struct atl1_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int bd = adapter->bd_number;
	if (bd >= ATL1_MAX_NIC) {
		dev_notice(&pdev->dev, "no configuration for board#%i\n", bd);
		dev_notice(&pdev->dev, "using defaults for all values\n");
	}
	{	/* Interrupt Moderate Timer */
		struct atl1_option opt = {
			.type = range_option,
			.name = "Interrupt Moderator Timer",
			.err = "using default of "
				__MODULE_STRING(DEFAULT_INT_MOD_CNT),
			.def = DEFAULT_INT_MOD_CNT,
			.arg = {.r = {.min = MIN_INT_MOD_CNT,
					.max = MAX_INT_MOD_CNT} }
		};
		int val;
		if (num_int_mod_timer > bd) {
			val = int_mod_timer[bd];
			atl1_validate_option(&val, &opt, pdev);
			adapter->imt = (u16) val;
		} else
			adapter->imt = (u16) (opt.def);
	}
}

/*
 * atl1_pci_tbl - PCI Device ID Table
 */
static const struct pci_device_id atl1_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)},
	/* required last entry */
	{0,}
};
MODULE_DEVICE_TABLE(pci, atl1_pci_tbl);

static const u32 atl1_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
	NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Message level (0=none,...,16=all)");

/*
 * Reset the transmit and receive units; mask and clear all interrupts.
 * hw - Struct containing variables accessed by shared code
 * return : 0 or idle status (if error)
 */
static s32 atl1_reset_hw(struct atl1_hw *hw)
{
	struct pci_dev *pdev = hw->back->pdev;
	struct atl1_adapter *adapter = hw->back;
	u32 icr;
	int i;

	/*
	 * Clear the interrupt mask to stop the board from generating
	 * interrupts, and clear any pending interrupt events
	 */
	/*
	 * iowrite32(0, hw->hw_addr + REG_IMR);
	 * iowrite32(0xffffffff, hw->hw_addr + REG_ISR);
	 */

	/*
	 * Issue a soft reset to the MAC.  This will reset the chip's
	 * transmit, receive and DMA units.  It will not affect
	 * the current PCI configuration.  The global reset bit is self-
	 * clearing, and should clear within a microsecond.
	 */
	iowrite32(MASTER_CTRL_SOFT_RST, hw->hw_addr + REG_MASTER_CTRL);
	ioread32(hw->hw_addr + REG_MASTER_CTRL);

	iowrite16(1, hw->hw_addr + REG_PHY_ENABLE);
	ioread16(hw->hw_addr + REG_PHY_ENABLE);

	/* delay about 1ms */
	msleep(1);

	/* wait at least 10ms for all modules to be idle */
	for (i = 0; i < 10; i++) {
		icr = ioread32(hw->hw_addr + REG_IDLE_STATUS);
		if (!icr)
			break;
		/* delay 1 ms */
		msleep(1);
		/* FIXME: still the right way to do this? */
		cpu_relax();
	}

	if (icr) {
		if (netif_msg_hw(adapter))
			dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr);
		return icr;
	}

	return 0;
}
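/*
 * Added note (not in the upstream file): each iowrite32()/iowrite16()
 * above is immediately followed by a read-back of the same register.
 * MMIO writes over PCI may be posted (buffered), so the dummy read
 * forces the write to reach the device before the following delay
 * starts; without it, the 1 ms sleep could elapse before the reset
 * command has even arrived at the MAC.
 */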
/*
 * EEPROM support functions
 *
 * atl1_check_eeprom_exist
 * returns 0 if an EEPROM is present
 */
static int atl1_check_eeprom_exist(struct atl1_hw *hw)
{
	u32 value;
	value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
	if (value & SPI_FLASH_CTRL_EN_VPD) {
		value &= ~SPI_FLASH_CTRL_EN_VPD;
		iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
	}

	value = ioread16(hw->hw_addr + REG_PCIE_CAP_LIST);
	return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
}

static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value)
{
	int i;
	u32 control;

	if (offset & 3)
		/* address is not dword-aligned */
		return false;

	iowrite32(0, hw->hw_addr + REG_VPD_DATA);
	control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
	iowrite32(control, hw->hw_addr + REG_VPD_CAP);
	ioread32(hw->hw_addr + REG_VPD_CAP);

	for (i = 0; i < 10; i++) {
		msleep(2);
		control = ioread32(hw->hw_addr + REG_VPD_CAP);
		if (control & VPD_CAP_VPD_FLAG)
			break;
	}
	if (control & VPD_CAP_VPD_FLAG) {
		*p_value = ioread32(hw->hw_addr + REG_VPD_DATA);
		return true;
	}
	/* timeout */
	return false;
}

/*
 * Reads the value from a PHY register
 * hw - Struct containing variables accessed by shared code
 * reg_addr - address of the PHY register to read
 */
s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
{
	u32 val;
	int i;

	val = ((u32) (reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
		MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 <<
		MDIO_CLK_SEL_SHIFT;
	iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
	ioread32(hw->hw_addr + REG_MDIO_CTRL);

	for (i = 0; i < MDIO_WAIT_TIMES; i++) {
		udelay(2);
		val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
		if (!(val & (MDIO_START | MDIO_BUSY)))
			break;
	}
	if (!(val & (MDIO_START | MDIO_BUSY))) {
		*phy_data = (u16) val;
		return 0;
	}
	return ATLX_ERR_PHY;
}

#define CUSTOM_SPI_CS_SETUP	2
#define CUSTOM_SPI_CLK_HI	2
#define CUSTOM_SPI_CLK_LO	2
#define CUSTOM_SPI_CS_HOLD	2
#define CUSTOM_SPI_CS_HI	3

static bool atl1_spi_read(struct atl1_hw *hw, u32 addr, u32 *buf)
{
	int i;
	u32 value;

	iowrite32(0, hw->hw_addr + REG_SPI_DATA);
	iowrite32(addr, hw->hw_addr + REG_SPI_ADDR);

	value = SPI_FLASH_CTRL_WAIT_READY |
		(CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) <<
		SPI_FLASH_CTRL_CS_SETUP_SHIFT | (CUSTOM_SPI_CLK_HI &
		SPI_FLASH_CTRL_CLK_HI_MASK) <<
		SPI_FLASH_CTRL_CLK_HI_SHIFT | (CUSTOM_SPI_CLK_LO &
		SPI_FLASH_CTRL_CLK_LO_MASK) <<
		SPI_FLASH_CTRL_CLK_LO_SHIFT | (CUSTOM_SPI_CS_HOLD &
		SPI_FLASH_CTRL_CS_HOLD_MASK) <<
		SPI_FLASH_CTRL_CS_HOLD_SHIFT | (CUSTOM_SPI_CS_HI &
		SPI_FLASH_CTRL_CS_HI_MASK) <<
		SPI_FLASH_CTRL_CS_HI_SHIFT | (1 & SPI_FLASH_CTRL_INS_MASK) <<
		SPI_FLASH_CTRL_INS_SHIFT;

	iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);

	value |= SPI_FLASH_CTRL_START;
	iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
	ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);

	for (i = 0; i < 10; i++) {
		msleep(1);
		value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
		if (!(value & SPI_FLASH_CTRL_START))
			break;
	}

	if (value & SPI_FLASH_CTRL_START)
		return false;

	*buf = ioread32(hw->hw_addr + REG_SPI_DATA);

	return true;
}
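/*
 * Added note (not in the upstream file): atl1_get_permanent_address()
 * below parses the EEPROM/flash content as a stream of key/value
 * dwords.  A dword whose low byte is 0x5A is a key; its upper 16 bits
 * name a register offset, and the next dword holds that register's
 * value.  Hypothetical example (the offset 0x1488 is invented for
 * illustration only):
 *
 *	0x1488005A	key: the next dword belongs to register 0x1488
 *	0x6AF600DC	value stored for that register
 *
 * Parsing stops at the first dword that is neither a key nor an
 * expected value ("data end"), or on a read error.
 */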
/*
 * get_permanent_address
 * returns 0 if a valid MAC address was obtained
 */
static int atl1_get_permanent_address(struct atl1_hw *hw)
{
	u32 addr[2];
	u32 i, control;
	u16 reg;
	u8 eth_addr[ETH_ALEN];
	bool key_valid;

	if (is_valid_ether_addr(hw->perm_mac_addr))
		return 0;

	/* init */
	addr[0] = addr[1] = 0;

	if (!atl1_check_eeprom_exist(hw)) {
		reg = 0;
		key_valid = false;
		/* Read out all EEPROM content */
		i = 0;
		while (1) {
			if (atl1_read_eeprom(hw, i + 0x100, &control)) {
				if (key_valid) {
					if (reg == REG_MAC_STA_ADDR)
						addr[0] = control;
					else if (reg ==
						(REG_MAC_STA_ADDR + 4))
						addr[1] = control;
					key_valid = false;
				} else if ((control & 0xff) == 0x5A) {
					key_valid = true;
					reg = (u16) (control >> 16);
				} else
					break;
			} else
				/* read error */
				break;
			i += 4;
		}

		*(u32 *) &eth_addr[2] = swab32(addr[0]);
		*(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
		if (is_valid_ether_addr(eth_addr)) {
			memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
			return 0;
		}
	}

	/* see if SPI flash exists */
	addr[0] = addr[1] = 0;
	reg = 0;
	key_valid = false;
	i = 0;
	while (1) {
		if (atl1_spi_read(hw, i + 0x1f000, &control)) {
			if (key_valid) {
				if (reg == REG_MAC_STA_ADDR)
					addr[0] = control;
				else if (reg == (REG_MAC_STA_ADDR + 4))
					addr[1] = control;
				key_valid = false;
			} else if ((control & 0xff) == 0x5A) {
				key_valid = true;
				reg = (u16) (control >> 16);
			} else
				/* data end */
				break;
		} else
			/* read error */
			break;
		i += 4;
	}

	*(u32 *) &eth_addr[2] = swab32(addr[0]);
	*(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
	if (is_valid_ether_addr(eth_addr)) {
		memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
		return 0;
	}

	/*
	 * On some motherboards, the MAC address is written by the
	 * BIOS directly to the MAC register during POST, and is
	 * not stored in eeprom. If all else thus far has failed
	 * to fetch the permanent MAC address, try reading it directly.
	 */
	addr[0] = ioread32(hw->hw_addr + REG_MAC_STA_ADDR);
	addr[1] = ioread16(hw->hw_addr + (REG_MAC_STA_ADDR + 4));
	*(u32 *) &eth_addr[2] = swab32(addr[0]);
	*(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
	if (is_valid_ether_addr(eth_addr)) {
		memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
		return 0;
	}

	return 1;
}

/*
 * Reads the adapter's MAC address from the EEPROM
 * hw - Struct containing variables accessed by shared code
 */
s32 atl1_read_mac_addr(struct atl1_hw *hw)
{
	u16 i;

	if (atl1_get_permanent_address(hw))
		random_ether_addr(hw->perm_mac_addr);

	for (i = 0; i < ETH_ALEN; i++)
		hw->mac_addr[i] = hw->perm_mac_addr[i];
	return 0;
}

/*
 * Hashes an address to determine its location in the multicast table
 * hw - Struct containing variables accessed by shared code
 * mc_addr - the multicast address to hash
 *
 * atl1_hash_mc_addr
 * purpose
 *	set the hash value for a multicast address
 * hash calculation:
 *	1. calculate a 32-bit CRC for the multicast address
 *	2. reverse the CRC, MSB to LSB
 */
u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
{
	u32 crc32, value = 0;
	int i;

	crc32 = ether_crc_le(6, mc_addr);
	for (i = 0; i < 32; i++)
		value |= (((crc32 >> i) & 1) << (31 - i));

	return value;
}
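/*
 * Added worked example (not in the upstream file): the loop above is a
 * plain 32-bit reversal.  If ether_crc_le() returns 0x00000001 (only
 * bit 0 set), the reversed hash is 0x80000000.  atl1_hash_set() below
 * then consumes only the top bits of that hash: bit 31 selects which
 * of the two 32-bit hash-table registers to touch (here register 1),
 * and bits 30:26 select the bit within it (here bit 0).
 */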
/*
 * Sets the bit in the multicast table corresponding to the hash value.
 * hw - Struct containing variables accessed by shared code
 * hash_value - Multicast address hash value
 */
void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
{
	u32 hash_bit, hash_reg;
	u32 mta;

	/*
	 * The HASH Table is a register array of 2 32-bit registers.
	 * It is treated like an array of 64 bits. We want to set
	 * bit BitArray[hash_value]. So we figure out what register
	 * the bit is in, read it, OR in the new bit, then write
	 * back the new value. The register is determined by the
	 * upper 7 bits of the hash value and the bit within that
	 * register are determined by the lower 5 bits of the value.
	 */
	hash_reg = (hash_value >> 31) & 0x1;
	hash_bit = (hash_value >> 26) & 0x1F;
	mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
	mta |= (1 << hash_bit);
	iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
}

/*
 * Writes a value to a PHY register
 * hw - Struct containing variables accessed by shared code
 * reg_addr - address of the PHY register to write
 * data - data to write to the PHY
 */
static s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data)
{
	int i;
	u32 val;

	val = ((u32) (phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
		(reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
		MDIO_SUP_PREAMBLE |
		MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
	iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
	ioread32(hw->hw_addr + REG_MDIO_CTRL);

	for (i = 0; i < MDIO_WAIT_TIMES; i++) {
		udelay(2);
		val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
		if (!(val & (MDIO_START | MDIO_BUSY)))
			break;
	}

	if (!(val & (MDIO_START | MDIO_BUSY)))
		return 0;

	return ATLX_ERR_PHY;
}

/*
 * Takes the L001's PHY out of its power-saving state (works around a bug)
 * hw - Struct containing variables accessed by shared code
 * At power-on, the L001's PHY is always in a power-saving state
 * (gigabit link disabled)
 */
static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw)
{
	s32 ret;
	ret = atl1_write_phy_reg(hw, 29, 0x0029);
	if (ret)
		return ret;
	return atl1_write_phy_reg(hw, 30, 0);
}
/*
 * Resets the PHY and makes all configuration take effect
 * hw - Struct containing variables accessed by shared code
 *
 * Sets bits 15 and 12 of the MII Control register (for the F001 bug)
 */
static s32 atl1_phy_reset(struct atl1_hw *hw)
{
	struct pci_dev *pdev = hw->back->pdev;
	struct atl1_adapter *adapter = hw->back;
	s32 ret_val;
	u16 phy_data;

	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
	    hw->media_type == MEDIA_TYPE_1000M_FULL)
		phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
	else {
		switch (hw->media_type) {
		case MEDIA_TYPE_100M_FULL:
			phy_data =
				MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
				MII_CR_RESET;
			break;
		case MEDIA_TYPE_100M_HALF:
			phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
			break;
		case MEDIA_TYPE_10M_FULL:
			phy_data =
				MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
			break;
		default:
			/* MEDIA_TYPE_10M_HALF: */
			phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
			break;
		}
	}

	ret_val = atl1_write_phy_reg(hw, MII_BMCR, phy_data);
	if (ret_val) {
		u32 val;
		int i;
		/* pcie serdes link may be down! */
		if (netif_msg_hw(adapter))
			dev_dbg(&pdev->dev, "pcie phy link down\n");

		for (i = 0; i < 25; i++) {
			msleep(1);
			val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
			if (!(val & (MDIO_START | MDIO_BUSY)))
				break;
		}

		if ((val & (MDIO_START | MDIO_BUSY)) != 0) {
			if (netif_msg_hw(adapter))
				dev_warn(&pdev->dev,
					"pcie link down at least 25ms\n");
			return ret_val;
		}
	}
	return 0;
}

/*
 * Configures PHY autoneg and flow control advertisement settings
 * hw - Struct containing variables accessed by shared code
 */
static s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw)
{
	s32 ret_val;
	s16 mii_autoneg_adv_reg;
	s16 mii_1000t_ctrl_reg;

	/* Read the MII Auto-Neg Advertisement Register (Address 4). */
	mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;

	/* Read the MII 1000Base-T Control Register (Address 9). */
	mii_1000t_ctrl_reg = MII_ATLX_CR_1000T_DEFAULT_CAP_MASK;

	/*
	 * First we clear all the 10/100 mb speed bits in the Auto-Neg
	 * Advertisement Register (Address 4) and the 1000 mb speed bits in
	 * the 1000Base-T Control Register (Address 9).
	 */
	mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
	mii_1000t_ctrl_reg &= ~MII_ATLX_CR_1000T_SPEED_MASK;

	/*
	 * Need to parse media_type and set up
	 * the appropriate PHY registers.
	 */
	switch (hw->media_type) {
	case MEDIA_TYPE_AUTO_SENSOR:
		mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
					MII_AR_10T_FD_CAPS |
					MII_AR_100TX_HD_CAPS |
					MII_AR_100TX_FD_CAPS);
		mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
		break;

	case MEDIA_TYPE_1000M_FULL:
		mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
		break;

	case MEDIA_TYPE_100M_FULL:
		mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
		break;

	case MEDIA_TYPE_100M_HALF:
		mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
		break;

	case MEDIA_TYPE_10M_FULL:
		mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
		break;

	default:
		mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
		break;
	}

	/* flow control fixed to enable all */
	mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);

	hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
	hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;

	ret_val = atl1_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
	if (ret_val)
		return ret_val;

	ret_val = atl1_write_phy_reg(hw, MII_ATLX_CR, mii_1000t_ctrl_reg);
	if (ret_val)
		return ret_val;

	return 0;
}
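/*
 * Added note (not in the upstream file): selecting a fixed media type
 * acts on two levels.  atl1_phy_setup_autoneg_adv() above narrows the
 * advertised capabilities; with media_type == MEDIA_TYPE_100M_FULL,
 * for example, only MII_AR_100TX_FD_CAPS plus the pause bits remain
 * set in the advertisement register.  For the non-autoneg media types,
 * atl1_phy_reset() additionally programs MII_BMCR with the matching
 * forced speed/duplex bits.
 */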
/*
 * Configures link settings.
 * hw - Struct containing variables accessed by shared code
 * Assumes the hardware has previously been reset and the
 * transmitter and receiver are not enabled.
 */
static s32 atl1_setup_link(struct atl1_hw *hw)
{
	struct pci_dev *pdev = hw->back->pdev;
	struct atl1_adapter *adapter = hw->back;
	s32 ret_val;

	/*
	 * Options:
	 * The PHY will advertise value(s) parsed from
	 * autoneg_advertised and fc.
	 * No matter what autoneg is set to, we will not wait
	 * for the link result.
	 */
	ret_val = atl1_phy_setup_autoneg_adv(hw);
	if (ret_val) {
		if (netif_msg_link(adapter))
			dev_dbg(&pdev->dev,
				"error setting up autonegotiation\n");
		return ret_val;
	}
	/* SW.Reset , En-Auto-Neg if needed */
	ret_val = atl1_phy_reset(hw);
	if (ret_val) {
		if (netif_msg_link(adapter))
			dev_dbg(&pdev->dev, "error resetting phy\n");
		return ret_val;
	}
	hw->phy_configured = true;
	return ret_val;
}

static void atl1_init_flash_opcode(struct atl1_hw *hw)
{
	if (hw->flash_vendor >= ARRAY_SIZE(flash_table))
		/* Atmel */
		hw->flash_vendor = 0;

	/* Init OP table */
	iowrite8(flash_table[hw->flash_vendor].cmd_program,
		hw->hw_addr + REG_SPI_FLASH_OP_PROGRAM);
	iowrite8(flash_table[hw->flash_vendor].cmd_sector_erase,
		hw->hw_addr + REG_SPI_FLASH_OP_SC_ERASE);
	iowrite8(flash_table[hw->flash_vendor].cmd_chip_erase,
		hw->hw_addr + REG_SPI_FLASH_OP_CHIP_ERASE);
	iowrite8(flash_table[hw->flash_vendor].cmd_rdid,
		hw->hw_addr + REG_SPI_FLASH_OP_RDID);
	iowrite8(flash_table[hw->flash_vendor].cmd_wren,
		hw->hw_addr + REG_SPI_FLASH_OP_WREN);
	iowrite8(flash_table[hw->flash_vendor].cmd_rdsr,
		hw->hw_addr + REG_SPI_FLASH_OP_RDSR);
	iowrite8(flash_table[hw->flash_vendor].cmd_wrsr,
		hw->hw_addr + REG_SPI_FLASH_OP_WRSR);
	iowrite8(flash_table[hw->flash_vendor].cmd_read,
		hw->hw_addr + REG_SPI_FLASH_OP_READ);
}

/*
 * Performs basic configuration of the adapter.
 * hw - Struct containing variables accessed by shared code
 * Assumes that the controller has previously been reset and is in a
 * post-reset uninitialized state. Initializes the multicast table,
 * and calls routines to set up the link.
 * Leaves the transmit and receive units disabled and uninitialized.
 */
static s32 atl1_init_hw(struct atl1_hw *hw)
{
	u32 ret_val = 0;

	/* Zero out the Multicast HASH table */
	iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
	/* clear the old settings from the multicast hash table */
	iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));

	atl1_init_flash_opcode(hw);

	if (!hw->phy_configured) {
		/* enable GPHY LinkChange Interrupt */
		ret_val = atl1_write_phy_reg(hw, 18, 0xC00);
		if (ret_val)
			return ret_val;
		/* make PHY out of power-saving state */
		ret_val = atl1_phy_leave_power_saving(hw);
		if (ret_val)
			return ret_val;
		/* Call a subroutine to configure the link */
		ret_val = atl1_setup_link(hw);
	}
	return ret_val;
}
/*
 * Detects the current speed and duplex settings of the hardware.
 * hw - Struct containing variables accessed by shared code
 * speed - Speed of the connection
 * duplex - Duplex setting of the connection
 */
static s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex)
{
	struct pci_dev *pdev = hw->back->pdev;
	struct atl1_adapter *adapter = hw->back;
	s32 ret_val;
	u16 phy_data;

	/* Read the PHY Specific Status Register (register 17) */
	ret_val = atl1_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data);
	if (ret_val)
		return ret_val;

	if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED))
		return ATLX_ERR_PHY_RES;

	switch (phy_data & MII_ATLX_PSSR_SPEED) {
	case MII_ATLX_PSSR_1000MBS:
		*speed = SPEED_1000;
		break;
	case MII_ATLX_PSSR_100MBS:
		*speed = SPEED_100;
		break;
	case MII_ATLX_PSSR_10MBS:
		*speed = SPEED_10;
		break;
	default:
		if (netif_msg_hw(adapter))
			dev_dbg(&pdev->dev, "error getting speed\n");
		return ATLX_ERR_PHY_SPEED;
		break;
	}
	if (phy_data & MII_ATLX_PSSR_DPLX)
		*duplex = FULL_DUPLEX;
	else
		*duplex = HALF_DUPLEX;

	return 0;
}

void atl1_set_mac_addr(struct atl1_hw *hw)
{
	u32 value;
	/*
	 * 00-0B-6A-F6-00-DC
	 * 0: 6AF600DC  1: 000B
	 * low dword
	 */
	value = (((u32) hw->mac_addr[2]) << 24) |
		(((u32) hw->mac_addr[3]) << 16) |
		(((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5]));
	iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
	/* high dword */
	value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
	iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2));
}
/*
 * atl1_sw_init - Initialize general software structures (struct atl1_adapter)
 * @adapter: board private structure to initialize
 *
 * atl1_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 */
static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
{
	struct atl1_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	hw->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	adapter->wol = 0;
	adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
	adapter->ict = 50000;		/* 100ms */
	adapter->link_speed = SPEED_0;	/* hardware init */
	adapter->link_duplex = FULL_DUPLEX;

	hw->phy_configured = false;
	hw->preamble_len = 7;
	hw->ipgt = 0x60;
	hw->min_ifg = 0x50;
	hw->ipgr1 = 0x40;
	hw->ipgr2 = 0x60;
	hw->max_retry = 0xf;
	hw->lcol = 0x37;
	hw->jam_ipg = 7;
	hw->rfd_burst = 8;
	hw->rrd_burst = 8;
	hw->rfd_fetch_gap = 1;
	hw->rx_jumbo_th = adapter->rx_buffer_len / 8;
	hw->rx_jumbo_lkah = 1;
	hw->rrd_ret_timer = 16;
	hw->tpd_burst = 4;
	hw->tpd_fetch_th = 16;
	hw->txf_burst = 0x100;
	hw->tx_jumbo_task_th = (hw->max_frame_size + 7) >> 3;
	hw->tpd_fetch_gap = 1;
	hw->rcb_value = atl1_rcb_64;
	hw->dma_ord = atl1_dma_ord_enh;
	hw->dmar_block = atl1_dma_req_256;
	hw->dmaw_block = atl1_dma_req_256;
	hw->cmb_rrd = 4;
	hw->cmb_tpd = 4;
	hw->cmb_rx_timer = 1;	/* about 2us */
	hw->cmb_tx_timer = 1;	/* about 2us */
	hw->smb_timer = 100000;	/* about 200ms */

	spin_lock_init(&adapter->lock);
	spin_lock_init(&adapter->mb_lock);

	return 0;
}
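/*
 * Added worked example (not in the upstream file): with the default
 * MTU of 1500, max_frame_size = 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN)
 * + 4 (VLAN_HLEN) = 1522 bytes.  rx_buffer_len rounds that up to the
 * next multiple of 8: (1522 + 7) & ~7 = 1528 bytes per receive buffer.
 */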
static int mdio_read(struct net_device *netdev, int phy_id, int reg_num)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	u16 result;

	atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result);

	return result;
}

static void mdio_write(struct net_device *netdev, int phy_id, int reg_num,
	int val)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);

	atl1_write_phy_reg(&adapter->hw, reg_num, val);
}

/*
 * atl1_mii_ioctl -
 * @netdev:
 * @ifreq:
 * @cmd:
 */
static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;
	int retval;

	if (!netif_running(netdev))
		return -EINVAL;

	spin_lock_irqsave(&adapter->lock, flags);
	retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
	spin_unlock_irqrestore(&adapter->lock, flags);

	return retval;
}

/*
 * atl1_setup_mem_resources - allocate Tx / RX descriptor resources
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
	struct atl1_ring_header *ring_header = &adapter->ring_header;
	struct pci_dev *pdev = adapter->pdev;
	int size;
	u8 offset = 0;

	size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count);
	tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
	if (unlikely(!tpd_ring->buffer_info)) {
		if (netif_msg_drv(adapter))
			dev_err(&pdev->dev, "kzalloc failed , size = D%d\n",
				size);
		goto err_nomem;
	}
	rfd_ring->buffer_info =
		(struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count);

	/*
	 * real ring DMA buffer
	 * each ring/block may need up to 8 bytes for alignment, hence the
	 * additional 40 bytes tacked onto the end.
	 */
	ring_header->size = size =
		sizeof(struct tx_packet_desc) * tpd_ring->count
		+ sizeof(struct rx_free_desc) * rfd_ring->count
		+ sizeof(struct rx_return_desc) * rrd_ring->count
		+ sizeof(struct coals_msg_block)
		+ sizeof(struct stats_msg_block)
		+ 40;

	ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
		&ring_header->dma);
	if (unlikely(!ring_header->desc)) {
		if (netif_msg_drv(adapter))
			dev_err(&pdev->dev, "pci_alloc_consistent failed\n");
		goto err_nomem;
	}

	memset(ring_header->desc, 0, ring_header->size);

	/* init TPD ring */
	tpd_ring->dma = ring_header->dma;
	offset = (tpd_ring->dma & 0x7) ? (8 - (ring_header->dma & 0x7)) : 0;
	tpd_ring->dma += offset;
	tpd_ring->desc = (u8 *) ring_header->desc + offset;
	tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count;

	/* init RFD ring */
	rfd_ring->dma = tpd_ring->dma + tpd_ring->size;
	offset = (rfd_ring->dma & 0x7) ? (8 - (rfd_ring->dma & 0x7)) : 0;
	rfd_ring->dma += offset;
	rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset);
	rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count;

	/* init RRD ring */
	rrd_ring->dma = rfd_ring->dma + rfd_ring->size;
	offset = (rrd_ring->dma & 0x7) ? (8 - (rrd_ring->dma & 0x7)) : 0;
	rrd_ring->dma += offset;
	rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset);
	rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count;

	/* init CMB */
	adapter->cmb.dma = rrd_ring->dma + rrd_ring->size;
	offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0;
	adapter->cmb.dma += offset;
	adapter->cmb.cmb = (struct coals_msg_block *)
		((u8 *) rrd_ring->desc + (rrd_ring->size + offset));

	/* init SMB */
	adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block);
	offset = (adapter->smb.dma & 0x7) ? (8 - (adapter->smb.dma & 0x7)) : 0;
	adapter->smb.dma += offset;
	adapter->smb.smb = (struct stats_msg_block *)
		((u8 *) adapter->cmb.cmb +
		(sizeof(struct coals_msg_block) + offset));

	return 0;

err_nomem:
	kfree(tpd_ring->buffer_info);
	return -ENOMEM;
}
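/*
 * Added worked example (not in the upstream file): each block above is
 * aligned up to an 8-byte boundary with
 *	offset = (dma & 0x7) ? (8 - (dma & 0x7)) : 0;
 * e.g. a DMA address ending in ...0x3 gives offset = 8 - 3 = 5, moving
 * the block to ...0x8, while an already aligned address gives offset 0.
 * Five blocks (TPD, RFD, RRD, CMB, SMB) times up to 8 bytes each is
 * the worst case covered by the 40 spare bytes in ring_header->size.
 */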
static void atl1_init_ring_ptrs(struct atl1_adapter *adapter)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;

	atomic_set(&tpd_ring->next_to_use, 0);
	atomic_set(&tpd_ring->next_to_clean, 0);

	rfd_ring->next_to_clean = 0;
	atomic_set(&rfd_ring->next_to_use, 0);

	rrd_ring->next_to_use = 0;
	atomic_set(&rrd_ring->next_to_clean, 0);
}

/*
 * atl1_clean_rx_ring - Free RFD Buffers
 * @adapter: board private structure
 */
static void atl1_clean_rx_ring(struct atl1_adapter *adapter)
{
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
	struct atl1_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rfd_ring->count; i++) {
		buffer_info = &rfd_ring->buffer_info[i];
		if (buffer_info->dma) {
			pci_unmap_page(pdev, buffer_info->dma,
				buffer_info->length, PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
		}
		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
	}

	size = sizeof(struct atl1_buffer) * rfd_ring->count;
	memset(rfd_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rfd_ring->desc, 0, rfd_ring->size);

	rfd_ring->next_to_clean = 0;
	atomic_set(&rfd_ring->next_to_use, 0);

	rrd_ring->next_to_use = 0;
	atomic_set(&rrd_ring->next_to_clean, 0);
}

/*
 * atl1_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 */
static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tpd_ring->count; i++) {
		buffer_info = &tpd_ring->buffer_info[i];
		if (buffer_info->dma) {
			pci_unmap_page(pdev, buffer_info->dma,
				buffer_info->length, PCI_DMA_TODEVICE);
			buffer_info->dma = 0;
		}
	}

	for (i = 0; i < tpd_ring->count; i++) {
		buffer_info = &tpd_ring->buffer_info[i];
		if (buffer_info->skb) {
			dev_kfree_skb_any(buffer_info->skb);
			buffer_info->skb = NULL;
		}
	}

	size = sizeof(struct atl1_buffer) * tpd_ring->count;
	memset(tpd_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tpd_ring->desc, 0, tpd_ring->size);

	atomic_set(&tpd_ring->next_to_use, 0);
	atomic_set(&tpd_ring->next_to_clean, 0);
}
/*
 * atl1_free_ring_resources - Free Tx / RX descriptor Resources
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void atl1_free_ring_resources(struct atl1_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
	struct atl1_ring_header *ring_header = &adapter->ring_header;

	atl1_clean_tx_ring(adapter);
	atl1_clean_rx_ring(adapter);

	kfree(tpd_ring->buffer_info);
	pci_free_consistent(pdev, ring_header->size, ring_header->desc,
		ring_header->dma);

	tpd_ring->buffer_info = NULL;
	tpd_ring->desc = NULL;
	tpd_ring->dma = 0;

	rfd_ring->buffer_info = NULL;
	rfd_ring->desc = NULL;
	rfd_ring->dma = 0;

	rrd_ring->desc = NULL;
	rrd_ring->dma = 0;
}

static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
{
	u32 value;
	struct atl1_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	/* Config MAC CTRL Register */
	value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN;
	/* duplex */
	if (FULL_DUPLEX == adapter->link_duplex)
		value |= MAC_CTRL_DUPLX;
	/* speed */
	value |= ((u32) ((SPEED_1000 == adapter->link_speed) ?
		MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
		MAC_CTRL_SPEED_SHIFT);
	/* flow control */
	value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
	/* PAD & CRC */
	value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
	/* preamble length */
	value |= (((u32) adapter->hw.preamble_len
		& MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
	/* vlan */
	if (adapter->vlgrp)
		value |= MAC_CTRL_RMV_VLAN;
	/* rx checksum
	if (adapter->rx_csum)
		value |= MAC_CTRL_RX_CHKSUM_EN;
	*/
	/* filter mode */
	value |= MAC_CTRL_BC_EN;
	if (netdev->flags & IFF_PROMISC)
		value |= MAC_CTRL_PROMIS_EN;
	else if (netdev->flags & IFF_ALLMULTI)
		value |= MAC_CTRL_MC_ALL_EN;
	/* value |= MAC_CTRL_LOOPBACK; */
	iowrite32(value, hw->hw_addr + REG_MAC_CTRL);
}

static u32 atl1_check_link(struct atl1_adapter *adapter)
{
	struct atl1_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 ret_val;
	u16 speed, duplex, phy_data;
	int reconfig = 0;

	/* MII_BMSR must be read twice */
	atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
	atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
	if (!(phy_data & BMSR_LSTATUS)) {
		/* link down */
		if (netif_carrier_ok(netdev)) {
			/* old link state: Up */
			if (netif_msg_link(adapter))
				dev_info(&adapter->pdev->dev, "link is down\n");
			adapter->link_speed = SPEED_0;
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
		return 0;
	}

	/* Link Up */
	ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
	if (ret_val)
		return ret_val;

	switch (hw->media_type) {
	case MEDIA_TYPE_1000M_FULL:
		if (speed != SPEED_1000 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_100M_FULL:
		if (speed != SPEED_100 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_100M_HALF:
		if (speed != SPEED_100 || duplex != HALF_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_10M_FULL:
		if (speed != SPEED_10 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_10M_HALF:
		if (speed != SPEED_10 || duplex != HALF_DUPLEX)
			reconfig = 1;
		break;
	}

	/* link result is our setting */
	if (!reconfig) {
		if (adapter->link_speed != speed
		    || adapter->link_duplex != duplex) {
			adapter->link_speed = speed;
			adapter->link_duplex = duplex;
			atl1_setup_mac_ctrl(adapter);
			if (netif_msg_link(adapter))
				dev_info(&adapter->pdev->dev,
					"%s link is up %d Mbps %s\n",
					netdev->name, adapter->link_speed,
					adapter->link_duplex == FULL_DUPLEX ?
					"full duplex" : "half duplex");
		}
		if (!netif_carrier_ok(netdev)) {
			/* Link down -> Up */
			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
		return 0;
	}

	/* change original link status */
	if (netif_carrier_ok(netdev)) {
		adapter->link_speed = SPEED_0;
		netif_carrier_off(netdev);
		netif_stop_queue(netdev);
	}

	if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR &&
	    hw->media_type != MEDIA_TYPE_1000M_FULL) {
		switch (hw->media_type) {
		case MEDIA_TYPE_100M_FULL:
			phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
				MII_CR_RESET;
			break;
		case MEDIA_TYPE_100M_HALF:
			phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
			break;
		case MEDIA_TYPE_10M_FULL:
			phy_data =
				MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
			break;
		default:
			/* MEDIA_TYPE_10M_HALF: */
			phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
			break;
		}
		atl1_write_phy_reg(hw, MII_BMCR, phy_data);
		return 0;
	}

	/* auto-neg, insert timer to re-config phy */
	if (!adapter->phy_timer_pending) {
		adapter->phy_timer_pending = true;
		mod_timer(&adapter->phy_config_timer, jiffies + 3 * HZ);
	}

	return 0;
}
static void set_flow_ctrl_old(struct atl1_adapter *adapter)
{
	u32 hi, lo, value;

	/* RFD Flow Control */
	value = adapter->rfd_ring.count;
	hi = value / 16;
	if (hi < 2)
		hi = 2;
	lo = value * 7 / 8;

	value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
		((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
	iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH);

	/* RRD Flow Control */
	value = adapter->rrd_ring.count;
	lo = value / 16;
	hi = value * 7 / 8;
	if (lo < 2)
		lo = 2;
	value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
		((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
	iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
}

static void set_flow_ctrl_new(struct atl1_hw *hw)
{
	u32 hi, lo, value;

	/* RXF Flow Control */
	value = ioread32(hw->hw_addr + REG_SRAM_RXF_LEN);
	lo = value / 16;
	if (lo < 192)
		lo = 192;
	hi = value * 7 / 8;
	if (hi < lo)
		hi = lo + 16;
	value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
		((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
	iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH);

	/* RRD Flow Control */
	value = ioread32(hw->hw_addr + REG_SRAM_RRD_LEN);
	lo = value / 8;
	hi = value * 7 / 8;
	if (lo < 2)
		lo = 2;
	if (hi < lo)
		hi = lo + 3;
	value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
		((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
	iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
}
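/*
 * Added worked example (not in the upstream file): for the RFD ring in
 * set_flow_ctrl_old() with a hypothetical ring of 256 descriptors, the
 * high threshold is 256 / 16 = 16 and the low threshold is
 * 256 * 7 / 8 = 224.  Judging by the register names, these act as the
 * watermarks between which the MAC asserts and deasserts 802.3x pause
 * as free receive descriptors run low.
 */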
/*
 * atl1_configure - Configure Transmit & Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx / Rx unit of the MAC after a reset.
 */
static u32 atl1_configure(struct atl1_adapter *adapter)
{
	struct atl1_hw *hw = &adapter->hw;
	u32 value;

	/* clear interrupt status */
	iowrite32(0xffffffff, adapter->hw.hw_addr + REG_ISR);

	/* set MAC Address */
	value = (((u32) hw->mac_addr[2]) << 24) |
		(((u32) hw->mac_addr[3]) << 16) |
		(((u32) hw->mac_addr[4]) << 8) |
		(((u32) hw->mac_addr[5]));
	iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
	value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
	iowrite32(value, hw->hw_addr + (REG_MAC_STA_ADDR + 4));

	/* tx / rx ring */

	/* HI base address */
	iowrite32((u32) ((adapter->tpd_ring.dma & 0xffffffff00000000ULL) >> 32),
		hw->hw_addr + REG_DESC_BASE_ADDR_HI);
	/* LO base address */
	iowrite32((u32) (adapter->rfd_ring.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_RFD_ADDR_LO);
	iowrite32((u32) (adapter->rrd_ring.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_RRD_ADDR_LO);
	iowrite32((u32) (adapter->tpd_ring.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_TPD_ADDR_LO);
	iowrite32((u32) (adapter->cmb.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_CMB_ADDR_LO);
	iowrite32((u32) (adapter->smb.dma & 0x00000000ffffffffULL),
		hw->hw_addr + REG_DESC_SMB_ADDR_LO);

	/* element count */
	value = adapter->rrd_ring.count;
	value <<= 16;
	value += adapter->rfd_ring.count;
	iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE);
	iowrite32(adapter->tpd_ring.count, hw->hw_addr +
		REG_DESC_TPD_RING_SIZE);

	/* Load Ptr */
	iowrite32(1, hw->hw_addr + REG_LOAD_PTR);

	/* config Mailbox */
	value = ((atomic_read(&adapter->tpd_ring.next_to_use)
		& MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) |
		((atomic_read(&adapter->rrd_ring.next_to_clean)
		& MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) |
		((atomic_read(&adapter->rfd_ring.next_to_use)
		& MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT);
	iowrite32(value, hw->hw_addr + REG_MAILBOX);

	/* config IPG/IFG */
	value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK)
		<< MAC_IPG_IFG_IPGT_SHIFT) |
		(((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK)
		<< MAC_IPG_IFG_MIFG_SHIFT) |
		(((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK)
		<< MAC_IPG_IFG_IPGR1_SHIFT) |
		(((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK)
		<< MAC_IPG_IFG_IPGR2_SHIFT);
	iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG);

	/* config Half-Duplex Control */
	value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) |
		(((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK)
		<< MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) |
		MAC_HALF_DUPLX_CTRL_EXC_DEF_EN |
		(0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) |
		(((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK)
		<< MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT);
	iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL);

	/* set Interrupt Moderator Timer */
	iowrite16(adapter->imt, hw->hw_addr + REG_IRQ_MODU_TIMER_INIT);
	iowrite32(MASTER_CTRL_ITIMER_EN, hw->hw_addr + REG_MASTER_CTRL);

	/* set Interrupt Clear Timer */
	iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER);

	/* set max frame size hw will accept */
	iowrite32(hw->max_frame_size, hw->hw_addr + REG_MTU);

	/* jumbo size & rrd retirement timer */
	value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK)
		<< RXQ_JMBOSZ_TH_SHIFT) |
		(((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK)
		<< RXQ_JMBO_LKAH_SHIFT) |
		(((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK)
		<< RXQ_RRD_TIMER_SHIFT);
	iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM);

	/* Flow Control */
	switch (hw->dev_rev) {
	case 0x8001:
	case 0x9001:
	case 0x9002:
	case 0x9003:
		set_flow_ctrl_old(adapter);
		break;
	default:
		set_flow_ctrl_new(hw);
		break;
	}

	/* config TXQ */
	value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK)
		<< TXQ_CTRL_TPD_BURST_NUM_SHIFT) |
		(((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK)
		<< TXQ_CTRL_TXF_BURST_NUM_SHIFT) |
		(((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK)
		<< TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE |
		TXQ_CTRL_EN;
	iowrite32(value, hw->hw_addr + REG_TXQ_CTRL);

	/* min tpd fetch gap & tx jumbo packet size threshold for task offload */
	value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK)
		<< TX_JUMBO_TASK_TH_SHIFT) |
		(((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK)
		<< TX_TPD_MIN_IPG_SHIFT);
	iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG);

	/* config RXQ */
	value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK)
		<< RXQ_CTRL_RFD_BURST_NUM_SHIFT) |
		(((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK)
		<< RXQ_CTRL_RRD_BURST_THRESH_SHIFT) |
		(((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK)
		<< RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | RXQ_CTRL_CUT_THRU_EN |
		RXQ_CTRL_EN;
	iowrite32(value, hw->hw_addr + REG_RXQ_CTRL);

	/* config DMA Engine */
	value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
		<< DMA_CTRL_DMAR_BURST_LEN_SHIFT) |
		((((u32) hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
		<< DMA_CTRL_DMAW_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN |
		DMA_CTRL_DMAW_EN;
	value |= (u32) hw->dma_ord;
	if (atl1_rcb_128 == hw->rcb_value)
		value |= DMA_CTRL_RCB_VALUE;
	iowrite32(value, hw->hw_addr + REG_DMA_CTRL);
	/* config CMB / SMB */
	value = (hw->cmb_tpd > adapter->tpd_ring.count) ?
		hw->cmb_tpd : adapter->tpd_ring.count;
	value <<= 16;
	value |= hw->cmb_rrd;
	iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH);
	value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16);
	iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER);
	iowrite32(hw->smb_timer, hw->hw_addr + REG_SMB_TIMER);

	/* --- enable CMB / SMB */
	value = CSMB_CTRL_CMB_EN | CSMB_CTRL_SMB_EN;
	iowrite32(value, hw->hw_addr + REG_CSMB_CTRL);

	value = ioread32(adapter->hw.hw_addr + REG_ISR);
	if (unlikely((value & ISR_PHY_LINKDOWN) != 0))
		value = 1;	/* config failed */
	else
		value = 0;

	/* clear all interrupt status */
	iowrite32(0x3fffffff, adapter->hw.hw_addr + REG_ISR);
	iowrite32(0, adapter->hw.hw_addr + REG_ISR);
	return value;
}

/*
 * atl1_pcie_patch - Patch for PCIE module
 */
static void atl1_pcie_patch(struct atl1_adapter *adapter)
{
	u32 value;

	/* much vendor magic here */
	value = 0x6500;
	iowrite32(value, adapter->hw.hw_addr + 0x12FC);
	/* pcie flow control mode change */
	value = ioread32(adapter->hw.hw_addr + 0x1008);
	value |= 0x8000;
	iowrite32(value, adapter->hw.hw_addr + 0x1008);
}

/*
 * After ACPI resume on some VIA motherboards, the Interrupt Disable
 * bit (0x400) in the PCI command register is left set.
 * This function clears that bit so INTx interrupts work again.
 * Brackett, 2006/03/15
 */
static void atl1_via_workaround(struct atl1_adapter *adapter)
{
	unsigned long value;

	value = ioread16(adapter->hw.hw_addr + PCI_COMMAND);
	if (value & PCI_COMMAND_INTX_DISABLE)
		value &= ~PCI_COMMAND_INTX_DISABLE;
	iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND);
}
static void atl1_inc_smb(struct atl1_adapter *adapter)
{
	struct stats_msg_block *smb = adapter->smb.smb;

	/* Fill out the OS statistics structure */
	adapter->soft_stats.rx_packets += smb->rx_ok;
	adapter->soft_stats.tx_packets += smb->tx_ok;
	adapter->soft_stats.rx_bytes += smb->rx_byte_cnt;
	adapter->soft_stats.tx_bytes += smb->tx_byte_cnt;
	adapter->soft_stats.multicast += smb->rx_mcast;
	adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 +
		smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry);

	/* Rx Errors */
	adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err +
		smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov +
		smb->rx_rrd_ov + smb->rx_align_err);
	adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov;
	adapter->soft_stats.rx_length_errors += smb->rx_len_err;
	adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err;
	adapter->soft_stats.rx_frame_errors += smb->rx_align_err;
	adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov +
		smb->rx_rxf_ov);

	adapter->soft_stats.rx_pause += smb->rx_pause;
	adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov;
	adapter->soft_stats.rx_trunc += smb->rx_sz_ov;

	/* Tx Errors */
	adapter->soft_stats.tx_errors += (smb->tx_late_col +
		smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc);
	adapter->soft_stats.tx_fifo_errors += smb->tx_underrun;
	adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col;
	adapter->soft_stats.tx_window_errors += smb->tx_late_col;

	adapter->soft_stats.excecol += smb->tx_abort_col;
	adapter->soft_stats.deffer += smb->tx_defer;
	adapter->soft_stats.scc += smb->tx_1_col;
	adapter->soft_stats.mcc += smb->tx_2_col;
	adapter->soft_stats.latecol += smb->tx_late_col;
	adapter->soft_stats.tx_underun += smb->tx_underrun;
	adapter->soft_stats.tx_trunc += smb->tx_trunc;
	adapter->soft_stats.tx_pause += smb->tx_pause;

	adapter->net_stats.rx_packets = adapter->soft_stats.rx_packets;
	adapter->net_stats.tx_packets = adapter->soft_stats.tx_packets;
	adapter->net_stats.rx_bytes = adapter->soft_stats.rx_bytes;
	adapter->net_stats.tx_bytes = adapter->soft_stats.tx_bytes;
	adapter->net_stats.multicast = adapter->soft_stats.multicast;
	adapter->net_stats.collisions = adapter->soft_stats.collisions;
	adapter->net_stats.rx_errors = adapter->soft_stats.rx_errors;
	adapter->net_stats.rx_over_errors =
		adapter->soft_stats.rx_missed_errors;
	adapter->net_stats.rx_length_errors =
		adapter->soft_stats.rx_length_errors;
	adapter->net_stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors;
	adapter->net_stats.rx_frame_errors =
		adapter->soft_stats.rx_frame_errors;
	adapter->net_stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors;
	adapter->net_stats.rx_missed_errors =
		adapter->soft_stats.rx_missed_errors;
	adapter->net_stats.tx_errors = adapter->soft_stats.tx_errors;
	adapter->net_stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors;
	adapter->net_stats.tx_aborted_errors =
		adapter->soft_stats.tx_aborted_errors;
	adapter->net_stats.tx_window_errors =
		adapter->soft_stats.tx_window_errors;
	adapter->net_stats.tx_carrier_errors =
		adapter->soft_stats.tx_carrier_errors;
}

static void atl1_update_mailbox(struct atl1_adapter *adapter)
{
	unsigned long flags;
	u32 tpd_next_to_use;
	u32 rfd_next_to_use;
	u32 rrd_next_to_clean;
	u32 value;

	spin_lock_irqsave(&adapter->mb_lock, flags);

	tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
	rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use);
	rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean);

	value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
		MB_RFD_PROD_INDX_SHIFT) |
		((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
		MB_RRD_CONS_INDX_SHIFT) |
		((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
		MB_TPD_PROD_INDX_SHIFT);
	iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);

	spin_unlock_irqrestore(&adapter->mb_lock, flags);
}

static void atl1_clean_alloc_flag(struct atl1_adapter *adapter,
	struct rx_return_desc *rrd, u16 offset)
{
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;

	while (rfd_ring->next_to_clean != (rrd->buf_indx + offset)) {
		rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = 0;
		if (++rfd_ring->next_to_clean == rfd_ring->count) {
			rfd_ring->next_to_clean = 0;
		}
	}
}

static void atl1_update_rfd_index(struct atl1_adapter *adapter,
	struct rx_return_desc *rrd)
{
	u16 num_buf;

	num_buf = (rrd->xsz.xsum_sz.pkt_size + adapter->rx_buffer_len - 1) /
		adapter->rx_buffer_len;
	if (rrd->num_buf == num_buf)
		/* clean alloc flag for bad rrd */
		atl1_clean_alloc_flag(adapter, rrd, num_buf);
}
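/*
 * Added worked example (not in the upstream file): the num_buf
 * computation above is a ceiling division, ceil(pkt_size /
 * rx_buffer_len).  With the 1528-byte buffers from atl1_sw_init() and
 * a hypothetical 3000-byte packet: (3000 + 1528 - 1) / 1528 = 2
 * buffers.
 */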
static void atl1_rx_checksum(struct atl1_adapter *adapter,
	struct rx_return_desc *rrd, struct sk_buff *skb)
{
	struct pci_dev *pdev = adapter->pdev;

	skb->ip_summed = CHECKSUM_NONE;

	if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
		if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC |
					ERR_FLAG_CODE | ERR_FLAG_OV)) {
			adapter->hw_csum_err++;
			if (netif_msg_rx_err(adapter))
				dev_printk(KERN_DEBUG, &pdev->dev,
					"rx checksum error\n");
			return;
		}
	}

	/* not IPv4 */
	if (!(rrd->pkt_flg & PACKET_FLAG_IPV4))
		/* checksum is invalid, but it's not an IPv4 pkt, so ok */
		return;

	/* IPv4 packet */
	if (likely(!(rrd->err_flg &
		(ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		adapter->hw_csum_good++;
		return;
	}

	/* IPv4, but hardware thinks its checksum is wrong */
	if (netif_msg_rx_err(adapter))
		dev_printk(KERN_DEBUG, &pdev->dev,
			"hw csum wrong, pkt_flag:%x, err_flag:%x\n",
			rrd->pkt_flg, rrd->err_flg);
	skb->ip_summed = CHECKSUM_COMPLETE;
	skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum);
	adapter->hw_csum_err++;
	return;
}

/*
 * atl1_alloc_rx_buffers - Replace used receive buffers
 * @adapter: address of board private structure
 */
static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
{
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct pci_dev *pdev = adapter->pdev;
	struct page *page;
	unsigned long offset;
	struct atl1_buffer *buffer_info, *next_info;
	struct sk_buff *skb;
	u16 num_alloc = 0;
	u16 rfd_next_to_use, next_next;
	struct rx_free_desc *rfd_desc;

	next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use);
	if (++next_next == rfd_ring->count)
		next_next = 0;
	buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
	next_info = &rfd_ring->buffer_info[next_next];

	while (!buffer_info->alloced && !next_info->alloced) {
		if (buffer_info->skb) {
			buffer_info->alloced = 1;
			goto next;
		}

		rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use);

		skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->net_stats.rx_dropped++;
			break;
		}

		/*
		 * Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->alloced = 1;
		buffer_info->skb = skb;
		buffer_info->length = (u16) adapter->rx_buffer_len;
		page = virt_to_page(skb->data);
		offset = (unsigned long)skb->data & ~PAGE_MASK;
		buffer_info->dma = pci_map_page(pdev, page, offset,
			adapter->rx_buffer_len,
			PCI_DMA_FROMDEVICE);
		rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
		rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len);
		rfd_desc->coalese = 0;

next:
		rfd_next_to_use = next_next;
		if (unlikely(++next_next == rfd_ring->count))
			next_next = 0;

		buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
		next_info = &rfd_ring->buffer_info[next_next];
		num_alloc++;
	}
	if (num_alloc) {
		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use);
	}
	return num_alloc;
}

static void atl1_intr_rx(struct atl1_adapter *adapter)
{
	int i, count;
	u16 length;
	u16 rrd_next_to_clean;
	u32 value;
	struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
	struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
	struct atl1_buffer *buffer_info;
	struct rx_return_desc *rrd;
	struct sk_buff *skb;

	count = 0;

	rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean);

	while (1) {
		rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean);
		i = 1;
		if (likely(rrd->xsz.valid)) {	/* packet valid */
chk_rrd:
			/* check rrd status */
			if (likely(rrd->num_buf == 1))
				goto rrd_ok;
			else if (netif_msg_rx_err(adapter)) {
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"unexpected RRD buffer count\n");
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"rx_buf_len = %d\n",
					adapter->rx_buffer_len);
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"RRD num_buf = %d\n",
					rrd->num_buf);
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"RRD pkt_len = %d\n",
					rrd->xsz.xsum_sz.pkt_size);
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"RRD pkt_flg = 0x%08X\n",
					rrd->pkt_flg);
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"RRD err_flg = 0x%08X\n",
					rrd->err_flg);
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"RRD vlan_tag = 0x%08X\n",
					rrd->vlan_tag);
			}

			/* rrd seems to be bad */
			if (unlikely(i-- > 0)) {
				/* rrd may not be DMAed completely */
				udelay(1);
				goto chk_rrd;
			}
			/* bad rrd */
			if (netif_msg_rx_err(adapter))
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"bad RRD\n");
			/* update the RFD index if needed */
			if (rrd->num_buf > 1)
				atl1_update_rfd_index(adapter, rrd);

			/* update rrd */
			rrd->xsz.valid = 0;
			if (++rrd_next_to_clean == rrd_ring->count)
				rrd_next_to_clean = 0;
			count++;
			continue;
		} else {
			/* current RRD has not been updated yet */
			break;
		}
rrd_ok:
		/* clean alloc flag for bad rrd */
		atl1_clean_alloc_flag(adapter, rrd, 0);

		buffer_info = &rfd_ring->buffer_info[rrd->buf_indx];
		if (++rfd_ring->next_to_clean == rfd_ring->count)
			rfd_ring->next_to_clean = 0;

		/* update rrd next to clean */
		if (++rrd_next_to_clean == rrd_ring->count)
			rrd_next_to_clean = 0;
		count++;

		if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
			if (!(rrd->err_flg &
				(ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM
				| ERR_FLAG_LEN))) {
				/* packet error, don't pass it upstream */
				buffer_info->alloced = 0;
				rrd->xsz.valid = 0;
				continue;
			}
		}

		/* Good Receive */
		pci_unmap_page(adapter->pdev, buffer_info->dma,
			buffer_info->length, PCI_DMA_FROMDEVICE);
		buffer_info->dma = 0;
		skb = buffer_info->skb;
		length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size);

		skb_put(skb, length - ETH_FCS_LEN);

		/* Receive Checksum Offload */
		atl1_rx_checksum(adapter, rrd, skb);
		skb->protocol = eth_type_trans(skb, adapter->netdev);

		if (adapter->vlgrp && (rrd->pkt_flg & PACKET_FLAG_VLAN_INS)) {
			u16 vlan_tag = (rrd->vlan_tag >> 4) |
				((rrd->vlan_tag & 7) << 13) |
				((rrd->vlan_tag & 8) << 9);
			vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag);
		} else
			netif_rx(skb);

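		/*
		 * Note on the VLAN tag shuffle above (derived from the
		 * shifts, not from a datasheet): the RRD appears to hold
		 * the VID in bits 15:4, CFI in bit 3 and priority in bits
		 * 2:0, whereas the 802.1Q TCI is PRI(15:13) CFI(12)
		 * VID(11:0). Illustrative example: rrd->vlan_tag = 0x0645
		 * (VID 0x64, CFI 0, PRI 5) becomes
		 * (0x0645 >> 4) | ((0x0645 & 7) << 13) | ((0x0645 & 8) << 9)
		 * = 0x064 | 0xa000 | 0 = 0xa064, i.e. PRI 5, VID 0x64.
		 * The transmit path performs the inverse permutation.
		 */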
		/* let protocol layer free skb */
		buffer_info->skb = NULL;
		buffer_info->alloced = 0;
		rrd->xsz.valid = 0;

		adapter->netdev->last_rx = jiffies;
	}

	atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean);

	atl1_alloc_rx_buffers(adapter);

	/* update the mailbox if we cleaned anything */
	if (count) {
		u32 tpd_next_to_use;
		u32 rfd_next_to_use;

		spin_lock(&adapter->mb_lock);

		tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
		rfd_next_to_use =
			atomic_read(&adapter->rfd_ring.next_to_use);
		rrd_next_to_clean =
			atomic_read(&adapter->rrd_ring.next_to_clean);
		value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
			MB_RFD_PROD_INDX_SHIFT) |
			((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
			MB_RRD_CONS_INDX_SHIFT) |
			((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
			MB_TPD_PROD_INDX_SHIFT);
		iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
		spin_unlock(&adapter->mb_lock);
	}
}

static void atl1_intr_tx(struct atl1_adapter *adapter)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_buffer *buffer_info;
	u16 sw_tpd_next_to_clean;
	u16 cmb_tpd_next_to_clean;

	sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean);
	cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx);

	while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) {
		struct tx_packet_desc *tpd;

		tpd = ATL1_TPD_DESC(tpd_ring, sw_tpd_next_to_clean);
		buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean];
		if (buffer_info->dma) {
			pci_unmap_page(adapter->pdev, buffer_info->dma,
				buffer_info->length, PCI_DMA_TODEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb_irq(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		if (++sw_tpd_next_to_clean == tpd_ring->count)
			sw_tpd_next_to_clean = 0;
	}
	atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean);

	if (netif_queue_stopped(adapter->netdev)
	    && netif_carrier_ok(adapter->netdev))
		netif_wake_queue(adapter->netdev);
}

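/*
 * Note on the ring arithmetic below: one descriptor is always kept
 * unused so that next_to_use == next_to_clean means "empty" rather
 * than "full"; hence the "- 1" in both branches. Example (values
 * illustrative): count = 256, next_to_clean = 5, next_to_use = 10
 * gives 256 + 5 - 10 - 1 = 250 free TPDs.
 */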
static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring)
{
	u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
	u16 next_to_use = atomic_read(&tpd_ring->next_to_use);

	return ((next_to_clean > next_to_use) ?
		next_to_clean - next_to_use - 1 :
		tpd_ring->count + next_to_clean - next_to_use - 1);
}

static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
	struct tx_packet_desc *ptpd)
{
	/* spinlock held */
	u8 hdr_len, ip_off;
	u32 real_len;
	int err;

	if (skb_shinfo(skb)->gso_size) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (unlikely(err))
				return -1;
		}

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);

			real_len = (((unsigned char *)iph - skb->data) +
				ntohs(iph->tot_len));
			if (real_len < skb->len)
				pskb_trim(skb, real_len);
			hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
			if (skb->len == hdr_len) {
				iph->check = 0;
				tcp_hdr(skb)->check =
					~csum_tcpudp_magic(iph->saddr,
					iph->daddr, tcp_hdrlen(skb),
					IPPROTO_TCP, 0);
				ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) <<
					TPD_IPHL_SHIFT;
				ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
					TPD_TCPHDRLEN_MASK) <<
					TPD_TCPHDRLEN_SHIFT;
				ptpd->word3 |= 1 << TPD_IP_CSUM_SHIFT;
				ptpd->word3 |= 1 << TPD_TCP_CSUM_SHIFT;
				return 1;
			}

			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
					iph->daddr, 0, IPPROTO_TCP, 0);
			ip_off = (unsigned char *)iph -
				(unsigned char *) skb_network_header(skb);
			if (ip_off == 8) /* 802.3-SNAP frame */
				ptpd->word3 |= 1 << TPD_ETHTYPE_SHIFT;
			else if (ip_off != 0)
				return -2;

			ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) <<
				TPD_IPHL_SHIFT;
			ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
				TPD_TCPHDRLEN_MASK) << TPD_TCPHDRLEN_SHIFT;
			ptpd->word3 |= (skb_shinfo(skb)->gso_size &
				TPD_MSS_MASK) << TPD_MSS_SHIFT;
			ptpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT;
			return 3;
		}
	}
	return 0;
}

static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
	struct tx_packet_desc *ptpd)
{
	u8 css, cso;

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		css = (u8) (skb->csum_start - skb_headroom(skb));
		cso = css + (u8) skb->csum_offset;
		if (unlikely(css & 0x1)) {
			/* L1 hardware requires an even number here */
			if (netif_msg_tx_err(adapter))
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"payload offset not an even number\n");
			return -1;
		}
		ptpd->word3 |= (css & TPD_PLOADOFFSET_MASK) <<
			TPD_PLOADOFFSET_SHIFT;
		ptpd->word3 |= (cso & TPD_CCSUMOFFSET_MASK) <<
			TPD_CCSUMOFFSET_SHIFT;
		ptpd->word3 |= 1 << TPD_CUST_CSUM_EN_SHIFT;
		return 1;
	}
	return 0;
}

static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
	struct tx_packet_desc *ptpd)
{
	/* spinlock held */
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_buffer *buffer_info;
	u16 buf_len = skb->len;
	struct page *page;
	unsigned long offset;
	unsigned int nr_frags;
	unsigned int f;
	int retval;
	u16 next_to_use;
	u16 data_len;
	u8 hdr_len;

	buf_len -= skb->data_len;
	nr_frags = skb_shinfo(skb)->nr_frags;
	next_to_use = atomic_read(&tpd_ring->next_to_use);
	buffer_info = &tpd_ring->buffer_info[next_to_use];
	BUG_ON(buffer_info->skb);
	/* put skb in last TPD */
	buffer_info->skb = NULL;

	retval = (ptpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
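	/*
	 * Note (description of the mapping below, not new behavior): when
	 * TPD_SEGMENT_EN was set by atl1_tso(), the first buffer maps only
	 * the Ethernet/IP/TCP header bytes (hdr_len), and the rest of the
	 * linear data is split into ATL1_MAX_TX_BUF_LEN-sized chunks, one
	 * TPD each, so the hardware can replicate the header per segment.
	 */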
	if (retval) {
		/* TSO */
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		buffer_info->length = hdr_len;
		page = virt_to_page(skb->data);
		offset = (unsigned long)skb->data & ~PAGE_MASK;
		buffer_info->dma = pci_map_page(adapter->pdev, page,
						offset, hdr_len,
						PCI_DMA_TODEVICE);

		if (++next_to_use == tpd_ring->count)
			next_to_use = 0;

		if (buf_len > hdr_len) {
			int i, nseg;

			data_len = buf_len - hdr_len;
			nseg = (data_len + ATL1_MAX_TX_BUF_LEN - 1) /
				ATL1_MAX_TX_BUF_LEN;
			for (i = 0; i < nseg; i++) {
				buffer_info =
					&tpd_ring->buffer_info[next_to_use];
				buffer_info->skb = NULL;
				/* each chunk is at most ATL1_MAX_TX_BUF_LEN */
				buffer_info->length =
					(data_len > ATL1_MAX_TX_BUF_LEN) ?
					ATL1_MAX_TX_BUF_LEN : data_len;
				data_len -= buffer_info->length;
				page = virt_to_page(skb->data +
					(hdr_len + i * ATL1_MAX_TX_BUF_LEN));
				offset = (unsigned long)(skb->data +
					(hdr_len + i * ATL1_MAX_TX_BUF_LEN)) &
					~PAGE_MASK;
				buffer_info->dma = pci_map_page(adapter->pdev,
					page, offset, buffer_info->length,
					PCI_DMA_TODEVICE);
				if (++next_to_use == tpd_ring->count)
					next_to_use = 0;
			}
		}
	} else {
		/* not TSO */
		buffer_info->length = buf_len;
		page = virt_to_page(skb->data);
		offset = (unsigned long)skb->data & ~PAGE_MASK;
		buffer_info->dma = pci_map_page(adapter->pdev, page,
			offset, buf_len, PCI_DMA_TODEVICE);
		if (++next_to_use == tpd_ring->count)
			next_to_use = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;
		u16 i, nseg;

		frag = &skb_shinfo(skb)->frags[f];
		buf_len = frag->size;

		nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) /
			ATL1_MAX_TX_BUF_LEN;
		for (i = 0; i < nseg; i++) {
			buffer_info = &tpd_ring->buffer_info[next_to_use];
			BUG_ON(buffer_info->skb);
			buffer_info->skb = NULL;
			buffer_info->length = (buf_len > ATL1_MAX_TX_BUF_LEN) ?
				ATL1_MAX_TX_BUF_LEN : buf_len;
			buf_len -= buffer_info->length;
			buffer_info->dma = pci_map_page(adapter->pdev,
				frag->page,
				frag->page_offset + (i * ATL1_MAX_TX_BUF_LEN),
				buffer_info->length, PCI_DMA_TODEVICE);

			if (++next_to_use == tpd_ring->count)
				next_to_use = 0;
		}
	}

	/* last tpd's buffer-info */
	buffer_info->skb = skb;
}

static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count,
	struct tx_packet_desc *ptpd)
{
	/* spinlock held */
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_buffer *buffer_info;
	struct tx_packet_desc *tpd;
	u16 j;
	u32 val;
	u16 next_to_use = (u16) atomic_read(&tpd_ring->next_to_use);

	for (j = 0; j < count; j++) {
		buffer_info = &tpd_ring->buffer_info[next_to_use];
		tpd = ATL1_TPD_DESC(&adapter->tpd_ring, next_to_use);
		if (tpd != ptpd)
			memcpy(tpd, ptpd, sizeof(struct tx_packet_desc));
		tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
		tpd->word2 = (cpu_to_le16(buffer_info->length) &
			TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT;

		/*
		 * if this is the first descriptor in a TSO chain, set
		 * TPD_HDRFLAG, otherwise clear it
		 */
		val = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) &
			TPD_SEGMENT_EN_MASK;
		if (val) {
			if (!j)
				tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT;
			else
				tpd->word3 &= ~(1 << TPD_HDRFLAG_SHIFT);
		}

		if (j == (count - 1))
			tpd->word3 |= 1 << TPD_EOP_SHIFT;

		if (++next_to_use == tpd_ring->count)
			next_to_use = 0;
	}
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	atomic_set(&tpd_ring->next_to_use, next_to_use);
}

static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	int len = skb->len;
	int tso;
	int count = 1;
	int ret_val;
	struct tx_packet_desc *ptpd;
	u16 frag_size;
	u16 vlan_tag;
	unsigned long flags;
	unsigned int nr_frags = 0;
	unsigned int mss = 0;
	unsigned int f;
	unsigned int proto_hdr_len;

	len -= skb->data_len;

	if (unlikely(skb->len <= 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++) {
		frag_size = skb_shinfo(skb)->frags[f].size;
		if (frag_size)
			count += (frag_size + ATL1_MAX_TX_BUF_LEN - 1) /
				ATL1_MAX_TX_BUF_LEN;
	}

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		if (skb->protocol == htons(ETH_P_IP)) {
			proto_hdr_len = (skb_transport_offset(skb) +
				tcp_hdrlen(skb));
			if (unlikely(proto_hdr_len > len)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
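			/*
			 * Note: count was seeded with one TPD above and
			 * already includes one TPD per fragment chunk;
			 * for TSO it grows below by one per additional
			 * ATL1_MAX_TX_BUF_LEN chunk of linear payload,
			 * mirroring the split atl1_tx_map() will perform.
			 */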
			/* do we need an additional TPD? */
			if (proto_hdr_len != len)
				count += (len - proto_hdr_len +
					ATL1_MAX_TX_BUF_LEN - 1) /
					ATL1_MAX_TX_BUF_LEN;
		}
	}

	if (!spin_trylock_irqsave(&adapter->lock, flags)) {
		/* Can't get lock - tell upper layer to requeue */
		if (netif_msg_tx_queued(adapter))
			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
				"tx locked\n");
		return NETDEV_TX_LOCKED;
	}

	if (atl1_tpd_avail(&adapter->tpd_ring) < count) {
		/* not enough descriptors */
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&adapter->lock, flags);
		if (netif_msg_tx_queued(adapter))
			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
				"tx busy\n");
		return NETDEV_TX_BUSY;
	}

	ptpd = ATL1_TPD_DESC(tpd_ring,
		(u16) atomic_read(&tpd_ring->next_to_use));
	memset(ptpd, 0, sizeof(struct tx_packet_desc));

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) |
			((vlan_tag >> 9) & 0x8);
		ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
		ptpd->word3 |= (vlan_tag & TPD_VL_TAGGED_MASK) <<
			TPD_VL_TAGGED_SHIFT;
	}

	tso = atl1_tso(adapter, skb, ptpd);
	if (tso < 0) {
		spin_unlock_irqrestore(&adapter->lock, flags);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (!tso) {
		ret_val = atl1_tx_csum(adapter, skb, ptpd);
		if (ret_val < 0) {
			spin_unlock_irqrestore(&adapter->lock, flags);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	atl1_tx_map(adapter, skb, ptpd);
	atl1_tx_queue(adapter, count, ptpd);
	atl1_update_mailbox(adapter);
	spin_unlock_irqrestore(&adapter->lock, flags);
	netdev->trans_start = jiffies;
	return NETDEV_TX_OK;
}

/*
 * atl1_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 */
static irqreturn_t atl1_intr(int irq, void *data)
{
	struct atl1_adapter *adapter = netdev_priv(data);
	u32 status;
	int max_ints = 10;

	status = adapter->cmb.cmb->int_stats;
	if (!status)
		return IRQ_NONE;

	do {
		/* clear CMB interrupt status at once */
		adapter->cmb.cmb->int_stats = 0;

		if (status & ISR_GPHY)	/* clear phy status */
			atlx_clear_phy_int(adapter);

		/* clear ISR status, and Enable CMB DMA/Disable Interrupt */
		iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR);

		/* check if SMB intr */
		if (status & ISR_SMB)
			atl1_inc_smb(adapter);

		/* check if PCIE PHY Link down */
		if (status & ISR_PHY_LINKDOWN) {
			if (netif_msg_intr(adapter))
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"pcie phy link down %x\n", status);
			if (netif_running(adapter->netdev)) {	/* reset MAC */
				iowrite32(0, adapter->hw.hw_addr + REG_IMR);
				schedule_work(&adapter->pcie_dma_to_rst_task);
				return IRQ_HANDLED;
			}
		}

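		/*
		 * Note: the surrounding do/while re-reads the CMB status
		 * word and keeps looping while new events arrive, but
		 * gives up after max_ints iterations so a stuck status
		 * source cannot wedge the handler.
		 */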
		/* check for a DMA read/write error */
		if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
			if (netif_msg_intr(adapter))
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"pcie DMA r/w error (status = 0x%x)\n",
					status);
			iowrite32(0, adapter->hw.hw_addr + REG_IMR);
			schedule_work(&adapter->pcie_dma_to_rst_task);
			return IRQ_HANDLED;
		}

		/* link event */
		if (status & ISR_GPHY) {
			adapter->soft_stats.tx_carrier_errors++;
			atl1_check_for_link(adapter);
		}

		/* transmit event */
		if (status & ISR_CMB_TX)
			atl1_intr_tx(adapter);

		/* rx exception */
		if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
			ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
			ISR_HOST_RRD_OV | ISR_CMB_RX))) {
			if (status & (ISR_RXF_OV | ISR_RFD_UNRUN |
				ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
				ISR_HOST_RRD_OV))
				if (netif_msg_intr(adapter))
					dev_printk(KERN_DEBUG,
						&adapter->pdev->dev,
						"rx exception, ISR = 0x%x\n",
						status);
			atl1_intr_rx(adapter);
		}

		if (--max_ints < 0)
			break;

	} while ((status = adapter->cmb.cmb->int_stats));

	/* re-enable Interrupt */
	iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR);
	return IRQ_HANDLED;
}

/*
 * atl1_watchdog - Timer Call-back
 * @data: pointer to the adapter structure cast into an unsigned long
 */
static void atl1_watchdog(unsigned long data)
{
	struct atl1_adapter *adapter = (struct atl1_adapter *)data;

	/* Reset the timer */
	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}

/*
 * atl1_phy_config - Timer Call-back
 * @data: pointer to the adapter structure cast into an unsigned long
 */
static void atl1_phy_config(unsigned long data)
{
	struct atl1_adapter *adapter = (struct atl1_adapter *)data;
	struct atl1_hw *hw = &adapter->hw;
	unsigned long flags;

	spin_lock_irqsave(&adapter->lock, flags);
	adapter->phy_timer_pending = false;
	atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
	atl1_write_phy_reg(hw, MII_ATLX_CR, hw->mii_1000t_ctrl_reg);
	atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN);
	spin_unlock_irqrestore(&adapter->lock, flags);
}

/*
 * Orphaned vendor comment left intact here:
 * <vendor comment>
 * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT
 * will assert. We do soft reset <0x1400=1> according
 * with the SPEC. BUT, it seemes that PCIE or DMA
 * state-machine will not be reset. DMAR_TO_INT will
 * assert again and again.
 * </vendor comment>
 */

static int atl1_reset(struct atl1_adapter *adapter)
{
	int ret;

	ret = atl1_reset_hw(&adapter->hw);
	if (ret)
		return ret;
	return atl1_init_hw(&adapter->hw);
}

static s32 atl1_up(struct atl1_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;
	int irq_flags = IRQF_SAMPLE_RANDOM;

	/* hardware has been reset, we need to reload some things */
	atlx_set_multi(netdev);
	atl1_init_ring_ptrs(adapter);
	atlx_restore_vlan(adapter);
	err = atl1_alloc_rx_buffers(adapter);
	if (unlikely(!err))
		/* no RX buffers were allocated */
		return -ENOMEM;

	if (unlikely(atl1_configure(adapter))) {
		err = -EIO;
		goto err_up;
	}

	err = pci_enable_msi(adapter->pdev);
	if (err) {
		if (netif_msg_ifup(adapter))
			dev_info(&adapter->pdev->dev,
				"Unable to enable MSI: %d\n", err);
		irq_flags |= IRQF_SHARED;
	}

	err = request_irq(adapter->pdev->irq, &atl1_intr, irq_flags,
		netdev->name, netdev);
	if (unlikely(err))
		goto err_up;

	mod_timer(&adapter->watchdog_timer, jiffies);
	atlx_irq_enable(adapter);
	atl1_check_link(adapter);
	return 0;

err_up:
	pci_disable_msi(adapter->pdev);
	/* free rx_buffers */
	atl1_clean_rx_ring(adapter);
	return err;
}

static void atl1_down(struct atl1_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_config_timer);
	adapter->phy_timer_pending = false;

	atlx_irq_disable(adapter);
	free_irq(adapter->pdev->irq, netdev);
	pci_disable_msi(adapter->pdev);
	atl1_reset_hw(&adapter->hw);
	adapter->cmb.cmb->int_stats = 0;

	adapter->link_speed = SPEED_0;
	adapter->link_duplex = -1;
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	atl1_clean_tx_ring(adapter);
	atl1_clean_rx_ring(adapter);
}

static void atl1_tx_timeout_task(struct work_struct *work)
{
	struct atl1_adapter *adapter =
		container_of(work, struct atl1_adapter, tx_timeout_task);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);
	atl1_down(adapter);
	atl1_up(adapter);
	netif_device_attach(netdev);
}

/*
 * atl1_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */
static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	int old_mtu = netdev->mtu;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		if (netif_msg_link(adapter))
			dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
		return -EINVAL;
	}

	adapter->hw.max_frame_size = max_frame;
	adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3;
	adapter->rx_buffer_len = (max_frame + 7) & ~7;
	adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8;

	netdev->mtu = new_mtu;
	if ((old_mtu != new_mtu) && netif_running(netdev)) {
		atl1_down(adapter);
		atl1_up(adapter);
	}

	return 0;
}

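/*
 * Worked example for the sizing math above (illustrative): new_mtu = 1500
 * gives max_frame = 1500 + 14 + 4 + 4 = 1522; rx_buffer_len is rounded up
 * to a multiple of 8, (1522 + 7) & ~7 = 1528; and both thresholds come out
 * to (1522 + 7) >> 3 = 1528 / 8 = 191.
 */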
/*
 * atl1_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 */
static int atl1_open(struct net_device *netdev)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	int err;

	/* allocate transmit descriptors */
	err = atl1_setup_ring_resources(adapter);
	if (err)
		return err;

	err = atl1_up(adapter);
	if (err)
		goto err_up;

	return 0;

err_up:
	atl1_reset(adapter);
	return err;
}

/*
 * atl1_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 */
static int atl1_close(struct net_device *netdev)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);

	atl1_down(adapter);
	atl1_free_ring_resources(adapter);
	return 0;
}

#ifdef CONFIG_PM
static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;
	u32 ctrl = 0;
	u32 wufc = adapter->wol;
	u32 val;
	int retval;
	u16 speed;
	u16 duplex;

	netif_device_detach(netdev);
	if (netif_running(netdev))
		atl1_down(adapter);

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	atl1_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
	atl1_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
	val = ctrl & BMSR_LSTATUS;
	if (val)
		wufc &= ~ATLX_WUFC_LNKC;

	if (val && wufc) {
		val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
		if (val) {
			if (netif_msg_ifdown(adapter))
				dev_printk(KERN_DEBUG, &pdev->dev,
					"error getting speed/duplex\n");
			goto disable_wol;
		}

		ctrl = 0;

		/* enable magic packet WOL */
		if (wufc & ATLX_WUFC_MAG)
			ctrl |= (WOL_MAGIC_EN | WOL_MAGIC_PME_EN);
		iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
		ioread32(hw->hw_addr + REG_WOL_CTRL);

		/* configure the mac */
		ctrl = MAC_CTRL_RX_EN;
		ctrl |= ((u32)((speed == SPEED_1000) ? MAC_CTRL_SPEED_1000 :
			MAC_CTRL_SPEED_10_100) << MAC_CTRL_SPEED_SHIFT);
		if (duplex == FULL_DUPLEX)
			ctrl |= MAC_CTRL_DUPLX;
		ctrl |= (((u32)adapter->hw.preamble_len &
			MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
		if (adapter->vlgrp)
			ctrl |= MAC_CTRL_RMV_VLAN;
		if (wufc & ATLX_WUFC_MAG)
			ctrl |= MAC_CTRL_BC_EN;
		iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL);
		ioread32(hw->hw_addr + REG_MAC_CTRL);

		/* poke the PHY */
		ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
		ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
		iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
		ioread32(hw->hw_addr + REG_PCIE_PHYMISC);

		pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
		goto exit;
	}

	if (!val && wufc) {
		ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
		iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
		ioread32(hw->hw_addr + REG_WOL_CTRL);
		iowrite32(0, hw->hw_addr + REG_MAC_CTRL);
		ioread32(hw->hw_addr + REG_MAC_CTRL);
		hw->phy_configured = false;
		pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
		goto exit;
	}

disable_wol:
	iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
	ioread32(hw->hw_addr + REG_WOL_CTRL);
	ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
	ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
	iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
	ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
	hw->phy_configured = false;
	pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
exit:
	if (netif_running(netdev))
		pci_disable_msi(adapter->pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int atl1_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct atl1_adapter *adapter = netdev_priv(netdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device(pdev);
	if (err) {
		if (netif_msg_ifup(adapter))
			dev_printk(KERN_DEBUG, &pdev->dev,
				"error enabling pci device\n");
		return err;
	}

	pci_set_master(pdev);
	iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	atl1_reset_hw(&adapter->hw);
	adapter->cmb.cmb->int_stats = 0;

	if (netif_running(netdev))
		atl1_up(adapter);
	netif_device_attach(netdev);

	return 0;
}
#else
#define atl1_suspend NULL
#define atl1_resume NULL
#endif

static void atl1_shutdown(struct pci_dev *pdev)
{
#ifdef CONFIG_PM
	atl1_suspend(pdev, PMSG_SUSPEND);
#endif
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void atl1_poll_controller(struct net_device *netdev)
{
	disable_irq(netdev->irq);
	atl1_intr(netdev->irq, netdev);
	enable_irq(netdev->irq);
}
#endif

/*
 * atl1_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in atl1_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * atl1_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 */
static int __devinit atl1_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct atl1_adapter *adapter;
	static int cards_found = 0;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/*
	 * The atl1 chip can DMA to 64-bit addresses, but it uses a single
	 * shared register for the high 32 bits, so only a single, aligned,
	 * 4 GB physical address range can be used at a time.
	 *
	 * Supporting 64-bit DMA on this hardware is more trouble than it's
	 * worth. It is far easier to limit to 32-bit DMA than to update
	 * various kernel subsystems to support the mechanics required by a
	 * fixed-high-32-bit system.
	 */
	err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (err) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto err_dma;
	}
	/*
	 * Mark all PCI regions associated with PCI device
	 * pdev as being reserved by owner atl1_driver_name
	 */
	err = pci_request_regions(pdev, ATLX_DRIVER_NAME);
	if (err)
		goto err_request_regions;

	/*
	 * Enables bus-mastering on the device and calls
	 * pcibios_set_master to do the needed arch specific settings
	 */
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct atl1_adapter));
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.back = adapter;
	adapter->msg_enable = netif_msg_init(debug, atl1_default_msg);

	adapter->hw.hw_addr = pci_iomap(pdev, 0, 0);
	if (!adapter->hw.hw_addr) {
		err = -EIO;
		goto err_pci_iomap;
	}
	/* get device revision number */
	adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr +
		(REG_MASTER_CTRL + 2));
	if (netif_msg_probe(adapter))
		dev_info(&pdev->dev, "version %s\n", ATLX_DRIVER_VERSION);

	/* set default ring resource counts */
	adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD;
	adapter->tpd_ring.count = ATL1_DEFAULT_TPD;

	adapter->mii.dev = netdev;
	adapter->mii.mdio_read = mdio_read;
	adapter->mii.mdio_write = mdio_write;
	adapter->mii.phy_id_mask = 0x1f;
	adapter->mii.reg_num_mask = 0x1f;

	netdev->open = &atl1_open;
	netdev->stop = &atl1_close;
	netdev->hard_start_xmit = &atl1_xmit_frame;
	netdev->get_stats = &atlx_get_stats;
	netdev->set_multicast_list = &atlx_set_multi;
	netdev->set_mac_address = &atl1_set_mac;
	netdev->change_mtu = &atl1_change_mtu;
	netdev->do_ioctl = &atlx_ioctl;
	netdev->tx_timeout = &atlx_tx_timeout;
	netdev->watchdog_timeo = 5 * HZ;
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = atl1_poll_controller;
#endif
	netdev->vlan_rx_register = atlx_vlan_rx_register;

	netdev->ethtool_ops = &atl1_ethtool_ops;
	adapter->bd_number = cards_found;

	/* setup the private structure */
	err = atl1_sw_init(adapter);
	if (err)
		goto err_common;

	netdev->features = NETIF_F_HW_CSUM;
	netdev->features |= NETIF_F_SG;
	netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_LLTX;

	/*
	 * patch for some early revisions of the L1 chip; the final
	 * version of the L1 chip may not need these patches
	 */
	/* atl1_pcie_patch(adapter); */

	/* really reset GPHY core */
	iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);

	/*
	 * reset the controller to
	 * put the device in a known good starting state
	 */
	if (atl1_reset_hw(&adapter->hw)) {
		err = -EIO;
		goto err_common;
	}

	/* copy the MAC address out of the EEPROM */
	atl1_read_mac_addr(&adapter->hw);
	memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		err = -EIO;
		goto err_common;
	}

	atl1_check_options(adapter);

	/* pre-init the MAC, and setup link */
	err = atl1_init_hw(&adapter->hw);
	if (err) {
		err = -EIO;
		goto err_common;
	}

	atl1_pcie_patch(adapter);
	/* assume we have no link for now */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &atl1_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	init_timer(&adapter->phy_config_timer);
	adapter->phy_config_timer.function = &atl1_phy_config;
	adapter->phy_config_timer.data = (unsigned long)adapter;
	adapter->phy_timer_pending = false;

	INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task);

	INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task);

	INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task);

	err = register_netdev(netdev);
	if (err)
		goto err_common;

	cards_found++;
	atl1_via_workaround(adapter);
	return 0;

err_common:
	pci_iounmap(pdev, adapter->hw.hw_addr);
err_pci_iomap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_dma:
err_request_regions:
	pci_disable_device(pdev);
	return err;
}

/*
 * atl1_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * atl1_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 */
static void __devexit atl1_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct atl1_adapter *adapter;

	/* Device not available. Return. */
	if (!netdev)
		return;

	adapter = netdev_priv(netdev);

	/*
	 * Some atl1 boards lack persistent storage for their MAC, and get it
	 * from the BIOS during POST. If we've been messing with the MAC
	 * address, we need to save the permanent one.
	 */
	if (memcmp(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN)) {
		memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr,
			ETH_ALEN);
		atl1_set_mac_addr(&adapter->hw);
	}

	iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);
	unregister_netdev(netdev);
	pci_iounmap(pdev, adapter->hw.hw_addr);
	pci_release_regions(pdev);
	free_netdev(netdev);
	pci_disable_device(pdev);
}

static struct pci_driver atl1_driver = {
	.name = ATLX_DRIVER_NAME,
	.id_table = atl1_pci_tbl,
	.probe = atl1_probe,
	.remove = __devexit_p(atl1_remove),
	.suspend = atl1_suspend,
	.resume = atl1_resume,
	.shutdown = atl1_shutdown
};

/*
 * atl1_exit_module - Driver Exit Cleanup Routine
 *
 * atl1_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit atl1_exit_module(void)
{
	pci_unregister_driver(&atl1_driver);
}

/*
 * atl1_init_module - Driver Registration Routine
 *
 * atl1_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init atl1_init_module(void)
{
	return pci_register_driver(&atl1_driver);
}

module_init(atl1_init_module);
module_exit(atl1_exit_module);

struct atl1_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define ATL1_STAT(m) \
	sizeof(((struct atl1_adapter *)0)->m), offsetof(struct atl1_adapter, m)

static struct atl1_stats atl1_gstrings_stats[] = {
	{"rx_packets", ATL1_STAT(soft_stats.rx_packets)},
	{"tx_packets", ATL1_STAT(soft_stats.tx_packets)},
	{"rx_bytes", ATL1_STAT(soft_stats.rx_bytes)},
	{"tx_bytes", ATL1_STAT(soft_stats.tx_bytes)},
	{"rx_errors", ATL1_STAT(soft_stats.rx_errors)},
	{"tx_errors", ATL1_STAT(soft_stats.tx_errors)},
	{"rx_dropped", ATL1_STAT(net_stats.rx_dropped)},
	{"tx_dropped", ATL1_STAT(net_stats.tx_dropped)},
	{"multicast", ATL1_STAT(soft_stats.multicast)},
	{"collisions", ATL1_STAT(soft_stats.collisions)},
	{"rx_length_errors", ATL1_STAT(soft_stats.rx_length_errors)},
	{"rx_over_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
	{"rx_crc_errors", ATL1_STAT(soft_stats.rx_crc_errors)},
	{"rx_frame_errors", ATL1_STAT(soft_stats.rx_frame_errors)},
	{"rx_fifo_errors", ATL1_STAT(soft_stats.rx_fifo_errors)},
	{"rx_missed_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
	{"tx_aborted_errors", ATL1_STAT(soft_stats.tx_aborted_errors)},
	{"tx_carrier_errors", ATL1_STAT(soft_stats.tx_carrier_errors)},
	{"tx_fifo_errors", ATL1_STAT(soft_stats.tx_fifo_errors)},
	{"tx_window_errors", ATL1_STAT(soft_stats.tx_window_errors)},
	{"tx_abort_exce_coll", ATL1_STAT(soft_stats.excecol)},
	{"tx_abort_late_coll", ATL1_STAT(soft_stats.latecol)},
	{"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)},
	{"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)},
	{"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)},
	{"tx_underun", ATL1_STAT(soft_stats.tx_underun)},
	{"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)},
	{"tx_pause", ATL1_STAT(soft_stats.tx_pause)},
	{"rx_pause", ATL1_STAT(soft_stats.rx_pause)},
	{"rx_rrd_ov", ATL1_STAT(soft_stats.rx_rrd_ov)},
	{"rx_trunc", ATL1_STAT(soft_stats.rx_trunc)}
};

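/*
 * Note: each ATL1_STAT(m) use above expands to two initializers, e.g.
 * ATL1_STAT(soft_stats.rx_packets) becomes
 *	sizeof(((struct atl1_adapter *)0)->soft_stats.rx_packets),
 *	offsetof(struct atl1_adapter, soft_stats.rx_packets)
 * filling .sizeof_stat and .stat_offset; atl1_get_ethtool_stats() below
 * then reads each counter through (char *)adapter + stat_offset.
 */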
static void atl1_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	int i;
	char *p;

	for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
		p = (char *)adapter + atl1_gstrings_stats[i].stat_offset;
		data[i] = (atl1_gstrings_stats[i].sizeof_stat ==
			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
}

static int atl1_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(atl1_gstrings_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static int atl1_get_settings(struct net_device *netdev,
	struct ethtool_cmd *ecmd)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;

	ecmd->supported = (SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_Autoneg | SUPPORTED_TP);
	ecmd->advertising = ADVERTISED_TP;
	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
	    hw->media_type == MEDIA_TYPE_1000M_FULL) {
		ecmd->advertising |= ADVERTISED_Autoneg;
		if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR)
			ecmd->advertising |=
				(ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_1000baseT_Full);
		else
			ecmd->advertising |= (ADVERTISED_1000baseT_Full);
	}
	ecmd->port = PORT_TP;
	ecmd->phy_address = 0;
	ecmd->transceiver = XCVR_INTERNAL;

	if (netif_carrier_ok(adapter->netdev)) {
		u16 link_speed, link_duplex;

		atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex);
		ecmd->speed = link_speed;
		if (link_duplex == FULL_DUPLEX)
			ecmd->duplex = DUPLEX_FULL;
		else
			ecmd->duplex = DUPLEX_HALF;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}
	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
	    hw->media_type == MEDIA_TYPE_1000M_FULL)
		ecmd->autoneg = AUTONEG_ENABLE;
	else
		ecmd->autoneg = AUTONEG_DISABLE;

	return 0;
}

static int atl1_set_settings(struct net_device *netdev,
	struct ethtool_cmd *ecmd)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;
	u16 phy_data;
	int ret_val = 0;
	u16 old_media_type = hw->media_type;

	if (netif_running(adapter->netdev)) {
		if (netif_msg_link(adapter))
			dev_dbg(&adapter->pdev->dev,
				"ethtool shutting down adapter\n");
		atl1_down(adapter);
	}

	if (ecmd->autoneg == AUTONEG_ENABLE)
		hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
	else {
		if (ecmd->speed == SPEED_1000) {
			if (ecmd->duplex != DUPLEX_FULL) {
				if (netif_msg_link(adapter))
					dev_warn(&adapter->pdev->dev,
						"1000M half is invalid\n");
				ret_val = -EINVAL;
				goto exit_sset;
			}
			hw->media_type = MEDIA_TYPE_1000M_FULL;
		} else if (ecmd->speed == SPEED_100) {
			if (ecmd->duplex == DUPLEX_FULL)
				hw->media_type = MEDIA_TYPE_100M_FULL;
			else
				hw->media_type = MEDIA_TYPE_100M_HALF;
		} else {
			if (ecmd->duplex == DUPLEX_FULL)
				hw->media_type = MEDIA_TYPE_10M_FULL;
			else
				hw->media_type = MEDIA_TYPE_10M_HALF;
		}
	}
	switch (hw->media_type) {
	case MEDIA_TYPE_AUTO_SENSOR:
		ecmd->advertising =
			ADVERTISED_10baseT_Half |
			ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half |
			ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Full |
			ADVERTISED_Autoneg | ADVERTISED_TP;
		break;
	case MEDIA_TYPE_1000M_FULL:
		ecmd->advertising =
			ADVERTISED_1000baseT_Full |
			ADVERTISED_Autoneg | ADVERTISED_TP;
		break;
	default:
		ecmd->advertising = 0;
		break;
	}
	if (atl1_phy_setup_autoneg_adv(hw)) {
		ret_val = -EINVAL;
		if (netif_msg_link(adapter))
			dev_warn(&adapter->pdev->dev,
				"invalid ethtool speed/duplex setting\n");
		goto exit_sset;
	}
	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
	    hw->media_type == MEDIA_TYPE_1000M_FULL)
		phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
	else {
		switch (hw->media_type) {
		case MEDIA_TYPE_100M_FULL:
			phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
				MII_CR_RESET;
			break;
		case MEDIA_TYPE_100M_HALF:
			phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
			break;
		case MEDIA_TYPE_10M_FULL:
			phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 |
				MII_CR_RESET;
			break;
		default:
			/* MEDIA_TYPE_10M_HALF: */
			phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
			break;
		}
	}
	atl1_write_phy_reg(hw, MII_BMCR, phy_data);
exit_sset:
	if (ret_val)
		hw->media_type = old_media_type;

	if (netif_running(adapter->netdev)) {
		if (netif_msg_link(adapter))
			dev_dbg(&adapter->pdev->dev,
				"ethtool starting adapter\n");
		atl1_up(adapter);
	} else if (!ret_val) {
		if (netif_msg_link(adapter))
			dev_dbg(&adapter->pdev->dev,
				"ethtool resetting adapter\n");
		atl1_reset(adapter);
	}
	return ret_val;
}

static void atl1_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *drvinfo)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);

	strncpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
	strncpy(drvinfo->version, ATLX_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));
	drvinfo->eedump_len = ATL1_EEDUMP_LEN;
}

static void atl1_get_wol(struct net_device *netdev,
	struct ethtool_wolinfo *wol)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);

	wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = 0;
	if (adapter->wol & ATLX_WUFC_EX)
		wol->wolopts |= WAKE_UCAST;
	if (adapter->wol & ATLX_WUFC_MC)
		wol->wolopts |= WAKE_MCAST;
	if (adapter->wol & ATLX_WUFC_BC)
		wol->wolopts |= WAKE_BCAST;
	if (adapter->wol & ATLX_WUFC_MAG)
		wol->wolopts |= WAKE_MAGIC;
}

static int atl1_set_wol(struct net_device *netdev,
	struct ethtool_wolinfo *wol)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
		return -EOPNOTSUPP;
	adapter->wol = 0;
	if (wol->wolopts & WAKE_UCAST)
		adapter->wol |= ATLX_WUFC_EX;
	if (wol->wolopts & WAKE_MCAST)
		adapter->wol |= ATLX_WUFC_MC;
	if (wol->wolopts & WAKE_BCAST)
		adapter->wol |= ATLX_WUFC_BC;
	if (wol->wolopts & WAKE_MAGIC)
		adapter->wol |= ATLX_WUFC_MAG;
	return 0;
}

static u32 atl1_get_msglevel(struct net_device *netdev)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void atl1_set_msglevel(struct net_device *netdev, u32 value)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = value;
}

static int atl1_get_regs_len(struct net_device *netdev)
{
	return ATL1_REG_COUNT * sizeof(u32);
}

static void atl1_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
	void *p)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;
	unsigned int i;
	u32 *regbuf = p;

	for (i = 0; i < ATL1_REG_COUNT; i++) {
		/*
		 * This switch statement avoids reserved regions
		 * of register space.
		 */
		switch (i) {
		case 6 ... 9:
		case 14:
		case 29 ... 31:
		case 34 ... 63:
		case 75 ... 127:
		case 136 ... 1023:
		case 1027 ... 1087:
		case 1091 ... 1151:
		case 1194 ... 1195:
		case 1200 ... 1201:
		case 1206 ... 1213:
		case 1216 ... 1279:
		case 1290 ... 1311:
		case 1323 ... 1343:
		case 1358 ... 1359:
		case 1368 ... 1375:
		case 1378 ... 1383:
		case 1388 ... 1391:
		case 1393 ... 1395:
		case 1402 ... 1403:
		case 1410 ... 1471:
		case 1522 ... 1535:
			/* reserved region; don't read it */
			regbuf[i] = 0;
			break;
		default:
			/* unreserved region */
			regbuf[i] = ioread32(hw->hw_addr + (i * sizeof(u32)));
		}
	}
}

static void atl1_get_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_tpd_ring *txdr = &adapter->tpd_ring;
	struct atl1_rfd_ring *rxdr = &adapter->rfd_ring;

	ring->rx_max_pending = ATL1_MAX_RFD;
	ring->tx_max_pending = ATL1_MAX_TPD;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = rxdr->count;
	ring->tx_pending = txdr->count;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static int atl1_set_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_tpd_ring *tpdr = &adapter->tpd_ring;
	struct atl1_rrd_ring *rrdr = &adapter->rrd_ring;
	struct atl1_rfd_ring *rfdr = &adapter->rfd_ring;

	struct atl1_tpd_ring tpd_old, tpd_new;
	struct atl1_rfd_ring rfd_old, rfd_new;
	struct atl1_rrd_ring rrd_old, rrd_new;
	struct atl1_ring_header rhdr_old, rhdr_new;
	int err;

	tpd_old = adapter->tpd_ring;
	rfd_old = adapter->rfd_ring;
	rrd_old = adapter->rrd_ring;
	rhdr_old = adapter->ring_header;

	if (netif_running(adapter->netdev))
		atl1_down(adapter);

	rfdr->count = (u16) max(ring->rx_pending, (u32) ATL1_MIN_RFD);
	rfdr->count = rfdr->count > ATL1_MAX_RFD ? ATL1_MAX_RFD :
		rfdr->count;
	rfdr->count = (rfdr->count + 3) & ~3;
	rrdr->count = rfdr->count;

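	/*
	 * Note: like the RFD count above, the requested TPD count below is
	 * clamped to [ATL1_MIN_TPD, ATL1_MAX_TPD] and rounded up to a
	 * multiple of 4 by (count + 3) & ~3; e.g. a request for 101
	 * descriptors becomes 104.
	 */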
	tpdr->count = (u16) max(ring->tx_pending, (u32) ATL1_MIN_TPD);
	tpdr->count = tpdr->count > ATL1_MAX_TPD ? ATL1_MAX_TPD :
		tpdr->count;
	tpdr->count = (tpdr->count + 3) & ~3;

	if (netif_running(adapter->netdev)) {
		/* try to get new resources before deleting old */
		err = atl1_setup_ring_resources(adapter);
		if (err)
			goto err_setup_ring;

		/*
		 * save the new, restore the old in order to free it,
		 * then restore the new back again
		 */
		rfd_new = adapter->rfd_ring;
		rrd_new = adapter->rrd_ring;
		tpd_new = adapter->tpd_ring;
		rhdr_new = adapter->ring_header;
		adapter->rfd_ring = rfd_old;
		adapter->rrd_ring = rrd_old;
		adapter->tpd_ring = tpd_old;
		adapter->ring_header = rhdr_old;
		atl1_free_ring_resources(adapter);
		adapter->rfd_ring = rfd_new;
		adapter->rrd_ring = rrd_new;
		adapter->tpd_ring = tpd_new;
		adapter->ring_header = rhdr_new;

		err = atl1_up(adapter);
		if (err)
			return err;
	}
	return 0;

err_setup_ring:
	adapter->rfd_ring = rfd_old;
	adapter->rrd_ring = rrd_old;
	adapter->tpd_ring = tpd_old;
	adapter->ring_header = rhdr_old;
	atl1_up(adapter);
	return err;
}

static void atl1_get_pauseparam(struct net_device *netdev,
	struct ethtool_pauseparam *epause)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;

	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
	    hw->media_type == MEDIA_TYPE_1000M_FULL)
		epause->autoneg = AUTONEG_ENABLE;
	else
		epause->autoneg = AUTONEG_DISABLE;
	epause->rx_pause = 1;
	epause->tx_pause = 1;
}

static int atl1_set_pauseparam(struct net_device *netdev,
	struct ethtool_pauseparam *epause)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;

	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
	    hw->media_type == MEDIA_TYPE_1000M_FULL)
		epause->autoneg = AUTONEG_ENABLE;
	else
		epause->autoneg = AUTONEG_DISABLE;

	epause->rx_pause = 1;
	epause->tx_pause = 1;

	return 0;
}

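/*
 * Note: as written, atl1_set_pauseparam() above only reports the current
 * autoneg state back through *epause and always claims rx/tx pause are
 * enabled; the requested pause settings do not appear to be programmed
 * into the hardware here.
 */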
/* FIXME: is this right? -- CHS */
static u32 atl1_get_rx_csum(struct net_device *netdev)
{
	return 1;
}

static void atl1_get_strings(struct net_device *netdev, u32 stringset,
	u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
			memcpy(p, atl1_gstrings_stats[i].stat_string,
				ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int atl1_nway_reset(struct net_device *netdev)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;

	if (netif_running(netdev)) {
		u16 phy_data;

		atl1_down(adapter);

		if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
		    hw->media_type == MEDIA_TYPE_1000M_FULL) {
			phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
		} else {
			switch (hw->media_type) {
			case MEDIA_TYPE_100M_FULL:
				phy_data = MII_CR_FULL_DUPLEX |
					MII_CR_SPEED_100 | MII_CR_RESET;
				break;
			case MEDIA_TYPE_100M_HALF:
				phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
				break;
			case MEDIA_TYPE_10M_FULL:
				phy_data = MII_CR_FULL_DUPLEX |
					MII_CR_SPEED_10 | MII_CR_RESET;
				break;
			default:
				/* MEDIA_TYPE_10M_HALF */
				phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
			}
		}
		atl1_write_phy_reg(hw, MII_BMCR, phy_data);
		atl1_up(adapter);
	}
	return 0;
}

const struct ethtool_ops atl1_ethtool_ops = {
	.get_settings = atl1_get_settings,
	.set_settings = atl1_set_settings,
	.get_drvinfo = atl1_get_drvinfo,
	.get_wol = atl1_get_wol,
	.set_wol = atl1_set_wol,
	.get_msglevel = atl1_get_msglevel,
	.set_msglevel = atl1_set_msglevel,
	.get_regs_len = atl1_get_regs_len,
	.get_regs = atl1_get_regs,
	.get_ringparam = atl1_get_ringparam,
	.set_ringparam = atl1_set_ringparam,
	.get_pauseparam = atl1_get_pauseparam,
	.set_pauseparam = atl1_set_pauseparam,
	.get_rx_csum = atl1_get_rx_csum,
	.set_tx_csum = ethtool_op_set_tx_hw_csum,
	.get_link = ethtool_op_get_link,
	.set_sg = ethtool_op_set_sg,
	.get_strings = atl1_get_strings,
	.nway_reset = atl1_nway_reset,
	.get_ethtool_stats = atl1_get_ethtool_stats,
	.get_sset_count = atl1_get_sset_count,
	.set_tso = ethtool_op_set_tso,
};