Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.29, 2032 lines, 60 kB
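The file below is the ethtool back-end of the igb driver for Intel 82575/82576 gigabit controllers as shipped in v2.6.29. Its handlers implement the familiar ethtool operations: ethtool -S reads the counters named in igb_gstrings_stats, ethtool -d dumps registers through igb_get_regs(), ethtool -e / -E read and write the NVM/EEPROM, ethtool -t <dev> offline runs the register, EEPROM, interrupt, loopback and link self-tests, and -g/-G, -a/-A, -c/-C cover ring sizes, pause parameters and interrupt coalescing.

A minimal sketch (not part of the file; example_fill_stats is a hypothetical name) of how the <size, offset> pairs built by IGB_STAT() are typically consumed; igb_get_ethtool_stats() at the end of the file performs the equivalent walk over struct igb_adapter:

static void example_fill_stats(struct igb_adapter *adapter, u64 *data)
{
	int i;

	for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
		/* stat_offset points into struct igb_adapter; sizeof_stat
		 * says whether the field is a u32 or a u64 counter */
		char *p = (char *)adapter + igb_gstrings_stats[i].stat_offset;

		data[i] = (igb_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
}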
/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/* ethtool support for igb */

#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/ethtool.h>

#include "igb.h"

struct igb_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define IGB_STAT(m) FIELD_SIZEOF(struct igb_adapter, m), \
		      offsetof(struct igb_adapter, m)
static const struct igb_stats igb_gstrings_stats[] = {
	{ "rx_packets", IGB_STAT(stats.gprc) },
	{ "tx_packets", IGB_STAT(stats.gptc) },
	{ "rx_bytes", IGB_STAT(stats.gorc) },
	{ "tx_bytes", IGB_STAT(stats.gotc) },
	{ "rx_broadcast", IGB_STAT(stats.bprc) },
	{ "tx_broadcast", IGB_STAT(stats.bptc) },
	{ "rx_multicast", IGB_STAT(stats.mprc) },
	{ "tx_multicast", IGB_STAT(stats.mptc) },
	{ "rx_errors", IGB_STAT(net_stats.rx_errors) },
	{ "tx_errors", IGB_STAT(net_stats.tx_errors) },
	{ "tx_dropped", IGB_STAT(net_stats.tx_dropped) },
	{ "multicast", IGB_STAT(stats.mprc) },
	{ "collisions", IGB_STAT(stats.colc) },
	{ "rx_length_errors", IGB_STAT(net_stats.rx_length_errors) },
	{ "rx_over_errors", IGB_STAT(net_stats.rx_over_errors) },
	{ "rx_crc_errors", IGB_STAT(stats.crcerrs) },
	{ "rx_frame_errors", IGB_STAT(net_stats.rx_frame_errors) },
	{ "rx_no_buffer_count", IGB_STAT(stats.rnbc) },
	{ "rx_missed_errors", IGB_STAT(stats.mpc) },
	{ "tx_aborted_errors", IGB_STAT(stats.ecol) },
	{ "tx_carrier_errors", IGB_STAT(stats.tncrs) },
	{ "tx_fifo_errors", IGB_STAT(net_stats.tx_fifo_errors) },
	{ "tx_heartbeat_errors", IGB_STAT(net_stats.tx_heartbeat_errors) },
	{ "tx_window_errors", IGB_STAT(stats.latecol) },
	{ "tx_abort_late_coll", IGB_STAT(stats.latecol) },
	{ "tx_deferred_ok", IGB_STAT(stats.dc) },
	{ "tx_single_coll_ok", IGB_STAT(stats.scc) },
	{ "tx_multi_coll_ok", IGB_STAT(stats.mcc) },
	{ "tx_timeout_count", IGB_STAT(tx_timeout_count) },
	{ "tx_restart_queue", IGB_STAT(restart_queue) },
	{ "rx_long_length_errors", IGB_STAT(stats.roc) },
	{ "rx_short_length_errors", IGB_STAT(stats.ruc) },
	{ "rx_align_errors", IGB_STAT(stats.algnerrc) },
	{ "tx_tcp_seg_good", IGB_STAT(stats.tsctc) },
	{ "tx_tcp_seg_failed", IGB_STAT(stats.tsctfc) },
	{ "rx_flow_control_xon", IGB_STAT(stats.xonrxc) },
	{
"rx_flow_control_xoff", IGB_STAT(stats.xoffrxc) }, 86 { "tx_flow_control_xon", IGB_STAT(stats.xontxc) }, 87 { "tx_flow_control_xoff", IGB_STAT(stats.xofftxc) }, 88 { "rx_long_byte_count", IGB_STAT(stats.gorc) }, 89 { "rx_csum_offload_good", IGB_STAT(hw_csum_good) }, 90 { "rx_csum_offload_errors", IGB_STAT(hw_csum_err) }, 91 { "rx_header_split", IGB_STAT(rx_hdr_split) }, 92 { "alloc_rx_buff_failed", IGB_STAT(alloc_rx_buff_failed) }, 93 { "tx_smbus", IGB_STAT(stats.mgptc) }, 94 { "rx_smbus", IGB_STAT(stats.mgprc) }, 95 { "dropped_smbus", IGB_STAT(stats.mgpdc) }, 96#ifdef CONFIG_IGB_LRO 97 { "lro_aggregated", IGB_STAT(lro_aggregated) }, 98 { "lro_flushed", IGB_STAT(lro_flushed) }, 99 { "lro_no_desc", IGB_STAT(lro_no_desc) }, 100#endif 101}; 102 103#define IGB_QUEUE_STATS_LEN \ 104 ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues + \ 105 ((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues) * \ 106 (sizeof(struct igb_queue_stats) / sizeof(u64))) 107#define IGB_GLOBAL_STATS_LEN \ 108 sizeof(igb_gstrings_stats) / sizeof(struct igb_stats) 109#define IGB_STATS_LEN (IGB_GLOBAL_STATS_LEN + IGB_QUEUE_STATS_LEN) 110static const char igb_gstrings_test[][ETH_GSTRING_LEN] = { 111 "Register test (offline)", "Eeprom test (offline)", 112 "Interrupt test (offline)", "Loopback test (offline)", 113 "Link test (on/offline)" 114}; 115#define IGB_TEST_LEN sizeof(igb_gstrings_test) / ETH_GSTRING_LEN 116 117static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) 118{ 119 struct igb_adapter *adapter = netdev_priv(netdev); 120 struct e1000_hw *hw = &adapter->hw; 121 122 if (hw->phy.media_type == e1000_media_type_copper) { 123 124 ecmd->supported = (SUPPORTED_10baseT_Half | 125 SUPPORTED_10baseT_Full | 126 SUPPORTED_100baseT_Half | 127 SUPPORTED_100baseT_Full | 128 SUPPORTED_1000baseT_Full| 129 SUPPORTED_Autoneg | 130 SUPPORTED_TP); 131 ecmd->advertising = ADVERTISED_TP; 132 133 if (hw->mac.autoneg == 1) { 134 ecmd->advertising |= ADVERTISED_Autoneg; 135 /* the e1000 autoneg seems to match ethtool nicely */ 136 ecmd->advertising |= hw->phy.autoneg_advertised; 137 } 138 139 ecmd->port = PORT_TP; 140 ecmd->phy_address = hw->phy.addr; 141 } else { 142 ecmd->supported = (SUPPORTED_1000baseT_Full | 143 SUPPORTED_FIBRE | 144 SUPPORTED_Autoneg); 145 146 ecmd->advertising = (ADVERTISED_1000baseT_Full | 147 ADVERTISED_FIBRE | 148 ADVERTISED_Autoneg); 149 150 ecmd->port = PORT_FIBRE; 151 } 152 153 ecmd->transceiver = XCVR_INTERNAL; 154 155 if (rd32(E1000_STATUS) & E1000_STATUS_LU) { 156 157 adapter->hw.mac.ops.get_speed_and_duplex(hw, 158 &adapter->link_speed, 159 &adapter->link_duplex); 160 ecmd->speed = adapter->link_speed; 161 162 /* unfortunately FULL_DUPLEX != DUPLEX_FULL 163 * and HALF_DUPLEX != DUPLEX_HALF */ 164 165 if (adapter->link_duplex == FULL_DUPLEX) 166 ecmd->duplex = DUPLEX_FULL; 167 else 168 ecmd->duplex = DUPLEX_HALF; 169 } else { 170 ecmd->speed = -1; 171 ecmd->duplex = -1; 172 } 173 174 ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) || 175 hw->mac.autoneg) ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; 176 return 0; 177} 178 179static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) 180{ 181 struct igb_adapter *adapter = netdev_priv(netdev); 182 struct e1000_hw *hw = &adapter->hw; 183 184 /* When SoL/IDER sessions are active, autoneg/speed/duplex 185 * cannot be changed */ 186 if (igb_check_reset_block(hw)) { 187 dev_err(&adapter->pdev->dev, "Cannot change link " 188 "characteristics when SoL/IDER is active.\n"); 189 return -EINVAL; 190 } 191 192 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) 193 msleep(1); 194 195 if (ecmd->autoneg == AUTONEG_ENABLE) { 196 hw->mac.autoneg = 1; 197 if (hw->phy.media_type == e1000_media_type_fiber) 198 hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full | 199 ADVERTISED_FIBRE | 200 ADVERTISED_Autoneg; 201 else 202 hw->phy.autoneg_advertised = ecmd->advertising | 203 ADVERTISED_TP | 204 ADVERTISED_Autoneg; 205 ecmd->advertising = hw->phy.autoneg_advertised; 206 } else 207 if (igb_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) { 208 clear_bit(__IGB_RESETTING, &adapter->state); 209 return -EINVAL; 210 } 211 212 /* reset the link */ 213 214 if (netif_running(adapter->netdev)) { 215 igb_down(adapter); 216 igb_up(adapter); 217 } else 218 igb_reset(adapter); 219 220 clear_bit(__IGB_RESETTING, &adapter->state); 221 return 0; 222} 223 224static void igb_get_pauseparam(struct net_device *netdev, 225 struct ethtool_pauseparam *pause) 226{ 227 struct igb_adapter *adapter = netdev_priv(netdev); 228 struct e1000_hw *hw = &adapter->hw; 229 230 pause->autoneg = 231 (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); 232 233 if (hw->fc.type == e1000_fc_rx_pause) 234 pause->rx_pause = 1; 235 else if (hw->fc.type == e1000_fc_tx_pause) 236 pause->tx_pause = 1; 237 else if (hw->fc.type == e1000_fc_full) { 238 pause->rx_pause = 1; 239 pause->tx_pause = 1; 240 } 241} 242 243static int igb_set_pauseparam(struct net_device *netdev, 244 struct ethtool_pauseparam *pause) 245{ 246 struct igb_adapter *adapter = netdev_priv(netdev); 247 struct e1000_hw *hw = &adapter->hw; 248 int retval = 0; 249 250 adapter->fc_autoneg = pause->autoneg; 251 252 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) 253 msleep(1); 254 255 if (pause->rx_pause && pause->tx_pause) 256 hw->fc.type = e1000_fc_full; 257 else if (pause->rx_pause && !pause->tx_pause) 258 hw->fc.type = e1000_fc_rx_pause; 259 else if (!pause->rx_pause && pause->tx_pause) 260 hw->fc.type = e1000_fc_tx_pause; 261 else if (!pause->rx_pause && !pause->tx_pause) 262 hw->fc.type = e1000_fc_none; 263 264 hw->fc.original_type = hw->fc.type; 265 266 if (adapter->fc_autoneg == AUTONEG_ENABLE) { 267 if (netif_running(adapter->netdev)) { 268 igb_down(adapter); 269 igb_up(adapter); 270 } else 271 igb_reset(adapter); 272 } else 273 retval = ((hw->phy.media_type == e1000_media_type_fiber) ? 
274 igb_setup_link(hw) : igb_force_mac_fc(hw)); 275 276 clear_bit(__IGB_RESETTING, &adapter->state); 277 return retval; 278} 279 280static u32 igb_get_rx_csum(struct net_device *netdev) 281{ 282 struct igb_adapter *adapter = netdev_priv(netdev); 283 return adapter->rx_csum; 284} 285 286static int igb_set_rx_csum(struct net_device *netdev, u32 data) 287{ 288 struct igb_adapter *adapter = netdev_priv(netdev); 289 adapter->rx_csum = data; 290 291 return 0; 292} 293 294static u32 igb_get_tx_csum(struct net_device *netdev) 295{ 296 return (netdev->features & NETIF_F_HW_CSUM) != 0; 297} 298 299static int igb_set_tx_csum(struct net_device *netdev, u32 data) 300{ 301 if (data) 302 netdev->features |= NETIF_F_HW_CSUM; 303 else 304 netdev->features &= ~NETIF_F_HW_CSUM; 305 306 return 0; 307} 308 309static int igb_set_tso(struct net_device *netdev, u32 data) 310{ 311 struct igb_adapter *adapter = netdev_priv(netdev); 312 313 if (data) 314 netdev->features |= NETIF_F_TSO; 315 else 316 netdev->features &= ~NETIF_F_TSO; 317 318 if (data) 319 netdev->features |= NETIF_F_TSO6; 320 else 321 netdev->features &= ~NETIF_F_TSO6; 322 323 dev_info(&adapter->pdev->dev, "TSO is %s\n", 324 data ? "Enabled" : "Disabled"); 325 return 0; 326} 327 328static u32 igb_get_msglevel(struct net_device *netdev) 329{ 330 struct igb_adapter *adapter = netdev_priv(netdev); 331 return adapter->msg_enable; 332} 333 334static void igb_set_msglevel(struct net_device *netdev, u32 data) 335{ 336 struct igb_adapter *adapter = netdev_priv(netdev); 337 adapter->msg_enable = data; 338} 339 340static int igb_get_regs_len(struct net_device *netdev) 341{ 342#define IGB_REGS_LEN 551 343 return IGB_REGS_LEN * sizeof(u32); 344} 345 346static void igb_get_regs(struct net_device *netdev, 347 struct ethtool_regs *regs, void *p) 348{ 349 struct igb_adapter *adapter = netdev_priv(netdev); 350 struct e1000_hw *hw = &adapter->hw; 351 u32 *regs_buff = p; 352 u8 i; 353 354 memset(p, 0, IGB_REGS_LEN * sizeof(u32)); 355 356 regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; 357 358 /* General Registers */ 359 regs_buff[0] = rd32(E1000_CTRL); 360 regs_buff[1] = rd32(E1000_STATUS); 361 regs_buff[2] = rd32(E1000_CTRL_EXT); 362 regs_buff[3] = rd32(E1000_MDIC); 363 regs_buff[4] = rd32(E1000_SCTL); 364 regs_buff[5] = rd32(E1000_CONNSW); 365 regs_buff[6] = rd32(E1000_VET); 366 regs_buff[7] = rd32(E1000_LEDCTL); 367 regs_buff[8] = rd32(E1000_PBA); 368 regs_buff[9] = rd32(E1000_PBS); 369 regs_buff[10] = rd32(E1000_FRTIMER); 370 regs_buff[11] = rd32(E1000_TCPTIMER); 371 372 /* NVM Register */ 373 regs_buff[12] = rd32(E1000_EECD); 374 375 /* Interrupt */ 376 /* Reading EICS for EICR because they read the 377 * same but EICS does not clear on read */ 378 regs_buff[13] = rd32(E1000_EICS); 379 regs_buff[14] = rd32(E1000_EICS); 380 regs_buff[15] = rd32(E1000_EIMS); 381 regs_buff[16] = rd32(E1000_EIMC); 382 regs_buff[17] = rd32(E1000_EIAC); 383 regs_buff[18] = rd32(E1000_EIAM); 384 /* Reading ICS for ICR because they read the 385 * same but ICS does not clear on read */ 386 regs_buff[19] = rd32(E1000_ICS); 387 regs_buff[20] = rd32(E1000_ICS); 388 regs_buff[21] = rd32(E1000_IMS); 389 regs_buff[22] = rd32(E1000_IMC); 390 regs_buff[23] = rd32(E1000_IAC); 391 regs_buff[24] = rd32(E1000_IAM); 392 regs_buff[25] = rd32(E1000_IMIRVP); 393 394 /* Flow Control */ 395 regs_buff[26] = rd32(E1000_FCAL); 396 regs_buff[27] = rd32(E1000_FCAH); 397 regs_buff[28] = rd32(E1000_FCTTV); 398 regs_buff[29] = rd32(E1000_FCRTL); 399 regs_buff[30] = rd32(E1000_FCRTH); 400 
regs_buff[31] = rd32(E1000_FCRTV); 401 402 /* Receive */ 403 regs_buff[32] = rd32(E1000_RCTL); 404 regs_buff[33] = rd32(E1000_RXCSUM); 405 regs_buff[34] = rd32(E1000_RLPML); 406 regs_buff[35] = rd32(E1000_RFCTL); 407 regs_buff[36] = rd32(E1000_MRQC); 408 regs_buff[37] = rd32(E1000_VMD_CTL); 409 410 /* Transmit */ 411 regs_buff[38] = rd32(E1000_TCTL); 412 regs_buff[39] = rd32(E1000_TCTL_EXT); 413 regs_buff[40] = rd32(E1000_TIPG); 414 regs_buff[41] = rd32(E1000_DTXCTL); 415 416 /* Wake Up */ 417 regs_buff[42] = rd32(E1000_WUC); 418 regs_buff[43] = rd32(E1000_WUFC); 419 regs_buff[44] = rd32(E1000_WUS); 420 regs_buff[45] = rd32(E1000_IPAV); 421 regs_buff[46] = rd32(E1000_WUPL); 422 423 /* MAC */ 424 regs_buff[47] = rd32(E1000_PCS_CFG0); 425 regs_buff[48] = rd32(E1000_PCS_LCTL); 426 regs_buff[49] = rd32(E1000_PCS_LSTAT); 427 regs_buff[50] = rd32(E1000_PCS_ANADV); 428 regs_buff[51] = rd32(E1000_PCS_LPAB); 429 regs_buff[52] = rd32(E1000_PCS_NPTX); 430 regs_buff[53] = rd32(E1000_PCS_LPABNP); 431 432 /* Statistics */ 433 regs_buff[54] = adapter->stats.crcerrs; 434 regs_buff[55] = adapter->stats.algnerrc; 435 regs_buff[56] = adapter->stats.symerrs; 436 regs_buff[57] = adapter->stats.rxerrc; 437 regs_buff[58] = adapter->stats.mpc; 438 regs_buff[59] = adapter->stats.scc; 439 regs_buff[60] = adapter->stats.ecol; 440 regs_buff[61] = adapter->stats.mcc; 441 regs_buff[62] = adapter->stats.latecol; 442 regs_buff[63] = adapter->stats.colc; 443 regs_buff[64] = adapter->stats.dc; 444 regs_buff[65] = adapter->stats.tncrs; 445 regs_buff[66] = adapter->stats.sec; 446 regs_buff[67] = adapter->stats.htdpmc; 447 regs_buff[68] = adapter->stats.rlec; 448 regs_buff[69] = adapter->stats.xonrxc; 449 regs_buff[70] = adapter->stats.xontxc; 450 regs_buff[71] = adapter->stats.xoffrxc; 451 regs_buff[72] = adapter->stats.xofftxc; 452 regs_buff[73] = adapter->stats.fcruc; 453 regs_buff[74] = adapter->stats.prc64; 454 regs_buff[75] = adapter->stats.prc127; 455 regs_buff[76] = adapter->stats.prc255; 456 regs_buff[77] = adapter->stats.prc511; 457 regs_buff[78] = adapter->stats.prc1023; 458 regs_buff[79] = adapter->stats.prc1522; 459 regs_buff[80] = adapter->stats.gprc; 460 regs_buff[81] = adapter->stats.bprc; 461 regs_buff[82] = adapter->stats.mprc; 462 regs_buff[83] = adapter->stats.gptc; 463 regs_buff[84] = adapter->stats.gorc; 464 regs_buff[86] = adapter->stats.gotc; 465 regs_buff[88] = adapter->stats.rnbc; 466 regs_buff[89] = adapter->stats.ruc; 467 regs_buff[90] = adapter->stats.rfc; 468 regs_buff[91] = adapter->stats.roc; 469 regs_buff[92] = adapter->stats.rjc; 470 regs_buff[93] = adapter->stats.mgprc; 471 regs_buff[94] = adapter->stats.mgpdc; 472 regs_buff[95] = adapter->stats.mgptc; 473 regs_buff[96] = adapter->stats.tor; 474 regs_buff[98] = adapter->stats.tot; 475 regs_buff[100] = adapter->stats.tpr; 476 regs_buff[101] = adapter->stats.tpt; 477 regs_buff[102] = adapter->stats.ptc64; 478 regs_buff[103] = adapter->stats.ptc127; 479 regs_buff[104] = adapter->stats.ptc255; 480 regs_buff[105] = adapter->stats.ptc511; 481 regs_buff[106] = adapter->stats.ptc1023; 482 regs_buff[107] = adapter->stats.ptc1522; 483 regs_buff[108] = adapter->stats.mptc; 484 regs_buff[109] = adapter->stats.bptc; 485 regs_buff[110] = adapter->stats.tsctc; 486 regs_buff[111] = adapter->stats.iac; 487 regs_buff[112] = adapter->stats.rpthc; 488 regs_buff[113] = adapter->stats.hgptc; 489 regs_buff[114] = adapter->stats.hgorc; 490 regs_buff[116] = adapter->stats.hgotc; 491 regs_buff[118] = adapter->stats.lenerrs; 492 regs_buff[119] = adapter->stats.scvpc; 
493 regs_buff[120] = adapter->stats.hrmpc; 494 495 /* These should probably be added to e1000_regs.h instead */ 496 #define E1000_PSRTYPE_REG(_i) (0x05480 + ((_i) * 4)) 497 #define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) 498 #define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) 499 #define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) 500 #define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8)) 501 #define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8)) 502 #define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8)) 503 504 for (i = 0; i < 4; i++) 505 regs_buff[121 + i] = rd32(E1000_SRRCTL(i)); 506 for (i = 0; i < 4; i++) 507 regs_buff[125 + i] = rd32(E1000_PSRTYPE_REG(i)); 508 for (i = 0; i < 4; i++) 509 regs_buff[129 + i] = rd32(E1000_RDBAL(i)); 510 for (i = 0; i < 4; i++) 511 regs_buff[133 + i] = rd32(E1000_RDBAH(i)); 512 for (i = 0; i < 4; i++) 513 regs_buff[137 + i] = rd32(E1000_RDLEN(i)); 514 for (i = 0; i < 4; i++) 515 regs_buff[141 + i] = rd32(E1000_RDH(i)); 516 for (i = 0; i < 4; i++) 517 regs_buff[145 + i] = rd32(E1000_RDT(i)); 518 for (i = 0; i < 4; i++) 519 regs_buff[149 + i] = rd32(E1000_RXDCTL(i)); 520 521 for (i = 0; i < 10; i++) 522 regs_buff[153 + i] = rd32(E1000_EITR(i)); 523 for (i = 0; i < 8; i++) 524 regs_buff[163 + i] = rd32(E1000_IMIR(i)); 525 for (i = 0; i < 8; i++) 526 regs_buff[171 + i] = rd32(E1000_IMIREXT(i)); 527 for (i = 0; i < 16; i++) 528 regs_buff[179 + i] = rd32(E1000_RAL(i)); 529 for (i = 0; i < 16; i++) 530 regs_buff[195 + i] = rd32(E1000_RAH(i)); 531 532 for (i = 0; i < 4; i++) 533 regs_buff[211 + i] = rd32(E1000_TDBAL(i)); 534 for (i = 0; i < 4; i++) 535 regs_buff[215 + i] = rd32(E1000_TDBAH(i)); 536 for (i = 0; i < 4; i++) 537 regs_buff[219 + i] = rd32(E1000_TDLEN(i)); 538 for (i = 0; i < 4; i++) 539 regs_buff[223 + i] = rd32(E1000_TDH(i)); 540 for (i = 0; i < 4; i++) 541 regs_buff[227 + i] = rd32(E1000_TDT(i)); 542 for (i = 0; i < 4; i++) 543 regs_buff[231 + i] = rd32(E1000_TXDCTL(i)); 544 for (i = 0; i < 4; i++) 545 regs_buff[235 + i] = rd32(E1000_TDWBAL(i)); 546 for (i = 0; i < 4; i++) 547 regs_buff[239 + i] = rd32(E1000_TDWBAH(i)); 548 for (i = 0; i < 4; i++) 549 regs_buff[243 + i] = rd32(E1000_DCA_TXCTRL(i)); 550 551 for (i = 0; i < 4; i++) 552 regs_buff[247 + i] = rd32(E1000_IP4AT_REG(i)); 553 for (i = 0; i < 4; i++) 554 regs_buff[251 + i] = rd32(E1000_IP6AT_REG(i)); 555 for (i = 0; i < 32; i++) 556 regs_buff[255 + i] = rd32(E1000_WUPM_REG(i)); 557 for (i = 0; i < 128; i++) 558 regs_buff[287 + i] = rd32(E1000_FFMT_REG(i)); 559 for (i = 0; i < 128; i++) 560 regs_buff[415 + i] = rd32(E1000_FFVT_REG(i)); 561 for (i = 0; i < 4; i++) 562 regs_buff[543 + i] = rd32(E1000_FFLT_REG(i)); 563 564 regs_buff[547] = rd32(E1000_TDFH); 565 regs_buff[548] = rd32(E1000_TDFT); 566 regs_buff[549] = rd32(E1000_TDFHS); 567 regs_buff[550] = rd32(E1000_TDFPC); 568 569} 570 571static int igb_get_eeprom_len(struct net_device *netdev) 572{ 573 struct igb_adapter *adapter = netdev_priv(netdev); 574 return adapter->hw.nvm.word_size * 2; 575} 576 577static int igb_get_eeprom(struct net_device *netdev, 578 struct ethtool_eeprom *eeprom, u8 *bytes) 579{ 580 struct igb_adapter *adapter = netdev_priv(netdev); 581 struct e1000_hw *hw = &adapter->hw; 582 u16 *eeprom_buff; 583 int first_word, last_word; 584 int ret_val = 0; 585 u16 i; 586 587 if (eeprom->len == 0) 588 return -EINVAL; 589 590 eeprom->magic = hw->vendor_id | (hw->device_id << 16); 591 592 first_word = eeprom->offset >> 1; 593 last_word = (eeprom->offset + eeprom->len - 1) >> 1; 594 595 eeprom_buff = kmalloc(sizeof(u16) * 596 (last_word - 
first_word + 1), GFP_KERNEL); 597 if (!eeprom_buff) 598 return -ENOMEM; 599 600 if (hw->nvm.type == e1000_nvm_eeprom_spi) 601 ret_val = hw->nvm.ops.read_nvm(hw, first_word, 602 last_word - first_word + 1, 603 eeprom_buff); 604 else { 605 for (i = 0; i < last_word - first_word + 1; i++) { 606 ret_val = hw->nvm.ops.read_nvm(hw, first_word + i, 1, 607 &eeprom_buff[i]); 608 if (ret_val) 609 break; 610 } 611 } 612 613 /* Device's eeprom is always little-endian, word addressable */ 614 for (i = 0; i < last_word - first_word + 1; i++) 615 le16_to_cpus(&eeprom_buff[i]); 616 617 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), 618 eeprom->len); 619 kfree(eeprom_buff); 620 621 return ret_val; 622} 623 624static int igb_set_eeprom(struct net_device *netdev, 625 struct ethtool_eeprom *eeprom, u8 *bytes) 626{ 627 struct igb_adapter *adapter = netdev_priv(netdev); 628 struct e1000_hw *hw = &adapter->hw; 629 u16 *eeprom_buff; 630 void *ptr; 631 int max_len, first_word, last_word, ret_val = 0; 632 u16 i; 633 634 if (eeprom->len == 0) 635 return -EOPNOTSUPP; 636 637 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) 638 return -EFAULT; 639 640 max_len = hw->nvm.word_size * 2; 641 642 first_word = eeprom->offset >> 1; 643 last_word = (eeprom->offset + eeprom->len - 1) >> 1; 644 eeprom_buff = kmalloc(max_len, GFP_KERNEL); 645 if (!eeprom_buff) 646 return -ENOMEM; 647 648 ptr = (void *)eeprom_buff; 649 650 if (eeprom->offset & 1) { 651 /* need read/modify/write of first changed EEPROM word */ 652 /* only the second byte of the word is being modified */ 653 ret_val = hw->nvm.ops.read_nvm(hw, first_word, 1, 654 &eeprom_buff[0]); 655 ptr++; 656 } 657 if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { 658 /* need read/modify/write of last changed EEPROM word */ 659 /* only the first byte of the word is being modified */ 660 ret_val = hw->nvm.ops.read_nvm(hw, last_word, 1, 661 &eeprom_buff[last_word - first_word]); 662 } 663 664 /* Device's eeprom is always little-endian, word addressable */ 665 for (i = 0; i < last_word - first_word + 1; i++) 666 le16_to_cpus(&eeprom_buff[i]); 667 668 memcpy(ptr, bytes, eeprom->len); 669 670 for (i = 0; i < last_word - first_word + 1; i++) 671 eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); 672 673 ret_val = hw->nvm.ops.write_nvm(hw, first_word, 674 last_word - first_word + 1, eeprom_buff); 675 676 /* Update the checksum over the first part of the EEPROM if needed 677 * and flush shadow RAM for 82573 controllers */ 678 if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG))) 679 igb_update_nvm_checksum(hw); 680 681 kfree(eeprom_buff); 682 return ret_val; 683} 684 685static void igb_get_drvinfo(struct net_device *netdev, 686 struct ethtool_drvinfo *drvinfo) 687{ 688 struct igb_adapter *adapter = netdev_priv(netdev); 689 char firmware_version[32]; 690 u16 eeprom_data; 691 692 strncpy(drvinfo->driver, igb_driver_name, 32); 693 strncpy(drvinfo->version, igb_driver_version, 32); 694 695 /* EEPROM image version # is reported as firmware version # for 696 * 82575 controllers */ 697 adapter->hw.nvm.ops.read_nvm(&adapter->hw, 5, 1, &eeprom_data); 698 sprintf(firmware_version, "%d.%d-%d", 699 (eeprom_data & 0xF000) >> 12, 700 (eeprom_data & 0x0FF0) >> 4, 701 eeprom_data & 0x000F); 702 703 strncpy(drvinfo->fw_version, firmware_version, 32); 704 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); 705 drvinfo->n_stats = IGB_STATS_LEN; 706 drvinfo->testinfo_len = IGB_TEST_LEN; 707 drvinfo->regdump_len = igb_get_regs_len(netdev); 708 drvinfo->eedump_len = 
igb_get_eeprom_len(netdev); 709} 710 711static void igb_get_ringparam(struct net_device *netdev, 712 struct ethtool_ringparam *ring) 713{ 714 struct igb_adapter *adapter = netdev_priv(netdev); 715 716 ring->rx_max_pending = IGB_MAX_RXD; 717 ring->tx_max_pending = IGB_MAX_TXD; 718 ring->rx_mini_max_pending = 0; 719 ring->rx_jumbo_max_pending = 0; 720 ring->rx_pending = adapter->rx_ring_count; 721 ring->tx_pending = adapter->tx_ring_count; 722 ring->rx_mini_pending = 0; 723 ring->rx_jumbo_pending = 0; 724} 725 726static int igb_set_ringparam(struct net_device *netdev, 727 struct ethtool_ringparam *ring) 728{ 729 struct igb_adapter *adapter = netdev_priv(netdev); 730 struct igb_ring *temp_ring; 731 int i, err; 732 u32 new_rx_count, new_tx_count; 733 734 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 735 return -EINVAL; 736 737 new_rx_count = max(ring->rx_pending, (u32)IGB_MIN_RXD); 738 new_rx_count = min(new_rx_count, (u32)IGB_MAX_RXD); 739 new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); 740 741 new_tx_count = max(ring->tx_pending, (u32)IGB_MIN_TXD); 742 new_tx_count = min(new_tx_count, (u32)IGB_MAX_TXD); 743 new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); 744 745 if ((new_tx_count == adapter->tx_ring_count) && 746 (new_rx_count == adapter->rx_ring_count)) { 747 /* nothing to do */ 748 return 0; 749 } 750 751 if (adapter->num_tx_queues > adapter->num_rx_queues) 752 temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring)); 753 else 754 temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring)); 755 if (!temp_ring) 756 return -ENOMEM; 757 758 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) 759 msleep(1); 760 761 if (netif_running(adapter->netdev)) 762 igb_down(adapter); 763 764 /* 765 * We can't just free everything and then setup again, 766 * because the ISRs in MSI-X mode get passed pointers 767 * to the tx and rx ring structs. 
768 */ 769 if (new_tx_count != adapter->tx_ring_count) { 770 memcpy(temp_ring, adapter->tx_ring, 771 adapter->num_tx_queues * sizeof(struct igb_ring)); 772 773 for (i = 0; i < adapter->num_tx_queues; i++) { 774 temp_ring[i].count = new_tx_count; 775 err = igb_setup_tx_resources(adapter, &temp_ring[i]); 776 if (err) { 777 while (i) { 778 i--; 779 igb_free_tx_resources(&temp_ring[i]); 780 } 781 goto err_setup; 782 } 783 } 784 785 for (i = 0; i < adapter->num_tx_queues; i++) 786 igb_free_tx_resources(&adapter->tx_ring[i]); 787 788 memcpy(adapter->tx_ring, temp_ring, 789 adapter->num_tx_queues * sizeof(struct igb_ring)); 790 791 adapter->tx_ring_count = new_tx_count; 792 } 793 794 if (new_rx_count != adapter->rx_ring->count) { 795 memcpy(temp_ring, adapter->rx_ring, 796 adapter->num_rx_queues * sizeof(struct igb_ring)); 797 798 for (i = 0; i < adapter->num_rx_queues; i++) { 799 temp_ring[i].count = new_rx_count; 800 err = igb_setup_rx_resources(adapter, &temp_ring[i]); 801 if (err) { 802 while (i) { 803 i--; 804 igb_free_rx_resources(&temp_ring[i]); 805 } 806 goto err_setup; 807 } 808 809 } 810 811 for (i = 0; i < adapter->num_rx_queues; i++) 812 igb_free_rx_resources(&adapter->rx_ring[i]); 813 814 memcpy(adapter->rx_ring, temp_ring, 815 adapter->num_rx_queues * sizeof(struct igb_ring)); 816 817 adapter->rx_ring_count = new_rx_count; 818 } 819 820 err = 0; 821err_setup: 822 if (netif_running(adapter->netdev)) 823 igb_up(adapter); 824 825 clear_bit(__IGB_RESETTING, &adapter->state); 826 vfree(temp_ring); 827 return err; 828} 829 830/* ethtool register test data */ 831struct igb_reg_test { 832 u16 reg; 833 u16 reg_offset; 834 u16 array_len; 835 u16 test_type; 836 u32 mask; 837 u32 write; 838}; 839 840/* In the hardware, registers are laid out either singly, in arrays 841 * spaced 0x100 bytes apart, or in contiguous tables. We assume 842 * most tests take place on arrays or single registers (handled 843 * as a single-element array) and special-case the tables. 844 * Table tests are always pattern tests. 845 * 846 * We also make provision for some required setup steps by specifying 847 * registers to be written without any read-back testing. 848 */ 849 850#define PATTERN_TEST 1 851#define SET_READ_TEST 2 852#define WRITE_NO_TEST 3 853#define TABLE32_TEST 4 854#define TABLE64_TEST_LO 5 855#define TABLE64_TEST_HI 6 856 857/* 82576 reg test */ 858static struct igb_reg_test reg_test_82576[] = { 859 { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 860 { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, 861 { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, 862 { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 863 { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 864 { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 865 { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, 866 { E1000_RDBAL(4), 0x40, 8, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 867 { E1000_RDBAH(4), 0x40, 8, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 868 { E1000_RDLEN(4), 0x40, 8, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, 869 /* Enable all four RX queues before testing. */ 870 { E1000_RXDCTL(0), 0x100, 1, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE }, 871 /* RDH is read-only for 82576, only test RDT. 
*/ 872 { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 873 { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, 874 { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, 875 { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 876 { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, 877 { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 878 { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 879 { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, 880 { E1000_TDBAL(4), 0x40, 8, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 881 { E1000_TDBAH(4), 0x40, 8, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 882 { E1000_TDLEN(4), 0x40, 8, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, 883 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, 884 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, 885 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, 886 { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, 887 { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, 888 { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, 889 { E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, 890 { E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, 891 { E1000_MTA, 0, 128,TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 892 { 0, 0, 0, 0 } 893}; 894 895/* 82575 register test */ 896static struct igb_reg_test reg_test_82575[] = { 897 { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 898 { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, 899 { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, 900 { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 901 { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 902 { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 903 { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 904 /* Enable all four RX queues before testing. */ 905 { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE }, 906 /* RDH is read-only for 82575, only test RDT. 
*/ 907 { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 908 { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, 909 { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, 910 { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 911 { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, 912 { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 913 { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 914 { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 915 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, 916 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB }, 917 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF }, 918 { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, 919 { E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF }, 920 { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, 921 { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x800FFFFF, 0xFFFFFFFF }, 922 { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 923 { 0, 0, 0, 0 } 924}; 925 926static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data, 927 int reg, u32 mask, u32 write) 928{ 929 u32 pat, val; 930 u32 _test[] = 931 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; 932 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { 933 writel((_test[pat] & write), (adapter->hw.hw_addr + reg)); 934 val = readl(adapter->hw.hw_addr + reg); 935 if (val != (_test[pat] & write & mask)) { 936 dev_err(&adapter->pdev->dev, "pattern test reg %04X " 937 "failed: got 0x%08X expected 0x%08X\n", 938 reg, val, (_test[pat] & write & mask)); 939 *data = reg; 940 return 1; 941 } 942 } 943 return 0; 944} 945 946static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data, 947 int reg, u32 mask, u32 write) 948{ 949 u32 val; 950 writel((write & mask), (adapter->hw.hw_addr + reg)); 951 val = readl(adapter->hw.hw_addr + reg); 952 if ((write & mask) != (val & mask)) { 953 dev_err(&adapter->pdev->dev, "set/check reg %04X test failed:" 954 " got 0x%08X expected 0x%08X\n", reg, 955 (val & mask), (write & mask)); 956 *data = reg; 957 return 1; 958 } 959 return 0; 960} 961 962#define REG_PATTERN_TEST(reg, mask, write) \ 963 do { \ 964 if (reg_pattern_test(adapter, data, reg, mask, write)) \ 965 return 1; \ 966 } while (0) 967 968#define REG_SET_AND_CHECK(reg, mask, write) \ 969 do { \ 970 if (reg_set_and_check(adapter, data, reg, mask, write)) \ 971 return 1; \ 972 } while (0) 973 974static int igb_reg_test(struct igb_adapter *adapter, u64 *data) 975{ 976 struct e1000_hw *hw = &adapter->hw; 977 struct igb_reg_test *test; 978 u32 value, before, after; 979 u32 i, toggle; 980 981 toggle = 0x7FFFF3FF; 982 983 switch (adapter->hw.mac.type) { 984 case e1000_82576: 985 test = reg_test_82576; 986 break; 987 default: 988 test = reg_test_82575; 989 break; 990 } 991 992 /* Because the status register is such a special case, 993 * we handle it separately from the rest of the register 994 * tests. Some bits are read-only, some toggle, and some 995 * are writable on newer MACs. 
996 */ 997 before = rd32(E1000_STATUS); 998 value = (rd32(E1000_STATUS) & toggle); 999 wr32(E1000_STATUS, toggle); 1000 after = rd32(E1000_STATUS) & toggle; 1001 if (value != after) { 1002 dev_err(&adapter->pdev->dev, "failed STATUS register test " 1003 "got: 0x%08X expected: 0x%08X\n", after, value); 1004 *data = 1; 1005 return 1; 1006 } 1007 /* restore previous status */ 1008 wr32(E1000_STATUS, before); 1009 1010 /* Perform the remainder of the register test, looping through 1011 * the test table until we either fail or reach the null entry. 1012 */ 1013 while (test->reg) { 1014 for (i = 0; i < test->array_len; i++) { 1015 switch (test->test_type) { 1016 case PATTERN_TEST: 1017 REG_PATTERN_TEST(test->reg + (i * test->reg_offset), 1018 test->mask, 1019 test->write); 1020 break; 1021 case SET_READ_TEST: 1022 REG_SET_AND_CHECK(test->reg + (i * test->reg_offset), 1023 test->mask, 1024 test->write); 1025 break; 1026 case WRITE_NO_TEST: 1027 writel(test->write, 1028 (adapter->hw.hw_addr + test->reg) 1029 + (i * test->reg_offset)); 1030 break; 1031 case TABLE32_TEST: 1032 REG_PATTERN_TEST(test->reg + (i * 4), 1033 test->mask, 1034 test->write); 1035 break; 1036 case TABLE64_TEST_LO: 1037 REG_PATTERN_TEST(test->reg + (i * 8), 1038 test->mask, 1039 test->write); 1040 break; 1041 case TABLE64_TEST_HI: 1042 REG_PATTERN_TEST((test->reg + 4) + (i * 8), 1043 test->mask, 1044 test->write); 1045 break; 1046 } 1047 } 1048 test++; 1049 } 1050 1051 *data = 0; 1052 return 0; 1053} 1054 1055static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data) 1056{ 1057 u16 temp; 1058 u16 checksum = 0; 1059 u16 i; 1060 1061 *data = 0; 1062 /* Read and add up the contents of the EEPROM */ 1063 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { 1064 if ((adapter->hw.nvm.ops.read_nvm(&adapter->hw, i, 1, &temp)) 1065 < 0) { 1066 *data = 1; 1067 break; 1068 } 1069 checksum += temp; 1070 } 1071 1072 /* If Checksum is not Correct return error else test passed */ 1073 if ((checksum != (u16) NVM_SUM) && !(*data)) 1074 *data = 2; 1075 1076 return *data; 1077} 1078 1079static irqreturn_t igb_test_intr(int irq, void *data) 1080{ 1081 struct net_device *netdev = (struct net_device *) data; 1082 struct igb_adapter *adapter = netdev_priv(netdev); 1083 struct e1000_hw *hw = &adapter->hw; 1084 1085 adapter->test_icr |= rd32(E1000_ICR); 1086 1087 return IRQ_HANDLED; 1088} 1089 1090static int igb_intr_test(struct igb_adapter *adapter, u64 *data) 1091{ 1092 struct e1000_hw *hw = &adapter->hw; 1093 struct net_device *netdev = adapter->netdev; 1094 u32 mask, i = 0, shared_int = true; 1095 u32 irq = adapter->pdev->irq; 1096 1097 *data = 0; 1098 1099 /* Hook up test interrupt handler just for this test */ 1100 if (adapter->msix_entries) { 1101 /* NOTE: we don't test MSI-X interrupts here, yet */ 1102 return 0; 1103 } else if (adapter->flags & IGB_FLAG_HAS_MSI) { 1104 shared_int = false; 1105 if (request_irq(irq, &igb_test_intr, 0, netdev->name, netdev)) { 1106 *data = 1; 1107 return -1; 1108 } 1109 } else if (!request_irq(irq, &igb_test_intr, IRQF_PROBE_SHARED, 1110 netdev->name, netdev)) { 1111 shared_int = false; 1112 } else if (request_irq(irq, &igb_test_intr, IRQF_SHARED, 1113 netdev->name, netdev)) { 1114 *data = 1; 1115 return -1; 1116 } 1117 dev_info(&adapter->pdev->dev, "testing %s interrupt\n", 1118 (shared_int ? 
"shared" : "unshared")); 1119 1120 /* Disable all the interrupts */ 1121 wr32(E1000_IMC, 0xFFFFFFFF); 1122 msleep(10); 1123 1124 /* Test each interrupt */ 1125 for (; i < 10; i++) { 1126 /* Interrupt to test */ 1127 mask = 1 << i; 1128 1129 if (!shared_int) { 1130 /* Disable the interrupt to be reported in 1131 * the cause register and then force the same 1132 * interrupt and see if one gets posted. If 1133 * an interrupt was posted to the bus, the 1134 * test failed. 1135 */ 1136 adapter->test_icr = 0; 1137 wr32(E1000_IMC, ~mask & 0x00007FFF); 1138 wr32(E1000_ICS, ~mask & 0x00007FFF); 1139 msleep(10); 1140 1141 if (adapter->test_icr & mask) { 1142 *data = 3; 1143 break; 1144 } 1145 } 1146 1147 /* Enable the interrupt to be reported in 1148 * the cause register and then force the same 1149 * interrupt and see if one gets posted. If 1150 * an interrupt was not posted to the bus, the 1151 * test failed. 1152 */ 1153 adapter->test_icr = 0; 1154 wr32(E1000_IMS, mask); 1155 wr32(E1000_ICS, mask); 1156 msleep(10); 1157 1158 if (!(adapter->test_icr & mask)) { 1159 *data = 4; 1160 break; 1161 } 1162 1163 if (!shared_int) { 1164 /* Disable the other interrupts to be reported in 1165 * the cause register and then force the other 1166 * interrupts and see if any get posted. If 1167 * an interrupt was posted to the bus, the 1168 * test failed. 1169 */ 1170 adapter->test_icr = 0; 1171 wr32(E1000_IMC, ~mask & 0x00007FFF); 1172 wr32(E1000_ICS, ~mask & 0x00007FFF); 1173 msleep(10); 1174 1175 if (adapter->test_icr) { 1176 *data = 5; 1177 break; 1178 } 1179 } 1180 } 1181 1182 /* Disable all the interrupts */ 1183 wr32(E1000_IMC, 0xFFFFFFFF); 1184 msleep(10); 1185 1186 /* Unhook test interrupt handler */ 1187 free_irq(irq, netdev); 1188 1189 return *data; 1190} 1191 1192static void igb_free_desc_rings(struct igb_adapter *adapter) 1193{ 1194 struct igb_ring *tx_ring = &adapter->test_tx_ring; 1195 struct igb_ring *rx_ring = &adapter->test_rx_ring; 1196 struct pci_dev *pdev = adapter->pdev; 1197 int i; 1198 1199 if (tx_ring->desc && tx_ring->buffer_info) { 1200 for (i = 0; i < tx_ring->count; i++) { 1201 struct igb_buffer *buf = &(tx_ring->buffer_info[i]); 1202 if (buf->dma) 1203 pci_unmap_single(pdev, buf->dma, buf->length, 1204 PCI_DMA_TODEVICE); 1205 if (buf->skb) 1206 dev_kfree_skb(buf->skb); 1207 } 1208 } 1209 1210 if (rx_ring->desc && rx_ring->buffer_info) { 1211 for (i = 0; i < rx_ring->count; i++) { 1212 struct igb_buffer *buf = &(rx_ring->buffer_info[i]); 1213 if (buf->dma) 1214 pci_unmap_single(pdev, buf->dma, 1215 IGB_RXBUFFER_2048, 1216 PCI_DMA_FROMDEVICE); 1217 if (buf->skb) 1218 dev_kfree_skb(buf->skb); 1219 } 1220 } 1221 1222 if (tx_ring->desc) { 1223 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, 1224 tx_ring->dma); 1225 tx_ring->desc = NULL; 1226 } 1227 if (rx_ring->desc) { 1228 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, 1229 rx_ring->dma); 1230 rx_ring->desc = NULL; 1231 } 1232 1233 kfree(tx_ring->buffer_info); 1234 tx_ring->buffer_info = NULL; 1235 kfree(rx_ring->buffer_info); 1236 rx_ring->buffer_info = NULL; 1237 1238 return; 1239} 1240 1241static int igb_setup_desc_rings(struct igb_adapter *adapter) 1242{ 1243 struct e1000_hw *hw = &adapter->hw; 1244 struct igb_ring *tx_ring = &adapter->test_tx_ring; 1245 struct igb_ring *rx_ring = &adapter->test_rx_ring; 1246 struct pci_dev *pdev = adapter->pdev; 1247 u32 rctl; 1248 int i, ret_val; 1249 1250 /* Setup Tx descriptor ring and Tx buffers */ 1251 1252 if (!tx_ring->count) 1253 tx_ring->count = IGB_DEFAULT_TXD; 1254 1255 
tx_ring->buffer_info = kcalloc(tx_ring->count, 1256 sizeof(struct igb_buffer), 1257 GFP_KERNEL); 1258 if (!tx_ring->buffer_info) { 1259 ret_val = 1; 1260 goto err_nomem; 1261 } 1262 1263 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); 1264 tx_ring->size = ALIGN(tx_ring->size, 4096); 1265 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, 1266 &tx_ring->dma); 1267 if (!tx_ring->desc) { 1268 ret_val = 2; 1269 goto err_nomem; 1270 } 1271 tx_ring->next_to_use = tx_ring->next_to_clean = 0; 1272 1273 wr32(E1000_TDBAL(0), 1274 ((u64) tx_ring->dma & 0x00000000FFFFFFFF)); 1275 wr32(E1000_TDBAH(0), ((u64) tx_ring->dma >> 32)); 1276 wr32(E1000_TDLEN(0), 1277 tx_ring->count * sizeof(struct e1000_tx_desc)); 1278 wr32(E1000_TDH(0), 0); 1279 wr32(E1000_TDT(0), 0); 1280 wr32(E1000_TCTL, 1281 E1000_TCTL_PSP | E1000_TCTL_EN | 1282 E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | 1283 E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT); 1284 1285 for (i = 0; i < tx_ring->count; i++) { 1286 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); 1287 struct sk_buff *skb; 1288 unsigned int size = 1024; 1289 1290 skb = alloc_skb(size, GFP_KERNEL); 1291 if (!skb) { 1292 ret_val = 3; 1293 goto err_nomem; 1294 } 1295 skb_put(skb, size); 1296 tx_ring->buffer_info[i].skb = skb; 1297 tx_ring->buffer_info[i].length = skb->len; 1298 tx_ring->buffer_info[i].dma = 1299 pci_map_single(pdev, skb->data, skb->len, 1300 PCI_DMA_TODEVICE); 1301 tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma); 1302 tx_desc->lower.data = cpu_to_le32(skb->len); 1303 tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | 1304 E1000_TXD_CMD_IFCS | 1305 E1000_TXD_CMD_RS); 1306 tx_desc->upper.data = 0; 1307 } 1308 1309 /* Setup Rx descriptor ring and Rx buffers */ 1310 1311 if (!rx_ring->count) 1312 rx_ring->count = IGB_DEFAULT_RXD; 1313 1314 rx_ring->buffer_info = kcalloc(rx_ring->count, 1315 sizeof(struct igb_buffer), 1316 GFP_KERNEL); 1317 if (!rx_ring->buffer_info) { 1318 ret_val = 4; 1319 goto err_nomem; 1320 } 1321 1322 rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc); 1323 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, 1324 &rx_ring->dma); 1325 if (!rx_ring->desc) { 1326 ret_val = 5; 1327 goto err_nomem; 1328 } 1329 rx_ring->next_to_use = rx_ring->next_to_clean = 0; 1330 1331 rctl = rd32(E1000_RCTL); 1332 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); 1333 wr32(E1000_RDBAL(0), 1334 ((u64) rx_ring->dma & 0xFFFFFFFF)); 1335 wr32(E1000_RDBAH(0), 1336 ((u64) rx_ring->dma >> 32)); 1337 wr32(E1000_RDLEN(0), rx_ring->size); 1338 wr32(E1000_RDH(0), 0); 1339 wr32(E1000_RDT(0), 0); 1340 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); 1341 rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | 1342 E1000_RCTL_RDMTS_HALF | 1343 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 1344 wr32(E1000_RCTL, rctl); 1345 wr32(E1000_SRRCTL(0), 0); 1346 1347 for (i = 0; i < rx_ring->count; i++) { 1348 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); 1349 struct sk_buff *skb; 1350 1351 skb = alloc_skb(IGB_RXBUFFER_2048 + NET_IP_ALIGN, 1352 GFP_KERNEL); 1353 if (!skb) { 1354 ret_val = 6; 1355 goto err_nomem; 1356 } 1357 skb_reserve(skb, NET_IP_ALIGN); 1358 rx_ring->buffer_info[i].skb = skb; 1359 rx_ring->buffer_info[i].dma = 1360 pci_map_single(pdev, skb->data, IGB_RXBUFFER_2048, 1361 PCI_DMA_FROMDEVICE); 1362 rx_desc->buffer_addr = cpu_to_le64(rx_ring->buffer_info[i].dma); 1363 memset(skb->data, 0x00, skb->len); 1364 } 1365 1366 return 0; 1367 1368err_nomem: 1369 
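	/* ret_val 1-6 above identifies which allocation failed (Tx buffer_info,
	 * Tx descriptors, Tx skb, Rx buffer_info, Rx descriptors, Rx skb).
	 * igb_free_desc_rings() checks each pointer before freeing, so it is
	 * safe to call on a partially constructed pair of test rings.
	 */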
igb_free_desc_rings(adapter); 1370 return ret_val; 1371} 1372 1373static void igb_phy_disable_receiver(struct igb_adapter *adapter) 1374{ 1375 struct e1000_hw *hw = &adapter->hw; 1376 1377 /* Write out to PHY registers 29 and 30 to disable the Receiver. */ 1378 igb_write_phy_reg(hw, 29, 0x001F); 1379 igb_write_phy_reg(hw, 30, 0x8FFC); 1380 igb_write_phy_reg(hw, 29, 0x001A); 1381 igb_write_phy_reg(hw, 30, 0x8FF0); 1382} 1383 1384static int igb_integrated_phy_loopback(struct igb_adapter *adapter) 1385{ 1386 struct e1000_hw *hw = &adapter->hw; 1387 u32 ctrl_reg = 0; 1388 u32 stat_reg = 0; 1389 1390 hw->mac.autoneg = false; 1391 1392 if (hw->phy.type == e1000_phy_m88) { 1393 /* Auto-MDI/MDIX Off */ 1394 igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); 1395 /* reset to update Auto-MDI/MDIX */ 1396 igb_write_phy_reg(hw, PHY_CONTROL, 0x9140); 1397 /* autoneg off */ 1398 igb_write_phy_reg(hw, PHY_CONTROL, 0x8140); 1399 } 1400 1401 ctrl_reg = rd32(E1000_CTRL); 1402 1403 /* force 1000, set loopback */ 1404 igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); 1405 1406 /* Now set up the MAC to the same speed/duplex as the PHY. */ 1407 ctrl_reg = rd32(E1000_CTRL); 1408 ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ 1409 ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ 1410 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ 1411 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ 1412 E1000_CTRL_FD); /* Force Duplex to FULL */ 1413 1414 if (hw->phy.media_type == e1000_media_type_copper && 1415 hw->phy.type == e1000_phy_m88) 1416 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ 1417 else { 1418 /* Set the ILOS bit on the fiber Nic if half duplex link is 1419 * detected. */ 1420 stat_reg = rd32(E1000_STATUS); 1421 if ((stat_reg & E1000_STATUS_FD) == 0) 1422 ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); 1423 } 1424 1425 wr32(E1000_CTRL, ctrl_reg); 1426 1427 /* Disable the receiver on the PHY so when a cable is plugged in, the 1428 * PHY does not begin to autoneg when a cable is reconnected to the NIC. 
1429 */ 1430 if (hw->phy.type == e1000_phy_m88) 1431 igb_phy_disable_receiver(adapter); 1432 1433 udelay(500); 1434 1435 return 0; 1436} 1437 1438static int igb_set_phy_loopback(struct igb_adapter *adapter) 1439{ 1440 return igb_integrated_phy_loopback(adapter); 1441} 1442 1443static int igb_setup_loopback_test(struct igb_adapter *adapter) 1444{ 1445 struct e1000_hw *hw = &adapter->hw; 1446 u32 reg; 1447 1448 if (hw->phy.media_type == e1000_media_type_fiber || 1449 hw->phy.media_type == e1000_media_type_internal_serdes) { 1450 reg = rd32(E1000_RCTL); 1451 reg |= E1000_RCTL_LBM_TCVR; 1452 wr32(E1000_RCTL, reg); 1453 1454 wr32(E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK); 1455 1456 reg = rd32(E1000_CTRL); 1457 reg &= ~(E1000_CTRL_RFCE | 1458 E1000_CTRL_TFCE | 1459 E1000_CTRL_LRST); 1460 reg |= E1000_CTRL_SLU | 1461 E1000_CTRL_FD; 1462 wr32(E1000_CTRL, reg); 1463 1464 /* Unset switch control to serdes energy detect */ 1465 reg = rd32(E1000_CONNSW); 1466 reg &= ~E1000_CONNSW_ENRGSRC; 1467 wr32(E1000_CONNSW, reg); 1468 1469 /* Set PCS register for forced speed */ 1470 reg = rd32(E1000_PCS_LCTL); 1471 reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg*/ 1472 reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */ 1473 E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */ 1474 E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */ 1475 E1000_PCS_LCTL_FSD | /* Force Speed */ 1476 E1000_PCS_LCTL_FORCE_LINK; /* Force Link */ 1477 wr32(E1000_PCS_LCTL, reg); 1478 1479 return 0; 1480 } else if (hw->phy.media_type == e1000_media_type_copper) { 1481 return igb_set_phy_loopback(adapter); 1482 } 1483 1484 return 7; 1485} 1486 1487static void igb_loopback_cleanup(struct igb_adapter *adapter) 1488{ 1489 struct e1000_hw *hw = &adapter->hw; 1490 u32 rctl; 1491 u16 phy_reg; 1492 1493 rctl = rd32(E1000_RCTL); 1494 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); 1495 wr32(E1000_RCTL, rctl); 1496 1497 hw->mac.autoneg = true; 1498 igb_read_phy_reg(hw, PHY_CONTROL, &phy_reg); 1499 if (phy_reg & MII_CR_LOOPBACK) { 1500 phy_reg &= ~MII_CR_LOOPBACK; 1501 igb_write_phy_reg(hw, PHY_CONTROL, phy_reg); 1502 igb_phy_sw_reset(hw); 1503 } 1504} 1505 1506static void igb_create_lbtest_frame(struct sk_buff *skb, 1507 unsigned int frame_size) 1508{ 1509 memset(skb->data, 0xFF, frame_size); 1510 frame_size &= ~1; 1511 memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); 1512 memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); 1513 memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); 1514} 1515 1516static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) 1517{ 1518 frame_size &= ~1; 1519 if (*(skb->data + 3) == 0xFF) 1520 if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && 1521 (*(skb->data + frame_size / 2 + 12) == 0xAF)) 1522 return 0; 1523 return 13; 1524} 1525 1526static int igb_run_loopback_test(struct igb_adapter *adapter) 1527{ 1528 struct e1000_hw *hw = &adapter->hw; 1529 struct igb_ring *tx_ring = &adapter->test_tx_ring; 1530 struct igb_ring *rx_ring = &adapter->test_rx_ring; 1531 struct pci_dev *pdev = adapter->pdev; 1532 int i, j, k, l, lc, good_cnt; 1533 int ret_val = 0; 1534 unsigned long time; 1535 1536 wr32(E1000_RDT(0), rx_ring->count - 1); 1537 1538 /* Calculate the loop count based on the largest descriptor ring 1539 * The idea is to wrap the largest ring a number of times using 64 1540 * send/receive pairs during each loop 1541 */ 1542 1543 if (rx_ring->count <= tx_ring->count) 1544 lc = ((tx_ring->count / 64) * 2) + 1; 1545 else 1546 lc = ((rx_ring->count / 64) * 2) + 1; 1547 1548 k = l = 0; 1549 
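	/* Each pass of the outer loop sends a burst of 64 frames and then polls
	 * the Rx ring for them, so lc wraps the larger ring roughly twice:
	 * e.g. with a 256-descriptor ring, lc = (256 / 64) * 2 + 1 = 9.
	 * k and l track the next Tx descriptor to fill and the next Rx
	 * descriptor expected back, carried across iterations.
	 */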
for (j = 0; j <= lc; j++) { /* loop count loop */ 1550 for (i = 0; i < 64; i++) { /* send the packets */ 1551 igb_create_lbtest_frame(tx_ring->buffer_info[k].skb, 1552 1024); 1553 pci_dma_sync_single_for_device(pdev, 1554 tx_ring->buffer_info[k].dma, 1555 tx_ring->buffer_info[k].length, 1556 PCI_DMA_TODEVICE); 1557 k++; 1558 if (k == tx_ring->count) 1559 k = 0; 1560 } 1561 wr32(E1000_TDT(0), k); 1562 msleep(200); 1563 time = jiffies; /* set the start time for the receive */ 1564 good_cnt = 0; 1565 do { /* receive the sent packets */ 1566 pci_dma_sync_single_for_cpu(pdev, 1567 rx_ring->buffer_info[l].dma, 1568 IGB_RXBUFFER_2048, 1569 PCI_DMA_FROMDEVICE); 1570 1571 ret_val = igb_check_lbtest_frame( 1572 rx_ring->buffer_info[l].skb, 1024); 1573 if (!ret_val) 1574 good_cnt++; 1575 l++; 1576 if (l == rx_ring->count) 1577 l = 0; 1578 /* time + 20 msecs (200 msecs on 2.4) is more than 1579 * enough time to complete the receives, if it's 1580 * exceeded, break and error off 1581 */ 1582 } while (good_cnt < 64 && jiffies < (time + 20)); 1583 if (good_cnt != 64) { 1584 ret_val = 13; /* ret_val is the same as mis-compare */ 1585 break; 1586 } 1587 if (jiffies >= (time + 20)) { 1588 ret_val = 14; /* error code for time out error */ 1589 break; 1590 } 1591 } /* end loop count loop */ 1592 return ret_val; 1593} 1594 1595static int igb_loopback_test(struct igb_adapter *adapter, u64 *data) 1596{ 1597 /* PHY loopback cannot be performed if SoL/IDER 1598 * sessions are active */ 1599 if (igb_check_reset_block(&adapter->hw)) { 1600 dev_err(&adapter->pdev->dev, 1601 "Cannot do PHY loopback test " 1602 "when SoL/IDER is active.\n"); 1603 *data = 0; 1604 goto out; 1605 } 1606 *data = igb_setup_desc_rings(adapter); 1607 if (*data) 1608 goto out; 1609 *data = igb_setup_loopback_test(adapter); 1610 if (*data) 1611 goto err_loopback; 1612 *data = igb_run_loopback_test(adapter); 1613 igb_loopback_cleanup(adapter); 1614 1615err_loopback: 1616 igb_free_desc_rings(adapter); 1617out: 1618 return *data; 1619} 1620 1621static int igb_link_test(struct igb_adapter *adapter, u64 *data) 1622{ 1623 struct e1000_hw *hw = &adapter->hw; 1624 *data = 0; 1625 if (hw->phy.media_type == e1000_media_type_internal_serdes) { 1626 int i = 0; 1627 hw->mac.serdes_has_link = false; 1628 1629 /* On some blade server designs, link establishment 1630 * could take as long as 2-3 minutes */ 1631 do { 1632 hw->mac.ops.check_for_link(&adapter->hw); 1633 if (hw->mac.serdes_has_link) 1634 return *data; 1635 msleep(20); 1636 } while (i++ < 3750); 1637 1638 *data = 1; 1639 } else { 1640 hw->mac.ops.check_for_link(&adapter->hw); 1641 if (hw->mac.autoneg) 1642 msleep(4000); 1643 1644 if (!(rd32(E1000_STATUS) & 1645 E1000_STATUS_LU)) 1646 *data = 1; 1647 } 1648 return *data; 1649} 1650 1651static void igb_diag_test(struct net_device *netdev, 1652 struct ethtool_test *eth_test, u64 *data) 1653{ 1654 struct igb_adapter *adapter = netdev_priv(netdev); 1655 u16 autoneg_advertised; 1656 u8 forced_speed_duplex, autoneg; 1657 bool if_running = netif_running(netdev); 1658 1659 set_bit(__IGB_TESTING, &adapter->state); 1660 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 1661 /* Offline tests */ 1662 1663 /* save speed, duplex, autoneg settings */ 1664 autoneg_advertised = adapter->hw.phy.autoneg_advertised; 1665 forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; 1666 autoneg = adapter->hw.mac.autoneg; 1667 1668 dev_info(&adapter->pdev->dev, "offline testing starting\n"); 1669 1670 /* Link test performed before hardware reset so autoneg doesn't 1671 * 
interfere with test result */ 1672 if (igb_link_test(adapter, &data[4])) 1673 eth_test->flags |= ETH_TEST_FL_FAILED; 1674 1675 if (if_running) 1676 /* indicate we're in test mode */ 1677 dev_close(netdev); 1678 else 1679 igb_reset(adapter); 1680 1681 if (igb_reg_test(adapter, &data[0])) 1682 eth_test->flags |= ETH_TEST_FL_FAILED; 1683 1684 igb_reset(adapter); 1685 if (igb_eeprom_test(adapter, &data[1])) 1686 eth_test->flags |= ETH_TEST_FL_FAILED; 1687 1688 igb_reset(adapter); 1689 if (igb_intr_test(adapter, &data[2])) 1690 eth_test->flags |= ETH_TEST_FL_FAILED; 1691 1692 igb_reset(adapter); 1693 if (igb_loopback_test(adapter, &data[3])) 1694 eth_test->flags |= ETH_TEST_FL_FAILED; 1695 1696 /* restore speed, duplex, autoneg settings */ 1697 adapter->hw.phy.autoneg_advertised = autoneg_advertised; 1698 adapter->hw.mac.forced_speed_duplex = forced_speed_duplex; 1699 adapter->hw.mac.autoneg = autoneg; 1700 1701 /* force this routine to wait until autoneg complete/timeout */ 1702 adapter->hw.phy.autoneg_wait_to_complete = true; 1703 igb_reset(adapter); 1704 adapter->hw.phy.autoneg_wait_to_complete = false; 1705 1706 clear_bit(__IGB_TESTING, &adapter->state); 1707 if (if_running) 1708 dev_open(netdev); 1709 } else { 1710 dev_info(&adapter->pdev->dev, "online testing starting\n"); 1711 /* Online tests */ 1712 if (igb_link_test(adapter, &data[4])) 1713 eth_test->flags |= ETH_TEST_FL_FAILED; 1714 1715 /* Online tests aren't run; pass by default */ 1716 data[0] = 0; 1717 data[1] = 0; 1718 data[2] = 0; 1719 data[3] = 0; 1720 1721 clear_bit(__IGB_TESTING, &adapter->state); 1722 } 1723 msleep_interruptible(4 * 1000); 1724} 1725 1726static int igb_wol_exclusion(struct igb_adapter *adapter, 1727 struct ethtool_wolinfo *wol) 1728{ 1729 struct e1000_hw *hw = &adapter->hw; 1730 int retval = 1; /* fail by default */ 1731 1732 switch (hw->device_id) { 1733 case E1000_DEV_ID_82575GB_QUAD_COPPER: 1734 /* WoL not supported */ 1735 wol->supported = 0; 1736 break; 1737 case E1000_DEV_ID_82575EB_FIBER_SERDES: 1738 case E1000_DEV_ID_82576_FIBER: 1739 case E1000_DEV_ID_82576_SERDES: 1740 /* Wake events not supported on port B */ 1741 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) { 1742 wol->supported = 0; 1743 break; 1744 } 1745 /* return success for non excluded adapter ports */ 1746 retval = 0; 1747 break; 1748 default: 1749 /* dual port cards only support WoL on port A from now on 1750 * unless it was enabled in the eeprom for port B 1751 * so exclude FUNC_1 ports from having WoL enabled */ 1752 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1 && 1753 !adapter->eeprom_wol) { 1754 wol->supported = 0; 1755 break; 1756 } 1757 1758 retval = 0; 1759 } 1760 1761 return retval; 1762} 1763 1764static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 1765{ 1766 struct igb_adapter *adapter = netdev_priv(netdev); 1767 1768 wol->supported = WAKE_UCAST | WAKE_MCAST | 1769 WAKE_BCAST | WAKE_MAGIC; 1770 wol->wolopts = 0; 1771 1772 /* this function will set ->supported = 0 and return 1 if wol is not 1773 * supported by this hardware */ 1774 if (igb_wol_exclusion(adapter, wol) || 1775 !device_can_wakeup(&adapter->pdev->dev)) 1776 return; 1777 1778 /* apply any specific unsupported masks here */ 1779 switch (adapter->hw.device_id) { 1780 default: 1781 break; 1782 } 1783 1784 if (adapter->wol & E1000_WUFC_EX) 1785 wol->wolopts |= WAKE_UCAST; 1786 if (adapter->wol & E1000_WUFC_MC) 1787 wol->wolopts |= WAKE_MCAST; 1788 if (adapter->wol & E1000_WUFC_BC) 1789 wol->wolopts |= WAKE_BCAST; 1790 if (adapter->wol & 
static int igb_wol_exclusion(struct igb_adapter *adapter,
			     struct ethtool_wolinfo *wol)
{
	struct e1000_hw *hw = &adapter->hw;
	int retval = 1; /* fail by default */

	switch (hw->device_id) {
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		/* WoL not supported */
		wol->supported = 0;
		break;
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
		/* Wake events not supported on port B */
		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) {
			wol->supported = 0;
			break;
		}
		/* return success for non excluded adapter ports */
		retval = 0;
		break;
	default:
		/* dual port cards only support WoL on port A from now on
		 * unless it was enabled in the eeprom for port B
		 * so exclude FUNC_1 ports from having WoL enabled */
		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1 &&
		    !adapter->eeprom_wol) {
			wol->supported = 0;
			break;
		}

		retval = 0;
	}

	return retval;
}

static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	wol->supported = WAKE_UCAST | WAKE_MCAST |
			 WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = 0;

	/* this function will set ->supported = 0 and return 1 if wol is not
	 * supported by this hardware */
	if (igb_wol_exclusion(adapter, wol) ||
	    !device_can_wakeup(&adapter->pdev->dev))
		return;

	/* apply any specific unsupported masks here */
	switch (adapter->hw.device_id) {
	default:
		break;
	}

	if (adapter->wol & E1000_WUFC_EX)
		wol->wolopts |= WAKE_UCAST;
	if (adapter->wol & E1000_WUFC_MC)
		wol->wolopts |= WAKE_MCAST;
	if (adapter->wol & E1000_WUFC_BC)
		wol->wolopts |= WAKE_BCAST;
	if (adapter->wol & E1000_WUFC_MAG)
		wol->wolopts |= WAKE_MAGIC;

	return;
}

static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
		return -EOPNOTSUPP;

	if (igb_wol_exclusion(adapter, wol) ||
	    !device_can_wakeup(&adapter->pdev->dev))
		return wol->wolopts ? -EOPNOTSUPP : 0;

	switch (hw->device_id) {
	default:
		break;
	}

	/* these settings will always override what we currently have */
	adapter->wol = 0;

	if (wol->wolopts & WAKE_UCAST)
		adapter->wol |= E1000_WUFC_EX;
	if (wol->wolopts & WAKE_MCAST)
		adapter->wol |= E1000_WUFC_MC;
	if (wol->wolopts & WAKE_BCAST)
		adapter->wol |= E1000_WUFC_BC;
	if (wol->wolopts & WAKE_MAGIC)
		adapter->wol |= E1000_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}

/* toggle LED 4 times per second = 2 "blinks" per second */
#define IGB_ID_INTERVAL	(HZ/4)

/* bit defines for adapter->led_status */
#define IGB_LED_ON	0

static int igb_phys_id(struct net_device *netdev, u32 data)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
		data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);

	igb_blink_led(hw);
	msleep_interruptible(data * 1000);

	igb_led_off(hw);
	clear_bit(IGB_LED_ON, &adapter->led_status);
	igb_cleanup_led(hw);

	return 0;
}

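/* Interrupt coalescing.  A rx_coalesce_usecs value of 1 or 3 appears to
 * select one of the driver's special moderation modes and is stored in
 * itr_setting as-is (2 and other out-of-range values are rejected with
 * -EINVAL below).  Any other accepted value, including 0, is treated as a
 * microsecond interval and shifted left by two before being written to
 * every RX queue's ITR register; igb_get_coalesce() reverses that shift
 * when reporting the current setting.
 */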
static int igb_set_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
	    ((ec->rx_coalesce_usecs > 3) &&
	     (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
	    (ec->rx_coalesce_usecs == 2))
		return -EINVAL;

	/* convert to rate of irq's per second */
	if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
		adapter->itr_setting = ec->rx_coalesce_usecs;
		adapter->itr = IGB_START_ITR;
	} else {
		adapter->itr_setting = ec->rx_coalesce_usecs << 2;
		adapter->itr = adapter->itr_setting;
	}

	for (i = 0; i < adapter->num_rx_queues; i++)
		wr32(adapter->rx_ring[i].itr_register, adapter->itr);

	return 0;
}

static int igb_get_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (adapter->itr_setting <= 3)
		ec->rx_coalesce_usecs = adapter->itr_setting;
	else
		ec->rx_coalesce_usecs = adapter->itr_setting >> 2;

	return 0;
}

static int igb_nway_reset(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	if (netif_running(netdev))
		igb_reinit_locked(adapter);
	return 0;
}

static int igb_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return IGB_STATS_LEN;
	case ETH_SS_TEST:
		return IGB_TEST_LEN;
	default:
		return -ENOTSUPP;
	}
}

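/* Fill the u64 stats array handed to ethtool: the IGB_GLOBAL_STATS_LEN
 * adapter-wide counters described by igb_gstrings_stats come first (each
 * copied from the adapter structure via its recorded offset and size),
 * followed by the per-TX-queue and then the per-RX-queue counters.  The
 * ordering must match the strings emitted by igb_get_strings() below.
 */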
static void igb_get_ethtool_stats(struct net_device *netdev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	u64 *queue_stat;
	int stat_count = sizeof(struct igb_queue_stats) / sizeof(u64);
	int j;
	int i;
#ifdef CONFIG_IGB_LRO
	int aggregated = 0, flushed = 0, no_desc = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		aggregated += adapter->rx_ring[i].lro_mgr.stats.aggregated;
		flushed += adapter->rx_ring[i].lro_mgr.stats.flushed;
		no_desc += adapter->rx_ring[i].lro_mgr.stats.no_desc;
	}
	adapter->lro_aggregated = aggregated;
	adapter->lro_flushed = flushed;
	adapter->lro_no_desc = no_desc;
#endif

	igb_update_stats(adapter);
	for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
		char *p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
		data[i] = (igb_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	for (j = 0; j < adapter->num_tx_queues; j++) {
		int k;
		queue_stat = (u64 *)&adapter->tx_ring[j].tx_stats;
		for (k = 0; k < stat_count; k++)
			data[i + k] = queue_stat[k];
		i += k;
	}
	for (j = 0; j < adapter->num_rx_queues; j++) {
		int k;
		queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats;
		for (k = 0; k < stat_count; k++)
			data[i + k] = queue_stat[k];
		i += k;
	}
}

static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *igb_gstrings_test,
		       IGB_TEST_LEN*ETH_GSTRING_LEN);
		break;
	case ETH_SS_STATS:
		for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
			memcpy(p, igb_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < adapter->num_tx_queues; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < adapter->num_rx_queues; i++) {
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
/*		BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
		break;
	}
}

static struct ethtool_ops igb_ethtool_ops = {
	.get_settings = igb_get_settings,
	.set_settings = igb_set_settings,
	.get_drvinfo = igb_get_drvinfo,
	.get_regs_len = igb_get_regs_len,
	.get_regs = igb_get_regs,
	.get_wol = igb_get_wol,
	.set_wol = igb_set_wol,
	.get_msglevel = igb_get_msglevel,
	.set_msglevel = igb_set_msglevel,
	.nway_reset = igb_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = igb_get_eeprom_len,
	.get_eeprom = igb_get_eeprom,
	.set_eeprom = igb_set_eeprom,
	.get_ringparam = igb_get_ringparam,
	.set_ringparam = igb_set_ringparam,
	.get_pauseparam = igb_get_pauseparam,
	.set_pauseparam = igb_set_pauseparam,
	.get_rx_csum = igb_get_rx_csum,
	.set_rx_csum = igb_set_rx_csum,
	.get_tx_csum = igb_get_tx_csum,
	.set_tx_csum = igb_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = ethtool_op_get_tso,
	.set_tso = igb_set_tso,
	.self_test = igb_diag_test,
	.get_strings = igb_get_strings,
	.phys_id = igb_phys_id,
	.get_sset_count = igb_get_sset_count,
	.get_ethtool_stats = igb_get_ethtool_stats,
	.get_coalesce = igb_get_coalesce,
	.set_coalesce = igb_set_coalesce,
};

void igb_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops);
}