igb_ethtool.c: ethtool support for the Intel(R) Gigabit Ethernet (igb) driver
Source: Linux kernel mirror (for testing), git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, tag v2.6.31-rc9 (2067 lines, 61 kB)
1/******************************************************************************* 2 3 Intel(R) Gigabit Ethernet Linux driver 4 Copyright(c) 2007-2009 Intel Corporation. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms and conditions of the GNU General Public License, 8 version 2, as published by the Free Software Foundation. 9 10 This program is distributed in the hope it will be useful, but WITHOUT 11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 more details. 14 15 You should have received a copy of the GNU General Public License along with 16 this program; if not, write to the Free Software Foundation, Inc., 17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 19 The full GNU General Public License is included in this distribution in 20 the file called "COPYING". 21 22 Contact Information: 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 26*******************************************************************************/ 27 28/* ethtool support for igb */ 29 30#include <linux/vmalloc.h> 31#include <linux/netdevice.h> 32#include <linux/pci.h> 33#include <linux/delay.h> 34#include <linux/interrupt.h> 35#include <linux/if_ether.h> 36#include <linux/ethtool.h> 37 38#include "igb.h" 39 40struct igb_stats { 41 char stat_string[ETH_GSTRING_LEN]; 42 int sizeof_stat; 43 int stat_offset; 44}; 45 46#define IGB_STAT(m) FIELD_SIZEOF(struct igb_adapter, m), \ 47 offsetof(struct igb_adapter, m) 48static const struct igb_stats igb_gstrings_stats[] = { 49 { "rx_packets", IGB_STAT(stats.gprc) }, 50 { "tx_packets", IGB_STAT(stats.gptc) }, 51 { "rx_bytes", IGB_STAT(stats.gorc) }, 52 { "tx_bytes", IGB_STAT(stats.gotc) }, 53 { "rx_broadcast", IGB_STAT(stats.bprc) }, 54 { "tx_broadcast", IGB_STAT(stats.bptc) }, 55 { "rx_multicast", IGB_STAT(stats.mprc) }, 56 { "tx_multicast", IGB_STAT(stats.mptc) }, 57 { "rx_errors", IGB_STAT(net_stats.rx_errors) }, 58 { "tx_errors", IGB_STAT(net_stats.tx_errors) }, 59 { "tx_dropped", IGB_STAT(net_stats.tx_dropped) }, 60 { "multicast", IGB_STAT(stats.mprc) }, 61 { "collisions", IGB_STAT(stats.colc) }, 62 { "rx_length_errors", IGB_STAT(net_stats.rx_length_errors) }, 63 { "rx_over_errors", IGB_STAT(net_stats.rx_over_errors) }, 64 { "rx_crc_errors", IGB_STAT(stats.crcerrs) }, 65 { "rx_frame_errors", IGB_STAT(net_stats.rx_frame_errors) }, 66 { "rx_no_buffer_count", IGB_STAT(stats.rnbc) }, 67 { "rx_queue_drop_packet_count", IGB_STAT(net_stats.rx_fifo_errors) }, 68 { "rx_missed_errors", IGB_STAT(stats.mpc) }, 69 { "tx_aborted_errors", IGB_STAT(stats.ecol) }, 70 { "tx_carrier_errors", IGB_STAT(stats.tncrs) }, 71 { "tx_fifo_errors", IGB_STAT(net_stats.tx_fifo_errors) }, 72 { "tx_heartbeat_errors", IGB_STAT(net_stats.tx_heartbeat_errors) }, 73 { "tx_window_errors", IGB_STAT(stats.latecol) }, 74 { "tx_abort_late_coll", IGB_STAT(stats.latecol) }, 75 { "tx_deferred_ok", IGB_STAT(stats.dc) }, 76 { "tx_single_coll_ok", IGB_STAT(stats.scc) }, 77 { "tx_multi_coll_ok", IGB_STAT(stats.mcc) }, 78 { "tx_timeout_count", IGB_STAT(tx_timeout_count) }, 79 { "tx_restart_queue", IGB_STAT(restart_queue) }, 80 { "rx_long_length_errors", IGB_STAT(stats.roc) }, 81 { "rx_short_length_errors", IGB_STAT(stats.ruc) }, 82 { "rx_align_errors", IGB_STAT(stats.algnerrc) }, 83 { "tx_tcp_seg_good", IGB_STAT(stats.tsctc) }, 84 { "tx_tcp_seg_failed", 
IGB_STAT(stats.tsctfc) }, 85 { "rx_flow_control_xon", IGB_STAT(stats.xonrxc) }, 86 { "rx_flow_control_xoff", IGB_STAT(stats.xoffrxc) }, 87 { "tx_flow_control_xon", IGB_STAT(stats.xontxc) }, 88 { "tx_flow_control_xoff", IGB_STAT(stats.xofftxc) }, 89 { "rx_long_byte_count", IGB_STAT(stats.gorc) }, 90 { "rx_csum_offload_good", IGB_STAT(hw_csum_good) }, 91 { "rx_csum_offload_errors", IGB_STAT(hw_csum_err) }, 92 { "tx_dma_out_of_sync", IGB_STAT(stats.doosync) }, 93 { "alloc_rx_buff_failed", IGB_STAT(alloc_rx_buff_failed) }, 94 { "tx_smbus", IGB_STAT(stats.mgptc) }, 95 { "rx_smbus", IGB_STAT(stats.mgprc) }, 96 { "dropped_smbus", IGB_STAT(stats.mgpdc) }, 97}; 98 99#define IGB_QUEUE_STATS_LEN \ 100 (((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues)* \ 101 (sizeof(struct igb_rx_queue_stats) / sizeof(u64))) + \ 102 ((((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues) * \ 103 (sizeof(struct igb_tx_queue_stats) / sizeof(u64)))) 104#define IGB_GLOBAL_STATS_LEN \ 105 sizeof(igb_gstrings_stats) / sizeof(struct igb_stats) 106#define IGB_STATS_LEN (IGB_GLOBAL_STATS_LEN + IGB_QUEUE_STATS_LEN) 107static const char igb_gstrings_test[][ETH_GSTRING_LEN] = { 108 "Register test (offline)", "Eeprom test (offline)", 109 "Interrupt test (offline)", "Loopback test (offline)", 110 "Link test (on/offline)" 111}; 112#define IGB_TEST_LEN sizeof(igb_gstrings_test) / ETH_GSTRING_LEN 113 114static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) 115{ 116 struct igb_adapter *adapter = netdev_priv(netdev); 117 struct e1000_hw *hw = &adapter->hw; 118 119 if (hw->phy.media_type == e1000_media_type_copper) { 120 121 ecmd->supported = (SUPPORTED_10baseT_Half | 122 SUPPORTED_10baseT_Full | 123 SUPPORTED_100baseT_Half | 124 SUPPORTED_100baseT_Full | 125 SUPPORTED_1000baseT_Full| 126 SUPPORTED_Autoneg | 127 SUPPORTED_TP); 128 ecmd->advertising = ADVERTISED_TP; 129 130 if (hw->mac.autoneg == 1) { 131 ecmd->advertising |= ADVERTISED_Autoneg; 132 /* the e1000 autoneg seems to match ethtool nicely */ 133 ecmd->advertising |= hw->phy.autoneg_advertised; 134 } 135 136 ecmd->port = PORT_TP; 137 ecmd->phy_address = hw->phy.addr; 138 } else { 139 ecmd->supported = (SUPPORTED_1000baseT_Full | 140 SUPPORTED_FIBRE | 141 SUPPORTED_Autoneg); 142 143 ecmd->advertising = (ADVERTISED_1000baseT_Full | 144 ADVERTISED_FIBRE | 145 ADVERTISED_Autoneg); 146 147 ecmd->port = PORT_FIBRE; 148 } 149 150 ecmd->transceiver = XCVR_INTERNAL; 151 152 if (rd32(E1000_STATUS) & E1000_STATUS_LU) { 153 154 adapter->hw.mac.ops.get_speed_and_duplex(hw, 155 &adapter->link_speed, 156 &adapter->link_duplex); 157 ecmd->speed = adapter->link_speed; 158 159 /* unfortunately FULL_DUPLEX != DUPLEX_FULL 160 * and HALF_DUPLEX != DUPLEX_HALF */ 161 162 if (adapter->link_duplex == FULL_DUPLEX) 163 ecmd->duplex = DUPLEX_FULL; 164 else 165 ecmd->duplex = DUPLEX_HALF; 166 } else { 167 ecmd->speed = -1; 168 ecmd->duplex = -1; 169 } 170 171 ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) || 172 hw->mac.autoneg) ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; 173 return 0; 174} 175 176static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) 177{ 178 struct igb_adapter *adapter = netdev_priv(netdev); 179 struct e1000_hw *hw = &adapter->hw; 180 181 /* When SoL/IDER sessions are active, autoneg/speed/duplex 182 * cannot be changed */ 183 if (igb_check_reset_block(hw)) { 184 dev_err(&adapter->pdev->dev, "Cannot change link " 185 "characteristics when SoL/IDER is active.\n"); 186 return -EINVAL; 187 } 188 189 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) 190 msleep(1); 191 192 if (ecmd->autoneg == AUTONEG_ENABLE) { 193 hw->mac.autoneg = 1; 194 if (hw->phy.media_type == e1000_media_type_fiber) 195 hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full | 196 ADVERTISED_FIBRE | 197 ADVERTISED_Autoneg; 198 else 199 hw->phy.autoneg_advertised = ecmd->advertising | 200 ADVERTISED_TP | 201 ADVERTISED_Autoneg; 202 ecmd->advertising = hw->phy.autoneg_advertised; 203 } else 204 if (igb_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) { 205 clear_bit(__IGB_RESETTING, &adapter->state); 206 return -EINVAL; 207 } 208 209 /* reset the link */ 210 211 if (netif_running(adapter->netdev)) { 212 igb_down(adapter); 213 igb_up(adapter); 214 } else 215 igb_reset(adapter); 216 217 clear_bit(__IGB_RESETTING, &adapter->state); 218 return 0; 219} 220 221static void igb_get_pauseparam(struct net_device *netdev, 222 struct ethtool_pauseparam *pause) 223{ 224 struct igb_adapter *adapter = netdev_priv(netdev); 225 struct e1000_hw *hw = &adapter->hw; 226 227 pause->autoneg = 228 (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); 229 230 if (hw->fc.type == e1000_fc_rx_pause) 231 pause->rx_pause = 1; 232 else if (hw->fc.type == e1000_fc_tx_pause) 233 pause->tx_pause = 1; 234 else if (hw->fc.type == e1000_fc_full) { 235 pause->rx_pause = 1; 236 pause->tx_pause = 1; 237 } 238} 239 240static int igb_set_pauseparam(struct net_device *netdev, 241 struct ethtool_pauseparam *pause) 242{ 243 struct igb_adapter *adapter = netdev_priv(netdev); 244 struct e1000_hw *hw = &adapter->hw; 245 int retval = 0; 246 247 adapter->fc_autoneg = pause->autoneg; 248 249 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) 250 msleep(1); 251 252 if (pause->rx_pause && pause->tx_pause) 253 hw->fc.type = e1000_fc_full; 254 else if (pause->rx_pause && !pause->tx_pause) 255 hw->fc.type = e1000_fc_rx_pause; 256 else if (!pause->rx_pause && pause->tx_pause) 257 hw->fc.type = e1000_fc_tx_pause; 258 else if (!pause->rx_pause && !pause->tx_pause) 259 hw->fc.type = e1000_fc_none; 260 261 hw->fc.original_type = hw->fc.type; 262 263 if (adapter->fc_autoneg == AUTONEG_ENABLE) { 264 if (netif_running(adapter->netdev)) { 265 igb_down(adapter); 266 igb_up(adapter); 267 } else 268 igb_reset(adapter); 269 } else 270 retval = ((hw->phy.media_type == e1000_media_type_fiber) ? 
271 igb_setup_link(hw) : igb_force_mac_fc(hw)); 272 273 clear_bit(__IGB_RESETTING, &adapter->state); 274 return retval; 275} 276 277static u32 igb_get_rx_csum(struct net_device *netdev) 278{ 279 struct igb_adapter *adapter = netdev_priv(netdev); 280 return !(adapter->flags & IGB_FLAG_RX_CSUM_DISABLED); 281} 282 283static int igb_set_rx_csum(struct net_device *netdev, u32 data) 284{ 285 struct igb_adapter *adapter = netdev_priv(netdev); 286 287 if (data) 288 adapter->flags &= ~IGB_FLAG_RX_CSUM_DISABLED; 289 else 290 adapter->flags |= IGB_FLAG_RX_CSUM_DISABLED; 291 292 return 0; 293} 294 295static u32 igb_get_tx_csum(struct net_device *netdev) 296{ 297 return (netdev->features & NETIF_F_IP_CSUM) != 0; 298} 299 300static int igb_set_tx_csum(struct net_device *netdev, u32 data) 301{ 302 struct igb_adapter *adapter = netdev_priv(netdev); 303 304 if (data) { 305 netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); 306 if (adapter->hw.mac.type == e1000_82576) 307 netdev->features |= NETIF_F_SCTP_CSUM; 308 } else { 309 netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 310 NETIF_F_SCTP_CSUM); 311 } 312 313 return 0; 314} 315 316static int igb_set_tso(struct net_device *netdev, u32 data) 317{ 318 struct igb_adapter *adapter = netdev_priv(netdev); 319 320 if (data) { 321 netdev->features |= NETIF_F_TSO; 322 netdev->features |= NETIF_F_TSO6; 323 } else { 324 netdev->features &= ~NETIF_F_TSO; 325 netdev->features &= ~NETIF_F_TSO6; 326 } 327 328 dev_info(&adapter->pdev->dev, "TSO is %s\n", 329 data ? "Enabled" : "Disabled"); 330 return 0; 331} 332 333static u32 igb_get_msglevel(struct net_device *netdev) 334{ 335 struct igb_adapter *adapter = netdev_priv(netdev); 336 return adapter->msg_enable; 337} 338 339static void igb_set_msglevel(struct net_device *netdev, u32 data) 340{ 341 struct igb_adapter *adapter = netdev_priv(netdev); 342 adapter->msg_enable = data; 343} 344 345static int igb_get_regs_len(struct net_device *netdev) 346{ 347#define IGB_REGS_LEN 551 348 return IGB_REGS_LEN * sizeof(u32); 349} 350 351static void igb_get_regs(struct net_device *netdev, 352 struct ethtool_regs *regs, void *p) 353{ 354 struct igb_adapter *adapter = netdev_priv(netdev); 355 struct e1000_hw *hw = &adapter->hw; 356 u32 *regs_buff = p; 357 u8 i; 358 359 memset(p, 0, IGB_REGS_LEN * sizeof(u32)); 360 361 regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; 362 363 /* General Registers */ 364 regs_buff[0] = rd32(E1000_CTRL); 365 regs_buff[1] = rd32(E1000_STATUS); 366 regs_buff[2] = rd32(E1000_CTRL_EXT); 367 regs_buff[3] = rd32(E1000_MDIC); 368 regs_buff[4] = rd32(E1000_SCTL); 369 regs_buff[5] = rd32(E1000_CONNSW); 370 regs_buff[6] = rd32(E1000_VET); 371 regs_buff[7] = rd32(E1000_LEDCTL); 372 regs_buff[8] = rd32(E1000_PBA); 373 regs_buff[9] = rd32(E1000_PBS); 374 regs_buff[10] = rd32(E1000_FRTIMER); 375 regs_buff[11] = rd32(E1000_TCPTIMER); 376 377 /* NVM Register */ 378 regs_buff[12] = rd32(E1000_EECD); 379 380 /* Interrupt */ 381 /* Reading EICS for EICR because they read the 382 * same but EICS does not clear on read */ 383 regs_buff[13] = rd32(E1000_EICS); 384 regs_buff[14] = rd32(E1000_EICS); 385 regs_buff[15] = rd32(E1000_EIMS); 386 regs_buff[16] = rd32(E1000_EIMC); 387 regs_buff[17] = rd32(E1000_EIAC); 388 regs_buff[18] = rd32(E1000_EIAM); 389 /* Reading ICS for ICR because they read the 390 * same but ICS does not clear on read */ 391 regs_buff[19] = rd32(E1000_ICS); 392 regs_buff[20] = rd32(E1000_ICS); 393 regs_buff[21] = rd32(E1000_IMS); 394 regs_buff[22] = rd32(E1000_IMC); 395 
regs_buff[23] = rd32(E1000_IAC); 396 regs_buff[24] = rd32(E1000_IAM); 397 regs_buff[25] = rd32(E1000_IMIRVP); 398 399 /* Flow Control */ 400 regs_buff[26] = rd32(E1000_FCAL); 401 regs_buff[27] = rd32(E1000_FCAH); 402 regs_buff[28] = rd32(E1000_FCTTV); 403 regs_buff[29] = rd32(E1000_FCRTL); 404 regs_buff[30] = rd32(E1000_FCRTH); 405 regs_buff[31] = rd32(E1000_FCRTV); 406 407 /* Receive */ 408 regs_buff[32] = rd32(E1000_RCTL); 409 regs_buff[33] = rd32(E1000_RXCSUM); 410 regs_buff[34] = rd32(E1000_RLPML); 411 regs_buff[35] = rd32(E1000_RFCTL); 412 regs_buff[36] = rd32(E1000_MRQC); 413 regs_buff[37] = rd32(E1000_VT_CTL); 414 415 /* Transmit */ 416 regs_buff[38] = rd32(E1000_TCTL); 417 regs_buff[39] = rd32(E1000_TCTL_EXT); 418 regs_buff[40] = rd32(E1000_TIPG); 419 regs_buff[41] = rd32(E1000_DTXCTL); 420 421 /* Wake Up */ 422 regs_buff[42] = rd32(E1000_WUC); 423 regs_buff[43] = rd32(E1000_WUFC); 424 regs_buff[44] = rd32(E1000_WUS); 425 regs_buff[45] = rd32(E1000_IPAV); 426 regs_buff[46] = rd32(E1000_WUPL); 427 428 /* MAC */ 429 regs_buff[47] = rd32(E1000_PCS_CFG0); 430 regs_buff[48] = rd32(E1000_PCS_LCTL); 431 regs_buff[49] = rd32(E1000_PCS_LSTAT); 432 regs_buff[50] = rd32(E1000_PCS_ANADV); 433 regs_buff[51] = rd32(E1000_PCS_LPAB); 434 regs_buff[52] = rd32(E1000_PCS_NPTX); 435 regs_buff[53] = rd32(E1000_PCS_LPABNP); 436 437 /* Statistics */ 438 regs_buff[54] = adapter->stats.crcerrs; 439 regs_buff[55] = adapter->stats.algnerrc; 440 regs_buff[56] = adapter->stats.symerrs; 441 regs_buff[57] = adapter->stats.rxerrc; 442 regs_buff[58] = adapter->stats.mpc; 443 regs_buff[59] = adapter->stats.scc; 444 regs_buff[60] = adapter->stats.ecol; 445 regs_buff[61] = adapter->stats.mcc; 446 regs_buff[62] = adapter->stats.latecol; 447 regs_buff[63] = adapter->stats.colc; 448 regs_buff[64] = adapter->stats.dc; 449 regs_buff[65] = adapter->stats.tncrs; 450 regs_buff[66] = adapter->stats.sec; 451 regs_buff[67] = adapter->stats.htdpmc; 452 regs_buff[68] = adapter->stats.rlec; 453 regs_buff[69] = adapter->stats.xonrxc; 454 regs_buff[70] = adapter->stats.xontxc; 455 regs_buff[71] = adapter->stats.xoffrxc; 456 regs_buff[72] = adapter->stats.xofftxc; 457 regs_buff[73] = adapter->stats.fcruc; 458 regs_buff[74] = adapter->stats.prc64; 459 regs_buff[75] = adapter->stats.prc127; 460 regs_buff[76] = adapter->stats.prc255; 461 regs_buff[77] = adapter->stats.prc511; 462 regs_buff[78] = adapter->stats.prc1023; 463 regs_buff[79] = adapter->stats.prc1522; 464 regs_buff[80] = adapter->stats.gprc; 465 regs_buff[81] = adapter->stats.bprc; 466 regs_buff[82] = adapter->stats.mprc; 467 regs_buff[83] = adapter->stats.gptc; 468 regs_buff[84] = adapter->stats.gorc; 469 regs_buff[86] = adapter->stats.gotc; 470 regs_buff[88] = adapter->stats.rnbc; 471 regs_buff[89] = adapter->stats.ruc; 472 regs_buff[90] = adapter->stats.rfc; 473 regs_buff[91] = adapter->stats.roc; 474 regs_buff[92] = adapter->stats.rjc; 475 regs_buff[93] = adapter->stats.mgprc; 476 regs_buff[94] = adapter->stats.mgpdc; 477 regs_buff[95] = adapter->stats.mgptc; 478 regs_buff[96] = adapter->stats.tor; 479 regs_buff[98] = adapter->stats.tot; 480 regs_buff[100] = adapter->stats.tpr; 481 regs_buff[101] = adapter->stats.tpt; 482 regs_buff[102] = adapter->stats.ptc64; 483 regs_buff[103] = adapter->stats.ptc127; 484 regs_buff[104] = adapter->stats.ptc255; 485 regs_buff[105] = adapter->stats.ptc511; 486 regs_buff[106] = adapter->stats.ptc1023; 487 regs_buff[107] = adapter->stats.ptc1522; 488 regs_buff[108] = adapter->stats.mptc; 489 regs_buff[109] = adapter->stats.bptc; 490 
regs_buff[110] = adapter->stats.tsctc; 491 regs_buff[111] = adapter->stats.iac; 492 regs_buff[112] = adapter->stats.rpthc; 493 regs_buff[113] = adapter->stats.hgptc; 494 regs_buff[114] = adapter->stats.hgorc; 495 regs_buff[116] = adapter->stats.hgotc; 496 regs_buff[118] = adapter->stats.lenerrs; 497 regs_buff[119] = adapter->stats.scvpc; 498 regs_buff[120] = adapter->stats.hrmpc; 499 500 /* These should probably be added to e1000_regs.h instead */ 501 #define E1000_PSRTYPE_REG(_i) (0x05480 + ((_i) * 4)) 502 #define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) 503 #define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) 504 #define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) 505 #define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8)) 506 #define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8)) 507 #define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8)) 508 509 for (i = 0; i < 4; i++) 510 regs_buff[121 + i] = rd32(E1000_SRRCTL(i)); 511 for (i = 0; i < 4; i++) 512 regs_buff[125 + i] = rd32(E1000_PSRTYPE_REG(i)); 513 for (i = 0; i < 4; i++) 514 regs_buff[129 + i] = rd32(E1000_RDBAL(i)); 515 for (i = 0; i < 4; i++) 516 regs_buff[133 + i] = rd32(E1000_RDBAH(i)); 517 for (i = 0; i < 4; i++) 518 regs_buff[137 + i] = rd32(E1000_RDLEN(i)); 519 for (i = 0; i < 4; i++) 520 regs_buff[141 + i] = rd32(E1000_RDH(i)); 521 for (i = 0; i < 4; i++) 522 regs_buff[145 + i] = rd32(E1000_RDT(i)); 523 for (i = 0; i < 4; i++) 524 regs_buff[149 + i] = rd32(E1000_RXDCTL(i)); 525 526 for (i = 0; i < 10; i++) 527 regs_buff[153 + i] = rd32(E1000_EITR(i)); 528 for (i = 0; i < 8; i++) 529 regs_buff[163 + i] = rd32(E1000_IMIR(i)); 530 for (i = 0; i < 8; i++) 531 regs_buff[171 + i] = rd32(E1000_IMIREXT(i)); 532 for (i = 0; i < 16; i++) 533 regs_buff[179 + i] = rd32(E1000_RAL(i)); 534 for (i = 0; i < 16; i++) 535 regs_buff[195 + i] = rd32(E1000_RAH(i)); 536 537 for (i = 0; i < 4; i++) 538 regs_buff[211 + i] = rd32(E1000_TDBAL(i)); 539 for (i = 0; i < 4; i++) 540 regs_buff[215 + i] = rd32(E1000_TDBAH(i)); 541 for (i = 0; i < 4; i++) 542 regs_buff[219 + i] = rd32(E1000_TDLEN(i)); 543 for (i = 0; i < 4; i++) 544 regs_buff[223 + i] = rd32(E1000_TDH(i)); 545 for (i = 0; i < 4; i++) 546 regs_buff[227 + i] = rd32(E1000_TDT(i)); 547 for (i = 0; i < 4; i++) 548 regs_buff[231 + i] = rd32(E1000_TXDCTL(i)); 549 for (i = 0; i < 4; i++) 550 regs_buff[235 + i] = rd32(E1000_TDWBAL(i)); 551 for (i = 0; i < 4; i++) 552 regs_buff[239 + i] = rd32(E1000_TDWBAH(i)); 553 for (i = 0; i < 4; i++) 554 regs_buff[243 + i] = rd32(E1000_DCA_TXCTRL(i)); 555 556 for (i = 0; i < 4; i++) 557 regs_buff[247 + i] = rd32(E1000_IP4AT_REG(i)); 558 for (i = 0; i < 4; i++) 559 regs_buff[251 + i] = rd32(E1000_IP6AT_REG(i)); 560 for (i = 0; i < 32; i++) 561 regs_buff[255 + i] = rd32(E1000_WUPM_REG(i)); 562 for (i = 0; i < 128; i++) 563 regs_buff[287 + i] = rd32(E1000_FFMT_REG(i)); 564 for (i = 0; i < 128; i++) 565 regs_buff[415 + i] = rd32(E1000_FFVT_REG(i)); 566 for (i = 0; i < 4; i++) 567 regs_buff[543 + i] = rd32(E1000_FFLT_REG(i)); 568 569 regs_buff[547] = rd32(E1000_TDFH); 570 regs_buff[548] = rd32(E1000_TDFT); 571 regs_buff[549] = rd32(E1000_TDFHS); 572 regs_buff[550] = rd32(E1000_TDFPC); 573 574} 575 576static int igb_get_eeprom_len(struct net_device *netdev) 577{ 578 struct igb_adapter *adapter = netdev_priv(netdev); 579 return adapter->hw.nvm.word_size * 2; 580} 581 582static int igb_get_eeprom(struct net_device *netdev, 583 struct ethtool_eeprom *eeprom, u8 *bytes) 584{ 585 struct igb_adapter *adapter = netdev_priv(netdev); 586 struct e1000_hw *hw = &adapter->hw; 587 u16 *eeprom_buff; 
588 int first_word, last_word; 589 int ret_val = 0; 590 u16 i; 591 592 if (eeprom->len == 0) 593 return -EINVAL; 594 595 eeprom->magic = hw->vendor_id | (hw->device_id << 16); 596 597 first_word = eeprom->offset >> 1; 598 last_word = (eeprom->offset + eeprom->len - 1) >> 1; 599 600 eeprom_buff = kmalloc(sizeof(u16) * 601 (last_word - first_word + 1), GFP_KERNEL); 602 if (!eeprom_buff) 603 return -ENOMEM; 604 605 if (hw->nvm.type == e1000_nvm_eeprom_spi) 606 ret_val = hw->nvm.ops.read(hw, first_word, 607 last_word - first_word + 1, 608 eeprom_buff); 609 else { 610 for (i = 0; i < last_word - first_word + 1; i++) { 611 ret_val = hw->nvm.ops.read(hw, first_word + i, 1, 612 &eeprom_buff[i]); 613 if (ret_val) 614 break; 615 } 616 } 617 618 /* Device's eeprom is always little-endian, word addressable */ 619 for (i = 0; i < last_word - first_word + 1; i++) 620 le16_to_cpus(&eeprom_buff[i]); 621 622 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), 623 eeprom->len); 624 kfree(eeprom_buff); 625 626 return ret_val; 627} 628 629static int igb_set_eeprom(struct net_device *netdev, 630 struct ethtool_eeprom *eeprom, u8 *bytes) 631{ 632 struct igb_adapter *adapter = netdev_priv(netdev); 633 struct e1000_hw *hw = &adapter->hw; 634 u16 *eeprom_buff; 635 void *ptr; 636 int max_len, first_word, last_word, ret_val = 0; 637 u16 i; 638 639 if (eeprom->len == 0) 640 return -EOPNOTSUPP; 641 642 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) 643 return -EFAULT; 644 645 max_len = hw->nvm.word_size * 2; 646 647 first_word = eeprom->offset >> 1; 648 last_word = (eeprom->offset + eeprom->len - 1) >> 1; 649 eeprom_buff = kmalloc(max_len, GFP_KERNEL); 650 if (!eeprom_buff) 651 return -ENOMEM; 652 653 ptr = (void *)eeprom_buff; 654 655 if (eeprom->offset & 1) { 656 /* need read/modify/write of first changed EEPROM word */ 657 /* only the second byte of the word is being modified */ 658 ret_val = hw->nvm.ops.read(hw, first_word, 1, 659 &eeprom_buff[0]); 660 ptr++; 661 } 662 if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { 663 /* need read/modify/write of last changed EEPROM word */ 664 /* only the first byte of the word is being modified */ 665 ret_val = hw->nvm.ops.read(hw, last_word, 1, 666 &eeprom_buff[last_word - first_word]); 667 } 668 669 /* Device's eeprom is always little-endian, word addressable */ 670 for (i = 0; i < last_word - first_word + 1; i++) 671 le16_to_cpus(&eeprom_buff[i]); 672 673 memcpy(ptr, bytes, eeprom->len); 674 675 for (i = 0; i < last_word - first_word + 1; i++) 676 eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); 677 678 ret_val = hw->nvm.ops.write(hw, first_word, 679 last_word - first_word + 1, eeprom_buff); 680 681 /* Update the checksum over the first part of the EEPROM if needed 682 * and flush shadow RAM for 82573 controllers */ 683 if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG))) 684 igb_update_nvm_checksum(hw); 685 686 kfree(eeprom_buff); 687 return ret_val; 688} 689 690static void igb_get_drvinfo(struct net_device *netdev, 691 struct ethtool_drvinfo *drvinfo) 692{ 693 struct igb_adapter *adapter = netdev_priv(netdev); 694 char firmware_version[32]; 695 u16 eeprom_data; 696 697 strncpy(drvinfo->driver, igb_driver_name, 32); 698 strncpy(drvinfo->version, igb_driver_version, 32); 699 700 /* EEPROM image version # is reported as firmware version # for 701 * 82575 controllers */ 702 adapter->hw.nvm.ops.read(&adapter->hw, 5, 1, &eeprom_data); 703 sprintf(firmware_version, "%d.%d-%d", 704 (eeprom_data & 0xF000) >> 12, 705 (eeprom_data & 0x0FF0) >> 4, 
706 eeprom_data & 0x000F); 707 708 strncpy(drvinfo->fw_version, firmware_version, 32); 709 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); 710 drvinfo->n_stats = IGB_STATS_LEN; 711 drvinfo->testinfo_len = IGB_TEST_LEN; 712 drvinfo->regdump_len = igb_get_regs_len(netdev); 713 drvinfo->eedump_len = igb_get_eeprom_len(netdev); 714} 715 716static void igb_get_ringparam(struct net_device *netdev, 717 struct ethtool_ringparam *ring) 718{ 719 struct igb_adapter *adapter = netdev_priv(netdev); 720 721 ring->rx_max_pending = IGB_MAX_RXD; 722 ring->tx_max_pending = IGB_MAX_TXD; 723 ring->rx_mini_max_pending = 0; 724 ring->rx_jumbo_max_pending = 0; 725 ring->rx_pending = adapter->rx_ring_count; 726 ring->tx_pending = adapter->tx_ring_count; 727 ring->rx_mini_pending = 0; 728 ring->rx_jumbo_pending = 0; 729} 730 731static int igb_set_ringparam(struct net_device *netdev, 732 struct ethtool_ringparam *ring) 733{ 734 struct igb_adapter *adapter = netdev_priv(netdev); 735 struct igb_ring *temp_ring; 736 int i, err; 737 u32 new_rx_count, new_tx_count; 738 739 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 740 return -EINVAL; 741 742 new_rx_count = max(ring->rx_pending, (u32)IGB_MIN_RXD); 743 new_rx_count = min(new_rx_count, (u32)IGB_MAX_RXD); 744 new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); 745 746 new_tx_count = max(ring->tx_pending, (u32)IGB_MIN_TXD); 747 new_tx_count = min(new_tx_count, (u32)IGB_MAX_TXD); 748 new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); 749 750 if ((new_tx_count == adapter->tx_ring_count) && 751 (new_rx_count == adapter->rx_ring_count)) { 752 /* nothing to do */ 753 return 0; 754 } 755 756 if (adapter->num_tx_queues > adapter->num_rx_queues) 757 temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring)); 758 else 759 temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring)); 760 if (!temp_ring) 761 return -ENOMEM; 762 763 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) 764 msleep(1); 765 766 if (netif_running(adapter->netdev)) 767 igb_down(adapter); 768 769 /* 770 * We can't just free everything and then setup again, 771 * because the ISRs in MSI-X mode get passed pointers 772 * to the tx and rx ring structs. 
773 */ 774 if (new_tx_count != adapter->tx_ring_count) { 775 memcpy(temp_ring, adapter->tx_ring, 776 adapter->num_tx_queues * sizeof(struct igb_ring)); 777 778 for (i = 0; i < adapter->num_tx_queues; i++) { 779 temp_ring[i].count = new_tx_count; 780 err = igb_setup_tx_resources(adapter, &temp_ring[i]); 781 if (err) { 782 while (i) { 783 i--; 784 igb_free_tx_resources(&temp_ring[i]); 785 } 786 goto err_setup; 787 } 788 } 789 790 for (i = 0; i < adapter->num_tx_queues; i++) 791 igb_free_tx_resources(&adapter->tx_ring[i]); 792 793 memcpy(adapter->tx_ring, temp_ring, 794 adapter->num_tx_queues * sizeof(struct igb_ring)); 795 796 adapter->tx_ring_count = new_tx_count; 797 } 798 799 if (new_rx_count != adapter->rx_ring->count) { 800 memcpy(temp_ring, adapter->rx_ring, 801 adapter->num_rx_queues * sizeof(struct igb_ring)); 802 803 for (i = 0; i < adapter->num_rx_queues; i++) { 804 temp_ring[i].count = new_rx_count; 805 err = igb_setup_rx_resources(adapter, &temp_ring[i]); 806 if (err) { 807 while (i) { 808 i--; 809 igb_free_rx_resources(&temp_ring[i]); 810 } 811 goto err_setup; 812 } 813 814 } 815 816 for (i = 0; i < adapter->num_rx_queues; i++) 817 igb_free_rx_resources(&adapter->rx_ring[i]); 818 819 memcpy(adapter->rx_ring, temp_ring, 820 adapter->num_rx_queues * sizeof(struct igb_ring)); 821 822 adapter->rx_ring_count = new_rx_count; 823 } 824 825 err = 0; 826err_setup: 827 if (netif_running(adapter->netdev)) 828 igb_up(adapter); 829 830 clear_bit(__IGB_RESETTING, &adapter->state); 831 vfree(temp_ring); 832 return err; 833} 834 835/* ethtool register test data */ 836struct igb_reg_test { 837 u16 reg; 838 u16 reg_offset; 839 u16 array_len; 840 u16 test_type; 841 u32 mask; 842 u32 write; 843}; 844 845/* In the hardware, registers are laid out either singly, in arrays 846 * spaced 0x100 bytes apart, or in contiguous tables. We assume 847 * most tests take place on arrays or single registers (handled 848 * as a single-element array) and special-case the tables. 849 * Table tests are always pattern tests. 850 * 851 * We also make provision for some required setup steps by specifying 852 * registers to be written without any read-back testing. 853 */ 854 855#define PATTERN_TEST 1 856#define SET_READ_TEST 2 857#define WRITE_NO_TEST 3 858#define TABLE32_TEST 4 859#define TABLE64_TEST_LO 5 860#define TABLE64_TEST_HI 6 861 862/* 82576 reg test */ 863static struct igb_reg_test reg_test_82576[] = { 864 { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 865 { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, 866 { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, 867 { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 868 { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 869 { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 870 { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, 871 { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 872 { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 873 { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, 874 /* Enable all RX queues before testing. */ 875 { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE }, 876 { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE }, 877 /* RDH is read-only for 82576, only test RDT. 
*/ 878 { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 879 { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 880 { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, 881 { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 }, 882 { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, 883 { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 884 { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, 885 { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 886 { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 887 { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, 888 { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 889 { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 890 { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, 891 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, 892 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, 893 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, 894 { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, 895 { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, 896 { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, 897 { E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, 898 { E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, 899 { E1000_MTA, 0, 128,TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 900 { 0, 0, 0, 0 } 901}; 902 903/* 82575 register test */ 904static struct igb_reg_test reg_test_82575[] = { 905 { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 906 { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, 907 { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, 908 { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 909 { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 910 { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 911 { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 912 /* Enable all four RX queues before testing. */ 913 { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE }, 914 /* RDH is read-only for 82575, only test RDT. 
*/ 915 { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 916 { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, 917 { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, 918 { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 919 { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, 920 { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 921 { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 922 { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 923 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, 924 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB }, 925 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF }, 926 { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, 927 { E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF }, 928 { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, 929 { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x800FFFFF, 0xFFFFFFFF }, 930 { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 931 { 0, 0, 0, 0 } 932}; 933 934static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data, 935 int reg, u32 mask, u32 write) 936{ 937 struct e1000_hw *hw = &adapter->hw; 938 u32 pat, val; 939 u32 _test[] = 940 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; 941 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { 942 wr32(reg, (_test[pat] & write)); 943 val = rd32(reg); 944 if (val != (_test[pat] & write & mask)) { 945 dev_err(&adapter->pdev->dev, "pattern test reg %04X " 946 "failed: got 0x%08X expected 0x%08X\n", 947 reg, val, (_test[pat] & write & mask)); 948 *data = reg; 949 return 1; 950 } 951 } 952 return 0; 953} 954 955static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data, 956 int reg, u32 mask, u32 write) 957{ 958 struct e1000_hw *hw = &adapter->hw; 959 u32 val; 960 wr32(reg, write & mask); 961 val = rd32(reg); 962 if ((write & mask) != (val & mask)) { 963 dev_err(&adapter->pdev->dev, "set/check reg %04X test failed:" 964 " got 0x%08X expected 0x%08X\n", reg, 965 (val & mask), (write & mask)); 966 *data = reg; 967 return 1; 968 } 969 return 0; 970} 971 972#define REG_PATTERN_TEST(reg, mask, write) \ 973 do { \ 974 if (reg_pattern_test(adapter, data, reg, mask, write)) \ 975 return 1; \ 976 } while (0) 977 978#define REG_SET_AND_CHECK(reg, mask, write) \ 979 do { \ 980 if (reg_set_and_check(adapter, data, reg, mask, write)) \ 981 return 1; \ 982 } while (0) 983 984static int igb_reg_test(struct igb_adapter *adapter, u64 *data) 985{ 986 struct e1000_hw *hw = &adapter->hw; 987 struct igb_reg_test *test; 988 u32 value, before, after; 989 u32 i, toggle; 990 991 toggle = 0x7FFFF3FF; 992 993 switch (adapter->hw.mac.type) { 994 case e1000_82576: 995 test = reg_test_82576; 996 break; 997 default: 998 test = reg_test_82575; 999 break; 1000 } 1001 1002 /* Because the status register is such a special case, 1003 * we handle it separately from the rest of the register 1004 * tests. Some bits are read-only, some toggle, and some 1005 * are writable on newer MACs. 
1006 */ 1007 before = rd32(E1000_STATUS); 1008 value = (rd32(E1000_STATUS) & toggle); 1009 wr32(E1000_STATUS, toggle); 1010 after = rd32(E1000_STATUS) & toggle; 1011 if (value != after) { 1012 dev_err(&adapter->pdev->dev, "failed STATUS register test " 1013 "got: 0x%08X expected: 0x%08X\n", after, value); 1014 *data = 1; 1015 return 1; 1016 } 1017 /* restore previous status */ 1018 wr32(E1000_STATUS, before); 1019 1020 /* Perform the remainder of the register test, looping through 1021 * the test table until we either fail or reach the null entry. 1022 */ 1023 while (test->reg) { 1024 for (i = 0; i < test->array_len; i++) { 1025 switch (test->test_type) { 1026 case PATTERN_TEST: 1027 REG_PATTERN_TEST(test->reg + 1028 (i * test->reg_offset), 1029 test->mask, 1030 test->write); 1031 break; 1032 case SET_READ_TEST: 1033 REG_SET_AND_CHECK(test->reg + 1034 (i * test->reg_offset), 1035 test->mask, 1036 test->write); 1037 break; 1038 case WRITE_NO_TEST: 1039 writel(test->write, 1040 (adapter->hw.hw_addr + test->reg) 1041 + (i * test->reg_offset)); 1042 break; 1043 case TABLE32_TEST: 1044 REG_PATTERN_TEST(test->reg + (i * 4), 1045 test->mask, 1046 test->write); 1047 break; 1048 case TABLE64_TEST_LO: 1049 REG_PATTERN_TEST(test->reg + (i * 8), 1050 test->mask, 1051 test->write); 1052 break; 1053 case TABLE64_TEST_HI: 1054 REG_PATTERN_TEST((test->reg + 4) + (i * 8), 1055 test->mask, 1056 test->write); 1057 break; 1058 } 1059 } 1060 test++; 1061 } 1062 1063 *data = 0; 1064 return 0; 1065} 1066 1067static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data) 1068{ 1069 u16 temp; 1070 u16 checksum = 0; 1071 u16 i; 1072 1073 *data = 0; 1074 /* Read and add up the contents of the EEPROM */ 1075 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { 1076 if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp)) 1077 < 0) { 1078 *data = 1; 1079 break; 1080 } 1081 checksum += temp; 1082 } 1083 1084 /* If Checksum is not Correct return error else test passed */ 1085 if ((checksum != (u16) NVM_SUM) && !(*data)) 1086 *data = 2; 1087 1088 return *data; 1089} 1090 1091static irqreturn_t igb_test_intr(int irq, void *data) 1092{ 1093 struct net_device *netdev = (struct net_device *) data; 1094 struct igb_adapter *adapter = netdev_priv(netdev); 1095 struct e1000_hw *hw = &adapter->hw; 1096 1097 adapter->test_icr |= rd32(E1000_ICR); 1098 1099 return IRQ_HANDLED; 1100} 1101 1102static int igb_intr_test(struct igb_adapter *adapter, u64 *data) 1103{ 1104 struct e1000_hw *hw = &adapter->hw; 1105 struct net_device *netdev = adapter->netdev; 1106 u32 mask, ics_mask, i = 0, shared_int = true; 1107 u32 irq = adapter->pdev->irq; 1108 1109 *data = 0; 1110 1111 /* Hook up test interrupt handler just for this test */ 1112 if (adapter->msix_entries) 1113 /* NOTE: we don't test MSI-X interrupts here, yet */ 1114 return 0; 1115 1116 if (adapter->flags & IGB_FLAG_HAS_MSI) { 1117 shared_int = false; 1118 if (request_irq(irq, &igb_test_intr, 0, netdev->name, netdev)) { 1119 *data = 1; 1120 return -1; 1121 } 1122 } else if (!request_irq(irq, &igb_test_intr, IRQF_PROBE_SHARED, 1123 netdev->name, netdev)) { 1124 shared_int = false; 1125 } else if (request_irq(irq, &igb_test_intr, IRQF_SHARED, 1126 netdev->name, netdev)) { 1127 *data = 1; 1128 return -1; 1129 } 1130 dev_info(&adapter->pdev->dev, "testing %s interrupt\n", 1131 (shared_int ? 
"shared" : "unshared")); 1132 /* Disable all the interrupts */ 1133 wr32(E1000_IMC, 0xFFFFFFFF); 1134 msleep(10); 1135 1136 /* Define all writable bits for ICS */ 1137 switch(hw->mac.type) { 1138 case e1000_82575: 1139 ics_mask = 0x37F47EDD; 1140 break; 1141 case e1000_82576: 1142 ics_mask = 0x77D4FBFD; 1143 break; 1144 default: 1145 ics_mask = 0x7FFFFFFF; 1146 break; 1147 } 1148 1149 /* Test each interrupt */ 1150 for (; i < 31; i++) { 1151 /* Interrupt to test */ 1152 mask = 1 << i; 1153 1154 if (!(mask & ics_mask)) 1155 continue; 1156 1157 if (!shared_int) { 1158 /* Disable the interrupt to be reported in 1159 * the cause register and then force the same 1160 * interrupt and see if one gets posted. If 1161 * an interrupt was posted to the bus, the 1162 * test failed. 1163 */ 1164 adapter->test_icr = 0; 1165 1166 /* Flush any pending interrupts */ 1167 wr32(E1000_ICR, ~0); 1168 1169 wr32(E1000_IMC, mask); 1170 wr32(E1000_ICS, mask); 1171 msleep(10); 1172 1173 if (adapter->test_icr & mask) { 1174 *data = 3; 1175 break; 1176 } 1177 } 1178 1179 /* Enable the interrupt to be reported in 1180 * the cause register and then force the same 1181 * interrupt and see if one gets posted. If 1182 * an interrupt was not posted to the bus, the 1183 * test failed. 1184 */ 1185 adapter->test_icr = 0; 1186 1187 /* Flush any pending interrupts */ 1188 wr32(E1000_ICR, ~0); 1189 1190 wr32(E1000_IMS, mask); 1191 wr32(E1000_ICS, mask); 1192 msleep(10); 1193 1194 if (!(adapter->test_icr & mask)) { 1195 *data = 4; 1196 break; 1197 } 1198 1199 if (!shared_int) { 1200 /* Disable the other interrupts to be reported in 1201 * the cause register and then force the other 1202 * interrupts and see if any get posted. If 1203 * an interrupt was posted to the bus, the 1204 * test failed. 
1205 */ 1206 adapter->test_icr = 0; 1207 1208 /* Flush any pending interrupts */ 1209 wr32(E1000_ICR, ~0); 1210 1211 wr32(E1000_IMC, ~mask); 1212 wr32(E1000_ICS, ~mask); 1213 msleep(10); 1214 1215 if (adapter->test_icr & mask) { 1216 *data = 5; 1217 break; 1218 } 1219 } 1220 } 1221 1222 /* Disable all the interrupts */ 1223 wr32(E1000_IMC, ~0); 1224 msleep(10); 1225 1226 /* Unhook test interrupt handler */ 1227 free_irq(irq, netdev); 1228 1229 return *data; 1230} 1231 1232static void igb_free_desc_rings(struct igb_adapter *adapter) 1233{ 1234 struct igb_ring *tx_ring = &adapter->test_tx_ring; 1235 struct igb_ring *rx_ring = &adapter->test_rx_ring; 1236 struct pci_dev *pdev = adapter->pdev; 1237 int i; 1238 1239 if (tx_ring->desc && tx_ring->buffer_info) { 1240 for (i = 0; i < tx_ring->count; i++) { 1241 struct igb_buffer *buf = &(tx_ring->buffer_info[i]); 1242 if (buf->dma) 1243 pci_unmap_single(pdev, buf->dma, buf->length, 1244 PCI_DMA_TODEVICE); 1245 if (buf->skb) 1246 dev_kfree_skb(buf->skb); 1247 } 1248 } 1249 1250 if (rx_ring->desc && rx_ring->buffer_info) { 1251 for (i = 0; i < rx_ring->count; i++) { 1252 struct igb_buffer *buf = &(rx_ring->buffer_info[i]); 1253 if (buf->dma) 1254 pci_unmap_single(pdev, buf->dma, 1255 IGB_RXBUFFER_2048, 1256 PCI_DMA_FROMDEVICE); 1257 if (buf->skb) 1258 dev_kfree_skb(buf->skb); 1259 } 1260 } 1261 1262 if (tx_ring->desc) { 1263 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, 1264 tx_ring->dma); 1265 tx_ring->desc = NULL; 1266 } 1267 if (rx_ring->desc) { 1268 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, 1269 rx_ring->dma); 1270 rx_ring->desc = NULL; 1271 } 1272 1273 kfree(tx_ring->buffer_info); 1274 tx_ring->buffer_info = NULL; 1275 kfree(rx_ring->buffer_info); 1276 rx_ring->buffer_info = NULL; 1277 1278 return; 1279} 1280 1281static int igb_setup_desc_rings(struct igb_adapter *adapter) 1282{ 1283 struct e1000_hw *hw = &adapter->hw; 1284 struct igb_ring *tx_ring = &adapter->test_tx_ring; 1285 struct igb_ring *rx_ring = &adapter->test_rx_ring; 1286 struct pci_dev *pdev = adapter->pdev; 1287 struct igb_buffer *buffer_info; 1288 u32 rctl; 1289 int i, ret_val; 1290 1291 /* Setup Tx descriptor ring and Tx buffers */ 1292 1293 if (!tx_ring->count) 1294 tx_ring->count = IGB_DEFAULT_TXD; 1295 1296 tx_ring->buffer_info = kcalloc(tx_ring->count, 1297 sizeof(struct igb_buffer), 1298 GFP_KERNEL); 1299 if (!tx_ring->buffer_info) { 1300 ret_val = 1; 1301 goto err_nomem; 1302 } 1303 1304 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); 1305 tx_ring->size = ALIGN(tx_ring->size, 4096); 1306 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, 1307 &tx_ring->dma); 1308 if (!tx_ring->desc) { 1309 ret_val = 2; 1310 goto err_nomem; 1311 } 1312 tx_ring->next_to_use = tx_ring->next_to_clean = 0; 1313 1314 wr32(E1000_TDBAL(0), 1315 ((u64) tx_ring->dma & 0x00000000FFFFFFFF)); 1316 wr32(E1000_TDBAH(0), ((u64) tx_ring->dma >> 32)); 1317 wr32(E1000_TDLEN(0), 1318 tx_ring->count * sizeof(union e1000_adv_tx_desc)); 1319 wr32(E1000_TDH(0), 0); 1320 wr32(E1000_TDT(0), 0); 1321 wr32(E1000_TCTL, 1322 E1000_TCTL_PSP | E1000_TCTL_EN | 1323 E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | 1324 E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT); 1325 1326 for (i = 0; i < tx_ring->count; i++) { 1327 union e1000_adv_tx_desc *tx_desc; 1328 struct sk_buff *skb; 1329 unsigned int size = 1024; 1330 1331 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i); 1332 skb = alloc_skb(size, GFP_KERNEL); 1333 if (!skb) { 1334 ret_val = 3; 1335 goto err_nomem; 1336 } 1337 skb_put(skb, 
size); 1338 buffer_info = &tx_ring->buffer_info[i]; 1339 buffer_info->skb = skb; 1340 buffer_info->length = skb->len; 1341 buffer_info->dma = pci_map_single(pdev, skb->data, skb->len, 1342 PCI_DMA_TODEVICE); 1343 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); 1344 tx_desc->read.olinfo_status = cpu_to_le32(skb->len) << 1345 E1000_ADVTXD_PAYLEN_SHIFT; 1346 tx_desc->read.cmd_type_len = cpu_to_le32(skb->len); 1347 tx_desc->read.cmd_type_len |= cpu_to_le32(E1000_TXD_CMD_EOP | 1348 E1000_TXD_CMD_IFCS | 1349 E1000_TXD_CMD_RS | 1350 E1000_ADVTXD_DTYP_DATA | 1351 E1000_ADVTXD_DCMD_DEXT); 1352 } 1353 1354 /* Setup Rx descriptor ring and Rx buffers */ 1355 1356 if (!rx_ring->count) 1357 rx_ring->count = IGB_DEFAULT_RXD; 1358 1359 rx_ring->buffer_info = kcalloc(rx_ring->count, 1360 sizeof(struct igb_buffer), 1361 GFP_KERNEL); 1362 if (!rx_ring->buffer_info) { 1363 ret_val = 4; 1364 goto err_nomem; 1365 } 1366 1367 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc); 1368 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, 1369 &rx_ring->dma); 1370 if (!rx_ring->desc) { 1371 ret_val = 5; 1372 goto err_nomem; 1373 } 1374 rx_ring->next_to_use = rx_ring->next_to_clean = 0; 1375 1376 rctl = rd32(E1000_RCTL); 1377 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); 1378 wr32(E1000_RDBAL(0), 1379 ((u64) rx_ring->dma & 0xFFFFFFFF)); 1380 wr32(E1000_RDBAH(0), 1381 ((u64) rx_ring->dma >> 32)); 1382 wr32(E1000_RDLEN(0), rx_ring->size); 1383 wr32(E1000_RDH(0), 0); 1384 wr32(E1000_RDT(0), 0); 1385 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); 1386 rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF | 1387 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 1388 wr32(E1000_RCTL, rctl); 1389 wr32(E1000_SRRCTL(0), E1000_SRRCTL_DESCTYPE_ADV_ONEBUF); 1390 1391 for (i = 0; i < rx_ring->count; i++) { 1392 union e1000_adv_rx_desc *rx_desc; 1393 struct sk_buff *skb; 1394 1395 buffer_info = &rx_ring->buffer_info[i]; 1396 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); 1397 skb = alloc_skb(IGB_RXBUFFER_2048 + NET_IP_ALIGN, 1398 GFP_KERNEL); 1399 if (!skb) { 1400 ret_val = 6; 1401 goto err_nomem; 1402 } 1403 skb_reserve(skb, NET_IP_ALIGN); 1404 buffer_info->skb = skb; 1405 buffer_info->dma = pci_map_single(pdev, skb->data, 1406 IGB_RXBUFFER_2048, 1407 PCI_DMA_FROMDEVICE); 1408 rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma); 1409 memset(skb->data, 0x00, skb->len); 1410 } 1411 1412 return 0; 1413 1414err_nomem: 1415 igb_free_desc_rings(adapter); 1416 return ret_val; 1417} 1418 1419static void igb_phy_disable_receiver(struct igb_adapter *adapter) 1420{ 1421 struct e1000_hw *hw = &adapter->hw; 1422 1423 /* Write out to PHY registers 29 and 30 to disable the Receiver. 
*/ 1424 igb_write_phy_reg(hw, 29, 0x001F); 1425 igb_write_phy_reg(hw, 30, 0x8FFC); 1426 igb_write_phy_reg(hw, 29, 0x001A); 1427 igb_write_phy_reg(hw, 30, 0x8FF0); 1428} 1429 1430static int igb_integrated_phy_loopback(struct igb_adapter *adapter) 1431{ 1432 struct e1000_hw *hw = &adapter->hw; 1433 u32 ctrl_reg = 0; 1434 1435 hw->mac.autoneg = false; 1436 1437 if (hw->phy.type == e1000_phy_m88) { 1438 /* Auto-MDI/MDIX Off */ 1439 igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); 1440 /* reset to update Auto-MDI/MDIX */ 1441 igb_write_phy_reg(hw, PHY_CONTROL, 0x9140); 1442 /* autoneg off */ 1443 igb_write_phy_reg(hw, PHY_CONTROL, 0x8140); 1444 } 1445 1446 ctrl_reg = rd32(E1000_CTRL); 1447 1448 /* force 1000, set loopback */ 1449 igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); 1450 1451 /* Now set up the MAC to the same speed/duplex as the PHY. */ 1452 ctrl_reg = rd32(E1000_CTRL); 1453 ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ 1454 ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ 1455 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ 1456 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ 1457 E1000_CTRL_FD | /* Force Duplex to FULL */ 1458 E1000_CTRL_SLU); /* Set link up enable bit */ 1459 1460 if (hw->phy.type == e1000_phy_m88) 1461 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ 1462 1463 wr32(E1000_CTRL, ctrl_reg); 1464 1465 /* Disable the receiver on the PHY so when a cable is plugged in, the 1466 * PHY does not begin to autoneg when a cable is reconnected to the NIC. 1467 */ 1468 if (hw->phy.type == e1000_phy_m88) 1469 igb_phy_disable_receiver(adapter); 1470 1471 udelay(500); 1472 1473 return 0; 1474} 1475 1476static int igb_set_phy_loopback(struct igb_adapter *adapter) 1477{ 1478 return igb_integrated_phy_loopback(adapter); 1479} 1480 1481static int igb_setup_loopback_test(struct igb_adapter *adapter) 1482{ 1483 struct e1000_hw *hw = &adapter->hw; 1484 u32 reg; 1485 1486 if (hw->phy.media_type == e1000_media_type_fiber || 1487 hw->phy.media_type == e1000_media_type_internal_serdes) { 1488 reg = rd32(E1000_RCTL); 1489 reg |= E1000_RCTL_LBM_TCVR; 1490 wr32(E1000_RCTL, reg); 1491 1492 wr32(E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK); 1493 1494 reg = rd32(E1000_CTRL); 1495 reg &= ~(E1000_CTRL_RFCE | 1496 E1000_CTRL_TFCE | 1497 E1000_CTRL_LRST); 1498 reg |= E1000_CTRL_SLU | 1499 E1000_CTRL_FD; 1500 wr32(E1000_CTRL, reg); 1501 1502 /* Unset switch control to serdes energy detect */ 1503 reg = rd32(E1000_CONNSW); 1504 reg &= ~E1000_CONNSW_ENRGSRC; 1505 wr32(E1000_CONNSW, reg); 1506 1507 /* Set PCS register for forced speed */ 1508 reg = rd32(E1000_PCS_LCTL); 1509 reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg*/ 1510 reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */ 1511 E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */ 1512 E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */ 1513 E1000_PCS_LCTL_FSD | /* Force Speed */ 1514 E1000_PCS_LCTL_FORCE_LINK; /* Force Link */ 1515 wr32(E1000_PCS_LCTL, reg); 1516 1517 return 0; 1518 } else if (hw->phy.media_type == e1000_media_type_copper) { 1519 return igb_set_phy_loopback(adapter); 1520 } 1521 1522 return 7; 1523} 1524 1525static void igb_loopback_cleanup(struct igb_adapter *adapter) 1526{ 1527 struct e1000_hw *hw = &adapter->hw; 1528 u32 rctl; 1529 u16 phy_reg; 1530 1531 rctl = rd32(E1000_RCTL); 1532 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); 1533 wr32(E1000_RCTL, rctl); 1534 1535 hw->mac.autoneg = true; 1536 igb_read_phy_reg(hw, PHY_CONTROL, &phy_reg); 1537 if (phy_reg & MII_CR_LOOPBACK) { 1538 
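/* The PHY is still in forced loopback from the test: clear MII_CR_LOOPBACK in PHY_CONTROL and soft-reset the PHY so normal autonegotiation can resume. */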
phy_reg &= ~MII_CR_LOOPBACK; 1539 igb_write_phy_reg(hw, PHY_CONTROL, phy_reg); 1540 igb_phy_sw_reset(hw); 1541 } 1542} 1543 1544static void igb_create_lbtest_frame(struct sk_buff *skb, 1545 unsigned int frame_size) 1546{ 1547 memset(skb->data, 0xFF, frame_size); 1548 frame_size &= ~1; 1549 memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); 1550 memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); 1551 memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); 1552} 1553 1554static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) 1555{ 1556 frame_size &= ~1; 1557 if (*(skb->data + 3) == 0xFF) 1558 if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && 1559 (*(skb->data + frame_size / 2 + 12) == 0xAF)) 1560 return 0; 1561 return 13; 1562} 1563 1564static int igb_run_loopback_test(struct igb_adapter *adapter) 1565{ 1566 struct e1000_hw *hw = &adapter->hw; 1567 struct igb_ring *tx_ring = &adapter->test_tx_ring; 1568 struct igb_ring *rx_ring = &adapter->test_rx_ring; 1569 struct pci_dev *pdev = adapter->pdev; 1570 int i, j, k, l, lc, good_cnt; 1571 int ret_val = 0; 1572 unsigned long time; 1573 1574 wr32(E1000_RDT(0), rx_ring->count - 1); 1575 1576 /* Calculate the loop count based on the largest descriptor ring 1577 * The idea is to wrap the largest ring a number of times using 64 1578 * send/receive pairs during each loop 1579 */ 1580 1581 if (rx_ring->count <= tx_ring->count) 1582 lc = ((tx_ring->count / 64) * 2) + 1; 1583 else 1584 lc = ((rx_ring->count / 64) * 2) + 1; 1585 1586 k = l = 0; 1587 for (j = 0; j <= lc; j++) { /* loop count loop */ 1588 for (i = 0; i < 64; i++) { /* send the packets */ 1589 igb_create_lbtest_frame(tx_ring->buffer_info[k].skb, 1590 1024); 1591 pci_dma_sync_single_for_device(pdev, 1592 tx_ring->buffer_info[k].dma, 1593 tx_ring->buffer_info[k].length, 1594 PCI_DMA_TODEVICE); 1595 k++; 1596 if (k == tx_ring->count) 1597 k = 0; 1598 } 1599 wr32(E1000_TDT(0), k); 1600 msleep(200); 1601 time = jiffies; /* set the start time for the receive */ 1602 good_cnt = 0; 1603 do { /* receive the sent packets */ 1604 pci_dma_sync_single_for_cpu(pdev, 1605 rx_ring->buffer_info[l].dma, 1606 IGB_RXBUFFER_2048, 1607 PCI_DMA_FROMDEVICE); 1608 1609 ret_val = igb_check_lbtest_frame( 1610 rx_ring->buffer_info[l].skb, 1024); 1611 if (!ret_val) 1612 good_cnt++; 1613 l++; 1614 if (l == rx_ring->count) 1615 l = 0; 1616 /* time + 20 msecs (200 msecs on 2.4) is more than 1617 * enough time to complete the receives, if it's 1618 * exceeded, break and error off 1619 */ 1620 } while (good_cnt < 64 && jiffies < (time + 20)); 1621 if (good_cnt != 64) { 1622 ret_val = 13; /* ret_val is the same as mis-compare */ 1623 break; 1624 } 1625 if (jiffies >= (time + 20)) { 1626 ret_val = 14; /* error code for time out error */ 1627 break; 1628 } 1629 } /* end loop count loop */ 1630 return ret_val; 1631} 1632 1633static int igb_loopback_test(struct igb_adapter *adapter, u64 *data) 1634{ 1635 /* PHY loopback cannot be performed if SoL/IDER 1636 * sessions are active */ 1637 if (igb_check_reset_block(&adapter->hw)) { 1638 dev_err(&adapter->pdev->dev, 1639 "Cannot do PHY loopback test " 1640 "when SoL/IDER is active.\n"); 1641 *data = 0; 1642 goto out; 1643 } 1644 *data = igb_setup_desc_rings(adapter); 1645 if (*data) 1646 goto out; 1647 *data = igb_setup_loopback_test(adapter); 1648 if (*data) 1649 goto err_loopback; 1650 *data = igb_run_loopback_test(adapter); 1651 igb_loopback_cleanup(adapter); 1652 1653err_loopback: 1654 igb_free_desc_rings(adapter); 1655out: 1656 return *data; 1657} 
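The loopback helpers above encode a fixed test pattern: igb_create_lbtest_frame() fills the frame with 0xFF, overwrites the upper half (minus one byte) with 0xAA, and plants marker bytes 0xBE and 0xAF at fixed offsets; igb_check_lbtest_frame() only inspects byte 3 and the two markers, returning 13 (the mis-compare code) on any mismatch. A minimal stand-alone sketch of that fill/check logic, written as plain user-space C with the same 1024-byte frame size igb_run_loopback_test() uses (the function names here are illustrative, not part of the driver):

#include <stdio.h>
#include <string.h>

/* mirrors igb_create_lbtest_frame(): 0xFF fill, 0xAA upper half,
 * markers 0xBE/0xAF at half+10 and half+12 */
static void create_lbtest_frame(unsigned char *buf, unsigned int frame_size)
{
	memset(buf, 0xFF, frame_size);
	frame_size &= ~1u;
	memset(&buf[frame_size / 2], 0xAA, frame_size / 2 - 1);
	buf[frame_size / 2 + 10] = 0xBE;
	buf[frame_size / 2 + 12] = 0xAF;
}

/* mirrors igb_check_lbtest_frame(): 0 on a good frame, 13 on mis-compare */
static int check_lbtest_frame(const unsigned char *buf, unsigned int frame_size)
{
	frame_size &= ~1u;
	if (buf[3] == 0xFF &&
	    buf[frame_size / 2 + 10] == 0xBE &&
	    buf[frame_size / 2 + 12] == 0xAF)
		return 0;
	return 13;
}

int main(void)
{
	unsigned char frame[1024];

	create_lbtest_frame(frame, sizeof(frame));
	printf("loopback frame check: %d\n", check_lbtest_frame(frame, sizeof(frame)));
	return 0;
}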
1658 1659static int igb_link_test(struct igb_adapter *adapter, u64 *data) 1660{ 1661 struct e1000_hw *hw = &adapter->hw; 1662 *data = 0; 1663 if (hw->phy.media_type == e1000_media_type_internal_serdes) { 1664 int i = 0; 1665 hw->mac.serdes_has_link = false; 1666 1667 /* On some blade server designs, link establishment 1668 * could take as long as 2-3 minutes */ 1669 do { 1670 hw->mac.ops.check_for_link(&adapter->hw); 1671 if (hw->mac.serdes_has_link) 1672 return *data; 1673 msleep(20); 1674 } while (i++ < 3750); 1675 1676 *data = 1; 1677 } else { 1678 hw->mac.ops.check_for_link(&adapter->hw); 1679 if (hw->mac.autoneg) 1680 msleep(4000); 1681 1682 if (!(rd32(E1000_STATUS) & 1683 E1000_STATUS_LU)) 1684 *data = 1; 1685 } 1686 return *data; 1687} 1688 1689static void igb_diag_test(struct net_device *netdev, 1690 struct ethtool_test *eth_test, u64 *data) 1691{ 1692 struct igb_adapter *adapter = netdev_priv(netdev); 1693 u16 autoneg_advertised; 1694 u8 forced_speed_duplex, autoneg; 1695 bool if_running = netif_running(netdev); 1696 1697 set_bit(__IGB_TESTING, &adapter->state); 1698 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 1699 /* Offline tests */ 1700 1701 /* save speed, duplex, autoneg settings */ 1702 autoneg_advertised = adapter->hw.phy.autoneg_advertised; 1703 forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; 1704 autoneg = adapter->hw.mac.autoneg; 1705 1706 dev_info(&adapter->pdev->dev, "offline testing starting\n"); 1707 1708 /* Link test performed before hardware reset so autoneg doesn't 1709 * interfere with test result */ 1710 if (igb_link_test(adapter, &data[4])) 1711 eth_test->flags |= ETH_TEST_FL_FAILED; 1712 1713 if (if_running) 1714 /* indicate we're in test mode */ 1715 dev_close(netdev); 1716 else 1717 igb_reset(adapter); 1718 1719 if (igb_reg_test(adapter, &data[0])) 1720 eth_test->flags |= ETH_TEST_FL_FAILED; 1721 1722 igb_reset(adapter); 1723 if (igb_eeprom_test(adapter, &data[1])) 1724 eth_test->flags |= ETH_TEST_FL_FAILED; 1725 1726 igb_reset(adapter); 1727 if (igb_intr_test(adapter, &data[2])) 1728 eth_test->flags |= ETH_TEST_FL_FAILED; 1729 1730 igb_reset(adapter); 1731 if (igb_loopback_test(adapter, &data[3])) 1732 eth_test->flags |= ETH_TEST_FL_FAILED; 1733 1734 /* restore speed, duplex, autoneg settings */ 1735 adapter->hw.phy.autoneg_advertised = autoneg_advertised; 1736 adapter->hw.mac.forced_speed_duplex = forced_speed_duplex; 1737 adapter->hw.mac.autoneg = autoneg; 1738 1739 /* force this routine to wait until autoneg complete/timeout */ 1740 adapter->hw.phy.autoneg_wait_to_complete = true; 1741 igb_reset(adapter); 1742 adapter->hw.phy.autoneg_wait_to_complete = false; 1743 1744 clear_bit(__IGB_TESTING, &adapter->state); 1745 if (if_running) 1746 dev_open(netdev); 1747 } else { 1748 dev_info(&adapter->pdev->dev, "online testing starting\n"); 1749 /* Online tests */ 1750 if (igb_link_test(adapter, &data[4])) 1751 eth_test->flags |= ETH_TEST_FL_FAILED; 1752 1753 /* Online tests aren't run; pass by default */ 1754 data[0] = 0; 1755 data[1] = 0; 1756 data[2] = 0; 1757 data[3] = 0; 1758 1759 clear_bit(__IGB_TESTING, &adapter->state); 1760 } 1761 msleep_interruptible(4 * 1000); 1762} 1763 1764static int igb_wol_exclusion(struct igb_adapter *adapter, 1765 struct ethtool_wolinfo *wol) 1766{ 1767 struct e1000_hw *hw = &adapter->hw; 1768 int retval = 1; /* fail by default */ 1769 1770 switch (hw->device_id) { 1771 case E1000_DEV_ID_82575GB_QUAD_COPPER: 1772 /* WoL not supported */ 1773 wol->supported = 0; 1774 break; 1775 case E1000_DEV_ID_82575EB_FIBER_SERDES: 
1776 case E1000_DEV_ID_82576_FIBER: 1777 case E1000_DEV_ID_82576_SERDES: 1778 /* Wake events not supported on port B */ 1779 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) { 1780 wol->supported = 0; 1781 break; 1782 } 1783 /* return success for non excluded adapter ports */ 1784 retval = 0; 1785 break; 1786 case E1000_DEV_ID_82576_QUAD_COPPER: 1787 /* quad port adapters only support WoL on port A */ 1788 if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) { 1789 wol->supported = 0; 1790 break; 1791 } 1792 /* return success for non excluded adapter ports */ 1793 retval = 0; 1794 break; 1795 default: 1796 /* dual port cards only support WoL on port A from now on 1797 * unless it was enabled in the eeprom for port B 1798 * so exclude FUNC_1 ports from having WoL enabled */ 1799 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1 && 1800 !adapter->eeprom_wol) { 1801 wol->supported = 0; 1802 break; 1803 } 1804 1805 retval = 0; 1806 } 1807 1808 return retval; 1809} 1810 1811static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 1812{ 1813 struct igb_adapter *adapter = netdev_priv(netdev); 1814 1815 wol->supported = WAKE_UCAST | WAKE_MCAST | 1816 WAKE_BCAST | WAKE_MAGIC; 1817 wol->wolopts = 0; 1818 1819 /* this function will set ->supported = 0 and return 1 if wol is not 1820 * supported by this hardware */ 1821 if (igb_wol_exclusion(adapter, wol) || 1822 !device_can_wakeup(&adapter->pdev->dev)) 1823 return; 1824 1825 /* apply any specific unsupported masks here */ 1826 switch (adapter->hw.device_id) { 1827 default: 1828 break; 1829 } 1830 1831 if (adapter->wol & E1000_WUFC_EX) 1832 wol->wolopts |= WAKE_UCAST; 1833 if (adapter->wol & E1000_WUFC_MC) 1834 wol->wolopts |= WAKE_MCAST; 1835 if (adapter->wol & E1000_WUFC_BC) 1836 wol->wolopts |= WAKE_BCAST; 1837 if (adapter->wol & E1000_WUFC_MAG) 1838 wol->wolopts |= WAKE_MAGIC; 1839 1840 return; 1841} 1842 1843static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 1844{ 1845 struct igb_adapter *adapter = netdev_priv(netdev); 1846 struct e1000_hw *hw = &adapter->hw; 1847 1848 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) 1849 return -EOPNOTSUPP; 1850 1851 if (igb_wol_exclusion(adapter, wol) || 1852 !device_can_wakeup(&adapter->pdev->dev)) 1853 return wol->wolopts ? 
static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
		return -EOPNOTSUPP;

	if (igb_wol_exclusion(adapter, wol) ||
	    !device_can_wakeup(&adapter->pdev->dev))
		return wol->wolopts ? -EOPNOTSUPP : 0;

	switch (hw->device_id) {
	default:
		break;
	}

	/* these settings will always override what we currently have */
	adapter->wol = 0;

	if (wol->wolopts & WAKE_UCAST)
		adapter->wol |= E1000_WUFC_EX;
	if (wol->wolopts & WAKE_MCAST)
		adapter->wol |= E1000_WUFC_MC;
	if (wol->wolopts & WAKE_BCAST)
		adapter->wol |= E1000_WUFC_BC;
	if (wol->wolopts & WAKE_MAGIC)
		adapter->wol |= E1000_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}

/* bit defines for adapter->led_status */
#define IGB_LED_ON	0

static int igb_phys_id(struct net_device *netdev, u32 data)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
		data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);

	igb_blink_led(hw);
	msleep_interruptible(data * 1000);

	igb_led_off(hw);
	clear_bit(IGB_LED_ON, &adapter->led_status);
	igb_cleanup_led(hw);

	return 0;
}

static int igb_set_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
	    ((ec->rx_coalesce_usecs > 3) &&
	     (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
	    (ec->rx_coalesce_usecs == 2))
		return -EINVAL;

	/* convert to rate of irq's per second */
	if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
		adapter->itr_setting = ec->rx_coalesce_usecs;
		adapter->itr = IGB_START_ITR;
	} else {
		adapter->itr_setting = ec->rx_coalesce_usecs << 2;
		adapter->itr = adapter->itr_setting;
	}

	for (i = 0; i < adapter->num_rx_queues; i++)
		wr32(adapter->rx_ring[i].itr_register, adapter->itr);

	return 0;
}

static int igb_get_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (adapter->itr_setting <= 3)
		ec->rx_coalesce_usecs = adapter->itr_setting;
	else
		ec->rx_coalesce_usecs = adapter->itr_setting >> 2;

	return 0;
}
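
/* Usage sketch: interrupt throttling is tuned through ethtool's coalescing
 * interface (the interface name eth0 is assumed here):
 *
 *   ethtool -c eth0                  # report the current rx-usecs value
 *   ethtool -C eth0 rx-usecs 100     # roughly one rx interrupt per 100 us
 *   ethtool -C eth0 rx-usecs 3       # 1 or 3 selects a dynamic ITR mode
 *
 * igb_set_coalesce() above rejects 2 and any value between 3 and
 * IGB_MIN_ITR_USECS; larger values are stored shifted left by two bits,
 * which igb_get_coalesce() undoes when reporting rx-usecs back.
 */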

static int igb_nway_reset(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	if (netif_running(netdev))
		igb_reinit_locked(adapter);
	return 0;
}

static int igb_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return IGB_STATS_LEN;
	case ETH_SS_TEST:
		return IGB_TEST_LEN;
	default:
		return -ENOTSUPP;
	}
}

static void igb_get_ethtool_stats(struct net_device *netdev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	u64 *queue_stat;
	int stat_count_tx = sizeof(struct igb_tx_queue_stats) / sizeof(u64);
	int stat_count_rx = sizeof(struct igb_rx_queue_stats) / sizeof(u64);
	int j;
	int i;

	igb_update_stats(adapter);
	for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
		char *p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
		data[i] = (igb_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	for (j = 0; j < adapter->num_tx_queues; j++) {
		int k;
		queue_stat = (u64 *)&adapter->tx_ring[j].tx_stats;
		for (k = 0; k < stat_count_tx; k++)
			data[i + k] = queue_stat[k];
		i += k;
	}
	for (j = 0; j < adapter->num_rx_queues; j++) {
		int k;
		queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats;
		for (k = 0; k < stat_count_rx; k++)
			data[i + k] = queue_stat[k];
		i += k;
	}
}

static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *igb_gstrings_test,
		       IGB_TEST_LEN*ETH_GSTRING_LEN);
		break;
	case ETH_SS_STATS:
		for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
			memcpy(p, igb_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < adapter->num_tx_queues; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < adapter->num_rx_queues; i++) {
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_drops", i);
			p += ETH_GSTRING_LEN;
		}
/*		BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
		break;
	}
}

static struct ethtool_ops igb_ethtool_ops = {
	.get_settings           = igb_get_settings,
	.set_settings           = igb_set_settings,
	.get_drvinfo            = igb_get_drvinfo,
	.get_regs_len           = igb_get_regs_len,
	.get_regs               = igb_get_regs,
	.get_wol                = igb_get_wol,
	.set_wol                = igb_set_wol,
	.get_msglevel           = igb_get_msglevel,
	.set_msglevel           = igb_set_msglevel,
	.nway_reset             = igb_nway_reset,
	.get_link               = ethtool_op_get_link,
	.get_eeprom_len         = igb_get_eeprom_len,
	.get_eeprom             = igb_get_eeprom,
	.set_eeprom             = igb_set_eeprom,
	.get_ringparam          = igb_get_ringparam,
	.set_ringparam          = igb_set_ringparam,
	.get_pauseparam         = igb_get_pauseparam,
	.set_pauseparam         = igb_set_pauseparam,
	.get_rx_csum            = igb_get_rx_csum,
	.set_rx_csum            = igb_set_rx_csum,
	.get_tx_csum            = igb_get_tx_csum,
	.set_tx_csum            = igb_set_tx_csum,
	.get_sg                 = ethtool_op_get_sg,
	.set_sg                 = ethtool_op_set_sg,
	.get_tso                = ethtool_op_get_tso,
	.set_tso                = igb_set_tso,
	.self_test              = igb_diag_test,
	.get_strings            = igb_get_strings,
	.phys_id                = igb_phys_id,
	.get_sset_count         = igb_get_sset_count,
	.get_ethtool_stats      = igb_get_ethtool_stats,
	.get_coalesce           = igb_get_coalesce,
	.set_coalesce           = igb_set_coalesce,
};

void igb_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops);
}
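
/* Usage sketch: the statistics and identify handlers above are reached
 * from userspace as, for example (the interface name eth0 is assumed here):
 *
 *   ethtool -S eth0      # per-adapter and per-queue counters
 *                        # (igb_get_strings + igb_get_ethtool_stats)
 *   ethtool -p eth0 5    # blink the port LED for 5 seconds (igb_phys_id)
 *
 * igb_set_ethtool_ops() is called during device probe so that ethtool
 * ioctls on the netdev are routed to the handlers in this file.
 */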