Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at 17431928194b36a0f88082df875e2e036da7fddf (2411 lines, 78 kB)
/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2010 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/* ethtool support for ixgbe */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

#include "ixgbe.h"


#define IXGBE_ALL_RAR_ENTRIES 16

enum {NETDEV_STATS, IXGBE_STATS};

struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};

#define IXGBE_STAT(m)		IXGBE_STATS, \
				sizeof(((struct ixgbe_adapter *)0)->m), \
				offsetof(struct ixgbe_adapter, m)
#define IXGBE_NETDEV_STAT(m)	NETDEV_STATS, \
				sizeof(((struct net_device *)0)->m), \
				offsetof(struct net_device, m)

static struct ixgbe_stats ixgbe_gstrings_stats[] = {
	{"rx_packets", IXGBE_NETDEV_STAT(stats.rx_packets)},
	{"tx_packets", IXGBE_NETDEV_STAT(stats.tx_packets)},
	{"rx_bytes", IXGBE_NETDEV_STAT(stats.rx_bytes)},
	{"tx_bytes", IXGBE_NETDEV_STAT(stats.tx_bytes)},
	{"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
	{"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
	{"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
	{"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
	{"lsc_int", IXGBE_STAT(lsc_int)},
	{"tx_busy", IXGBE_STAT(tx_busy)},
	{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
	{"rx_errors", IXGBE_NETDEV_STAT(stats.rx_errors)},
	{"tx_errors", IXGBE_NETDEV_STAT(stats.tx_errors)},
	{"rx_dropped", IXGBE_NETDEV_STAT(stats.rx_dropped)},
	{"tx_dropped", IXGBE_NETDEV_STAT(stats.tx_dropped)},
	{"multicast", IXGBE_NETDEV_STAT(stats.multicast)},
	{"broadcast", IXGBE_STAT(stats.bprc)},
	{"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
	{"collisions", IXGBE_NETDEV_STAT(stats.collisions)},
	{"rx_over_errors", IXGBE_NETDEV_STAT(stats.rx_over_errors)},
	{"rx_crc_errors", IXGBE_NETDEV_STAT(stats.rx_crc_errors)},
	{"rx_frame_errors", IXGBE_NETDEV_STAT(stats.rx_frame_errors)},
	{"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
	{"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
	{"fdir_match", IXGBE_STAT(stats.fdirmatch)},
	{"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
	{"rx_fifo_errors", IXGBE_NETDEV_STAT(stats.rx_fifo_errors)},
	{"rx_missed_errors", IXGBE_NETDEV_STAT(stats.rx_missed_errors)},
	{"tx_aborted_errors", IXGBE_NETDEV_STAT(stats.tx_aborted_errors)},
	{"tx_carrier_errors",
IXGBE_NETDEV_STAT(stats.tx_carrier_errors)}, 91 {"tx_fifo_errors", IXGBE_NETDEV_STAT(stats.tx_fifo_errors)}, 92 {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(stats.tx_heartbeat_errors)}, 93 {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)}, 94 {"tx_restart_queue", IXGBE_STAT(restart_queue)}, 95 {"rx_long_length_errors", IXGBE_STAT(stats.roc)}, 96 {"rx_short_length_errors", IXGBE_STAT(stats.ruc)}, 97 {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)}, 98 {"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)}, 99 {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)}, 100 {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)}, 101 {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)}, 102 {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)}, 103 {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)}, 104 {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)}, 105#ifdef IXGBE_FCOE 106 {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)}, 107 {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)}, 108 {"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)}, 109 {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)}, 110 {"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)}, 111 {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)}, 112#endif /* IXGBE_FCOE */ 113}; 114 115#define IXGBE_QUEUE_STATS_LEN \ 116 ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_tx_queues + \ 117 ((struct ixgbe_adapter *)netdev_priv(netdev))->num_rx_queues) * \ 118 (sizeof(struct ixgbe_queue_stats) / sizeof(u64))) 119#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) 120#define IXGBE_PB_STATS_LEN ( \ 121 (((struct ixgbe_adapter *)netdev_priv(netdev))->flags & \ 122 IXGBE_FLAG_DCB_ENABLED) ? \ 123 (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \ 124 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \ 125 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \ 126 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \ 127 / sizeof(u64) : 0) 128#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \ 129 IXGBE_PB_STATS_LEN + \ 130 IXGBE_QUEUE_STATS_LEN) 131 132static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { 133 "Register test (offline)", "Eeprom test (offline)", 134 "Interrupt test (offline)", "Loopback test (offline)", 135 "Link test (on/offline)" 136}; 137#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN 138 139static int ixgbe_get_settings(struct net_device *netdev, 140 struct ethtool_cmd *ecmd) 141{ 142 struct ixgbe_adapter *adapter = netdev_priv(netdev); 143 struct ixgbe_hw *hw = &adapter->hw; 144 u32 link_speed = 0; 145 bool link_up; 146 147 ecmd->supported = SUPPORTED_10000baseT_Full; 148 ecmd->autoneg = AUTONEG_ENABLE; 149 ecmd->transceiver = XCVR_EXTERNAL; 150 if ((hw->phy.media_type == ixgbe_media_type_copper) || 151 (hw->phy.multispeed_fiber)) { 152 ecmd->supported |= (SUPPORTED_1000baseT_Full | 153 SUPPORTED_Autoneg); 154 155 ecmd->advertising = ADVERTISED_Autoneg; 156 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) 157 ecmd->advertising |= ADVERTISED_10000baseT_Full; 158 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) 159 ecmd->advertising |= ADVERTISED_1000baseT_Full; 160 /* 161 * It's possible that phy.autoneg_advertised may not be 162 * set yet. If so display what the default would be - 163 * both 1G and 10G supported. 
164 */ 165 if (!(ecmd->advertising & (ADVERTISED_1000baseT_Full | 166 ADVERTISED_10000baseT_Full))) 167 ecmd->advertising |= (ADVERTISED_10000baseT_Full | 168 ADVERTISED_1000baseT_Full); 169 170 if (hw->phy.media_type == ixgbe_media_type_copper) { 171 ecmd->supported |= SUPPORTED_TP; 172 ecmd->advertising |= ADVERTISED_TP; 173 ecmd->port = PORT_TP; 174 } else { 175 ecmd->supported |= SUPPORTED_FIBRE; 176 ecmd->advertising |= ADVERTISED_FIBRE; 177 ecmd->port = PORT_FIBRE; 178 } 179 } else if (hw->phy.media_type == ixgbe_media_type_backplane) { 180 /* Set as FIBRE until SERDES defined in kernel */ 181 if (hw->device_id == IXGBE_DEV_ID_82598_BX) { 182 ecmd->supported = (SUPPORTED_1000baseT_Full | 183 SUPPORTED_FIBRE); 184 ecmd->advertising = (ADVERTISED_1000baseT_Full | 185 ADVERTISED_FIBRE); 186 ecmd->port = PORT_FIBRE; 187 ecmd->autoneg = AUTONEG_DISABLE; 188 } else { 189 ecmd->supported |= (SUPPORTED_1000baseT_Full | 190 SUPPORTED_FIBRE); 191 ecmd->advertising = (ADVERTISED_10000baseT_Full | 192 ADVERTISED_1000baseT_Full | 193 ADVERTISED_FIBRE); 194 ecmd->port = PORT_FIBRE; 195 } 196 } else { 197 ecmd->supported |= SUPPORTED_FIBRE; 198 ecmd->advertising = (ADVERTISED_10000baseT_Full | 199 ADVERTISED_FIBRE); 200 ecmd->port = PORT_FIBRE; 201 ecmd->autoneg = AUTONEG_DISABLE; 202 } 203 204 /* Get PHY type */ 205 switch (adapter->hw.phy.type) { 206 case ixgbe_phy_tn: 207 case ixgbe_phy_cu_unknown: 208 /* Copper 10G-BASET */ 209 ecmd->port = PORT_TP; 210 break; 211 case ixgbe_phy_qt: 212 ecmd->port = PORT_FIBRE; 213 break; 214 case ixgbe_phy_nl: 215 case ixgbe_phy_sfp_passive_tyco: 216 case ixgbe_phy_sfp_passive_unknown: 217 case ixgbe_phy_sfp_ftl: 218 case ixgbe_phy_sfp_avago: 219 case ixgbe_phy_sfp_intel: 220 case ixgbe_phy_sfp_unknown: 221 switch (adapter->hw.phy.sfp_type) { 222 /* SFP+ devices, further checking needed */ 223 case ixgbe_sfp_type_da_cu: 224 case ixgbe_sfp_type_da_cu_core0: 225 case ixgbe_sfp_type_da_cu_core1: 226 ecmd->port = PORT_DA; 227 break; 228 case ixgbe_sfp_type_sr: 229 case ixgbe_sfp_type_lr: 230 case ixgbe_sfp_type_srlr_core0: 231 case ixgbe_sfp_type_srlr_core1: 232 ecmd->port = PORT_FIBRE; 233 break; 234 case ixgbe_sfp_type_not_present: 235 ecmd->port = PORT_NONE; 236 break; 237 case ixgbe_sfp_type_unknown: 238 default: 239 ecmd->port = PORT_OTHER; 240 break; 241 } 242 break; 243 case ixgbe_phy_xaui: 244 ecmd->port = PORT_NONE; 245 break; 246 case ixgbe_phy_unknown: 247 case ixgbe_phy_generic: 248 case ixgbe_phy_sfp_unsupported: 249 default: 250 ecmd->port = PORT_OTHER; 251 break; 252 } 253 254 hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 255 if (link_up) { 256 ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 
257 SPEED_10000 : SPEED_1000; 258 ecmd->duplex = DUPLEX_FULL; 259 } else { 260 ecmd->speed = -1; 261 ecmd->duplex = -1; 262 } 263 264 return 0; 265} 266 267static int ixgbe_set_settings(struct net_device *netdev, 268 struct ethtool_cmd *ecmd) 269{ 270 struct ixgbe_adapter *adapter = netdev_priv(netdev); 271 struct ixgbe_hw *hw = &adapter->hw; 272 u32 advertised, old; 273 s32 err = 0; 274 275 if ((hw->phy.media_type == ixgbe_media_type_copper) || 276 (hw->phy.multispeed_fiber)) { 277 /* 10000/copper and 1000/copper must autoneg 278 * this function does not support any duplex forcing, but can 279 * limit the advertising of the adapter to only 10000 or 1000 */ 280 if (ecmd->autoneg == AUTONEG_DISABLE) 281 return -EINVAL; 282 283 old = hw->phy.autoneg_advertised; 284 advertised = 0; 285 if (ecmd->advertising & ADVERTISED_10000baseT_Full) 286 advertised |= IXGBE_LINK_SPEED_10GB_FULL; 287 288 if (ecmd->advertising & ADVERTISED_1000baseT_Full) 289 advertised |= IXGBE_LINK_SPEED_1GB_FULL; 290 291 if (old == advertised) 292 return err; 293 /* this sets the link speed and restarts auto-neg */ 294 hw->mac.autotry_restart = true; 295 err = hw->mac.ops.setup_link(hw, advertised, true, true); 296 if (err) { 297 DPRINTK(PROBE, INFO, 298 "setup link failed with code %d\n", err); 299 hw->mac.ops.setup_link(hw, old, true, true); 300 } 301 } else { 302 /* in this case we currently only support 10Gb/FULL */ 303 if ((ecmd->autoneg == AUTONEG_ENABLE) || 304 (ecmd->advertising != ADVERTISED_10000baseT_Full) || 305 (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) 306 return -EINVAL; 307 } 308 309 return err; 310} 311 312static void ixgbe_get_pauseparam(struct net_device *netdev, 313 struct ethtool_pauseparam *pause) 314{ 315 struct ixgbe_adapter *adapter = netdev_priv(netdev); 316 struct ixgbe_hw *hw = &adapter->hw; 317 318 /* 319 * Flow Control Autoneg isn't on if 320 * - we didn't ask for it OR 321 * - it failed, we know this by tx & rx being off 322 */ 323 if (hw->fc.disable_fc_autoneg || 324 (hw->fc.current_mode == ixgbe_fc_none)) 325 pause->autoneg = 0; 326 else 327 pause->autoneg = 1; 328 329#ifdef CONFIG_DCB 330 if (hw->fc.current_mode == ixgbe_fc_pfc) { 331 pause->rx_pause = 0; 332 pause->tx_pause = 0; 333 } 334 335#endif 336 if (hw->fc.current_mode == ixgbe_fc_rx_pause) { 337 pause->rx_pause = 1; 338 } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) { 339 pause->tx_pause = 1; 340 } else if (hw->fc.current_mode == ixgbe_fc_full) { 341 pause->rx_pause = 1; 342 pause->tx_pause = 1; 343 } 344} 345 346static int ixgbe_set_pauseparam(struct net_device *netdev, 347 struct ethtool_pauseparam *pause) 348{ 349 struct ixgbe_adapter *adapter = netdev_priv(netdev); 350 struct ixgbe_hw *hw = &adapter->hw; 351 struct ixgbe_fc_info fc; 352 353#ifdef CONFIG_DCB 354 if (adapter->dcb_cfg.pfc_mode_enable || 355 ((hw->mac.type == ixgbe_mac_82598EB) && 356 (adapter->flags & IXGBE_FLAG_DCB_ENABLED))) 357 return -EINVAL; 358 359#endif 360 361 fc = hw->fc; 362 363 if (pause->autoneg != AUTONEG_ENABLE) 364 fc.disable_fc_autoneg = true; 365 else 366 fc.disable_fc_autoneg = false; 367 368 if ((pause->rx_pause && pause->tx_pause) || pause->autoneg) 369 fc.requested_mode = ixgbe_fc_full; 370 else if (pause->rx_pause && !pause->tx_pause) 371 fc.requested_mode = ixgbe_fc_rx_pause; 372 else if (!pause->rx_pause && pause->tx_pause) 373 fc.requested_mode = ixgbe_fc_tx_pause; 374 else if (!pause->rx_pause && !pause->tx_pause) 375 fc.requested_mode = ixgbe_fc_none; 376 else 377 return -EINVAL; 378 379#ifdef CONFIG_DCB 380 
adapter->last_lfc_mode = fc.requested_mode; 381#endif 382 383 /* if the thing changed then we'll update and use new autoneg */ 384 if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) { 385 hw->fc = fc; 386 if (netif_running(netdev)) 387 ixgbe_reinit_locked(adapter); 388 else 389 ixgbe_reset(adapter); 390 } 391 392 return 0; 393} 394 395static u32 ixgbe_get_rx_csum(struct net_device *netdev) 396{ 397 struct ixgbe_adapter *adapter = netdev_priv(netdev); 398 return (adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED); 399} 400 401static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data) 402{ 403 struct ixgbe_adapter *adapter = netdev_priv(netdev); 404 if (data) 405 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; 406 else 407 adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED; 408 409 if (netif_running(netdev)) 410 ixgbe_reinit_locked(adapter); 411 else 412 ixgbe_reset(adapter); 413 414 return 0; 415} 416 417static u32 ixgbe_get_tx_csum(struct net_device *netdev) 418{ 419 return (netdev->features & NETIF_F_IP_CSUM) != 0; 420} 421 422static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data) 423{ 424 struct ixgbe_adapter *adapter = netdev_priv(netdev); 425 426 if (data) { 427 netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); 428 if (adapter->hw.mac.type == ixgbe_mac_82599EB) 429 netdev->features |= NETIF_F_SCTP_CSUM; 430 } else { 431 netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); 432 if (adapter->hw.mac.type == ixgbe_mac_82599EB) 433 netdev->features &= ~NETIF_F_SCTP_CSUM; 434 } 435 436 return 0; 437} 438 439static int ixgbe_set_tso(struct net_device *netdev, u32 data) 440{ 441 if (data) { 442 netdev->features |= NETIF_F_TSO; 443 netdev->features |= NETIF_F_TSO6; 444 } else { 445 netdev->features &= ~NETIF_F_TSO; 446 netdev->features &= ~NETIF_F_TSO6; 447 } 448 return 0; 449} 450 451static u32 ixgbe_get_msglevel(struct net_device *netdev) 452{ 453 struct ixgbe_adapter *adapter = netdev_priv(netdev); 454 return adapter->msg_enable; 455} 456 457static void ixgbe_set_msglevel(struct net_device *netdev, u32 data) 458{ 459 struct ixgbe_adapter *adapter = netdev_priv(netdev); 460 adapter->msg_enable = data; 461} 462 463static int ixgbe_get_regs_len(struct net_device *netdev) 464{ 465#define IXGBE_REGS_LEN 1128 466 return IXGBE_REGS_LEN * sizeof(u32); 467} 468 469#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_ 470 471static void ixgbe_get_regs(struct net_device *netdev, 472 struct ethtool_regs *regs, void *p) 473{ 474 struct ixgbe_adapter *adapter = netdev_priv(netdev); 475 struct ixgbe_hw *hw = &adapter->hw; 476 u32 *regs_buff = p; 477 u8 i; 478 479 memset(p, 0, IXGBE_REGS_LEN * sizeof(u32)); 480 481 regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id; 482 483 /* General Registers */ 484 regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL); 485 regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS); 486 regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 487 regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP); 488 regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP); 489 regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 490 regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER); 491 regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER); 492 493 /* NVM Register */ 494 regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC); 495 regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD); 496 regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA); 497 regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL); 498 regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA); 499 regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL); 500 regs_buff[14] 
= IXGBE_READ_REG(hw, IXGBE_FLMNGDATA); 501 regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT); 502 regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP); 503 regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC); 504 505 /* Interrupt */ 506 /* don't read EICR because it can clear interrupt causes, instead 507 * read EICS which is a shadow but doesn't clear EICR */ 508 regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS); 509 regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS); 510 regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS); 511 regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC); 512 regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC); 513 regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM); 514 regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0)); 515 regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0)); 516 regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT); 517 regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA); 518 regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0)); 519 regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE); 520 521 /* Flow Control */ 522 regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP); 523 regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0)); 524 regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1)); 525 regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2)); 526 regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3)); 527 for (i = 0; i < 8; i++) 528 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i)); 529 for (i = 0; i < 8; i++) 530 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i)); 531 regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV); 532 regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS); 533 534 /* Receive DMA */ 535 for (i = 0; i < 64; i++) 536 regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i)); 537 for (i = 0; i < 64; i++) 538 regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i)); 539 for (i = 0; i < 64; i++) 540 regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i)); 541 for (i = 0; i < 64; i++) 542 regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i)); 543 for (i = 0; i < 64; i++) 544 regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i)); 545 for (i = 0; i < 64; i++) 546 regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); 547 for (i = 0; i < 16; i++) 548 regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i)); 549 for (i = 0; i < 16; i++) 550 regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); 551 regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 552 for (i = 0; i < 8; i++) 553 regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); 554 regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 555 regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN); 556 557 /* Receive */ 558 regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 559 regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL); 560 for (i = 0; i < 16; i++) 561 regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i)); 562 for (i = 0; i < 16; i++) 563 regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i)); 564 regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)); 565 regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL); 566 regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 567 regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL); 568 regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC); 569 regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); 570 for (i = 0; i < 8; i++) 571 regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i)); 572 for (i = 0; i < 8; i++) 573 regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i)); 574 regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP); 575 576 /* Transmit */ 577 for (i = 0; i < 32; i++) 578 regs_buff[537 + i] = IXGBE_READ_REG(hw, 
IXGBE_TDBAL(i)); 579 for (i = 0; i < 32; i++) 580 regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i)); 581 for (i = 0; i < 32; i++) 582 regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i)); 583 for (i = 0; i < 32; i++) 584 regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i)); 585 for (i = 0; i < 32; i++) 586 regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i)); 587 for (i = 0; i < 32; i++) 588 regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); 589 for (i = 0; i < 32; i++) 590 regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i)); 591 for (i = 0; i < 32; i++) 592 regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i)); 593 regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL); 594 for (i = 0; i < 16; i++) 595 regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); 596 regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG); 597 for (i = 0; i < 8; i++) 598 regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i)); 599 regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP); 600 601 /* Wake Up */ 602 regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC); 603 regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC); 604 regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS); 605 regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV); 606 regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT); 607 regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT); 608 regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL); 609 regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM); 610 regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0)); 611 612 regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); 613 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS); 614 regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); 615 regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR); 616 for (i = 0; i < 8; i++) 617 regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i)); 618 for (i = 0; i < 8; i++) 619 regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i)); 620 for (i = 0; i < 8; i++) 621 regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i)); 622 for (i = 0; i < 8; i++) 623 regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i)); 624 for (i = 0; i < 8; i++) 625 regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); 626 for (i = 0; i < 8; i++) 627 regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); 628 629 /* Statistics */ 630 regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs); 631 regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc); 632 regs_buff[883] = IXGBE_GET_STAT(adapter, errbc); 633 regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc); 634 for (i = 0; i < 8; i++) 635 regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]); 636 regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc); 637 regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc); 638 regs_buff[895] = IXGBE_GET_STAT(adapter, rlec); 639 regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc); 640 regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc); 641 regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc); 642 regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc); 643 for (i = 0; i < 8; i++) 644 regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]); 645 for (i = 0; i < 8; i++) 646 regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]); 647 for (i = 0; i < 8; i++) 648 regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]); 649 for (i = 0; i < 8; i++) 650 regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]); 651 regs_buff[932] = IXGBE_GET_STAT(adapter, prc64); 652 regs_buff[933] = IXGBE_GET_STAT(adapter, prc127); 653 regs_buff[934] = IXGBE_GET_STAT(adapter, prc255); 654 regs_buff[935] = IXGBE_GET_STAT(adapter, prc511); 
655 regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023); 656 regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522); 657 regs_buff[938] = IXGBE_GET_STAT(adapter, gprc); 658 regs_buff[939] = IXGBE_GET_STAT(adapter, bprc); 659 regs_buff[940] = IXGBE_GET_STAT(adapter, mprc); 660 regs_buff[941] = IXGBE_GET_STAT(adapter, gptc); 661 regs_buff[942] = IXGBE_GET_STAT(adapter, gorc); 662 regs_buff[944] = IXGBE_GET_STAT(adapter, gotc); 663 for (i = 0; i < 8; i++) 664 regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]); 665 regs_buff[954] = IXGBE_GET_STAT(adapter, ruc); 666 regs_buff[955] = IXGBE_GET_STAT(adapter, rfc); 667 regs_buff[956] = IXGBE_GET_STAT(adapter, roc); 668 regs_buff[957] = IXGBE_GET_STAT(adapter, rjc); 669 regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc); 670 regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc); 671 regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc); 672 regs_buff[961] = IXGBE_GET_STAT(adapter, tor); 673 regs_buff[963] = IXGBE_GET_STAT(adapter, tpr); 674 regs_buff[964] = IXGBE_GET_STAT(adapter, tpt); 675 regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64); 676 regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127); 677 regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255); 678 regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511); 679 regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023); 680 regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522); 681 regs_buff[971] = IXGBE_GET_STAT(adapter, mptc); 682 regs_buff[972] = IXGBE_GET_STAT(adapter, bptc); 683 regs_buff[973] = IXGBE_GET_STAT(adapter, xec); 684 for (i = 0; i < 16; i++) 685 regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]); 686 for (i = 0; i < 16; i++) 687 regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]); 688 for (i = 0; i < 16; i++) 689 regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]); 690 for (i = 0; i < 16; i++) 691 regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]); 692 693 /* MAC */ 694 regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG); 695 regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); 696 regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); 697 regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0); 698 regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1); 699 regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 700 regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); 701 regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP); 702 regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP); 703 regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0); 704 regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1); 705 regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP); 706 regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA); 707 regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE); 708 regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD); 709 regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS); 710 regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA); 711 regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD); 712 regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD); 713 regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD); 714 regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG); 715 regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1); 716 regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2); 717 regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS); 718 regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC); 719 regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS); 720 regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC); 721 regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS); 722 regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 723 regs_buff[1067] = IXGBE_READ_REG(hw, 
IXGBE_AUTOC3); 724 regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1); 725 regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2); 726 regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); 727 728 /* Diagnostic */ 729 regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL); 730 for (i = 0; i < 8; i++) 731 regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i)); 732 regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN); 733 for (i = 0; i < 4; i++) 734 regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i)); 735 regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE); 736 regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL); 737 for (i = 0; i < 8; i++) 738 regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i)); 739 regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN); 740 for (i = 0; i < 4; i++) 741 regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i)); 742 regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE); 743 regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL); 744 regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0); 745 regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1); 746 regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2); 747 regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3); 748 regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL); 749 regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0); 750 regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1); 751 regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2); 752 regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3); 753 for (i = 0; i < 8; i++) 754 regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i)); 755 regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL); 756 regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1); 757 regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2); 758 regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1); 759 regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2); 760 regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS); 761 regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL); 762 regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC); 763 regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC); 764} 765 766static int ixgbe_get_eeprom_len(struct net_device *netdev) 767{ 768 struct ixgbe_adapter *adapter = netdev_priv(netdev); 769 return adapter->hw.eeprom.word_size * 2; 770} 771 772static int ixgbe_get_eeprom(struct net_device *netdev, 773 struct ethtool_eeprom *eeprom, u8 *bytes) 774{ 775 struct ixgbe_adapter *adapter = netdev_priv(netdev); 776 struct ixgbe_hw *hw = &adapter->hw; 777 u16 *eeprom_buff; 778 int first_word, last_word, eeprom_len; 779 int ret_val = 0; 780 u16 i; 781 782 if (eeprom->len == 0) 783 return -EINVAL; 784 785 eeprom->magic = hw->vendor_id | (hw->device_id << 16); 786 787 first_word = eeprom->offset >> 1; 788 last_word = (eeprom->offset + eeprom->len - 1) >> 1; 789 eeprom_len = last_word - first_word + 1; 790 791 eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL); 792 if (!eeprom_buff) 793 return -ENOMEM; 794 795 for (i = 0; i < eeprom_len; i++) { 796 if ((ret_val = hw->eeprom.ops.read(hw, first_word + i, 797 &eeprom_buff[i]))) 798 break; 799 } 800 801 /* Device's eeprom is always little-endian, word addressable */ 802 for (i = 0; i < eeprom_len; i++) 803 le16_to_cpus(&eeprom_buff[i]); 804 805 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); 806 kfree(eeprom_buff); 807 808 return ret_val; 809} 810 811static void ixgbe_get_drvinfo(struct net_device *netdev, 812 struct ethtool_drvinfo *drvinfo) 813{ 814 struct ixgbe_adapter *adapter = 
netdev_priv(netdev); 815 char firmware_version[32]; 816 817 strncpy(drvinfo->driver, ixgbe_driver_name, 32); 818 strncpy(drvinfo->version, ixgbe_driver_version, 32); 819 820 sprintf(firmware_version, "%d.%d-%d", 821 (adapter->eeprom_version & 0xF000) >> 12, 822 (adapter->eeprom_version & 0x0FF0) >> 4, 823 adapter->eeprom_version & 0x000F); 824 825 strncpy(drvinfo->fw_version, firmware_version, 32); 826 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); 827 drvinfo->n_stats = IXGBE_STATS_LEN; 828 drvinfo->testinfo_len = IXGBE_TEST_LEN; 829 drvinfo->regdump_len = ixgbe_get_regs_len(netdev); 830} 831 832static void ixgbe_get_ringparam(struct net_device *netdev, 833 struct ethtool_ringparam *ring) 834{ 835 struct ixgbe_adapter *adapter = netdev_priv(netdev); 836 struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; 837 struct ixgbe_ring *rx_ring = adapter->rx_ring[0]; 838 839 ring->rx_max_pending = IXGBE_MAX_RXD; 840 ring->tx_max_pending = IXGBE_MAX_TXD; 841 ring->rx_mini_max_pending = 0; 842 ring->rx_jumbo_max_pending = 0; 843 ring->rx_pending = rx_ring->count; 844 ring->tx_pending = tx_ring->count; 845 ring->rx_mini_pending = 0; 846 ring->rx_jumbo_pending = 0; 847} 848 849static int ixgbe_set_ringparam(struct net_device *netdev, 850 struct ethtool_ringparam *ring) 851{ 852 struct ixgbe_adapter *adapter = netdev_priv(netdev); 853 struct ixgbe_ring *temp_tx_ring, *temp_rx_ring; 854 int i, err = 0; 855 u32 new_rx_count, new_tx_count; 856 bool need_update = false; 857 858 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 859 return -EINVAL; 860 861 new_rx_count = max(ring->rx_pending, (u32)IXGBE_MIN_RXD); 862 new_rx_count = min(new_rx_count, (u32)IXGBE_MAX_RXD); 863 new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); 864 865 new_tx_count = max(ring->tx_pending, (u32)IXGBE_MIN_TXD); 866 new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD); 867 new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); 868 869 if ((new_tx_count == adapter->tx_ring[0]->count) && 870 (new_rx_count == adapter->rx_ring[0]->count)) { 871 /* nothing to do */ 872 return 0; 873 } 874 875 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) 876 msleep(1); 877 878 if (!netif_running(adapter->netdev)) { 879 for (i = 0; i < adapter->num_tx_queues; i++) 880 adapter->tx_ring[i]->count = new_tx_count; 881 for (i = 0; i < adapter->num_rx_queues; i++) 882 adapter->rx_ring[i]->count = new_rx_count; 883 adapter->tx_ring_count = new_tx_count; 884 adapter->rx_ring_count = new_rx_count; 885 goto clear_reset; 886 } 887 888 temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring)); 889 if (!temp_tx_ring) { 890 err = -ENOMEM; 891 goto clear_reset; 892 } 893 894 if (new_tx_count != adapter->tx_ring_count) { 895 for (i = 0; i < adapter->num_tx_queues; i++) { 896 memcpy(&temp_tx_ring[i], adapter->tx_ring[i], 897 sizeof(struct ixgbe_ring)); 898 temp_tx_ring[i].count = new_tx_count; 899 err = ixgbe_setup_tx_resources(adapter, 900 &temp_tx_ring[i]); 901 if (err) { 902 while (i) { 903 i--; 904 ixgbe_free_tx_resources(adapter, 905 &temp_tx_ring[i]); 906 } 907 goto clear_reset; 908 } 909 } 910 need_update = true; 911 } 912 913 temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring)); 914 if (!temp_rx_ring) { 915 err = -ENOMEM; 916 goto err_setup; 917 } 918 919 if (new_rx_count != adapter->rx_ring_count) { 920 for (i = 0; i < adapter->num_rx_queues; i++) { 921 memcpy(&temp_rx_ring[i], adapter->rx_ring[i], 922 sizeof(struct ixgbe_ring)); 923 temp_rx_ring[i].count = 
new_rx_count; 924 err = ixgbe_setup_rx_resources(adapter, 925 &temp_rx_ring[i]); 926 if (err) { 927 while (i) { 928 i--; 929 ixgbe_free_rx_resources(adapter, 930 &temp_rx_ring[i]); 931 } 932 goto err_setup; 933 } 934 } 935 need_update = true; 936 } 937 938 /* if rings need to be updated, here's the place to do it in one shot */ 939 if (need_update) { 940 ixgbe_down(adapter); 941 942 /* tx */ 943 if (new_tx_count != adapter->tx_ring_count) { 944 for (i = 0; i < adapter->num_tx_queues; i++) { 945 ixgbe_free_tx_resources(adapter, 946 adapter->tx_ring[i]); 947 memcpy(adapter->tx_ring[i], &temp_tx_ring[i], 948 sizeof(struct ixgbe_ring)); 949 } 950 adapter->tx_ring_count = new_tx_count; 951 } 952 953 /* rx */ 954 if (new_rx_count != adapter->rx_ring_count) { 955 for (i = 0; i < adapter->num_rx_queues; i++) { 956 ixgbe_free_rx_resources(adapter, 957 adapter->rx_ring[i]); 958 memcpy(adapter->rx_ring[i], &temp_rx_ring[i], 959 sizeof(struct ixgbe_ring)); 960 } 961 adapter->rx_ring_count = new_rx_count; 962 } 963 ixgbe_up(adapter); 964 } 965 966 vfree(temp_rx_ring); 967err_setup: 968 vfree(temp_tx_ring); 969clear_reset: 970 clear_bit(__IXGBE_RESETTING, &adapter->state); 971 return err; 972} 973 974static int ixgbe_get_sset_count(struct net_device *netdev, int sset) 975{ 976 switch (sset) { 977 case ETH_SS_TEST: 978 return IXGBE_TEST_LEN; 979 case ETH_SS_STATS: 980 return IXGBE_STATS_LEN; 981 case ETH_SS_NTUPLE_FILTERS: 982 return (ETHTOOL_MAX_NTUPLE_LIST_ENTRY * 983 ETHTOOL_MAX_NTUPLE_STRING_PER_ENTRY); 984 default: 985 return -EOPNOTSUPP; 986 } 987} 988 989static void ixgbe_get_ethtool_stats(struct net_device *netdev, 990 struct ethtool_stats *stats, u64 *data) 991{ 992 struct ixgbe_adapter *adapter = netdev_priv(netdev); 993 u64 *queue_stat; 994 int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64); 995 int j, k; 996 int i; 997 char *p = NULL; 998 999 ixgbe_update_stats(adapter); 1000 dev_get_stats(netdev); 1001 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { 1002 switch (ixgbe_gstrings_stats[i].type) { 1003 case NETDEV_STATS: 1004 p = (char *) netdev + 1005 ixgbe_gstrings_stats[i].stat_offset; 1006 break; 1007 case IXGBE_STATS: 1008 p = (char *) adapter + 1009 ixgbe_gstrings_stats[i].stat_offset; 1010 break; 1011 } 1012 1013 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == 1014 sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; 1015 } 1016 for (j = 0; j < adapter->num_tx_queues; j++) { 1017 queue_stat = (u64 *)&adapter->tx_ring[j]->stats; 1018 for (k = 0; k < stat_count; k++) 1019 data[i + k] = queue_stat[k]; 1020 i += k; 1021 } 1022 for (j = 0; j < adapter->num_rx_queues; j++) { 1023 queue_stat = (u64 *)&adapter->rx_ring[j]->stats; 1024 for (k = 0; k < stat_count; k++) 1025 data[i + k] = queue_stat[k]; 1026 i += k; 1027 } 1028 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 1029 for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) { 1030 data[i++] = adapter->stats.pxontxc[j]; 1031 data[i++] = adapter->stats.pxofftxc[j]; 1032 } 1033 for (j = 0; j < MAX_RX_PACKET_BUFFERS; j++) { 1034 data[i++] = adapter->stats.pxonrxc[j]; 1035 data[i++] = adapter->stats.pxoffrxc[j]; 1036 } 1037 } 1038} 1039 1040static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, 1041 u8 *data) 1042{ 1043 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1044 char *p = (char *)data; 1045 int i; 1046 1047 switch (stringset) { 1048 case ETH_SS_TEST: 1049 memcpy(data, *ixgbe_gstrings_test, 1050 IXGBE_TEST_LEN * ETH_GSTRING_LEN); 1051 break; 1052 case ETH_SS_STATS: 1053 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { 1054 memcpy(p, ixgbe_gstrings_stats[i].stat_string, 1055 ETH_GSTRING_LEN); 1056 p += ETH_GSTRING_LEN; 1057 } 1058 for (i = 0; i < adapter->num_tx_queues; i++) { 1059 sprintf(p, "tx_queue_%u_packets", i); 1060 p += ETH_GSTRING_LEN; 1061 sprintf(p, "tx_queue_%u_bytes", i); 1062 p += ETH_GSTRING_LEN; 1063 } 1064 for (i = 0; i < adapter->num_rx_queues; i++) { 1065 sprintf(p, "rx_queue_%u_packets", i); 1066 p += ETH_GSTRING_LEN; 1067 sprintf(p, "rx_queue_%u_bytes", i); 1068 p += ETH_GSTRING_LEN; 1069 } 1070 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 1071 for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) { 1072 sprintf(p, "tx_pb_%u_pxon", i); 1073 p += ETH_GSTRING_LEN; 1074 sprintf(p, "tx_pb_%u_pxoff", i); 1075 p += ETH_GSTRING_LEN; 1076 } 1077 for (i = 0; i < MAX_RX_PACKET_BUFFERS; i++) { 1078 sprintf(p, "rx_pb_%u_pxon", i); 1079 p += ETH_GSTRING_LEN; 1080 sprintf(p, "rx_pb_%u_pxoff", i); 1081 p += ETH_GSTRING_LEN; 1082 } 1083 } 1084 /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ 1085 break; 1086 } 1087} 1088 1089static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data) 1090{ 1091 struct ixgbe_hw *hw = &adapter->hw; 1092 bool link_up; 1093 u32 link_speed = 0; 1094 *data = 0; 1095 1096 hw->mac.ops.check_link(hw, &link_speed, &link_up, true); 1097 if (link_up) 1098 return *data; 1099 else 1100 *data = 1; 1101 return *data; 1102} 1103 1104/* ethtool register test data */ 1105struct ixgbe_reg_test { 1106 u16 reg; 1107 u8 array_len; 1108 u8 test_type; 1109 u32 mask; 1110 u32 write; 1111}; 1112 1113/* In the hardware, registers are laid out either singly, in arrays 1114 * spaced 0x40 bytes apart, or in contiguous tables. We assume 1115 * most tests take place on arrays or single registers (handled 1116 * as a single-element array) and special-case the tables. 1117 * Table tests are always pattern tests. 1118 * 1119 * We also make provision for some required setup steps by specifying 1120 * registers to be written without any read-back testing. 
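 * (For instance, the per-queue descriptor registers such as RDBAL(n) below
 * are tested as 4-entry arrays at 0x40-byte strides, while the MTA multicast
 * table array is exercised as 128 contiguous 32-bit entries.)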
1121 */ 1122 1123#define PATTERN_TEST 1 1124#define SET_READ_TEST 2 1125#define WRITE_NO_TEST 3 1126#define TABLE32_TEST 4 1127#define TABLE64_TEST_LO 5 1128#define TABLE64_TEST_HI 6 1129 1130/* default 82599 register test */ 1131static struct ixgbe_reg_test reg_test_82599[] = { 1132 { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1133 { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1134 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1135 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, 1136 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, 1137 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1138 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 1139 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, 1140 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 1141 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, 1142 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1143 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1144 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 1145 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1146 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 }, 1147 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 }, 1148 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, 1149 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF }, 1150 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1151 { 0, 0, 0, 0 } 1152}; 1153 1154/* default 82598 register test */ 1155static struct ixgbe_reg_test reg_test_82598[] = { 1156 { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1157 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1158 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1159 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, 1160 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 1161 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1162 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 1163 /* Enable all four RX queues before testing. */ 1164 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, 1165 /* RDH is read-only for 82598, only test RDT. 
*/ 1166 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 1167 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, 1168 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1169 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1170 { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF }, 1171 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 1172 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1173 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 1174 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 }, 1175 { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 }, 1176 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, 1177 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF }, 1178 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1179 { 0, 0, 0, 0 } 1180}; 1181 1182#define REG_PATTERN_TEST(R, M, W) \ 1183{ \ 1184 u32 pat, val, before; \ 1185 const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \ 1186 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { \ 1187 before = readl(adapter->hw.hw_addr + R); \ 1188 writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \ 1189 val = readl(adapter->hw.hw_addr + R); \ 1190 if (val != (_test[pat] & W & M)) { \ 1191 DPRINTK(DRV, ERR, "pattern test reg %04X failed: got "\ 1192 "0x%08X expected 0x%08X\n", \ 1193 R, val, (_test[pat] & W & M)); \ 1194 *data = R; \ 1195 writel(before, adapter->hw.hw_addr + R); \ 1196 return 1; \ 1197 } \ 1198 writel(before, adapter->hw.hw_addr + R); \ 1199 } \ 1200} 1201 1202#define REG_SET_AND_CHECK(R, M, W) \ 1203{ \ 1204 u32 val, before; \ 1205 before = readl(adapter->hw.hw_addr + R); \ 1206 writel((W & M), (adapter->hw.hw_addr + R)); \ 1207 val = readl(adapter->hw.hw_addr + R); \ 1208 if ((W & M) != (val & M)) { \ 1209 DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\ 1210 "expected 0x%08X\n", R, (val & M), (W & M)); \ 1211 *data = R; \ 1212 writel(before, (adapter->hw.hw_addr + R)); \ 1213 return 1; \ 1214 } \ 1215 writel(before, (adapter->hw.hw_addr + R)); \ 1216} 1217 1218static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) 1219{ 1220 struct ixgbe_reg_test *test; 1221 u32 value, before, after; 1222 u32 i, toggle; 1223 1224 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 1225 toggle = 0x7FFFF30F; 1226 test = reg_test_82599; 1227 } else { 1228 toggle = 0x7FFFF3FF; 1229 test = reg_test_82598; 1230 } 1231 1232 /* 1233 * Because the status register is such a special case, 1234 * we handle it separately from the rest of the register 1235 * tests. Some bits are read-only, some toggle, and some 1236 * are writeable on newer MACs. 1237 */ 1238 before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS); 1239 value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle); 1240 IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle); 1241 after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle; 1242 if (value != after) { 1243 DPRINTK(DRV, ERR, "failed STATUS register test got: " 1244 "0x%08X expected: 0x%08X\n", after, value); 1245 *data = 1; 1246 return 1; 1247 } 1248 /* restore previous status */ 1249 IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before); 1250 1251 /* 1252 * Perform the remainder of the register test, looping through 1253 * the test table until we either fail or reach the null entry. 
1254 */ 1255 while (test->reg) { 1256 for (i = 0; i < test->array_len; i++) { 1257 switch (test->test_type) { 1258 case PATTERN_TEST: 1259 REG_PATTERN_TEST(test->reg + (i * 0x40), 1260 test->mask, 1261 test->write); 1262 break; 1263 case SET_READ_TEST: 1264 REG_SET_AND_CHECK(test->reg + (i * 0x40), 1265 test->mask, 1266 test->write); 1267 break; 1268 case WRITE_NO_TEST: 1269 writel(test->write, 1270 (adapter->hw.hw_addr + test->reg) 1271 + (i * 0x40)); 1272 break; 1273 case TABLE32_TEST: 1274 REG_PATTERN_TEST(test->reg + (i * 4), 1275 test->mask, 1276 test->write); 1277 break; 1278 case TABLE64_TEST_LO: 1279 REG_PATTERN_TEST(test->reg + (i * 8), 1280 test->mask, 1281 test->write); 1282 break; 1283 case TABLE64_TEST_HI: 1284 REG_PATTERN_TEST((test->reg + 4) + (i * 8), 1285 test->mask, 1286 test->write); 1287 break; 1288 } 1289 } 1290 test++; 1291 } 1292 1293 *data = 0; 1294 return 0; 1295} 1296 1297static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data) 1298{ 1299 struct ixgbe_hw *hw = &adapter->hw; 1300 if (hw->eeprom.ops.validate_checksum(hw, NULL)) 1301 *data = 1; 1302 else 1303 *data = 0; 1304 return *data; 1305} 1306 1307static irqreturn_t ixgbe_test_intr(int irq, void *data) 1308{ 1309 struct net_device *netdev = (struct net_device *) data; 1310 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1311 1312 adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR); 1313 1314 return IRQ_HANDLED; 1315} 1316 1317static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) 1318{ 1319 struct net_device *netdev = adapter->netdev; 1320 u32 mask, i = 0, shared_int = true; 1321 u32 irq = adapter->pdev->irq; 1322 1323 *data = 0; 1324 1325 /* Hook up test interrupt handler just for this test */ 1326 if (adapter->msix_entries) { 1327 /* NOTE: we don't test MSI-X interrupts here, yet */ 1328 return 0; 1329 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { 1330 shared_int = false; 1331 if (request_irq(irq, ixgbe_test_intr, 0, netdev->name, 1332 netdev)) { 1333 *data = 1; 1334 return -1; 1335 } 1336 } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED, 1337 netdev->name, netdev)) { 1338 shared_int = false; 1339 } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED, 1340 netdev->name, netdev)) { 1341 *data = 1; 1342 return -1; 1343 } 1344 DPRINTK(HW, INFO, "testing %s interrupt\n", 1345 (shared_int ? "shared" : "unshared")); 1346 1347 /* Disable all the interrupts */ 1348 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); 1349 msleep(10); 1350 1351 /* Test each interrupt */ 1352 for (; i < 10; i++) { 1353 /* Interrupt to test */ 1354 mask = 1 << i; 1355 1356 if (!shared_int) { 1357 /* 1358 * Disable the interrupts to be reported in 1359 * the cause register and then force the same 1360 * interrupt and see if one gets posted. If 1361 * an interrupt was posted to the bus, the 1362 * test failed. 1363 */ 1364 adapter->test_icr = 0; 1365 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 1366 ~mask & 0x00007FFF); 1367 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, 1368 ~mask & 0x00007FFF); 1369 msleep(10); 1370 1371 if (adapter->test_icr & mask) { 1372 *data = 3; 1373 break; 1374 } 1375 } 1376 1377 /* 1378 * Enable the interrupt to be reported in the cause 1379 * register and then force the same interrupt and see 1380 * if one gets posted. If an interrupt was not posted 1381 * to the bus, the test failed. 
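		 * (adapter->test_icr is accumulated by ixgbe_test_intr(), the
		 * temporary handler hooked up above, from whatever EICR reports.)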
1382 */ 1383 adapter->test_icr = 0; 1384 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); 1385 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); 1386 msleep(10); 1387 1388 if (!(adapter->test_icr &mask)) { 1389 *data = 4; 1390 break; 1391 } 1392 1393 if (!shared_int) { 1394 /* 1395 * Disable the other interrupts to be reported in 1396 * the cause register and then force the other 1397 * interrupts and see if any get posted. If 1398 * an interrupt was posted to the bus, the 1399 * test failed. 1400 */ 1401 adapter->test_icr = 0; 1402 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 1403 ~mask & 0x00007FFF); 1404 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, 1405 ~mask & 0x00007FFF); 1406 msleep(10); 1407 1408 if (adapter->test_icr) { 1409 *data = 5; 1410 break; 1411 } 1412 } 1413 } 1414 1415 /* Disable all the interrupts */ 1416 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); 1417 msleep(10); 1418 1419 /* Unhook test interrupt handler */ 1420 free_irq(irq, netdev); 1421 1422 return *data; 1423} 1424 1425static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) 1426{ 1427 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; 1428 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; 1429 struct ixgbe_hw *hw = &adapter->hw; 1430 struct pci_dev *pdev = adapter->pdev; 1431 u32 reg_ctl; 1432 int i; 1433 1434 /* shut down the DMA engines now so they can be reinitialized later */ 1435 1436 /* first Rx */ 1437 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 1438 reg_ctl &= ~IXGBE_RXCTRL_RXEN; 1439 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl); 1440 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(0)); 1441 reg_ctl &= ~IXGBE_RXDCTL_ENABLE; 1442 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(0), reg_ctl); 1443 1444 /* now Tx */ 1445 reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(0)); 1446 reg_ctl &= ~IXGBE_TXDCTL_ENABLE; 1447 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(0), reg_ctl); 1448 if (hw->mac.type == ixgbe_mac_82599EB) { 1449 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1450 reg_ctl &= ~IXGBE_DMATXCTL_TE; 1451 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); 1452 } 1453 1454 ixgbe_reset(adapter); 1455 1456 if (tx_ring->desc && tx_ring->tx_buffer_info) { 1457 for (i = 0; i < tx_ring->count; i++) { 1458 struct ixgbe_tx_buffer *buf = 1459 &(tx_ring->tx_buffer_info[i]); 1460 if (buf->dma) 1461 dma_unmap_single(&pdev->dev, buf->dma, 1462 buf->length, DMA_TO_DEVICE); 1463 if (buf->skb) 1464 dev_kfree_skb(buf->skb); 1465 } 1466 } 1467 1468 if (rx_ring->desc && rx_ring->rx_buffer_info) { 1469 for (i = 0; i < rx_ring->count; i++) { 1470 struct ixgbe_rx_buffer *buf = 1471 &(rx_ring->rx_buffer_info[i]); 1472 if (buf->dma) 1473 dma_unmap_single(&pdev->dev, buf->dma, 1474 IXGBE_RXBUFFER_2048, 1475 DMA_FROM_DEVICE); 1476 if (buf->skb) 1477 dev_kfree_skb(buf->skb); 1478 } 1479 } 1480 1481 if (tx_ring->desc) { 1482 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, 1483 tx_ring->dma); 1484 tx_ring->desc = NULL; 1485 } 1486 if (rx_ring->desc) { 1487 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, 1488 rx_ring->dma); 1489 rx_ring->desc = NULL; 1490 } 1491 1492 kfree(tx_ring->tx_buffer_info); 1493 tx_ring->tx_buffer_info = NULL; 1494 kfree(rx_ring->rx_buffer_info); 1495 rx_ring->rx_buffer_info = NULL; 1496} 1497 1498static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) 1499{ 1500 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; 1501 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; 1502 struct pci_dev *pdev = adapter->pdev; 1503 u32 rctl, reg_data; 1504 int i, ret_val; 1505 1506 /* Setup Tx descriptor ring and Tx 
buffers */ 1507 1508 if (!tx_ring->count) 1509 tx_ring->count = IXGBE_DEFAULT_TXD; 1510 1511 tx_ring->tx_buffer_info = kcalloc(tx_ring->count, 1512 sizeof(struct ixgbe_tx_buffer), 1513 GFP_KERNEL); 1514 if (!(tx_ring->tx_buffer_info)) { 1515 ret_val = 1; 1516 goto err_nomem; 1517 } 1518 1519 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 1520 tx_ring->size = ALIGN(tx_ring->size, 4096); 1521 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, 1522 &tx_ring->dma, GFP_KERNEL); 1523 if (!(tx_ring->desc)) { 1524 ret_val = 2; 1525 goto err_nomem; 1526 } 1527 tx_ring->next_to_use = tx_ring->next_to_clean = 0; 1528 1529 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAL(0), 1530 ((u64) tx_ring->dma & 0x00000000FFFFFFFF)); 1531 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAH(0), 1532 ((u64) tx_ring->dma >> 32)); 1533 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDLEN(0), 1534 tx_ring->count * sizeof(union ixgbe_adv_tx_desc)); 1535 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDH(0), 0); 1536 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), 0); 1537 1538 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0); 1539 reg_data |= IXGBE_HLREG0_TXPADEN; 1540 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data); 1541 1542 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 1543 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); 1544 reg_data |= IXGBE_DMATXCTL_TE; 1545 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); 1546 } 1547 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(0)); 1548 reg_data |= IXGBE_TXDCTL_ENABLE; 1549 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(0), reg_data); 1550 1551 for (i = 0; i < tx_ring->count; i++) { 1552 union ixgbe_adv_tx_desc *desc = IXGBE_TX_DESC_ADV(*tx_ring, i); 1553 struct sk_buff *skb; 1554 unsigned int size = 1024; 1555 1556 skb = alloc_skb(size, GFP_KERNEL); 1557 if (!skb) { 1558 ret_val = 3; 1559 goto err_nomem; 1560 } 1561 skb_put(skb, size); 1562 tx_ring->tx_buffer_info[i].skb = skb; 1563 tx_ring->tx_buffer_info[i].length = skb->len; 1564 tx_ring->tx_buffer_info[i].dma = 1565 dma_map_single(&pdev->dev, skb->data, skb->len, 1566 DMA_TO_DEVICE); 1567 desc->read.buffer_addr = 1568 cpu_to_le64(tx_ring->tx_buffer_info[i].dma); 1569 desc->read.cmd_type_len = cpu_to_le32(skb->len); 1570 desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD_EOP | 1571 IXGBE_TXD_CMD_IFCS | 1572 IXGBE_TXD_CMD_RS); 1573 desc->read.olinfo_status = 0; 1574 if (adapter->hw.mac.type == ixgbe_mac_82599EB) 1575 desc->read.olinfo_status |= 1576 (skb->len << IXGBE_ADVTXD_PAYLEN_SHIFT); 1577 1578 } 1579 1580 /* Setup Rx Descriptor ring and Rx buffers */ 1581 1582 if (!rx_ring->count) 1583 rx_ring->count = IXGBE_DEFAULT_RXD; 1584 1585 rx_ring->rx_buffer_info = kcalloc(rx_ring->count, 1586 sizeof(struct ixgbe_rx_buffer), 1587 GFP_KERNEL); 1588 if (!(rx_ring->rx_buffer_info)) { 1589 ret_val = 4; 1590 goto err_nomem; 1591 } 1592 1593 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 1594 rx_ring->size = ALIGN(rx_ring->size, 4096); 1595 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, 1596 &rx_ring->dma, GFP_KERNEL); 1597 if (!(rx_ring->desc)) { 1598 ret_val = 5; 1599 goto err_nomem; 1600 } 1601 rx_ring->next_to_use = rx_ring->next_to_clean = 0; 1602 1603 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); 1604 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN); 1605 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAL(0), 1606 ((u64)rx_ring->dma & 0xFFFFFFFF)); 1607 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAH(0), 1608 ((u64) rx_ring->dma >> 32)); 1609 
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDLEN(0), rx_ring->size);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDH(0), 0);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), 0);

	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data);

	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
	reg_data &= ~IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);

	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RDRXCTL);
#define IXGBE_RDRXCTL_RDMTS_MASK	0x00000003 /* Receive Descriptor Minimum
						      Threshold Size mask */
	reg_data &= ~IXGBE_RDRXCTL_RDMTS_MASK;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDRXCTL, reg_data);

	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_MCSTCTRL);
#define IXGBE_MCSTCTRL_MO_MASK	0x00000003 /* Multicast Offset mask */
	reg_data &= ~IXGBE_MCSTCTRL_MO_MASK;
	reg_data |= adapter->hw.mac.mc_filter_type;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_MCSTCTRL, reg_data);

	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(0));
	reg_data |= IXGBE_RXDCTL_ENABLE;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(0), reg_data);
	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
		int j = adapter->rx_ring[0]->reg_idx;
		u32 k;
		for (k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(&adapter->hw,
					   IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
				break;
			else
				msleep(1);
		}
	}

	rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);

	for (i = 0; i < rx_ring->count; i++) {
		union ixgbe_adv_rx_desc *rx_desc =
			IXGBE_RX_DESC_ADV(*rx_ring, i);
		struct sk_buff *skb;

		skb = alloc_skb(IXGBE_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
		if (!skb) {
			ret_val = 6;
			goto err_nomem;
		}
		skb_reserve(skb, NET_IP_ALIGN);
		rx_ring->rx_buffer_info[i].skb = skb;
		rx_ring->rx_buffer_info[i].dma =
			dma_map_single(&pdev->dev, skb->data,
				       IXGBE_RXBUFFER_2048, DMA_FROM_DEVICE);
		rx_desc->read.pkt_addr =
			cpu_to_le64(rx_ring->rx_buffer_info[i].dma);
		memset(skb->data, 0x00, skb->len);
	}

	return 0;

err_nomem:
	ixgbe_free_desc_rings(adapter);
	return ret_val;
}

static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg_data;

	/* right now we only support MAC loopback in the driver */

	/* Setup MAC loopback */
	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
	reg_data |= IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);

	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_AUTOC);
	reg_data &= ~IXGBE_AUTOC_LMS_MASK;
	reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data);

	/* Disable Atlas Tx lanes; re-enabled in reset path */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		u8 atlas;

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
	}

	return 0;
}

static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
{
	u32 reg_data;

	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
	reg_data &= ~IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
}

static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
				      unsigned int frame_size)
{
	memset(skb->data, 0xFF, frame_size);
	frame_size &= ~1;
	memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
	memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
	memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
}

static int ixgbe_check_lbtest_frame(struct sk_buff *skb,
				    unsigned int frame_size)
{
	frame_size &= ~1;
	if (*(skb->data + 3) == 0xFF) {
		if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
		    (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
			return 0;
		}
	}
	return 13;
}

static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int i, j, k, l, lc, good_cnt, ret_val = 0;
	unsigned long time;

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), rx_ring->count - 1);

	/*
	 * Calculate the loop count based on the largest descriptor ring
	 * The idea is to wrap the largest ring a number of times using 64
	 * send/receive pairs during each loop
	 */

	if (rx_ring->count <= tx_ring->count)
		lc = ((tx_ring->count / 64) * 2) + 1;
	else
		lc = ((rx_ring->count / 64) * 2) + 1;

	k = l = 0;
	for (j = 0; j <= lc; j++) {
		for (i = 0; i < 64; i++) {
			ixgbe_create_lbtest_frame(
					tx_ring->tx_buffer_info[k].skb,
					1024);
			dma_sync_single_for_device(&pdev->dev,
					tx_ring->tx_buffer_info[k].dma,
					tx_ring->tx_buffer_info[k].length,
					DMA_TO_DEVICE);
			if (unlikely(++k == tx_ring->count))
				k = 0;
		}
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), k);
		msleep(200);
		/* set the start time for the receive */
		time = jiffies;
		good_cnt = 0;
		do {
			/* receive the sent packets */
			dma_sync_single_for_cpu(&pdev->dev,
					rx_ring->rx_buffer_info[l].dma,
					IXGBE_RXBUFFER_2048,
					DMA_FROM_DEVICE);
			ret_val = ixgbe_check_lbtest_frame(
					rx_ring->rx_buffer_info[l].skb, 1024);
			if (!ret_val)
				good_cnt++;
			if (++l == rx_ring->count)
				l = 0;
			/*
			 * time + 20 msecs (200 msecs on 2.4) is more than
			 * enough time to complete the receives, if it's
			 * exceeded, break and error off
			 */
		} while (good_cnt < 64 && jiffies < (time + 20));
		if (good_cnt != 64) {
			/* ret_val is the same as mis-compare */
			ret_val = 13;
			break;
		}
		if (jiffies >= (time + 20)) {
			/* Error code for time out error */
			ret_val = 14;
			break;
		}
	}

	return ret_val;
}

static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
{
	*data = ixgbe_setup_desc_rings(adapter);
	if (*data)
		goto out;
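
	/* descriptor rings are ready: force MAC loopback, run the frame
	 * test, then undo the loopback setting before freeing the rings */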
	*data = ixgbe_setup_loopback_test(adapter);
	if (*data)
		goto err_loopback;
	*data = ixgbe_run_loopback_test(adapter);
	ixgbe_loopback_cleanup(adapter);

err_loopback:
	ixgbe_free_desc_rings(adapter);
out:
	return *data;
}

static void ixgbe_diag_test(struct net_device *netdev,
			    struct ethtool_test *eth_test, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	bool if_running = netif_running(netdev);

	set_bit(__IXGBE_TESTING, &adapter->state);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline tests */

		DPRINTK(HW, INFO, "offline testing starting\n");

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
			int i;
			for (i = 0; i < adapter->num_vfs; i++) {
				if (adapter->vfinfo[i].clear_to_send) {
					netdev_warn(netdev, "%s",
						    "offline diagnostic is not "
						    "supported when VFs are "
						    "present\n");
					data[0] = 1;
					data[1] = 1;
					data[2] = 1;
					data[3] = 1;
					eth_test->flags |= ETH_TEST_FL_FAILED;
					clear_bit(__IXGBE_TESTING,
						  &adapter->state);
					goto skip_ol_tests;
				}
			}
		}

		if (if_running)
			/* indicate we're in test mode */
			dev_close(netdev);
		else
			ixgbe_reset(adapter);

		DPRINTK(HW, INFO, "register testing starting\n");
		if (ixgbe_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		DPRINTK(HW, INFO, "eeprom testing starting\n");
		if (ixgbe_eeprom_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		DPRINTK(HW, INFO, "interrupt testing starting\n");
		if (ixgbe_intr_test(adapter, &data[2]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* If SRIOV or VMDq is enabled then skip MAC
		 * loopback diagnostic. */
		if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
				      IXGBE_FLAG_VMDQ_ENABLED)) {
			DPRINTK(HW, INFO, "Skip MAC loopback diagnostic in VT "
				"mode\n");
			data[3] = 0;
			goto skip_loopback;
		}

		ixgbe_reset(adapter);
		DPRINTK(HW, INFO, "loopback testing starting\n");
		if (ixgbe_loopback_test(adapter, &data[3]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

skip_loopback:
		ixgbe_reset(adapter);

		clear_bit(__IXGBE_TESTING, &adapter->state);
		if (if_running)
			dev_open(netdev);
	} else {
		DPRINTK(HW, INFO, "online testing starting\n");
		/* Online tests */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Online tests aren't run; pass by default */
		data[0] = 0;
		data[1] = 0;
		data[2] = 0;
		data[3] = 0;

		clear_bit(__IXGBE_TESTING, &adapter->state);
	}
skip_ol_tests:
	msleep_interruptible(4 * 1000);
}

static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
			       struct ethtool_wolinfo *wol)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int retval = 1;

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_KX4:
		retval = 0;
		break;
	default:
		wol->supported = 0;
	}

	return retval;
}

static void ixgbe_get_wol(struct net_device *netdev,
			  struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	wol->supported = WAKE_UCAST | WAKE_MCAST |
			 WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = 0;

	if (ixgbe_wol_exclusion(adapter, wol) ||
	    !device_can_wakeup(&adapter->pdev->dev))
		return;

	if (adapter->wol & IXGBE_WUFC_EX)
		wol->wolopts |= WAKE_UCAST;
	if (adapter->wol & IXGBE_WUFC_MC)
		wol->wolopts |= WAKE_MCAST;
	if (adapter->wol & IXGBE_WUFC_BC)
		wol->wolopts |= WAKE_BCAST;
	if (adapter->wol & IXGBE_WUFC_MAG)
		wol->wolopts |= WAKE_MAGIC;
}

static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
		return -EOPNOTSUPP;

	if (ixgbe_wol_exclusion(adapter, wol))
		return wol->wolopts ? -EOPNOTSUPP : 0;

	adapter->wol = 0;

	if (wol->wolopts & WAKE_UCAST)
		adapter->wol |= IXGBE_WUFC_EX;
	if (wol->wolopts & WAKE_MCAST)
		adapter->wol |= IXGBE_WUFC_MC;
	if (wol->wolopts & WAKE_BCAST)
		adapter->wol |= IXGBE_WUFC_BC;
	if (wol->wolopts & WAKE_MAGIC)
		adapter->wol |= IXGBE_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}

static int ixgbe_nway_reset(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}

static int ixgbe_phys_id(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	u32 i;

	if (!data || data > 300)
		data = 300;

	for (i = 0; i < (data * 1000); i += 400) {
		hw->mac.ops.led_on(hw, IXGBE_LED_ON);
		msleep_interruptible(200);
		hw->mac.ops.led_off(hw, IXGBE_LED_ON);
		msleep_interruptible(200);
	}

	/* Restore LED settings */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, led_reg);

	return 0;
}

static int ixgbe_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0]->work_limit;

	/* only valid if in constant ITR mode */
	switch (adapter->rx_itr_setting) {
	case 0:
		/* throttling disabled */
		ec->rx_coalesce_usecs = 0;
		break;
	case 1:
		/* dynamic ITR mode */
		ec->rx_coalesce_usecs = 1;
		break;
	default:
		/* fixed interrupt rate mode */
		ec->rx_coalesce_usecs = 1000000/adapter->rx_eitr_param;
		break;
	}

	/* if in mixed tx/rx queues per vector mode, report only rx settings */
	if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count)
		return 0;

	/* only valid if in constant ITR mode */
	switch (adapter->tx_itr_setting) {
	case 0:
		/* throttling disabled */
		ec->tx_coalesce_usecs = 0;
		break;
	case 1:
		/* dynamic ITR mode */
		ec->tx_coalesce_usecs = 1;
		break;
	default:
		ec->tx_coalesce_usecs = 1000000/adapter->tx_eitr_param;
		break;
	}

	return 0;
}

/*
 * this function must be called before setting the new value of
 * rx_itr_setting
 */
static bool ixgbe_reenable_rsc(struct ixgbe_adapter *adapter,
			       struct ethtool_coalesce *ec)
{
	/* check the old value and enable RSC if necessary */
	if ((adapter->rx_itr_setting == 0) &&
	    (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) {
		adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
		adapter->netdev->features |= NETIF_F_LRO;
		DPRINTK(PROBE, INFO, "rx-usecs set to %d, re-enabling RSC\n",
			ec->rx_coalesce_usecs);
		return true;
	}
	return false;
}

static int ixgbe_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_q_vector *q_vector;
	int i;
	bool need_reset = false;

	/* don't accept tx specific changes if we've got mixed RxTx vectors */
	if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count
	    && ec->tx_coalesce_usecs)
		return -EINVAL;
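
	/* tx-frames-irq maps straight onto the ring's work_limit; the
	 * rx-usecs/tx-usecs values are converted to interrupt rates below
	 * (0 = throttling off, 1 = dynamic ITR, >1 = fixed rate) */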
	if (ec->tx_max_coalesced_frames_irq)
		adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;

	if (ec->rx_coalesce_usecs > 1) {
		u32 max_int;
		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
			max_int = IXGBE_MAX_RSC_INT_RATE;
		else
			max_int = IXGBE_MAX_INT_RATE;

		/* check the limits */
		if ((1000000/ec->rx_coalesce_usecs > max_int) ||
		    (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE))
			return -EINVAL;

		/* check the old value and enable RSC if necessary */
		need_reset = ixgbe_reenable_rsc(adapter, ec);

		/* store the value in ints/second */
		adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs;

		/* static value of interrupt rate */
		adapter->rx_itr_setting = adapter->rx_eitr_param;
		/* clear the lower bit as its used for dynamic state */
		adapter->rx_itr_setting &= ~1;
	} else if (ec->rx_coalesce_usecs == 1) {
		/* check the old value and enable RSC if necessary */
		need_reset = ixgbe_reenable_rsc(adapter, ec);

		/* 1 means dynamic mode */
		adapter->rx_eitr_param = 20000;
		adapter->rx_itr_setting = 1;
	} else {
		/*
		 * any other value means disable eitr, which is best
		 * served by setting the interrupt rate very high
		 */
		adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
		adapter->rx_itr_setting = 0;

		/*
		 * if hardware RSC is enabled, disable it when
		 * setting low latency mode, to avoid errata, assuming
		 * that when the user set low latency mode they want
		 * it at the cost of anything else
		 */
		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
			adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
			netdev->features &= ~NETIF_F_LRO;
			DPRINTK(PROBE, INFO,
				"rx-usecs set to 0, disabling RSC\n");

			need_reset = true;
		}
	}

	if (ec->tx_coalesce_usecs > 1) {
		/*
		 * don't have to worry about max_int as above because
		 * tx vectors don't do hardware RSC (an rx function)
		 */
		/* check the limits */
		if ((1000000/ec->tx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
		    (1000000/ec->tx_coalesce_usecs < IXGBE_MIN_INT_RATE))
			return -EINVAL;

		/* store the value in ints/second */
		adapter->tx_eitr_param = 1000000/ec->tx_coalesce_usecs;

		/* static value of interrupt rate */
		adapter->tx_itr_setting = adapter->tx_eitr_param;

		/* clear the lower bit as its used for dynamic state */
		adapter->tx_itr_setting &= ~1;
	} else if (ec->tx_coalesce_usecs == 1) {
		/* 1 means dynamic mode */
		adapter->tx_eitr_param = 10000;
		adapter->tx_itr_setting = 1;
	} else {
		adapter->tx_eitr_param = IXGBE_MAX_INT_RATE;
		adapter->tx_itr_setting = 0;
	}

	/* MSI/MSIx Interrupt Mode */
	if (adapter->flags &
	    (IXGBE_FLAG_MSIX_ENABLED | IXGBE_FLAG_MSI_ENABLED)) {
		int num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
		for (i = 0; i < num_vectors; i++) {
			q_vector = adapter->q_vector[i];
			if (q_vector->txr_count && !q_vector->rxr_count)
				/* tx only */
				q_vector->eitr = adapter->tx_eitr_param;
			else
				/* rx only or mixed */
				q_vector->eitr = adapter->rx_eitr_param;
			ixgbe_write_eitr(q_vector);
		}
	/* Legacy Interrupt Mode */
	} else {
		q_vector = adapter->q_vector[0];
		q_vector->eitr = adapter->rx_eitr_param;
		ixgbe_write_eitr(q_vector);
	}

	/*
	 * do reset here at the end to make sure EITR==0 case is handled
	 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
	 * also locks in RSC enable/disable which requires reset
	 */
	if (need_reset) {
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}

	return 0;
}

static int ixgbe_set_flags(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	bool need_reset = false;

	ethtool_op_set_flags(netdev, data);

	/* if state changes we need to update adapter->flags and reset */
	if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) {
		/*
		 * cast both to bool and verify if they are set the same
		 * but only enable RSC if itr is non-zero, as
		 * itr=0 and RSC are mutually exclusive
		 */
		if (((!!(data & ETH_FLAG_LRO)) !=
		     (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) &&
		    adapter->rx_itr_setting) {
			adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
			switch (adapter->hw.mac.type) {
			case ixgbe_mac_82599EB:
				need_reset = true;
				break;
			default:
				break;
			}
		} else if (!adapter->rx_itr_setting) {
			netdev->features &= ~ETH_FLAG_LRO;
		}
	}

	/*
	 * Check if Flow Director n-tuple support was enabled or disabled. If
	 * the state changed, we need to reset.
	 */
	if ((adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) &&
	    (!(data & ETH_FLAG_NTUPLE))) {
		/* turn off Flow Director perfect, set hash and reset */
		adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
		adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
		need_reset = true;
	} else if ((!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) &&
		   (data & ETH_FLAG_NTUPLE)) {
		/* turn off Flow Director hash, enable perfect and reset */
		adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
		adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
		need_reset = true;
	} else {
		/* no state change */
	}

	if (need_reset) {
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}

	return 0;
}

static int ixgbe_set_rx_ntuple(struct net_device *dev,
			       struct ethtool_rx_ntuple *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ethtool_rx_ntuple_flow_spec fs = cmd->fs;
	struct ixgbe_atr_input input_struct;
	struct ixgbe_atr_input_masks input_masks;
	int target_queue;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		return -EOPNOTSUPP;

	/*
	 * Don't allow programming if the action is a queue greater than
	 * the number of online Tx queues.
	 */
	if ((fs.action >= adapter->num_tx_queues) ||
	    (fs.action < ETHTOOL_RXNTUPLE_ACTION_DROP))
		return -EINVAL;

	memset(&input_struct, 0, sizeof(struct ixgbe_atr_input));
	memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks));

	input_masks.src_ip_mask = fs.m_u.tcp_ip4_spec.ip4src;
	input_masks.dst_ip_mask = fs.m_u.tcp_ip4_spec.ip4dst;
	input_masks.src_port_mask = fs.m_u.tcp_ip4_spec.psrc;
	input_masks.dst_port_mask = fs.m_u.tcp_ip4_spec.pdst;
	input_masks.vlan_id_mask = fs.vlan_tag_mask;
	/* only use the lowest 2 bytes for flex bytes */
	input_masks.data_mask = (fs.data_mask & 0xffff);

	switch (fs.flow_type) {
	case TCP_V4_FLOW:
		ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_TCP);
		break;
	case UDP_V4_FLOW:
		ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_UDP);
		break;
	case SCTP_V4_FLOW:
		ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_SCTP);
		break;
	default:
		return -1;
	}

	/* Mask bits from the inputs based on user-supplied mask */
	ixgbe_atr_set_src_ipv4_82599(&input_struct,
		    (fs.h_u.tcp_ip4_spec.ip4src & ~fs.m_u.tcp_ip4_spec.ip4src));
	ixgbe_atr_set_dst_ipv4_82599(&input_struct,
		    (fs.h_u.tcp_ip4_spec.ip4dst & ~fs.m_u.tcp_ip4_spec.ip4dst));
	/* 82599 expects these to be byte-swapped for perfect filtering */
	ixgbe_atr_set_src_port_82599(&input_struct,
	       ((ntohs(fs.h_u.tcp_ip4_spec.psrc)) & ~fs.m_u.tcp_ip4_spec.psrc));
	ixgbe_atr_set_dst_port_82599(&input_struct,
	       ((ntohs(fs.h_u.tcp_ip4_spec.pdst)) & ~fs.m_u.tcp_ip4_spec.pdst));

	/* VLAN and Flex bytes are either completely masked or not */
	if (!fs.vlan_tag_mask)
		ixgbe_atr_set_vlan_id_82599(&input_struct, fs.vlan_tag);

	if (!input_masks.data_mask)
		/* make sure we only use the first 2 bytes of user data */
		ixgbe_atr_set_flex_byte_82599(&input_struct,
					      (fs.data & 0xffff));

	/* determine if we need to drop or route the packet */
	if (fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
		target_queue = MAX_RX_QUEUES - 1;
	else
		target_queue = fs.action;

	spin_lock(&adapter->fdir_perfect_lock);
	ixgbe_fdir_add_perfect_filter_82599(&adapter->hw, &input_struct,
					    &input_masks, 0, target_queue);
	spin_unlock(&adapter->fdir_perfect_lock);

	return 0;
}

static const struct ethtool_ops ixgbe_ethtool_ops = {
	.get_settings = ixgbe_get_settings,
	.set_settings = ixgbe_set_settings,
	.get_drvinfo = ixgbe_get_drvinfo,
	.get_regs_len = ixgbe_get_regs_len,
	.get_regs = ixgbe_get_regs,
	.get_wol = ixgbe_get_wol,
	.set_wol = ixgbe_set_wol,
	.nway_reset = ixgbe_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = ixgbe_get_eeprom_len,
	.get_eeprom = ixgbe_get_eeprom,
	.get_ringparam = ixgbe_get_ringparam,
	.set_ringparam = ixgbe_set_ringparam,
	.get_pauseparam = ixgbe_get_pauseparam,
	.set_pauseparam = ixgbe_set_pauseparam,
	.get_rx_csum = ixgbe_get_rx_csum,
	.set_rx_csum = ixgbe_set_rx_csum,
	.get_tx_csum = ixgbe_get_tx_csum,
	.set_tx_csum = ixgbe_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_msglevel = ixgbe_get_msglevel,
	.set_msglevel = ixgbe_set_msglevel,
	.get_tso = ethtool_op_get_tso,
	.set_tso = ixgbe_set_tso,
	.self_test = ixgbe_diag_test,
	.get_strings = ixgbe_get_strings,
	.phys_id = ixgbe_phys_id,
	.get_sset_count = ixgbe_get_sset_count,
	.get_ethtool_stats = ixgbe_get_ethtool_stats,
	.get_coalesce = ixgbe_get_coalesce,
	.set_coalesce = ixgbe_set_coalesce,
	.get_flags = ethtool_op_get_flags,
	.set_flags = ixgbe_set_flags,
	.set_rx_ntuple = ixgbe_set_rx_ntuple,
};

void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops);
}