Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at commit 4dfd459b738cf1f65b3eac4e0a9b19bc93cc91c6
/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2010 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <scsi/fc/fc_fcoe.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Network Driver";

#define DRV_VERSION "2.0.44-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598] = &ixgbe_82598_info,
	[board_82599] = &ixgbe_82599_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbe_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
	 board_82599 },

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
                            void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next          = NULL,
	.priority      = 0
};
#endif

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
	                ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
	                ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

/*
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
                           u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		if (direction == -1)
			direction = 0;
		index = (((direction * 64) + queue) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (queue & 0x3)));
		ivar |= (msix_vector << (8 * (queue & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
		if (direction == -1) {
			/* other causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((queue & 1) * 8);
			ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
			break;
		} else {
			/* tx or rx causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((16 * (queue & 1)) + (8 * direction));
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
			break;
		}
	default:
		break;
	}
}

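/*
 * Illustration of the 82599 IVAR layout used above (derived from the index
 * arithmetic in ixgbe_set_ivar): each 32-bit IVAR register holds four 8-bit
 * vector entries, two queues per register with an Rx and a Tx byte each.
 * For example, Tx queue 5 (direction = 1) gives
 *   index = 16 * (5 & 1) + 8 * 1 = 24,
 * i.e. bits 31:24 of IVAR(5 >> 1) = IVAR(2) receive the vector number.
 */
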
static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
                                          u64 qmask)
{
	u32 mask;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
	} else {
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
	}
}

static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
                                             struct ixgbe_tx_buffer
                                             *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		if (tx_buffer_info->mapped_as_page)
			pci_unmap_page(adapter->pdev,
			               tx_buffer_info->dma,
			               tx_buffer_info->length,
			               PCI_DMA_TODEVICE);
		else
			pci_unmap_single(adapter->pdev,
			                 tx_buffer_info->dma,
			                 tx_buffer_info->length,
			                 PCI_DMA_TODEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}

/**
 * ixgbe_tx_is_paused - check if the tx ring is paused
 * @adapter: the ixgbe adapter
 * @tx_ring: the corresponding tx_ring
 *
 * If not in DCB mode, checks TFCS.TXOFF, otherwise, find out the
 * corresponding TC of this tx_ring when checking TFCS.
 *
 * Returns : true if paused
 */
static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
                                      struct ixgbe_ring *tx_ring)
{
	u32 txoff = IXGBE_TFCS_TXOFF;

#ifdef CONFIG_IXGBE_DCB
	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
		int tc;
		int reg_idx = tx_ring->reg_idx;
		int dcb_i = adapter->ring_feature[RING_F_DCB].indices;

		switch (adapter->hw.mac.type) {
		case ixgbe_mac_82598EB:
			tc = reg_idx >> 2;
			txoff = IXGBE_TFCS_TXOFF0;
			break;
		case ixgbe_mac_82599EB:
			tc = 0;
			txoff = IXGBE_TFCS_TXOFF;
			if (dcb_i == 8) {
				/* TC0, TC1 */
				tc = reg_idx >> 5;
				if (tc == 2) /* TC2, TC3 */
					tc += (reg_idx - 64) >> 4;
				else if (tc == 3) /* TC4, TC5, TC6, TC7 */
					tc += 1 + ((reg_idx - 96) >> 3);
			} else if (dcb_i == 4) {
				/* TC0, TC1 */
				tc = reg_idx >> 6;
				if (tc == 1) {
					tc += (reg_idx - 64) >> 5;
					if (tc == 2) /* TC2, TC3 */
						tc += (reg_idx - 96) >> 4;
				}
			}
			break;
		default:
			tc = 0;
		}
		txoff <<= tc;
	}
#endif
	return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff;
}

static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
                                       struct ixgbe_ring *tx_ring,
                                       unsigned int eop)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* Detect a transmit hang in hardware, this serializes the
	 * check with the clearing of time_stamp and movement of eop */
	adapter->detect_tx_hung = false;
	if (tx_ring->tx_buffer_info[eop].time_stamp &&
	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
	    !ixgbe_tx_is_paused(adapter, tx_ring)) {
		/* detected Tx unit hang */
		union ixgbe_adv_tx_desc *tx_desc;
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
		DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
			"  Tx Queue             <%d>\n"
			"  TDH, TDT             <%x>, <%x>\n"
			"  next_to_use          <%x>\n"
			"  next_to_clean        <%x>\n"
			"tx_buffer_info[next_to_clean]\n"
			"  time_stamp           <%lx>\n"
			"  jiffies              <%lx>\n",
			tx_ring->queue_index,
			IXGBE_READ_REG(hw, tx_ring->head),
			IXGBE_READ_REG(hw, tx_ring->tail),
			tx_ring->next_to_use, eop,
			tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
		return true;
	}

	return false;
}

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
                         (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */

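/*
 * Worked example for the macros above: IXGBE_MAX_DATA_PER_TXD is 1 << 14 =
 * 16384 bytes, so TXD_USE_COUNT(S) is just ceil(S / 16384).  On a system
 * with 4 KB pages (MAX_SKB_FRAGS == 18), DESC_NEEDED evaluates to
 * 1 + 18 * 1 + 1 = 20 descriptors reserved for the worst-case frame.
 */
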
static void ixgbe_tx_timeout(struct net_device *netdev);

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *tx_ring)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct net_device *netdev = adapter->netdev;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int i, eop, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
	       (count < tx_ring->work_limit)) {
		bool cleaned = false;
		for ( ; !cleaned; count++) {
			struct sk_buff *skb;
			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			cleaned = (i == eop);
			skb = tx_buffer_info->skb;

			if (cleaned && skb) {
				unsigned int segs, bytecount;
				unsigned int hlen = skb_headlen(skb);

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
#ifdef IXGBE_FCOE
				/* adjust for FCoE Sequence Offload */
				if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
				    && (skb->protocol == htons(ETH_P_FCOE)) &&
				    skb_is_gso(skb)) {
					hlen = skb_transport_offset(skb) +
						sizeof(struct fc_frame_header) +
						sizeof(struct fcoe_crc_eof);
					segs = DIV_ROUND_UP(skb->len - hlen,
						skb_shinfo(skb)->gso_size);
				}
#endif /* IXGBE_FCOE */
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * hlen) + skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbe_unmap_and_free_tx_resource(adapter,
			                                 tx_buffer_info);

			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(netdev) &&
	             (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			++tx_ring->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
			/* schedule immediate reset if we believe we hung */
			DPRINTK(PROBE, INFO,
			        "tx hang %d detected, resetting adapter\n",
			        adapter->tx_timeout_count + 1);
			ixgbe_tx_timeout(adapter->netdev);
		}
	}

	/* re-arm the interrupt */
	if (count >= tx_ring->work_limit)
		ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));

	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	tx_ring->stats.packets += total_packets;
	tx_ring->stats.bytes += total_bytes;
	return (count < tx_ring->work_limit);
}

#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *rx_ring)
{
	u32 rxctrl;
	int cpu = get_cpu();
	int q = rx_ring - adapter->rx_ring;

	if (rx_ring->cpu != cpu) {
		rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
			rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
			rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
			rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
			           IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
		}
		rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
		rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
		rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
		rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
		            IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
		rx_ring->cpu = cpu;
	}
	put_cpu();
}

static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *tx_ring)
{
	u32 txctrl;
	int cpu = get_cpu();
	int q = tx_ring - adapter->tx_ring;
	struct ixgbe_hw *hw = &adapter->hw;

	if (tx_ring->cpu != cpu) {
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(q));
			txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
			txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
			txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(q), txctrl);
		} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q));
			txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
			txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
			           IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
			txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl);
		}
		tx_ring->cpu = cpu;
	}
	put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
	int i;

	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
		return;

	/* always use CB2 mode, difference is masked in the CB driver */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].cpu = -1;
		ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].cpu = -1;
		ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
	}
}

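/*
 * Background note: DCA (Direct Cache Access) lets the chipset steer
 * descriptor and header writes toward the cache of the CPU that will
 * service the ring, which is why the routines above retag each ring
 * whenever the servicing CPU changes.  The notifier below reacts to the
 * dca module adding or removing this device as a requester.
 */
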
static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if we're already enabled, don't do it again */
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
			ixgbe_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
		}
		break;
	}

	return 0;
}

#endif /* CONFIG_IXGBE_DCA */
/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @adapter: board private structure
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
                              struct sk_buff *skb, u8 status,
                              struct ixgbe_ring *ring,
                              union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct napi_struct *napi = &q_vector->napi;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	skb_record_rx_queue(skb, ring->queue_index);
	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
		if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
			vlan_gro_receive(napi, adapter->vlgrp, tag, skb);
		else
			napi_gro_receive(napi, skb);
	} else {
		if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
			vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
		else
			netif_rx(skb);
	}
}

/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
                                     union ixgbe_adv_rx_desc *rx_desc,
                                     struct sk_buff *skb)
{
	u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error);

	skb->ip_summed = CHECKSUM_NONE;

	/* Rx csum disabled */
	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

		/*
		 * 82599 errata, UDP frames with a 0 checksum can be marked as
		 * checksum errors.
		 */
		if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
		    (adapter->hw.mac.type == ixgbe_mac_82599EB))
			return;

		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
                                         struct ixgbe_ring *rx_ring, u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val);
}

/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                                   struct ixgbe_ring *rx_ring,
                                   int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	unsigned int i;

	i = rx_ring->next_to_use;
	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

		if (!bi->page_dma &&
		    (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
			if (!bi->page) {
				bi->page = alloc_page(GFP_ATOMIC);
				if (!bi->page) {
					adapter->alloc_rx_page_failed++;
					goto no_buffers;
				}
				bi->page_offset = 0;
			} else {
				/* use a half page if we're re-using */
				bi->page_offset ^= (PAGE_SIZE / 2);
			}

			bi->page_dma = pci_map_page(pdev, bi->page,
			                            bi->page_offset,
			                            (PAGE_SIZE / 2),
			                            PCI_DMA_FROMDEVICE);
		}

		if (!bi->skb) {
			struct sk_buff *skb;
			/* netdev_alloc_skb reserves 32 bytes up front!! */
			uint bufsz = rx_ring->rx_buf_len + SMP_CACHE_BYTES;
			skb = netdev_alloc_skb(adapter->netdev, bufsz);

			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			/* advance the data pointer to the next cache line */
			skb_reserve(skb, (PTR_ALIGN(skb->data, SMP_CACHE_BYTES)
			                  - skb->data));

			bi->skb = skb;
			bi->dma = pci_map_single(pdev, skb->data,
			                         rx_ring->rx_buf_len,
			                         PCI_DMA_FROMDEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i-- == 0)
			i = (rx_ring->count - 1);

		ixgbe_release_rx_desc(&adapter->hw, rx_ring, i);
	}
}

static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
}

static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}

static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
{
	return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
	        IXGBE_RXDADV_RSCCNT_MASK) >>
	        IXGBE_RXDADV_RSCCNT_SHIFT;
}

/**
 * ixgbe_transform_rsc_queue - change rsc queue into a full packet
 * @skb: pointer to the last skb in the rsc queue
 * @count: pointer to number of packets coalesced in this context
 *
 * This function changes a queue full of hw rsc buffers into a completed
 * packet.  It uses the ->prev pointers to find the first packet and then
 * turns it into the frag list owner.
 **/
static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
                                                        u64 *count)
{
	unsigned int frag_list_size = 0;

	while (skb->prev) {
		struct sk_buff *prev = skb->prev;
		frag_list_size += skb->len;
		skb->prev = NULL;
		skb = prev;
		*count += 1;
	}

	skb_shinfo(skb)->frag_list = skb->next;
	skb->next = NULL;
	skb->len += frag_list_size;
	skb->data_len += frag_list_size;
	skb->truesize += frag_list_size;
	return skb;
}

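/*
 * Background note: with hardware receive side coalescing (RSC, the receive
 * counterpart of LRO), the non-EOP path in ixgbe_clean_rx_irq below links
 * the per-descriptor skbs of one TCP flow into a chain via skb->next and
 * skb->prev; when the EOP descriptor finally arrives, the helper above
 * walks back to the first skb and hangs the rest off its frag_list so the
 * stack sees a single large packet.
 */
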
static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *rx_ring,
                               int *work_done, int work_to_do)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i, rsc_count = 0;
	u32 len, staterr;
	u16 hdr_info;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
#ifdef IXGBE_FCOE
	int ddp_bytes = 0;
#endif /* IXGBE_FCOE */

	i = rx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		u32 upper_len = 0;
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
			hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
			if (len > IXGBE_RX_HDR_SIZE)
				len = IXGBE_RX_HDR_SIZE;
			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
		} else {
			len = le16_to_cpu(rx_desc->wb.upper.length);
		}

		cleaned = true;
		skb = rx_buffer_info->skb;
		prefetch(skb->data);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			pci_unmap_single(pdev, rx_buffer_info->dma,
			                 rx_ring->rx_buf_len,
			                 PCI_DMA_FROMDEVICE);
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		if (upper_len) {
			pci_unmap_page(pdev, rx_buffer_info->page_dma,
			               PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
			rx_buffer_info->page_dma = 0;
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			                   rx_buffer_info->page,
			                   rx_buffer_info->page_offset,
			                   upper_len);

			if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
			    (page_count(rx_buffer_info->page) != 1))
				rx_buffer_info->page = NULL;
			else
				get_page(rx_buffer_info->page);

			skb->len += upper_len;
			skb->data_len += upper_len;
			skb->truesize += upper_len;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
			rsc_count = ixgbe_get_rsc_count(rx_desc);

		if (rsc_count) {
			u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
			             IXGBE_RXDADV_NEXTP_SHIFT;
			next_buffer = &rx_ring->rx_buffer_info[nextp];
		} else {
			next_buffer = &rx_ring->rx_buffer_info[i];
		}

		if (staterr & IXGBE_RXD_STAT_EOP) {
			if (skb->prev)
				skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
			if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
				if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
					rx_ring->rsc_count += skb_shinfo(skb)->nr_frags;
				else
					rx_ring->rsc_count++;
				rx_ring->rsc_flush++;
			}
			rx_ring->stats.packets++;
			rx_ring->stats.bytes += skb->len;
		} else {
			if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
				rx_buffer_info->skb = next_buffer->skb;
				rx_buffer_info->dma = next_buffer->dma;
				next_buffer->skb = skb;
				next_buffer->dma = 0;
			} else {
				skb->next = next_buffer->skb;
				skb->next->prev = skb;
			}
			rx_ring->non_eop_descs++;
			goto next_desc;
		}

		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbe_rx_checksum(adapter, rx_desc, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, adapter->netdev);
#ifdef IXGBE_FCOE
		/* if ddp, not passing to ULD unless for FCP_RSP or error */
		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
			ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
			if (!ddp_bytes)
				goto next_desc;
		}
#endif /* IXGBE_FCOE */
		ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

#ifdef IXGBE_FCOE
	/* include DDPed FCoE data */
	if (ddp_bytes > 0) {
		unsigned int mss;

		mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) -
			sizeof(struct fc_frame_header) -
			sizeof(struct fcoe_crc_eof);
		if (mss > 512)
			mss &= ~511;
		total_rx_bytes += ddp_bytes;
		total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
	}
#endif /* IXGBE_FCOE */

	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;

	return cleaned;
}

static int ixgbe_clean_rxonly(struct napi_struct *, int);
/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
	struct ixgbe_q_vector *q_vector;
	int i, j, q_vectors, v_idx, r_idx;
	u32 mask;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = adapter->q_vector[v_idx];
		/* XXX for_each_bit(...) */
		r_idx = find_first_bit(q_vector->rxr_idx,
		                       adapter->num_rx_queues);

		for (i = 0; i < q_vector->rxr_count; i++) {
			j = adapter->rx_ring[r_idx].reg_idx;
			ixgbe_set_ivar(adapter, 0, j, v_idx);
			r_idx = find_next_bit(q_vector->rxr_idx,
			                      adapter->num_rx_queues,
			                      r_idx + 1);
		}
		r_idx = find_first_bit(q_vector->txr_idx,
		                       adapter->num_tx_queues);

		for (i = 0; i < q_vector->txr_count; i++) {
			j = adapter->tx_ring[r_idx].reg_idx;
			ixgbe_set_ivar(adapter, 1, j, v_idx);
			r_idx = find_next_bit(q_vector->txr_idx,
			                      adapter->num_tx_queues,
			                      r_idx + 1);
		}

		if (q_vector->txr_count && !q_vector->rxr_count)
			/* tx only */
			q_vector->eitr = adapter->tx_eitr_param;
		else if (q_vector->rxr_count)
			/* rx or mixed */
			q_vector->eitr = adapter->rx_eitr_param;

		ixgbe_write_eitr(q_vector);
	}

	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
		               v_idx);
	else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
		ixgbe_set_ivar(adapter, -1, 1, v_idx);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);

	/* set up to autoclear timer, and the vectors */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @eitr: eitr setting (ints per sec) to give last timeslice
 * @itr_setting: current throttle rate in ints/second
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 *      this functionality is controlled by the InterruptThrottleRate module
 *      parameter (see ixgbe_param.c)
 **/
static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
                           u32 eitr, u8 itr_setting,
                           int packets, int bytes)
{
	unsigned int retval = itr_setting;
	u32 timepassed_us;
	u64 bytes_perint;

	if (packets == 0)
		goto update_itr_done;


	/* simple throttlerate management
	 *    0-20MB/s   lowest (100000 ints/s)
	 *   20-100MB/s  low    (20000 ints/s)
	 *  100-1249MB/s bulk   (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = 1000000/eitr;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > adapter->eitr_low)
			retval = low_latency;
		break;
	case low_latency:
		if (bytes_perint > adapter->eitr_high)
			retval = bulk_latency;
		else if (bytes_perint <= adapter->eitr_low)
			retval = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= adapter->eitr_high)
			retval = low_latency;
		break;
	}

update_itr_done:
	return retval;
}

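/*
 * Worked example for ixgbe_update_itr(): at eitr = 20000 ints/s the last
 * timeslice is 1000000/20000 = 50 us, so 5000 bytes seen in that interval
 * give bytes_perint = 100 bytes/us (roughly 100 MB/s), which is then
 * compared against the adapter->eitr_low/eitr_high thresholds to pick the
 * next latency range.
 */
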
/**
 * ixgbe_write_eitr - write EITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update EITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 */
void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* must write high and low 16 bits to reset counter */
		itr_reg |= (itr_reg << 16);
	} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
		/*
		 * set the WDIS bit to not clear the timer bits and cause an
		 * immediate assertion of the interrupt
		 */
		itr_reg |= IXGBE_EITR_CNT_WDIS;
	}
	IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
}

static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	u32 new_itr;
	u8 current_itr, ret_itr;
	int i, r_idx;
	struct ixgbe_ring *rx_ring, *tx_ring;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
		                           q_vector->tx_itr,
		                           tx_ring->total_packets,
		                           tx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
		                    q_vector->tx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
		                      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
		                           q_vector->rx_itr,
		                           rx_ring->total_packets,
		                           rx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
		                    q_vector->rx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
		                      r_idx + 1);
	}

	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
	default:
		new_itr = 8000;
		break;
	}

	if (new_itr != q_vector->eitr) {
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);

		/* save the algorithm value here, not the smoothed one */
		q_vector->eitr = new_itr;

		ixgbe_write_eitr(q_vector);
	}

	return;
}

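/*
 * Note on the smoothing step above: the rate moves only 10% of the way
 * toward the target per update, e.g. a vector sitting at 8000 ints/s with
 * a new target of 100000 ints/s is written as 0.9 * 8000 + 0.1 * 100000 =
 * 17200 ints/s, which damps oscillation between latency ranges.
 */
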
static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		DPRINTK(PROBE, CRIT, "Fan has stopped, replace the adapter\n");
		/* write to clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
	}
}

static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if (eicr & IXGBE_EICR_GPI_SDP1) {
		/* Clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
		schedule_work(&adapter->multispeed_fiber_task);
	} else if (eicr & IXGBE_EICR_GPI_SDP2) {
		/* Clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
		schedule_work(&adapter->sfp_config_module_task);
	} else {
		/* Interrupt isn't for us... */
		return;
	}
}

static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->lsc_int++;
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		IXGBE_WRITE_FLUSH(hw);
		schedule_work(&adapter->watchdog_task);
	}
}

static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr;

	/*
	 * Workaround for Silicon errata.  Use clear-by-write instead
	 * of clear-by-read.  Reading with EICS will return the
	 * interrupt causes without clearing, which will later be done
	 * with the write to EICR.
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	if (eicr & IXGBE_EICR_LSC)
		ixgbe_check_lsc(adapter);

	if (hw->mac.type == ixgbe_mac_82598EB)
		ixgbe_check_fan_failure(adapter, eicr);

	if (hw->mac.type == ixgbe_mac_82599EB) {
		ixgbe_check_sfp_event(adapter, eicr);

		/* Handle Flow Director Full threshold interrupt */
		if (eicr & IXGBE_EICR_FLOW_DIR) {
			int i;
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
			/* Disable transmits before FDIR Re-initialization */
			netif_tx_stop_all_queues(netdev);
			for (i = 0; i < adapter->num_tx_queues; i++) {
				struct ixgbe_ring *tx_ring =
				                           &adapter->tx_ring[i];
				if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
				                       &tx_ring->reinit_state))
					schedule_work(&adapter->fdir_reinit_task);
			}
		}
	}
	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);

	return IRQ_HANDLED;
}

static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
                                           u64 qmask)
{
	u32 mask;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
	} else {
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
	}
	/* skip the flush */
}

static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
                                            u64 qmask)
{
	u32 mask;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
	} else {
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask);
	}
	/* skip the flush */
}

static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *tx_ring;
	int i, r_idx;

	if (!q_vector->txr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
		tx_ring->total_bytes = 0;
		tx_ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
		                      r_idx + 1);
	}

	/* EIAM disabled interrupts (on this vector) for us */
	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *rx_ring;
	int r_idx;
	int i;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		rx_ring->total_bytes = 0;
		rx_ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
		                      r_idx + 1);
	}

	if (!q_vector->rxr_count)
		return IRQ_HANDLED;

	/* disable interrupts on this vector only */
	/* EIAM disabled interrupts (on this vector) for us */
	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring;
	int r_idx;
	int i;

	if (!q_vector->txr_count && !q_vector->rxr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		ring = &(adapter->tx_ring[r_idx]);
		ring->total_bytes = 0;
		ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
		                      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		ring = &(adapter->rx_ring[r_idx]);
		ring->total_bytes = 0;
		ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
		                      r_idx + 1);
	}

	/* EIAM disabled interrupts (on this vector) for us */
	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function is optimized for cleaning one queue only on a single
 * q_vector!!!
 **/
static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
	                       container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *rx_ring = NULL;
	int work_done = 0;
	long r_idx;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		ixgbe_update_rx_dca(adapter, rx_ring);
#endif

	ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);

	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->rx_itr_setting & 1)
			ixgbe_set_itr_msix(q_vector);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable_queues(adapter,
			                        ((u64)1 << q_vector->v_idx));
	}

	return work_done;
}

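/*
 * Note on the NAPI pattern used here and in the pollers below: returning
 * work_done == budget keeps the vector in polling mode, while finishing
 * under budget calls napi_complete() and re-enables this vector's
 * interrupt via ixgbe_irq_enable_queues(), so the interrupt stays masked
 * exactly as long as polling is making progress.
 */
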
/**
 * ixgbe_clean_rxtx_many - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean more than one rx queue associated with a
 * q_vector.
 **/
static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
	                       container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring = NULL;
	int work_done = 0, i;
	long r_idx;
	bool tx_clean_complete = true;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		ring = &(adapter->tx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			ixgbe_update_tx_dca(adapter, ring);
#endif
		tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
		                      r_idx + 1);
	}

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	budget /= (q_vector->rxr_count ?: 1);
	budget = max(budget, 1);
	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		ring = &(adapter->rx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			ixgbe_update_rx_dca(adapter, ring);
#endif
		ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
		                      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	ring = &(adapter->rx_ring[r_idx]);
	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->rx_itr_setting & 1)
			ixgbe_set_itr_msix(q_vector);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable_queues(adapter,
			                        ((u64)1 << q_vector->v_idx));
		return 0;
	}

	return work_done;
}

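/*
 * Example of the fair budget split above: with a NAPI budget of 64 and
 * three Rx rings on the vector, each ring is polled with a budget of
 * 64 / 3 = 21 packets (integer division, clamped to at least 1).
 */
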
/**
 * ixgbe_clean_txonly - msix (aka one shot) tx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function is optimized for cleaning one queue only on a single
 * q_vector!!!
 **/
static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
	                       container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *tx_ring = NULL;
	int work_done = 0;
	long r_idx;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	tx_ring = &(adapter->tx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		ixgbe_update_tx_dca(adapter, tx_ring);
#endif

	if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
		work_done = budget;

	/* If all Tx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->tx_itr_setting & 1)
			ixgbe_set_itr_msix(q_vector);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
	}

	return work_done;
}

static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
                                     int r_idx)
{
	struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];

	set_bit(r_idx, q_vector->rxr_idx);
	q_vector->rxr_count++;
}

static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
                                     int t_idx)
{
	struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];

	set_bit(t_idx, q_vector->txr_idx);
	q_vector->txr_count++;
}

/**
 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 * @vectors: allotted vector count for descriptor rings
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
                                      int vectors)
{
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	/* No mapping required if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		goto out;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);

		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}

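/*
 * Worked example of the grouping loop above: with 10 Rx queues and 4
 * vectors, DIV_ROUND_UP re-evaluates the per-vector share as queues are
 * consumed, yielding 3, 3, 2, 2 rings on vectors 0-3, so the remainder is
 * spread across the early vectors instead of piling up on the last one.
 */
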
/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irqreturn_t (*handler)(int, void *);
	int i, vector, q_vectors, err;
	int ri = 0, ti = 0;

	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* Map the Tx/Rx rings to the vectors we were allotted. */
	err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
	if (err)
		goto out;

#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
                         (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
                         &ixgbe_msix_clean_many)
	for (vector = 0; vector < q_vectors; vector++) {
		handler = SET_HANDLER(adapter->q_vector[vector]);

		if (handler == &ixgbe_msix_clean_rx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
			        netdev->name, "rx", ri++);
		} else if (handler == &ixgbe_msix_clean_tx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
			        netdev->name, "tx", ti++);
		} else
			sprintf(adapter->name[vector], "%s-%s-%d",
			        netdev->name, "TxRx", vector);

		err = request_irq(adapter->msix_entries[vector].vector,
		                  handler, 0, adapter->name[vector],
		                  adapter->q_vector[vector]);
		if (err) {
			DPRINTK(PROBE, ERR,
			        "request_irq failed for MSIX interrupt "
			        "Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	sprintf(adapter->name[vector], "%s:lsc", netdev->name);
	err = request_irq(adapter->msix_entries[vector].vector,
	                  ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
	if (err) {
		DPRINTK(PROBE, ERR,
		        "request_irq for msix_lsc failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	for (i = vector - 1; i >= 0; i--)
		free_irq(adapter->msix_entries[--vector].vector,
		         adapter->q_vector[i]);
	adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
out:
	return err;
}

static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
	u8 current_itr;
	u32 new_itr = q_vector->eitr;
	struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
	struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];

	q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
	                                    q_vector->tx_itr,
	                                    tx_ring->total_packets,
	                                    tx_ring->total_bytes);
	q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
	                                    q_vector->rx_itr,
	                                    rx_ring->total_packets,
	                                    rx_ring->total_bytes);

	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 8000;
		break;
	default:
		break;
	}

	if (new_itr != q_vector->eitr) {
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);

		/* save the algorithm value here, not the smoothed one */
		q_vector->eitr = new_itr;

		ixgbe_write_eitr(q_vector);
	}

	return;
}

/**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
{
	u32 mask;

	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
		mask |= IXGBE_EIMS_GPI_SDP1;
	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
		mask |= IXGBE_EIMS_ECC;
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
	}
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
	ixgbe_irq_enable_queues(adapter, ~0);
	IXGBE_WRITE_FLUSH(&adapter->hw);
}

/**
 * ixgbe_intr - legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t ixgbe_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
	u32 eicr;

	/*
	 * Workaround for silicon errata.  Mask the interrupts
	 * before the read of EICR.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
	 * therefore no explicit interrupt disable is necessary */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
	if (!eicr) {
		/* shared interrupt alert!
		 * make sure interrupts are enabled because the read will
		 * have disabled interrupts due to EIAM */
		ixgbe_irq_enable(adapter);
		return IRQ_NONE;	/* Not our interrupt */
	}

	if (eicr & IXGBE_EICR_LSC)
		ixgbe_check_lsc(adapter);

	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_check_sfp_event(adapter, eicr);

	ixgbe_check_fan_failure(adapter, eicr);

	if (napi_schedule_prep(&(q_vector->napi))) {
		adapter->tx_ring[0].total_packets = 0;
		adapter->tx_ring[0].total_bytes = 0;
		adapter->rx_ring[0].total_packets = 0;
		adapter->rx_ring[0].total_bytes = 0;
		/* would disable interrupts here but EIAM disabled it */
		__napi_schedule(&(q_vector->napi));
	}

	return IRQ_HANDLED;
}

static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
		bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
		bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
		q_vector->rxr_count = 0;
		q_vector->txr_count = 0;
	}
}

/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		err = ixgbe_request_msix_irqs(adapter);
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
		                  netdev->name, netdev);
	} else {
		err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
		                  netdev->name, netdev);
	}

	if (err)
		DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);

	return err;
}

static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int i, q_vectors;

		q_vectors = adapter->num_msix_vectors;

		i = q_vectors - 1;
		free_irq(adapter->msix_entries[i].vector, netdev);

		i--;
		for (; i >= 0; i--) {
			free_irq(adapter->msix_entries[i].vector,
			         adapter->q_vector[i]);
		}

		ixgbe_reset_q_vectors(adapter);
	} else {
		free_irq(adapter->pdev->irq, netdev);
	}
}

/**
 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
	} else {
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
	}
	IXGBE_WRITE_FLUSH(&adapter->hw);
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int i;
		for (i = 0; i < adapter->num_msix_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

/**
 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
and MSI interrupts 1921 * 1922 **/ 1923static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) 1924{ 1925 struct ixgbe_hw *hw = &adapter->hw; 1926 1927 IXGBE_WRITE_REG(hw, IXGBE_EITR(0), 1928 EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param)); 1929 1930 ixgbe_set_ivar(adapter, 0, 0, 0); 1931 ixgbe_set_ivar(adapter, 1, 0, 0); 1932 1933 map_vector_to_rxq(adapter, 0, 0); 1934 map_vector_to_txq(adapter, 0, 0); 1935 1936 DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n"); 1937} 1938 1939/** 1940 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset 1941 * @adapter: board private structure 1942 * 1943 * Configure the Tx unit of the MAC after a reset. 1944 **/ 1945static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) 1946{ 1947 u64 tdba; 1948 struct ixgbe_hw *hw = &adapter->hw; 1949 u32 i, j, tdlen, txctrl; 1950 1951 /* Setup the HW Tx Head and Tail descriptor pointers */ 1952 for (i = 0; i < adapter->num_tx_queues; i++) { 1953 struct ixgbe_ring *ring = &adapter->tx_ring[i]; 1954 j = ring->reg_idx; 1955 tdba = ring->dma; 1956 tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc); 1957 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j), 1958 (tdba & DMA_BIT_MASK(32))); 1959 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32)); 1960 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen); 1961 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0); 1962 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); 1963 adapter->tx_ring[i].head = IXGBE_TDH(j); 1964 adapter->tx_ring[i].tail = IXGBE_TDT(j); 1965 /* 1966 * Disable Tx Head Writeback RO bit, since this hoses 1967 * bookkeeping if things aren't delivered in order. 1968 */ 1969 switch (hw->mac.type) { 1970 case ixgbe_mac_82598EB: 1971 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j)); 1972 break; 1973 case ixgbe_mac_82599EB: 1974 default: 1975 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j)); 1976 break; 1977 } 1978 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; 1979 switch (hw->mac.type) { 1980 case ixgbe_mac_82598EB: 1981 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl); 1982 break; 1983 case ixgbe_mac_82599EB: 1984 default: 1985 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl); 1986 break; 1987 } 1988 } 1989 1990 if (hw->mac.type == ixgbe_mac_82599EB) { 1991 u32 rttdcs; 1992 1993 /* disable the arbiter while setting MTQC */ 1994 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); 1995 rttdcs |= IXGBE_RTTDCS_ARBDIS; 1996 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 1997 1998 /* We enable 8 traffic classes, DCB only */ 1999 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) 2000 IXGBE_WRITE_REG(hw, IXGBE_MTQC, (IXGBE_MTQC_RT_ENA | 2001 IXGBE_MTQC_8TC_8TQ)); 2002 else 2003 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB); 2004 2005 /* re-enable the arbiter */ 2006 rttdcs &= ~IXGBE_RTTDCS_ARBDIS; 2007 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 2008 } 2009} 2010 2011#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 2012 2013static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, 2014 struct ixgbe_ring *rx_ring) 2015{ 2016 u32 srrctl; 2017 int index; 2018 struct ixgbe_ring_feature *feature = adapter->ring_feature; 2019 2020 index = rx_ring->reg_idx; 2021 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 2022 unsigned long mask; 2023 mask = (unsigned long) feature[RING_F_RSS].mask; 2024 index = index & mask; 2025 } 2026 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index)); 2027 2028 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; 2029 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; 2030 2031 srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & 2032 IXGBE_SRRCTL_BSIZEHDR_MASK; 2033
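	/*
	 * The SRRCTL buffer-size fields are coarse-grained: BSIZEPKT is
	 * programmed in 1 KB units and BSIZEHDR in 64-byte units.  As an
	 * illustrative example only (assuming 4 KB pages, so not
	 * authoritative for every arch):
	 *
	 *	(PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT
	 *	  = 2048 >> 10 = 2	-> 2 KB packet buffers
	 *
	 * which is what the packet-split branch below programs.
	 */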
2034 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { 2035#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER 2036 srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 2037#else 2038 srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 2039#endif 2040 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; 2041 } else { 2042 srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >> 2043 IXGBE_SRRCTL_BSIZEPKT_SHIFT; 2044 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 2045 } 2046 2047 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl); 2048} 2049 2050static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) 2051{ 2052 u32 mrqc = 0; 2053 int mask; 2054 2055 if (!(adapter->hw.mac.type == ixgbe_mac_82599EB)) 2056 return mrqc; 2057 2058 mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED 2059#ifdef CONFIG_IXGBE_DCB 2060 | IXGBE_FLAG_DCB_ENABLED 2061#endif 2062 ); 2063 2064 switch (mask) { 2065 case (IXGBE_FLAG_RSS_ENABLED): 2066 mrqc = IXGBE_MRQC_RSSEN; 2067 break; 2068#ifdef CONFIG_IXGBE_DCB 2069 case (IXGBE_FLAG_DCB_ENABLED): 2070 mrqc = IXGBE_MRQC_RT8TCEN; 2071 break; 2072#endif /* CONFIG_IXGBE_DCB */ 2073 default: 2074 break; 2075 } 2076 2077 return mrqc; 2078} 2079 2080/** 2081 * ixgbe_configure_rscctl - enable RSC for the indicated ring 2082 * @adapter: address of board private structure 2083 * @index: index of ring to set 2084 **/ 2085static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index) 2086{ 2087 struct ixgbe_ring *rx_ring; 2088 struct ixgbe_hw *hw = &adapter->hw; 2089 int j; 2090 u32 rscctrl; 2091 int rx_buf_len; 2092 2093 rx_ring = &adapter->rx_ring[index]; 2094 j = rx_ring->reg_idx; 2095 rx_buf_len = rx_ring->rx_buf_len; 2096 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j)); 2097 rscctrl |= IXGBE_RSCCTL_RSCEN; 2098 /* 2099 * we must limit the number of descriptors so that the 2100 * total size of max desc * buf_len is not greater 2101 * than 65535 2102 */ 2103 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { 2104#if (MAX_SKB_FRAGS > 16) 2105 rscctrl |= IXGBE_RSCCTL_MAXDESC_16; 2106#elif (MAX_SKB_FRAGS > 8) 2107 rscctrl |= IXGBE_RSCCTL_MAXDESC_8; 2108#elif (MAX_SKB_FRAGS > 4) 2109 rscctrl |= IXGBE_RSCCTL_MAXDESC_4; 2110#else 2111 rscctrl |= IXGBE_RSCCTL_MAXDESC_1; 2112#endif 2113 } else { 2114 if (rx_buf_len < IXGBE_RXBUFFER_4096) 2115 rscctrl |= IXGBE_RSCCTL_MAXDESC_16; 2116 else if (rx_buf_len < IXGBE_RXBUFFER_8192) 2117 rscctrl |= IXGBE_RSCCTL_MAXDESC_8; 2118 else 2119 rscctrl |= IXGBE_RSCCTL_MAXDESC_4; 2120 } 2121 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl); 2122} 2123 2124/** 2125 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset 2126 * @adapter: board private structure 2127 * 2128 * Configure the Rx unit of the MAC after a reset. 
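 *
 * Roughly: pick packet-split vs. single-buffer sizing, program each
 * ring's base/length/head/tail, then (for RSS) fill the 128-entry
 * redirection table and the 40-byte hash key.  As a sketch of the
 * RETA fill below, each byte is written as (queue * 0x11), so with
 * 4 RSS queues the table bytes cycle 0x00, 0x11, 0x22, 0x33.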
2129 **/ 2130static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) 2131{ 2132 u64 rdba; 2133 struct ixgbe_hw *hw = &adapter->hw; 2134 struct ixgbe_ring *rx_ring; 2135 struct net_device *netdev = adapter->netdev; 2136 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 2137 int i, j; 2138 u32 rdlen, rxctrl, rxcsum; 2139 static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D, 2140 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE, 2141 0x6A3E67EA, 0x14364D17, 0x3BED200D}; 2142 u32 fctrl, hlreg0; 2143 u32 reta = 0, mrqc = 0; 2144 u32 rdrxctl; 2145 int rx_buf_len; 2146 2147 /* Decide whether to use packet split mode or not */ 2148 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; 2149 2150 /* Set the RX buffer length according to the mode */ 2151 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 2152 rx_buf_len = IXGBE_RX_HDR_SIZE; 2153 if (hw->mac.type == ixgbe_mac_82599EB) { 2154 /* PSRTYPE must be initialized in 82599 */ 2155 u32 psrtype = IXGBE_PSRTYPE_TCPHDR | 2156 IXGBE_PSRTYPE_UDPHDR | 2157 IXGBE_PSRTYPE_IPV4HDR | 2158 IXGBE_PSRTYPE_IPV6HDR | 2159 IXGBE_PSRTYPE_L2HDR; 2160 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); 2161 } 2162 } else { 2163 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && 2164 (netdev->mtu <= ETH_DATA_LEN)) 2165 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; 2166 else 2167 rx_buf_len = ALIGN(max_frame, 1024); 2168 } 2169 2170 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 2171 fctrl |= IXGBE_FCTRL_BAM; 2172 fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */ 2173 fctrl |= IXGBE_FCTRL_PMCF; 2174 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl); 2175 2176 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); 2177 if (adapter->netdev->mtu <= ETH_DATA_LEN) 2178 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; 2179 else 2180 hlreg0 |= IXGBE_HLREG0_JUMBOEN; 2181#ifdef IXGBE_FCOE 2182 if (netdev->features & NETIF_F_FCOE_MTU) 2183 hlreg0 |= IXGBE_HLREG0_JUMBOEN; 2184#endif 2185 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); 2186 2187 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc); 2188 /* disable receives while setting up the descriptors */ 2189 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 2190 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); 2191 2192 /* 2193 * Setup the HW Rx Head and Tail Descriptor Pointers and 2194 * the Base and Length of the Rx Descriptor Ring 2195 */ 2196 for (i = 0; i < adapter->num_rx_queues; i++) { 2197 rx_ring = &adapter->rx_ring[i]; 2198 rdba = rx_ring->dma; 2199 j = rx_ring->reg_idx; 2200 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32))); 2201 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32)); 2202 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen); 2203 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0); 2204 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0); 2205 rx_ring->head = IXGBE_RDH(j); 2206 rx_ring->tail = IXGBE_RDT(j); 2207 rx_ring->rx_buf_len = rx_buf_len; 2208 2209 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) 2210 rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED; 2211 else 2212 rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED; 2213 2214#ifdef IXGBE_FCOE 2215 if (netdev->features & NETIF_F_FCOE_MTU) { 2216 struct ixgbe_ring_feature *f; 2217 f = &adapter->ring_feature[RING_F_FCOE]; 2218 if ((i >= f->mask) && (i < f->mask + f->indices)) { 2219 rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED; 2220 if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) 2221 rx_ring->rx_buf_len = 2222 IXGBE_FCOE_JUMBO_FRAME_SIZE; 2223 } 2224 } 2225 2226#endif /* IXGBE_FCOE */ 2227 ixgbe_configure_srrctl(adapter, rx_ring); 2228 } 2229 2230 if (hw->mac.type == 
ixgbe_mac_82598EB) { 2231 /* 2232 * For VMDq support of different descriptor types or 2233 * buffer sizes through the use of multiple SRRCTL 2234 * registers, RDRXCTL.MVMEN must be set to 1 2235 * 2236 * also, the manual doesn't mention it clearly but DCA hints 2237 * will only use queue 0's tags unless this bit is set. Side 2238 * effects of setting this bit are only that SRRCTL must be 2239 * fully programmed [0..15] 2240 */ 2241 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 2242 rdrxctl |= IXGBE_RDRXCTL_MVMEN; 2243 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); 2244 } 2245 2246 /* Program MRQC for the distribution of queues */ 2247 mrqc = ixgbe_setup_mrqc(adapter); 2248 2249 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 2250 /* Fill out redirection table */ 2251 for (i = 0, j = 0; i < 128; i++, j++) { 2252 if (j == adapter->ring_feature[RING_F_RSS].indices) 2253 j = 0; 2254 /* reta = 4-byte sliding window of 2255 * 0x00..(indices-1)(indices-1)00..etc. */ 2256 reta = (reta << 8) | (j * 0x11); 2257 if ((i & 3) == 3) 2258 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); 2259 } 2260 2261 /* Fill out hash function seeds */ 2262 for (i = 0; i < 10; i++) 2263 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]); 2264 2265 if (hw->mac.type == ixgbe_mac_82598EB) 2266 mrqc |= IXGBE_MRQC_RSSEN; 2267 /* Perform hash on these packet types */ 2268 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 2269 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP 2270 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP 2271 | IXGBE_MRQC_RSS_FIELD_IPV6 2272 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP 2273 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP; 2274 } 2275 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 2276 2277 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 2278 2279 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED || 2280 adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) { 2281 /* Disable indicating checksum in descriptor, enables 2282 * RSS hash */ 2283 rxcsum |= IXGBE_RXCSUM_PCSD; 2284 } 2285 if (!(rxcsum & IXGBE_RXCSUM_PCSD)) { 2286 /* Enable IPv4 payload checksum for UDP fragments 2287 * if PCSD is not set */ 2288 rxcsum |= IXGBE_RXCSUM_IPPCSE; 2289 } 2290 2291 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 2292 2293 if (hw->mac.type == ixgbe_mac_82599EB) { 2294 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 2295 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; 2296 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; 2297 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); 2298 } 2299 2300 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { 2301 /* Enable 82599 HW-RSC */ 2302 for (i = 0; i < adapter->num_rx_queues; i++) 2303 ixgbe_configure_rscctl(adapter, i); 2304 2305 /* Disable RSC for ACK packets */ 2306 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, 2307 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); 2308 } 2309} 2310 2311static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 2312{ 2313 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2314 struct ixgbe_hw *hw = &adapter->hw; 2315 2316 /* add VID to filter table */ 2317 hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true); 2318} 2319 2320static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 2321{ 2322 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2323 struct ixgbe_hw *hw = &adapter->hw; 2324 2325 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2326 ixgbe_irq_disable(adapter); 2327 2328 vlan_group_set_device(adapter->vlgrp, vid, NULL); 2329 2330 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2331 ixgbe_irq_enable(adapter); 2332 2333 /* remove VID from filter table */ 2334 hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false); 2335} 2336 2337static void 
ixgbe_vlan_rx_register(struct net_device *netdev, 2338 struct vlan_group *grp) 2339{ 2340 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2341 u32 ctrl; 2342 int i, j; 2343 2344 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2345 ixgbe_irq_disable(adapter); 2346 adapter->vlgrp = grp; 2347 2348 /* 2349 * For a DCB driver, always enable VLAN tag stripping so we can 2350 * still receive traffic from a DCB-enabled host even if we're 2351 * not in DCB mode. 2352 */ 2353 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL); 2354 2355 /* Disable CFI check */ 2356 ctrl &= ~IXGBE_VLNCTRL_CFIEN; 2357 2358 /* enable VLAN tag stripping */ 2359 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 2360 ctrl |= IXGBE_VLNCTRL_VME; 2361 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 2362 for (i = 0; i < adapter->num_rx_queues; i++) { 2363 u32 ctrl; 2364 j = adapter->rx_ring[i].reg_idx; 2365 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j)); 2366 ctrl |= IXGBE_RXDCTL_VME; 2367 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl); 2368 } 2369 } 2370 2371 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl); 2372 2373 ixgbe_vlan_rx_add_vid(netdev, 0); 2374 2375 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2376 ixgbe_irq_enable(adapter); 2377} 2378 2379static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) 2380{ 2381 ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp); 2382 2383 if (adapter->vlgrp) { 2384 u16 vid; 2385 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 2386 if (!vlan_group_get_device(adapter->vlgrp, vid)) 2387 continue; 2388 ixgbe_vlan_rx_add_vid(adapter->netdev, vid); 2389 } 2390 } 2391} 2392 2393static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq) 2394{ 2395 struct dev_mc_list *mc_ptr; 2396 u8 *addr = *mc_addr_ptr; 2397 *vmdq = 0; 2398 2399 mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]); 2400 if (mc_ptr->next) 2401 *mc_addr_ptr = mc_ptr->next->dmi_addr; 2402 else 2403 *mc_addr_ptr = NULL; 2404 2405 return addr; 2406} 2407 2408/** 2409 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set 2410 * @netdev: network interface device structure 2411 * 2412 * The set_rx_mode entry point is called whenever the unicast/multicast 2413 * address list or the network interface flags are updated. This routine is 2414 * responsible for configuring the hardware for proper unicast, multicast and 2415 * promiscuous mode.
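 *
 * For illustration, the FCTRL bits driven below map out as:
 *	IFF_PROMISC  -> UPE and MPE set, VLAN filtering (VFE) off
 *	IFF_ALLMULTI -> MPE set, UPE clear, VFE on
 *	otherwise    -> UPE and MPE clear, VFE on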
2416 **/ 2417static void ixgbe_set_rx_mode(struct net_device *netdev) 2418{ 2419 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2420 struct ixgbe_hw *hw = &adapter->hw; 2421 u32 fctrl, vlnctrl; 2422 u8 *addr_list = NULL; 2423 int addr_count = 0; 2424 2425 /* Check for Promiscuous and All Multicast modes */ 2426 2427 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 2428 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2429 2430 if (netdev->flags & IFF_PROMISC) { 2431 hw->addr_ctrl.user_set_promisc = 1; 2432 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 2433 vlnctrl &= ~IXGBE_VLNCTRL_VFE; 2434 } else { 2435 if (netdev->flags & IFF_ALLMULTI) { 2436 fctrl |= IXGBE_FCTRL_MPE; 2437 fctrl &= ~IXGBE_FCTRL_UPE; 2438 } else { 2439 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 2440 } 2441 vlnctrl |= IXGBE_VLNCTRL_VFE; 2442 hw->addr_ctrl.user_set_promisc = 0; 2443 } 2444 2445 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 2446 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 2447 2448 /* reprogram secondary unicast list */ 2449 hw->mac.ops.update_uc_addr_list(hw, &netdev->uc.list); 2450 2451 /* reprogram multicast list */ 2452 addr_count = netdev->mc_count; 2453 if (addr_count) 2454 addr_list = netdev->mc_list->dmi_addr; 2455 hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count, 2456 ixgbe_addr_list_itr); 2457} 2458 2459static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) 2460{ 2461 int q_idx; 2462 struct ixgbe_q_vector *q_vector; 2463 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 2464 2465 /* legacy and MSI only use one vector */ 2466 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) 2467 q_vectors = 1; 2468 2469 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 2470 struct napi_struct *napi; 2471 q_vector = adapter->q_vector[q_idx]; 2472 napi = &q_vector->napi; 2473 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 2474 if (!q_vector->rxr_count || !q_vector->txr_count) { 2475 if (q_vector->txr_count == 1) 2476 napi->poll = &ixgbe_clean_txonly; 2477 else if (q_vector->rxr_count == 1) 2478 napi->poll = &ixgbe_clean_rxonly; 2479 } 2480 } 2481 2482 napi_enable(napi); 2483 } 2484} 2485 2486static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) 2487{ 2488 int q_idx; 2489 struct ixgbe_q_vector *q_vector; 2490 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 2491 2492 /* legacy and MSI only use one vector */ 2493 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) 2494 q_vectors = 1; 2495 2496 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 2497 q_vector = adapter->q_vector[q_idx]; 2498 napi_disable(&q_vector->napi); 2499 } 2500} 2501 2502#ifdef CONFIG_IXGBE_DCB 2503/* 2504 * ixgbe_configure_dcb - Configure DCB hardware 2505 * @adapter: ixgbe adapter struct 2506 * 2507 * This is called by the driver on open to configure the DCB hardware. 2508 * This is also called by the genetlink interface when reconfiguring 2509 * the DCB state. 2510 */ 2511static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) 2512{ 2513 struct ixgbe_hw *hw = &adapter->hw; 2514 u32 txdctl, vlnctrl; 2515 int i, j; 2516 2517 ixgbe_dcb_check_config(&adapter->dcb_cfg); 2518 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG); 2519 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG); 2520 2521 /* reconfigure the hardware */ 2522 ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg); 2523 2524 for (i = 0; i < adapter->num_tx_queues; i++) { 2525 j = adapter->tx_ring[i].reg_idx; 2526 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); 2527 /* PThresh workaround for Tx hang with DFP enabled.
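		 * The OR of 32 lands in TXDCTL's low PTHRESH bits
		 * (bits 6:0 on this hardware family, to the best of
		 * our reading), i.e. a prefetch threshold of 32
		 * descriptors, leaving HTHRESH/WTHRESH untouched.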
*/ 2528 txdctl |= 32; 2529 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); 2530 } 2531 /* Enable VLAN tag insert/strip */ 2532 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2533 if (hw->mac.type == ixgbe_mac_82598EB) { 2534 vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE; 2535 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; 2536 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 2537 } else if (hw->mac.type == ixgbe_mac_82599EB) { 2538 vlnctrl |= IXGBE_VLNCTRL_VFE; 2539 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; 2540 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 2541 for (i = 0; i < adapter->num_rx_queues; i++) { 2542 j = adapter->rx_ring[i].reg_idx; 2543 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); 2544 vlnctrl |= IXGBE_RXDCTL_VME; 2545 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); 2546 } 2547 } 2548 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); 2549} 2550 2551#endif 2552static void ixgbe_configure(struct ixgbe_adapter *adapter) 2553{ 2554 struct net_device *netdev = adapter->netdev; 2555 struct ixgbe_hw *hw = &adapter->hw; 2556 int i; 2557 2558 ixgbe_set_rx_mode(netdev); 2559 2560 ixgbe_restore_vlan(adapter); 2561#ifdef CONFIG_IXGBE_DCB 2562 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 2563 if (hw->mac.type == ixgbe_mac_82598EB) 2564 netif_set_gso_max_size(netdev, 32768); 2565 else 2566 netif_set_gso_max_size(netdev, 65536); 2567 ixgbe_configure_dcb(adapter); 2568 } else { 2569 netif_set_gso_max_size(netdev, 65536); 2570 } 2571#else 2572 netif_set_gso_max_size(netdev, 65536); 2573#endif 2574 2575#ifdef IXGBE_FCOE 2576 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) 2577 ixgbe_configure_fcoe(adapter); 2578 2579#endif /* IXGBE_FCOE */ 2580 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { 2581 for (i = 0; i < adapter->num_tx_queues; i++) 2582 adapter->tx_ring[i].atr_sample_rate = 2583 adapter->atr_sample_rate; 2584 ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc); 2585 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { 2586 ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc); 2587 } 2588 2589 ixgbe_configure_tx(adapter); 2590 ixgbe_configure_rx(adapter); 2591 for (i = 0; i < adapter->num_rx_queues; i++) 2592 ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i], 2593 (adapter->rx_ring[i].count - 1)); 2594} 2595 2596static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) 2597{ 2598 switch (hw->phy.type) { 2599 case ixgbe_phy_sfp_avago: 2600 case ixgbe_phy_sfp_ftl: 2601 case ixgbe_phy_sfp_intel: 2602 case ixgbe_phy_sfp_unknown: 2603 case ixgbe_phy_tw_tyco: 2604 case ixgbe_phy_tw_unknown: 2605 return true; 2606 default: 2607 return false; 2608 } 2609} 2610 2611/** 2612 * ixgbe_sfp_link_config - set up SFP+ link 2613 * @adapter: pointer to private adapter struct 2614 **/ 2615static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) 2616{ 2617 struct ixgbe_hw *hw = &adapter->hw; 2618 2619 if (hw->phy.multispeed_fiber) { 2620 /* 2621 * In multispeed fiber setups, the device may not have 2622 * had a physical connection when the driver loaded. 2623 * If that's the case, the initial link configuration 2624 * couldn't get the MAC into 10G or 1G mode, so we'll 2625 * never have a link status change interrupt fire. 2626 * We need to try and force an autonegotiation 2627 * session, then bring up link. 
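	 * The attempt itself runs from the multispeed_fiber_task
	 * worker scheduled below, skipped if one is already in
	 * flight (IXGBE_FLAG_IN_SFP_LINK_TASK).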
2628 */ 2629 hw->mac.ops.setup_sfp(hw); 2630 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) 2631 schedule_work(&adapter->multispeed_fiber_task); 2632 } else { 2633 /* 2634 * Direct Attach Cu and non-multispeed fiber modules 2635 * still need to be configured properly prior to 2636 * attempting link. 2637 */ 2638 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_MOD_TASK)) 2639 schedule_work(&adapter->sfp_config_module_task); 2640 } 2641} 2642 2643/** 2644 * ixgbe_non_sfp_link_config - set up non-SFP+ link 2645 * @hw: pointer to private hardware struct 2646 * 2647 * Returns 0 on success, negative on failure 2648 **/ 2649static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw) 2650{ 2651 u32 autoneg; 2652 bool negotiation, link_up = false; 2653 u32 ret = IXGBE_ERR_LINK_SETUP; 2654 2655 if (hw->mac.ops.check_link) 2656 ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false); 2657 2658 if (ret) 2659 goto link_cfg_out; 2660 2661 if (hw->mac.ops.get_link_capabilities) 2662 ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation); 2663 if (ret) 2664 goto link_cfg_out; 2665 2666 if (hw->mac.ops.setup_link) 2667 ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up); 2668link_cfg_out: 2669 return ret; 2670} 2671 2672#define IXGBE_MAX_RX_DESC_POLL 10 2673static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, 2674 int rxr) 2675{ 2676 int j = adapter->rx_ring[rxr].reg_idx; 2677 int k; 2678 2679 for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) { 2680 if (IXGBE_READ_REG(&adapter->hw, 2681 IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE) 2682 break; 2683 else 2684 msleep(1); 2685 } 2686 if (k >= IXGBE_MAX_RX_DESC_POLL) { 2687 DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d " 2688 "not set within the polling period\n", rxr); 2689 } 2690 ixgbe_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr], 2691 (adapter->rx_ring[rxr].count - 1)); 2692} 2693 2694static int ixgbe_up_complete(struct ixgbe_adapter *adapter) 2695{ 2696 struct net_device *netdev = adapter->netdev; 2697 struct ixgbe_hw *hw = &adapter->hw; 2698 int i, j = 0; 2699 int num_rx_rings = adapter->num_rx_queues; 2700 int err; 2701 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 2702 u32 txdctl, rxdctl, mhadd; 2703 u32 dmatxctl; 2704 u32 gpie; 2705 2706 ixgbe_get_hw_control(adapter); 2707 2708 if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) || 2709 (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) { 2710 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 2711 gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME | 2712 IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD); 2713 } else { 2714 /* MSI only */ 2715 gpie = 0; 2716 } 2717 /* XXX: to interrupt immediately for EICS writes, enable this */ 2718 /* gpie |= IXGBE_GPIE_EIMEN; */ 2719 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 2720 } 2721 2722 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 2723 /* 2724 * use EIAM to auto-mask when MSI-X interrupt is asserted 2725 * this saves a register write for every interrupt 2726 */ 2727 switch (hw->mac.type) { 2728 case ixgbe_mac_82598EB: 2729 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 2730 break; 2731 default: 2732 case ixgbe_mac_82599EB: 2733 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 2734 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 2735 break; 2736 } 2737 } else { 2738 /* legacy interrupts, use EIAM to auto-mask when reading EICR, 2739 * specifically only auto mask tx and rx interrupts */ 2740 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 2741 } 2742 2743 /* Enable fan failure interrupt if media type is copper */ 
2744 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { 2745 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 2746 gpie |= IXGBE_SDP1_GPIEN; 2747 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 2748 } 2749 2750 if (hw->mac.type == ixgbe_mac_82599EB) { 2751 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 2752 gpie |= IXGBE_SDP1_GPIEN; 2753 gpie |= IXGBE_SDP2_GPIEN; 2754 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 2755 } 2756 2757#ifdef IXGBE_FCOE 2758 /* adjust max frame to be able to do baby jumbo for FCoE */ 2759 if ((netdev->features & NETIF_F_FCOE_MTU) && 2760 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE)) 2761 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE; 2762 2763#endif /* IXGBE_FCOE */ 2764 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); 2765 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { 2766 mhadd &= ~IXGBE_MHADD_MFS_MASK; 2767 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT; 2768 2769 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); 2770 } 2771 2772 for (i = 0; i < adapter->num_tx_queues; i++) { 2773 j = adapter->tx_ring[i].reg_idx; 2774 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); 2775 /* enable WTHRESH=8 descriptors, to encourage burst writeback */ 2776 txdctl |= (8 << 16); 2777 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); 2778 } 2779 2780 if (hw->mac.type == ixgbe_mac_82599EB) { 2781 /* DMATXCTL.EN must be set after all Tx queue config is done */ 2782 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 2783 dmatxctl |= IXGBE_DMATXCTL_TE; 2784 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); 2785 } 2786 for (i = 0; i < adapter->num_tx_queues; i++) { 2787 j = adapter->tx_ring[i].reg_idx; 2788 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); 2789 txdctl |= IXGBE_TXDCTL_ENABLE; 2790 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); 2791 } 2792 2793 for (i = 0; i < num_rx_rings; i++) { 2794 j = adapter->rx_ring[i].reg_idx; 2795 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); 2796 /* enable PTHRESH=32 descriptors (half the internal cache) 2797 * and HTHRESH=0 descriptors (to minimize latency on fetch), 2798 * this also removes a pesky rx_no_buffer_count increment */ 2799 rxdctl |= 0x0020; 2800 rxdctl |= IXGBE_RXDCTL_ENABLE; 2801 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl); 2802 if (hw->mac.type == ixgbe_mac_82599EB) 2803 ixgbe_rx_desc_queue_enable(adapter, i); 2804 } 2805 /* enable all receives */ 2806 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 2807 if (hw->mac.type == ixgbe_mac_82598EB) 2808 rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN); 2809 else 2810 rxdctl |= IXGBE_RXCTRL_RXEN; 2811 hw->mac.ops.enable_rx_dma(hw, rxdctl); 2812 2813 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) 2814 ixgbe_configure_msix(adapter); 2815 else 2816 ixgbe_configure_msi_and_legacy(adapter); 2817 2818 clear_bit(__IXGBE_DOWN, &adapter->state); 2819 ixgbe_napi_enable_all(adapter); 2820 2821 /* clear any pending interrupts, may auto mask */ 2822 IXGBE_READ_REG(hw, IXGBE_EICR); 2823 2824 ixgbe_irq_enable(adapter); 2825 2826 /* 2827 * If this adapter has a fan, check to see if we had a failure 2828 * before we enabled the interrupt. 2829 */ 2830 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { 2831 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); 2832 if (esdp & IXGBE_ESDP_SDP1) 2833 DPRINTK(DRV, CRIT, 2834 "Fan has stopped, replace the adapter\n"); 2835 } 2836 2837 /* 2838 * For hot-pluggable SFP+ devices, a new SFP+ module may have 2839 * arrived before interrupts were enabled but after probe. Such 2840 * devices wouldn't have their type identified yet. We need to 2841 * kick off the SFP+ module setup first, then try to bring up link. 
2842 * If we're not hot-pluggable SFP+, we just need to configure link 2843 * and bring it up. 2844 */ 2845 if (hw->phy.type == ixgbe_phy_unknown) { 2846 err = hw->phy.ops.identify(hw); 2847 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 2848 /* 2849 * Take the device down and schedule the sfp tasklet 2850 * which will unregister_netdev and log it. 2851 */ 2852 ixgbe_down(adapter); 2853 schedule_work(&adapter->sfp_config_module_task); 2854 return err; 2855 } 2856 } 2857 2858 if (ixgbe_is_sfp(hw)) { 2859 ixgbe_sfp_link_config(adapter); 2860 } else { 2861 err = ixgbe_non_sfp_link_config(hw); 2862 if (err) 2863 DPRINTK(PROBE, ERR, "link_config FAILED %d\n", err); 2864 } 2865 2866 for (i = 0; i < adapter->num_tx_queues; i++) 2867 set_bit(__IXGBE_FDIR_INIT_DONE, 2868 &(adapter->tx_ring[i].reinit_state)); 2869 2870 /* enable transmits */ 2871 netif_tx_start_all_queues(netdev); 2872 2873 /* bring the link up in the watchdog, this could race with our first 2874 * link up interrupt but shouldn't be a problem */ 2875 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 2876 adapter->link_check_timeout = jiffies; 2877 mod_timer(&adapter->watchdog_timer, jiffies); 2878 return 0; 2879} 2880 2881void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) 2882{ 2883 WARN_ON(in_interrupt()); 2884 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) 2885 msleep(1); 2886 ixgbe_down(adapter); 2887 ixgbe_up(adapter); 2888 clear_bit(__IXGBE_RESETTING, &adapter->state); 2889} 2890 2891int ixgbe_up(struct ixgbe_adapter *adapter) 2892{ 2893 /* hardware has been reset, we need to reload some things */ 2894 ixgbe_configure(adapter); 2895 2896 return ixgbe_up_complete(adapter); 2897} 2898 2899void ixgbe_reset(struct ixgbe_adapter *adapter) 2900{ 2901 struct ixgbe_hw *hw = &adapter->hw; 2902 int err; 2903 2904 err = hw->mac.ops.init_hw(hw); 2905 switch (err) { 2906 case 0: 2907 case IXGBE_ERR_SFP_NOT_PRESENT: 2908 break; 2909 case IXGBE_ERR_MASTER_REQUESTS_PENDING: 2910 dev_err(&adapter->pdev->dev, "master disable timed out\n"); 2911 break; 2912 case IXGBE_ERR_EEPROM_VERSION: 2913 /* We are running on a pre-production device, log a warning */ 2914 dev_warn(&adapter->pdev->dev, "This device is a pre-production " 2915 "adapter/LOM. Please be aware there may be issues " 2916 "associated with your hardware. If you are " 2917 "experiencing problems please contact your Intel or " 2918 "hardware representative who provided you with this " 2919 "hardware.\n"); 2920 break; 2921 default: 2922 dev_err(&adapter->pdev->dev, "Hardware Error: %d\n", err); 2923 } 2924 2925 /* reprogram the RAR[0] in case user changed it. 
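	 * RAR[0] holds the primary unicast MAC filter; IXGBE_RAH_AV
	 * marks the entry valid again after the reset.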
*/ 2926 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 2927} 2928 2929/** 2930 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue 2931 * @adapter: board private structure 2932 * @rx_ring: ring to free buffers from 2933 **/ 2934static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, 2935 struct ixgbe_ring *rx_ring) 2936{ 2937 struct pci_dev *pdev = adapter->pdev; 2938 unsigned long size; 2939 unsigned int i; 2940 2941 /* Free all the Rx ring sk_buffs */ 2942 2943 for (i = 0; i < rx_ring->count; i++) { 2944 struct ixgbe_rx_buffer *rx_buffer_info; 2945 2946 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 2947 if (rx_buffer_info->dma) { 2948 pci_unmap_single(pdev, rx_buffer_info->dma, 2949 rx_ring->rx_buf_len, 2950 PCI_DMA_FROMDEVICE); 2951 rx_buffer_info->dma = 0; 2952 } 2953 if (rx_buffer_info->skb) { 2954 struct sk_buff *skb = rx_buffer_info->skb; 2955 rx_buffer_info->skb = NULL; 2956 do { 2957 struct sk_buff *this = skb; 2958 skb = skb->prev; 2959 dev_kfree_skb(this); 2960 } while (skb); 2961 } 2962 if (!rx_buffer_info->page) 2963 continue; 2964 if (rx_buffer_info->page_dma) { 2965 pci_unmap_page(pdev, rx_buffer_info->page_dma, 2966 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE); 2967 rx_buffer_info->page_dma = 0; 2968 } 2969 put_page(rx_buffer_info->page); 2970 rx_buffer_info->page = NULL; 2971 rx_buffer_info->page_offset = 0; 2972 } 2973 2974 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; 2975 memset(rx_ring->rx_buffer_info, 0, size); 2976 2977 /* Zero out the descriptor ring */ 2978 memset(rx_ring->desc, 0, rx_ring->size); 2979 2980 rx_ring->next_to_clean = 0; 2981 rx_ring->next_to_use = 0; 2982 2983 if (rx_ring->head) 2984 writel(0, adapter->hw.hw_addr + rx_ring->head); 2985 if (rx_ring->tail) 2986 writel(0, adapter->hw.hw_addr + rx_ring->tail); 2987} 2988 2989/** 2990 * ixgbe_clean_tx_ring - Free Tx Buffers 2991 * @adapter: board private structure 2992 * @tx_ring: ring to be cleaned 2993 **/ 2994static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter, 2995 struct ixgbe_ring *tx_ring) 2996{ 2997 struct ixgbe_tx_buffer *tx_buffer_info; 2998 unsigned long size; 2999 unsigned int i; 3000 3001 /* Free all the Tx ring sk_buffs */ 3002 3003 for (i = 0; i < tx_ring->count; i++) { 3004 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 3005 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); 3006 } 3007 3008 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; 3009 memset(tx_ring->tx_buffer_info, 0, size); 3010 3011 /* Zero out the descriptor ring */ 3012 memset(tx_ring->desc, 0, tx_ring->size); 3013 3014 tx_ring->next_to_use = 0; 3015 tx_ring->next_to_clean = 0; 3016 3017 if (tx_ring->head) 3018 writel(0, adapter->hw.hw_addr + tx_ring->head); 3019 if (tx_ring->tail) 3020 writel(0, adapter->hw.hw_addr + tx_ring->tail); 3021} 3022 3023/** 3024 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues 3025 * @adapter: board private structure 3026 **/ 3027static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter) 3028{ 3029 int i; 3030 3031 for (i = 0; i < adapter->num_rx_queues; i++) 3032 ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]); 3033} 3034 3035/** 3036 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues 3037 * @adapter: board private structure 3038 **/ 3039static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter) 3040{ 3041 int i; 3042 3043 for (i = 0; i < adapter->num_tx_queues; i++) 3044 ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]); 3045} 3046 3047void ixgbe_down(struct ixgbe_adapter *adapter) 3048{ 3049 struct net_device 
*netdev = adapter->netdev; 3050 struct ixgbe_hw *hw = &adapter->hw; 3051 u32 rxctrl; 3052 u32 txdctl; 3053 int i, j; 3054 3055 /* signal that we are down to the interrupt handler */ 3056 set_bit(__IXGBE_DOWN, &adapter->state); 3057 3058 /* disable receives */ 3059 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 3060 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); 3061 3062 netif_tx_disable(netdev); 3063 3064 IXGBE_WRITE_FLUSH(hw); 3065 msleep(10); 3066 3067 netif_tx_stop_all_queues(netdev); 3068 3069 ixgbe_irq_disable(adapter); 3070 3071 ixgbe_napi_disable_all(adapter); 3072 3073 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); 3074 del_timer_sync(&adapter->sfp_timer); 3075 del_timer_sync(&adapter->watchdog_timer); 3076 cancel_work_sync(&adapter->watchdog_task); 3077 3078 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || 3079 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) 3080 cancel_work_sync(&adapter->fdir_reinit_task); 3081 3082 /* disable transmits in the hardware now that interrupts are off */ 3083 for (i = 0; i < adapter->num_tx_queues; i++) { 3084 j = adapter->tx_ring[i].reg_idx; 3085 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); 3086 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), 3087 (txdctl & ~IXGBE_TXDCTL_ENABLE)); 3088 } 3089 /* Disable the Tx DMA engine on 82599 */ 3090 if (hw->mac.type == ixgbe_mac_82599EB) 3091 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, 3092 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & 3093 ~IXGBE_DMATXCTL_TE)); 3094 3095 netif_carrier_off(netdev); 3096 3097 if (!pci_channel_offline(adapter->pdev)) 3098 ixgbe_reset(adapter); 3099 ixgbe_clean_all_tx_rings(adapter); 3100 ixgbe_clean_all_rx_rings(adapter); 3101 3102#ifdef CONFIG_IXGBE_DCA 3103 /* since we reset the hardware DCA settings were cleared */ 3104 ixgbe_setup_dca(adapter); 3105#endif 3106} 3107 3108/** 3109 * ixgbe_poll - NAPI Rx polling callback 3110 * @napi: structure for representing this polling device 3111 * @budget: how many packets driver is allowed to clean 3112 * 3113 * This function is used for legacy and MSI, NAPI mode 3114 **/ 3115static int ixgbe_poll(struct napi_struct *napi, int budget) 3116{ 3117 struct ixgbe_q_vector *q_vector = 3118 container_of(napi, struct ixgbe_q_vector, napi); 3119 struct ixgbe_adapter *adapter = q_vector->adapter; 3120 int tx_clean_complete, work_done = 0; 3121 3122#ifdef CONFIG_IXGBE_DCA 3123 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { 3124 ixgbe_update_tx_dca(adapter, adapter->tx_ring); 3125 ixgbe_update_rx_dca(adapter, adapter->rx_ring); 3126 } 3127#endif 3128 3129 tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring); 3130 ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget); 3131 3132 if (!tx_clean_complete) 3133 work_done = budget; 3134 3135 /* If budget not fully consumed, exit the polling mode */ 3136 if (work_done < budget) { 3137 napi_complete(napi); 3138 if (adapter->rx_itr_setting & 1) 3139 ixgbe_set_itr(adapter); 3140 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 3141 ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE); 3142 } 3143 return work_done; 3144} 3145 3146/** 3147 * ixgbe_tx_timeout - Respond to a Tx Hang 3148 * @netdev: network interface device structure 3149 **/ 3150static void ixgbe_tx_timeout(struct net_device *netdev) 3151{ 3152 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3153 3154 /* Do the reset outside of interrupt context */ 3155 schedule_work(&adapter->reset_task); 3156} 3157 3158static void ixgbe_reset_task(struct work_struct *work) 3159{ 3160 struct ixgbe_adapter *adapter; 3161 adapter 
= container_of(work, struct ixgbe_adapter, reset_task); 3162 3163 /* If we're already down or resetting, just bail */ 3164 if (test_bit(__IXGBE_DOWN, &adapter->state) || 3165 test_bit(__IXGBE_RESETTING, &adapter->state)) 3166 return; 3167 3168 adapter->tx_timeout_count++; 3169 3170 ixgbe_reinit_locked(adapter); 3171} 3172 3173#ifdef CONFIG_IXGBE_DCB 3174static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) 3175{ 3176 bool ret = false; 3177 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB]; 3178 3179 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) 3180 return ret; 3181 3182 f->mask = 0x7 << 3; 3183 adapter->num_rx_queues = f->indices; 3184 adapter->num_tx_queues = f->indices; 3185 ret = true; 3186 3187 return ret; 3188} 3189#endif 3190 3191/** 3192 * ixgbe_set_rss_queues: Allocate queues for RSS 3193 * @adapter: board private structure to initialize 3194 * 3195 * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try 3196 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. 3197 * 3198 **/ 3199static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) 3200{ 3201 bool ret = false; 3202 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS]; 3203 3204 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 3205 f->mask = 0xF; 3206 adapter->num_rx_queues = f->indices; 3207 adapter->num_tx_queues = f->indices; 3208 ret = true; 3209 } else { 3210 ret = false; 3211 } 3212 3213 return ret; 3214} 3215 3216/** 3217 * ixgbe_set_fdir_queues: Allocate queues for Flow Director 3218 * @adapter: board private structure to initialize 3219 * 3220 * Flow Director is an advanced Rx filter, attempting to get Rx flows back 3221 * to the original CPU that initiated the Tx session. This runs in addition 3222 * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the 3223 * Rx load across CPUs using RSS. 3224 * 3225 **/ 3226static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter) 3227{ 3228 bool ret = false; 3229 struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR]; 3230 3231 f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices); 3232 f_fdir->mask = 0; 3233 3234 /* Flow Director must have RSS enabled */ 3235 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED && 3236 ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || 3237 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)))) { 3238 adapter->num_tx_queues = f_fdir->indices; 3239 adapter->num_rx_queues = f_fdir->indices; 3240 ret = true; 3241 } else { 3242 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; 3243 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; 3244 } 3245 return ret; 3246} 3247 3248#ifdef IXGBE_FCOE 3249/** 3250 * ixgbe_set_fcoe_queues: Allocate queues for Fibre Channel over Ethernet (FCoE) 3251 * @adapter: board private structure to initialize 3252 * 3253 * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges. 3254 * The ring feature mask is not used as a mask for FCoE, as it can take any 8 3255 * rx queues out of the max number of rx queues; instead, it is used as the 3256 * index of the first rx queue used by FCoE.
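 * As a sketch of the resulting layout: if RSS claimed rx queues 0-3
 * and f->indices is 8, f->mask becomes 4 and the FCoE rings occupy
 * queues 4-11, appended after the existing ones.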
3257 * 3258 **/ 3259static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) 3260{ 3261 bool ret = false; 3262 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; 3263 3264 f->indices = min((int)num_online_cpus(), f->indices); 3265 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { 3266 adapter->num_rx_queues = 1; 3267 adapter->num_tx_queues = 1; 3268#ifdef CONFIG_IXGBE_DCB 3269 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 3270 DPRINTK(PROBE, INFO, "FCoE enabled with DCB\n"); 3271 ixgbe_set_dcb_queues(adapter); 3272 } 3273#endif 3274 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 3275 DPRINTK(PROBE, INFO, "FCoE enabled with RSS\n"); 3276 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || 3277 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) 3278 ixgbe_set_fdir_queues(adapter); 3279 else 3280 ixgbe_set_rss_queues(adapter); 3281 } 3282 /* adding FCoE rx rings to the end */ 3283 f->mask = adapter->num_rx_queues; 3284 adapter->num_rx_queues += f->indices; 3285 adapter->num_tx_queues += f->indices; 3286 3287 ret = true; 3288 } 3289 3290 return ret; 3291} 3292 3293#endif /* IXGBE_FCOE */ 3294/* 3295 * ixgbe_set_num_queues: Allocate queues for device, feature dependent 3296 * @adapter: board private structure to initialize 3297 * 3298 * This is the top level queue allocation routine. The order here is very 3299 * important, starting with the "most" features turned on at once, 3300 * and ending with the smallest set of features. This way large combinations 3301 * can be allocated if they're turned on, and smaller combinations are the 3302 * fallthrough conditions. 3303 * 3304 **/ 3305static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) 3306{ 3307#ifdef IXGBE_FCOE 3308 if (ixgbe_set_fcoe_queues(adapter)) 3309 goto done; 3310 3311#endif /* IXGBE_FCOE */ 3312#ifdef CONFIG_IXGBE_DCB 3313 if (ixgbe_set_dcb_queues(adapter)) 3314 goto done; 3315 3316#endif 3317 if (ixgbe_set_fdir_queues(adapter)) 3318 goto done; 3319 3320 if (ixgbe_set_rss_queues(adapter)) 3321 goto done; 3322 3323 /* fallback to base case */ 3324 adapter->num_rx_queues = 1; 3325 adapter->num_tx_queues = 1; 3326 3327done: 3328 /* Notify the stack of the (possibly) reduced Tx Queue count. */ 3329 adapter->netdev->real_num_tx_queues = adapter->num_tx_queues; 3330} 3331 3332static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, 3333 int vectors) 3334{ 3335 int err, vector_threshold; 3336 3337 /* We'll want at least 3 (vector_threshold): 3338 * 1) TxQ[0] Cleanup 3339 * 2) RxQ[0] Cleanup 3340 * 3) Other (Link Status Change, etc.) 3341 * 4) TCP Timer (optional) 3342 */ 3343 vector_threshold = MIN_MSIX_COUNT; 3344 3345 /* The more we get, the more we will assign to Tx/Rx Cleanup 3346 * for the separate queues...where Rx Cleanup >= Tx Cleanup. 3347 * Right now, we simply care about how many we'll get; we'll 3348 * set them up later while requesting IRQs. 3349 */ 3350 while (vectors >= vector_threshold) { 3351 err = pci_enable_msix(adapter->pdev, adapter->msix_entries, 3352 vectors); 3353 if (!err) /* Success in acquiring all requested vectors. */ 3354 break; 3355 else if (err < 0) 3356 vectors = 0; /* Nasty failure, quit now */ 3357 else /* err == number of vectors we should try again with */ 3358 vectors = err; 3359 } 3360 3361 if (vectors < vector_threshold) { 3362 /* Can't allocate enough MSI-X interrupts? Oh well. 3363 * This just means we'll go with either a single MSI 3364 * vector or fall back to legacy interrupts.
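		 * (A positive pci_enable_msix() return in the loop
		 * above names how many vectors are actually free, so
		 * e.g. a request for 10 with only 6 available returns
		 * 6 and the retry asks for exactly 6.)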
3365 */ 3366 DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n"); 3367 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; 3368 kfree(adapter->msix_entries); 3369 adapter->msix_entries = NULL; 3370 } else { 3371 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */ 3372 /* 3373 * Adjust for only the vectors we'll use, which is minimum 3374 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of 3375 * vectors we were allocated. 3376 */ 3377 adapter->num_msix_vectors = min(vectors, 3378 adapter->max_msix_q_vectors + NON_Q_VECTORS); 3379 } 3380} 3381 3382/** 3383 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS 3384 * @adapter: board private structure to initialize 3385 * 3386 * Cache the descriptor ring offsets for RSS to the assigned rings. 3387 * 3388 **/ 3389static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) 3390{ 3391 int i; 3392 bool ret = false; 3393 3394 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 3395 for (i = 0; i < adapter->num_rx_queues; i++) 3396 adapter->rx_ring[i].reg_idx = i; 3397 for (i = 0; i < adapter->num_tx_queues; i++) 3398 adapter->tx_ring[i].reg_idx = i; 3399 ret = true; 3400 } else { 3401 ret = false; 3402 } 3403 3404 return ret; 3405} 3406 3407#ifdef CONFIG_IXGBE_DCB 3408/** 3409 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB 3410 * @adapter: board private structure to initialize 3411 * 3412 * Cache the descriptor ring offsets for DCB to the assigned rings. 3413 * 3414 **/ 3415static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) 3416{ 3417 int i; 3418 bool ret = false; 3419 int dcb_i = adapter->ring_feature[RING_F_DCB].indices; 3420 3421 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 3422 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 3423 /* the number of queues is assumed to be symmetric */ 3424 for (i = 0; i < dcb_i; i++) { 3425 adapter->rx_ring[i].reg_idx = i << 3; 3426 adapter->tx_ring[i].reg_idx = i << 2; 3427 } 3428 ret = true; 3429 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 3430 if (dcb_i == 8) { 3431 /* 3432 * Tx TC0 starts at: descriptor queue 0 3433 * Tx TC1 starts at: descriptor queue 32 3434 * Tx TC2 starts at: descriptor queue 64 3435 * Tx TC3 starts at: descriptor queue 80 3436 * Tx TC4 starts at: descriptor queue 96 3437 * Tx TC5 starts at: descriptor queue 104 3438 * Tx TC6 starts at: descriptor queue 112 3439 * Tx TC7 starts at: descriptor queue 120 3440 * 3441 * Rx TC0-TC7 are offset by 16 queues each 3442 */ 3443 for (i = 0; i < 3; i++) { 3444 adapter->tx_ring[i].reg_idx = i << 5; 3445 adapter->rx_ring[i].reg_idx = i << 4; 3446 } 3447 for ( ; i < 5; i++) { 3448 adapter->tx_ring[i].reg_idx = 3449 ((i + 2) << 4); 3450 adapter->rx_ring[i].reg_idx = i << 4; 3451 } 3452 for ( ; i < dcb_i; i++) { 3453 adapter->tx_ring[i].reg_idx = 3454 ((i + 8) << 3); 3455 adapter->rx_ring[i].reg_idx = i << 4; 3456 } 3457 3458 ret = true; 3459 } else if (dcb_i == 4) { 3460 /* 3461 * Tx TC0 starts at: descriptor queue 0 3462 * Tx TC1 starts at: descriptor queue 64 3463 * Tx TC2 starts at: descriptor queue 96 3464 * Tx TC3 starts at: descriptor queue 112 3465 * 3466 * Rx TC0-TC3 are offset by 32 queues each 3467 */ 3468 adapter->tx_ring[0].reg_idx = 0; 3469 adapter->tx_ring[1].reg_idx = 64; 3470 adapter->tx_ring[2].reg_idx = 96; 3471 adapter->tx_ring[3].reg_idx = 112; 3472 for (i = 0 ; i < dcb_i; i++) 3473 adapter->rx_ring[i].reg_idx = i << 5; 3474 3475 ret = true; 3476 } else { 3477 ret = false; 3478 } 3479 } else { 3480 ret = false; 3481 } 3482 } else { 3483 ret = 
false; 3484 } 3485 3486 return ret; 3487} 3488#endif 3489 3490/** 3491 * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director 3492 * @adapter: board private structure to initialize 3493 * 3494 * Cache the descriptor ring offsets for Flow Director to the assigned rings. 3495 * 3496 **/ 3497static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter) 3498{ 3499 int i; 3500 bool ret = false; 3501 3502 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED && 3503 ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || 3504 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) { 3505 for (i = 0; i < adapter->num_rx_queues; i++) 3506 adapter->rx_ring[i].reg_idx = i; 3507 for (i = 0; i < adapter->num_tx_queues; i++) 3508 adapter->tx_ring[i].reg_idx = i; 3509 ret = true; 3510 } 3511 3512 return ret; 3513} 3514 3515#ifdef IXGBE_FCOE 3516/** 3517 * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for FCoE 3518 * @adapter: board private structure to initialize 3519 * 3520 * Cache the descriptor ring offsets for FCoE mode to the assigned rings. 3521 * 3522 */ 3523static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) 3524{ 3525 int i, fcoe_rx_i = 0, fcoe_tx_i = 0; 3526 bool ret = false; 3527 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; 3528 3529 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { 3530#ifdef CONFIG_IXGBE_DCB 3531 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 3532 struct ixgbe_fcoe *fcoe = &adapter->fcoe; 3533 3534 ixgbe_cache_ring_dcb(adapter); 3535 /* find out queues in TC for FCoE */ 3536 fcoe_rx_i = adapter->rx_ring[fcoe->tc].reg_idx + 1; 3537 fcoe_tx_i = adapter->tx_ring[fcoe->tc].reg_idx + 1; 3538 /* 3539 * In 82599, the number of Tx queues for each traffic 3540 * class for both 8-TC and 4-TC modes are: 3541 * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7 3542 * 8 TCs: 32 32 16 16 8 8 8 8 3543 * 4 TCs: 64 64 32 32 3544 * We have max 8 queues for FCoE, where 8 is the 3545 * FCoE redirection table size. If TC for FCoE is 3546 * less than or equal to TC3, we have enough queues 3547 * to add max of 8 queues for FCoE, so we start FCoE 3548 * tx descriptor from the next one, i.e., reg_idx + 1. 3549 * If TC for FCoE is above TC3, implying 8 TC mode, 3550 * and we need 8 for FCoE, we have to take all queues 3551 * in that traffic class for FCoE. 3552 */ 3553 if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3)) 3554 fcoe_tx_i--; 3555 } 3556#endif /* CONFIG_IXGBE_DCB */ 3557 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 3558 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || 3559 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) 3560 ixgbe_cache_ring_fdir(adapter); 3561 else 3562 ixgbe_cache_ring_rss(adapter); 3563 3564 fcoe_rx_i = f->mask; 3565 fcoe_tx_i = f->mask; 3566 } 3567 for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) { 3568 adapter->rx_ring[f->mask + i].reg_idx = fcoe_rx_i; 3569 adapter->tx_ring[f->mask + i].reg_idx = fcoe_tx_i; 3570 } 3571 ret = true; 3572 } 3573 return ret; 3574} 3575 3576#endif /* IXGBE_FCOE */ 3577/** 3578 * ixgbe_cache_ring_register - Descriptor ring to register mapping 3579 * @adapter: board private structure to initialize 3580 * 3581 * Once we know the feature-set enabled for the device, we'll cache 3582 * the register offset the descriptor ring is assigned to. 3583 * 3584 * Note, the order of the various feature calls is important.
It must start with 3585 * the "most" features enabled at the same time, then trickle down to the 3586 * fewest features turned on at once. 3587 **/ 3588static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) 3589{ 3590 /* start with default case */ 3591 adapter->rx_ring[0].reg_idx = 0; 3592 adapter->tx_ring[0].reg_idx = 0; 3593 3594#ifdef IXGBE_FCOE 3595 if (ixgbe_cache_ring_fcoe(adapter)) 3596 return; 3597 3598#endif /* IXGBE_FCOE */ 3599#ifdef CONFIG_IXGBE_DCB 3600 if (ixgbe_cache_ring_dcb(adapter)) 3601 return; 3602 3603#endif 3604 if (ixgbe_cache_ring_fdir(adapter)) 3605 return; 3606 3607 if (ixgbe_cache_ring_rss(adapter)) 3608 return; 3609} 3610 3611/** 3612 * ixgbe_alloc_queues - Allocate memory for all rings 3613 * @adapter: board private structure to initialize 3614 * 3615 * We allocate one ring per queue at run-time since we don't know the 3616 * number of queues at compile-time. The polling_netdev array is 3617 * intended for Multiqueue, but should work fine with a single queue. 3618 **/ 3619static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter) 3620{ 3621 int i; 3622 3623 adapter->tx_ring = kcalloc(adapter->num_tx_queues, 3624 sizeof(struct ixgbe_ring), GFP_KERNEL); 3625 if (!adapter->tx_ring) 3626 goto err_tx_ring_allocation; 3627 3628 adapter->rx_ring = kcalloc(adapter->num_rx_queues, 3629 sizeof(struct ixgbe_ring), GFP_KERNEL); 3630 if (!adapter->rx_ring) 3631 goto err_rx_ring_allocation; 3632 3633 for (i = 0; i < adapter->num_tx_queues; i++) { 3634 adapter->tx_ring[i].count = adapter->tx_ring_count; 3635 adapter->tx_ring[i].queue_index = i; 3636 } 3637 3638 for (i = 0; i < adapter->num_rx_queues; i++) { 3639 adapter->rx_ring[i].count = adapter->rx_ring_count; 3640 adapter->rx_ring[i].queue_index = i; 3641 } 3642 3643 ixgbe_cache_ring_register(adapter); 3644 3645 return 0; 3646 3647err_rx_ring_allocation: 3648 kfree(adapter->tx_ring); 3649err_tx_ring_allocation: 3650 return -ENOMEM; 3651} 3652 3653/** 3654 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported 3655 * @adapter: board private structure to initialize 3656 * 3657 * Attempt to configure the interrupts using the best available 3658 * capabilities of the hardware and the kernel. 3659 **/ 3660static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) 3661{ 3662 struct ixgbe_hw *hw = &adapter->hw; 3663 int err = 0; 3664 int vector, v_budget; 3665 3666 /* 3667 * It's easy to be greedy for MSI-X vectors, but it really 3668 * doesn't do us much good if we have a lot more vectors 3669 * than CPUs. So let's be conservative and only ask for 3670 * (roughly) the same number of vectors as there are CPUs. 3671 */ 3672 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues, 3673 (int)num_online_cpus()) + NON_Q_VECTORS; 3674 3675 /* 3676 * At the same time, hardware can only support a maximum of 3677 * hw.mac->max_msix_vectors vectors. With features 3678 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx 3679 * descriptor queues supported by our device. Thus, we cap it off in 3680 * those rare cases where the CPU count also exceeds our vector limit. 3681 */ 3682 v_budget = min(v_budget, (int)hw->mac.max_msix_vectors); 3683 3684 /* A failure in MSI-X entry allocation isn't fatal, but it does 3685 * mean we disable MSI-X capabilities of the adapter.
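	 * Losing MSI-X also costs the features that want a vector per
	 * queue pair, which is why DCB, RSS, Flow Director and ATR are
	 * cleared below before trying MSI and then legacy INTx.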
*/ 3686 adapter->msix_entries = kcalloc(v_budget, 3687 sizeof(struct msix_entry), GFP_KERNEL); 3688 if (adapter->msix_entries) { 3689 for (vector = 0; vector < v_budget; vector++) 3690 adapter->msix_entries[vector].entry = vector; 3691 3692 ixgbe_acquire_msix_vectors(adapter, v_budget); 3693 3694 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) 3695 goto out; 3696 } 3697 3698 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; 3699 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; 3700 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; 3701 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; 3702 adapter->atr_sample_rate = 0; 3703 ixgbe_set_num_queues(adapter); 3704 3705 err = pci_enable_msi(adapter->pdev); 3706 if (!err) { 3707 adapter->flags |= IXGBE_FLAG_MSI_ENABLED; 3708 } else { 3709 DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, " 3710 "falling back to legacy. Error: %d\n", err); 3711 /* reset err */ 3712 err = 0; 3713 } 3714 3715out: 3716 return err; 3717} 3718 3719/** 3720 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors 3721 * @adapter: board private structure to initialize 3722 * 3723 * We allocate one q_vector per queue interrupt. If allocation fails we 3724 * return -ENOMEM. 3725 **/ 3726static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) 3727{ 3728 int q_idx, num_q_vectors; 3729 struct ixgbe_q_vector *q_vector; 3730 int napi_vectors; 3731 int (*poll)(struct napi_struct *, int); 3732 3733 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 3734 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 3735 napi_vectors = adapter->num_rx_queues; 3736 poll = &ixgbe_clean_rxtx_many; 3737 } else { 3738 num_q_vectors = 1; 3739 napi_vectors = 1; 3740 poll = &ixgbe_poll; 3741 } 3742 3743 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 3744 q_vector = kzalloc(sizeof(struct ixgbe_q_vector), GFP_KERNEL); 3745 if (!q_vector) 3746 goto err_out; 3747 q_vector->adapter = adapter; 3748 if (q_vector->txr_count && !q_vector->rxr_count) 3749 q_vector->eitr = adapter->tx_eitr_param; 3750 else 3751 q_vector->eitr = adapter->rx_eitr_param; 3752 q_vector->v_idx = q_idx; 3753 netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64); 3754 adapter->q_vector[q_idx] = q_vector; 3755 } 3756 3757 return 0; 3758 3759err_out: 3760 while (q_idx) { 3761 q_idx--; 3762 q_vector = adapter->q_vector[q_idx]; 3763 netif_napi_del(&q_vector->napi); 3764 kfree(q_vector); 3765 adapter->q_vector[q_idx] = NULL; 3766 } 3767 return -ENOMEM; 3768} 3769 3770/** 3771 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors 3772 * @adapter: board private structure to initialize 3773 * 3774 * This function frees the memory allocated to the q_vectors. In addition if 3775 * NAPI is enabled it will delete any references to the NAPI struct prior 3776 * to freeing the q_vector. 
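 * The adapter's q_vector[] slot is cleared before the NAPI context
 * is deleted so nothing can look up a vector mid-teardown.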
3777 **/ 3778static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) 3779{ 3780 int q_idx, num_q_vectors; 3781 3782 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) 3783 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 3784 else 3785 num_q_vectors = 1; 3786 3787 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 3788 struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx]; 3789 adapter->q_vector[q_idx] = NULL; 3790 netif_napi_del(&q_vector->napi); 3791 kfree(q_vector); 3792 } 3793} 3794 3795static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) 3796{ 3797 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 3798 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; 3799 pci_disable_msix(adapter->pdev); 3800 kfree(adapter->msix_entries); 3801 adapter->msix_entries = NULL; 3802 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { 3803 adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; 3804 pci_disable_msi(adapter->pdev); 3805 } 3806 return; 3807} 3808 3809/** 3810 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme 3811 * @adapter: board private structure to initialize 3812 * 3813 * We determine which interrupt scheme to use based on... 3814 * - Kernel support (MSI, MSI-X) 3815 * - which can be user-defined (via MODULE_PARAM) 3816 * - Hardware queue count (num_*_queues) 3817 * - defined by miscellaneous hardware support/features (RSS, etc.) 3818 **/ 3819int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) 3820{ 3821 int err; 3822 3823 /* Number of supported queues */ 3824 ixgbe_set_num_queues(adapter); 3825 3826 err = ixgbe_set_interrupt_capability(adapter); 3827 if (err) { 3828 DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n"); 3829 goto err_set_interrupt; 3830 } 3831 3832 err = ixgbe_alloc_q_vectors(adapter); 3833 if (err) { 3834 DPRINTK(PROBE, ERR, "Unable to allocate memory for queue " 3835 "vectors\n"); 3836 goto err_alloc_q_vectors; 3837 } 3838 3839 err = ixgbe_alloc_queues(adapter); 3840 if (err) { 3841 DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n"); 3842 goto err_alloc_queues; 3843 } 3844 3845 DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, " 3846 "Tx Queue count = %u\n", 3847 (adapter->num_rx_queues > 1) ? 
"Enabled" : 3848 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); 3849 3850 set_bit(__IXGBE_DOWN, &adapter->state); 3851 3852 return 0; 3853 3854err_alloc_queues: 3855 ixgbe_free_q_vectors(adapter); 3856err_alloc_q_vectors: 3857 ixgbe_reset_interrupt_capability(adapter); 3858err_set_interrupt: 3859 return err; 3860} 3861 3862/** 3863 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings 3864 * @adapter: board private structure to clear interrupt scheme on 3865 * 3866 * We go through and clear interrupt specific resources and reset the structure 3867 * to pre-load conditions 3868 **/ 3869void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) 3870{ 3871 kfree(adapter->tx_ring); 3872 kfree(adapter->rx_ring); 3873 adapter->tx_ring = NULL; 3874 adapter->rx_ring = NULL; 3875 3876 ixgbe_free_q_vectors(adapter); 3877 ixgbe_reset_interrupt_capability(adapter); 3878} 3879 3880/** 3881 * ixgbe_sfp_timer - worker thread to find a missing module 3882 * @data: pointer to our adapter struct 3883 **/ 3884static void ixgbe_sfp_timer(unsigned long data) 3885{ 3886 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; 3887 3888 /* 3889 * Do the sfp_timer outside of interrupt context due to the 3890 * delays that sfp+ detection requires 3891 */ 3892 schedule_work(&adapter->sfp_task); 3893} 3894 3895/** 3896 * ixgbe_sfp_task - worker thread to find a missing module 3897 * @work: pointer to work_struct containing our data 3898 **/ 3899static void ixgbe_sfp_task(struct work_struct *work) 3900{ 3901 struct ixgbe_adapter *adapter = container_of(work, 3902 struct ixgbe_adapter, 3903 sfp_task); 3904 struct ixgbe_hw *hw = &adapter->hw; 3905 3906 if ((hw->phy.type == ixgbe_phy_nl) && 3907 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) { 3908 s32 ret = hw->phy.ops.identify_sfp(hw); 3909 if (ret == IXGBE_ERR_SFP_NOT_PRESENT) 3910 goto reschedule; 3911 ret = hw->phy.ops.reset(hw); 3912 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3913 dev_err(&adapter->pdev->dev, "failed to initialize " 3914 "because an unsupported SFP+ module type " 3915 "was detected.\n" 3916 "Reload the driver after installing a " 3917 "supported module.\n"); 3918 unregister_netdev(adapter->netdev); 3919 } else { 3920 DPRINTK(PROBE, INFO, "detected SFP+: %d\n", 3921 hw->phy.sfp_type); 3922 } 3923 /* don't need this routine any more */ 3924 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); 3925 } 3926 return; 3927reschedule: 3928 if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state)) 3929 mod_timer(&adapter->sfp_timer, 3930 round_jiffies(jiffies + (2 * HZ))); 3931} 3932 3933/** 3934 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) 3935 * @adapter: board private structure to initialize 3936 * 3937 * ixgbe_sw_init initializes the Adapter private data structure. 3938 * Fields are initialized based on PCI device information and 3939 * OS network device settings (MTU size). 
3940 **/ 3941static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) 3942{ 3943 struct ixgbe_hw *hw = &adapter->hw; 3944 struct pci_dev *pdev = adapter->pdev; 3945 unsigned int rss; 3946#ifdef CONFIG_IXGBE_DCB 3947 int j; 3948 struct tc_configuration *tc; 3949#endif 3950 3951 /* PCI config space info */ 3952 3953 hw->vendor_id = pdev->vendor; 3954 hw->device_id = pdev->device; 3955 hw->revision_id = pdev->revision; 3956 hw->subsystem_vendor_id = pdev->subsystem_vendor; 3957 hw->subsystem_device_id = pdev->subsystem_device; 3958 3959 /* Set capability flags */ 3960 rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus()); 3961 adapter->ring_feature[RING_F_RSS].indices = rss; 3962 adapter->flags |= IXGBE_FLAG_RSS_ENABLED; 3963 adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES; 3964 if (hw->mac.type == ixgbe_mac_82598EB) { 3965 if (hw->device_id == IXGBE_DEV_ID_82598AT) 3966 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; 3967 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; 3968 } else if (hw->mac.type == ixgbe_mac_82599EB) { 3969 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; 3970 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; 3971 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; 3972 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; 3973 adapter->ring_feature[RING_F_FDIR].indices = 3974 IXGBE_MAX_FDIR_INDICES; 3975 adapter->atr_sample_rate = 20; 3976 adapter->fdir_pballoc = 0; 3977#ifdef IXGBE_FCOE 3978 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; 3979 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; 3980 adapter->ring_feature[RING_F_FCOE].indices = 0; 3981#ifdef CONFIG_IXGBE_DCB 3982 /* Default traffic class to use for FCoE */ 3983 adapter->fcoe.tc = IXGBE_FCOE_DEFTC; 3984#endif 3985#endif /* IXGBE_FCOE */ 3986 } 3987 3988#ifdef CONFIG_IXGBE_DCB 3989 /* Configure DCB traffic classes */ 3990 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) { 3991 tc = &adapter->dcb_cfg.tc_config[j]; 3992 tc->path[DCB_TX_CONFIG].bwg_id = 0; 3993 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1); 3994 tc->path[DCB_RX_CONFIG].bwg_id = 0; 3995 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1); 3996 tc->dcb_pfc = pfc_disabled; 3997 } 3998 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100; 3999 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; 4000 adapter->dcb_cfg.rx_pba_cfg = pba_equal; 4001 adapter->dcb_cfg.pfc_mode_enable = false; 4002 adapter->dcb_cfg.round_robin_enable = false; 4003 adapter->dcb_set_bitmap = 0x00; 4004 ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg, 4005 adapter->ring_feature[RING_F_DCB].indices); 4006 4007#endif 4008 4009 /* default flow control settings */ 4010 hw->fc.requested_mode = ixgbe_fc_full; 4011 hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */ 4012#ifdef CONFIG_DCB 4013 adapter->last_lfc_mode = hw->fc.current_mode; 4014#endif 4015 hw->fc.high_water = IXGBE_DEFAULT_FCRTH; 4016 hw->fc.low_water = IXGBE_DEFAULT_FCRTL; 4017 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; 4018 hw->fc.send_xon = true; 4019 hw->fc.disable_fc_autoneg = false; 4020 4021 /* enable itr by default in dynamic mode */ 4022 adapter->rx_itr_setting = 1; 4023 adapter->rx_eitr_param = 20000; 4024 adapter->tx_itr_setting = 1; 4025 adapter->tx_eitr_param = 10000; 4026 4027 /* set defaults for eitr in MegaBytes */ 4028 adapter->eitr_low = 10; 4029 adapter->eitr_high = 20; 4030 4031 /* set default ring sizes */ 4032 adapter->tx_ring_count = IXGBE_DEFAULT_TXD; 4033 adapter->rx_ring_count = IXGBE_DEFAULT_RXD; 4034 4035 /* initialize eeprom parameters */ 4036 if 
(ixgbe_init_eeprom_params_generic(hw)) { 4037 dev_err(&pdev->dev, "EEPROM initialization failed\n"); 4038 return -EIO; 4039 } 4040 4041 /* enable rx csum by default */ 4042 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; 4043 4044 set_bit(__IXGBE_DOWN, &adapter->state); 4045 4046 return 0; 4047} 4048 4049/** 4050 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors) 4051 * @adapter: board private structure 4052 * @tx_ring: tx descriptor ring (for a specific queue) to setup 4053 * 4054 * Return 0 on success, negative on failure 4055 **/ 4056int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, 4057 struct ixgbe_ring *tx_ring) 4058{ 4059 struct pci_dev *pdev = adapter->pdev; 4060 int size; 4061 4062 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; 4063 tx_ring->tx_buffer_info = vmalloc(size); 4064 if (!tx_ring->tx_buffer_info) 4065 goto err; 4066 memset(tx_ring->tx_buffer_info, 0, size); 4067 4068 /* round up to nearest 4K */ 4069 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 4070 tx_ring->size = ALIGN(tx_ring->size, 4096); 4071 4072 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, 4073 &tx_ring->dma); 4074 if (!tx_ring->desc) 4075 goto err; 4076 4077 tx_ring->next_to_use = 0; 4078 tx_ring->next_to_clean = 0; 4079 tx_ring->work_limit = tx_ring->count; 4080 return 0; 4081 4082err: 4083 vfree(tx_ring->tx_buffer_info); 4084 tx_ring->tx_buffer_info = NULL; 4085 DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit " 4086 "descriptor ring\n"); 4087 return -ENOMEM; 4088} 4089 4090/** 4091 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources 4092 * @adapter: board private structure 4093 * 4094 * If this function returns with an error, then it's possible one or 4095 * more of the rings is populated (while the rest are not). It is the 4096 * caller's duty to clean those orphaned rings.
4097 * 4098 * Return 0 on success, negative on failure 4099 **/ 4100static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) 4101{ 4102 int i, err = 0; 4103 4104 for (i = 0; i < adapter->num_tx_queues; i++) { 4105 err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]); 4106 if (!err) 4107 continue; 4108 DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i); 4109 break; 4110 } 4111 4112 return err; 4113} 4114 4115/** 4116 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) 4117 * @adapter: board private structure 4118 * @rx_ring: rx descriptor ring (for a specific queue) to setup 4119 * 4120 * Returns 0 on success, negative on failure 4121 **/ 4122int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, 4123 struct ixgbe_ring *rx_ring) 4124{ 4125 struct pci_dev *pdev = adapter->pdev; 4126 int size; 4127 4128 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; 4129 rx_ring->rx_buffer_info = vmalloc(size); 4130 if (!rx_ring->rx_buffer_info) { 4131 DPRINTK(PROBE, ERR, 4132 "vmalloc allocation failed for the rx desc ring\n"); 4133 goto alloc_failed; 4134 } 4135 memset(rx_ring->rx_buffer_info, 0, size); 4136 4137 /* Round up to nearest 4K */ 4138 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 4139 rx_ring->size = ALIGN(rx_ring->size, 4096); 4140 4141 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma); 4142 4143 if (!rx_ring->desc) { 4144 DPRINTK(PROBE, ERR, 4145 "Memory allocation failed for the rx desc ring\n"); 4146 vfree(rx_ring->rx_buffer_info); rx_ring->rx_buffer_info = NULL; 4147 goto alloc_failed; 4148 } 4149 4150 rx_ring->next_to_clean = 0; 4151 rx_ring->next_to_use = 0; 4152 4153 return 0; 4154 4155alloc_failed: 4156 return -ENOMEM; 4157} 4158 4159/** 4160 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources 4161 * @adapter: board private structure 4162 * 4163 * If this function returns with an error, then it's possible one or 4164 * more of the rings is populated (while the rest are not). It is the 4165 * caller's duty to clean those orphaned rings.
4166 * 4167 * Return 0 on success, negative on failure 4168 **/ 4169 4170static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) 4171{ 4172 int i, err = 0; 4173 4174 for (i = 0; i < adapter->num_rx_queues; i++) { 4175 err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]); 4176 if (!err) 4177 continue; 4178 DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i); 4179 break; 4180 } 4181 4182 return err; 4183} 4184 4185/** 4186 * ixgbe_free_tx_resources - Free Tx Resources per Queue 4187 * @adapter: board private structure 4188 * @tx_ring: Tx descriptor ring for a specific queue 4189 * 4190 * Free all transmit software resources 4191 **/ 4192void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter, 4193 struct ixgbe_ring *tx_ring) 4194{ 4195 struct pci_dev *pdev = adapter->pdev; 4196 4197 ixgbe_clean_tx_ring(adapter, tx_ring); 4198 4199 vfree(tx_ring->tx_buffer_info); 4200 tx_ring->tx_buffer_info = NULL; 4201 4202 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma); 4203 4204 tx_ring->desc = NULL; 4205} 4206 4207/** 4208 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues 4209 * @adapter: board private structure 4210 * 4211 * Free all transmit software resources 4212 **/ 4213static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter) 4214{ 4215 int i; 4216 4217 for (i = 0; i < adapter->num_tx_queues; i++) 4218 if (adapter->tx_ring[i].desc) 4219 ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]); 4220} 4221 4222/** 4223 * ixgbe_free_rx_resources - Free Rx Resources 4224 * @adapter: board private structure 4225 * @rx_ring: ring to clean the resources from 4226 * 4227 * Free all receive software resources 4228 **/ 4229void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter, 4230 struct ixgbe_ring *rx_ring) 4231{ 4232 struct pci_dev *pdev = adapter->pdev; 4233 4234 ixgbe_clean_rx_ring(adapter, rx_ring); 4235 4236 vfree(rx_ring->rx_buffer_info); 4237 rx_ring->rx_buffer_info = NULL; 4238 4239 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); 4240 4241 rx_ring->desc = NULL; 4242} 4243 4244/** 4245 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues 4246 * @adapter: board private structure 4247 * 4248 * Free all receive software resources 4249 **/ 4250static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) 4251{ 4252 int i; 4253 4254 for (i = 0; i < adapter->num_rx_queues; i++) 4255 if (adapter->rx_ring[i].desc) 4256 ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]); 4257} 4258 4259/** 4260 * ixgbe_change_mtu - Change the Maximum Transfer Unit 4261 * @netdev: network interface device structure 4262 * @new_mtu: new value for maximum frame size 4263 * 4264 * Returns 0 on success, negative on failure 4265 **/ 4266static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) 4267{ 4268 struct ixgbe_adapter *adapter = netdev_priv(netdev); 4269 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 4270 4271 /* MTU < 68 is an error and causes problems on some kernels */ 4272 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) 4273 return -EINVAL; 4274 4275 DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n", 4276 netdev->mtu, new_mtu); 4277 /* must set new MTU before calling down or up */ 4278 netdev->mtu = new_mtu; 4279 4280 if (netif_running(netdev)) 4281 ixgbe_reinit_locked(adapter); 4282 4283 return 0; 4284} 4285 4286/** 4287 * ixgbe_open - Called when a network interface is made active 4288 * @netdev: network interface device structure 4289 * 4290 * 
Returns 0 on success, negative value on failure 4291 * 4292 * The open entry point is called when a network interface is made 4293 * active by the system (IFF_UP). At this point all resources needed 4294 * for transmit and receive operations are allocated, the interrupt 4295 * handler is registered with the OS, the watchdog timer is started, 4296 * and the stack is notified that the interface is ready. 4297 **/ 4298static int ixgbe_open(struct net_device *netdev) 4299{ 4300 struct ixgbe_adapter *adapter = netdev_priv(netdev); 4301 int err; 4302 4303 /* disallow open during test */ 4304 if (test_bit(__IXGBE_TESTING, &adapter->state)) 4305 return -EBUSY; 4306 4307 netif_carrier_off(netdev); 4308 4309 /* allocate transmit descriptors */ 4310 err = ixgbe_setup_all_tx_resources(adapter); 4311 if (err) 4312 goto err_setup_tx; 4313 4314 /* allocate receive descriptors */ 4315 err = ixgbe_setup_all_rx_resources(adapter); 4316 if (err) 4317 goto err_setup_rx; 4318 4319 ixgbe_configure(adapter); 4320 4321 err = ixgbe_request_irq(adapter); 4322 if (err) 4323 goto err_req_irq; 4324 4325 err = ixgbe_up_complete(adapter); 4326 if (err) 4327 goto err_up; 4328 4329 netif_tx_start_all_queues(netdev); 4330 4331 return 0; 4332 4333err_up: 4334 ixgbe_release_hw_control(adapter); 4335 ixgbe_free_irq(adapter); 4336err_req_irq: 4337err_setup_rx: 4338 ixgbe_free_all_rx_resources(adapter); 4339err_setup_tx: 4340 ixgbe_free_all_tx_resources(adapter); 4341 ixgbe_reset(adapter); 4342 4343 return err; 4344} 4345 4346/** 4347 * ixgbe_close - Disables a network interface 4348 * @netdev: network interface device structure 4349 * 4350 * Returns 0, this is not allowed to fail 4351 * 4352 * The close entry point is called when an interface is de-activated 4353 * by the OS. The hardware is still under the driver's control, but 4354 * needs to be disabled. A global MAC reset is issued to stop the 4355 * hardware, and all transmit and receive resources are freed. 4356 **/ 4357static int ixgbe_close(struct net_device *netdev) 4358{ 4359 struct ixgbe_adapter *adapter = netdev_priv(netdev); 4360 4361 ixgbe_down(adapter); 4362 ixgbe_free_irq(adapter); 4363 4364 ixgbe_free_all_tx_resources(adapter); 4365 ixgbe_free_all_rx_resources(adapter); 4366 4367 ixgbe_release_hw_control(adapter); 4368 4369 return 0; 4370} 4371 4372#ifdef CONFIG_PM 4373static int ixgbe_resume(struct pci_dev *pdev) 4374{ 4375 struct net_device *netdev = pci_get_drvdata(pdev); 4376 struct ixgbe_adapter *adapter = netdev_priv(netdev); 4377 int err; 4378 4379 pci_set_power_state(pdev, PCI_D0); 4380 pci_restore_state(pdev); 4381 /* 4382 * pci_restore_state clears dev->state_saved so call 4383 * pci_save_state to restore it.
4384 */ 4385 pci_save_state(pdev); 4386 4387 err = pci_enable_device_mem(pdev); 4388 if (err) { 4389 printk(KERN_ERR "ixgbe: Cannot enable PCI device from " 4390 "suspend\n"); 4391 return err; 4392 } 4393 pci_set_master(pdev); 4394 4395 pci_wake_from_d3(pdev, false); 4396 4397 err = ixgbe_init_interrupt_scheme(adapter); 4398 if (err) { 4399 printk(KERN_ERR "ixgbe: Cannot initialize interrupts for " 4400 "device\n"); 4401 return err; 4402 } 4403 4404 ixgbe_reset(adapter); 4405 4406 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); 4407 4408 if (netif_running(netdev)) { 4409 err = ixgbe_open(adapter->netdev); 4410 if (err) 4411 return err; 4412 } 4413 4414 netif_device_attach(netdev); 4415 4416 return 0; 4417} 4418#endif /* CONFIG_PM */ 4419 4420static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) 4421{ 4422 struct net_device *netdev = pci_get_drvdata(pdev); 4423 struct ixgbe_adapter *adapter = netdev_priv(netdev); 4424 struct ixgbe_hw *hw = &adapter->hw; 4425 u32 ctrl, fctrl; 4426 u32 wufc = adapter->wol; 4427#ifdef CONFIG_PM 4428 int retval = 0; 4429#endif 4430 4431 netif_device_detach(netdev); 4432 4433 if (netif_running(netdev)) { 4434 ixgbe_down(adapter); 4435 ixgbe_free_irq(adapter); 4436 ixgbe_free_all_tx_resources(adapter); 4437 ixgbe_free_all_rx_resources(adapter); 4438 } 4439 ixgbe_clear_interrupt_scheme(adapter); 4440 4441#ifdef CONFIG_PM 4442 retval = pci_save_state(pdev); 4443 if (retval) 4444 return retval; 4445 4446#endif 4447 if (wufc) { 4448 ixgbe_set_rx_mode(netdev); 4449 4450 /* turn on all-multi mode if wake on multicast is enabled */ 4451 if (wufc & IXGBE_WUFC_MC) { 4452 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4453 fctrl |= IXGBE_FCTRL_MPE; 4454 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4455 } 4456 4457 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); 4458 ctrl |= IXGBE_CTRL_GIO_DIS; 4459 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); 4460 4461 IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc); 4462 } else { 4463 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0); 4464 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); 4465 } 4466 4467 if (wufc && hw->mac.type == ixgbe_mac_82599EB) 4468 pci_wake_from_d3(pdev, true); 4469 else 4470 pci_wake_from_d3(pdev, false); 4471 4472 *enable_wake = !!wufc; 4473 4474 ixgbe_release_hw_control(adapter); 4475 4476 pci_disable_device(pdev); 4477 4478 return 0; 4479} 4480 4481#ifdef CONFIG_PM 4482static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) 4483{ 4484 int retval; 4485 bool wake; 4486 4487 retval = __ixgbe_shutdown(pdev, &wake); 4488 if (retval) 4489 return retval; 4490 4491 if (wake) { 4492 pci_prepare_to_sleep(pdev); 4493 } else { 4494 pci_wake_from_d3(pdev, false); 4495 pci_set_power_state(pdev, PCI_D3hot); 4496 } 4497 4498 return 0; 4499} 4500#endif /* CONFIG_PM */ 4501 4502static void ixgbe_shutdown(struct pci_dev *pdev) 4503{ 4504 bool wake; 4505 4506 __ixgbe_shutdown(pdev, &wake); 4507 4508 if (system_state == SYSTEM_POWER_OFF) { 4509 pci_wake_from_d3(pdev, wake); 4510 pci_set_power_state(pdev, PCI_D3hot); 4511 } 4512} 4513 4514/** 4515 * ixgbe_update_stats - Update the board statistics counters. 
4516 * @adapter: board private structure 4517 **/ 4518void ixgbe_update_stats(struct ixgbe_adapter *adapter) 4519{ 4520 struct net_device *netdev = adapter->netdev; 4521 struct ixgbe_hw *hw = &adapter->hw; 4522 u64 total_mpc = 0; 4523 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; 4524 u64 non_eop_descs = 0, restart_queue = 0; 4525 4526 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { 4527 u64 rsc_count = 0; 4528 u64 rsc_flush = 0; 4529 for (i = 0; i < 16; i++) 4530 adapter->hw_rx_no_dma_resources += 4531 IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 4532 for (i = 0; i < adapter->num_rx_queues; i++) { 4533 rsc_count += adapter->rx_ring[i].rsc_count; 4534 rsc_flush += adapter->rx_ring[i].rsc_flush; 4535 } 4536 adapter->rsc_total_count = rsc_count; 4537 adapter->rsc_total_flush = rsc_flush; 4538 } 4539 4540 /* gather some stats to the adapter struct that are per queue */ 4541 for (i = 0; i < adapter->num_tx_queues; i++) 4542 restart_queue += adapter->tx_ring[i].restart_queue; 4543 adapter->restart_queue = restart_queue; 4544 4545 for (i = 0; i < adapter->num_rx_queues; i++) 4546 non_eop_descs += adapter->rx_ring[i].non_eop_descs; 4547 adapter->non_eop_descs = non_eop_descs; 4548 4549 adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); 4550 for (i = 0; i < 8; i++) { 4551 /* for packet buffers not used, the register should read 0 */ 4552 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i)); 4553 missed_rx += mpc; 4554 adapter->stats.mpc[i] += mpc; 4555 total_mpc += adapter->stats.mpc[i]; 4556 if (hw->mac.type == ixgbe_mac_82598EB) 4557 adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); 4558 adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 4559 adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); 4560 adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 4561 adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); 4562 if (hw->mac.type == ixgbe_mac_82599EB) { 4563 adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw, 4564 IXGBE_PXONRXCNT(i)); 4565 adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw, 4566 IXGBE_PXOFFRXCNT(i)); 4567 adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 4568 } else { 4569 adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw, 4570 IXGBE_PXONRXC(i)); 4571 adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw, 4572 IXGBE_PXOFFRXC(i)); 4573 } 4574 adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw, 4575 IXGBE_PXONTXC(i)); 4576 adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw, 4577 IXGBE_PXOFFTXC(i)); 4578 } 4579 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); 4580 /* work around hardware counting issue */ 4581 adapter->stats.gprc -= missed_rx; 4582 4583 /* 82598 hardware only has a 32 bit counter in the high register */ 4584 if (hw->mac.type == ixgbe_mac_82599EB) { 4585 u64 tmp; 4586 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); 4587 tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; /* 4 high bits of GORC */ 4588 adapter->stats.gorc += (tmp << 32); 4589 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); 4590 tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; /* 4 high bits of GOTC */ 4591 adapter->stats.gotc += (tmp << 32); 4592 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL); 4593 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ 4594 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 4595 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); 4596 adapter->stats.fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); 4597 adapter->stats.fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); 4598#ifdef IXGBE_FCOE 4599 
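/* FCoE counters, present on 82599 only (this is the 82599 branch). Like the other stats registers read in this function they are read-to-clear (note the bare TORH read above, done purely to clear it), so each read below is accumulated into the software totals. */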
adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); 4600 adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); 4601 adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); 4602 adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); 4603 adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); 4604 adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); 4605#endif /* IXGBE_FCOE */ 4606 } else { 4607 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); 4608 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 4609 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); 4610 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); 4611 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH); 4612 } 4613 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 4614 adapter->stats.bprc += bprc; 4615 adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); 4616 if (hw->mac.type == ixgbe_mac_82598EB) 4617 adapter->stats.mprc -= bprc; 4618 adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC); 4619 adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); 4620 adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); 4621 adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); 4622 adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); 4623 adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); 4624 adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); 4625 adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); 4626 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); 4627 adapter->stats.lxontxc += lxon; 4628 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 4629 adapter->stats.lxofftxc += lxoff; 4630 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC); 4631 adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); 4632 adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); 4633 /* 4634 * 82598 errata - tx of flow control packets is included in tx counters 4635 */ 4636 xon_off_tot = lxon + lxoff; 4637 adapter->stats.gptc -= xon_off_tot; 4638 adapter->stats.mptc -= xon_off_tot; 4639 adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN)); 4640 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC); 4641 adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC); 4642 adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC); 4643 adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR); 4644 adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); 4645 adapter->stats.ptc64 -= xon_off_tot; 4646 adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); 4647 adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); 4648 adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); 4649 adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); 4650 adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); 4651 adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); 4652 4653 /* Fill out the OS statistics structure */ 4654 netdev->stats.multicast = adapter->stats.mprc; 4655 4656 /* Rx Errors */ 4657 netdev->stats.rx_errors = adapter->stats.crcerrs + 4658 adapter->stats.rlec; 4659 netdev->stats.rx_dropped = 0; 4660 netdev->stats.rx_length_errors = adapter->stats.rlec; 4661 netdev->stats.rx_crc_errors = adapter->stats.crcerrs; 4662 netdev->stats.rx_missed_errors = total_mpc; 4663} 4664 4665/** 4666 * ixgbe_watchdog - Timer Call-back 4667 * @data: pointer to adapter cast into an unsigned long 4668 **/ 4669static void ixgbe_watchdog(unsigned long data) 4670{ 4671 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; 4672 struct ixgbe_hw *hw = &adapter->hw; 4673 u64 eics = 0; 
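/* eics below collects one bit per active Tx/Rx queue vector; it is handed to ixgbe_irq_rearm_queues() to raise a software interrupt on each vector so any stalled rings get cleaned. */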
4674 int i; 4675 4676 /* 4677 * Do the watchdog outside of interrupt context due to the lovely 4678 * delays that some of the newer hardware requires 4679 */ 4680 4681 if (test_bit(__IXGBE_DOWN, &adapter->state)) 4682 goto watchdog_short_circuit; 4683 4684 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { 4685 /* 4686 * for legacy and MSI interrupts don't set any bits 4687 * that are enabled for EIAM, because this operation 4688 * would set *both* EIMS and EICS for any bit in EIAM 4689 */ 4690 IXGBE_WRITE_REG(hw, IXGBE_EICS, 4691 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER)); 4692 goto watchdog_reschedule; 4693 } 4694 4695 /* get one bit for every active tx/rx interrupt vector */ 4696 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { 4697 struct ixgbe_q_vector *qv = adapter->q_vector[i]; 4698 if (qv->rxr_count || qv->txr_count) 4699 eics |= ((u64)1 << i); 4700 } 4701 4702 /* Cause software interrupt to ensure rx rings are cleaned */ 4703 ixgbe_irq_rearm_queues(adapter, eics); 4704 4705watchdog_reschedule: 4706 /* Reset the timer */ 4707 mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ)); 4708 4709watchdog_short_circuit: 4710 schedule_work(&adapter->watchdog_task); 4711} 4712 4713/** 4714 * ixgbe_multispeed_fiber_task - worker thread to configure multispeed fiber 4715 * @work: pointer to work_struct containing our data 4716 **/ 4717static void ixgbe_multispeed_fiber_task(struct work_struct *work) 4718{ 4719 struct ixgbe_adapter *adapter = container_of(work, 4720 struct ixgbe_adapter, 4721 multispeed_fiber_task); 4722 struct ixgbe_hw *hw = &adapter->hw; 4723 u32 autoneg; 4724 bool negotiation; 4725 4726 adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK; 4727 autoneg = hw->phy.autoneg_advertised; 4728 if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) 4729 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation); 4730 if (hw->mac.ops.setup_link) 4731 hw->mac.ops.setup_link(hw, autoneg, negotiation, true); 4732 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 4733 adapter->flags &= ~IXGBE_FLAG_IN_SFP_LINK_TASK; 4734} 4735 4736/** 4737 * ixgbe_sfp_config_module_task - worker thread to configure a new SFP+ module 4738 * @work: pointer to work_struct containing our data 4739 **/ 4740static void ixgbe_sfp_config_module_task(struct work_struct *work) 4741{ 4742 struct ixgbe_adapter *adapter = container_of(work, 4743 struct ixgbe_adapter, 4744 sfp_config_module_task); 4745 struct ixgbe_hw *hw = &adapter->hw; 4746 u32 err; 4747 4748 adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK; 4749 4750 /* Time for electrical oscillations to settle down */ 4751 msleep(100); 4752 err = hw->phy.ops.identify_sfp(hw); 4753 4754 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 4755 dev_err(&adapter->pdev->dev, "failed to initialize because " 4756 "an unsupported SFP+ module type was detected.\n" 4757 "Reload the driver after installing a supported " 4758 "module.\n"); 4759 unregister_netdev(adapter->netdev); 4760 return; 4761 } 4762 hw->mac.ops.setup_sfp(hw); 4763 4764 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) 4765 /* This will also work for DA Twinax connections */ 4766 schedule_work(&adapter->multispeed_fiber_task); 4767 adapter->flags &= ~IXGBE_FLAG_IN_SFP_MOD_TASK; 4768} 4769 4770/** 4771 * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table 4772 * @work: pointer to work_struct containing our data 4773 **/ 4774static void ixgbe_fdir_reinit_task(struct work_struct *work) 4775{ 4776 struct ixgbe_adapter *adapter = container_of(work, 4777 struct ixgbe_adapter, 4778 
fdir_reinit_task); 4779 struct ixgbe_hw *hw = &adapter->hw; 4780 int i; 4781 4782 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { 4783 for (i = 0; i < adapter->num_tx_queues; i++) 4784 set_bit(__IXGBE_FDIR_INIT_DONE, 4785 &(adapter->tx_ring[i].reinit_state)); 4786 } else { 4787 DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, " 4788 "ignored adding FDIR ATR filters \n"); 4789 } 4790 /* Done FDIR Re-initialization, enable transmits */ 4791 netif_tx_start_all_queues(adapter->netdev); 4792} 4793 4794/** 4795 * ixgbe_watchdog_task - worker thread to bring link up 4796 * @work: pointer to work_struct containing our data 4797 **/ 4798static void ixgbe_watchdog_task(struct work_struct *work) 4799{ 4800 struct ixgbe_adapter *adapter = container_of(work, 4801 struct ixgbe_adapter, 4802 watchdog_task); 4803 struct net_device *netdev = adapter->netdev; 4804 struct ixgbe_hw *hw = &adapter->hw; 4805 u32 link_speed = adapter->link_speed; 4806 bool link_up = adapter->link_up; 4807 int i; 4808 struct ixgbe_ring *tx_ring; 4809 int some_tx_pending = 0; 4810 4811 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK; 4812 4813 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { 4814 hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 4815 if (link_up) { 4816#ifdef CONFIG_DCB 4817 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 4818 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) 4819 hw->mac.ops.fc_enable(hw, i); 4820 } else { 4821 hw->mac.ops.fc_enable(hw, 0); 4822 } 4823#else 4824 hw->mac.ops.fc_enable(hw, 0); 4825#endif 4826 } 4827 4828 if (link_up || 4829 time_after(jiffies, (adapter->link_check_timeout + 4830 IXGBE_TRY_LINK_TIMEOUT))) { 4831 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; 4832 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC); 4833 } 4834 adapter->link_up = link_up; 4835 adapter->link_speed = link_speed; 4836 } 4837 4838 if (link_up) { 4839 if (!netif_carrier_ok(netdev)) { 4840 bool flow_rx, flow_tx; 4841 4842 if (hw->mac.type == ixgbe_mac_82599EB) { 4843 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); 4844 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 4845 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE); 4846 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X); 4847 } else { 4848 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4849 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS); 4850 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE); 4851 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); 4852 } 4853 4854 printk(KERN_INFO "ixgbe: %s NIC Link is Up %s, " 4855 "Flow Control: %s\n", 4856 netdev->name, 4857 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? 4858 "10 Gbps" : 4859 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? 4860 "1 Gbps" : "unknown speed")), 4861 ((flow_rx && flow_tx) ? "RX/TX" : 4862 (flow_rx ? "RX" : 4863 (flow_tx ? 
"TX" : "None")))); 4864 4865 netif_carrier_on(netdev); 4866 } else { 4867 /* Force detection of hung controller */ 4868 adapter->detect_tx_hung = true; 4869 } 4870 } else { 4871 adapter->link_up = false; 4872 adapter->link_speed = 0; 4873 if (netif_carrier_ok(netdev)) { 4874 printk(KERN_INFO "ixgbe: %s NIC Link is Down\n", 4875 netdev->name); 4876 netif_carrier_off(netdev); 4877 } 4878 } 4879 4880 if (!netif_carrier_ok(netdev)) { 4881 for (i = 0; i < adapter->num_tx_queues; i++) { 4882 tx_ring = &adapter->tx_ring[i]; 4883 if (tx_ring->next_to_use != tx_ring->next_to_clean) { 4884 some_tx_pending = 1; 4885 break; 4886 } 4887 } 4888 4889 if (some_tx_pending) { 4890 /* We've lost link, so the controller stops DMA, 4891 * but we've got queued Tx work that's never going 4892 * to get done, so reset controller to flush Tx. 4893 * (Do the reset outside of interrupt context). 4894 */ 4895 schedule_work(&adapter->reset_task); 4896 } 4897 } 4898 4899 ixgbe_update_stats(adapter); 4900 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK; 4901} 4902 4903static int ixgbe_tso(struct ixgbe_adapter *adapter, 4904 struct ixgbe_ring *tx_ring, struct sk_buff *skb, 4905 u32 tx_flags, u8 *hdr_len) 4906{ 4907 struct ixgbe_adv_tx_context_desc *context_desc; 4908 unsigned int i; 4909 int err; 4910 struct ixgbe_tx_buffer *tx_buffer_info; 4911 u32 vlan_macip_lens = 0, type_tucmd_mlhl; 4912 u32 mss_l4len_idx, l4len; 4913 4914 if (skb_is_gso(skb)) { 4915 if (skb_header_cloned(skb)) { 4916 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 4917 if (err) 4918 return err; 4919 } 4920 l4len = tcp_hdrlen(skb); 4921 *hdr_len += l4len; 4922 4923 if (skb->protocol == htons(ETH_P_IP)) { 4924 struct iphdr *iph = ip_hdr(skb); 4925 iph->tot_len = 0; 4926 iph->check = 0; 4927 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 4928 iph->daddr, 0, 4929 IPPROTO_TCP, 4930 0); 4931 } else if (skb_is_gso_v6(skb)) { 4932 ipv6_hdr(skb)->payload_len = 0; 4933 tcp_hdr(skb)->check = 4934 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 4935 &ipv6_hdr(skb)->daddr, 4936 0, IPPROTO_TCP, 0); 4937 } 4938 4939 i = tx_ring->next_to_use; 4940 4941 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 4942 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i); 4943 4944 /* VLAN MACLEN IPLEN */ 4945 if (tx_flags & IXGBE_TX_FLAGS_VLAN) 4946 vlan_macip_lens |= 4947 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); 4948 vlan_macip_lens |= ((skb_network_offset(skb)) << 4949 IXGBE_ADVTXD_MACLEN_SHIFT); 4950 *hdr_len += skb_network_offset(skb); 4951 vlan_macip_lens |= 4952 (skb_transport_header(skb) - skb_network_header(skb)); 4953 *hdr_len += 4954 (skb_transport_header(skb) - skb_network_header(skb)); 4955 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 4956 context_desc->seqnum_seed = 0; 4957 4958 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 4959 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT | 4960 IXGBE_ADVTXD_DTYP_CTXT); 4961 4962 if (skb->protocol == htons(ETH_P_IP)) 4963 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; 4964 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; 4965 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); 4966 4967 /* MSS L4LEN IDX */ 4968 mss_l4len_idx = 4969 (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT); 4970 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT); 4971 /* use index 1 for TSO */ 4972 mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT); 4973 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); 4974 4975 tx_buffer_info->time_stamp = jiffies; 4976 tx_buffer_info->next_to_watch = i; 4977 4978 i++; 4979 if (i == tx_ring->count) 4980 i = 0; 4981 
tx_ring->next_to_use = i; 4982 4983 return true; 4984 } 4985 return false; 4986} 4987 4988static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, 4989 struct ixgbe_ring *tx_ring, 4990 struct sk_buff *skb, u32 tx_flags) 4991{ 4992 struct ixgbe_adv_tx_context_desc *context_desc; 4993 unsigned int i; 4994 struct ixgbe_tx_buffer *tx_buffer_info; 4995 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; 4996 4997 if (skb->ip_summed == CHECKSUM_PARTIAL || 4998 (tx_flags & IXGBE_TX_FLAGS_VLAN)) { 4999 i = tx_ring->next_to_use; 5000 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 5001 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i); 5002 5003 if (tx_flags & IXGBE_TX_FLAGS_VLAN) 5004 vlan_macip_lens |= 5005 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); 5006 vlan_macip_lens |= (skb_network_offset(skb) << 5007 IXGBE_ADVTXD_MACLEN_SHIFT); 5008 if (skb->ip_summed == CHECKSUM_PARTIAL) 5009 vlan_macip_lens |= (skb_transport_header(skb) - 5010 skb_network_header(skb)); 5011 5012 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 5013 context_desc->seqnum_seed = 0; 5014 5015 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT | 5016 IXGBE_ADVTXD_DTYP_CTXT); 5017 5018 if (skb->ip_summed == CHECKSUM_PARTIAL) { 5019 __be16 protocol; 5020 5021 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) { 5022 const struct vlan_ethhdr *vhdr = 5023 (const struct vlan_ethhdr *)skb->data; 5024 5025 protocol = vhdr->h_vlan_encapsulated_proto; 5026 } else { 5027 protocol = skb->protocol; 5028 } 5029 5030 switch (protocol) { 5031 case cpu_to_be16(ETH_P_IP): 5032 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; 5033 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 5034 type_tucmd_mlhl |= 5035 IXGBE_ADVTXD_TUCMD_L4T_TCP; 5036 else if (ip_hdr(skb)->protocol == IPPROTO_SCTP) 5037 type_tucmd_mlhl |= 5038 IXGBE_ADVTXD_TUCMD_L4T_SCTP; 5039 break; 5040 case cpu_to_be16(ETH_P_IPV6): 5041 /* XXX what about other V6 headers?? 
*/ 5042 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 5043 type_tucmd_mlhl |= 5044 IXGBE_ADVTXD_TUCMD_L4T_TCP; 5045 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP) 5046 type_tucmd_mlhl |= 5047 IXGBE_ADVTXD_TUCMD_L4T_SCTP; 5048 break; 5049 default: 5050 if (unlikely(net_ratelimit())) { 5051 DPRINTK(PROBE, WARNING, 5052 "partial checksum but proto=%x!\n", 5053 skb->protocol); 5054 } 5055 break; 5056 } 5057 } 5058 5059 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); 5060 /* use index zero for tx checksum offload */ 5061 context_desc->mss_l4len_idx = 0; 5062 5063 tx_buffer_info->time_stamp = jiffies; 5064 tx_buffer_info->next_to_watch = i; 5065 5066 i++; 5067 if (i == tx_ring->count) 5068 i = 0; 5069 tx_ring->next_to_use = i; 5070 5071 return true; 5072 } 5073 5074 return false; 5075} 5076 5077static int ixgbe_tx_map(struct ixgbe_adapter *adapter, 5078 struct ixgbe_ring *tx_ring, 5079 struct sk_buff *skb, u32 tx_flags, 5080 unsigned int first) 5081{ 5082 struct pci_dev *pdev = adapter->pdev; 5083 struct ixgbe_tx_buffer *tx_buffer_info; 5084 unsigned int len; 5085 unsigned int total = skb->len; 5086 unsigned int offset = 0, size, count = 0, i; 5087 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 5088 unsigned int f; 5089 5090 i = tx_ring->next_to_use; 5091 5092 if (tx_flags & IXGBE_TX_FLAGS_FCOE) 5093 /* excluding fcoe_crc_eof for FCoE */ 5094 total -= sizeof(struct fcoe_crc_eof); 5095 5096 len = min(skb_headlen(skb), total); 5097 while (len) { 5098 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 5099 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); 5100 5101 tx_buffer_info->length = size; 5102 tx_buffer_info->mapped_as_page = false; 5103 tx_buffer_info->dma = pci_map_single(pdev, 5104 skb->data + offset, 5105 size, PCI_DMA_TODEVICE); 5106 if (pci_dma_mapping_error(pdev, tx_buffer_info->dma)) 5107 goto dma_error; 5108 tx_buffer_info->time_stamp = jiffies; 5109 tx_buffer_info->next_to_watch = i; 5110 5111 len -= size; 5112 total -= size; 5113 offset += size; 5114 count++; 5115 5116 if (len) { 5117 i++; 5118 if (i == tx_ring->count) 5119 i = 0; 5120 } 5121 } 5122 5123 for (f = 0; f < nr_frags; f++) { 5124 struct skb_frag_struct *frag; 5125 5126 frag = &skb_shinfo(skb)->frags[f]; 5127 len = min((unsigned int)frag->size, total); 5128 offset = frag->page_offset; 5129 5130 while (len) { 5131 i++; 5132 if (i == tx_ring->count) 5133 i = 0; 5134 5135 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 5136 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); 5137 5138 tx_buffer_info->length = size; 5139 tx_buffer_info->dma = pci_map_page(adapter->pdev, 5140 frag->page, 5141 offset, size, 5142 PCI_DMA_TODEVICE); 5143 tx_buffer_info->mapped_as_page = true; 5144 if (pci_dma_mapping_error(pdev, tx_buffer_info->dma)) 5145 goto dma_error; 5146 tx_buffer_info->time_stamp = jiffies; 5147 tx_buffer_info->next_to_watch = i; 5148 5149 len -= size; 5150 total -= size; 5151 offset += size; 5152 count++; 5153 } 5154 if (total == 0) 5155 break; 5156 } 5157 5158 tx_ring->tx_buffer_info[i].skb = skb; 5159 tx_ring->tx_buffer_info[first].next_to_watch = i; 5160 5161 return count; 5162 5163dma_error: 5164 dev_err(&pdev->dev, "TX DMA map failed\n"); 5165 5166 /* clear timestamp and dma mappings for failed tx_buffer_info map */ 5167 tx_buffer_info->dma = 0; 5168 tx_buffer_info->time_stamp = 0; 5169 tx_buffer_info->next_to_watch = 0; 5170 if (count) 5171 count--; 5172 5173 /* clear timestamp and dma mappings for remaining portion of packet */ 5174 while (count--) { 5175 if (i==0) 5176 i += tx_ring->count; 5177 i--; 5178 
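/* i has been stepped back one descriptor (wrapping at the ring start), so release this buffer's DMA mapping before continuing the walk */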
tx_buffer_info = &tx_ring->tx_buffer_info[i]; 5179 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); 5180 } 5181 5182 return 0; 5183} 5184 5185static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, 5186 struct ixgbe_ring *tx_ring, 5187 int tx_flags, int count, u32 paylen, u8 hdr_len) 5188{ 5189 union ixgbe_adv_tx_desc *tx_desc = NULL; 5190 struct ixgbe_tx_buffer *tx_buffer_info; 5191 u32 olinfo_status = 0, cmd_type_len = 0; 5192 unsigned int i; 5193 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS; 5194 5195 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA; 5196 5197 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT; 5198 5199 if (tx_flags & IXGBE_TX_FLAGS_VLAN) 5200 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE; 5201 5202 if (tx_flags & IXGBE_TX_FLAGS_TSO) { 5203 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; 5204 5205 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 5206 IXGBE_ADVTXD_POPTS_SHIFT; 5207 5208 /* use index 1 context for tso */ 5209 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); 5210 if (tx_flags & IXGBE_TX_FLAGS_IPV4) 5211 olinfo_status |= IXGBE_TXD_POPTS_IXSM << 5212 IXGBE_ADVTXD_POPTS_SHIFT; 5213 5214 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM) 5215 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 5216 IXGBE_ADVTXD_POPTS_SHIFT; 5217 5218 if (tx_flags & IXGBE_TX_FLAGS_FCOE) { 5219 olinfo_status |= IXGBE_ADVTXD_CC; 5220 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); 5221 if (tx_flags & IXGBE_TX_FLAGS_FSO) 5222 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; 5223 } 5224 5225 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); 5226 5227 i = tx_ring->next_to_use; 5228 while (count--) { 5229 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 5230 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); 5231 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); 5232 tx_desc->read.cmd_type_len = 5233 cpu_to_le32(cmd_type_len | tx_buffer_info->length); 5234 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 5235 i++; 5236 if (i == tx_ring->count) 5237 i = 0; 5238 } 5239 5240 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd); 5241 5242 /* 5243 * Force memory writes to complete before letting h/w 5244 * know there are new descriptors to fetch. (Only 5245 * applicable for weak-ordered memory model archs, 5246 * such as IA-64). 
5247 */ 5248 wmb(); 5249 5250 tx_ring->next_to_use = i; 5251 writel(i, adapter->hw.hw_addr + tx_ring->tail); 5252} 5253 5254static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, 5255 int queue, u32 tx_flags) 5256{ 5257 /* Right now, we support IPv4 only */ 5258 struct ixgbe_atr_input atr_input; 5259 struct tcphdr *th; 5260 struct iphdr *iph = ip_hdr(skb); 5261 struct ethhdr *eth = (struct ethhdr *)skb->data; 5262 u16 vlan_id, src_port, dst_port, flex_bytes; 5263 u32 src_ipv4_addr, dst_ipv4_addr; 5264 u8 l4type = 0; 5265 5266 /* check if we're UDP or TCP */ 5267 if (iph->protocol == IPPROTO_TCP) { 5268 th = tcp_hdr(skb); 5269 src_port = th->source; 5270 dst_port = th->dest; 5271 l4type |= IXGBE_ATR_L4TYPE_TCP; 5272 /* l4type IPv4 type is 0, no need to assign */ 5273 } else { 5274 /* Unsupported L4 header, just bail here */ 5275 return; 5276 } 5277 5278 memset(&atr_input, 0, sizeof(struct ixgbe_atr_input)); 5279 5280 vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >> 5281 IXGBE_TX_FLAGS_VLAN_SHIFT; 5282 src_ipv4_addr = iph->saddr; 5283 dst_ipv4_addr = iph->daddr; 5284 flex_bytes = eth->h_proto; 5285 5286 ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id); 5287 ixgbe_atr_set_src_port_82599(&atr_input, dst_port); 5288 ixgbe_atr_set_dst_port_82599(&atr_input, src_port); 5289 ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes); 5290 ixgbe_atr_set_l4type_82599(&atr_input, l4type); 5291 /* src and dst are inverted, think how the receiver sees them */ 5292 ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr); 5293 ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr); 5294 5295 /* This assumes the Rx queue and Tx queue are bound to the same CPU */ 5296 ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue); 5297} 5298 5299static int __ixgbe_maybe_stop_tx(struct net_device *netdev, 5300 struct ixgbe_ring *tx_ring, int size) 5301{ 5302 netif_stop_subqueue(netdev, tx_ring->queue_index); 5303 /* Herbert's original patch had: 5304 * smp_mb__after_netif_stop_queue(); 5305 * but since that doesn't exist yet, just open code it. */ 5306 smp_mb(); 5307 5308 /* We need to check again in a case another CPU has just 5309 * made room available. */ 5310 if (likely(IXGBE_DESC_UNUSED(tx_ring) < size)) 5311 return -EBUSY; 5312 5313 /* A reprieve! 
- use start_queue because it doesn't call schedule */ 5314 netif_start_subqueue(netdev, tx_ring->queue_index); 5315 ++tx_ring->restart_queue; 5316 return 0; 5317} 5318 5319static int ixgbe_maybe_stop_tx(struct net_device *netdev, 5320 struct ixgbe_ring *tx_ring, int size) 5321{ 5322 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) 5323 return 0; 5324 return __ixgbe_maybe_stop_tx(netdev, tx_ring, size); 5325} 5326 5327static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) 5328{ 5329 struct ixgbe_adapter *adapter = netdev_priv(dev); 5330 int txq = smp_processor_id(); 5331 5332 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { 5333 while (unlikely(txq >= dev->real_num_tx_queues)) 5334 txq -= dev->real_num_tx_queues; 5335 return txq; 5336 } 5337 5338#ifdef IXGBE_FCOE 5339 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && 5340 (skb->protocol == htons(ETH_P_FCOE))) { 5341 txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); 5342 txq += adapter->ring_feature[RING_F_FCOE].mask; 5343 return txq; 5344 } 5345#endif 5346 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) 5347 return (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) >> 13; 5348 5349 return skb_tx_hash(dev, skb); 5350} 5351 5352static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, 5353 struct net_device *netdev) 5354{ 5355 struct ixgbe_adapter *adapter = netdev_priv(netdev); 5356 struct ixgbe_ring *tx_ring; 5357 struct netdev_queue *txq; 5358 unsigned int first; 5359 unsigned int tx_flags = 0; 5360 u8 hdr_len = 0; 5361 int tso; 5362 int count = 0; 5363 unsigned int f; 5364 5365 if (adapter->vlgrp && vlan_tx_tag_present(skb)) { 5366 tx_flags |= vlan_tx_tag_get(skb); 5367 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 5368 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK; 5369 tx_flags |= ((skb->queue_mapping & 0x7) << 13); 5370 } 5371 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 5372 tx_flags |= IXGBE_TX_FLAGS_VLAN; 5373 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 5374 if (skb->priority != TC_PRIO_CONTROL) { 5375 tx_flags |= ((skb->queue_mapping & 0x7) << 13); 5376 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 5377 tx_flags |= IXGBE_TX_FLAGS_VLAN; 5378 } else { 5379 skb->queue_mapping = 5380 adapter->ring_feature[RING_F_DCB].indices-1; 5381 } 5382 } 5383 5384 tx_ring = &adapter->tx_ring[skb->queue_mapping]; 5385 5386 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && 5387 (skb->protocol == htons(ETH_P_FCOE))) { 5388 tx_flags |= IXGBE_TX_FLAGS_FCOE; 5389#ifdef IXGBE_FCOE 5390#ifdef CONFIG_IXGBE_DCB 5391 tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK 5392 << IXGBE_TX_FLAGS_VLAN_SHIFT); 5393 tx_flags |= ((adapter->fcoe.up << 13) 5394 << IXGBE_TX_FLAGS_VLAN_SHIFT); 5395#endif 5396#endif 5397 } 5398 /* four things can cause us to need a context descriptor */ 5399 if (skb_is_gso(skb) || 5400 (skb->ip_summed == CHECKSUM_PARTIAL) || 5401 (tx_flags & IXGBE_TX_FLAGS_VLAN) || 5402 (tx_flags & IXGBE_TX_FLAGS_FCOE)) 5403 count++; 5404 5405 count += TXD_USE_COUNT(skb_headlen(skb)); 5406 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 5407 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 5408 5409 if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) { 5410 adapter->tx_busy++; 5411 return NETDEV_TX_BUSY; 5412 } 5413 5414 first = tx_ring->next_to_use; 5415 if (tx_flags & IXGBE_TX_FLAGS_FCOE) { 5416#ifdef IXGBE_FCOE 5417 /* setup tx offload for FCoE */ 5418 tso = ixgbe_fso(adapter, tx_ring, skb, tx_flags, &hdr_len); 5419 if (tso < 0) { 5420 dev_kfree_skb_any(skb); 5421 return NETDEV_TX_OK; 5422 } 5423 if (tso) 5424 tx_flags |= 
IXGBE_TX_FLAGS_FSO; 5425#endif /* IXGBE_FCOE */ 5426 } else { 5427 if (skb->protocol == htons(ETH_P_IP)) 5428 tx_flags |= IXGBE_TX_FLAGS_IPV4; 5429 tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len); 5430 if (tso < 0) { 5431 dev_kfree_skb_any(skb); 5432 return NETDEV_TX_OK; 5433 } 5434 5435 if (tso) 5436 tx_flags |= IXGBE_TX_FLAGS_TSO; 5437 else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) && 5438 (skb->ip_summed == CHECKSUM_PARTIAL)) 5439 tx_flags |= IXGBE_TX_FLAGS_CSUM; 5440 } 5441 5442 count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first); 5443 if (count) { 5444 /* add the ATR filter if ATR is on */ 5445 if (tx_ring->atr_sample_rate) { 5446 ++tx_ring->atr_count; 5447 if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) && 5448 test_bit(__IXGBE_FDIR_INIT_DONE, 5449 &tx_ring->reinit_state)) { 5450 ixgbe_atr(adapter, skb, tx_ring->queue_index, 5451 tx_flags); 5452 tx_ring->atr_count = 0; 5453 } 5454 } 5455 txq = netdev_get_tx_queue(netdev, tx_ring->queue_index); 5456 txq->tx_bytes += skb->len; 5457 txq->tx_packets++; 5458 ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len, 5459 hdr_len); 5460 ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED); 5461 5462 } else { 5463 dev_kfree_skb_any(skb); 5464 tx_ring->tx_buffer_info[first].time_stamp = 0; 5465 tx_ring->next_to_use = first; 5466 } 5467 5468 return NETDEV_TX_OK; 5469} 5470 5471/** 5472 * ixgbe_set_mac - Change the Ethernet Address of the NIC 5473 * @netdev: network interface device structure 5474 * @p: pointer to an address structure 5475 * 5476 * Returns 0 on success, negative on failure 5477 **/ 5478static int ixgbe_set_mac(struct net_device *netdev, void *p) 5479{ 5480 struct ixgbe_adapter *adapter = netdev_priv(netdev); 5481 struct ixgbe_hw *hw = &adapter->hw; 5482 struct sockaddr *addr = p; 5483 5484 if (!is_valid_ether_addr(addr->sa_data)) 5485 return -EADDRNOTAVAIL; 5486 5487 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 5488 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 5489 5490 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 5491 5492 return 0; 5493} 5494 5495static int 5496ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr) 5497{ 5498 struct ixgbe_adapter *adapter = netdev_priv(netdev); 5499 struct ixgbe_hw *hw = &adapter->hw; 5500 u16 value; 5501 int rc; 5502 5503 if (prtad != hw->phy.mdio.prtad) 5504 return -EINVAL; 5505 rc = hw->phy.ops.read_reg(hw, addr, devad, &value); 5506 if (!rc) 5507 rc = value; 5508 return rc; 5509} 5510 5511static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad, 5512 u16 addr, u16 value) 5513{ 5514 struct ixgbe_adapter *adapter = netdev_priv(netdev); 5515 struct ixgbe_hw *hw = &adapter->hw; 5516 5517 if (prtad != hw->phy.mdio.prtad) 5518 return -EINVAL; 5519 return hw->phy.ops.write_reg(hw, addr, devad, value); 5520} 5521 5522static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) 5523{ 5524 struct ixgbe_adapter *adapter = netdev_priv(netdev); 5525 5526 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); 5527} 5528 5529/** 5530 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding 5531 * netdev->dev_addrs 5532 * @netdev: network interface device structure 5533 * 5534 * Returns non-zero on failure 5535 **/ 5536static int ixgbe_add_sanmac_netdev(struct net_device *dev) 5537{ 5538 int err = 0; 5539 struct ixgbe_adapter *adapter = netdev_priv(dev); 5540 struct ixgbe_mac_info *mac = &adapter->hw.mac; 5541 5542 if (is_valid_ether_addr(mac->san_addr)) 
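/* the SAN MAC is a separate FCoE address read from the EEPROM; dev_addr_add() must be called with the rtnl lock held */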
{ 5543 rtnl_lock(); 5544 err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN); 5545 rtnl_unlock(); 5546 } 5547 return err; 5548} 5549 5550/** 5551 * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding 5552 * netdev->dev_addrs 5553 * @dev: network interface device structure 5554 * 5555 * Returns non-zero on failure 5556 **/ 5557static int ixgbe_del_sanmac_netdev(struct net_device *dev) 5558{ 5559 int err = 0; 5560 struct ixgbe_adapter *adapter = netdev_priv(dev); 5561 struct ixgbe_mac_info *mac = &adapter->hw.mac; 5562 5563 if (is_valid_ether_addr(mac->san_addr)) { 5564 rtnl_lock(); 5565 err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN); 5566 rtnl_unlock(); 5567 } 5568 return err; 5569} 5570 5571#ifdef CONFIG_NET_POLL_CONTROLLER 5572/* 5573 * Polling 'interrupt' - used by things like netconsole to send skbs 5574 * without having to re-enable interrupts. It's not called while 5575 * the interrupt routine is executing. 5576 */ 5577static void ixgbe_netpoll(struct net_device *netdev) 5578{ 5579 struct ixgbe_adapter *adapter = netdev_priv(netdev); 5580 int i; 5581 5582 /* if interface is down do nothing */ 5583 if (test_bit(__IXGBE_DOWN, &adapter->state)) 5584 return; 5585 5586 adapter->flags |= IXGBE_FLAG_IN_NETPOLL; 5587 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 5588 int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 5589 for (i = 0; i < num_q_vectors; i++) { 5590 struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; 5591 ixgbe_msix_clean_many(0, q_vector); 5592 } 5593 } else { 5594 ixgbe_intr(adapter->pdev->irq, netdev); 5595 } 5596 adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; 5597} 5598#endif 5599 5600static const struct net_device_ops ixgbe_netdev_ops = { 5601 .ndo_open = ixgbe_open, 5602 .ndo_stop = ixgbe_close, 5603 .ndo_start_xmit = ixgbe_xmit_frame, 5604 .ndo_select_queue = ixgbe_select_queue, 5605 .ndo_set_rx_mode = ixgbe_set_rx_mode, 5606 .ndo_set_multicast_list = ixgbe_set_rx_mode, 5607 .ndo_validate_addr = eth_validate_addr, 5608 .ndo_set_mac_address = ixgbe_set_mac, 5609 .ndo_change_mtu = ixgbe_change_mtu, 5610 .ndo_tx_timeout = ixgbe_tx_timeout, 5611 .ndo_vlan_rx_register = ixgbe_vlan_rx_register, 5612 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, 5613 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, 5614 .ndo_do_ioctl = ixgbe_ioctl, 5615#ifdef CONFIG_NET_POLL_CONTROLLER 5616 .ndo_poll_controller = ixgbe_netpoll, 5617#endif 5618#ifdef IXGBE_FCOE 5619 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, 5620 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put, 5621 .ndo_fcoe_enable = ixgbe_fcoe_enable, 5622 .ndo_fcoe_disable = ixgbe_fcoe_disable, 5623 .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn, 5624#endif /* IXGBE_FCOE */ 5625}; 5626 5627/** 5628 * ixgbe_probe - Device Initialization Routine 5629 * @pdev: PCI device information struct 5630 * @ent: entry in ixgbe_pci_tbl 5631 * 5632 * Returns 0 on success, negative on failure 5633 * 5634 * ixgbe_probe initializes an adapter identified by a pci_dev structure. 5635 * The OS initialization, configuring of the adapter private structure, 5636 * and a hardware reset occur.
/**
 * ixgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit ixgbe_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbe_adapter *adapter = NULL;
	struct ixgbe_hw *hw;
	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
	static int cards_found;
	int i, err, pci_using_dac;
#ifdef IXGBE_FCOE
	u16 device_caps;
#endif
	u32 part_num, eec;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* prefer 64-bit DMA; fall back to a 32-bit mask if that fails */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
					   IORESOURCE_MEM), ixgbe_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_selected_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	/* this loop walks BARs 1-5 but its body is empty, so it has no effect */
	for (i = 1; i <= 5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
	}

	netdev->netdev_ops = &ixgbe_netdev_ops;
	ixgbe_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	strcpy(netdev->name, pci_name(pdev));

	adapter->bd_number = cards_found;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	/* EEPROM */
	memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
	/* If EEPROM is valid (bit 8 = 1), use default; otherwise use bit bang */
	if (!(eec & (1 << 8)))
		hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;

	/* PHY */
	memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
	hw->phy.sfp_type = ixgbe_sfp_type_unknown;
	/* ixgbe_identify_phy_generic will set prtad and mmds properly */
	hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
	hw->phy.mdio.mmds = 0;
	hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	hw->phy.mdio.dev = netdev;
	hw->phy.mdio.mdio_read = ixgbe_mdio_read;
	hw->phy.mdio.mdio_write = ixgbe_mdio_write;
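	/*
	 * The PHYs here are clause-45 MDIO devices; MDIO_EMULATE_C22
	 * additionally lets the mdio layer service legacy clause-22 MII
	 * ioctls by translating them onto clause-45 register accesses.
	 */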
	/* set up this timer and work struct before calling get_invariants
	 * which might start the timer
	 */
	init_timer(&adapter->sfp_timer);
	adapter->sfp_timer.function = &ixgbe_sfp_timer;
	adapter->sfp_timer.data = (unsigned long) adapter;

	INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task);

	/* multispeed fiber has its own tasklet, called from GPI SDP1 context */
	INIT_WORK(&adapter->multispeed_fiber_task, ixgbe_multispeed_fiber_task);

	/* handles a new SFP+ module arrival, called from GPI SDP2 context */
	INIT_WORK(&adapter->sfp_config_module_task,
		  ixgbe_sfp_config_module_task);

	ii->get_invariants(hw);

	/* setup the private structure */
	err = ixgbe_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/*
	 * If there is a fan on this device and it has failed log the
	 * failure.
	 */
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		if (esdp & IXGBE_ESDP_SDP1)
			DPRINTK(PROBE, CRIT,
				"Fan has stopped, replace the adapter\n");
	}

	/* reset_hw fills in the perm_addr as well */
	err = hw->mac.ops.reset_hw(hw);
	if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
	    hw->mac.type == ixgbe_mac_82598EB) {
		/*
		 * Start a kernel thread to watch for a module to arrive.
		 * Only do this for 82598, since 82599 will generate
		 * interrupts on module arrival.
		 */
		set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
		mod_timer(&adapter->sfp_timer,
			  round_jiffies(jiffies + (2 * HZ)));
		err = 0;
	} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		dev_err(&adapter->pdev->dev, "failed to initialize because "
			"an unsupported SFP+ module type was detected.\n"
			"Reload the driver after installing a supported "
			"module.\n");
		goto err_sw_init;
	} else if (err) {
		dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
		goto err_sw_init;
	}

	netdev->features = NETIF_F_SG |
			   NETIF_F_IP_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;
	netdev->features |= NETIF_F_GRO;

	if (adapter->hw.mac.type == ixgbe_mac_82599EB)
		netdev->features |= NETIF_F_SCTP_CSUM;

	/* mirror the offloads into vlan_features so they apply to VLANs too */
	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;

#ifdef CONFIG_IXGBE_DCB
	netdev->dcbnl_ops = &dcbnl_ops;
#endif

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
		if (hw->mac.ops.get_device_caps) {
			hw->mac.ops.get_device_caps(hw, &device_caps);
			if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
				adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
		}
	}
#endif /* IXGBE_FCOE */
	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
		netdev->features |= NETIF_F_LRO;

	/* make sure the EEPROM is good */
	if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
		dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);

	if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "invalid MAC address\n");
		err = -EIO;
		goto err_eeprom;
	}

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &ixgbe_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;
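	/*
	 * The timer callback (ixgbe_watchdog, defined earlier in this file)
	 * stays minimal in timer context and defers the heavier link and
	 * statistics maintenance to the watchdog_task work item initialized
	 * below.
	 */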
	INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
	INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task);

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	switch (pdev->device) {
	case IXGBE_DEV_ID_82599_KX4:
		adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
				IXGBE_WUFC_MC | IXGBE_WUFC_BC);
		/* Enable ACPI wakeup in GRC */
		IXGBE_WRITE_REG(hw, IXGBE_GRC,
				(IXGBE_READ_REG(hw, IXGBE_GRC) & ~IXGBE_GRC_APME));
		break;
	default:
		adapter->wol = 0;
		break;
	}
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* pick up the PCI bus settings for reporting later */
	hw->mac.ops.get_bus_info(hw);

	/* print bus type/speed/width info */
	dev_info(&pdev->dev, "(PCI Express:%s:%s) %pM\n",
		 ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s" :
		  (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s" : "Unknown"),
		 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
		  (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
		  (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
		  "Unknown"),
		 netdev->dev_addr);
	ixgbe_read_pba_num_generic(hw, &part_num);
	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
		dev_info(&pdev->dev, "MAC: %d, PHY: %d, SFP+: %d, PBA No: %06x-%03x\n",
			 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
			 (part_num >> 8), (part_num & 0xff));
	else
		dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
			 hw->mac.type, hw->phy.type,
			 (part_num >> 8), (part_num & 0xff));

	if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
		dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
			 "this card is not sufficient for optimal "
			 "performance.\n");
		dev_warn(&pdev->dev, "For optimal performance a x8 "
			 "PCI-Express slot is required.\n");
	}
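	/*
	 * Rough arithmetic behind the warning above: a 2.5 GT/s x4 link
	 * carries 4 lanes * 2.5 GT/s * 8b/10b encoding = ~8 Gb/s of usable
	 * bandwidth, which is below the 10 Gb/s line rate of a single port.
	 */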
If you are " 5925 "experiencing problems please contact your Intel or " 5926 "hardware representative who provided you with this " 5927 "hardware.\n"); 5928 } 5929 strcpy(netdev->name, "eth%d"); 5930 err = register_netdev(netdev); 5931 if (err) 5932 goto err_register; 5933 5934 /* carrier off reporting is important to ethtool even BEFORE open */ 5935 netif_carrier_off(netdev); 5936 5937 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || 5938 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) 5939 INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task); 5940 5941#ifdef CONFIG_IXGBE_DCA 5942 if (dca_add_requester(&pdev->dev) == 0) { 5943 adapter->flags |= IXGBE_FLAG_DCA_ENABLED; 5944 ixgbe_setup_dca(adapter); 5945 } 5946#endif 5947 /* add san mac addr to netdev */ 5948 ixgbe_add_sanmac_netdev(netdev); 5949 5950 dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n"); 5951 cards_found++; 5952 return 0; 5953 5954err_register: 5955 ixgbe_release_hw_control(adapter); 5956 ixgbe_clear_interrupt_scheme(adapter); 5957err_sw_init: 5958err_eeprom: 5959 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); 5960 del_timer_sync(&adapter->sfp_timer); 5961 cancel_work_sync(&adapter->sfp_task); 5962 cancel_work_sync(&adapter->multispeed_fiber_task); 5963 cancel_work_sync(&adapter->sfp_config_module_task); 5964 iounmap(hw->hw_addr); 5965err_ioremap: 5966 free_netdev(netdev); 5967err_alloc_etherdev: 5968 pci_release_selected_regions(pdev, pci_select_bars(pdev, 5969 IORESOURCE_MEM)); 5970err_pci_reg: 5971err_dma: 5972 pci_disable_device(pdev); 5973 return err; 5974} 5975 5976/** 5977 * ixgbe_remove - Device Removal Routine 5978 * @pdev: PCI device information struct 5979 * 5980 * ixgbe_remove is called by the PCI subsystem to alert the driver 5981 * that it should release a PCI device. The could be caused by a 5982 * Hot-Plug event, or because the driver is going to be removed from 5983 * memory. 
/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbe_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBE_DOWN, &adapter->state);
	/* clear the module not found bit to make sure the worker won't
	 * reschedule
	 */
	clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);

	del_timer_sync(&adapter->sfp_timer);
	cancel_work_sync(&adapter->watchdog_task);
	cancel_work_sync(&adapter->sfp_task);
	cancel_work_sync(&adapter->multispeed_fiber_task);
	cancel_work_sync(&adapter->sfp_config_module_task);
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
		cancel_work_sync(&adapter->fdir_reinit_task);
	flush_scheduled_work();

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
		dca_remove_requester(&pdev->dev);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
	}

#endif
#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
		ixgbe_cleanup_fcoe(adapter);

#endif /* IXGBE_FCOE */

	/* remove the added san mac */
	ixgbe_del_sanmac_netdev(netdev);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbe_clear_interrupt_scheme(adapter);

	ixgbe_release_hw_control(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
				     IORESOURCE_MEM));

	DPRINTK(PROBE, INFO, "complete\n");

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgbe_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
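/*
 * PCI error recovery walks the callbacks below in order: error_detected()
 * quiesces the device, slot_reset() brings it back up after the bus has
 * been reset, and resume() restarts traffic once recovery is complete.
 */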
/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		DPRINTK(PROBE, ERR,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		/* restore config space, then save it again so a later
		 * reset can restore it once more
		 */
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_wake_from_d3(pdev, false);

		ixgbe_reset(adapter);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void ixgbe_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (ixgbe_up(adapter)) {
			DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}

static struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};

static struct pci_driver ixgbe_driver = {
	.name     = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe    = ixgbe_probe,
	.remove   = __devexit_p(ixgbe_remove),
#ifdef CONFIG_PM
	.suspend  = ixgbe_suspend,
	.resume   = ixgbe_resume,
#endif
	.shutdown = ixgbe_shutdown,
	.err_handler = &ixgbe_err_handler
};

/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbe_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name,
	       ixgbe_driver_string, ixgbe_driver_version);

	printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);

#ifdef CONFIG_IXGBE_DCA
	dca_register_notify(&dca_notifier);
#endif

	ret = pci_register_driver(&ixgbe_driver);
	return ret;
}

module_init(ixgbe_init_module);

/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbe_exit_module(void)
{
#ifdef CONFIG_IXGBE_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&ixgbe_driver);
}
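/*
 * ixgbe_notify_dca() below backs the dca_notifier block registered in
 * ixgbe_init_module() above (the notifier_block itself is declared
 * earlier in this file); it fans each DCA provider event out to every
 * bound device via driver_for_each_device().
 */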
#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
			    void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
					 __ixgbe_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}

#endif /* CONFIG_IXGBE_DCA */
#ifdef DEBUG
/**
 * ixgbe_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 * @hw: pointer to the private hardware struct
 **/
char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

#endif
module_exit(ixgbe_exit_module);

/* ixgbe_main.c */