Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ixgbe: Add 82599 device IDs, hook them up into the main driver.

With the hardware-specific code in place, add all supported device IDs,
along with base driver changes to enable 82599 devices. The devices
being enabled are:

8086:10f7: 82599EB 10 Gigabit KX4 Network Connection
8086:10fb: 82599EB 10 Gigabit Network Connection

The device 8086:10fb is a fully-pluggable SFP+ NIC.

Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

PJ Waskiewicz and committed by
David S. Miller
e8e26350 235ea828

+561 -137
+3 -2
drivers/net/ixgbe/Makefile
··· 33 33 obj-$(CONFIG_IXGBE) += ixgbe.o 34 34 35 35 ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ 36 - ixgbe_82598.o ixgbe_phy.o 36 + ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o 37 37 38 - ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o ixgbe_dcb_nl.o 38 + ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ 39 + ixgbe_dcb_82599.o ixgbe_dcb_nl.o
+19 -10
drivers/net/ixgbe/ixgbe.h
··· 140 140 int cpu; 141 141 #endif 142 142 struct ixgbe_queue_stats stats; 143 - u16 v_idx; /* maps directly to the index for this ring in the hardware 144 - * vector array, can also be used for finding the bit in EICR 145 - * and friends that represents the vector for this ring */ 143 + u64 v_idx; /* maps directly to the index for this ring in the hardware 144 + * vector array, can also be used for finding the bit in EICR 145 + * and friends that represents the vector for this ring */ 146 146 147 147 148 148 u16 work_limit; /* max work per interrupt */ ··· 166 166 int mask; 167 167 }; 168 168 169 - #define MAX_RX_QUEUES 64 170 - #define MAX_TX_QUEUES 32 169 + #define MAX_RX_QUEUES 128 170 + #define MAX_TX_QUEUES 128 171 171 172 172 #define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \ 173 173 ? 8 : 1) ··· 211 211 #define OTHER_VECTOR 1 212 212 #define NON_Q_VECTORS (OTHER_VECTOR) 213 213 214 + #define MAX_MSIX_VECTORS_82599 64 215 + #define MAX_MSIX_Q_VECTORS_82599 64 214 216 #define MAX_MSIX_VECTORS_82598 18 215 217 #define MAX_MSIX_Q_VECTORS_82598 16 216 218 217 - #define MAX_MSIX_Q_VECTORS MAX_MSIX_Q_VECTORS_82598 218 - #define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82598 219 + #define MAX_MSIX_Q_VECTORS MAX_MSIX_Q_VECTORS_82599 220 + #define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599 219 221 220 222 #define MIN_MSIX_Q_VECTORS 2 221 223 #define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) ··· 229 227 u16 bd_number; 230 228 struct work_struct reset_task; 231 229 struct ixgbe_q_vector q_vector[MAX_MSIX_Q_VECTORS]; 232 - char name[MAX_MSIX_COUNT][IFNAMSIZ + 5]; 230 + char name[MAX_MSIX_COUNT][IFNAMSIZ + 9]; 233 231 struct ixgbe_dcb_config dcb_cfg; 234 232 struct ixgbe_dcb_config temp_dcb_cfg; 235 233 u8 dcb_set_bitmap; ··· 254 252 struct ixgbe_ring *rx_ring; /* One per active queue */ 255 253 int num_rx_queues; 256 254 u64 hw_csum_rx_error; 255 + u64 hw_rx_no_dma_resources; 257 256 u64 hw_csum_rx_good; 258 257 u64 non_eop_descs; 259 258 int 
num_msix_vectors; ··· 283 280 #define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 11) 284 281 #define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 12) 285 282 #define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 13) 283 + #define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 14) 286 284 #define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 16) 287 285 #define IXGBE_FLAG_RSS_CAPABLE (u32)(1 << 17) 288 286 #define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 18) ··· 291 287 #define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 20) 292 288 #define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22) 293 289 #define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 23) 294 - #define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 24) 290 + #define IXGBE_FLAG_IN_SFP_LINK_TASK (u32)(1 << 24) 291 + #define IXGBE_FLAG_IN_SFP_MOD_TASK (u32)(1 << 25) 295 292 296 293 /* default to trying for four seconds */ 297 294 #define IXGBE_TRY_LINK_TIMEOUT (4 * HZ) ··· 322 317 struct work_struct watchdog_task; 323 318 struct work_struct sfp_task; 324 319 struct timer_list sfp_timer; 325 - 320 + struct work_struct multispeed_fiber_task; 321 + struct work_struct sfp_config_module_task; 322 + u32 wol; 326 323 u16 eeprom_version; 327 324 }; 328 325 ··· 337 330 338 331 enum ixgbe_boards { 339 332 board_82598, 333 + board_82599, 340 334 }; 341 335 342 336 extern struct ixgbe_info ixgbe_82598_info; 337 + extern struct ixgbe_info ixgbe_82599_info; 343 338 #ifdef CONFIG_IXGBE_DCB 344 339 extern struct dcbnl_rtnl_ops dcbnl_ops; 345 340 extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
+1
drivers/net/ixgbe/ixgbe_ethtool.c
··· 89 89 {"rx_header_split", IXGBE_STAT(rx_hdr_split)}, 90 90 {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)}, 91 91 {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)}, 92 + {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)}, 92 93 }; 93 94 94 95 #define IXGBE_QUEUE_STATS_LEN \
+538 -125
drivers/net/ixgbe/ixgbe_main.c
··· 47 47 static const char ixgbe_driver_string[] = 48 48 "Intel(R) 10 Gigabit PCI Express Network Driver"; 49 49 50 - #define DRV_VERSION "1.3.56-k2" 50 + #define DRV_VERSION "2.0.8-k2" 51 51 const char ixgbe_driver_version[] = DRV_VERSION; 52 52 static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation."; 53 53 54 54 static const struct ixgbe_info *ixgbe_info_tbl[] = { 55 55 [board_82598] = &ixgbe_82598_info, 56 + [board_82599] = &ixgbe_82599_info, 56 57 }; 57 58 58 59 /* ixgbe_pci_tbl - PCI Device ID Table ··· 87 86 board_82598 }, 88 87 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), 89 88 board_82598 }, 89 + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), 90 + board_82599 }, 91 + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), 92 + board_82599 }, 90 93 91 94 /* required last entry */ 92 95 {0, } ··· 134 129 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); 135 130 } 136 131 137 - static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry, 138 - u8 msix_vector) 132 + /* 133 + * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors 134 + * @adapter: pointer to adapter struct 135 + * @direction: 0 for Rx, 1 for Tx, -1 for other causes 136 + * @queue: queue to map the corresponding interrupt to 137 + * @msix_vector: the vector to map to the corresponding queue 138 + * 139 + */ 140 + static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, 141 + u8 queue, u8 msix_vector) 139 142 { 140 143 u32 ivar, index; 141 - 142 - msix_vector |= IXGBE_IVAR_ALLOC_VAL; 143 - index = (int_alloc_entry >> 2) & 0x1F; 144 - ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR(index)); 145 - ivar &= ~(0xFF << (8 * (int_alloc_entry & 0x3))); 146 - ivar |= (msix_vector << (8 * (int_alloc_entry & 0x3))); 147 - IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar); 144 + struct ixgbe_hw *hw = &adapter->hw; 145 + switch (hw->mac.type) { 146 + case ixgbe_mac_82598EB: 147 + msix_vector |= IXGBE_IVAR_ALLOC_VAL; 148 + if (direction == -1) 149 + 
direction = 0; 150 + index = (((direction * 64) + queue) >> 2) & 0x1F; 151 + ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 152 + ivar &= ~(0xFF << (8 * (queue & 0x3))); 153 + ivar |= (msix_vector << (8 * (queue & 0x3))); 154 + IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); 155 + break; 156 + case ixgbe_mac_82599EB: 157 + if (direction == -1) { 158 + /* other causes */ 159 + msix_vector |= IXGBE_IVAR_ALLOC_VAL; 160 + index = ((queue & 1) * 8); 161 + ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC); 162 + ivar &= ~(0xFF << index); 163 + ivar |= (msix_vector << index); 164 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar); 165 + break; 166 + } else { 167 + /* tx or rx causes */ 168 + msix_vector |= IXGBE_IVAR_ALLOC_VAL; 169 + index = ((16 * (queue & 1)) + (8 * direction)); 170 + ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1)); 171 + ivar &= ~(0xFF << index); 172 + ivar |= (msix_vector << index); 173 + IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar); 174 + break; 175 + } 176 + default: 177 + break; 178 + } 148 179 } 149 180 150 181 static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter, ··· 351 310 352 311 if (rx_ring->cpu != cpu) { 353 312 rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q)); 354 - rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK; 355 - rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); 313 + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 314 + rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK; 315 + rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); 316 + } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 317 + rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599; 318 + rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << 319 + IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599); 320 + } 356 321 rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN; 357 322 rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; 358 323 rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN); 359 324 rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN | 360 - IXGBE_DCA_RXCTRL_DESC_HSRO_EN); 325 + 
IXGBE_DCA_RXCTRL_DESC_HSRO_EN); 361 326 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl); 362 327 rx_ring->cpu = cpu; 363 328 } ··· 379 332 380 333 if (tx_ring->cpu != cpu) { 381 334 txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q)); 382 - txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK; 383 - txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); 335 + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 336 + txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK; 337 + txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); 338 + } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 339 + txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599; 340 + txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << 341 + IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599); 342 + } 384 343 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; 385 344 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl); 386 345 tx_ring->cpu = cpu; ··· 517 464 adapter->hw_csum_rx_good++; 518 465 } 519 466 467 + static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw, 468 + struct ixgbe_ring *rx_ring, u32 val) 469 + { 470 + /* 471 + * Force memory writes to complete before letting h/w 472 + * know there are new descriptors to fetch. (Only 473 + * applicable for weak-ordered memory model archs, 474 + * such as IA-64). 
475 + */ 476 + wmb(); 477 + IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val); 478 + } 479 + 520 480 /** 521 481 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split 522 482 * @adapter: address of board private structure ··· 542 476 union ixgbe_adv_rx_desc *rx_desc; 543 477 struct ixgbe_rx_buffer *bi; 544 478 unsigned int i; 479 + unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN; 545 480 546 481 i = rx_ring->next_to_use; 547 482 bi = &rx_ring->rx_buffer_info[i]; ··· 572 505 573 506 if (!bi->skb) { 574 507 struct sk_buff *skb; 575 - skb = netdev_alloc_skb(adapter->netdev, 576 - (rx_ring->rx_buf_len + 577 - NET_IP_ALIGN)); 508 + skb = netdev_alloc_skb(adapter->netdev, bufsz); 578 509 579 510 if (!skb) { 580 511 adapter->alloc_rx_buff_failed++; ··· 587 522 skb_reserve(skb, NET_IP_ALIGN); 588 523 589 524 bi->skb = skb; 590 - bi->dma = pci_map_single(pdev, skb->data, 591 - rx_ring->rx_buf_len, 525 + bi->dma = pci_map_single(pdev, skb->data, bufsz, 592 526 PCI_DMA_FROMDEVICE); 593 527 } 594 528 /* Refresh the desc even if buffer_addrs didn't change because ··· 611 547 if (i-- == 0) 612 548 i = (rx_ring->count - 1); 613 549 614 - /* 615 - * Force memory writes to complete before letting h/w 616 - * know there are new descriptors to fetch. (Only 617 - * applicable for weak-ordered memory model archs, 618 - * such as IA-64). 
619 - */ 620 - wmb(); 621 - writel(i, adapter->hw.hw_addr + rx_ring->tail); 550 + ixgbe_release_rx_desc(&adapter->hw, rx_ring, i); 622 551 } 623 552 } 624 553 ··· 789 732 790 733 for (i = 0; i < q_vector->rxr_count; i++) { 791 734 j = adapter->rx_ring[r_idx].reg_idx; 792 - ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx); 735 + ixgbe_set_ivar(adapter, 0, j, v_idx); 793 736 r_idx = find_next_bit(q_vector->rxr_idx, 794 737 adapter->num_rx_queues, 795 738 r_idx + 1); ··· 799 742 800 743 for (i = 0; i < q_vector->txr_count; i++) { 801 744 j = adapter->tx_ring[r_idx].reg_idx; 802 - ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx); 745 + ixgbe_set_ivar(adapter, 1, j, v_idx); 803 746 r_idx = find_next_bit(q_vector->txr_idx, 804 747 adapter->num_tx_queues, 805 748 r_idx + 1); ··· 816 759 EITR_INTS_PER_SEC_TO_REG(q_vector->eitr)); 817 760 } 818 761 819 - ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx); 762 + if (adapter->hw.mac.type == ixgbe_mac_82598EB) 763 + ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, 764 + v_idx); 765 + else if (adapter->hw.mac.type == ixgbe_mac_82599EB) 766 + ixgbe_set_ivar(adapter, -1, 1, v_idx); 820 767 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); 821 768 822 769 /* set up to autoclear timer, and the vectors */ ··· 958 897 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); 959 898 q_vector->eitr = new_itr; 960 899 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr); 900 + if (adapter->hw.mac.type == ixgbe_mac_82599EB) 901 + /* Resolution is 2 usec on 82599, so halve the rate */ 902 + itr_reg >>= 1; 961 903 /* must write high and low 16 bits to reset counter */ 962 904 DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx, 963 905 itr_reg); ··· 979 915 DPRINTK(PROBE, CRIT, "Fan has stopped, replace the adapter\n"); 980 916 /* write to clear the interrupt */ 981 917 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); 918 + } 919 + } 920 + 921 + static void ixgbe_check_sfp_event(struct 
ixgbe_adapter *adapter, u32 eicr) 922 + { 923 + struct ixgbe_hw *hw = &adapter->hw; 924 + 925 + if (eicr & IXGBE_EICR_GPI_SDP1) { 926 + /* Clear the interrupt */ 927 + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); 928 + schedule_work(&adapter->multispeed_fiber_task); 929 + } else if (eicr & IXGBE_EICR_GPI_SDP2) { 930 + /* Clear the interrupt */ 931 + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2); 932 + schedule_work(&adapter->sfp_config_module_task); 933 + } else { 934 + /* Interrupt isn't for us... */ 935 + return; 982 936 } 983 937 } 984 938 ··· 1032 950 if (eicr & IXGBE_EICR_LSC) 1033 951 ixgbe_check_lsc(adapter); 1034 952 1035 - ixgbe_check_fan_failure(adapter, eicr); 953 + if (hw->mac.type == ixgbe_mac_82598EB) 954 + ixgbe_check_fan_failure(adapter, eicr); 1036 955 956 + if (hw->mac.type == ixgbe_mac_82599EB) 957 + ixgbe_check_sfp_event(adapter, eicr); 1037 958 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1038 959 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); 1039 960 ··· 1399 1314 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); 1400 1315 q_vector->eitr = new_itr; 1401 1316 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr); 1317 + if (adapter->hw.mac.type == ixgbe_mac_82599EB) 1318 + /* Resolution is 2 usec on 82599, so halve the rate */ 1319 + itr_reg >>= 1; 1402 1320 /* must write high and low 16 bits to reset counter */ 1403 1321 IXGBE_WRITE_REG(hw, IXGBE_EITR(0), itr_reg | (itr_reg)<<16); 1404 1322 } ··· 1416 1328 static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) 1417 1329 { 1418 1330 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); 1331 + if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 1332 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); 1333 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(2), ~0); 1334 + } 1419 1335 IXGBE_WRITE_FLUSH(&adapter->hw); 1420 1336 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 1421 1337 int i; ··· 1440 1348 mask = IXGBE_EIMS_ENABLE_MASK; 1441 1349 if (adapter->flags & 
IXGBE_FLAG_FAN_FAIL_CAPABLE) 1442 1350 mask |= IXGBE_EIMS_GPI_SDP1; 1351 + if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 1352 + mask |= IXGBE_EIMS_GPI_SDP1; 1353 + mask |= IXGBE_EIMS_GPI_SDP2; 1354 + } 1355 + 1443 1356 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); 1357 + if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 1358 + /* enable the rest of the queue vectors */ 1359 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), 1360 + (IXGBE_EIMS_RTX_QUEUE << 16)); 1361 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(2), 1362 + ((IXGBE_EIMS_RTX_QUEUE << 16) | 1363 + IXGBE_EIMS_RTX_QUEUE)); 1364 + } 1444 1365 IXGBE_WRITE_FLUSH(&adapter->hw); 1445 1366 } 1446 1367 ··· 1488 1383 1489 1384 if (eicr & IXGBE_EICR_LSC) 1490 1385 ixgbe_check_lsc(adapter); 1386 + 1387 + if (hw->mac.type == ixgbe_mac_82599EB) 1388 + ixgbe_check_sfp_event(adapter, eicr); 1491 1389 1492 1390 ixgbe_check_fan_failure(adapter, eicr); 1493 1391 ··· 1582 1474 IXGBE_WRITE_REG(hw, IXGBE_EITR(0), 1583 1475 EITR_INTS_PER_SEC_TO_REG(adapter->eitr_param)); 1584 1476 1585 - ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0); 1586 - ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(0), 0); 1477 + ixgbe_set_ivar(adapter, 0, 0, 0); 1478 + ixgbe_set_ivar(adapter, 1, 0, 0); 1587 1479 1588 1480 map_vector_to_rxq(adapter, 0, 0); 1589 1481 map_vector_to_txq(adapter, 0, 0); ··· 1624 1516 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; 1625 1517 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl); 1626 1518 } 1519 + if (hw->mac.type == ixgbe_mac_82599EB) { 1520 + /* We enable 8 traffic classes, DCB only */ 1521 + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) 1522 + IXGBE_WRITE_REG(hw, IXGBE_MTQC, (IXGBE_MTQC_RT_ENA | 1523 + IXGBE_MTQC_8TC_8TQ)); 1524 + } 1627 1525 } 1628 1526 1629 - #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 1527 + #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 1630 1528 1631 1529 static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index) 1632 1530 { 1633 1531 struct ixgbe_ring *rx_ring; 1634 1532 u32 
srrctl; 1635 - int queue0; 1533 + int queue0 = 0; 1636 1534 unsigned long mask; 1637 1535 1638 - /* program one srrctl register per VMDq index */ 1639 - if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) { 1640 - long shift, len; 1641 - mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask; 1642 - len = sizeof(adapter->ring_feature[RING_F_VMDQ].mask) * 8; 1643 - shift = find_first_bit(&mask, len); 1644 - queue0 = index & mask; 1645 - index = (index & mask) >> shift; 1646 - /* program one srrctl per RSS queue since RDRXCTL.MVMEN is enabled */ 1536 + if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 1537 + queue0 = index; 1647 1538 } else { 1648 1539 mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask; 1649 1540 queue0 = index & mask; ··· 1679 1572 srrctl |= rx_ring->rx_buf_len >> 1680 1573 IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1681 1574 } 1575 + if (adapter->hw.mac.type == ixgbe_mac_82599EB) 1576 + srrctl |= IXGBE_SRRCTL_DROP_EN; 1577 + 1682 1578 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl); 1683 1579 } 1684 1580 ··· 1713 1603 /* Set the RX buffer length according to the mode */ 1714 1604 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 1715 1605 rx_buf_len = IXGBE_RX_HDR_SIZE; 1606 + if (hw->mac.type == ixgbe_mac_82599EB) { 1607 + /* PSRTYPE must be initialized in 82599 */ 1608 + u32 psrtype = IXGBE_PSRTYPE_TCPHDR | 1609 + IXGBE_PSRTYPE_UDPHDR | 1610 + IXGBE_PSRTYPE_IPV4HDR | 1611 + IXGBE_PSRTYPE_IPV6HDR; 1612 + IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); 1613 + } 1716 1614 } else { 1717 1615 if (netdev->mtu <= ETH_DATA_LEN) 1718 1616 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; ··· 1731 1613 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 1732 1614 fctrl |= IXGBE_FCTRL_BAM; 1733 1615 fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */ 1616 + fctrl |= IXGBE_FCTRL_PMCF; 1734 1617 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl); 1735 1618 1736 1619 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); ··· 1763 1644 
ixgbe_configure_srrctl(adapter, j); 1764 1645 } 1765 1646 1766 - /* 1767 - * For VMDq support of different descriptor types or 1768 - * buffer sizes through the use of multiple SRRCTL 1769 - * registers, RDRXCTL.MVMEN must be set to 1 1770 - * 1771 - * also, the manual doesn't mention it clearly but DCA hints 1772 - * will only use queue 0's tags unless this bit is set. Side 1773 - * effects of setting this bit are only that SRRCTL must be 1774 - * fully programmed [0..15] 1775 - */ 1776 - if (adapter->flags & 1777 - (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_VMDQ_ENABLED)) { 1778 - rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 1779 - rdrxctl |= IXGBE_RDRXCTL_MVMEN; 1780 - IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); 1647 + if (hw->mac.type == ixgbe_mac_82598EB) { 1648 + /* 1649 + * For VMDq support of different descriptor types or 1650 + * buffer sizes through the use of multiple SRRCTL 1651 + * registers, RDRXCTL.MVMEN must be set to 1 1652 + * 1653 + * also, the manual doesn't mention it clearly but DCA hints 1654 + * will only use queue 0's tags unless this bit is set. 
Side 1655 + * effects of setting this bit are only that SRRCTL must be 1656 + * fully programmed [0..15] 1657 + */ 1658 + if (adapter->flags & 1659 + (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_VMDQ_ENABLED)) { 1660 + rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 1661 + rdrxctl |= IXGBE_RDRXCTL_MVMEN; 1662 + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); 1663 + } 1781 1664 } 1782 1665 1666 + /* Program MRQC for the distribution of queues */ 1667 + if (hw->mac.type == ixgbe_mac_82599EB) { 1668 + int mask = adapter->flags & ( 1669 + IXGBE_FLAG_RSS_ENABLED 1670 + | IXGBE_FLAG_DCB_ENABLED 1671 + ); 1672 + 1673 + switch (mask) { 1674 + case (IXGBE_FLAG_RSS_ENABLED): 1675 + mrqc = IXGBE_MRQC_RSSEN; 1676 + break; 1677 + case (IXGBE_FLAG_DCB_ENABLED): 1678 + mrqc = IXGBE_MRQC_RT8TCEN; 1679 + break; 1680 + default: 1681 + break; 1682 + } 1683 + } 1783 1684 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 1784 1685 /* Fill out redirection table */ 1785 1686 for (i = 0, j = 0; i < 128; i++, j++) { ··· 1821 1682 | IXGBE_MRQC_RSS_FIELD_IPV4 1822 1683 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP 1823 1684 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP 1824 - | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 1825 - | IXGBE_MRQC_RSS_FIELD_IPV6_EX 1826 1685 | IXGBE_MRQC_RSS_FIELD_IPV6 1827 1686 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP 1828 - | IXGBE_MRQC_RSS_FIELD_IPV6_UDP 1829 - | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 1687 + | IXGBE_MRQC_RSS_FIELD_IPV6_UDP; 1830 1688 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 1831 1689 } 1832 1690 ··· 1842 1706 } 1843 1707 1844 1708 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 1709 + 1710 + if (hw->mac.type == ixgbe_mac_82599EB) { 1711 + rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 1712 + rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; 1713 + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); 1714 + } 1845 1715 } 1846 1716 1847 1717 static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) ··· 1881 1739 { 1882 1740 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1883 1741 u32 ctrl; 1742 + int i, j; 1884 1743 1885 1744 if 
(!test_bit(__IXGBE_DOWN, &adapter->state)) 1886 1745 ixgbe_irq_disable(adapter); ··· 1893 1750 * not in DCB mode. 1894 1751 */ 1895 1752 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL); 1896 - ctrl |= IXGBE_VLNCTRL_VME; 1897 - ctrl &= ~IXGBE_VLNCTRL_CFIEN; 1898 - IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl); 1899 - ixgbe_vlan_rx_add_vid(netdev, 0); 1900 - 1901 - if (grp) { 1902 - /* enable VLAN tag insert/strip */ 1903 - ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL); 1904 - ctrl |= IXGBE_VLNCTRL_VME; 1753 + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 1754 + ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE; 1905 1755 ctrl &= ~IXGBE_VLNCTRL_CFIEN; 1906 1756 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl); 1757 + } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 1758 + ctrl |= IXGBE_VLNCTRL_VFE; 1759 + /* enable VLAN tag insert/strip */ 1760 + ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL); 1761 + ctrl &= ~IXGBE_VLNCTRL_CFIEN; 1762 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl); 1763 + for (i = 0; i < adapter->num_rx_queues; i++) { 1764 + j = adapter->rx_ring[i].reg_idx; 1765 + ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j)); 1766 + ctrl |= IXGBE_RXDCTL_VME; 1767 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl); 1768 + } 1907 1769 } 1770 + ixgbe_vlan_rx_add_vid(netdev, 0); 1908 1771 1909 1772 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1910 1773 ixgbe_irq_enable(adapter); ··· 2073 1924 } 2074 1925 /* Enable VLAN tag insert/strip */ 2075 1926 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2076 - vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE; 2077 - vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; 2078 - IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 1927 + if (hw->mac.type == ixgbe_mac_82598EB) { 1928 + vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE; 1929 + vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; 1930 + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 1931 + } else if (hw->mac.type == ixgbe_mac_82599EB) { 1932 + vlnctrl |= IXGBE_VLNCTRL_VFE; 1933 
+ vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; 1934 + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 1935 + for (i = 0; i < adapter->num_rx_queues; i++) { 1936 + j = adapter->rx_ring[i].reg_idx; 1937 + vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); 1938 + vlnctrl |= IXGBE_RXDCTL_VME; 1939 + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); 1940 + } 1941 + } 2079 1942 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); 2080 1943 } 2081 1944 ··· 2118 1957 (adapter->rx_ring[i].count - 1)); 2119 1958 } 2120 1959 1960 + static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) 1961 + { 1962 + switch (hw->phy.type) { 1963 + case ixgbe_phy_sfp_avago: 1964 + case ixgbe_phy_sfp_ftl: 1965 + case ixgbe_phy_sfp_intel: 1966 + case ixgbe_phy_sfp_unknown: 1967 + case ixgbe_phy_tw_tyco: 1968 + case ixgbe_phy_tw_unknown: 1969 + return true; 1970 + default: 1971 + return false; 1972 + } 1973 + } 1974 + 2121 1975 /** 2122 - * ixgbe_link_config - set up initial link with default speed and duplex 1976 + * ixgbe_sfp_link_config - set up SFP+ link 1977 + * @adapter: pointer to private adapter struct 1978 + **/ 1979 + static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) 1980 + { 1981 + struct ixgbe_hw *hw = &adapter->hw; 1982 + 1983 + if (hw->phy.multispeed_fiber) { 1984 + /* 1985 + * In multispeed fiber setups, the device may not have 1986 + * had a physical connection when the driver loaded. 1987 + * If that's the case, the initial link configuration 1988 + * couldn't get the MAC into 10G or 1G mode, so we'll 1989 + * never have a link status change interrupt fire. 1990 + * We need to try and force an autonegotiation 1991 + * session, then bring up link. 1992 + */ 1993 + hw->mac.ops.setup_sfp(hw); 1994 + if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) 1995 + schedule_work(&adapter->multispeed_fiber_task); 1996 + } else { 1997 + /* 1998 + * Direct Attach Cu and non-multispeed fiber modules 1999 + * still need to be configured properly prior to 2000 + * attempting link. 
2001 + */ 2002 + if (!(adapter->flags & IXGBE_FLAG_IN_SFP_MOD_TASK)) 2003 + schedule_work(&adapter->sfp_config_module_task); 2004 + } 2005 + } 2006 + 2007 + /** 2008 + * ixgbe_non_sfp_link_config - set up non-SFP+ link 2123 2009 * @hw: pointer to private hardware struct 2124 2010 * 2125 2011 * Returns 0 on success, negative on failure 2126 2012 **/ 2127 - static int ixgbe_link_config(struct ixgbe_hw *hw) 2013 + static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw) 2128 2014 { 2129 2015 u32 autoneg; 2130 2016 bool link_up = false; ··· 2191 1983 2192 1984 if (hw->mac.ops.setup_link_speed) 2193 1985 ret = hw->mac.ops.setup_link_speed(hw, autoneg, true, link_up); 2194 - 2195 1986 link_cfg_out: 2196 1987 return ret; 1988 + } 1989 + 1990 + #define IXGBE_MAX_RX_DESC_POLL 10 1991 + static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, 1992 + int rxr) 1993 + { 1994 + int j = adapter->rx_ring[rxr].reg_idx; 1995 + int k; 1996 + 1997 + for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) { 1998 + if (IXGBE_READ_REG(&adapter->hw, 1999 + IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE) 2000 + break; 2001 + else 2002 + msleep(1); 2003 + } 2004 + if (k >= IXGBE_MAX_RX_DESC_POLL) { 2005 + DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d " 2006 + "not set within the polling period\n", rxr); 2007 + } 2008 + ixgbe_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr], 2009 + (adapter->rx_ring[rxr].count - 1)); 2197 2010 } 2198 2011 2199 2012 static int ixgbe_up_complete(struct ixgbe_adapter *adapter) ··· 2222 1993 struct net_device *netdev = adapter->netdev; 2223 1994 struct ixgbe_hw *hw = &adapter->hw; 2224 1995 int i, j = 0; 1996 + int num_rx_rings = adapter->num_rx_queues; 2225 1997 int err; 2226 1998 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 2227 1999 u32 txdctl, rxdctl, mhadd; 2000 + u32 dmatxctl; 2228 2001 u32 gpie; 2229 2002 2230 2003 ixgbe_get_hw_control(adapter); ··· 2258 2027 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 2259 2028 } 2260 2029 2030 + if 
(hw->mac.type == ixgbe_mac_82599EB) { 2031 + gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 2032 + gpie |= IXGBE_SDP1_GPIEN; 2033 + gpie |= IXGBE_SDP2_GPIEN; 2034 + IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 2035 + } 2036 + 2261 2037 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); 2262 2038 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { 2263 2039 mhadd &= ~IXGBE_MHADD_MFS_MASK; ··· 2278 2040 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); 2279 2041 /* enable WTHRESH=8 descriptors, to encourage burst writeback */ 2280 2042 txdctl |= (8 << 16); 2043 + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); 2044 + } 2045 + 2046 + if (hw->mac.type == ixgbe_mac_82599EB) { 2047 + /* DMATXCTL.EN must be set after all Tx queue config is done */ 2048 + dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 2049 + dmatxctl |= IXGBE_DMATXCTL_TE; 2050 + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); 2051 + } 2052 + for (i = 0; i < adapter->num_tx_queues; i++) { 2053 + j = adapter->tx_ring[i].reg_idx; 2054 + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); 2281 2055 txdctl |= IXGBE_TXDCTL_ENABLE; 2282 2056 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); 2283 2057 } 2284 2058 2285 - for (i = 0; i < adapter->num_rx_queues; i++) { 2059 + for (i = 0; i < num_rx_rings; i++) { 2286 2060 j = adapter->rx_ring[i].reg_idx; 2287 2061 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); 2288 2062 /* enable PTHRESH=32 descriptors (half the internal cache) ··· 2303 2053 rxdctl |= 0x0020; 2304 2054 rxdctl |= IXGBE_RXDCTL_ENABLE; 2305 2055 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl); 2056 + if (hw->mac.type == ixgbe_mac_82599EB) 2057 + ixgbe_rx_desc_queue_enable(adapter, i); 2306 2058 } 2307 2059 /* enable all receives */ 2308 2060 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 2309 - rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN); 2310 - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxdctl); 2061 + if (hw->mac.type == ixgbe_mac_82598EB) 2062 + rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN); 2063 + else 2064 + rxdctl |= IXGBE_RXCTRL_RXEN; 
2065 + hw->mac.ops.enable_rx_dma(hw, rxdctl); 2311 2066 2312 2067 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) 2313 2068 ixgbe_configure_msix(adapter); ··· 2329 2074 2330 2075 ixgbe_irq_enable(adapter); 2331 2076 2332 - err = ixgbe_link_config(hw); 2333 - if (err) 2334 - dev_err(&adapter->pdev->dev, "link_config FAILED %d\n", err); 2077 + /* 2078 + * For hot-pluggable SFP+ devices, a new SFP+ module may have 2079 + * arrived before interrupts were enabled. We need to kick off 2080 + * the SFP+ module setup first, then try to bring up link. 2081 + * If we're not hot-pluggable SFP+, we just need to configure link 2082 + * and bring it up. 2083 + */ 2084 + err = hw->phy.ops.identify(hw); 2085 + if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 2086 + DPRINTK(PROBE, ERR, "PHY not supported on this NIC %d\n", err); 2087 + ixgbe_down(adapter); 2088 + return err; 2089 + } 2090 + 2091 + if (ixgbe_is_sfp(hw)) { 2092 + ixgbe_sfp_link_config(adapter); 2093 + } else { 2094 + err = ixgbe_non_sfp_link_config(hw); 2095 + if (err) 2096 + DPRINTK(PROBE, ERR, "link_config FAILED %d\n", err); 2097 + } 2335 2098 2336 2099 /* enable transmits */ 2337 2100 netif_tx_start_all_queues(netdev); ··· 2779 2506 adapter->tx_ring[i].reg_idx = i << 2; 2780 2507 } 2781 2508 ret = true; 2509 + } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 2510 + for (i = 0; i < dcb_i; i++) { 2511 + adapter->rx_ring[i].reg_idx = i << 4; 2512 + adapter->tx_ring[i].reg_idx = i << 4; 2513 + } 2514 + ret = true; 2782 2515 } else { 2783 2516 ret = false; 2784 2517 } ··· 3080 2801 adapter->ring_feature[RING_F_RSS].indices = rss; 3081 2802 adapter->flags |= IXGBE_FLAG_RSS_ENABLED; 3082 2803 adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES; 3083 - adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; 2804 + if (hw->mac.type == ixgbe_mac_82598EB) 2805 + adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; 2806 + else if (hw->mac.type == ixgbe_mac_82599EB) 2807 + adapter->max_msix_q_vectors = 
MAX_MSIX_Q_VECTORS_82599; 3084 2808 3085 2809 #ifdef CONFIG_IXGBE_DCB 3086 2810 /* Configure DCB traffic classes */ ··· 3104 2822 adapter->ring_feature[RING_F_DCB].indices); 3105 2823 3106 2824 #endif 3107 - if (hw->mac.ops.get_media_type && 3108 - (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) 3109 - adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; 3110 2825 3111 2826 /* default flow control settings */ 3112 2827 hw->fc.requested_mode = ixgbe_fc_none; ··· 3551 3272 { 3552 3273 struct net_device *netdev = pci_get_drvdata(pdev); 3553 3274 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3275 + struct ixgbe_hw *hw = &adapter->hw; 3276 + u32 ctrl, fctrl; 3277 + u32 wufc = adapter->wol; 3554 3278 #ifdef CONFIG_PM 3555 3279 int retval = 0; 3556 3280 #endif ··· 3577 3295 if (retval) 3578 3296 return retval; 3579 3297 #endif 3298 + if (wufc) { 3299 + ixgbe_set_rx_mode(netdev); 3580 3300 3581 - pci_enable_wake(pdev, PCI_D3hot, 0); 3582 - pci_enable_wake(pdev, PCI_D3cold, 0); 3301 + /* turn on all-multi mode if wake on multicast is enabled */ 3302 + if (wufc & IXGBE_WUFC_MC) { 3303 + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 3304 + fctrl |= IXGBE_FCTRL_MPE; 3305 + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 3306 + } 3307 + 3308 + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); 3309 + ctrl |= IXGBE_CTRL_GIO_DIS; 3310 + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); 3311 + 3312 + IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc); 3313 + } else { 3314 + IXGBE_WRITE_REG(hw, IXGBE_WUC, 0); 3315 + IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); 3316 + } 3317 + 3318 + if (wufc && hw->mac.type == ixgbe_mac_82599EB) { 3319 + pci_enable_wake(pdev, PCI_D3hot, 1); 3320 + pci_enable_wake(pdev, PCI_D3cold, 1); 3321 + } else { 3322 + pci_enable_wake(pdev, PCI_D3hot, 0); 3323 + pci_enable_wake(pdev, PCI_D3cold, 0); 3324 + } 3583 3325 3584 3326 ixgbe_release_hw_control(adapter); 3585 3327 ··· 3636 3330 missed_rx += mpc; 3637 3331 adapter->stats.mpc[i] += mpc; 3638 3332 total_mpc += adapter->stats.mpc[i]; 3639 - 
adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); 3333 + if (hw->mac.type == ixgbe_mac_82598EB) 3334 + adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); 3640 3335 adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 3641 3336 adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); 3642 3337 adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 3643 3338 adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); 3644 - adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw, 3645 - IXGBE_PXONRXC(i)); 3339 + if (hw->mac.type == ixgbe_mac_82599EB) { 3340 + adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw, 3341 + IXGBE_PXONRXCNT(i)); 3342 + adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw, 3343 + IXGBE_PXOFFRXCNT(i)); 3344 + adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 3345 + adapter->hw_rx_no_dma_resources += adapter->stats.qprdc[i]; 3346 + } else { 3347 + adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw, 3348 + IXGBE_PXONRXC(i)); 3349 + adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw, 3350 + IXGBE_PXOFFRXC(i)); 3351 + } 3646 3352 adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw, 3647 3353 IXGBE_PXONTXC(i)); 3648 - adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw, 3649 - IXGBE_PXOFFRXC(i)); 3650 3354 adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw, 3651 - IXGBE_PXOFFTXC(i)); 3355 + IXGBE_PXOFFTXC(i)); 3652 3356 } 3653 3357 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); 3654 3358 /* work around hardware counting issue */ 3655 3359 adapter->stats.gprc -= missed_rx; 3656 3360 3657 3361 /* 82598 hardware only has a 32 bit counter in the high register */ 3658 - adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); 3659 - adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); 3660 - adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH); 3362 + if (hw->mac.type == ixgbe_mac_82599EB) { 3363 + adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); 3364 + IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */ 3365 + 
adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); 3366 + IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */ 3367 + adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL); 3368 + IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ 3369 + adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 3370 + adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); 3371 + } else { 3372 + adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); 3373 + adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 3374 + adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); 3375 + adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); 3376 + adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH); 3377 + } 3661 3378 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 3662 3379 adapter->stats.bprc += bprc; 3663 3380 adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); 3664 - adapter->stats.mprc -= bprc; 3381 + if (hw->mac.type == ixgbe_mac_82598EB) 3382 + adapter->stats.mprc -= bprc; 3665 3383 adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC); 3666 3384 adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); 3667 3385 adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); ··· 3694 3364 adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); 3695 3365 adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); 3696 3366 adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); 3697 - adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); 3698 - adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 3699 3367 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); 3700 3368 adapter->stats.lxontxc += lxon; 3701 3369 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); ··· 3766 3438 } 3767 3439 3768 3440 /** 3441 + * ixgbe_multispeed_fiber_task - worker thread to configure multispeed fiber 3442 + * @work: pointer to work_struct containing our data 3443 + **/ 3444 + static void ixgbe_multispeed_fiber_task(struct work_struct *work) 3445 + { 3446 + struct ixgbe_adapter *adapter = 
container_of(work, 3447 + struct ixgbe_adapter, 3448 + multispeed_fiber_task); 3449 + struct ixgbe_hw *hw = &adapter->hw; 3450 + u32 autoneg; 3451 + 3452 + adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK; 3453 + if (hw->mac.ops.get_link_capabilities) 3454 + hw->mac.ops.get_link_capabilities(hw, &autoneg, 3455 + &hw->mac.autoneg); 3456 + if (hw->mac.ops.setup_link_speed) 3457 + hw->mac.ops.setup_link_speed(hw, autoneg, true, true); 3458 + adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 3459 + adapter->flags &= ~IXGBE_FLAG_IN_SFP_LINK_TASK; 3460 + } 3461 + 3462 + /** 3463 + * ixgbe_sfp_config_module_task - worker thread to configure a new SFP+ module 3464 + * @work: pointer to work_struct containing our data 3465 + **/ 3466 + static void ixgbe_sfp_config_module_task(struct work_struct *work) 3467 + { 3468 + struct ixgbe_adapter *adapter = container_of(work, 3469 + struct ixgbe_adapter, 3470 + sfp_config_module_task); 3471 + struct ixgbe_hw *hw = &adapter->hw; 3472 + u32 err; 3473 + 3474 + adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK; 3475 + err = hw->phy.ops.identify_sfp(hw); 3476 + if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 3477 + DPRINTK(PROBE, ERR, "PHY not supported on this NIC %d\n", err); 3478 + ixgbe_down(adapter); 3479 + return; 3480 + } 3481 + hw->mac.ops.setup_sfp(hw); 3482 + 3483 + if (!adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK) 3484 + /* This will also work for DA Twinax connections */ 3485 + schedule_work(&adapter->multispeed_fiber_task); 3486 + adapter->flags &= ~IXGBE_FLAG_IN_SFP_MOD_TASK; 3487 + } 3488 + 3489 + /** 3769 3490 * ixgbe_watchdog_task - worker thread to bring link up 3770 3491 * @work: pointer to work_struct containing our data 3771 3492 **/ ··· 3844 3467 3845 3468 if (link_up) { 3846 3469 if (!netif_carrier_ok(netdev)) { 3847 - u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 3848 - u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS); 3849 - #define FLOW_RX (frctl & IXGBE_FCTRL_RFCE) 3850 - #define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X) 3470 + bool 
flow_rx, flow_tx; 3471 + 3472 + if (hw->mac.type == ixgbe_mac_82599EB) { 3473 + u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); 3474 + u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 3475 + flow_rx = (mflcn & IXGBE_MFLCN_RFCE); 3476 + flow_tx = (fccfg & IXGBE_FCCFG_TFCE_802_3X); 3477 + } else { 3478 + u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 3479 + u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS); 3480 + flow_rx = (frctl & IXGBE_FCTRL_RFCE); 3481 + flow_tx = (rmcs & IXGBE_RMCS_TFCE_802_3X); 3482 + } 3483 + 3851 3484 printk(KERN_INFO "ixgbe: %s NIC Link is Up %s, " 3852 3485 "Flow Control: %s\n", 3853 3486 netdev->name, ··· 3865 3478 "10 Gbps" : 3866 3479 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? 3867 3480 "1 Gbps" : "unknown speed")), 3868 - ((FLOW_RX && FLOW_TX) ? "RX/TX" : 3869 - (FLOW_RX ? "RX" : 3870 - (FLOW_TX ? "TX" : "None")))); 3481 + ((flow_rx && flow_tx) ? "RX/TX" : 3482 + (flow_rx ? "RX" : 3483 + (flow_tx ? "TX" : "None")))); 3871 3484 3872 3485 netif_carrier_on(netdev); 3873 3486 } else { ··· 4374 3987 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; 4375 3988 static int cards_found; 4376 3989 int i, err, pci_using_dac; 4377 - u16 link_status, link_speed, link_width; 3990 + u16 pm_value = 0; 4378 3991 u32 part_num, eec; 4379 3992 4380 3993 err = pci_enable_device(pdev); ··· 4473 4086 4474 4087 INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task); 4475 4088 4089 + /* multispeed fiber has its own tasklet, called from GPI SDP1 context */ 4090 + INIT_WORK(&adapter->multispeed_fiber_task, ixgbe_multispeed_fiber_task); 4091 + 4092 + /* a new SFP+ module arrival, called from GPI SDP2 context */ 4093 + INIT_WORK(&adapter->sfp_config_module_task, 4094 + ixgbe_sfp_config_module_task); 4095 + 4476 4096 err = ii->get_invariants(hw); 4477 4097 if (err == IXGBE_ERR_SFP_NOT_PRESENT) { 4478 4098 /* start a kernel thread to watch for a module to arrive */ ··· 4560 4166 if (err) 4561 4167 goto err_sw_init; 4562 4168 4169 + switch (pdev->device) { 4170 + case 
IXGBE_DEV_ID_82599_KX4: 4171 + #define IXGBE_PCIE_PMCSR 0x44 4172 + adapter->wol = IXGBE_WUFC_MAG; 4173 + pci_read_config_word(pdev, IXGBE_PCIE_PMCSR, &pm_value); 4174 + pci_write_config_word(pdev, IXGBE_PCIE_PMCSR, 4175 + (pm_value | (1 << 8))); 4176 + break; 4177 + default: 4178 + adapter->wol = 0; 4179 + break; 4180 + } 4181 + device_init_wakeup(&adapter->pdev->dev, true); 4182 + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 4183 + 4563 4184 /* print bus type/speed/width info */ 4564 - pci_read_config_word(pdev, IXGBE_PCI_LINK_STATUS, &link_status); 4565 - link_speed = link_status & IXGBE_PCI_LINK_SPEED; 4566 - link_width = link_status & IXGBE_PCI_LINK_WIDTH; 4567 4185 dev_info(&pdev->dev, "(PCI Express:%s:%s) %pM\n", 4568 - ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" : 4569 - (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" : 4570 - "Unknown"), 4571 - ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" : 4572 - (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" : 4573 - (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" : 4574 - (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" : 4186 + ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s": 4187 + (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"), 4188 + ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" : 4189 + (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" : 4190 + (hw->bus.width == ixgbe_bus_width_pcie_x1) ? 
"Width x1" : 4575 4191 "Unknown"), 4576 4192 netdev->dev_addr); 4577 4193 ixgbe_read_pba_num_generic(hw, &part_num); 4578 - dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", 4579 - hw->mac.type, hw->phy.type, 4580 - (part_num >> 8), (part_num & 0xff)); 4194 + if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) 4195 + dev_info(&pdev->dev, "MAC: %d, PHY: %d, SFP+: %d, PBA No: %06x-%03x\n", 4196 + hw->mac.type, hw->phy.type, hw->phy.sfp_type, 4197 + (part_num >> 8), (part_num & 0xff)); 4198 + else 4199 + dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", 4200 + hw->mac.type, hw->phy.type, 4201 + (part_num >> 8), (part_num & 0xff)); 4581 4202 4582 - if (link_width <= IXGBE_PCI_LINK_WIDTH_4) { 4203 + if (hw->bus.width <= ixgbe_bus_width_pcie_x4) { 4583 4204 dev_warn(&pdev->dev, "PCI-Express bandwidth available for " 4584 4205 "this card is not sufficient for optimal " 4585 4206 "performance.\n"); ··· 4638 4229 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); 4639 4230 del_timer_sync(&adapter->sfp_timer); 4640 4231 cancel_work_sync(&adapter->sfp_task); 4232 + cancel_work_sync(&adapter->multispeed_fiber_task); 4233 + cancel_work_sync(&adapter->sfp_config_module_task); 4641 4234 iounmap(hw->hw_addr); 4642 4235 err_ioremap: 4643 4236 free_netdev(netdev); ··· 4676 4265 del_timer_sync(&adapter->sfp_timer); 4677 4266 cancel_work_sync(&adapter->watchdog_task); 4678 4267 cancel_work_sync(&adapter->sfp_task); 4268 + cancel_work_sync(&adapter->multispeed_fiber_task); 4269 + cancel_work_sync(&adapter->sfp_config_module_task); 4679 4270 flush_scheduled_work(); 4680 4271 4681 4272 #ifdef CONFIG_IXGBE_DCA