Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.26-rc2 (3829 lines, 106 kB)
1/******************************************************************************* 2 3 Intel 10 Gigabit PCI Express Linux driver 4 Copyright(c) 1999 - 2007 Intel Corporation. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms and conditions of the GNU General Public License, 8 version 2, as published by the Free Software Foundation. 9 10 This program is distributed in the hope it will be useful, but WITHOUT 11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 more details. 14 15 You should have received a copy of the GNU General Public License along with 16 this program; if not, write to the Free Software Foundation, Inc., 17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 19 The full GNU General Public License is included in this distribution in 20 the file called "COPYING". 21 22 Contact Information: 23 Linux NICS <linux.nics@intel.com> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 27*******************************************************************************/ 28 29#include <linux/types.h> 30#include <linux/module.h> 31#include <linux/pci.h> 32#include <linux/netdevice.h> 33#include <linux/vmalloc.h> 34#include <linux/string.h> 35#include <linux/in.h> 36#include <linux/ip.h> 37#include <linux/tcp.h> 38#include <linux/ipv6.h> 39#include <net/checksum.h> 40#include <net/ip6_checksum.h> 41#include <linux/ethtool.h> 42#include <linux/if_vlan.h> 43 44#include "ixgbe.h" 45#include "ixgbe_common.h" 46 47char ixgbe_driver_name[] = "ixgbe"; 48static const char ixgbe_driver_string[] = 49 "Intel(R) 10 Gigabit PCI Express Network Driver"; 50 51#define DRV_VERSION "1.3.18-k2" 52const char ixgbe_driver_version[] = DRV_VERSION; 53static const char ixgbe_copyright[] = 54 "Copyright (c) 1999-2007 Intel Corporation."; 55 56static const struct ixgbe_info *ixgbe_info_tbl[] = { 57 [board_82598] = &ixgbe_82598_info, 58}; 59 60/* ixgbe_pci_tbl - PCI Device ID Table 61 * 62 * Wildcard entries (PCI_ANY_ID) should come last 63 * Last entry must be all 0s 64 * 65 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 66 * Class, Class Mask, private data (not used) } 67 */ 68static struct pci_device_id ixgbe_pci_tbl[] = { 69 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), 70 board_82598 }, 71 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), 72 board_82598 }, 73 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT_DUAL_PORT), 74 board_82598 }, 75 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), 76 board_82598 }, 77 78 /* required last entry */ 79 {0, } 80}; 81MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl); 82 83#ifdef CONFIG_DCA 84static int ixgbe_notify_dca(struct notifier_block *, unsigned long event, 85 void *p); 86static struct notifier_block dca_notifier = { 87 .notifier_call = ixgbe_notify_dca, 88 .next = NULL, 89 .priority = 0 90}; 91#endif 92 93MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 94MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); 95MODULE_LICENSE("GPL"); 96MODULE_VERSION(DRV_VERSION); 97 98#define DEFAULT_DEBUG_LEVEL_SHIFT 3 99 100static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter) 101{ 102 u32 ctrl_ext; 103 104 /* Let firmware take over control of h/w */ 105 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); 106 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, 107 ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD); 108} 
109 110static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter) 111{ 112 u32 ctrl_ext; 113 114 /* Let firmware know the driver has taken over */ 115 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); 116 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, 117 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); 118} 119 120#ifdef DEBUG 121/** 122 * ixgbe_get_hw_dev_name - return device name string 123 * used by hardware layer to print debugging information 124 **/ 125char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw) 126{ 127 struct ixgbe_adapter *adapter = hw->back; 128 struct net_device *netdev = adapter->netdev; 129 return netdev->name; 130} 131#endif 132 133static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry, 134 u8 msix_vector) 135{ 136 u32 ivar, index; 137 138 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 139 index = (int_alloc_entry >> 2) & 0x1F; 140 ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR(index)); 141 ivar &= ~(0xFF << (8 * (int_alloc_entry & 0x3))); 142 ivar |= (msix_vector << (8 * (int_alloc_entry & 0x3))); 143 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar); 144} 145 146static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter, 147 struct ixgbe_tx_buffer 148 *tx_buffer_info) 149{ 150 if (tx_buffer_info->dma) { 151 pci_unmap_page(adapter->pdev, 152 tx_buffer_info->dma, 153 tx_buffer_info->length, PCI_DMA_TODEVICE); 154 tx_buffer_info->dma = 0; 155 } 156 if (tx_buffer_info->skb) { 157 dev_kfree_skb_any(tx_buffer_info->skb); 158 tx_buffer_info->skb = NULL; 159 } 160 /* tx_buffer_info must be completely set up in the transmit path */ 161} 162 163static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter, 164 struct ixgbe_ring *tx_ring, 165 unsigned int eop, 166 union ixgbe_adv_tx_desc *eop_desc) 167{ 168 /* Detect a transmit hang in hardware, this serializes the 169 * check with the clearing of time_stamp and movement of i */ 170 adapter->detect_tx_hung = false; 171 if (tx_ring->tx_buffer_info[eop].dma && 172 time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) && 173 !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) { 174 /* detected Tx unit hang */ 175 DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" 176 " TDH <%x>\n" 177 " TDT <%x>\n" 178 " next_to_use <%x>\n" 179 " next_to_clean <%x>\n" 180 "tx_buffer_info[next_to_clean]\n" 181 " time_stamp <%lx>\n" 182 " next_to_watch <%x>\n" 183 " jiffies <%lx>\n" 184 " next_to_watch.status <%x>\n", 185 readl(adapter->hw.hw_addr + tx_ring->head), 186 readl(adapter->hw.hw_addr + tx_ring->tail), 187 tx_ring->next_to_use, 188 tx_ring->next_to_clean, 189 tx_ring->tx_buffer_info[eop].time_stamp, 190 eop, jiffies, eop_desc->wb.status); 191 return true; 192 } 193 194 return false; 195} 196 197#define IXGBE_MAX_TXD_PWR 14 198#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) 199 200/* Tx Descriptors needed, worst case */ 201#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \ 202 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 
1 : 0)) 203#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \ 204 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */ 205 206/** 207 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes 208 * @adapter: board private structure 209 **/ 210static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter, 211 struct ixgbe_ring *tx_ring) 212{ 213 struct net_device *netdev = adapter->netdev; 214 union ixgbe_adv_tx_desc *tx_desc, *eop_desc; 215 struct ixgbe_tx_buffer *tx_buffer_info; 216 unsigned int i, eop; 217 bool cleaned = false; 218 unsigned int total_tx_bytes = 0, total_tx_packets = 0; 219 220 i = tx_ring->next_to_clean; 221 eop = tx_ring->tx_buffer_info[i].next_to_watch; 222 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); 223 while (eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) { 224 cleaned = false; 225 while (!cleaned) { 226 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); 227 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 228 cleaned = (i == eop); 229 230 tx_ring->stats.bytes += tx_buffer_info->length; 231 if (cleaned) { 232 struct sk_buff *skb = tx_buffer_info->skb; 233 unsigned int segs, bytecount; 234 segs = skb_shinfo(skb)->gso_segs ?: 1; 235 /* multiply data chunks by size of headers */ 236 bytecount = ((segs - 1) * skb_headlen(skb)) + 237 skb->len; 238 total_tx_packets += segs; 239 total_tx_bytes += bytecount; 240 } 241 ixgbe_unmap_and_free_tx_resource(adapter, 242 tx_buffer_info); 243 tx_desc->wb.status = 0; 244 245 i++; 246 if (i == tx_ring->count) 247 i = 0; 248 } 249 250 tx_ring->stats.packets++; 251 252 eop = tx_ring->tx_buffer_info[i].next_to_watch; 253 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); 254 255 /* weight of a sort for tx, avoid endless transmit cleanup */ 256 if (total_tx_packets >= tx_ring->work_limit) 257 break; 258 } 259 260 tx_ring->next_to_clean = i; 261 262#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 263 if (total_tx_packets && netif_carrier_ok(netdev) && 264 (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { 265 /* Make sure that anybody stopping the queue after this 266 * sees the new next_to_clean. 267 */ 268 smp_mb(); 269#ifdef CONFIG_NETDEVICES_MULTIQUEUE 270 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && 271 !test_bit(__IXGBE_DOWN, &adapter->state)) { 272 netif_wake_subqueue(netdev, tx_ring->queue_index); 273 adapter->restart_queue++; 274 } 275#else 276 if (netif_queue_stopped(netdev) && 277 !test_bit(__IXGBE_DOWN, &adapter->state)) { 278 netif_wake_queue(netdev); 279 adapter->restart_queue++; 280 } 281#endif 282 } 283 284 if (adapter->detect_tx_hung) 285 if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc)) 286#ifdef CONFIG_NETDEVICES_MULTIQUEUE 287 netif_stop_subqueue(netdev, tx_ring->queue_index); 288#else 289 netif_stop_queue(netdev); 290#endif 291 292 if (total_tx_packets >= tx_ring->work_limit) 293 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value); 294 295 tx_ring->total_bytes += total_tx_bytes; 296 tx_ring->total_packets += total_tx_packets; 297 adapter->net_stats.tx_bytes += total_tx_bytes; 298 adapter->net_stats.tx_packets += total_tx_packets; 299 cleaned = total_tx_packets ? 
true : false; 300 return cleaned; 301} 302 303#ifdef CONFIG_DCA 304static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, 305 struct ixgbe_ring *rxr) 306{ 307 u32 rxctrl; 308 int cpu = get_cpu(); 309 int q = rxr - adapter->rx_ring; 310 311 if (rxr->cpu != cpu) { 312 rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q)); 313 rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK; 314 rxctrl |= dca_get_tag(cpu); 315 rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN; 316 rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; 317 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl); 318 rxr->cpu = cpu; 319 } 320 put_cpu(); 321} 322 323static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, 324 struct ixgbe_ring *txr) 325{ 326 u32 txctrl; 327 int cpu = get_cpu(); 328 int q = txr - adapter->tx_ring; 329 330 if (txr->cpu != cpu) { 331 txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q)); 332 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK; 333 txctrl |= dca_get_tag(cpu); 334 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; 335 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl); 336 txr->cpu = cpu; 337 } 338 put_cpu(); 339} 340 341static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) 342{ 343 int i; 344 345 if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED)) 346 return; 347 348 for (i = 0; i < adapter->num_tx_queues; i++) { 349 adapter->tx_ring[i].cpu = -1; 350 ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]); 351 } 352 for (i = 0; i < adapter->num_rx_queues; i++) { 353 adapter->rx_ring[i].cpu = -1; 354 ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]); 355 } 356} 357 358static int __ixgbe_notify_dca(struct device *dev, void *data) 359{ 360 struct net_device *netdev = dev_get_drvdata(dev); 361 struct ixgbe_adapter *adapter = netdev_priv(netdev); 362 unsigned long event = *(unsigned long *)data; 363 364 switch (event) { 365 case DCA_PROVIDER_ADD: 366 adapter->flags |= IXGBE_FLAG_DCA_ENABLED; 367 /* Always use CB2 mode, difference is masked 368 * in the CB driver. */ 369 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); 370 if (dca_add_requester(dev) == 0) { 371 ixgbe_setup_dca(adapter); 372 break; 373 } 374 /* Fall Through since DCA is disabled. 
*/ 375 case DCA_PROVIDER_REMOVE: 376 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { 377 dca_remove_requester(dev); 378 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; 379 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1); 380 } 381 break; 382 } 383 384 return 0; 385} 386 387#endif /* CONFIG_DCA */ 388/** 389 * ixgbe_receive_skb - Send a completed packet up the stack 390 * @adapter: board private structure 391 * @skb: packet to send up 392 * @is_vlan: packet has a VLAN tag 393 * @tag: VLAN tag from descriptor 394 **/ 395static void ixgbe_receive_skb(struct ixgbe_adapter *adapter, 396 struct sk_buff *skb, bool is_vlan, 397 u16 tag) 398{ 399 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { 400 if (adapter->vlgrp && is_vlan) 401 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag); 402 else 403 netif_receive_skb(skb); 404 } else { 405 406 if (adapter->vlgrp && is_vlan) 407 vlan_hwaccel_rx(skb, adapter->vlgrp, tag); 408 else 409 netif_rx(skb); 410 } 411} 412 413/** 414 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum 415 * @adapter: address of board private structure 416 * @status_err: hardware indication of status of receive 417 * @skb: skb currently being received and modified 418 **/ 419static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter, 420 u32 status_err, 421 struct sk_buff *skb) 422{ 423 skb->ip_summed = CHECKSUM_NONE; 424 425 /* Ignore Checksum bit is set, or rx csum disabled */ 426 if ((status_err & IXGBE_RXD_STAT_IXSM) || 427 !(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED)) 428 return; 429 430 /* if IP and error */ 431 if ((status_err & IXGBE_RXD_STAT_IPCS) && 432 (status_err & IXGBE_RXDADV_ERR_IPE)) { 433 adapter->hw_csum_rx_error++; 434 return; 435 } 436 437 if (!(status_err & IXGBE_RXD_STAT_L4CS)) 438 return; 439 440 if (status_err & IXGBE_RXDADV_ERR_TCPE) { 441 adapter->hw_csum_rx_error++; 442 return; 443 } 444 445 /* It must be a TCP or UDP packet with a valid checksum */ 446 skb->ip_summed = CHECKSUM_UNNECESSARY; 447 adapter->hw_csum_rx_good++; 448} 449 450/** 451 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split 452 * @adapter: address of board private structure 453 **/ 454static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, 455 struct ixgbe_ring *rx_ring, 456 int cleaned_count) 457{ 458 struct net_device *netdev = adapter->netdev; 459 struct pci_dev *pdev = adapter->pdev; 460 union ixgbe_adv_rx_desc *rx_desc; 461 struct ixgbe_rx_buffer *rx_buffer_info; 462 struct sk_buff *skb; 463 unsigned int i; 464 unsigned int bufsz = adapter->rx_buf_len + NET_IP_ALIGN; 465 466 i = rx_ring->next_to_use; 467 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 468 469 while (cleaned_count--) { 470 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); 471 472 if (!rx_buffer_info->page && 473 (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) { 474 rx_buffer_info->page = alloc_page(GFP_ATOMIC); 475 if (!rx_buffer_info->page) { 476 adapter->alloc_rx_page_failed++; 477 goto no_buffers; 478 } 479 rx_buffer_info->page_dma = 480 pci_map_page(pdev, rx_buffer_info->page, 481 0, PAGE_SIZE, PCI_DMA_FROMDEVICE); 482 } 483 484 if (!rx_buffer_info->skb) { 485 skb = netdev_alloc_skb(netdev, bufsz); 486 487 if (!skb) { 488 adapter->alloc_rx_buff_failed++; 489 goto no_buffers; 490 } 491 492 /* 493 * Make buffer alignment 2 beyond a 16 byte boundary 494 * this will result in a 16 byte aligned IP header after 495 * the 14 byte MAC header is removed 496 */ 497 skb_reserve(skb, NET_IP_ALIGN); 498 499 rx_buffer_info->skb = skb; 500 rx_buffer_info->dma = pci_map_single(pdev, 
skb->data, 501 bufsz, 502 PCI_DMA_FROMDEVICE); 503 } 504 /* Refresh the desc even if buffer_addrs didn't change because 505 * each write-back erases this info. */ 506 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 507 rx_desc->read.pkt_addr = 508 cpu_to_le64(rx_buffer_info->page_dma); 509 rx_desc->read.hdr_addr = 510 cpu_to_le64(rx_buffer_info->dma); 511 } else { 512 rx_desc->read.pkt_addr = 513 cpu_to_le64(rx_buffer_info->dma); 514 } 515 516 i++; 517 if (i == rx_ring->count) 518 i = 0; 519 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 520 } 521no_buffers: 522 if (rx_ring->next_to_use != i) { 523 rx_ring->next_to_use = i; 524 if (i-- == 0) 525 i = (rx_ring->count - 1); 526 527 /* 528 * Force memory writes to complete before letting h/w 529 * know there are new descriptors to fetch. (Only 530 * applicable for weak-ordered memory model archs, 531 * such as IA-64). 532 */ 533 wmb(); 534 writel(i, adapter->hw.hw_addr + rx_ring->tail); 535 } 536} 537 538static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter, 539 struct ixgbe_ring *rx_ring, 540 int *work_done, int work_to_do) 541{ 542 struct net_device *netdev = adapter->netdev; 543 struct pci_dev *pdev = adapter->pdev; 544 union ixgbe_adv_rx_desc *rx_desc, *next_rxd; 545 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; 546 struct sk_buff *skb; 547 unsigned int i; 548 u32 upper_len, len, staterr; 549 u16 hdr_info, vlan_tag; 550 bool is_vlan, cleaned = false; 551 int cleaned_count = 0; 552 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 553 554 i = rx_ring->next_to_clean; 555 upper_len = 0; 556 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); 557 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 558 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 559 is_vlan = (staterr & IXGBE_RXD_STAT_VP); 560 vlan_tag = le16_to_cpu(rx_desc->wb.upper.vlan); 561 562 while (staterr & IXGBE_RXD_STAT_DD) { 563 if (*work_done >= work_to_do) 564 break; 565 (*work_done)++; 566 567 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 568 hdr_info = 569 le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info); 570 len = 571 ((hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> 572 IXGBE_RXDADV_HDRBUFLEN_SHIFT); 573 if (hdr_info & IXGBE_RXDADV_SPH) 574 adapter->rx_hdr_split++; 575 if (len > IXGBE_RX_HDR_SIZE) 576 len = IXGBE_RX_HDR_SIZE; 577 upper_len = le16_to_cpu(rx_desc->wb.upper.length); 578 } else 579 len = le16_to_cpu(rx_desc->wb.upper.length); 580 581 cleaned = true; 582 skb = rx_buffer_info->skb; 583 prefetch(skb->data - NET_IP_ALIGN); 584 rx_buffer_info->skb = NULL; 585 586 if (len && !skb_shinfo(skb)->nr_frags) { 587 pci_unmap_single(pdev, rx_buffer_info->dma, 588 adapter->rx_buf_len + NET_IP_ALIGN, 589 PCI_DMA_FROMDEVICE); 590 skb_put(skb, len); 591 } 592 593 if (upper_len) { 594 pci_unmap_page(pdev, rx_buffer_info->page_dma, 595 PAGE_SIZE, PCI_DMA_FROMDEVICE); 596 rx_buffer_info->page_dma = 0; 597 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, 598 rx_buffer_info->page, 0, upper_len); 599 rx_buffer_info->page = NULL; 600 601 skb->len += upper_len; 602 skb->data_len += upper_len; 603 skb->truesize += upper_len; 604 } 605 606 i++; 607 if (i == rx_ring->count) 608 i = 0; 609 next_buffer = &rx_ring->rx_buffer_info[i]; 610 611 next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i); 612 prefetch(next_rxd); 613 614 cleaned_count++; 615 if (staterr & IXGBE_RXD_STAT_EOP) { 616 rx_ring->stats.packets++; 617 rx_ring->stats.bytes += skb->len; 618 } else { 619 rx_buffer_info->skb = next_buffer->skb; 620 rx_buffer_info->dma = next_buffer->dma; 621 next_buffer->skb = skb; 622 
adapter->non_eop_descs++; 623 goto next_desc; 624 } 625 626 if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) { 627 dev_kfree_skb_irq(skb); 628 goto next_desc; 629 } 630 631 ixgbe_rx_checksum(adapter, staterr, skb); 632 633 /* probably a little skewed due to removing CRC */ 634 total_rx_bytes += skb->len; 635 total_rx_packets++; 636 637 skb->protocol = eth_type_trans(skb, netdev); 638 ixgbe_receive_skb(adapter, skb, is_vlan, vlan_tag); 639 netdev->last_rx = jiffies; 640 641next_desc: 642 rx_desc->wb.upper.status_error = 0; 643 644 /* return some buffers to hardware, one at a time is too slow */ 645 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { 646 ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count); 647 cleaned_count = 0; 648 } 649 650 /* use prefetched values */ 651 rx_desc = next_rxd; 652 rx_buffer_info = next_buffer; 653 654 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 655 is_vlan = (staterr & IXGBE_RXD_STAT_VP); 656 vlan_tag = le16_to_cpu(rx_desc->wb.upper.vlan); 657 } 658 659 rx_ring->next_to_clean = i; 660 cleaned_count = IXGBE_DESC_UNUSED(rx_ring); 661 662 if (cleaned_count) 663 ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count); 664 665 adapter->net_stats.rx_bytes += total_rx_bytes; 666 adapter->net_stats.rx_packets += total_rx_packets; 667 668 rx_ring->total_packets += total_rx_packets; 669 rx_ring->total_bytes += total_rx_bytes; 670 adapter->net_stats.rx_bytes += total_rx_bytes; 671 adapter->net_stats.rx_packets += total_rx_packets; 672 673 return cleaned; 674} 675 676static int ixgbe_clean_rxonly(struct napi_struct *, int); 677/** 678 * ixgbe_configure_msix - Configure MSI-X hardware 679 * @adapter: board private structure 680 * 681 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X 682 * interrupts. 683 **/ 684static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) 685{ 686 struct ixgbe_q_vector *q_vector; 687 int i, j, q_vectors, v_idx, r_idx; 688 u32 mask; 689 690 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 691 692 /* Populate the IVAR table and set the ITR values to the 693 * corresponding register. 694 */ 695 for (v_idx = 0; v_idx < q_vectors; v_idx++) { 696 q_vector = &adapter->q_vector[v_idx]; 697 /* XXX for_each_bit(...) 
*/ 698 r_idx = find_first_bit(q_vector->rxr_idx, 699 adapter->num_rx_queues); 700 701 for (i = 0; i < q_vector->rxr_count; i++) { 702 j = adapter->rx_ring[r_idx].reg_idx; 703 ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx); 704 r_idx = find_next_bit(q_vector->rxr_idx, 705 adapter->num_rx_queues, 706 r_idx + 1); 707 } 708 r_idx = find_first_bit(q_vector->txr_idx, 709 adapter->num_tx_queues); 710 711 for (i = 0; i < q_vector->txr_count; i++) { 712 j = adapter->tx_ring[r_idx].reg_idx; 713 ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx); 714 r_idx = find_next_bit(q_vector->txr_idx, 715 adapter->num_tx_queues, 716 r_idx + 1); 717 } 718 719 /* if this is a tx only vector use half the irq (tx) rate */ 720 if (q_vector->txr_count && !q_vector->rxr_count) 721 q_vector->eitr = adapter->tx_eitr; 722 else 723 /* rx only or mixed */ 724 q_vector->eitr = adapter->rx_eitr; 725 726 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 727 EITR_INTS_PER_SEC_TO_REG(q_vector->eitr)); 728 } 729 730 ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx); 731 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); 732 733 /* set up to autoclear timer, lsc, and the vectors */ 734 mask = IXGBE_EIMS_ENABLE_MASK; 735 mask &= ~IXGBE_EIMS_OTHER; 736 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); 737} 738 739enum latency_range { 740 lowest_latency = 0, 741 low_latency = 1, 742 bulk_latency = 2, 743 latency_invalid = 255 744}; 745 746/** 747 * ixgbe_update_itr - update the dynamic ITR value based on statistics 748 * @adapter: pointer to adapter 749 * @eitr: eitr setting (ints per sec) to give last timeslice 750 * @itr_setting: current throttle rate in ints/second 751 * @packets: the number of packets during this measurement interval 752 * @bytes: the number of bytes during this measurement interval 753 * 754 * Stores a new ITR value based on packets and byte 755 * counts during the last interrupt. The advantage of per interrupt 756 * computation is faster updates and more accurate ITR for the current 757 * traffic pattern. Constants in this function were computed 758 * based on theoretical maximum wire speed and thresholds were set based 759 * on testing data as well as attempting to minimize response time 760 * while increasing bulk throughput. 761 * this functionality is controlled by the InterruptThrottleRate module 762 * parameter (see ixgbe_param.c) 763 **/ 764static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter, 765 u32 eitr, u8 itr_setting, 766 int packets, int bytes) 767{ 768 unsigned int retval = itr_setting; 769 u32 timepassed_us; 770 u64 bytes_perint; 771 772 if (packets == 0) 773 goto update_itr_done; 774 775 776 /* simple throttlerate management 777 * 0-20MB/s lowest (100000 ints/s) 778 * 20-100MB/s low (20000 ints/s) 779 * 100-1249MB/s bulk (8000 ints/s) 780 */ 781 /* what was last interrupt timeslice? 
*/ 782 timepassed_us = 1000000/eitr; 783 bytes_perint = bytes / timepassed_us; /* bytes/usec */ 784 785 switch (itr_setting) { 786 case lowest_latency: 787 if (bytes_perint > adapter->eitr_low) 788 retval = low_latency; 789 break; 790 case low_latency: 791 if (bytes_perint > adapter->eitr_high) 792 retval = bulk_latency; 793 else if (bytes_perint <= adapter->eitr_low) 794 retval = lowest_latency; 795 break; 796 case bulk_latency: 797 if (bytes_perint <= adapter->eitr_high) 798 retval = low_latency; 799 break; 800 } 801 802update_itr_done: 803 return retval; 804} 805 806static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) 807{ 808 struct ixgbe_adapter *adapter = q_vector->adapter; 809 struct ixgbe_hw *hw = &adapter->hw; 810 u32 new_itr; 811 u8 current_itr, ret_itr; 812 int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) / 813 sizeof(struct ixgbe_q_vector); 814 struct ixgbe_ring *rx_ring, *tx_ring; 815 816 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 817 for (i = 0; i < q_vector->txr_count; i++) { 818 tx_ring = &(adapter->tx_ring[r_idx]); 819 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, 820 q_vector->tx_eitr, 821 tx_ring->total_packets, 822 tx_ring->total_bytes); 823 /* if the result for this queue would decrease interrupt 824 * rate for this vector then use that result */ 825 q_vector->tx_eitr = ((q_vector->tx_eitr > ret_itr) ? 826 q_vector->tx_eitr - 1 : ret_itr); 827 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, 828 r_idx + 1); 829 } 830 831 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 832 for (i = 0; i < q_vector->rxr_count; i++) { 833 rx_ring = &(adapter->rx_ring[r_idx]); 834 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, 835 q_vector->rx_eitr, 836 rx_ring->total_packets, 837 rx_ring->total_bytes); 838 /* if the result for this queue would decrease interrupt 839 * rate for this vector then use that result */ 840 q_vector->rx_eitr = ((q_vector->rx_eitr > ret_itr) ? 
841 q_vector->rx_eitr - 1 : ret_itr); 842 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, 843 r_idx + 1); 844 } 845 846 current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr); 847 848 switch (current_itr) { 849 /* counts and packets in update_itr are dependent on these numbers */ 850 case lowest_latency: 851 new_itr = 100000; 852 break; 853 case low_latency: 854 new_itr = 20000; /* aka hwitr = ~200 */ 855 break; 856 case bulk_latency: 857 default: 858 new_itr = 8000; 859 break; 860 } 861 862 if (new_itr != q_vector->eitr) { 863 u32 itr_reg; 864 /* do an exponential smoothing */ 865 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); 866 q_vector->eitr = new_itr; 867 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr); 868 /* must write high and low 16 bits to reset counter */ 869 DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx, 870 itr_reg); 871 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg | (itr_reg)<<16); 872 } 873 874 return; 875} 876 877static irqreturn_t ixgbe_msix_lsc(int irq, void *data) 878{ 879 struct net_device *netdev = data; 880 struct ixgbe_adapter *adapter = netdev_priv(netdev); 881 struct ixgbe_hw *hw = &adapter->hw; 882 u32 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 883 884 if (eicr & IXGBE_EICR_LSC) { 885 adapter->lsc_int++; 886 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 887 mod_timer(&adapter->watchdog_timer, jiffies); 888 } 889 890 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 891 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); 892 893 return IRQ_HANDLED; 894} 895 896static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data) 897{ 898 struct ixgbe_q_vector *q_vector = data; 899 struct ixgbe_adapter *adapter = q_vector->adapter; 900 struct ixgbe_ring *txr; 901 int i, r_idx; 902 903 if (!q_vector->txr_count) 904 return IRQ_HANDLED; 905 906 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 907 for (i = 0; i < q_vector->txr_count; i++) { 908 txr = &(adapter->tx_ring[r_idx]); 909#ifdef CONFIG_DCA 910 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 911 ixgbe_update_tx_dca(adapter, txr); 912#endif 913 txr->total_bytes = 0; 914 txr->total_packets = 0; 915 ixgbe_clean_tx_irq(adapter, txr); 916 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, 917 r_idx + 1); 918 } 919 920 return IRQ_HANDLED; 921} 922 923/** 924 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues) 925 * @irq: unused 926 * @data: pointer to our q_vector struct for this interrupt vector 927 **/ 928static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data) 929{ 930 struct ixgbe_q_vector *q_vector = data; 931 struct ixgbe_adapter *adapter = q_vector->adapter; 932 struct ixgbe_ring *rxr; 933 int r_idx; 934 935 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 936 if (!q_vector->rxr_count) 937 return IRQ_HANDLED; 938 939 rxr = &(adapter->rx_ring[r_idx]); 940 /* disable interrupts on this vector only */ 941 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rxr->v_idx); 942 rxr->total_bytes = 0; 943 rxr->total_packets = 0; 944 netif_rx_schedule(adapter->netdev, &q_vector->napi); 945 946 return IRQ_HANDLED; 947} 948 949static irqreturn_t ixgbe_msix_clean_many(int irq, void *data) 950{ 951 ixgbe_msix_clean_rx(irq, data); 952 ixgbe_msix_clean_tx(irq, data); 953 954 return IRQ_HANDLED; 955} 956 957/** 958 * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine 959 * @napi: napi struct with our devices info in it 960 * @budget: amount of work driver is allowed to do this pass, in packets 961 * 962 **/ 963static int 
ixgbe_clean_rxonly(struct napi_struct *napi, int budget) 964{ 965 struct ixgbe_q_vector *q_vector = 966 container_of(napi, struct ixgbe_q_vector, napi); 967 struct ixgbe_adapter *adapter = q_vector->adapter; 968 struct ixgbe_ring *rxr; 969 int work_done = 0; 970 long r_idx; 971 972 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 973 rxr = &(adapter->rx_ring[r_idx]); 974#ifdef CONFIG_DCA 975 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 976 ixgbe_update_rx_dca(adapter, rxr); 977#endif 978 979 ixgbe_clean_rx_irq(adapter, rxr, &work_done, budget); 980 981 /* If all Rx work done, exit the polling mode */ 982 if (work_done < budget) { 983 netif_rx_complete(adapter->netdev, napi); 984 if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS) 985 ixgbe_set_itr_msix(q_vector); 986 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 987 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->v_idx); 988 } 989 990 return work_done; 991} 992 993static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx, 994 int r_idx) 995{ 996 a->q_vector[v_idx].adapter = a; 997 set_bit(r_idx, a->q_vector[v_idx].rxr_idx); 998 a->q_vector[v_idx].rxr_count++; 999 a->rx_ring[r_idx].v_idx = 1 << v_idx; 1000} 1001 1002static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, 1003 int r_idx) 1004{ 1005 a->q_vector[v_idx].adapter = a; 1006 set_bit(r_idx, a->q_vector[v_idx].txr_idx); 1007 a->q_vector[v_idx].txr_count++; 1008 a->tx_ring[r_idx].v_idx = 1 << v_idx; 1009} 1010 1011/** 1012 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors 1013 * @adapter: board private structure to initialize 1014 * @vectors: allotted vector count for descriptor rings 1015 * 1016 * This function maps descriptor rings to the queue-specific vectors 1017 * we were allotted through the MSI-X enabling code. Ideally, we'd have 1018 * one vector per ring/queue, but on a constrained vector budget, we 1019 * group the rings as "efficiently" as possible. You would add new 1020 * mapping configurations in here. 1021 **/ 1022static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter, 1023 int vectors) 1024{ 1025 int v_start = 0; 1026 int rxr_idx = 0, txr_idx = 0; 1027 int rxr_remaining = adapter->num_rx_queues; 1028 int txr_remaining = adapter->num_tx_queues; 1029 int i, j; 1030 int rqpv, tqpv; 1031 int err = 0; 1032 1033 /* No mapping required if MSI-X is disabled. */ 1034 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) 1035 goto out; 1036 1037 /* 1038 * The ideal configuration... 1039 * We have enough vectors to map one per queue. 1040 */ 1041 if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) { 1042 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) 1043 map_vector_to_rxq(adapter, v_start, rxr_idx); 1044 1045 for (; txr_idx < txr_remaining; v_start++, txr_idx++) 1046 map_vector_to_txq(adapter, v_start, txr_idx); 1047 1048 goto out; 1049 } 1050 1051 /* 1052 * If we don't have enough vectors for a 1-to-1 1053 * mapping, we'll have to group them so there are 1054 * multiple queues per vector. 1055 */ 1056 /* Re-adjusting *qpv takes care of the remainder. 
*/ 1057 for (i = v_start; i < vectors; i++) { 1058 rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i); 1059 for (j = 0; j < rqpv; j++) { 1060 map_vector_to_rxq(adapter, i, rxr_idx); 1061 rxr_idx++; 1062 rxr_remaining--; 1063 } 1064 } 1065 for (i = v_start; i < vectors; i++) { 1066 tqpv = DIV_ROUND_UP(txr_remaining, vectors - i); 1067 for (j = 0; j < tqpv; j++) { 1068 map_vector_to_txq(adapter, i, txr_idx); 1069 txr_idx++; 1070 txr_remaining--; 1071 } 1072 } 1073 1074out: 1075 return err; 1076} 1077 1078/** 1079 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts 1080 * @adapter: board private structure 1081 * 1082 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests 1083 * interrupts from the kernel. 1084 **/ 1085static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) 1086{ 1087 struct net_device *netdev = adapter->netdev; 1088 irqreturn_t (*handler)(int, void *); 1089 int i, vector, q_vectors, err; 1090 1091 /* Decrement for Other and TCP Timer vectors */ 1092 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1093 1094 /* Map the Tx/Rx rings to the vectors we were allotted. */ 1095 err = ixgbe_map_rings_to_vectors(adapter, q_vectors); 1096 if (err) 1097 goto out; 1098 1099#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \ 1100 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \ 1101 &ixgbe_msix_clean_many) 1102 for (vector = 0; vector < q_vectors; vector++) { 1103 handler = SET_HANDLER(&adapter->q_vector[vector]); 1104 sprintf(adapter->name[vector], "%s:v%d-%s", 1105 netdev->name, vector, 1106 (handler == &ixgbe_msix_clean_rx) ? "Rx" : 1107 ((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx")); 1108 err = request_irq(adapter->msix_entries[vector].vector, 1109 handler, 0, adapter->name[vector], 1110 &(adapter->q_vector[vector])); 1111 if (err) { 1112 DPRINTK(PROBE, ERR, 1113 "request_irq failed for MSIX interrupt " 1114 "Error: %d\n", err); 1115 goto free_queue_irqs; 1116 } 1117 } 1118 1119 sprintf(adapter->name[vector], "%s:lsc", netdev->name); 1120 err = request_irq(adapter->msix_entries[vector].vector, 1121 &ixgbe_msix_lsc, 0, adapter->name[vector], netdev); 1122 if (err) { 1123 DPRINTK(PROBE, ERR, 1124 "request_irq for msix_lsc failed: %d\n", err); 1125 goto free_queue_irqs; 1126 } 1127 1128 return 0; 1129 1130free_queue_irqs: 1131 for (i = vector - 1; i >= 0; i--) 1132 free_irq(adapter->msix_entries[--vector].vector, 1133 &(adapter->q_vector[i])); 1134 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; 1135 pci_disable_msix(adapter->pdev); 1136 kfree(adapter->msix_entries); 1137 adapter->msix_entries = NULL; 1138out: 1139 return err; 1140} 1141 1142static void ixgbe_set_itr(struct ixgbe_adapter *adapter) 1143{ 1144 struct ixgbe_hw *hw = &adapter->hw; 1145 struct ixgbe_q_vector *q_vector = adapter->q_vector; 1146 u8 current_itr; 1147 u32 new_itr = q_vector->eitr; 1148 struct ixgbe_ring *rx_ring = &adapter->rx_ring[0]; 1149 struct ixgbe_ring *tx_ring = &adapter->tx_ring[0]; 1150 1151 q_vector->tx_eitr = ixgbe_update_itr(adapter, new_itr, 1152 q_vector->tx_eitr, 1153 tx_ring->total_packets, 1154 tx_ring->total_bytes); 1155 q_vector->rx_eitr = ixgbe_update_itr(adapter, new_itr, 1156 q_vector->rx_eitr, 1157 rx_ring->total_packets, 1158 rx_ring->total_bytes); 1159 1160 current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr); 1161 1162 switch (current_itr) { 1163 /* counts and packets in update_itr are dependent on these numbers */ 1164 case lowest_latency: 1165 new_itr = 100000; 1166 break; 1167 case low_latency: 1168 new_itr = 20000; /* aka hwitr = ~200 */ 
1169 break; 1170 case bulk_latency: 1171 new_itr = 8000; 1172 break; 1173 default: 1174 break; 1175 } 1176 1177 if (new_itr != q_vector->eitr) { 1178 u32 itr_reg; 1179 /* do an exponential smoothing */ 1180 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); 1181 q_vector->eitr = new_itr; 1182 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr); 1183 /* must write high and low 16 bits to reset counter */ 1184 IXGBE_WRITE_REG(hw, IXGBE_EITR(0), itr_reg | (itr_reg)<<16); 1185 } 1186 1187 return; 1188} 1189 1190static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter); 1191 1192/** 1193 * ixgbe_intr - legacy mode Interrupt Handler 1194 * @irq: interrupt number 1195 * @data: pointer to a network interface device structure 1196 * @pt_regs: CPU registers structure 1197 **/ 1198static irqreturn_t ixgbe_intr(int irq, void *data) 1199{ 1200 struct net_device *netdev = data; 1201 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1202 struct ixgbe_hw *hw = &adapter->hw; 1203 u32 eicr; 1204 1205 1206 /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read 1207 * therefore no explict interrupt disable is necessary */ 1208 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 1209 if (!eicr) 1210 return IRQ_NONE; /* Not our interrupt */ 1211 1212 if (eicr & IXGBE_EICR_LSC) { 1213 adapter->lsc_int++; 1214 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1215 mod_timer(&adapter->watchdog_timer, jiffies); 1216 } 1217 1218 1219 if (netif_rx_schedule_prep(netdev, &adapter->q_vector[0].napi)) { 1220 adapter->tx_ring[0].total_packets = 0; 1221 adapter->tx_ring[0].total_bytes = 0; 1222 adapter->rx_ring[0].total_packets = 0; 1223 adapter->rx_ring[0].total_bytes = 0; 1224 /* would disable interrupts here but EIAM disabled it */ 1225 __netif_rx_schedule(netdev, &adapter->q_vector[0].napi); 1226 } 1227 1228 return IRQ_HANDLED; 1229} 1230 1231static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter) 1232{ 1233 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1234 1235 for (i = 0; i < q_vectors; i++) { 1236 struct ixgbe_q_vector *q_vector = &adapter->q_vector[i]; 1237 bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES); 1238 bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES); 1239 q_vector->rxr_count = 0; 1240 q_vector->txr_count = 0; 1241 } 1242} 1243 1244/** 1245 * ixgbe_request_irq - initialize interrupts 1246 * @adapter: board private structure 1247 * 1248 * Attempts to configure interrupts using the best available 1249 * capabilities of the hardware and kernel. 
1250 **/ 1251static int ixgbe_request_irq(struct ixgbe_adapter *adapter) 1252{ 1253 struct net_device *netdev = adapter->netdev; 1254 int err; 1255 1256 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 1257 err = ixgbe_request_msix_irqs(adapter); 1258 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { 1259 err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0, 1260 netdev->name, netdev); 1261 } else { 1262 err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED, 1263 netdev->name, netdev); 1264 } 1265 1266 if (err) 1267 DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err); 1268 1269 return err; 1270} 1271 1272static void ixgbe_free_irq(struct ixgbe_adapter *adapter) 1273{ 1274 struct net_device *netdev = adapter->netdev; 1275 1276 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 1277 int i, q_vectors; 1278 1279 q_vectors = adapter->num_msix_vectors; 1280 1281 i = q_vectors - 1; 1282 free_irq(adapter->msix_entries[i].vector, netdev); 1283 1284 i--; 1285 for (; i >= 0; i--) { 1286 free_irq(adapter->msix_entries[i].vector, 1287 &(adapter->q_vector[i])); 1288 } 1289 1290 ixgbe_reset_q_vectors(adapter); 1291 } else { 1292 free_irq(adapter->pdev->irq, netdev); 1293 } 1294} 1295 1296/** 1297 * ixgbe_irq_disable - Mask off interrupt generation on the NIC 1298 * @adapter: board private structure 1299 **/ 1300static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) 1301{ 1302 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); 1303 IXGBE_WRITE_FLUSH(&adapter->hw); 1304 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 1305 int i; 1306 for (i = 0; i < adapter->num_msix_vectors; i++) 1307 synchronize_irq(adapter->msix_entries[i].vector); 1308 } else { 1309 synchronize_irq(adapter->pdev->irq); 1310 } 1311} 1312 1313/** 1314 * ixgbe_irq_enable - Enable default interrupt generation settings 1315 * @adapter: board private structure 1316 **/ 1317static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter) 1318{ 1319 u32 mask; 1320 mask = IXGBE_EIMS_ENABLE_MASK; 1321 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); 1322 IXGBE_WRITE_FLUSH(&adapter->hw); 1323} 1324 1325/** 1326 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts 1327 * 1328 **/ 1329static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) 1330{ 1331 struct ixgbe_hw *hw = &adapter->hw; 1332 1333 IXGBE_WRITE_REG(hw, IXGBE_EITR(0), 1334 EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr)); 1335 1336 ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0); 1337 ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(0), 0); 1338 1339 map_vector_to_rxq(adapter, 0, 0); 1340 map_vector_to_txq(adapter, 0, 0); 1341 1342 DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n"); 1343} 1344 1345/** 1346 * ixgbe_configure_tx - Configure 8254x Transmit Unit after Reset 1347 * @adapter: board private structure 1348 * 1349 * Configure the Tx unit of the MAC after a reset. 
1350 **/ 1351static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) 1352{ 1353 u64 tdba; 1354 struct ixgbe_hw *hw = &adapter->hw; 1355 u32 i, j, tdlen, txctrl; 1356 1357 /* Setup the HW Tx Head and Tail descriptor pointers */ 1358 for (i = 0; i < adapter->num_tx_queues; i++) { 1359 j = adapter->tx_ring[i].reg_idx; 1360 tdba = adapter->tx_ring[i].dma; 1361 tdlen = adapter->tx_ring[i].count * 1362 sizeof(union ixgbe_adv_tx_desc); 1363 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j), 1364 (tdba & DMA_32BIT_MASK)); 1365 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32)); 1366 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen); 1367 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0); 1368 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); 1369 adapter->tx_ring[i].head = IXGBE_TDH(j); 1370 adapter->tx_ring[i].tail = IXGBE_TDT(j); 1371 /* Disable Tx Head Writeback RO bit, since this hoses 1372 * bookkeeping if things aren't delivered in order. 1373 */ 1374 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); 1375 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; 1376 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl); 1377 } 1378} 1379 1380#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ 1381 (((S) & (PAGE_SIZE - 1)) ? 1 : 0)) 1382 1383#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 1384/** 1385 * ixgbe_configure_rx - Configure 8254x Receive Unit after Reset 1386 * @adapter: board private structure 1387 * 1388 * Configure the Rx unit of the MAC after a reset. 1389 **/ 1390static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) 1391{ 1392 u64 rdba; 1393 struct ixgbe_hw *hw = &adapter->hw; 1394 struct net_device *netdev = adapter->netdev; 1395 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 1396 int i, j; 1397 u32 rdlen, rxctrl, rxcsum; 1398 u32 random[10]; 1399 u32 fctrl, hlreg0; 1400 u32 pages; 1401 u32 reta = 0, mrqc, srrctl; 1402 1403 /* Decide whether to use packet split mode or not */ 1404 if (netdev->mtu > ETH_DATA_LEN) 1405 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; 1406 else 1407 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; 1408 1409 /* Set the RX buffer length according to the mode */ 1410 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 1411 adapter->rx_buf_len = IXGBE_RX_HDR_SIZE; 1412 } else { 1413 if (netdev->mtu <= ETH_DATA_LEN) 1414 adapter->rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; 1415 else 1416 adapter->rx_buf_len = ALIGN(max_frame, 1024); 1417 } 1418 1419 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 1420 fctrl |= IXGBE_FCTRL_BAM; 1421 fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */ 1422 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl); 1423 1424 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); 1425 if (adapter->netdev->mtu <= ETH_DATA_LEN) 1426 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; 1427 else 1428 hlreg0 |= IXGBE_HLREG0_JUMBOEN; 1429 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); 1430 1431 pages = PAGE_USE_COUNT(adapter->netdev->mtu); 1432 1433 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(0)); 1434 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; 1435 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; 1436 1437 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 1438 srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1439 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; 1440 srrctl |= ((IXGBE_RX_HDR_SIZE << 1441 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & 1442 IXGBE_SRRCTL_BSIZEHDR_MASK); 1443 } else { 1444 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 1445 1446 if (adapter->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE) 1447 srrctl |= 1448 IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1449 else 1450 srrctl |= 1451 adapter->rx_buf_len >> 
IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1452 } 1453 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(0), srrctl); 1454 1455 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc); 1456 /* disable receives while setting up the descriptors */ 1457 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 1458 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); 1459 1460 /* Setup the HW Rx Head and Tail Descriptor Pointers and 1461 * the Base and Length of the Rx Descriptor Ring */ 1462 for (i = 0; i < adapter->num_rx_queues; i++) { 1463 rdba = adapter->rx_ring[i].dma; 1464 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i), (rdba & DMA_32BIT_MASK)); 1465 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32)); 1466 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i), rdlen); 1467 IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0); 1468 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0); 1469 adapter->rx_ring[i].head = IXGBE_RDH(i); 1470 adapter->rx_ring[i].tail = IXGBE_RDT(i); 1471 } 1472 1473 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 1474 /* Fill out redirection table */ 1475 for (i = 0, j = 0; i < 128; i++, j++) { 1476 if (j == adapter->ring_feature[RING_F_RSS].indices) 1477 j = 0; 1478 /* reta = 4-byte sliding window of 1479 * 0x00..(indices-1)(indices-1)00..etc. */ 1480 reta = (reta << 8) | (j * 0x11); 1481 if ((i & 3) == 3) 1482 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); 1483 } 1484 1485 /* Fill out hash function seeds */ 1486 /* XXX use a random constant here to glue certain flows */ 1487 get_random_bytes(&random[0], 40); 1488 for (i = 0; i < 10; i++) 1489 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]); 1490 1491 mrqc = IXGBE_MRQC_RSSEN 1492 /* Perform hash on these packet types */ 1493 | IXGBE_MRQC_RSS_FIELD_IPV4 1494 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP 1495 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP 1496 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 1497 | IXGBE_MRQC_RSS_FIELD_IPV6_EX 1498 | IXGBE_MRQC_RSS_FIELD_IPV6 1499 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP 1500 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP 1501 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 1502 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 1503 } 1504 1505 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 1506 1507 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED || 1508 adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) { 1509 /* Disable indicating checksum in descriptor, enables 1510 * RSS hash */ 1511 rxcsum |= IXGBE_RXCSUM_PCSD; 1512 } 1513 if (!(rxcsum & IXGBE_RXCSUM_PCSD)) { 1514 /* Enable IPv4 payload checksum for UDP fragments 1515 * if PCSD is not set */ 1516 rxcsum |= IXGBE_RXCSUM_IPPCSE; 1517 } 1518 1519 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 1520} 1521 1522static void ixgbe_vlan_rx_register(struct net_device *netdev, 1523 struct vlan_group *grp) 1524{ 1525 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1526 u32 ctrl; 1527 1528 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1529 ixgbe_irq_disable(adapter); 1530 adapter->vlgrp = grp; 1531 1532 if (grp) { 1533 /* enable VLAN tag insert/strip */ 1534 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL); 1535 ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE; 1536 ctrl &= ~IXGBE_VLNCTRL_CFIEN; 1537 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl); 1538 } 1539 1540 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1541 ixgbe_irq_enable(adapter); 1542} 1543 1544static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 1545{ 1546 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1547 1548 /* add VID to filter table */ 1549 ixgbe_set_vfta(&adapter->hw, vid, 0, true); 1550} 1551 1552static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 1553{ 1554 struct ixgbe_adapter 
*adapter = netdev_priv(netdev); 1555 1556 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1557 ixgbe_irq_disable(adapter); 1558 1559 vlan_group_set_device(adapter->vlgrp, vid, NULL); 1560 1561 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1562 ixgbe_irq_enable(adapter); 1563 1564 /* remove VID from filter table */ 1565 ixgbe_set_vfta(&adapter->hw, vid, 0, false); 1566} 1567 1568static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) 1569{ 1570 ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp); 1571 1572 if (adapter->vlgrp) { 1573 u16 vid; 1574 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 1575 if (!vlan_group_get_device(adapter->vlgrp, vid)) 1576 continue; 1577 ixgbe_vlan_rx_add_vid(adapter->netdev, vid); 1578 } 1579 } 1580} 1581 1582/** 1583 * ixgbe_set_multi - Multicast and Promiscuous mode set 1584 * @netdev: network interface device structure 1585 * 1586 * The set_multi entry point is called whenever the multicast address 1587 * list or the network interface flags are updated. This routine is 1588 * responsible for configuring the hardware for proper multicast, 1589 * promiscuous mode, and all-multi behavior. 1590 **/ 1591static void ixgbe_set_multi(struct net_device *netdev) 1592{ 1593 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1594 struct ixgbe_hw *hw = &adapter->hw; 1595 struct dev_mc_list *mc_ptr; 1596 u8 *mta_list; 1597 u32 fctrl; 1598 int i; 1599 1600 /* Check for Promiscuous and All Multicast modes */ 1601 1602 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 1603 1604 if (netdev->flags & IFF_PROMISC) { 1605 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 1606 } else if (netdev->flags & IFF_ALLMULTI) { 1607 fctrl |= IXGBE_FCTRL_MPE; 1608 fctrl &= ~IXGBE_FCTRL_UPE; 1609 } else { 1610 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 1611 } 1612 1613 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 1614 1615 if (netdev->mc_count) { 1616 mta_list = kcalloc(netdev->mc_count, ETH_ALEN, GFP_ATOMIC); 1617 if (!mta_list) 1618 return; 1619 1620 /* Shared function expects packed array of only addresses. 
*/ 1621 mc_ptr = netdev->mc_list; 1622 1623 for (i = 0; i < netdev->mc_count; i++) { 1624 if (!mc_ptr) 1625 break; 1626 memcpy(mta_list + (i * ETH_ALEN), mc_ptr->dmi_addr, 1627 ETH_ALEN); 1628 mc_ptr = mc_ptr->next; 1629 } 1630 1631 ixgbe_update_mc_addr_list(hw, mta_list, i, 0); 1632 kfree(mta_list); 1633 } else { 1634 ixgbe_update_mc_addr_list(hw, NULL, 0, 0); 1635 } 1636 1637} 1638 1639static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) 1640{ 1641 int q_idx; 1642 struct ixgbe_q_vector *q_vector; 1643 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1644 1645 /* legacy and MSI only use one vector */ 1646 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) 1647 q_vectors = 1; 1648 1649 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 1650 q_vector = &adapter->q_vector[q_idx]; 1651 if (!q_vector->rxr_count) 1652 continue; 1653 napi_enable(&q_vector->napi); 1654 } 1655} 1656 1657static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) 1658{ 1659 int q_idx; 1660 struct ixgbe_q_vector *q_vector; 1661 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1662 1663 /* legacy and MSI only use one vector */ 1664 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) 1665 q_vectors = 1; 1666 1667 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 1668 q_vector = &adapter->q_vector[q_idx]; 1669 if (!q_vector->rxr_count) 1670 continue; 1671 napi_disable(&q_vector->napi); 1672 } 1673} 1674 1675static void ixgbe_configure(struct ixgbe_adapter *adapter) 1676{ 1677 struct net_device *netdev = adapter->netdev; 1678 int i; 1679 1680 ixgbe_set_multi(netdev); 1681 1682 ixgbe_restore_vlan(adapter); 1683 1684 ixgbe_configure_tx(adapter); 1685 ixgbe_configure_rx(adapter); 1686 for (i = 0; i < adapter->num_rx_queues; i++) 1687 ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i], 1688 (adapter->rx_ring[i].count - 1)); 1689} 1690 1691static int ixgbe_up_complete(struct ixgbe_adapter *adapter) 1692{ 1693 struct net_device *netdev = adapter->netdev; 1694 struct ixgbe_hw *hw = &adapter->hw; 1695 int i, j = 0; 1696 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 1697 u32 txdctl, rxdctl, mhadd; 1698 u32 gpie; 1699 1700 ixgbe_get_hw_control(adapter); 1701 1702 if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) || 1703 (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) { 1704 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 1705 gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME | 1706 IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD); 1707 } else { 1708 /* MSI only */ 1709 gpie = 0; 1710 } 1711 /* XXX: to interrupt immediately for EICS writes, enable this */ 1712 /* gpie |= IXGBE_GPIE_EIMEN; */ 1713 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 1714 } 1715 1716 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { 1717 /* legacy interrupts, use EIAM to auto-mask when reading EICR, 1718 * specifically only auto mask tx and rx interrupts */ 1719 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 1720 } 1721 1722 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); 1723 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { 1724 mhadd &= ~IXGBE_MHADD_MFS_MASK; 1725 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT; 1726 1727 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); 1728 } 1729 1730 for (i = 0; i < adapter->num_tx_queues; i++) { 1731 j = adapter->tx_ring[i].reg_idx; 1732 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); 1733 txdctl |= IXGBE_TXDCTL_ENABLE; 1734 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); 1735 } 1736 1737 for (i = 0; i < adapter->num_rx_queues; i++) { 1738 j = adapter->rx_ring[i].reg_idx; 1739 rxdctl = IXGBE_READ_REG(hw, 
IXGBE_RXDCTL(j)); 1740 /* enable PTHRESH=32 descriptors (half the internal cache) 1741 * and HTHRESH=0 descriptors (to minimize latency on fetch), 1742 * this also removes a pesky rx_no_buffer_count increment */ 1743 rxdctl |= 0x0020; 1744 rxdctl |= IXGBE_RXDCTL_ENABLE; 1745 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl); 1746 } 1747 /* enable all receives */ 1748 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 1749 rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN); 1750 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxdctl); 1751 1752 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) 1753 ixgbe_configure_msix(adapter); 1754 else 1755 ixgbe_configure_msi_and_legacy(adapter); 1756 1757 clear_bit(__IXGBE_DOWN, &adapter->state); 1758 ixgbe_napi_enable_all(adapter); 1759 1760 /* clear any pending interrupts, may auto mask */ 1761 IXGBE_READ_REG(hw, IXGBE_EICR); 1762 1763 ixgbe_irq_enable(adapter); 1764 1765 /* bring the link up in the watchdog, this could race with our first 1766 * link up interrupt but shouldn't be a problem */ 1767 mod_timer(&adapter->watchdog_timer, jiffies); 1768 return 0; 1769} 1770 1771void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) 1772{ 1773 WARN_ON(in_interrupt()); 1774 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) 1775 msleep(1); 1776 ixgbe_down(adapter); 1777 ixgbe_up(adapter); 1778 clear_bit(__IXGBE_RESETTING, &adapter->state); 1779} 1780 1781int ixgbe_up(struct ixgbe_adapter *adapter) 1782{ 1783 /* hardware has been reset, we need to reload some things */ 1784 ixgbe_configure(adapter); 1785 1786 return ixgbe_up_complete(adapter); 1787} 1788 1789void ixgbe_reset(struct ixgbe_adapter *adapter) 1790{ 1791 if (ixgbe_init_hw(&adapter->hw)) 1792 DPRINTK(PROBE, ERR, "Hardware Error\n"); 1793 1794 /* reprogram the RAR[0] in case user changed it. 
*/ 1795 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV); 1796 1797} 1798 1799#ifdef CONFIG_PM 1800static int ixgbe_resume(struct pci_dev *pdev) 1801{ 1802 struct net_device *netdev = pci_get_drvdata(pdev); 1803 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1804 u32 err; 1805 1806 pci_set_power_state(pdev, PCI_D0); 1807 pci_restore_state(pdev); 1808 err = pci_enable_device(pdev); 1809 if (err) { 1810 printk(KERN_ERR "ixgbe: Cannot enable PCI device from " \ 1811 "suspend\n"); 1812 return err; 1813 } 1814 pci_set_master(pdev); 1815 1816 pci_enable_wake(pdev, PCI_D3hot, 0); 1817 pci_enable_wake(pdev, PCI_D3cold, 0); 1818 1819 if (netif_running(netdev)) { 1820 err = ixgbe_request_irq(adapter); 1821 if (err) 1822 return err; 1823 } 1824 1825 ixgbe_reset(adapter); 1826 1827 if (netif_running(netdev)) 1828 ixgbe_up(adapter); 1829 1830 netif_device_attach(netdev); 1831 1832 return 0; 1833} 1834#endif 1835 1836/** 1837 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue 1838 * @adapter: board private structure 1839 * @rx_ring: ring to free buffers from 1840 **/ 1841static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, 1842 struct ixgbe_ring *rx_ring) 1843{ 1844 struct pci_dev *pdev = adapter->pdev; 1845 unsigned long size; 1846 unsigned int i; 1847 1848 /* Free all the Rx ring sk_buffs */ 1849 1850 for (i = 0; i < rx_ring->count; i++) { 1851 struct ixgbe_rx_buffer *rx_buffer_info; 1852 1853 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 1854 if (rx_buffer_info->dma) { 1855 pci_unmap_single(pdev, rx_buffer_info->dma, 1856 adapter->rx_buf_len, 1857 PCI_DMA_FROMDEVICE); 1858 rx_buffer_info->dma = 0; 1859 } 1860 if (rx_buffer_info->skb) { 1861 dev_kfree_skb(rx_buffer_info->skb); 1862 rx_buffer_info->skb = NULL; 1863 } 1864 if (!rx_buffer_info->page) 1865 continue; 1866 pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE, 1867 PCI_DMA_FROMDEVICE); 1868 rx_buffer_info->page_dma = 0; 1869 1870 put_page(rx_buffer_info->page); 1871 rx_buffer_info->page = NULL; 1872 } 1873 1874 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; 1875 memset(rx_ring->rx_buffer_info, 0, size); 1876 1877 /* Zero out the descriptor ring */ 1878 memset(rx_ring->desc, 0, rx_ring->size); 1879 1880 rx_ring->next_to_clean = 0; 1881 rx_ring->next_to_use = 0; 1882 1883 writel(0, adapter->hw.hw_addr + rx_ring->head); 1884 writel(0, adapter->hw.hw_addr + rx_ring->tail); 1885} 1886 1887/** 1888 * ixgbe_clean_tx_ring - Free Tx Buffers 1889 * @adapter: board private structure 1890 * @tx_ring: ring to be cleaned 1891 **/ 1892static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter, 1893 struct ixgbe_ring *tx_ring) 1894{ 1895 struct ixgbe_tx_buffer *tx_buffer_info; 1896 unsigned long size; 1897 unsigned int i; 1898 1899 /* Free all the Tx ring sk_buffs */ 1900 1901 for (i = 0; i < tx_ring->count; i++) { 1902 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 1903 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); 1904 } 1905 1906 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; 1907 memset(tx_ring->tx_buffer_info, 0, size); 1908 1909 /* Zero out the descriptor ring */ 1910 memset(tx_ring->desc, 0, tx_ring->size); 1911 1912 tx_ring->next_to_use = 0; 1913 tx_ring->next_to_clean = 0; 1914 1915 writel(0, adapter->hw.hw_addr + tx_ring->head); 1916 writel(0, adapter->hw.hw_addr + tx_ring->tail); 1917} 1918 1919/** 1920 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues 1921 * @adapter: board private structure 1922 **/ 1923static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter 
*adapter) 1924{ 1925 int i; 1926 1927 for (i = 0; i < adapter->num_rx_queues; i++) 1928 ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]); 1929} 1930 1931/** 1932 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues 1933 * @adapter: board private structure 1934 **/ 1935static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter) 1936{ 1937 int i; 1938 1939 for (i = 0; i < adapter->num_tx_queues; i++) 1940 ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]); 1941} 1942 1943void ixgbe_down(struct ixgbe_adapter *adapter) 1944{ 1945 struct net_device *netdev = adapter->netdev; 1946 u32 rxctrl; 1947 1948 /* signal that we are down to the interrupt handler */ 1949 set_bit(__IXGBE_DOWN, &adapter->state); 1950 1951 /* disable receives */ 1952 rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); 1953 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, 1954 rxctrl & ~IXGBE_RXCTRL_RXEN); 1955 1956 netif_tx_disable(netdev); 1957 1958 /* disable transmits in the hardware */ 1959 1960 /* flush both disables */ 1961 IXGBE_WRITE_FLUSH(&adapter->hw); 1962 msleep(10); 1963 1964 ixgbe_irq_disable(adapter); 1965 1966 ixgbe_napi_disable_all(adapter); 1967 del_timer_sync(&adapter->watchdog_timer); 1968 1969 netif_carrier_off(netdev); 1970 netif_stop_queue(netdev); 1971 1972 ixgbe_reset(adapter); 1973 ixgbe_clean_all_tx_rings(adapter); 1974 ixgbe_clean_all_rx_rings(adapter); 1975 1976} 1977 1978static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) 1979{ 1980 struct net_device *netdev = pci_get_drvdata(pdev); 1981 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1982#ifdef CONFIG_PM 1983 int retval = 0; 1984#endif 1985 1986 netif_device_detach(netdev); 1987 1988 if (netif_running(netdev)) { 1989 ixgbe_down(adapter); 1990 ixgbe_free_irq(adapter); 1991 } 1992 1993#ifdef CONFIG_PM 1994 retval = pci_save_state(pdev); 1995 if (retval) 1996 return retval; 1997#endif 1998 1999 pci_enable_wake(pdev, PCI_D3hot, 0); 2000 pci_enable_wake(pdev, PCI_D3cold, 0); 2001 2002 ixgbe_release_hw_control(adapter); 2003 2004 pci_disable_device(pdev); 2005 2006 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 2007 2008 return 0; 2009} 2010 2011static void ixgbe_shutdown(struct pci_dev *pdev) 2012{ 2013 ixgbe_suspend(pdev, PMSG_SUSPEND); 2014} 2015 2016/** 2017 * ixgbe_poll - NAPI Rx polling callback 2018 * @napi: structure for representing this polling device 2019 * @budget: how many packets driver is allowed to clean 2020 * 2021 * This function is used for legacy and MSI, NAPI mode 2022 **/ 2023static int ixgbe_poll(struct napi_struct *napi, int budget) 2024{ 2025 struct ixgbe_q_vector *q_vector = container_of(napi, 2026 struct ixgbe_q_vector, napi); 2027 struct ixgbe_adapter *adapter = q_vector->adapter; 2028 int tx_cleaned = 0, work_done = 0; 2029 2030#ifdef CONFIG_DCA 2031 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { 2032 ixgbe_update_tx_dca(adapter, adapter->tx_ring); 2033 ixgbe_update_rx_dca(adapter, adapter->rx_ring); 2034 } 2035#endif 2036 2037 tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring); 2038 ixgbe_clean_rx_irq(adapter, adapter->rx_ring, &work_done, budget); 2039 2040 if (tx_cleaned) 2041 work_done = budget; 2042 2043 /* If budget not fully consumed, exit the polling mode */ 2044 if (work_done < budget) { 2045 netif_rx_complete(adapter->netdev, napi); 2046 if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS) 2047 ixgbe_set_itr(adapter); 2048 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2049 ixgbe_irq_enable(adapter); 2050 } 2051 2052 return work_done; 2053} 2054 2055/** 2056 * 
ixgbe_tx_timeout - Respond to a Tx Hang 2057 * @netdev: network interface device structure 2058 **/ 2059static void ixgbe_tx_timeout(struct net_device *netdev) 2060{ 2061 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2062 2063 /* Do the reset outside of interrupt context */ 2064 schedule_work(&adapter->reset_task); 2065} 2066 2067static void ixgbe_reset_task(struct work_struct *work) 2068{ 2069 struct ixgbe_adapter *adapter; 2070 adapter = container_of(work, struct ixgbe_adapter, reset_task); 2071 2072 adapter->tx_timeout_count++; 2073 2074 ixgbe_reinit_locked(adapter); 2075} 2076 2077static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, 2078 int vectors) 2079{ 2080 int err, vector_threshold; 2081 2082 /* We'll want at least 3 (vector_threshold): 2083 * 1) TxQ[0] Cleanup 2084 * 2) RxQ[0] Cleanup 2085 * 3) Other (Link Status Change, etc.) 2086 * 4) TCP Timer (optional) 2087 */ 2088 vector_threshold = MIN_MSIX_COUNT; 2089 2090 /* The more we get, the more we will assign to Tx/Rx Cleanup 2091 * for the separate queues...where Rx Cleanup >= Tx Cleanup. 2092 * Right now, we simply care about how many we'll get; we'll 2093 * set them up later while requesting irq's. 2094 */ 2095 while (vectors >= vector_threshold) { 2096 err = pci_enable_msix(adapter->pdev, adapter->msix_entries, 2097 vectors); 2098 if (!err) /* Success in acquiring all requested vectors. */ 2099 break; 2100 else if (err < 0) 2101 vectors = 0; /* Nasty failure, quit now */ 2102 else /* err == number of vectors we should try again with */ 2103 vectors = err; 2104 } 2105 2106 if (vectors < vector_threshold) { 2107 /* Can't allocate enough MSI-X interrupts? Oh well. 2108 * This just means we'll go with either a single MSI 2109 * vector or fall back to legacy interrupts. 2110 */ 2111 DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n"); 2112 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; 2113 kfree(adapter->msix_entries); 2114 adapter->msix_entries = NULL; 2115 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; 2116 adapter->num_tx_queues = 1; 2117 adapter->num_rx_queues = 1; 2118 } else { 2119 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */ 2120 adapter->num_msix_vectors = vectors; 2121 } 2122} 2123 2124static void __devinit ixgbe_set_num_queues(struct ixgbe_adapter *adapter) 2125{ 2126 int nrq, ntq; 2127 int feature_mask = 0, rss_i, rss_m; 2128 2129 /* Number of supported queues */ 2130 switch (adapter->hw.mac.type) { 2131 case ixgbe_mac_82598EB: 2132 rss_i = adapter->ring_feature[RING_F_RSS].indices; 2133 rss_m = 0; 2134 feature_mask |= IXGBE_FLAG_RSS_ENABLED; 2135 2136 switch (adapter->flags & feature_mask) { 2137 case (IXGBE_FLAG_RSS_ENABLED): 2138 rss_m = 0xF; 2139 nrq = rss_i; 2140#ifdef CONFIG_NETDEVICES_MULTIQUEUE 2141 ntq = rss_i; 2142#else 2143 ntq = 1; 2144#endif 2145 break; 2146 case 0: 2147 default: 2148 rss_i = 0; 2149 rss_m = 0; 2150 nrq = 1; 2151 ntq = 1; 2152 break; 2153 } 2154 2155 adapter->ring_feature[RING_F_RSS].indices = rss_i; 2156 adapter->ring_feature[RING_F_RSS].mask = rss_m; 2157 break; 2158 default: 2159 nrq = 1; 2160 ntq = 1; 2161 break; 2162 } 2163 2164 adapter->num_rx_queues = nrq; 2165 adapter->num_tx_queues = ntq; 2166} 2167 2168/** 2169 * ixgbe_cache_ring_register - Descriptor ring to register mapping 2170 * @adapter: board private structure to initialize 2171 * 2172 * Once we know the feature-set enabled for the device, we'll cache 2173 * the register offset the descriptor ring is assigned to. 
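 *
 * Editor's sketch (illustrative, not in the source): with only RSS
 * enabled the mapping below is the identity, i.e. software ring i is
 * backed by hardware register block i, so later code can reach a ring's
 * registers through reg_idx, roughly:
 *
 *	u8 idx = adapter->rx_ring[i].reg_idx;
 *	IXGBE_WRITE_REG(hw, IXGBE_RDT(idx), 0);
 *
 * (IXGBE_RDT() stands in for the per-ring register macros this driver
 * uses elsewhere; with other feature combinations reg_idx need not
 * equal i.)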
2174 **/ 2175static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) 2176{ 2177 /* TODO: Remove all uses of the indices in the cases where multiple 2178 * features are OR'd together, if the feature set makes sense. 2179 */ 2180 int feature_mask = 0, rss_i; 2181 int i, txr_idx, rxr_idx; 2182 2183 /* Number of supported queues */ 2184 switch (adapter->hw.mac.type) { 2185 case ixgbe_mac_82598EB: 2186 rss_i = adapter->ring_feature[RING_F_RSS].indices; 2187 txr_idx = 0; 2188 rxr_idx = 0; 2189 feature_mask |= IXGBE_FLAG_RSS_ENABLED; 2190 switch (adapter->flags & feature_mask) { 2191 case (IXGBE_FLAG_RSS_ENABLED): 2192 for (i = 0; i < adapter->num_rx_queues; i++) 2193 adapter->rx_ring[i].reg_idx = i; 2194 for (i = 0; i < adapter->num_tx_queues; i++) 2195 adapter->tx_ring[i].reg_idx = i; 2196 break; 2197 case 0: 2198 default: 2199 break; 2200 } 2201 break; 2202 default: 2203 break; 2204 } 2205} 2206 2207/** 2208 * ixgbe_alloc_queues - Allocate memory for all rings 2209 * @adapter: board private structure to initialize 2210 * 2211 * We allocate one ring per queue at run-time since we don't know the 2212 * number of queues at compile-time. The polling_netdev array is 2213 * intended for Multiqueue, but should work fine with a single queue. 2214 **/ 2215static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter) 2216{ 2217 int i; 2218 2219 adapter->tx_ring = kcalloc(adapter->num_tx_queues, 2220 sizeof(struct ixgbe_ring), GFP_KERNEL); 2221 if (!adapter->tx_ring) 2222 goto err_tx_ring_allocation; 2223 2224 adapter->rx_ring = kcalloc(adapter->num_rx_queues, 2225 sizeof(struct ixgbe_ring), GFP_KERNEL); 2226 if (!adapter->rx_ring) 2227 goto err_rx_ring_allocation; 2228 2229 for (i = 0; i < adapter->num_tx_queues; i++) { 2230 adapter->tx_ring[i].count = IXGBE_DEFAULT_TXD; 2231 adapter->tx_ring[i].queue_index = i; 2232 } 2233 for (i = 0; i < adapter->num_rx_queues; i++) { 2234 adapter->rx_ring[i].count = IXGBE_DEFAULT_RXD; 2235 adapter->rx_ring[i].queue_index = i; 2236 } 2237 2238 ixgbe_cache_ring_register(adapter); 2239 2240 return 0; 2241 2242err_rx_ring_allocation: 2243 kfree(adapter->tx_ring); 2244err_tx_ring_allocation: 2245 return -ENOMEM; 2246} 2247 2248/** 2249 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported 2250 * @adapter: board private structure to initialize 2251 * 2252 * Attempt to configure the interrupts using the best available 2253 * capabilities of the hardware and the kernel. 2254 **/ 2255static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter 2256 *adapter) 2257{ 2258 int err = 0; 2259 int vector, v_budget; 2260 2261 /* 2262 * It's easy to be greedy for MSI-X vectors, but it really 2263 * doesn't do us much good if we have a lot more vectors 2264 * than CPU's. So let's be conservative and only ask for 2265 * (roughly) twice the number of vectors as there are CPU's. 2266 */ 2267 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues, 2268 (int)(num_online_cpus() * 2)) + NON_Q_VECTORS; 2269 2270 /* 2271 * At the same time, hardware can only support a maximum of 2272 * MAX_MSIX_COUNT vectors. With features such as RSS and VMDq, 2273 * we can easily reach upwards of 64 Rx descriptor queues and 2274 * 32 Tx queues. Thus, we cap it off in those rare cases where 2275 * the cpu count also exceeds our vector limit. 2276 */ 2277 v_budget = min(v_budget, MAX_MSIX_COUNT); 2278 2279 /* A failure in MSI-X entry allocation isn't fatal, but it does 2280 * mean we disable MSI-X capabilities of the adapter. 
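	 *
	 * Editor's summary of the ladder below (describing this function,
	 * no new behaviour implied):
	 *
	 *	1. MSI-X: allocate msix_entries, then let
	 *	   ixgbe_acquire_msix_vectors() call pci_enable_msix(),
	 *	   retrying with fewer vectors whenever a positive count
	 *	   is returned.
	 *	2. MSI:   if MSI-X could not be had, try pci_enable_msi()
	 *	   at the try_msi label.
	 *	3. INTx:  if that also fails, neither flag is set and the
	 *	   driver falls back to legacy line interrupts.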
*/ 2281 adapter->msix_entries = kcalloc(v_budget, 2282 sizeof(struct msix_entry), GFP_KERNEL); 2283 if (!adapter->msix_entries) { 2284 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; 2285 ixgbe_set_num_queues(adapter); 2286 kfree(adapter->tx_ring); 2287 kfree(adapter->rx_ring); 2288 err = ixgbe_alloc_queues(adapter); 2289 if (err) { 2290 DPRINTK(PROBE, ERR, "Unable to allocate memory " 2291 "for queues\n"); 2292 goto out; 2293 } 2294 2295 goto try_msi; 2296 } 2297 2298 for (vector = 0; vector < v_budget; vector++) 2299 adapter->msix_entries[vector].entry = vector; 2300 2301 ixgbe_acquire_msix_vectors(adapter, v_budget); 2302 2303 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) 2304 goto out; 2305 2306try_msi: 2307 err = pci_enable_msi(adapter->pdev); 2308 if (!err) { 2309 adapter->flags |= IXGBE_FLAG_MSI_ENABLED; 2310 } else { 2311 DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, " 2312 "falling back to legacy. Error: %d\n", err); 2313 /* reset err */ 2314 err = 0; 2315 } 2316 2317out: 2318#ifdef CONFIG_NETDEVICES_MULTIQUEUE 2319 /* Notify the stack of the (possibly) reduced Tx Queue count. */ 2320 adapter->netdev->egress_subqueue_count = adapter->num_tx_queues; 2321#endif 2322 2323 return err; 2324} 2325 2326static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) 2327{ 2328 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 2329 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; 2330 pci_disable_msix(adapter->pdev); 2331 kfree(adapter->msix_entries); 2332 adapter->msix_entries = NULL; 2333 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { 2334 adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; 2335 pci_disable_msi(adapter->pdev); 2336 } 2337 return; 2338} 2339 2340/** 2341 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme 2342 * @adapter: board private structure to initialize 2343 * 2344 * We determine which interrupt scheme to use based on... 2345 * - Kernel support (MSI, MSI-X) 2346 * - which can be user-defined (via MODULE_PARAM) 2347 * - Hardware queue count (num_*_queues) 2348 * - defined by miscellaneous hardware support/features (RSS, etc.) 2349 **/ 2350static int __devinit ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) 2351{ 2352 int err; 2353 2354 /* Number of supported queues */ 2355 ixgbe_set_num_queues(adapter); 2356 2357 err = ixgbe_alloc_queues(adapter); 2358 if (err) { 2359 DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n"); 2360 goto err_alloc_queues; 2361 } 2362 2363 err = ixgbe_set_interrupt_capability(adapter); 2364 if (err) { 2365 DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n"); 2366 goto err_set_interrupt; 2367 } 2368 2369 DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, " 2370 "Tx Queue count = %u\n", 2371 (adapter->num_rx_queues > 1) ? "Enabled" : 2372 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); 2373 2374 set_bit(__IXGBE_DOWN, &adapter->state); 2375 2376 return 0; 2377 2378err_set_interrupt: 2379 kfree(adapter->tx_ring); 2380 kfree(adapter->rx_ring); 2381err_alloc_queues: 2382 return err; 2383} 2384 2385/** 2386 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) 2387 * @adapter: board private structure to initialize 2388 * 2389 * ixgbe_sw_init initializes the Adapter private data structure. 2390 * Fields are initialized based on PCI device information and 2391 * OS network device settings (MTU size). 
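 *
 * Worked example (editor's addition): on a machine where
 * num_online_cpus() returns 8, the body below asks for
 * min(IXGBE_MAX_RSS_INDICES, 8) RSS indices, i.e. up to eight Rx
 * queues, and the rx_eitr/tx_eitr value of 1 selects dynamic interrupt
 * throttling rather than a fixed EITR setting.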
2392 **/ 2393static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) 2394{ 2395 struct ixgbe_hw *hw = &adapter->hw; 2396 struct pci_dev *pdev = adapter->pdev; 2397 unsigned int rss; 2398 2399 /* Set capability flags */ 2400 rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus()); 2401 adapter->ring_feature[RING_F_RSS].indices = rss; 2402 adapter->flags |= IXGBE_FLAG_RSS_ENABLED; 2403 2404 /* Enable Dynamic interrupt throttling by default */ 2405 adapter->rx_eitr = 1; 2406 adapter->tx_eitr = 1; 2407 2408 /* default flow control settings */ 2409 hw->fc.original_type = ixgbe_fc_full; 2410 hw->fc.type = ixgbe_fc_full; 2411 2412 /* select 10G link by default */ 2413 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN; 2414 if (hw->mac.ops.reset(hw)) { 2415 dev_err(&pdev->dev, "HW Init failed\n"); 2416 return -EIO; 2417 } 2418 if (hw->mac.ops.setup_link_speed(hw, IXGBE_LINK_SPEED_10GB_FULL, true, 2419 false)) { 2420 dev_err(&pdev->dev, "Link Speed setup failed\n"); 2421 return -EIO; 2422 } 2423 2424 /* initialize eeprom parameters */ 2425 if (ixgbe_init_eeprom(hw)) { 2426 dev_err(&pdev->dev, "EEPROM initialization failed\n"); 2427 return -EIO; 2428 } 2429 2430 /* enable rx csum by default */ 2431 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; 2432 2433 set_bit(__IXGBE_DOWN, &adapter->state); 2434 2435 return 0; 2436} 2437 2438/** 2439 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors) 2440 * @adapter: board private structure 2441 * @txdr: tx descriptor ring (for a specific queue) to setup 2442 * 2443 * Return 0 on success, negative on failure 2444 **/ 2445int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, 2446 struct ixgbe_ring *txdr) 2447{ 2448 struct pci_dev *pdev = adapter->pdev; 2449 int size; 2450 2451 size = sizeof(struct ixgbe_tx_buffer) * txdr->count; 2452 txdr->tx_buffer_info = vmalloc(size); 2453 if (!txdr->tx_buffer_info) { 2454 DPRINTK(PROBE, ERR, 2455 "Unable to allocate memory for the transmit descriptor ring\n"); 2456 return -ENOMEM; 2457 } 2458 memset(txdr->tx_buffer_info, 0, size); 2459 2460 /* round up to nearest 4K */ 2461 txdr->size = txdr->count * sizeof(union ixgbe_adv_tx_desc); 2462 txdr->size = ALIGN(txdr->size, 4096); 2463 2464 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); 2465 if (!txdr->desc) { 2466 vfree(txdr->tx_buffer_info); 2467 DPRINTK(PROBE, ERR, 2468 "Memory allocation failed for the tx desc ring\n"); 2469 return -ENOMEM; 2470 } 2471 2472 txdr->next_to_use = 0; 2473 txdr->next_to_clean = 0; 2474 txdr->work_limit = txdr->count; 2475 2476 return 0; 2477} 2478 2479/** 2480 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) 2481 * @adapter: board private structure 2482 * @rxdr: rx descriptor ring (for a specific queue) to setup 2483 * 2484 * Returns 0 on success, negative on failure 2485 **/ 2486int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, 2487 struct ixgbe_ring *rxdr) 2488{ 2489 struct pci_dev *pdev = adapter->pdev; 2490 int size; 2491 2492 size = sizeof(struct ixgbe_rx_buffer) * rxdr->count; 2493 rxdr->rx_buffer_info = vmalloc(size); 2494 if (!rxdr->rx_buffer_info) { 2495 DPRINTK(PROBE, ERR, 2496 "vmalloc allocation failed for the rx desc ring\n"); 2497 return -ENOMEM; 2498 } 2499 memset(rxdr->rx_buffer_info, 0, size); 2500 2501 /* Round up to nearest 4K */ 2502 rxdr->size = rxdr->count * sizeof(union ixgbe_adv_rx_desc); 2503 rxdr->size = ALIGN(rxdr->size, 4096); 2504 2505 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); 2506 2507 if (!rxdr->desc) { 2508 
DPRINTK(PROBE, ERR, 2509 "Memory allocation failed for the rx desc ring\n"); 2510 vfree(rxdr->rx_buffer_info); 2511 return -ENOMEM; 2512 } 2513 2514 rxdr->next_to_clean = 0; 2515 rxdr->next_to_use = 0; 2516 2517 return 0; 2518} 2519 2520/** 2521 * ixgbe_free_tx_resources - Free Tx Resources per Queue 2522 * @adapter: board private structure 2523 * @tx_ring: Tx descriptor ring for a specific queue 2524 * 2525 * Free all transmit software resources 2526 **/ 2527static void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter, 2528 struct ixgbe_ring *tx_ring) 2529{ 2530 struct pci_dev *pdev = adapter->pdev; 2531 2532 ixgbe_clean_tx_ring(adapter, tx_ring); 2533 2534 vfree(tx_ring->tx_buffer_info); 2535 tx_ring->tx_buffer_info = NULL; 2536 2537 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma); 2538 2539 tx_ring->desc = NULL; 2540} 2541 2542/** 2543 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues 2544 * @adapter: board private structure 2545 * 2546 * Free all transmit software resources 2547 **/ 2548static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter) 2549{ 2550 int i; 2551 2552 for (i = 0; i < adapter->num_tx_queues; i++) 2553 ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]); 2554} 2555 2556/** 2557 * ixgbe_free_rx_resources - Free Rx Resources 2558 * @adapter: board private structure 2559 * @rx_ring: ring to clean the resources from 2560 * 2561 * Free all receive software resources 2562 **/ 2563static void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter, 2564 struct ixgbe_ring *rx_ring) 2565{ 2566 struct pci_dev *pdev = adapter->pdev; 2567 2568 ixgbe_clean_rx_ring(adapter, rx_ring); 2569 2570 vfree(rx_ring->rx_buffer_info); 2571 rx_ring->rx_buffer_info = NULL; 2572 2573 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); 2574 2575 rx_ring->desc = NULL; 2576} 2577 2578/** 2579 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues 2580 * @adapter: board private structure 2581 * 2582 * Free all receive software resources 2583 **/ 2584static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) 2585{ 2586 int i; 2587 2588 for (i = 0; i < adapter->num_rx_queues; i++) 2589 ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]); 2590} 2591 2592/** 2593 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources 2594 * @adapter: board private structure 2595 * 2596 * If this function returns with an error, then it's possible one or 2597 * more of the rings is populated (while the rest are not). It is the 2598 * caller's duty to clean those orphaned rings. 2599 * 2600 * Return 0 on success, negative on failure 2601 **/ 2602static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) 2603{ 2604 int i, err = 0; 2605 2606 for (i = 0; i < adapter->num_tx_queues; i++) { 2607 err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]); 2608 if (err) { 2609 DPRINTK(PROBE, ERR, 2610 "Allocation for Tx Queue %u failed\n", i); 2611 break; 2612 } 2613 } 2614 2615 return err; 2616} 2617 2618/** 2619 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources 2620 * @adapter: board private structure 2621 * 2622 * If this function returns with an error, then it's possible one or 2623 * more of the rings is populated (while the rest are not). It is the 2624 * caller's duty to clean those orphaned rings.
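 *
 * Caller-side sketch (editor's illustration; ixgbe_open() below follows
 * this shape):
 *
 *	err = ixgbe_setup_all_rx_resources(adapter);
 *	if (err)
 *		goto err_setup_rx;
 *
 * where the unwind labels free whatever had already been allocated.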
2625 * 2626 * Return 0 on success, negative on failure 2627 **/ 2628 2629static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) 2630{ 2631 int i, err = 0; 2632 2633 for (i = 0; i < adapter->num_rx_queues; i++) { 2634 err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]); 2635 if (err) { 2636 DPRINTK(PROBE, ERR, 2637 "Allocation for Rx Queue %u failed\n", i); 2638 break; 2639 } 2640 } 2641 2642 return err; 2643} 2644 2645/** 2646 * ixgbe_change_mtu - Change the Maximum Transfer Unit 2647 * @netdev: network interface device structure 2648 * @new_mtu: new value for maximum frame size 2649 * 2650 * Returns 0 on success, negative on failure 2651 **/ 2652static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) 2653{ 2654 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2655 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 2656 2657 if ((max_frame < (ETH_ZLEN + ETH_FCS_LEN)) || 2658 (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) 2659 return -EINVAL; 2660 2661 DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n", 2662 netdev->mtu, new_mtu); 2663 /* must set new MTU before calling down or up */ 2664 netdev->mtu = new_mtu; 2665 2666 if (netif_running(netdev)) 2667 ixgbe_reinit_locked(adapter); 2668 2669 return 0; 2670} 2671 2672/** 2673 * ixgbe_open - Called when a network interface is made active 2674 * @netdev: network interface device structure 2675 * 2676 * Returns 0 on success, negative value on failure 2677 * 2678 * The open entry point is called when a network interface is made 2679 * active by the system (IFF_UP). At this point all resources needed 2680 * for transmit and receive operations are allocated, the interrupt 2681 * handler is registered with the OS, the watchdog timer is started, 2682 * and the stack is notified that the interface is ready. 2683 **/ 2684static int ixgbe_open(struct net_device *netdev) 2685{ 2686 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2687 int err; 2688 2689 /* disallow open during test */ 2690 if (test_bit(__IXGBE_TESTING, &adapter->state)) 2691 return -EBUSY; 2692 2693 /* allocate transmit descriptors */ 2694 err = ixgbe_setup_all_tx_resources(adapter); 2695 if (err) 2696 goto err_setup_tx; 2697 2698 /* allocate receive descriptors */ 2699 err = ixgbe_setup_all_rx_resources(adapter); 2700 if (err) 2701 goto err_setup_rx; 2702 2703 ixgbe_configure(adapter); 2704 2705 err = ixgbe_request_irq(adapter); 2706 if (err) 2707 goto err_req_irq; 2708 2709 err = ixgbe_up_complete(adapter); 2710 if (err) 2711 goto err_up; 2712 2713 return 0; 2714 2715err_up: 2716 ixgbe_release_hw_control(adapter); 2717 ixgbe_free_irq(adapter); 2718err_req_irq: 2719 ixgbe_free_all_rx_resources(adapter); 2720err_setup_rx: 2721 ixgbe_free_all_tx_resources(adapter); 2722err_setup_tx: 2723 ixgbe_reset(adapter); 2724 2725 return err; 2726} 2727 2728/** 2729 * ixgbe_close - Disables a network interface 2730 * @netdev: network interface device structure 2731 * 2732 * Returns 0, this is not allowed to fail 2733 * 2734 * The close entry point is called when an interface is de-activated 2735 * by the OS. The hardware is still under the drivers control, but 2736 * needs to be disabled. A global MAC reset is issued to stop the 2737 * hardware, and all transmit and receive resources are freed. 
2738 **/ 2739static int ixgbe_close(struct net_device *netdev) 2740{ 2741 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2742 2743 ixgbe_down(adapter); 2744 ixgbe_free_irq(adapter); 2745 2746 ixgbe_free_all_tx_resources(adapter); 2747 ixgbe_free_all_rx_resources(adapter); 2748 2749 ixgbe_release_hw_control(adapter); 2750 2751 return 0; 2752} 2753 2754/** 2755 * ixgbe_update_stats - Update the board statistics counters. 2756 * @adapter: board private structure 2757 **/ 2758void ixgbe_update_stats(struct ixgbe_adapter *adapter) 2759{ 2760 struct ixgbe_hw *hw = &adapter->hw; 2761 u64 total_mpc = 0; 2762 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; 2763 2764 adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); 2765 for (i = 0; i < 8; i++) { 2766 /* for packet buffers not used, the register should read 0 */ 2767 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i)); 2768 missed_rx += mpc; 2769 adapter->stats.mpc[i] += mpc; 2770 total_mpc += adapter->stats.mpc[i]; 2771 adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); 2772 } 2773 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); 2774 /* work around hardware counting issue */ 2775 adapter->stats.gprc -= missed_rx; 2776 2777 /* 82598 hardware only has a 32 bit counter in the high register */ 2778 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); 2779 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); 2780 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH); 2781 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 2782 adapter->stats.bprc += bprc; 2783 adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); 2784 adapter->stats.mprc -= bprc; 2785 adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC); 2786 adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); 2787 adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); 2788 adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); 2789 adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); 2790 adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); 2791 adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); 2792 adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); 2793 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); 2794 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 2795 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); 2796 adapter->stats.lxontxc += lxon; 2797 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 2798 adapter->stats.lxofftxc += lxoff; 2799 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC); 2800 adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); 2801 adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); 2802 /* 2803 * 82598 errata - tx of flow control packets is included in tx counters 2804 */ 2805 xon_off_tot = lxon + lxoff; 2806 adapter->stats.gptc -= xon_off_tot; 2807 adapter->stats.mptc -= xon_off_tot; 2808 adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN)); 2809 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC); 2810 adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC); 2811 adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC); 2812 adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR); 2813 adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); 2814 adapter->stats.ptc64 -= xon_off_tot; 2815 adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); 2816 adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); 2817 adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); 2818 adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); 2819 adapter->stats.ptc1522 += IXGBE_READ_REG(hw, 
IXGBE_PTC1522); 2820 adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); 2821 2822 /* Fill out the OS statistics structure */ 2823 adapter->net_stats.multicast = adapter->stats.mprc; 2824 2825 /* Rx Errors */ 2826 adapter->net_stats.rx_errors = adapter->stats.crcerrs + 2827 adapter->stats.rlec; 2828 adapter->net_stats.rx_dropped = 0; 2829 adapter->net_stats.rx_length_errors = adapter->stats.rlec; 2830 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; 2831 adapter->net_stats.rx_missed_errors = total_mpc; 2832} 2833 2834/** 2835 * ixgbe_watchdog - Timer Call-back 2836 * @data: pointer to adapter cast into an unsigned long 2837 **/ 2838static void ixgbe_watchdog(unsigned long data) 2839{ 2840 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; 2841 struct net_device *netdev = adapter->netdev; 2842 bool link_up; 2843 u32 link_speed = 0; 2844#ifdef CONFIG_NETDEVICES_MULTIQUEUE 2845 int i; 2846#endif 2847 2848 adapter->hw.mac.ops.check_link(&adapter->hw, &(link_speed), &link_up); 2849 2850 if (link_up) { 2851 if (!netif_carrier_ok(netdev)) { 2852 u32 frctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 2853 u32 rmcs = IXGBE_READ_REG(&adapter->hw, IXGBE_RMCS); 2854#define FLOW_RX (frctl & IXGBE_FCTRL_RFCE) 2855#define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X) 2856 DPRINTK(LINK, INFO, "NIC Link is Up %s, " 2857 "Flow Control: %s\n", 2858 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? 2859 "10 Gbps" : 2860 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? 2861 "1 Gbps" : "unknown speed")), 2862 ((FLOW_RX && FLOW_TX) ? "RX/TX" : 2863 (FLOW_RX ? "RX" : 2864 (FLOW_TX ? "TX" : "None")))); 2865 2866 netif_carrier_on(netdev); 2867 netif_wake_queue(netdev); 2868#ifdef CONFIG_NETDEVICES_MULTIQUEUE 2869 for (i = 0; i < adapter->num_tx_queues; i++) 2870 netif_wake_subqueue(netdev, i); 2871#endif 2872 } else { 2873 /* Force detection of hung controller */ 2874 adapter->detect_tx_hung = true; 2875 } 2876 } else { 2877 if (netif_carrier_ok(netdev)) { 2878 DPRINTK(LINK, INFO, "NIC Link is Down\n"); 2879 netif_carrier_off(netdev); 2880 netif_stop_queue(netdev); 2881 } 2882 } 2883 2884 ixgbe_update_stats(adapter); 2885 2886 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { 2887 /* Cause software interrupt to ensure rx rings are cleaned */ 2888 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 2889 u32 eics = 2890 (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1; 2891 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, eics); 2892 } else { 2893 /* for legacy and MSI interrupts don't set any bits that 2894 * are enabled for EIAM, because this operation would 2895 * set *both* EIMS and EICS for any bit in EIAM */ 2896 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, 2897 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER)); 2898 } 2899 /* Reset the timer */ 2900 mod_timer(&adapter->watchdog_timer, 2901 round_jiffies(jiffies + 2 * HZ)); 2902 } 2903} 2904 2905static int ixgbe_tso(struct ixgbe_adapter *adapter, 2906 struct ixgbe_ring *tx_ring, struct sk_buff *skb, 2907 u32 tx_flags, u8 *hdr_len) 2908{ 2909 struct ixgbe_adv_tx_context_desc *context_desc; 2910 unsigned int i; 2911 int err; 2912 struct ixgbe_tx_buffer *tx_buffer_info; 2913 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; 2914 u32 mss_l4len_idx = 0, l4len; 2915 2916 if (skb_is_gso(skb)) { 2917 if (skb_header_cloned(skb)) { 2918 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2919 if (err) 2920 return err; 2921 } 2922 l4len = tcp_hdrlen(skb); 2923 *hdr_len += l4len; 2924 2925 if (skb->protocol == htons(ETH_P_IP)) { 2926 struct iphdr *iph = ip_hdr(skb); 2927 iph->tot_len = 0; 
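			/* Editor's note: tot_len (above) and check (below)
			 * are cleared and the TCP checksum is seeded with a
			 * zero-length pseudo-header sum so that, for each
			 * segment it emits, the hardware can insert the real
			 * length and finish both the IP and TCP checksums. */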
2928 iph->check = 0; 2929 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 2930 iph->daddr, 0, 2931 IPPROTO_TCP, 2932 0); 2933 adapter->hw_tso_ctxt++; 2934 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { 2935 ipv6_hdr(skb)->payload_len = 0; 2936 tcp_hdr(skb)->check = 2937 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 2938 &ipv6_hdr(skb)->daddr, 2939 0, IPPROTO_TCP, 0); 2940 adapter->hw_tso6_ctxt++; 2941 } 2942 2943 i = tx_ring->next_to_use; 2944 2945 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2946 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i); 2947 2948 /* VLAN MACLEN IPLEN */ 2949 if (tx_flags & IXGBE_TX_FLAGS_VLAN) 2950 vlan_macip_lens |= 2951 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); 2952 vlan_macip_lens |= ((skb_network_offset(skb)) << 2953 IXGBE_ADVTXD_MACLEN_SHIFT); 2954 *hdr_len += skb_network_offset(skb); 2955 vlan_macip_lens |= 2956 (skb_transport_header(skb) - skb_network_header(skb)); 2957 *hdr_len += 2958 (skb_transport_header(skb) - skb_network_header(skb)); 2959 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 2960 context_desc->seqnum_seed = 0; 2961 2962 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 2963 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT | 2964 IXGBE_ADVTXD_DTYP_CTXT); 2965 2966 if (skb->protocol == htons(ETH_P_IP)) 2967 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; 2968 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; 2969 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); 2970 2971 /* MSS L4LEN IDX */ 2972 mss_l4len_idx |= 2973 (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT); 2974 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT); 2975 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); 2976 2977 tx_buffer_info->time_stamp = jiffies; 2978 tx_buffer_info->next_to_watch = i; 2979 2980 i++; 2981 if (i == tx_ring->count) 2982 i = 0; 2983 tx_ring->next_to_use = i; 2984 2985 return true; 2986 } 2987 return false; 2988} 2989 2990static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, 2991 struct ixgbe_ring *tx_ring, 2992 struct sk_buff *skb, u32 tx_flags) 2993{ 2994 struct ixgbe_adv_tx_context_desc *context_desc; 2995 unsigned int i; 2996 struct ixgbe_tx_buffer *tx_buffer_info; 2997 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; 2998 2999 if (skb->ip_summed == CHECKSUM_PARTIAL || 3000 (tx_flags & IXGBE_TX_FLAGS_VLAN)) { 3001 i = tx_ring->next_to_use; 3002 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 3003 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i); 3004 3005 if (tx_flags & IXGBE_TX_FLAGS_VLAN) 3006 vlan_macip_lens |= 3007 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); 3008 vlan_macip_lens |= (skb_network_offset(skb) << 3009 IXGBE_ADVTXD_MACLEN_SHIFT); 3010 if (skb->ip_summed == CHECKSUM_PARTIAL) 3011 vlan_macip_lens |= (skb_transport_header(skb) - 3012 skb_network_header(skb)); 3013 3014 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 3015 context_desc->seqnum_seed = 0; 3016 3017 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT | 3018 IXGBE_ADVTXD_DTYP_CTXT); 3019 3020 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3021 switch (skb->protocol) { 3022 case __constant_htons(ETH_P_IP): 3023 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; 3024 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 3025 type_tucmd_mlhl |= 3026 IXGBE_ADVTXD_TUCMD_L4T_TCP; 3027 break; 3028 3029 case __constant_htons(ETH_P_IPV6): 3030 /* XXX what about other V6 headers?? 
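			 *
			 * Editor's note: the test below only recognizes TCP
			 * when it immediately follows the fixed IPv6 header;
			 * packets with extension headers (hop-by-hop,
			 * routing, fragment, ...) get no L4 checksum
			 * context. A fuller version would walk the chain,
			 * e.g. with the kernel's ipv6_skip_exthdr(), before
			 * testing for IPPROTO_TCP.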
*/ 3031 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 3032 type_tucmd_mlhl |= 3033 IXGBE_ADVTXD_TUCMD_L4T_TCP; 3034 break; 3035 3036 default: 3037 if (unlikely(net_ratelimit())) { 3038 DPRINTK(PROBE, WARNING, 3039 "partial checksum but proto=%x!\n", 3040 skb->protocol); 3041 } 3042 break; 3043 } 3044 } 3045 3046 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); 3047 context_desc->mss_l4len_idx = 0; 3048 3049 tx_buffer_info->time_stamp = jiffies; 3050 tx_buffer_info->next_to_watch = i; 3051 adapter->hw_csum_tx_good++; 3052 i++; 3053 if (i == tx_ring->count) 3054 i = 0; 3055 tx_ring->next_to_use = i; 3056 3057 return true; 3058 } 3059 return false; 3060} 3061 3062static int ixgbe_tx_map(struct ixgbe_adapter *adapter, 3063 struct ixgbe_ring *tx_ring, 3064 struct sk_buff *skb, unsigned int first) 3065{ 3066 struct ixgbe_tx_buffer *tx_buffer_info; 3067 unsigned int len = skb->len; 3068 unsigned int offset = 0, size, count = 0, i; 3069 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 3070 unsigned int f; 3071 3072 len -= skb->data_len; 3073 3074 i = tx_ring->next_to_use; 3075 3076 while (len) { 3077 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 3078 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); 3079 3080 tx_buffer_info->length = size; 3081 tx_buffer_info->dma = pci_map_single(adapter->pdev, 3082 skb->data + offset, 3083 size, PCI_DMA_TODEVICE); 3084 tx_buffer_info->time_stamp = jiffies; 3085 tx_buffer_info->next_to_watch = i; 3086 3087 len -= size; 3088 offset += size; 3089 count++; 3090 i++; 3091 if (i == tx_ring->count) 3092 i = 0; 3093 } 3094 3095 for (f = 0; f < nr_frags; f++) { 3096 struct skb_frag_struct *frag; 3097 3098 frag = &skb_shinfo(skb)->frags[f]; 3099 len = frag->size; 3100 offset = frag->page_offset; 3101 3102 while (len) { 3103 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 3104 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); 3105 3106 tx_buffer_info->length = size; 3107 tx_buffer_info->dma = pci_map_page(adapter->pdev, 3108 frag->page, 3109 offset, 3110 size, PCI_DMA_TODEVICE); 3111 tx_buffer_info->time_stamp = jiffies; 3112 tx_buffer_info->next_to_watch = i; 3113 3114 len -= size; 3115 offset += size; 3116 count++; 3117 i++; 3118 if (i == tx_ring->count) 3119 i = 0; 3120 } 3121 } 3122 if (i == 0) 3123 i = tx_ring->count - 1; 3124 else 3125 i = i - 1; 3126 tx_ring->tx_buffer_info[i].skb = skb; 3127 tx_ring->tx_buffer_info[first].next_to_watch = i; 3128 3129 return count; 3130} 3131 3132static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, 3133 struct ixgbe_ring *tx_ring, 3134 int tx_flags, int count, u32 paylen, u8 hdr_len) 3135{ 3136 union ixgbe_adv_tx_desc *tx_desc = NULL; 3137 struct ixgbe_tx_buffer *tx_buffer_info; 3138 u32 olinfo_status = 0, cmd_type_len = 0; 3139 unsigned int i; 3140 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS; 3141 3142 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA; 3143 3144 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT; 3145 3146 if (tx_flags & IXGBE_TX_FLAGS_VLAN) 3147 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE; 3148 3149 if (tx_flags & IXGBE_TX_FLAGS_TSO) { 3150 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; 3151 3152 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 3153 IXGBE_ADVTXD_POPTS_SHIFT; 3154 3155 if (tx_flags & IXGBE_TX_FLAGS_IPV4) 3156 olinfo_status |= IXGBE_TXD_POPTS_IXSM << 3157 IXGBE_ADVTXD_POPTS_SHIFT; 3158 3159 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM) 3160 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 3161 IXGBE_ADVTXD_POPTS_SHIFT; 3162 3163 olinfo_status |= ((paylen - hdr_len) << 
IXGBE_ADVTXD_PAYLEN_SHIFT); 3164 3165 i = tx_ring->next_to_use; 3166 while (count--) { 3167 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 3168 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); 3169 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); 3170 tx_desc->read.cmd_type_len = 3171 cpu_to_le32(cmd_type_len | tx_buffer_info->length); 3172 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 3173 3174 i++; 3175 if (i == tx_ring->count) 3176 i = 0; 3177 } 3178 3179 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd); 3180 3181 /* 3182 * Force memory writes to complete before letting h/w 3183 * know there are new descriptors to fetch. (Only 3184 * applicable for weak-ordered memory model archs, 3185 * such as IA-64). 3186 */ 3187 wmb(); 3188 3189 tx_ring->next_to_use = i; 3190 writel(i, adapter->hw.hw_addr + tx_ring->tail); 3191} 3192 3193static int __ixgbe_maybe_stop_tx(struct net_device *netdev, 3194 struct ixgbe_ring *tx_ring, int size) 3195{ 3196 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3197 3198#ifdef CONFIG_NETDEVICES_MULTIQUEUE 3199 netif_stop_subqueue(netdev, tx_ring->queue_index); 3200#else 3201 netif_stop_queue(netdev); 3202#endif 3203 /* Herbert's original patch had: 3204 * smp_mb__after_netif_stop_queue(); 3205 * but since that doesn't exist yet, just open code it. */ 3206 smp_mb(); 3207 3208 /* We need to check again in a case another CPU has just 3209 * made room available. */ 3210 if (likely(IXGBE_DESC_UNUSED(tx_ring) < size)) 3211 return -EBUSY; 3212 3213 /* A reprieve! - use start_queue because it doesn't call schedule */ 3214#ifdef CONFIG_NETDEVICES_MULTIQUEUE 3215 netif_wake_subqueue(netdev, tx_ring->queue_index); 3216#else 3217 netif_wake_queue(netdev); 3218#endif 3219 ++adapter->restart_queue; 3220 return 0; 3221} 3222 3223static int ixgbe_maybe_stop_tx(struct net_device *netdev, 3224 struct ixgbe_ring *tx_ring, int size) 3225{ 3226 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) 3227 return 0; 3228 return __ixgbe_maybe_stop_tx(netdev, tx_ring, size); 3229} 3230 3231 3232static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 3233{ 3234 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3235 struct ixgbe_ring *tx_ring; 3236 unsigned int len = skb->len; 3237 unsigned int first; 3238 unsigned int tx_flags = 0; 3239 u8 hdr_len = 0; 3240 int r_idx = 0, tso; 3241 unsigned int mss = 0; 3242 int count = 0; 3243 unsigned int f; 3244 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 3245 len -= skb->data_len; 3246#ifdef CONFIG_NETDEVICES_MULTIQUEUE 3247 r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping; 3248#endif 3249 tx_ring = &adapter->tx_ring[r_idx]; 3250 3251 3252 if (skb->len <= 0) { 3253 dev_kfree_skb(skb); 3254 return NETDEV_TX_OK; 3255 } 3256 mss = skb_shinfo(skb)->gso_size; 3257 3258 if (mss) 3259 count++; 3260 else if (skb->ip_summed == CHECKSUM_PARTIAL) 3261 count++; 3262 3263 count += TXD_USE_COUNT(len); 3264 for (f = 0; f < nr_frags; f++) 3265 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 3266 3267 if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) { 3268 adapter->tx_busy++; 3269 return NETDEV_TX_BUSY; 3270 } 3271 if (adapter->vlgrp && vlan_tx_tag_present(skb)) { 3272 tx_flags |= IXGBE_TX_FLAGS_VLAN; 3273 tx_flags |= (vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT); 3274 } 3275 3276 if (skb->protocol == htons(ETH_P_IP)) 3277 tx_flags |= IXGBE_TX_FLAGS_IPV4; 3278 first = tx_ring->next_to_use; 3279 tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len); 3280 if (tso < 0) { 3281 dev_kfree_skb_any(skb); 
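		/* Editor's note: dev_kfree_skb_any() is the variant that is
		 * safe in any context (netpoll can enter this path with
		 * IRQs off), and NETDEV_TX_OK is returned after the drop so
		 * the stack does not requeue an skb that can never be
		 * segmented. */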
3282 return NETDEV_TX_OK; 3283 } 3284 3285 if (tso) 3286 tx_flags |= IXGBE_TX_FLAGS_TSO; 3287 else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) && 3288 (skb->ip_summed == CHECKSUM_PARTIAL)) 3289 tx_flags |= IXGBE_TX_FLAGS_CSUM; 3290 3291 ixgbe_tx_queue(adapter, tx_ring, tx_flags, 3292 ixgbe_tx_map(adapter, tx_ring, skb, first), 3293 skb->len, hdr_len); 3294 3295 netdev->trans_start = jiffies; 3296 3297 ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED); 3298 3299 return NETDEV_TX_OK; 3300} 3301 3302/** 3303 * ixgbe_get_stats - Get System Network Statistics 3304 * @netdev: network interface device structure 3305 * 3306 * Returns the address of the device statistics structure. 3307 * The statistics are actually updated from the timer callback. 3308 **/ 3309static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev) 3310{ 3311 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3312 3313 /* only return the current stats */ 3314 return &adapter->net_stats; 3315} 3316 3317/** 3318 * ixgbe_set_mac - Change the Ethernet Address of the NIC 3319 * @netdev: network interface device structure 3320 * @p: pointer to an address structure 3321 * 3322 * Returns 0 on success, negative on failure 3323 **/ 3324static int ixgbe_set_mac(struct net_device *netdev, void *p) 3325{ 3326 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3327 struct sockaddr *addr = p; 3328 3329 if (!is_valid_ether_addr(addr->sa_data)) 3330 return -EADDRNOTAVAIL; 3331 3332 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 3333 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); 3334 3335 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV); 3336 3337 return 0; 3338} 3339 3340#ifdef CONFIG_NET_POLL_CONTROLLER 3341/* 3342 * Polling 'interrupt' - used by things like netconsole to send skbs 3343 * without having to re-enable interrupts. It's not called while 3344 * the interrupt routine is executing. 3345 */ 3346static void ixgbe_netpoll(struct net_device *netdev) 3347{ 3348 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3349 3350 disable_irq(adapter->pdev->irq); 3351 adapter->flags |= IXGBE_FLAG_IN_NETPOLL; 3352 ixgbe_intr(adapter->pdev->irq, netdev); 3353 adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; 3354 enable_irq(adapter->pdev->irq); 3355} 3356#endif 3357 3358/** 3359 * ixgbe_napi_add_all - prep napi structs for use 3360 * @adapter: private struct 3361 * helper function to napi_add each possible q_vector->napi 3362 */ 3363static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter) 3364{ 3365 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 3366 int (*poll)(struct napi_struct *, int); 3367 3368 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 3369 poll = &ixgbe_clean_rxonly; 3370 } else { 3371 poll = &ixgbe_poll; 3372 /* only one q_vector for legacy modes */ 3373 q_vectors = 1; 3374 } 3375 3376 for (i = 0; i < q_vectors; i++) { 3377 struct ixgbe_q_vector *q_vector = &adapter->q_vector[i]; 3378 netif_napi_add(adapter->netdev, &q_vector->napi, 3379 (*poll), 64); 3380 } 3381} 3382 3383/** 3384 * ixgbe_probe - Device Initialization Routine 3385 * @pdev: PCI device information struct 3386 * @ent: entry in ixgbe_pci_tbl 3387 * 3388 * Returns 0 on success, negative on failure 3389 * 3390 * ixgbe_probe initializes an adapter identified by a pci_dev structure. 3391 * The OS initialization, configuring of the adapter private structure, 3392 * and a hardware reset occur. 
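 *
 * Editor's note on ordering: the body acquires resources in the
 * sequence enable device -> DMA mask -> I/O regions -> alloc_etherdev
 * -> ioremap -> hw/sw init -> register_netdev, and the err_* labels at
 * the bottom unwind in reverse, so each failure path releases only what
 * had already been acquired.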
3393 **/ 3394static int __devinit ixgbe_probe(struct pci_dev *pdev, 3395 const struct pci_device_id *ent) 3396{ 3397 struct net_device *netdev; 3398 struct ixgbe_adapter *adapter = NULL; 3399 struct ixgbe_hw *hw; 3400 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; 3401 unsigned long mmio_start, mmio_len; 3402 static int cards_found; 3403 int i, err, pci_using_dac; 3404 u16 link_status, link_speed, link_width; 3405 u32 part_num; 3406 3407 err = pci_enable_device(pdev); 3408 if (err) 3409 return err; 3410 3411 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) && 3412 !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) { 3413 pci_using_dac = 1; 3414 } else { 3415 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 3416 if (err) { 3417 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 3418 if (err) { 3419 dev_err(&pdev->dev, "No usable DMA " 3420 "configuration, aborting\n"); 3421 goto err_dma; 3422 } 3423 } 3424 pci_using_dac = 0; 3425 } 3426 3427 err = pci_request_regions(pdev, ixgbe_driver_name); 3428 if (err) { 3429 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); 3430 goto err_pci_reg; 3431 } 3432 3433 pci_set_master(pdev); 3434 pci_save_state(pdev); 3435 3436#ifdef CONFIG_NETDEVICES_MULTIQUEUE 3437 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES); 3438#else 3439 netdev = alloc_etherdev(sizeof(struct ixgbe_adapter)); 3440#endif 3441 if (!netdev) { 3442 err = -ENOMEM; 3443 goto err_alloc_etherdev; 3444 } 3445 3446 SET_NETDEV_DEV(netdev, &pdev->dev); 3447 3448 pci_set_drvdata(pdev, netdev); 3449 adapter = netdev_priv(netdev); 3450 3451 adapter->netdev = netdev; 3452 adapter->pdev = pdev; 3453 hw = &adapter->hw; 3454 hw->back = adapter; 3455 adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; 3456 3457 mmio_start = pci_resource_start(pdev, 0); 3458 mmio_len = pci_resource_len(pdev, 0); 3459 3460 hw->hw_addr = ioremap(mmio_start, mmio_len); 3461 if (!hw->hw_addr) { 3462 err = -EIO; 3463 goto err_ioremap; 3464 } 3465 3466 for (i = 1; i <= 5; i++) { 3467 if (pci_resource_len(pdev, i) == 0) 3468 continue; 3469 } 3470 3471 netdev->open = &ixgbe_open; 3472 netdev->stop = &ixgbe_close; 3473 netdev->hard_start_xmit = &ixgbe_xmit_frame; 3474 netdev->get_stats = &ixgbe_get_stats; 3475 netdev->set_multicast_list = &ixgbe_set_multi; 3476 netdev->set_mac_address = &ixgbe_set_mac; 3477 netdev->change_mtu = &ixgbe_change_mtu; 3478 ixgbe_set_ethtool_ops(netdev); 3479 netdev->tx_timeout = &ixgbe_tx_timeout; 3480 netdev->watchdog_timeo = 5 * HZ; 3481 netdev->vlan_rx_register = ixgbe_vlan_rx_register; 3482 netdev->vlan_rx_add_vid = ixgbe_vlan_rx_add_vid; 3483 netdev->vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid; 3484#ifdef CONFIG_NET_POLL_CONTROLLER 3485 netdev->poll_controller = ixgbe_netpoll; 3486#endif 3487 strcpy(netdev->name, pci_name(pdev)); 3488 3489 netdev->mem_start = mmio_start; 3490 netdev->mem_end = mmio_start + mmio_len; 3491 3492 adapter->bd_number = cards_found; 3493 3494 /* PCI config space info */ 3495 hw->vendor_id = pdev->vendor; 3496 hw->device_id = pdev->device; 3497 hw->revision_id = pdev->revision; 3498 hw->subsystem_vendor_id = pdev->subsystem_vendor; 3499 hw->subsystem_device_id = pdev->subsystem_device; 3500 3501 /* Setup hw api */ 3502 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); 3503 hw->mac.type = ii->mac; 3504 3505 err = ii->get_invariants(hw); 3506 if (err) 3507 goto err_hw_init; 3508 3509 /* setup the private structure */ 3510 err = ixgbe_sw_init(adapter); 3511 if (err) 3512 goto err_sw_init; 3513 3514 
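	/* Editor's note: the feature flags below advertise scatter-gather
	 * DMA, hardware checksumming and VLAN tag insert/strip/filter
	 * offloads, with TSO and TSO6 added after them; NETIF_F_HIGHDMA is
	 * set only when the 64-bit DMA mask was accepted, since it lets
	 * the stack hand the driver buffers in high memory. */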
netdev->features = NETIF_F_SG | 3515 NETIF_F_HW_CSUM | 3516 NETIF_F_HW_VLAN_TX | 3517 NETIF_F_HW_VLAN_RX | 3518 NETIF_F_HW_VLAN_FILTER; 3519 3520 netdev->features |= NETIF_F_TSO; 3521 3522 netdev->features |= NETIF_F_TSO6; 3523 if (pci_using_dac) 3524 netdev->features |= NETIF_F_HIGHDMA; 3525 3526#ifdef CONFIG_NETDEVICES_MULTIQUEUE 3527 netdev->features |= NETIF_F_MULTI_QUEUE; 3528#endif 3529 3530 /* make sure the EEPROM is good */ 3531 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) { 3532 dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n"); 3533 err = -EIO; 3534 goto err_eeprom; 3535 } 3536 3537 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); 3538 memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len); 3539 3540 if (ixgbe_validate_mac_addr(netdev->dev_addr)) { 3541 err = -EIO; 3542 goto err_eeprom; 3543 } 3544 3545 init_timer(&adapter->watchdog_timer); 3546 adapter->watchdog_timer.function = &ixgbe_watchdog; 3547 adapter->watchdog_timer.data = (unsigned long)adapter; 3548 3549 INIT_WORK(&adapter->reset_task, ixgbe_reset_task); 3550 3551 /* initialize default flow control settings */ 3552 hw->fc.original_type = ixgbe_fc_full; 3553 hw->fc.type = ixgbe_fc_full; 3554 hw->fc.high_water = IXGBE_DEFAULT_FCRTH; 3555 hw->fc.low_water = IXGBE_DEFAULT_FCRTL; 3556 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; 3557 3558 err = ixgbe_init_interrupt_scheme(adapter); 3559 if (err) 3560 goto err_sw_init; 3561 3562 /* print bus type/speed/width info */ 3563 pci_read_config_word(pdev, IXGBE_PCI_LINK_STATUS, &link_status); 3564 link_speed = link_status & IXGBE_PCI_LINK_SPEED; 3565 link_width = link_status & IXGBE_PCI_LINK_WIDTH; 3566 dev_info(&pdev->dev, "(PCI Express:%s:%s) " 3567 "%02x:%02x:%02x:%02x:%02x:%02x\n", 3568 ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" : 3569 (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" : 3570 "Unknown"), 3571 ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" : 3572 (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" : 3573 (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" : 3574 (link_width == IXGBE_PCI_LINK_WIDTH_1) ? 
"Width x1" : 3575 "Unknown"), 3576 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], 3577 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]); 3578 ixgbe_read_part_num(hw, &part_num); 3579 dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", 3580 hw->mac.type, hw->phy.type, 3581 (part_num >> 8), (part_num & 0xff)); 3582 3583 if (link_width <= IXGBE_PCI_LINK_WIDTH_4) { 3584 dev_warn(&pdev->dev, "PCI-Express bandwidth available for " 3585 "this card is not sufficient for optimal " 3586 "performance.\n"); 3587 dev_warn(&pdev->dev, "For optimal performance a x8 " 3588 "PCI-Express slot is required.\n"); 3589 } 3590 3591 /* reset the hardware with the new settings */ 3592 ixgbe_start_hw(hw); 3593 3594 netif_carrier_off(netdev); 3595 netif_stop_queue(netdev); 3596#ifdef CONFIG_NETDEVICES_MULTIQUEUE 3597 for (i = 0; i < adapter->num_tx_queues; i++) 3598 netif_stop_subqueue(netdev, i); 3599#endif 3600 3601 ixgbe_napi_add_all(adapter); 3602 3603 strcpy(netdev->name, "eth%d"); 3604 err = register_netdev(netdev); 3605 if (err) 3606 goto err_register; 3607 3608#ifdef CONFIG_DCA 3609 if (dca_add_requester(&pdev->dev) == 0) { 3610 adapter->flags |= IXGBE_FLAG_DCA_ENABLED; 3611 /* always use CB2 mode, difference is masked 3612 * in the CB driver */ 3613 IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2); 3614 ixgbe_setup_dca(adapter); 3615 } 3616#endif 3617 3618 dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n"); 3619 cards_found++; 3620 return 0; 3621 3622err_register: 3623 ixgbe_release_hw_control(adapter); 3624err_hw_init: 3625err_sw_init: 3626 ixgbe_reset_interrupt_capability(adapter); 3627err_eeprom: 3628 iounmap(hw->hw_addr); 3629err_ioremap: 3630 free_netdev(netdev); 3631err_alloc_etherdev: 3632 pci_release_regions(pdev); 3633err_pci_reg: 3634err_dma: 3635 pci_disable_device(pdev); 3636 return err; 3637} 3638 3639/** 3640 * ixgbe_remove - Device Removal Routine 3641 * @pdev: PCI device information struct 3642 * 3643 * ixgbe_remove is called by the PCI subsystem to alert the driver 3644 * that it should release a PCI device. The could be caused by a 3645 * Hot-Plug event, or because the driver is going to be removed from 3646 * memory. 3647 **/ 3648static void __devexit ixgbe_remove(struct pci_dev *pdev) 3649{ 3650 struct net_device *netdev = pci_get_drvdata(pdev); 3651 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3652 3653 set_bit(__IXGBE_DOWN, &adapter->state); 3654 del_timer_sync(&adapter->watchdog_timer); 3655 3656 flush_scheduled_work(); 3657 3658#ifdef CONFIG_DCA 3659 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { 3660 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; 3661 dca_remove_requester(&pdev->dev); 3662 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1); 3663 } 3664 3665#endif 3666 unregister_netdev(netdev); 3667 3668 ixgbe_reset_interrupt_capability(adapter); 3669 3670 ixgbe_release_hw_control(adapter); 3671 3672 iounmap(adapter->hw.hw_addr); 3673 pci_release_regions(pdev); 3674 3675 DPRINTK(PROBE, INFO, "complete\n"); 3676 kfree(adapter->tx_ring); 3677 kfree(adapter->rx_ring); 3678 3679 free_netdev(netdev); 3680 3681 pci_disable_device(pdev); 3682} 3683 3684/** 3685 * ixgbe_io_error_detected - called when PCI error is detected 3686 * @pdev: Pointer to PCI device 3687 * @state: The current pci connection state 3688 * 3689 * This function is called after a PCI bus error affecting 3690 * this device has been detected. 
3691 */ 3692static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, 3693 pci_channel_state_t state) 3694{ 3695 struct net_device *netdev = pci_get_drvdata(pdev); 3696 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3697 3698 netif_device_detach(netdev); 3699 3700 if (netif_running(netdev)) 3701 ixgbe_down(adapter); 3702 pci_disable_device(pdev); 3703 3704 /* Request a slot reset. */ 3705 return PCI_ERS_RESULT_NEED_RESET; 3706} 3707 3708/** 3709 * ixgbe_io_slot_reset - called after the pci bus has been reset. 3710 * @pdev: Pointer to PCI device 3711 * 3712 * Restart the card from scratch, as if from a cold-boot. 3713 */ 3714static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) 3715{ 3716 struct net_device *netdev = pci_get_drvdata(pdev); 3717 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3718 3719 if (pci_enable_device(pdev)) { 3720 DPRINTK(PROBE, ERR, 3721 "Cannot re-enable PCI device after reset.\n"); 3722 return PCI_ERS_RESULT_DISCONNECT; 3723 } 3724 pci_set_master(pdev); 3725 pci_restore_state(pdev); 3726 3727 pci_enable_wake(pdev, PCI_D3hot, 0); 3728 pci_enable_wake(pdev, PCI_D3cold, 0); 3729 3730 ixgbe_reset(adapter); 3731 3732 return PCI_ERS_RESULT_RECOVERED; 3733} 3734 3735/** 3736 * ixgbe_io_resume - called when traffic can start flowing again. 3737 * @pdev: Pointer to PCI device 3738 * 3739 * This callback is called when the error recovery driver tells us that 3740 * it's OK to resume normal operation. 3741 */ 3742static void ixgbe_io_resume(struct pci_dev *pdev) 3743{ 3744 struct net_device *netdev = pci_get_drvdata(pdev); 3745 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3746 3747 if (netif_running(netdev)) { 3748 if (ixgbe_up(adapter)) { 3749 DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n"); 3750 return; 3751 } 3752 } 3753 3754 netif_device_attach(netdev); 3755 3756} 3757 3758static struct pci_error_handlers ixgbe_err_handler = { 3759 .error_detected = ixgbe_io_error_detected, 3760 .slot_reset = ixgbe_io_slot_reset, 3761 .resume = ixgbe_io_resume, 3762}; 3763 3764static struct pci_driver ixgbe_driver = { 3765 .name = ixgbe_driver_name, 3766 .id_table = ixgbe_pci_tbl, 3767 .probe = ixgbe_probe, 3768 .remove = __devexit_p(ixgbe_remove), 3769#ifdef CONFIG_PM 3770 .suspend = ixgbe_suspend, 3771 .resume = ixgbe_resume, 3772#endif 3773 .shutdown = ixgbe_shutdown, 3774 .err_handler = &ixgbe_err_handler 3775}; 3776 3777/** 3778 * ixgbe_init_module - Driver Registration Routine 3779 * 3780 * ixgbe_init_module is the first routine called when the driver is 3781 * loaded. All it does is register with the PCI subsystem. 3782 **/ 3783static int __init ixgbe_init_module(void) 3784{ 3785 int ret; 3786 printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name, 3787 ixgbe_driver_string, ixgbe_driver_version); 3788 3789 printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright); 3790 3791#ifdef CONFIG_DCA 3792 dca_register_notify(&dca_notifier); 3793 3794#endif 3795 ret = pci_register_driver(&ixgbe_driver); 3796 return ret; 3797} 3798module_init(ixgbe_init_module); 3799 3800/** 3801 * ixgbe_exit_module - Driver Exit Cleanup Routine 3802 * 3803 * ixgbe_exit_module is called just before the driver is removed 3804 * from memory.
3805 **/ 3806static void __exit ixgbe_exit_module(void) 3807{ 3808#ifdef CONFIG_DCA 3809 dca_unregister_notify(&dca_notifier); 3810#endif 3811 pci_unregister_driver(&ixgbe_driver); 3812} 3813 3814#ifdef CONFIG_DCA 3815static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, 3816 void *p) 3817{ 3818 int ret_val; 3819 3820 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event, 3821 __ixgbe_notify_dca); 3822 3823 return ret_val ? NOTIFY_BAD : NOTIFY_DONE; 3824} 3825#endif /* CONFIG_DCA */ 3826 3827module_exit(ixgbe_exit_module); 3828 3829/* ixgbe_main.c */
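
/*
 * Editor's sketch (illustrative only): ixgbe_notify_dca() above visits
 * every device bound to this driver via driver_for_each_device(), so the
 * per-device hook it names, __ixgbe_notify_dca() (defined earlier in this
 * file, outside this excerpt), must match the callback type that helper
 * expects, roughly:
 *
 *	static int __ixgbe_notify_dca(struct device *dev, void *data)
 *	{
 *		unsigned long event = *(unsigned long *)data;
 *
 *		(react to DCA_PROVIDER_ADD / DCA_PROVIDER_REMOVE here)
 *		return 0;
 *	}
 *
 * A non-zero return from the callback stops the walk and is what
 * ixgbe_notify_dca() maps to NOTIFY_BAD.
 */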