at v2.6.31-rc2 2914 lines 80 kB view raw
1/******************************************************************************* 2 3 Intel(R) 82576 Virtual Function Linux driver 4 Copyright(c) 2009 Intel Corporation. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms and conditions of the GNU General Public License, 8 version 2, as published by the Free Software Foundation. 9 10 This program is distributed in the hope it will be useful, but WITHOUT 11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 more details. 14 15 You should have received a copy of the GNU General Public License along with 16 this program; if not, write to the Free Software Foundation, Inc., 17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 19 The full GNU General Public License is included in this distribution in 20 the file called "COPYING". 21 22 Contact Information: 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 26*******************************************************************************/ 27 28#include <linux/module.h> 29#include <linux/types.h> 30#include <linux/init.h> 31#include <linux/pci.h> 32#include <linux/vmalloc.h> 33#include <linux/pagemap.h> 34#include <linux/delay.h> 35#include <linux/netdevice.h> 36#include <linux/tcp.h> 37#include <linux/ipv6.h> 38#include <net/checksum.h> 39#include <net/ip6_checksum.h> 40#include <linux/mii.h> 41#include <linux/ethtool.h> 42#include <linux/if_vlan.h> 43#include <linux/pm_qos_params.h> 44 45#include "igbvf.h" 46 47#define DRV_VERSION "1.0.0-k0" 48char igbvf_driver_name[] = "igbvf"; 49const char igbvf_driver_version[] = DRV_VERSION; 50static const char igbvf_driver_string[] = 51 "Intel(R) Virtual Function Network Driver"; 52static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation."; 53 54static int igbvf_poll(struct napi_struct *napi, int budget); 55static void igbvf_reset(struct igbvf_adapter *); 56static void igbvf_set_interrupt_capability(struct igbvf_adapter *); 57static void igbvf_reset_interrupt_capability(struct igbvf_adapter *); 58 59static struct igbvf_info igbvf_vf_info = { 60 .mac = e1000_vfadapt, 61 .flags = 0, 62 .pba = 10, 63 .init_ops = e1000_init_function_pointers_vf, 64}; 65 66static const struct igbvf_info *igbvf_info_tbl[] = { 67 [board_vf] = &igbvf_vf_info, 68}; 69 70/** 71 * igbvf_desc_unused - calculate if we have unused descriptors 72 **/ 73static int igbvf_desc_unused(struct igbvf_ring *ring) 74{ 75 if (ring->next_to_clean > ring->next_to_use) 76 return ring->next_to_clean - ring->next_to_use - 1; 77 78 return ring->count + ring->next_to_clean - ring->next_to_use - 1; 79} 80 81/** 82 * igbvf_receive_skb - helper function to handle Rx indications 83 * @adapter: board private structure 84 * @status: descriptor status field as written by hardware 85 * @vlan: descriptor vlan field as written by hardware (no le/be conversion) 86 * @skb: pointer to sk_buff to be indicated to stack 87 **/ 88static void igbvf_receive_skb(struct igbvf_adapter *adapter, 89 struct net_device *netdev, 90 struct sk_buff *skb, 91 u32 status, u16 vlan) 92{ 93 if (adapter->vlgrp && (status & E1000_RXD_STAT_VP)) 94 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 95 le16_to_cpu(vlan) & 96 E1000_RXD_SPC_VLAN_MASK); 97 else 98 netif_receive_skb(skb); 99 100 netdev->last_rx = jiffies; 101} 102 103static inline void igbvf_rx_checksum_adv(struct igbvf_adapter 
*adapter, 104 u32 status_err, struct sk_buff *skb) 105{ 106 skb->ip_summed = CHECKSUM_NONE; 107 108 /* Ignore Checksum bit is set or checksum is disabled through ethtool */ 109 if ((status_err & E1000_RXD_STAT_IXSM) || 110 (adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED)) 111 return; 112 113 /* TCP/UDP checksum error bit is set */ 114 if (status_err & 115 (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) { 116 /* let the stack verify checksum errors */ 117 adapter->hw_csum_err++; 118 return; 119 } 120 121 /* It must be a TCP or UDP packet with a valid checksum */ 122 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) 123 skb->ip_summed = CHECKSUM_UNNECESSARY; 124 125 adapter->hw_csum_good++; 126} 127 128/** 129 * igbvf_alloc_rx_buffers - Replace used receive buffers; packet split 130 * @rx_ring: address of ring structure to repopulate 131 * @cleaned_count: number of buffers to repopulate 132 **/ 133static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, 134 int cleaned_count) 135{ 136 struct igbvf_adapter *adapter = rx_ring->adapter; 137 struct net_device *netdev = adapter->netdev; 138 struct pci_dev *pdev = adapter->pdev; 139 union e1000_adv_rx_desc *rx_desc; 140 struct igbvf_buffer *buffer_info; 141 struct sk_buff *skb; 142 unsigned int i; 143 int bufsz; 144 145 i = rx_ring->next_to_use; 146 buffer_info = &rx_ring->buffer_info[i]; 147 148 if (adapter->rx_ps_hdr_size) 149 bufsz = adapter->rx_ps_hdr_size; 150 else 151 bufsz = adapter->rx_buffer_len; 152 bufsz += NET_IP_ALIGN; 153 154 while (cleaned_count--) { 155 rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i); 156 157 if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) { 158 if (!buffer_info->page) { 159 buffer_info->page = alloc_page(GFP_ATOMIC); 160 if (!buffer_info->page) { 161 adapter->alloc_rx_buff_failed++; 162 goto no_buffers; 163 } 164 buffer_info->page_offset = 0; 165 } else { 166 buffer_info->page_offset ^= PAGE_SIZE / 2; 167 } 168 buffer_info->page_dma = 169 pci_map_page(pdev, buffer_info->page, 170 buffer_info->page_offset, 171 PAGE_SIZE / 2, 172 PCI_DMA_FROMDEVICE); 173 } 174 175 if (!buffer_info->skb) { 176 skb = netdev_alloc_skb(netdev, bufsz); 177 if (!skb) { 178 adapter->alloc_rx_buff_failed++; 179 goto no_buffers; 180 } 181 182 /* Make buffer alignment 2 beyond a 16 byte boundary 183 * this will result in a 16 byte aligned IP header after 184 * the 14 byte MAC header is removed 185 */ 186 skb_reserve(skb, NET_IP_ALIGN); 187 188 buffer_info->skb = skb; 189 buffer_info->dma = pci_map_single(pdev, skb->data, 190 bufsz, 191 PCI_DMA_FROMDEVICE); 192 } 193 /* Refresh the desc even if buffer_addrs didn't change because 194 * each write-back erases this info. */ 195 if (adapter->rx_ps_hdr_size) { 196 rx_desc->read.pkt_addr = 197 cpu_to_le64(buffer_info->page_dma); 198 rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma); 199 } else { 200 rx_desc->read.pkt_addr = 201 cpu_to_le64(buffer_info->dma); 202 rx_desc->read.hdr_addr = 0; 203 } 204 205 i++; 206 if (i == rx_ring->count) 207 i = 0; 208 buffer_info = &rx_ring->buffer_info[i]; 209 } 210 211no_buffers: 212 if (rx_ring->next_to_use != i) { 213 rx_ring->next_to_use = i; 214 if (i == 0) 215 i = (rx_ring->count - 1); 216 else 217 i--; 218 219 /* Force memory writes to complete before letting h/w 220 * know there are new descriptors to fetch. (Only 221 * applicable for weak-ordered memory model archs, 222 * such as IA-64). 
*/ 223 wmb(); 224 writel(i, adapter->hw.hw_addr + rx_ring->tail); 225 } 226} 227 228/** 229 * igbvf_clean_rx_irq - Send received data up the network stack; legacy 230 * @adapter: board private structure 231 * 232 * the return value indicates whether actual cleaning was done, there 233 * is no guarantee that everything was cleaned 234 **/ 235static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, 236 int *work_done, int work_to_do) 237{ 238 struct igbvf_ring *rx_ring = adapter->rx_ring; 239 struct net_device *netdev = adapter->netdev; 240 struct pci_dev *pdev = adapter->pdev; 241 union e1000_adv_rx_desc *rx_desc, *next_rxd; 242 struct igbvf_buffer *buffer_info, *next_buffer; 243 struct sk_buff *skb; 244 bool cleaned = false; 245 int cleaned_count = 0; 246 unsigned int total_bytes = 0, total_packets = 0; 247 unsigned int i; 248 u32 length, hlen, staterr; 249 250 i = rx_ring->next_to_clean; 251 rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i); 252 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 253 254 while (staterr & E1000_RXD_STAT_DD) { 255 if (*work_done >= work_to_do) 256 break; 257 (*work_done)++; 258 259 buffer_info = &rx_ring->buffer_info[i]; 260 261 /* HW will not DMA in data larger than the given buffer, even 262 * if it parses the (NFS, of course) header to be larger. In 263 * that case, it fills the header buffer and spills the rest 264 * into the page. 265 */ 266 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) & 267 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; 268 if (hlen > adapter->rx_ps_hdr_size) 269 hlen = adapter->rx_ps_hdr_size; 270 271 length = le16_to_cpu(rx_desc->wb.upper.length); 272 cleaned = true; 273 cleaned_count++; 274 275 skb = buffer_info->skb; 276 prefetch(skb->data - NET_IP_ALIGN); 277 buffer_info->skb = NULL; 278 if (!adapter->rx_ps_hdr_size) { 279 pci_unmap_single(pdev, buffer_info->dma, 280 adapter->rx_buffer_len, 281 PCI_DMA_FROMDEVICE); 282 buffer_info->dma = 0; 283 skb_put(skb, length); 284 goto send_up; 285 } 286 287 if (!skb_shinfo(skb)->nr_frags) { 288 pci_unmap_single(pdev, buffer_info->dma, 289 adapter->rx_ps_hdr_size + NET_IP_ALIGN, 290 PCI_DMA_FROMDEVICE); 291 skb_put(skb, hlen); 292 } 293 294 if (length) { 295 pci_unmap_page(pdev, buffer_info->page_dma, 296 PAGE_SIZE / 2, 297 PCI_DMA_FROMDEVICE); 298 buffer_info->page_dma = 0; 299 300 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++, 301 buffer_info->page, 302 buffer_info->page_offset, 303 length); 304 305 if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) || 306 (page_count(buffer_info->page) != 1)) 307 buffer_info->page = NULL; 308 else 309 get_page(buffer_info->page); 310 311 skb->len += length; 312 skb->data_len += length; 313 skb->truesize += length; 314 } 315send_up: 316 i++; 317 if (i == rx_ring->count) 318 i = 0; 319 next_rxd = IGBVF_RX_DESC_ADV(*rx_ring, i); 320 prefetch(next_rxd); 321 next_buffer = &rx_ring->buffer_info[i]; 322 323 if (!(staterr & E1000_RXD_STAT_EOP)) { 324 buffer_info->skb = next_buffer->skb; 325 buffer_info->dma = next_buffer->dma; 326 next_buffer->skb = skb; 327 next_buffer->dma = 0; 328 goto next_desc; 329 } 330 331 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { 332 dev_kfree_skb_irq(skb); 333 goto next_desc; 334 } 335 336 total_bytes += skb->len; 337 total_packets++; 338 339 igbvf_rx_checksum_adv(adapter, staterr, skb); 340 341 skb->protocol = eth_type_trans(skb, netdev); 342 343 igbvf_receive_skb(adapter, netdev, skb, staterr, 344 rx_desc->wb.upper.vlan); 345 346 netdev->last_rx = jiffies; 347 348next_desc: 349 
rx_desc->wb.upper.status_error = 0; 350 351 /* return some buffers to hardware, one at a time is too slow */ 352 if (cleaned_count >= IGBVF_RX_BUFFER_WRITE) { 353 igbvf_alloc_rx_buffers(rx_ring, cleaned_count); 354 cleaned_count = 0; 355 } 356 357 /* use prefetched values */ 358 rx_desc = next_rxd; 359 buffer_info = next_buffer; 360 361 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 362 } 363 364 rx_ring->next_to_clean = i; 365 cleaned_count = igbvf_desc_unused(rx_ring); 366 367 if (cleaned_count) 368 igbvf_alloc_rx_buffers(rx_ring, cleaned_count); 369 370 adapter->total_rx_packets += total_packets; 371 adapter->total_rx_bytes += total_bytes; 372 adapter->net_stats.rx_bytes += total_bytes; 373 adapter->net_stats.rx_packets += total_packets; 374 return cleaned; 375} 376 377static void igbvf_put_txbuf(struct igbvf_adapter *adapter, 378 struct igbvf_buffer *buffer_info) 379{ 380 buffer_info->dma = 0; 381 if (buffer_info->skb) { 382 skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb, 383 DMA_TO_DEVICE); 384 dev_kfree_skb_any(buffer_info->skb); 385 buffer_info->skb = NULL; 386 } 387 buffer_info->time_stamp = 0; 388} 389 390static void igbvf_print_tx_hang(struct igbvf_adapter *adapter) 391{ 392 struct igbvf_ring *tx_ring = adapter->tx_ring; 393 unsigned int i = tx_ring->next_to_clean; 394 unsigned int eop = tx_ring->buffer_info[i].next_to_watch; 395 union e1000_adv_tx_desc *eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop); 396 397 /* detected Tx unit hang */ 398 dev_err(&adapter->pdev->dev, 399 "Detected Tx Unit Hang:\n" 400 " TDH <%x>\n" 401 " TDT <%x>\n" 402 " next_to_use <%x>\n" 403 " next_to_clean <%x>\n" 404 "buffer_info[next_to_clean]:\n" 405 " time_stamp <%lx>\n" 406 " next_to_watch <%x>\n" 407 " jiffies <%lx>\n" 408 " next_to_watch.status <%x>\n", 409 readl(adapter->hw.hw_addr + tx_ring->head), 410 readl(adapter->hw.hw_addr + tx_ring->tail), 411 tx_ring->next_to_use, 412 tx_ring->next_to_clean, 413 tx_ring->buffer_info[eop].time_stamp, 414 eop, 415 jiffies, 416 eop_desc->wb.status); 417} 418 419/** 420 * igbvf_setup_tx_resources - allocate Tx resources (Descriptors) 421 * @adapter: board private structure 422 * 423 * Return 0 on success, negative on failure 424 **/ 425int igbvf_setup_tx_resources(struct igbvf_adapter *adapter, 426 struct igbvf_ring *tx_ring) 427{ 428 struct pci_dev *pdev = adapter->pdev; 429 int size; 430 431 size = sizeof(struct igbvf_buffer) * tx_ring->count; 432 tx_ring->buffer_info = vmalloc(size); 433 if (!tx_ring->buffer_info) 434 goto err; 435 memset(tx_ring->buffer_info, 0, size); 436 437 /* round up to nearest 4K */ 438 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); 439 tx_ring->size = ALIGN(tx_ring->size, 4096); 440 441 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, 442 &tx_ring->dma); 443 444 if (!tx_ring->desc) 445 goto err; 446 447 tx_ring->adapter = adapter; 448 tx_ring->next_to_use = 0; 449 tx_ring->next_to_clean = 0; 450 451 return 0; 452err: 453 vfree(tx_ring->buffer_info); 454 dev_err(&adapter->pdev->dev, 455 "Unable to allocate memory for the transmit descriptor ring\n"); 456 return -ENOMEM; 457} 458 459/** 460 * igbvf_setup_rx_resources - allocate Rx resources (Descriptors) 461 * @adapter: board private structure 462 * 463 * Returns 0 on success, negative on failure 464 **/ 465int igbvf_setup_rx_resources(struct igbvf_adapter *adapter, 466 struct igbvf_ring *rx_ring) 467{ 468 struct pci_dev *pdev = adapter->pdev; 469 int size, desc_len; 470 471 size = sizeof(struct igbvf_buffer) * rx_ring->count; 472 
rx_ring->buffer_info = vmalloc(size); 473 if (!rx_ring->buffer_info) 474 goto err; 475 memset(rx_ring->buffer_info, 0, size); 476 477 desc_len = sizeof(union e1000_adv_rx_desc); 478 479 /* Round up to nearest 4K */ 480 rx_ring->size = rx_ring->count * desc_len; 481 rx_ring->size = ALIGN(rx_ring->size, 4096); 482 483 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, 484 &rx_ring->dma); 485 486 if (!rx_ring->desc) 487 goto err; 488 489 rx_ring->next_to_clean = 0; 490 rx_ring->next_to_use = 0; 491 492 rx_ring->adapter = adapter; 493 494 return 0; 495 496err: 497 vfree(rx_ring->buffer_info); 498 rx_ring->buffer_info = NULL; 499 dev_err(&adapter->pdev->dev, 500 "Unable to allocate memory for the receive descriptor ring\n"); 501 return -ENOMEM; 502} 503 504/** 505 * igbvf_clean_tx_ring - Free Tx Buffers 506 * @tx_ring: ring to be cleaned 507 **/ 508static void igbvf_clean_tx_ring(struct igbvf_ring *tx_ring) 509{ 510 struct igbvf_adapter *adapter = tx_ring->adapter; 511 struct igbvf_buffer *buffer_info; 512 unsigned long size; 513 unsigned int i; 514 515 if (!tx_ring->buffer_info) 516 return; 517 518 /* Free all the Tx ring sk_buffs */ 519 for (i = 0; i < tx_ring->count; i++) { 520 buffer_info = &tx_ring->buffer_info[i]; 521 igbvf_put_txbuf(adapter, buffer_info); 522 } 523 524 size = sizeof(struct igbvf_buffer) * tx_ring->count; 525 memset(tx_ring->buffer_info, 0, size); 526 527 /* Zero out the descriptor ring */ 528 memset(tx_ring->desc, 0, tx_ring->size); 529 530 tx_ring->next_to_use = 0; 531 tx_ring->next_to_clean = 0; 532 533 writel(0, adapter->hw.hw_addr + tx_ring->head); 534 writel(0, adapter->hw.hw_addr + tx_ring->tail); 535} 536 537/** 538 * igbvf_free_tx_resources - Free Tx Resources per Queue 539 * @tx_ring: ring to free resources from 540 * 541 * Free all transmit software resources 542 **/ 543void igbvf_free_tx_resources(struct igbvf_ring *tx_ring) 544{ 545 struct pci_dev *pdev = tx_ring->adapter->pdev; 546 547 igbvf_clean_tx_ring(tx_ring); 548 549 vfree(tx_ring->buffer_info); 550 tx_ring->buffer_info = NULL; 551 552 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma); 553 554 tx_ring->desc = NULL; 555} 556 557/** 558 * igbvf_clean_rx_ring - Free Rx Buffers per Queue 559 * @adapter: board private structure 560 **/ 561static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring) 562{ 563 struct igbvf_adapter *adapter = rx_ring->adapter; 564 struct igbvf_buffer *buffer_info; 565 struct pci_dev *pdev = adapter->pdev; 566 unsigned long size; 567 unsigned int i; 568 569 if (!rx_ring->buffer_info) 570 return; 571 572 /* Free all the Rx ring sk_buffs */ 573 for (i = 0; i < rx_ring->count; i++) { 574 buffer_info = &rx_ring->buffer_info[i]; 575 if (buffer_info->dma) { 576 if (adapter->rx_ps_hdr_size){ 577 pci_unmap_single(pdev, buffer_info->dma, 578 adapter->rx_ps_hdr_size, 579 PCI_DMA_FROMDEVICE); 580 } else { 581 pci_unmap_single(pdev, buffer_info->dma, 582 adapter->rx_buffer_len, 583 PCI_DMA_FROMDEVICE); 584 } 585 buffer_info->dma = 0; 586 } 587 588 if (buffer_info->skb) { 589 dev_kfree_skb(buffer_info->skb); 590 buffer_info->skb = NULL; 591 } 592 593 if (buffer_info->page) { 594 if (buffer_info->page_dma) 595 pci_unmap_page(pdev, buffer_info->page_dma, 596 PAGE_SIZE / 2, 597 PCI_DMA_FROMDEVICE); 598 put_page(buffer_info->page); 599 buffer_info->page = NULL; 600 buffer_info->page_dma = 0; 601 buffer_info->page_offset = 0; 602 } 603 } 604 605 size = sizeof(struct igbvf_buffer) * rx_ring->count; 606 memset(rx_ring->buffer_info, 0, size); 607 608 /* Zero out the 
descriptor ring */ 609 memset(rx_ring->desc, 0, rx_ring->size); 610 611 rx_ring->next_to_clean = 0; 612 rx_ring->next_to_use = 0; 613 614 writel(0, adapter->hw.hw_addr + rx_ring->head); 615 writel(0, adapter->hw.hw_addr + rx_ring->tail); 616} 617 618/** 619 * igbvf_free_rx_resources - Free Rx Resources 620 * @rx_ring: ring to clean the resources from 621 * 622 * Free all receive software resources 623 **/ 624 625void igbvf_free_rx_resources(struct igbvf_ring *rx_ring) 626{ 627 struct pci_dev *pdev = rx_ring->adapter->pdev; 628 629 igbvf_clean_rx_ring(rx_ring); 630 631 vfree(rx_ring->buffer_info); 632 rx_ring->buffer_info = NULL; 633 634 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, 635 rx_ring->dma); 636 rx_ring->desc = NULL; 637} 638 639/** 640 * igbvf_update_itr - update the dynamic ITR value based on statistics 641 * @adapter: pointer to adapter 642 * @itr_setting: current adapter->itr 643 * @packets: the number of packets during this measurement interval 644 * @bytes: the number of bytes during this measurement interval 645 * 646 * Stores a new ITR value based on packets and byte 647 * counts during the last interrupt. The advantage of per interrupt 648 * computation is faster updates and more accurate ITR for the current 649 * traffic pattern. Constants in this function were computed 650 * based on theoretical maximum wire speed and thresholds were set based 651 * on testing data as well as attempting to minimize response time 652 * while increasing bulk throughput. This functionality is controlled 653 * by the InterruptThrottleRate module parameter. 654 **/ 655static unsigned int igbvf_update_itr(struct igbvf_adapter *adapter, 656 u16 itr_setting, int packets, 657 int bytes) 658{ 659 unsigned int retval = itr_setting; 660 661 if (packets == 0) 662 goto update_itr_done; 663 664 switch (itr_setting) { 665 case lowest_latency: 666 /* handle TSO and jumbo frames */ 667 if (bytes/packets > 8000) 668 retval = bulk_latency; 669 else if ((packets < 5) && (bytes > 512)) 670 retval = low_latency; 671 break; 672 case low_latency: /* 50 usec aka 20000 ints/s */ 673 if (bytes > 10000) { 674 /* this if handles the TSO accounting */ 675 if (bytes/packets > 8000) 676 retval = bulk_latency; 677 else if ((packets < 10) || ((bytes/packets) > 1200)) 678 retval = bulk_latency; 679 else if ((packets > 35)) 680 retval = lowest_latency; 681 } else if (bytes/packets > 2000) { 682 retval = bulk_latency; 683 } else if (packets <= 2 && bytes < 512) { 684 retval = lowest_latency; 685 } 686 break; 687 case bulk_latency: /* 250 usec aka 4000 ints/s */ 688 if (bytes > 25000) { 689 if (packets > 35) 690 retval = low_latency; 691 } else if (bytes < 6000) { 692 retval = low_latency; 693 } 694 break; 695 } 696 697update_itr_done: 698 return retval; 699} 700 701static void igbvf_set_itr(struct igbvf_adapter *adapter) 702{ 703 struct e1000_hw *hw = &adapter->hw; 704 u16 current_itr; 705 u32 new_itr = adapter->itr; 706 707 adapter->tx_itr = igbvf_update_itr(adapter, adapter->tx_itr, 708 adapter->total_tx_packets, 709 adapter->total_tx_bytes); 710 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 711 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) 712 adapter->tx_itr = low_latency; 713 714 adapter->rx_itr = igbvf_update_itr(adapter, adapter->rx_itr, 715 adapter->total_rx_packets, 716 adapter->total_rx_bytes); 717 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 718 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) 719 
adapter->rx_itr = low_latency; 720 721 current_itr = max(adapter->rx_itr, adapter->tx_itr); 722 723 switch (current_itr) { 724 /* counts and packets in update_itr are dependent on these numbers */ 725 case lowest_latency: 726 new_itr = 70000; 727 break; 728 case low_latency: 729 new_itr = 20000; /* aka hwitr = ~200 */ 730 break; 731 case bulk_latency: 732 new_itr = 4000; 733 break; 734 default: 735 break; 736 } 737 738 if (new_itr != adapter->itr) { 739 /* 740 * this attempts to bias the interrupt rate towards Bulk 741 * by adding intermediate steps when interrupt rate is 742 * increasing 743 */ 744 new_itr = new_itr > adapter->itr ? 745 min(adapter->itr + (new_itr >> 2), new_itr) : 746 new_itr; 747 adapter->itr = new_itr; 748 adapter->rx_ring->itr_val = 1952; 749 750 if (adapter->msix_entries) 751 adapter->rx_ring->set_itr = 1; 752 else 753 ew32(ITR, 1952); 754 } 755} 756 757/** 758 * igbvf_clean_tx_irq - Reclaim resources after transmit completes 759 * @adapter: board private structure 760 * returns true if ring is completely cleaned 761 **/ 762static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) 763{ 764 struct igbvf_adapter *adapter = tx_ring->adapter; 765 struct e1000_hw *hw = &adapter->hw; 766 struct net_device *netdev = adapter->netdev; 767 struct igbvf_buffer *buffer_info; 768 struct sk_buff *skb; 769 union e1000_adv_tx_desc *tx_desc, *eop_desc; 770 unsigned int total_bytes = 0, total_packets = 0; 771 unsigned int i, eop, count = 0; 772 bool cleaned = false; 773 774 i = tx_ring->next_to_clean; 775 eop = tx_ring->buffer_info[i].next_to_watch; 776 eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop); 777 778 while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) && 779 (count < tx_ring->count)) { 780 for (cleaned = false; !cleaned; count++) { 781 tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); 782 buffer_info = &tx_ring->buffer_info[i]; 783 cleaned = (i == eop); 784 skb = buffer_info->skb; 785 786 if (skb) { 787 unsigned int segs, bytecount; 788 789 /* gso_segs is currently only valid for tcp */ 790 segs = skb_shinfo(skb)->gso_segs ?: 1; 791 /* multiply data chunks by size of headers */ 792 bytecount = ((segs - 1) * skb_headlen(skb)) + 793 skb->len; 794 total_packets += segs; 795 total_bytes += bytecount; 796 } 797 798 igbvf_put_txbuf(adapter, buffer_info); 799 tx_desc->wb.status = 0; 800 801 i++; 802 if (i == tx_ring->count) 803 i = 0; 804 } 805 eop = tx_ring->buffer_info[i].next_to_watch; 806 eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop); 807 } 808 809 tx_ring->next_to_clean = i; 810 811 if (unlikely(count && 812 netif_carrier_ok(netdev) && 813 igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) { 814 /* Make sure that anybody stopping the queue after this 815 * sees the new next_to_clean. 
816 */ 817 smp_mb(); 818 if (netif_queue_stopped(netdev) && 819 !(test_bit(__IGBVF_DOWN, &adapter->state))) { 820 netif_wake_queue(netdev); 821 ++adapter->restart_queue; 822 } 823 } 824 825 if (adapter->detect_tx_hung) { 826 /* Detect a transmit hang in hardware, this serializes the 827 * check with the clearing of time_stamp and movement of i */ 828 adapter->detect_tx_hung = false; 829 if (tx_ring->buffer_info[i].time_stamp && 830 time_after(jiffies, tx_ring->buffer_info[i].time_stamp + 831 (adapter->tx_timeout_factor * HZ)) 832 && !(er32(STATUS) & E1000_STATUS_TXOFF)) { 833 834 tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); 835 /* detected Tx unit hang */ 836 igbvf_print_tx_hang(adapter); 837 838 netif_stop_queue(netdev); 839 } 840 } 841 adapter->net_stats.tx_bytes += total_bytes; 842 adapter->net_stats.tx_packets += total_packets; 843 return (count < tx_ring->count); 844} 845 846static irqreturn_t igbvf_msix_other(int irq, void *data) 847{ 848 struct net_device *netdev = data; 849 struct igbvf_adapter *adapter = netdev_priv(netdev); 850 struct e1000_hw *hw = &adapter->hw; 851 852 adapter->int_counter1++; 853 854 netif_carrier_off(netdev); 855 hw->mac.get_link_status = 1; 856 if (!test_bit(__IGBVF_DOWN, &adapter->state)) 857 mod_timer(&adapter->watchdog_timer, jiffies + 1); 858 859 ew32(EIMS, adapter->eims_other); 860 861 return IRQ_HANDLED; 862} 863 864static irqreturn_t igbvf_intr_msix_tx(int irq, void *data) 865{ 866 struct net_device *netdev = data; 867 struct igbvf_adapter *adapter = netdev_priv(netdev); 868 struct e1000_hw *hw = &adapter->hw; 869 struct igbvf_ring *tx_ring = adapter->tx_ring; 870 871 872 adapter->total_tx_bytes = 0; 873 adapter->total_tx_packets = 0; 874 875 /* auto mask will automatically reenable the interrupt when we write 876 * EICS */ 877 if (!igbvf_clean_tx_irq(tx_ring)) 878 /* Ring was not completely cleaned, so fire another interrupt */ 879 ew32(EICS, tx_ring->eims_value); 880 else 881 ew32(EIMS, tx_ring->eims_value); 882 883 return IRQ_HANDLED; 884} 885 886static irqreturn_t igbvf_intr_msix_rx(int irq, void *data) 887{ 888 struct net_device *netdev = data; 889 struct igbvf_adapter *adapter = netdev_priv(netdev); 890 891 adapter->int_counter0++; 892 893 /* Write the ITR value calculated at the end of the 894 * previous interrupt. 895 */ 896 if (adapter->rx_ring->set_itr) { 897 writel(adapter->rx_ring->itr_val, 898 adapter->hw.hw_addr + adapter->rx_ring->itr_register); 899 adapter->rx_ring->set_itr = 0; 900 } 901 902 if (napi_schedule_prep(&adapter->rx_ring->napi)) { 903 adapter->total_rx_bytes = 0; 904 adapter->total_rx_packets = 0; 905 __napi_schedule(&adapter->rx_ring->napi); 906 } 907 908 return IRQ_HANDLED; 909} 910 911#define IGBVF_NO_QUEUE -1 912 913static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue, 914 int tx_queue, int msix_vector) 915{ 916 struct e1000_hw *hw = &adapter->hw; 917 u32 ivar, index; 918 919 /* 82576 uses a table-based method for assigning vectors. 920 Each queue has a single entry in the table to which we write 921 a vector number along with a "valid" bit. Sadly, the layout 922 of the table is somewhat counterintuitive. 
*/ 923 if (rx_queue > IGBVF_NO_QUEUE) { 924 index = (rx_queue >> 1); 925 ivar = array_er32(IVAR0, index); 926 if (rx_queue & 0x1) { 927 /* vector goes into third byte of register */ 928 ivar = ivar & 0xFF00FFFF; 929 ivar |= (msix_vector | E1000_IVAR_VALID) << 16; 930 } else { 931 /* vector goes into low byte of register */ 932 ivar = ivar & 0xFFFFFF00; 933 ivar |= msix_vector | E1000_IVAR_VALID; 934 } 935 adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector; 936 array_ew32(IVAR0, index, ivar); 937 } 938 if (tx_queue > IGBVF_NO_QUEUE) { 939 index = (tx_queue >> 1); 940 ivar = array_er32(IVAR0, index); 941 if (tx_queue & 0x1) { 942 /* vector goes into high byte of register */ 943 ivar = ivar & 0x00FFFFFF; 944 ivar |= (msix_vector | E1000_IVAR_VALID) << 24; 945 } else { 946 /* vector goes into second byte of register */ 947 ivar = ivar & 0xFFFF00FF; 948 ivar |= (msix_vector | E1000_IVAR_VALID) << 8; 949 } 950 adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector; 951 array_ew32(IVAR0, index, ivar); 952 } 953} 954 955/** 956 * igbvf_configure_msix - Configure MSI-X hardware 957 * 958 * igbvf_configure_msix sets up the hardware to properly 959 * generate MSI-X interrupts. 960 **/ 961static void igbvf_configure_msix(struct igbvf_adapter *adapter) 962{ 963 u32 tmp; 964 struct e1000_hw *hw = &adapter->hw; 965 struct igbvf_ring *tx_ring = adapter->tx_ring; 966 struct igbvf_ring *rx_ring = adapter->rx_ring; 967 int vector = 0; 968 969 adapter->eims_enable_mask = 0; 970 971 igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++); 972 adapter->eims_enable_mask |= tx_ring->eims_value; 973 if (tx_ring->itr_val) 974 writel(tx_ring->itr_val, 975 hw->hw_addr + tx_ring->itr_register); 976 else 977 writel(1952, hw->hw_addr + tx_ring->itr_register); 978 979 igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++); 980 adapter->eims_enable_mask |= rx_ring->eims_value; 981 if (rx_ring->itr_val) 982 writel(rx_ring->itr_val, 983 hw->hw_addr + rx_ring->itr_register); 984 else 985 writel(1952, hw->hw_addr + rx_ring->itr_register); 986 987 /* set vector for other causes, i.e. link changes */ 988 989 tmp = (vector++ | E1000_IVAR_VALID); 990 991 ew32(IVAR_MISC, tmp); 992 993 adapter->eims_enable_mask = (1 << (vector)) - 1; 994 adapter->eims_other = 1 << (vector - 1); 995 e1e_flush(); 996} 997 998static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter) 999{ 1000 if (adapter->msix_entries) { 1001 pci_disable_msix(adapter->pdev); 1002 kfree(adapter->msix_entries); 1003 adapter->msix_entries = NULL; 1004 } 1005} 1006 1007/** 1008 * igbvf_set_interrupt_capability - set MSI or MSI-X if supported 1009 * 1010 * Attempt to configure interrupts using the best available 1011 * capabilities of the hardware and kernel. 
1012 **/ 1013static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter) 1014{ 1015 int err = -ENOMEM; 1016 int i; 1017 1018 /* we allocate 3 vectors, 1 for tx, 1 for rx, one for pf messages */ 1019 adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry), 1020 GFP_KERNEL); 1021 if (adapter->msix_entries) { 1022 for (i = 0; i < 3; i++) 1023 adapter->msix_entries[i].entry = i; 1024 1025 err = pci_enable_msix(adapter->pdev, 1026 adapter->msix_entries, 3); 1027 } 1028 1029 if (err) { 1030 /* MSI-X failed */ 1031 dev_err(&adapter->pdev->dev, 1032 "Failed to initialize MSI-X interrupts.\n"); 1033 igbvf_reset_interrupt_capability(adapter); 1034 } 1035} 1036 1037/** 1038 * igbvf_request_msix - Initialize MSI-X interrupts 1039 * 1040 * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the 1041 * kernel. 1042 **/ 1043static int igbvf_request_msix(struct igbvf_adapter *adapter) 1044{ 1045 struct net_device *netdev = adapter->netdev; 1046 int err = 0, vector = 0; 1047 1048 if (strlen(netdev->name) < (IFNAMSIZ - 5)) { 1049 sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name); 1050 sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name); 1051 } else { 1052 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); 1053 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); 1054 } 1055 1056 err = request_irq(adapter->msix_entries[vector].vector, 1057 &igbvf_intr_msix_tx, 0, adapter->tx_ring->name, 1058 netdev); 1059 if (err) 1060 goto out; 1061 1062 adapter->tx_ring->itr_register = E1000_EITR(vector); 1063 adapter->tx_ring->itr_val = 1952; 1064 vector++; 1065 1066 err = request_irq(adapter->msix_entries[vector].vector, 1067 &igbvf_intr_msix_rx, 0, adapter->rx_ring->name, 1068 netdev); 1069 if (err) 1070 goto out; 1071 1072 adapter->rx_ring->itr_register = E1000_EITR(vector); 1073 adapter->rx_ring->itr_val = 1952; 1074 vector++; 1075 1076 err = request_irq(adapter->msix_entries[vector].vector, 1077 &igbvf_msix_other, 0, netdev->name, netdev); 1078 if (err) 1079 goto out; 1080 1081 igbvf_configure_msix(adapter); 1082 return 0; 1083out: 1084 return err; 1085} 1086 1087/** 1088 * igbvf_alloc_queues - Allocate memory for all rings 1089 * @adapter: board private structure to initialize 1090 **/ 1091static int __devinit igbvf_alloc_queues(struct igbvf_adapter *adapter) 1092{ 1093 struct net_device *netdev = adapter->netdev; 1094 1095 adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL); 1096 if (!adapter->tx_ring) 1097 return -ENOMEM; 1098 1099 adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL); 1100 if (!adapter->rx_ring) { 1101 kfree(adapter->tx_ring); 1102 return -ENOMEM; 1103 } 1104 1105 netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64); 1106 1107 return 0; 1108} 1109 1110/** 1111 * igbvf_request_irq - initialize interrupts 1112 * 1113 * Attempts to configure interrupts using the best available 1114 * capabilities of the hardware and kernel. 
1115 **/ 1116static int igbvf_request_irq(struct igbvf_adapter *adapter) 1117{ 1118 int err = -1; 1119 1120 /* igbvf supports msi-x only */ 1121 if (adapter->msix_entries) 1122 err = igbvf_request_msix(adapter); 1123 1124 if (!err) 1125 return err; 1126 1127 dev_err(&adapter->pdev->dev, 1128 "Unable to allocate interrupt, Error: %d\n", err); 1129 1130 return err; 1131} 1132 1133static void igbvf_free_irq(struct igbvf_adapter *adapter) 1134{ 1135 struct net_device *netdev = adapter->netdev; 1136 int vector; 1137 1138 if (adapter->msix_entries) { 1139 for (vector = 0; vector < 3; vector++) 1140 free_irq(adapter->msix_entries[vector].vector, netdev); 1141 } 1142} 1143 1144/** 1145 * igbvf_irq_disable - Mask off interrupt generation on the NIC 1146 **/ 1147static void igbvf_irq_disable(struct igbvf_adapter *adapter) 1148{ 1149 struct e1000_hw *hw = &adapter->hw; 1150 1151 ew32(EIMC, ~0); 1152 1153 if (adapter->msix_entries) 1154 ew32(EIAC, 0); 1155} 1156 1157/** 1158 * igbvf_irq_enable - Enable default interrupt generation settings 1159 **/ 1160static void igbvf_irq_enable(struct igbvf_adapter *adapter) 1161{ 1162 struct e1000_hw *hw = &adapter->hw; 1163 1164 ew32(EIAC, adapter->eims_enable_mask); 1165 ew32(EIAM, adapter->eims_enable_mask); 1166 ew32(EIMS, adapter->eims_enable_mask); 1167} 1168 1169/** 1170 * igbvf_poll - NAPI Rx polling callback 1171 * @napi: struct associated with this polling callback 1172 * @budget: amount of packets driver is allowed to process this poll 1173 **/ 1174static int igbvf_poll(struct napi_struct *napi, int budget) 1175{ 1176 struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring, napi); 1177 struct igbvf_adapter *adapter = rx_ring->adapter; 1178 struct e1000_hw *hw = &adapter->hw; 1179 int work_done = 0; 1180 1181 igbvf_clean_rx_irq(adapter, &work_done, budget); 1182 1183 /* If not enough Rx work done, exit the polling mode */ 1184 if (work_done < budget) { 1185 napi_complete(napi); 1186 1187 if (adapter->itr_setting & 3) 1188 igbvf_set_itr(adapter); 1189 1190 if (!test_bit(__IGBVF_DOWN, &adapter->state)) 1191 ew32(EIMS, adapter->rx_ring->eims_value); 1192 } 1193 1194 return work_done; 1195} 1196 1197/** 1198 * igbvf_set_rlpml - set receive large packet maximum length 1199 * @adapter: board private structure 1200 * 1201 * Configure the maximum size of packets that will be received 1202 */ 1203static void igbvf_set_rlpml(struct igbvf_adapter *adapter) 1204{ 1205 int max_frame_size = adapter->max_frame_size; 1206 struct e1000_hw *hw = &adapter->hw; 1207 1208 if (adapter->vlgrp) 1209 max_frame_size += VLAN_TAG_SIZE; 1210 1211 e1000_rlpml_set_vf(hw, max_frame_size); 1212} 1213 1214static void igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 1215{ 1216 struct igbvf_adapter *adapter = netdev_priv(netdev); 1217 struct e1000_hw *hw = &adapter->hw; 1218 1219 if (hw->mac.ops.set_vfta(hw, vid, true)) 1220 dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid); 1221} 1222 1223static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 1224{ 1225 struct igbvf_adapter *adapter = netdev_priv(netdev); 1226 struct e1000_hw *hw = &adapter->hw; 1227 1228 igbvf_irq_disable(adapter); 1229 vlan_group_set_device(adapter->vlgrp, vid, NULL); 1230 1231 if (!test_bit(__IGBVF_DOWN, &adapter->state)) 1232 igbvf_irq_enable(adapter); 1233 1234 if (hw->mac.ops.set_vfta(hw, vid, false)) 1235 dev_err(&adapter->pdev->dev, 1236 "Failed to remove vlan id %d\n", vid); 1237} 1238 1239static void igbvf_vlan_rx_register(struct net_device *netdev, 1240 struct 
vlan_group *grp) 1241{ 1242 struct igbvf_adapter *adapter = netdev_priv(netdev); 1243 1244 adapter->vlgrp = grp; 1245} 1246 1247static void igbvf_restore_vlan(struct igbvf_adapter *adapter) 1248{ 1249 u16 vid; 1250 1251 if (!adapter->vlgrp) 1252 return; 1253 1254 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 1255 if (!vlan_group_get_device(adapter->vlgrp, vid)) 1256 continue; 1257 igbvf_vlan_rx_add_vid(adapter->netdev, vid); 1258 } 1259 1260 igbvf_set_rlpml(adapter); 1261} 1262 1263/** 1264 * igbvf_configure_tx - Configure Transmit Unit after Reset 1265 * @adapter: board private structure 1266 * 1267 * Configure the Tx unit of the MAC after a reset. 1268 **/ 1269static void igbvf_configure_tx(struct igbvf_adapter *adapter) 1270{ 1271 struct e1000_hw *hw = &adapter->hw; 1272 struct igbvf_ring *tx_ring = adapter->tx_ring; 1273 u64 tdba; 1274 u32 txdctl, dca_txctrl; 1275 1276 /* disable transmits */ 1277 txdctl = er32(TXDCTL(0)); 1278 ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE); 1279 msleep(10); 1280 1281 /* Setup the HW Tx Head and Tail descriptor pointers */ 1282 ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc)); 1283 tdba = tx_ring->dma; 1284 ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32))); 1285 ew32(TDBAH(0), (tdba >> 32)); 1286 ew32(TDH(0), 0); 1287 ew32(TDT(0), 0); 1288 tx_ring->head = E1000_TDH(0); 1289 tx_ring->tail = E1000_TDT(0); 1290 1291 /* Turn off Relaxed Ordering on head write-backs. The writebacks 1292 * MUST be delivered in order or it will completely screw up 1293 * our bookeeping. 1294 */ 1295 dca_txctrl = er32(DCA_TXCTRL(0)); 1296 dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN; 1297 ew32(DCA_TXCTRL(0), dca_txctrl); 1298 1299 /* enable transmits */ 1300 txdctl |= E1000_TXDCTL_QUEUE_ENABLE; 1301 ew32(TXDCTL(0), txdctl); 1302 1303 /* Setup Transmit Descriptor Settings for eop descriptor */ 1304 adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS; 1305 1306 /* enable Report Status bit */ 1307 adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS; 1308 1309 adapter->tx_queue_len = adapter->netdev->tx_queue_len; 1310} 1311 1312/** 1313 * igbvf_setup_srrctl - configure the receive control registers 1314 * @adapter: Board private structure 1315 **/ 1316static void igbvf_setup_srrctl(struct igbvf_adapter *adapter) 1317{ 1318 struct e1000_hw *hw = &adapter->hw; 1319 u32 srrctl = 0; 1320 1321 srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK | 1322 E1000_SRRCTL_BSIZEHDR_MASK | 1323 E1000_SRRCTL_BSIZEPKT_MASK); 1324 1325 /* Enable queue drop to avoid head of line blocking */ 1326 srrctl |= E1000_SRRCTL_DROP_EN; 1327 1328 /* Setup buffer sizes */ 1329 srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >> 1330 E1000_SRRCTL_BSIZEPKT_SHIFT; 1331 1332 if (adapter->rx_buffer_len < 2048) { 1333 adapter->rx_ps_hdr_size = 0; 1334 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; 1335 } else { 1336 adapter->rx_ps_hdr_size = 128; 1337 srrctl |= adapter->rx_ps_hdr_size << 1338 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; 1339 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; 1340 } 1341 1342 ew32(SRRCTL(0), srrctl); 1343} 1344 1345/** 1346 * igbvf_configure_rx - Configure Receive Unit after Reset 1347 * @adapter: board private structure 1348 * 1349 * Configure the Rx unit of the MAC after a reset. 
1350 **/ 1351static void igbvf_configure_rx(struct igbvf_adapter *adapter) 1352{ 1353 struct e1000_hw *hw = &adapter->hw; 1354 struct igbvf_ring *rx_ring = adapter->rx_ring; 1355 u64 rdba; 1356 u32 rdlen, rxdctl; 1357 1358 /* disable receives */ 1359 rxdctl = er32(RXDCTL(0)); 1360 ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE); 1361 msleep(10); 1362 1363 rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc); 1364 1365 /* 1366 * Setup the HW Rx Head and Tail Descriptor Pointers and 1367 * the Base and Length of the Rx Descriptor Ring 1368 */ 1369 rdba = rx_ring->dma; 1370 ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32))); 1371 ew32(RDBAH(0), (rdba >> 32)); 1372 ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc)); 1373 rx_ring->head = E1000_RDH(0); 1374 rx_ring->tail = E1000_RDT(0); 1375 ew32(RDH(0), 0); 1376 ew32(RDT(0), 0); 1377 1378 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; 1379 rxdctl &= 0xFFF00000; 1380 rxdctl |= IGBVF_RX_PTHRESH; 1381 rxdctl |= IGBVF_RX_HTHRESH << 8; 1382 rxdctl |= IGBVF_RX_WTHRESH << 16; 1383 1384 igbvf_set_rlpml(adapter); 1385 1386 /* enable receives */ 1387 ew32(RXDCTL(0), rxdctl); 1388} 1389 1390/** 1391 * igbvf_set_multi - Multicast and Promiscuous mode set 1392 * @netdev: network interface device structure 1393 * 1394 * The set_multi entry point is called whenever the multicast address 1395 * list or the network interface flags are updated. This routine is 1396 * responsible for configuring the hardware for proper multicast, 1397 * promiscuous mode, and all-multi behavior. 1398 **/ 1399static void igbvf_set_multi(struct net_device *netdev) 1400{ 1401 struct igbvf_adapter *adapter = netdev_priv(netdev); 1402 struct e1000_hw *hw = &adapter->hw; 1403 struct dev_mc_list *mc_ptr; 1404 u8 *mta_list = NULL; 1405 int i; 1406 1407 if (netdev->mc_count) { 1408 mta_list = kmalloc(netdev->mc_count * 6, GFP_ATOMIC); 1409 if (!mta_list) { 1410 dev_err(&adapter->pdev->dev, 1411 "failed to allocate multicast filter list\n"); 1412 return; 1413 } 1414 } 1415 1416 /* prepare a packed array of only addresses. */ 1417 mc_ptr = netdev->mc_list; 1418 1419 for (i = 0; i < netdev->mc_count; i++) { 1420 if (!mc_ptr) 1421 break; 1422 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, 1423 ETH_ALEN); 1424 mc_ptr = mc_ptr->next; 1425 } 1426 1427 hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0); 1428 kfree(mta_list); 1429} 1430 1431/** 1432 * igbvf_configure - configure the hardware for Rx and Tx 1433 * @adapter: private board structure 1434 **/ 1435static void igbvf_configure(struct igbvf_adapter *adapter) 1436{ 1437 igbvf_set_multi(adapter->netdev); 1438 1439 igbvf_restore_vlan(adapter); 1440 1441 igbvf_configure_tx(adapter); 1442 igbvf_setup_srrctl(adapter); 1443 igbvf_configure_rx(adapter); 1444 igbvf_alloc_rx_buffers(adapter->rx_ring, 1445 igbvf_desc_unused(adapter->rx_ring)); 1446} 1447 1448/* igbvf_reset - bring the hardware into a known good state 1449 * 1450 * This function boots the hardware and enables some settings that 1451 * require a configuration cycle of the hardware - those cannot be 1452 * set/changed during runtime. After reset the device needs to be 1453 * properly configured for Rx, Tx etc. 
1454 */ 1455static void igbvf_reset(struct igbvf_adapter *adapter) 1456{ 1457 struct e1000_mac_info *mac = &adapter->hw.mac; 1458 struct net_device *netdev = adapter->netdev; 1459 struct e1000_hw *hw = &adapter->hw; 1460 1461 /* Allow time for pending master requests to run */ 1462 if (mac->ops.reset_hw(hw)) 1463 dev_err(&adapter->pdev->dev, "PF still resetting\n"); 1464 1465 mac->ops.init_hw(hw); 1466 1467 if (is_valid_ether_addr(adapter->hw.mac.addr)) { 1468 memcpy(netdev->dev_addr, adapter->hw.mac.addr, 1469 netdev->addr_len); 1470 memcpy(netdev->perm_addr, adapter->hw.mac.addr, 1471 netdev->addr_len); 1472 } 1473} 1474 1475int igbvf_up(struct igbvf_adapter *adapter) 1476{ 1477 struct e1000_hw *hw = &adapter->hw; 1478 1479 /* hardware has been reset, we need to reload some things */ 1480 igbvf_configure(adapter); 1481 1482 clear_bit(__IGBVF_DOWN, &adapter->state); 1483 1484 napi_enable(&adapter->rx_ring->napi); 1485 if (adapter->msix_entries) 1486 igbvf_configure_msix(adapter); 1487 1488 /* Clear any pending interrupts. */ 1489 er32(EICR); 1490 igbvf_irq_enable(adapter); 1491 1492 /* start the watchdog */ 1493 hw->mac.get_link_status = 1; 1494 mod_timer(&adapter->watchdog_timer, jiffies + 1); 1495 1496 1497 return 0; 1498} 1499 1500void igbvf_down(struct igbvf_adapter *adapter) 1501{ 1502 struct net_device *netdev = adapter->netdev; 1503 struct e1000_hw *hw = &adapter->hw; 1504 u32 rxdctl, txdctl; 1505 1506 /* 1507 * signal that we're down so the interrupt handler does not 1508 * reschedule our watchdog timer 1509 */ 1510 set_bit(__IGBVF_DOWN, &adapter->state); 1511 1512 /* disable receives in the hardware */ 1513 rxdctl = er32(RXDCTL(0)); 1514 ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE); 1515 1516 netif_stop_queue(netdev); 1517 1518 /* disable transmits in the hardware */ 1519 txdctl = er32(TXDCTL(0)); 1520 ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE); 1521 1522 /* flush both disables and wait for them to finish */ 1523 e1e_flush(); 1524 msleep(10); 1525 1526 napi_disable(&adapter->rx_ring->napi); 1527 1528 igbvf_irq_disable(adapter); 1529 1530 del_timer_sync(&adapter->watchdog_timer); 1531 1532 netdev->tx_queue_len = adapter->tx_queue_len; 1533 netif_carrier_off(netdev); 1534 1535 /* record the stats before reset*/ 1536 igbvf_update_stats(adapter); 1537 1538 adapter->link_speed = 0; 1539 adapter->link_duplex = 0; 1540 1541 igbvf_reset(adapter); 1542 igbvf_clean_tx_ring(adapter->tx_ring); 1543 igbvf_clean_rx_ring(adapter->rx_ring); 1544} 1545 1546void igbvf_reinit_locked(struct igbvf_adapter *adapter) 1547{ 1548 might_sleep(); 1549 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) 1550 msleep(1); 1551 igbvf_down(adapter); 1552 igbvf_up(adapter); 1553 clear_bit(__IGBVF_RESETTING, &adapter->state); 1554} 1555 1556/** 1557 * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter) 1558 * @adapter: board private structure to initialize 1559 * 1560 * igbvf_sw_init initializes the Adapter private data structure. 1561 * Fields are initialized based on PCI device information and 1562 * OS network device settings (MTU size). 
1563 **/ 1564static int __devinit igbvf_sw_init(struct igbvf_adapter *adapter) 1565{ 1566 struct net_device *netdev = adapter->netdev; 1567 s32 rc; 1568 1569 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; 1570 adapter->rx_ps_hdr_size = 0; 1571 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 1572 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 1573 1574 adapter->tx_int_delay = 8; 1575 adapter->tx_abs_int_delay = 32; 1576 adapter->rx_int_delay = 0; 1577 adapter->rx_abs_int_delay = 8; 1578 adapter->itr_setting = 3; 1579 adapter->itr = 20000; 1580 1581 /* Set various function pointers */ 1582 adapter->ei->init_ops(&adapter->hw); 1583 1584 rc = adapter->hw.mac.ops.init_params(&adapter->hw); 1585 if (rc) 1586 return rc; 1587 1588 rc = adapter->hw.mbx.ops.init_params(&adapter->hw); 1589 if (rc) 1590 return rc; 1591 1592 igbvf_set_interrupt_capability(adapter); 1593 1594 if (igbvf_alloc_queues(adapter)) 1595 return -ENOMEM; 1596 1597 spin_lock_init(&adapter->tx_queue_lock); 1598 1599 /* Explicitly disable IRQ since the NIC can be in any state. */ 1600 igbvf_irq_disable(adapter); 1601 1602 spin_lock_init(&adapter->stats_lock); 1603 1604 set_bit(__IGBVF_DOWN, &adapter->state); 1605 return 0; 1606} 1607 1608static void igbvf_initialize_last_counter_stats(struct igbvf_adapter *adapter) 1609{ 1610 struct e1000_hw *hw = &adapter->hw; 1611 1612 adapter->stats.last_gprc = er32(VFGPRC); 1613 adapter->stats.last_gorc = er32(VFGORC); 1614 adapter->stats.last_gptc = er32(VFGPTC); 1615 adapter->stats.last_gotc = er32(VFGOTC); 1616 adapter->stats.last_mprc = er32(VFMPRC); 1617 adapter->stats.last_gotlbc = er32(VFGOTLBC); 1618 adapter->stats.last_gptlbc = er32(VFGPTLBC); 1619 adapter->stats.last_gorlbc = er32(VFGORLBC); 1620 adapter->stats.last_gprlbc = er32(VFGPRLBC); 1621 1622 adapter->stats.base_gprc = er32(VFGPRC); 1623 adapter->stats.base_gorc = er32(VFGORC); 1624 adapter->stats.base_gptc = er32(VFGPTC); 1625 adapter->stats.base_gotc = er32(VFGOTC); 1626 adapter->stats.base_mprc = er32(VFMPRC); 1627 adapter->stats.base_gotlbc = er32(VFGOTLBC); 1628 adapter->stats.base_gptlbc = er32(VFGPTLBC); 1629 adapter->stats.base_gorlbc = er32(VFGORLBC); 1630 adapter->stats.base_gprlbc = er32(VFGPRLBC); 1631} 1632 1633/** 1634 * igbvf_open - Called when a network interface is made active 1635 * @netdev: network interface device structure 1636 * 1637 * Returns 0 on success, negative value on failure 1638 * 1639 * The open entry point is called when a network interface is made 1640 * active by the system (IFF_UP). At this point all resources needed 1641 * for transmit and receive operations are allocated, the interrupt 1642 * handler is registered with the OS, the watchdog timer is started, 1643 * and the stack is notified that the interface is ready. 1644 **/ 1645static int igbvf_open(struct net_device *netdev) 1646{ 1647 struct igbvf_adapter *adapter = netdev_priv(netdev); 1648 struct e1000_hw *hw = &adapter->hw; 1649 int err; 1650 1651 /* disallow open during test */ 1652 if (test_bit(__IGBVF_TESTING, &adapter->state)) 1653 return -EBUSY; 1654 1655 /* allocate transmit descriptors */ 1656 err = igbvf_setup_tx_resources(adapter, adapter->tx_ring); 1657 if (err) 1658 goto err_setup_tx; 1659 1660 /* allocate receive descriptors */ 1661 err = igbvf_setup_rx_resources(adapter, adapter->rx_ring); 1662 if (err) 1663 goto err_setup_rx; 1664 1665 /* 1666 * before we allocate an interrupt, we must be ready to handle it. 
1667 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt 1668 * as soon as we call pci_request_irq, so we have to setup our 1669 * clean_rx handler before we do so. 1670 */ 1671 igbvf_configure(adapter); 1672 1673 err = igbvf_request_irq(adapter); 1674 if (err) 1675 goto err_req_irq; 1676 1677 /* From here on the code is the same as igbvf_up() */ 1678 clear_bit(__IGBVF_DOWN, &adapter->state); 1679 1680 napi_enable(&adapter->rx_ring->napi); 1681 1682 /* clear any pending interrupts */ 1683 er32(EICR); 1684 1685 igbvf_irq_enable(adapter); 1686 1687 /* start the watchdog */ 1688 hw->mac.get_link_status = 1; 1689 mod_timer(&adapter->watchdog_timer, jiffies + 1); 1690 1691 return 0; 1692 1693err_req_irq: 1694 igbvf_free_rx_resources(adapter->rx_ring); 1695err_setup_rx: 1696 igbvf_free_tx_resources(adapter->tx_ring); 1697err_setup_tx: 1698 igbvf_reset(adapter); 1699 1700 return err; 1701} 1702 1703/** 1704 * igbvf_close - Disables a network interface 1705 * @netdev: network interface device structure 1706 * 1707 * Returns 0, this is not allowed to fail 1708 * 1709 * The close entry point is called when an interface is de-activated 1710 * by the OS. The hardware is still under the drivers control, but 1711 * needs to be disabled. A global MAC reset is issued to stop the 1712 * hardware, and all transmit and receive resources are freed. 1713 **/ 1714static int igbvf_close(struct net_device *netdev) 1715{ 1716 struct igbvf_adapter *adapter = netdev_priv(netdev); 1717 1718 WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state)); 1719 igbvf_down(adapter); 1720 1721 igbvf_free_irq(adapter); 1722 1723 igbvf_free_tx_resources(adapter->tx_ring); 1724 igbvf_free_rx_resources(adapter->rx_ring); 1725 1726 return 0; 1727} 1728/** 1729 * igbvf_set_mac - Change the Ethernet Address of the NIC 1730 * @netdev: network interface device structure 1731 * @p: pointer to an address structure 1732 * 1733 * Returns 0 on success, negative on failure 1734 **/ 1735static int igbvf_set_mac(struct net_device *netdev, void *p) 1736{ 1737 struct igbvf_adapter *adapter = netdev_priv(netdev); 1738 struct e1000_hw *hw = &adapter->hw; 1739 struct sockaddr *addr = p; 1740 1741 if (!is_valid_ether_addr(addr->sa_data)) 1742 return -EADDRNOTAVAIL; 1743 1744 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 1745 1746 hw->mac.ops.rar_set(hw, hw->mac.addr, 0); 1747 1748 if (memcmp(addr->sa_data, hw->mac.addr, 6)) 1749 return -EADDRNOTAVAIL; 1750 1751 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 1752 1753 return 0; 1754} 1755 1756#define UPDATE_VF_COUNTER(reg, name) \ 1757 { \ 1758 u32 current_counter = er32(reg); \ 1759 if (current_counter < adapter->stats.last_##name) \ 1760 adapter->stats.name += 0x100000000LL; \ 1761 adapter->stats.last_##name = current_counter; \ 1762 adapter->stats.name &= 0xFFFFFFFF00000000LL; \ 1763 adapter->stats.name |= current_counter; \ 1764 } 1765 1766/** 1767 * igbvf_update_stats - Update the board statistics counters 1768 * @adapter: board private structure 1769**/ 1770void igbvf_update_stats(struct igbvf_adapter *adapter) 1771{ 1772 struct e1000_hw *hw = &adapter->hw; 1773 struct pci_dev *pdev = adapter->pdev; 1774 1775 /* 1776 * Prevent stats update while adapter is being reset, link is down 1777 * or if the pci connection is down. 
1778 */ 1779 if (adapter->link_speed == 0) 1780 return; 1781 1782 if (test_bit(__IGBVF_RESETTING, &adapter->state)) 1783 return; 1784 1785 if (pci_channel_offline(pdev)) 1786 return; 1787 1788 UPDATE_VF_COUNTER(VFGPRC, gprc); 1789 UPDATE_VF_COUNTER(VFGORC, gorc); 1790 UPDATE_VF_COUNTER(VFGPTC, gptc); 1791 UPDATE_VF_COUNTER(VFGOTC, gotc); 1792 UPDATE_VF_COUNTER(VFMPRC, mprc); 1793 UPDATE_VF_COUNTER(VFGOTLBC, gotlbc); 1794 UPDATE_VF_COUNTER(VFGPTLBC, gptlbc); 1795 UPDATE_VF_COUNTER(VFGORLBC, gorlbc); 1796 UPDATE_VF_COUNTER(VFGPRLBC, gprlbc); 1797 1798 /* Fill out the OS statistics structure */ 1799 adapter->net_stats.multicast = adapter->stats.mprc; 1800} 1801 1802static void igbvf_print_link_info(struct igbvf_adapter *adapter) 1803{ 1804 dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s\n", 1805 adapter->link_speed, 1806 ((adapter->link_duplex == FULL_DUPLEX) ? 1807 "Full Duplex" : "Half Duplex")); 1808} 1809 1810static bool igbvf_has_link(struct igbvf_adapter *adapter) 1811{ 1812 struct e1000_hw *hw = &adapter->hw; 1813 s32 ret_val = E1000_SUCCESS; 1814 bool link_active; 1815 1816 ret_val = hw->mac.ops.check_for_link(hw); 1817 link_active = !hw->mac.get_link_status; 1818 1819 /* if check for link returns error we will need to reset */ 1820 if (ret_val) 1821 schedule_work(&adapter->reset_task); 1822 1823 return link_active; 1824} 1825 1826/** 1827 * igbvf_watchdog - Timer Call-back 1828 * @data: pointer to adapter cast into an unsigned long 1829 **/ 1830static void igbvf_watchdog(unsigned long data) 1831{ 1832 struct igbvf_adapter *adapter = (struct igbvf_adapter *) data; 1833 1834 /* Do the rest outside of interrupt context */ 1835 schedule_work(&adapter->watchdog_task); 1836} 1837 1838static void igbvf_watchdog_task(struct work_struct *work) 1839{ 1840 struct igbvf_adapter *adapter = container_of(work, 1841 struct igbvf_adapter, 1842 watchdog_task); 1843 struct net_device *netdev = adapter->netdev; 1844 struct e1000_mac_info *mac = &adapter->hw.mac; 1845 struct igbvf_ring *tx_ring = adapter->tx_ring; 1846 struct e1000_hw *hw = &adapter->hw; 1847 u32 link; 1848 int tx_pending = 0; 1849 1850 link = igbvf_has_link(adapter); 1851 1852 if (link) { 1853 if (!netif_carrier_ok(netdev)) { 1854 bool txb2b = 1; 1855 1856 mac->ops.get_link_up_info(&adapter->hw, 1857 &adapter->link_speed, 1858 &adapter->link_duplex); 1859 igbvf_print_link_info(adapter); 1860 1861 /* 1862 * tweak tx_queue_len according to speed/duplex 1863 * and adjust the timeout factor 1864 */ 1865 netdev->tx_queue_len = adapter->tx_queue_len; 1866 adapter->tx_timeout_factor = 1; 1867 switch (adapter->link_speed) { 1868 case SPEED_10: 1869 txb2b = 0; 1870 netdev->tx_queue_len = 10; 1871 adapter->tx_timeout_factor = 16; 1872 break; 1873 case SPEED_100: 1874 txb2b = 0; 1875 netdev->tx_queue_len = 100; 1876 /* maybe add some timeout factor ? */ 1877 break; 1878 } 1879 1880 netif_carrier_on(netdev); 1881 netif_wake_queue(netdev); 1882 } 1883 } else { 1884 if (netif_carrier_ok(netdev)) { 1885 adapter->link_speed = 0; 1886 adapter->link_duplex = 0; 1887 dev_info(&adapter->pdev->dev, "Link is Down\n"); 1888 netif_carrier_off(netdev); 1889 netif_stop_queue(netdev); 1890 } 1891 } 1892 1893 if (netif_carrier_ok(netdev)) { 1894 igbvf_update_stats(adapter); 1895 } else { 1896 tx_pending = (igbvf_desc_unused(tx_ring) + 1 < 1897 tx_ring->count); 1898 if (tx_pending) { 1899 /* 1900 * We've lost link, so the controller stops DMA, 1901 * but we've got queued Tx work that's never going 1902 * to get done, so reset controller to flush Tx. 
1903 * (Do the reset outside of interrupt context). 1904 */ 1905 adapter->tx_timeout_count++; 1906 schedule_work(&adapter->reset_task); 1907 } 1908 } 1909 1910 /* Cause software interrupt to ensure Rx ring is cleaned */ 1911 ew32(EICS, adapter->rx_ring->eims_value); 1912 1913 /* Force detection of hung controller every watchdog period */ 1914 adapter->detect_tx_hung = 1; 1915 1916 /* Reset the timer */ 1917 if (!test_bit(__IGBVF_DOWN, &adapter->state)) 1918 mod_timer(&adapter->watchdog_timer, 1919 round_jiffies(jiffies + (2 * HZ))); 1920} 1921 1922#define IGBVF_TX_FLAGS_CSUM 0x00000001 1923#define IGBVF_TX_FLAGS_VLAN 0x00000002 1924#define IGBVF_TX_FLAGS_TSO 0x00000004 1925#define IGBVF_TX_FLAGS_IPV4 0x00000008 1926#define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000 1927#define IGBVF_TX_FLAGS_VLAN_SHIFT 16 1928 1929static int igbvf_tso(struct igbvf_adapter *adapter, 1930 struct igbvf_ring *tx_ring, 1931 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) 1932{ 1933 struct e1000_adv_tx_context_desc *context_desc; 1934 unsigned int i; 1935 int err; 1936 struct igbvf_buffer *buffer_info; 1937 u32 info = 0, tu_cmd = 0; 1938 u32 mss_l4len_idx, l4len; 1939 *hdr_len = 0; 1940 1941 if (skb_header_cloned(skb)) { 1942 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 1943 if (err) { 1944 dev_err(&adapter->pdev->dev, 1945 "igbvf_tso returning an error\n"); 1946 return err; 1947 } 1948 } 1949 1950 l4len = tcp_hdrlen(skb); 1951 *hdr_len += l4len; 1952 1953 if (skb->protocol == htons(ETH_P_IP)) { 1954 struct iphdr *iph = ip_hdr(skb); 1955 iph->tot_len = 0; 1956 iph->check = 0; 1957 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 1958 iph->daddr, 0, 1959 IPPROTO_TCP, 1960 0); 1961 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { 1962 ipv6_hdr(skb)->payload_len = 0; 1963 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 1964 &ipv6_hdr(skb)->daddr, 1965 0, IPPROTO_TCP, 0); 1966 } 1967 1968 i = tx_ring->next_to_use; 1969 1970 buffer_info = &tx_ring->buffer_info[i]; 1971 context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i); 1972 /* VLAN MACLEN IPLEN */ 1973 if (tx_flags & IGBVF_TX_FLAGS_VLAN) 1974 info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK); 1975 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); 1976 *hdr_len += skb_network_offset(skb); 1977 info |= (skb_transport_header(skb) - skb_network_header(skb)); 1978 *hdr_len += (skb_transport_header(skb) - skb_network_header(skb)); 1979 context_desc->vlan_macip_lens = cpu_to_le32(info); 1980 1981 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 1982 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); 1983 1984 if (skb->protocol == htons(ETH_P_IP)) 1985 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; 1986 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; 1987 1988 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); 1989 1990 /* MSS L4LEN IDX */ 1991 mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT); 1992 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT); 1993 1994 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); 1995 context_desc->seqnum_seed = 0; 1996 1997 buffer_info->time_stamp = jiffies; 1998 buffer_info->next_to_watch = i; 1999 buffer_info->dma = 0; 2000 i++; 2001 if (i == tx_ring->count) 2002 i = 0; 2003 2004 tx_ring->next_to_use = i; 2005 2006 return true; 2007} 2008 2009static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, 2010 struct igbvf_ring *tx_ring, 2011 struct sk_buff *skb, u32 tx_flags) 2012{ 2013 struct e1000_adv_tx_context_desc *context_desc; 2014 unsigned int i; 2015 struct igbvf_buffer *buffer_info; 2016 u32 
	u32 info = 0, tu_cmd = 0;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IGBVF_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		buffer_info = &tx_ring->buffer_info[i];
		context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IGBVF_TX_FLAGS_VLAN)
			info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);

		info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			info |= (skb_transport_header(skb) -
			         skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(info);

		tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			switch (skb->protocol) {
			case __constant_htons(ETH_P_IP):
				tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				break;
			case __constant_htons(ETH_P_IPV6):
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				break;
			default:
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
		context_desc->seqnum_seed = 0;
		context_desc->mss_l4len_idx = 0;

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = 0;
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}

static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* if there are enough descriptors then we don't need to worry */
	if (igbvf_desc_unused(adapter->tx_ring) >= size)
		return 0;

	netif_stop_queue(netdev);

	smp_mb();

	/* We need to check again just in case room has been made available */
	if (igbvf_desc_unused(adapter->tx_ring) < size)
		return -EBUSY;

	netif_wake_queue(netdev);

	++adapter->restart_queue;
	return 0;
}

#define IGBVF_MAX_TXD_PWR	16
#define IGBVF_MAX_DATA_PER_TXD	(1 << IGBVF_MAX_TXD_PWR)

static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
                                   struct igbvf_ring *tx_ring,
                                   struct sk_buff *skb,
                                   unsigned int first)
{
	struct igbvf_buffer *buffer_info;
	unsigned int len = skb_headlen(skb);
	unsigned int count = 0, i;
	unsigned int f;
	dma_addr_t *map;

	i = tx_ring->next_to_use;

	if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
		return 0;
	}

	map = skb_shinfo(skb)->dma_maps;

	buffer_info = &tx_ring->buffer_info[i];
	BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
	buffer_info->length = len;
	/* set time_stamp *before* dma to help avoid a possible race */
	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = skb_shinfo(skb)->dma_head;

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		struct skb_frag_struct *frag;

		i++;
		if (i == tx_ring->count)
			i = 0;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;

		buffer_info = &tx_ring->buffer_info[i];
		BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
		buffer_info->length = len;
		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = map[count];
		count++;
	}

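	/* the last buffer in the chain carries the skb pointer; pointing the
	 * first buffer's next_to_watch at it lets the clean-up path tell
	 * when the whole packet has completed */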
	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count + 1;
}

static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
                                      struct igbvf_ring *tx_ring,
                                      int tx_flags, int count, u32 paylen,
                                      u8 hdr_len)
{
	union e1000_adv_tx_desc *tx_desc = NULL;
	struct igbvf_buffer *buffer_info;
	u32 olinfo_status = 0, cmd_type_len;
	unsigned int i;

	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
	                E1000_ADVTXD_DCMD_DEXT);

	if (tx_flags & IGBVF_TX_FLAGS_VLAN)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	if (tx_flags & IGBVF_TX_FLAGS_TSO) {
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;

		/* insert tcp checksum */
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert ip checksum */
		if (tx_flags & IGBVF_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;

	} else if (tx_flags & IGBVF_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	}

	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->read.cmd_type_len =
		         cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems */
	mmiowb();
}

static int igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
                                     struct net_device *netdev,
                                     struct igbvf_ring *tx_ring)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	unsigned int first, tx_flags = 0;
	u8 hdr_len = 0;
	int count = 0;
	int tso = 0;

	if (test_bit(__IGBVF_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/*
	 * need: count + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for skb->data,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= IGBVF_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT);
	}

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IGBVF_TX_FLAGS_IPV4;

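	/* remember the descriptor this packet starts at so the ring can be
	 * rewound to it if the DMA mapping in igbvf_tx_map_adv() fails */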
	first = tx_ring->next_to_use;

	tso = skb_is_gso(skb) ?
	       igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0;
	if (unlikely(tso < 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IGBVF_TX_FLAGS_TSO;
	else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
	         (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IGBVF_TX_FLAGS_CSUM;

	/*
	 * count reflects descriptors mapped, if 0 then mapping error
	 * has occurred and we need to rewind the descriptor queue
	 */
	count = igbvf_tx_map_adv(adapter, tx_ring, skb, first);

	if (count) {
		igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
		                   skb->len, hdr_len);
		/* Make sure there is space in the ring for the next send. */
		igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
	} else {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}

static int igbvf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct igbvf_ring *tx_ring;
	int retval;

	if (test_bit(__IGBVF_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	tx_ring = &adapter->tx_ring[0];

	retval = igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring);

	return retval;
}

/**
 * igbvf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igbvf_tx_timeout(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}

static void igbvf_reset_task(struct work_struct *work)
{
	struct igbvf_adapter *adapter;
	adapter = container_of(work, struct igbvf_adapter, reset_task);

	igbvf_reinit_locked(adapter);
}

/**
 * igbvf_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
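 * (igbvf_watchdog_task() calls igbvf_update_stats() while the link is up.)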
 **/
static struct net_device_stats *igbvf_get_stats(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* only return the current stats */
	return &adapter->net_stats;
}

/**
 * igbvf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

#define MAX_STD_JUMBO_FRAME_SIZE 9234
	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
		return -EINVAL;
	}

	while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
		msleep(1);
	/* igbvf_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;
	if (netif_running(netdev))
		igbvf_down(adapter);

	/*
	 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 * However with the new *_jumbo_rx* routines, jumbo receives will use
	 * fragmented skbs
	 */

	if (max_frame <= 1024)
		adapter->rx_buffer_len = 1024;
	else if (max_frame <= 2048)
		adapter->rx_buffer_len = 2048;
	else
#if (PAGE_SIZE / 2) > 16384
		adapter->rx_buffer_len = 16384;
#else
		adapter->rx_buffer_len = PAGE_SIZE / 2;
#endif

	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
	    (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
		adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN +
		                         ETH_FCS_LEN;

	dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
	         netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		igbvf_up(adapter);
	else
		igbvf_reset(adapter);

	clear_bit(__IGBVF_RESETTING, &adapter->state);

	return 0;
}

static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	default:
		return -EOPNOTSUPP;
	}
}

static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
		igbvf_down(adapter);
		igbvf_free_irq(adapter);
	}

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int igbvf_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_restore_state(pdev);
	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
		return err;
	}

	pci_set_master(pdev);

	if (netif_running(netdev)) {
		err = igbvf_request_irq(adapter);
		if (err)
			return err;
	}

	igbvf_reset(adapter);

	if (netif_running(netdev))
		igbvf_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
#endif

static void igbvf_shutdown(struct pci_dev *pdev)
{
	igbvf_suspend(pdev, PMSG_SUSPEND);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igbvf_netpoll(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);

	igbvf_clean_tx_irq(adapter->tx_ring);

	enable_irq(adapter->pdev->irq);
}
#endif

/**
 * igbvf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev))
		igbvf_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igbvf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igbvf_resume routine.
 */
static pci_ers_result_t igbvf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
		        "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	igbvf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * igbvf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igbvf_resume routine.
 */
static void igbvf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igbvf_up(adapter)) {
			dev_err(&pdev->dev,
			        "can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}

static void igbvf_print_device_info(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
	dev_info(&pdev->dev, "Address: %02x:%02x:%02x:%02x:%02x:%02x\n",
	         /* MAC address */
	         netdev->dev_addr[0], netdev->dev_addr[1],
	         netdev->dev_addr[2], netdev->dev_addr[3],
	         netdev->dev_addr[4], netdev->dev_addr[5]);
	dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
}

static const struct net_device_ops igbvf_netdev_ops = {
	.ndo_open                       = igbvf_open,
	.ndo_stop                       = igbvf_close,
	.ndo_start_xmit                 = igbvf_xmit_frame,
	.ndo_get_stats                  = igbvf_get_stats,
	.ndo_set_multicast_list         = igbvf_set_multi,
	.ndo_set_mac_address            = igbvf_set_mac,
	.ndo_change_mtu                 = igbvf_change_mtu,
	.ndo_do_ioctl                   = igbvf_ioctl,
	.ndo_tx_timeout                 = igbvf_tx_timeout,
	.ndo_vlan_rx_register           = igbvf_vlan_rx_register,
	.ndo_vlan_rx_add_vid            = igbvf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid           = igbvf_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller            = igbvf_netpoll,
#endif
};

/**
 * igbvf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igbvf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igbvf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igbvf_probe(struct pci_dev *pdev,
                                 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igbvf_adapter *adapter;
	struct e1000_hw *hw;
	const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data];

	static int cards_found;
	int err, pci_using_dac;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			err = pci_set_consistent_dma_mask(pdev,
			                                  DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
				        "configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_regions(pdev, igbvf_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct igbvf_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	hw = &adapter->hw;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->ei = ei;
	adapter->pba = ei->pba;
	adapter->flags = ei->flags;
	adapter->hw.back = adapter;
	adapter->hw.mac.type = ei->mac;
	adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;

	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);

	err = -EIO;
	adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
	                              pci_resource_len(pdev, 0));

	if (!adapter->hw.hw_addr)
		goto err_ioremap;

	if (ei->get_variants) {
		err = ei->get_variants(adapter);
		if (err)
			goto err_ioremap;
	}

	/* setup adapter struct */
	err = igbvf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* construct the net_device struct */
	netdev->netdev_ops = &igbvf_netdev_ops;

	igbvf_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found++;

	netdev->features = NETIF_F_SG |
	                   NETIF_F_IP_CSUM |
	                   NETIF_F_HW_VLAN_TX |
	                   NETIF_F_HW_VLAN_RX |
	                   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	/* reset the controller to put the device in a known good state */
	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_info(&pdev->dev,
		         "PF still in reset state, assigning new address\n");
		random_ether_addr(hw->mac.addr);
	} else {
		err = hw->mac.ops.read_mac_addr(hw);
		if (err) {
			dev_err(&pdev->dev, "Error reading MAC address\n");
			goto err_hw_init;
		}
	}

	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address: "
		        "%02x:%02x:%02x:%02x:%02x:%02x\n",
		        netdev->dev_addr[0], netdev->dev_addr[1],
		        netdev->dev_addr[2], netdev->dev_addr[3],
		        netdev->dev_addr[4], netdev->dev_addr[5]);
		err = -EIO;
		goto err_hw_init;
	}

	setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
	            (unsigned long) adapter);

	INIT_WORK(&adapter->reset_task, igbvf_reset_task);
	INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task);

	/* ring size defaults */
	adapter->rx_ring->count = 1024;
	adapter->tx_ring->count = 1024;

	/* reset the hardware with the new settings */
	igbvf_reset(adapter);

	/* tell the stack to leave us alone until igbvf_open() is called */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_hw_init;

	igbvf_print_device_info(adapter);

	igbvf_initialize_last_counter_stats(adapter);

	return 0;

err_hw_init:
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_sw_init:
	igbvf_reset_interrupt_capability(adapter);
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * igbvf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igbvf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igbvf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * flush_scheduled_work() may reschedule our watchdog task, so
	 * explicitly disable watchdog tasks from being rescheduled
	 */
	set_bit(__IGBVF_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);

	flush_scheduled_work();

	unregister_netdev(netdev);

	igbvf_reset_interrupt_capability(adapter);

	/*
	 * it is important to delete the napi struct prior to freeing the
	 * rx ring so that you do not end up with null pointer refs
	 */
	netif_napi_del(&adapter->rx_ring->napi);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_regions(pdev);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/* PCI Error Recovery (ERS) */
static struct pci_error_handlers igbvf_err_handler = {
	.error_detected = igbvf_io_error_detected,
	.slot_reset = igbvf_io_slot_reset,
	.resume = igbvf_io_resume,
};

static struct pci_device_id igbvf_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
	{ } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);

/* PCI Device API Driver */
static struct pci_driver igbvf_driver = {
	.name     = igbvf_driver_name,
	.id_table = igbvf_pci_tbl,
	.probe    = igbvf_probe,
	.remove   = __devexit_p(igbvf_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = igbvf_suspend,
	.resume   = igbvf_resume,
#endif
	.shutdown = igbvf_shutdown,
	.err_handler = &igbvf_err_handler
};

/**
 * igbvf_init_module - Driver Registration Routine
 *
 * igbvf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igbvf_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       igbvf_driver_string, igbvf_driver_version);
	printk(KERN_INFO "%s\n", igbvf_copyright);

	ret = pci_register_driver(&igbvf_driver);
	pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, igbvf_driver_name,
	                       PM_QOS_DEFAULT_VALUE);

	return ret;
}
module_init(igbvf_init_module);

/**
 * igbvf_exit_module - Driver Exit Cleanup Routine
 *
 * igbvf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igbvf_exit_module(void)
{
	pci_unregister_driver(&igbvf_driver);
	pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, igbvf_driver_name);
}
module_exit(igbvf_exit_module);


MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) 82576 Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* netdev.c */