Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch '10GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
10GbE Intel Wired LAN Driver Updates 2018-01-26

This series contains updates to ixgbe and ixgbevf.

Emil updates ixgbevf to match ixgbe functionality, starting with
consolidating the functions that represent logical steps in the receive
process so we can more easily update them later. Updated ixgbevf to
only synchronize the length of the frame, which will typically be the
MTU or smaller. Updated the VF driver to use the length of the packet
instead of the DD status bit to determine if a new descriptor is ready
to be processed, which saves on reads and saves time during
initialization. Added support for DMA_ATTR_SKIP_CPU_SYNC/WEAK_ORDERING
to help improve performance on some platforms. Updated the VF driver to
do bulk updates of the page reference count instead of incrementing it
by one reference at a time. Updated the VF driver to only go through
the region of the receive ring that was designated to be cleaned up,
rather than processing the entire ring.
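
The bulk page reference count change is the most subtle of these. The
following user-space sketch is a simplified, hypothetical model of the
idea (struct page_model, take_ref_bulk() and friends are illustrative
names, not driver code): instead of issuing one page_ref_inc() per
received buffer, the driver spends a local pagecnt_bias and only tops
the real refcount up with a single page_ref_add(page, USHRT_MAX) once
the bias is nearly drained.

#include <limits.h>
#include <stdio.h>

struct page_model {
	unsigned long refcount;	/* stands in for the struct page refcount */
	unsigned long updates;	/* how many refcount writes were issued */
};

/* old scheme: one refcount update for every buffer handed up the stack */
static void take_ref_per_buffer(struct page_model *p)
{
	p->refcount++;		/* models page_ref_inc(page) */
	p->updates++;
}

/* new scheme: consume a local bias, restock it with one bulk update */
static void take_ref_bulk(struct page_model *p, unsigned short *pagecnt_bias)
{
	if (--(*pagecnt_bias) == 0) {
		p->refcount += USHRT_MAX; /* models page_ref_add(page, USHRT_MAX) */
		p->updates++;
		*pagecnt_bias = USHRT_MAX;
	}
}

int main(void)
{
	struct page_model per_buffer = { 1, 0 }, bulk = { 1, 0 };
	unsigned short bias = 1;	/* matches bi->pagecnt_bias = 1 at alloc */
	unsigned long i;

	for (i = 0; i < 1000000; i++) {
		take_ref_per_buffer(&per_buffer);
		take_ref_bulk(&bulk, &bias);
	}
	printf("per-buffer: %lu refcount updates, bulk: %lu\n",
	       per_buffer.updates, bulk.updates);
	return 0;
}

For a million buffers the bulk scheme issues roughly sixteen refcount
updates instead of a million, which is where the savings come from.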

Colin Ian King adds the use of ARRAY_SIZE() on various arrays.
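
For reference, ARRAY_SIZE() is simply a named form of the open-coded
division it replaces; the kernel macro additionally rejects pointer
arguments at compile time, which the bare sizeof division cannot do.
A minimal user-space sketch of the equivalence:

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

int main(void)
{
	unsigned int msgbuf[3] = { 0 };

	/* open-coded form used before this series */
	printf("%zu\n", sizeof(msgbuf) / sizeof(msgbuf[0]));	/* prints 3 */

	/* equivalent, but self-documenting */
	printf("%zu\n", ARRAY_SIZE(msgbuf));			/* prints 3 */
	return 0;
}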

Miroslav Lichvar fixes an issue where ethtool incorrectly reported
timestamping filters as unsupported for X550.

Paul adds support for reporting 5G link speed for some devices.

Dan Carpenter fixes a typo where && was used when it should have been
||.
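
A minimal standalone illustration of why the original check could never
fire (NVM_INVALID_PTR is assumed to be 0xFFFF here purely for the
example; see the driver headers for the real definition):

#include <stdio.h>

#define NVM_INVALID_PTR 0xFFFF	/* assumed value, for illustration only */

static int offset_is_invalid_buggy(unsigned int offset)
{
	/* always false: offset cannot equal two different values at once */
	return offset == 0x0 && offset == NVM_INVALID_PTR;
}

static int offset_is_invalid_fixed(unsigned int offset)
{
	return offset == 0x0 || offset == NVM_INVALID_PTR;
}

int main(void)
{
	printf("buggy(0x0)    = %d\n", offset_is_invalid_buggy(0x0));	/* 0 */
	printf("fixed(0x0)    = %d\n", offset_is_invalid_fixed(0x0));	/* 1 */
	printf("fixed(0xFFFF) = %d\n", offset_is_invalid_fixed(0xFFFF));	/* 1 */
	return 0;
}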
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+271 -179
+1 -1
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
···
	hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset);

	/* Return is offset to OEM Product Version block is invalid */
-	if (offset == 0x0 && offset == NVM_INVALID_PTR)
+	if (offset == 0x0 || offset == NVM_INVALID_PTR)
		return;

	/* Read product version block */
+19 -18
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
···
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
-		/* fallthrough */
+		break;
	case ixgbe_mac_X540:
	case ixgbe_mac_82599EB:
-		info->so_timestamping =
-			SOF_TIMESTAMPING_TX_SOFTWARE |
-			SOF_TIMESTAMPING_RX_SOFTWARE |
-			SOF_TIMESTAMPING_SOFTWARE |
-			SOF_TIMESTAMPING_TX_HARDWARE |
-			SOF_TIMESTAMPING_RX_HARDWARE |
-			SOF_TIMESTAMPING_RAW_HARDWARE;
-
-		if (adapter->ptp_clock)
-			info->phc_index = ptp_clock_index(adapter->ptp_clock);
-		else
-			info->phc_index = -1;
-
-		info->tx_types =
-			BIT(HWTSTAMP_TX_OFF) |
-			BIT(HWTSTAMP_TX_ON);
-
		info->rx_filters |=
			BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
			BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
···
	default:
		return ethtool_op_get_ts_info(dev, info);
	}
+
+	info->so_timestamping =
+		SOF_TIMESTAMPING_TX_SOFTWARE |
+		SOF_TIMESTAMPING_RX_SOFTWARE |
+		SOF_TIMESTAMPING_SOFTWARE |
+		SOF_TIMESTAMPING_TX_HARDWARE |
+		SOF_TIMESTAMPING_RX_HARDWARE |
+		SOF_TIMESTAMPING_RAW_HARDWARE;
+
+	if (adapter->ptp_clock)
+		info->phc_index = ptp_clock_index(adapter->ptp_clock);
+	else
+		info->phc_index = -1;
+
+	info->tx_types =
+		BIT(HWTSTAMP_TX_OFF) |
+		BIT(HWTSTAMP_TX_ON);
+
	return 0;
}

+9 -2
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
···
		rxdctl &= ~0x3FFFFF;
		rxdctl |=  0x080420;
#if (PAGE_SIZE < 8192)
-	} else {
+	/* RXDCTL.RLPML does not work on 82599 */
+	} else if (hw->mac.type != ixgbe_mac_82599EB) {
		rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
			    IXGBE_RXDCTL_RLPML_EN);

-		/* Limit the maximum frame size so we don't overrun the skb */
+		/* Limit the maximum frame size so we don't overrun the skb.
+		 * This can happen in SRIOV mode when the MTU of the VF is
+		 * higher than the MTU of the PF.
+		 */
		if (ring_uses_build_skb(ring) &&
		    !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
			rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB |
···
	switch (link_speed) {
	case IXGBE_LINK_SPEED_10GB_FULL:
		speed_str = "10 Gbps";
+		break;
+	case IXGBE_LINK_SPEED_5GB_FULL:
+		speed_str = "5 Gbps";
		break;
	case IXGBE_LINK_SPEED_2_5GB_FULL:
		speed_str = "2.5 Gbps";
+1 -1
drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
···
	u16 length, bufsz, i, start;
	u16 *local_buffer;

-	bufsz = sizeof(buf) / sizeof(buf[0]);
+	bufsz = ARRAY_SIZE(buf);

	/* Read a chunk at the pointer location */
	if (!buffer) {
+3
drivers/net/ethernet/intel/ixgbevf/ethtool.c
···
	IXGBEVF_STAT("tx_timeout_count", tx_timeout_count),
	IXGBEVF_NETDEV_STAT(multicast),
	IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error),
+	IXGBEVF_STAT("alloc_rx_page", alloc_rx_page),
+	IXGBEVF_STAT("alloc_rx_page_failed", alloc_rx_page_failed),
+	IXGBEVF_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
};

#define IXGBEVF_QUEUE_STATS_LEN ( \
+13 -3
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
···
struct ixgbevf_rx_buffer {
	dma_addr_t dma;
	struct page *page;
-	unsigned int page_offset;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+	__u32 page_offset;
+#else
+	__u16 page_offset;
+#endif
+	__u16 pagecnt_bias;
};

struct ixgbevf_stats {
···
struct ixgbevf_rx_queue_stats {
	u64 alloc_rx_page_failed;
	u64 alloc_rx_buff_failed;
+	u64 alloc_rx_page;
	u64 csum_err;
};

···
#define MIN_MSIX_Q_VECTORS 1
#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)

+#define IXGBEVF_RX_DMA_ATTR \
+	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
/* board specific private data structure */
struct ixgbevf_adapter {
	/* this field must be first, see ixgbevf_process_skb_fields */
···
	u64 hw_csum_rx_error;
	u64 hw_rx_no_dma_resources;
	int num_msix_vectors;
-	u32 alloc_rx_page_failed;
-	u32 alloc_rx_buff_failed;
+	u64 alloc_rx_page_failed;
+	u64 alloc_rx_buff_failed;
+	u64 alloc_rx_page;

	struct msix_entry *msix_entries;

+218 -144
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
···
	}
}

-static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
-					       struct ixgbevf_tx_buffer *tx_buffer)
-{
-	if (tx_buffer->skb) {
-		dev_kfree_skb_any(tx_buffer->skb);
-		if (dma_unmap_len(tx_buffer, len))
-			dma_unmap_single(tx_ring->dev,
-					 dma_unmap_addr(tx_buffer, dma),
-					 dma_unmap_len(tx_buffer, len),
-					 DMA_TO_DEVICE);
-	} else if (dma_unmap_len(tx_buffer, len)) {
-		dma_unmap_page(tx_ring->dev,
-			       dma_unmap_addr(tx_buffer, dma),
-			       dma_unmap_len(tx_buffer, len),
-			       DMA_TO_DEVICE);
-	}
-	tx_buffer->next_to_watch = NULL;
-	tx_buffer->skb = NULL;
-	dma_unmap_len_set(tx_buffer, len, 0);
-	/* tx_buffer must be completely set up in the transmit path */
-}
-
static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
{
	return ring->stats.packets;
···
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
-		tx_buffer->skb = NULL;
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
···
	}

	/* map page for use */
-	dma = dma_map_page(rx_ring->dev, page, 0,
-			   PAGE_SIZE, DMA_FROM_DEVICE);
+	dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
+				 DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
···
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);

-		rx_ring->rx_stats.alloc_rx_buff_failed++;
+		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;
+	bi->pagecnt_bias = 1;
+	rx_ring->rx_stats.alloc_rx_page++;

	return true;
}
···
		if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
			break;

+		/* sync the buffer for use by the device */
+		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+						 bi->page_offset,
+						 IXGBEVF_RX_BUFSZ,
+						 DMA_FROM_DEVICE);
+
		/* Refresh the desc even if pkt_addr didn't change
		 * because each write-back erases this info.
		 */
···
			i -= rx_ring->count;
		}

-		/* clear the hdr_addr for the next_to_use descriptor */
-		rx_desc->read.hdr_addr = 0;
+		/* clear the length for the next_to_use descriptor */
+		rx_desc->wb.upper.length = 0;

		cleaned_count--;
	} while (cleaned_count);
···
	new_buff->page = old_buff->page;
	new_buff->dma = old_buff->dma;
	new_buff->page_offset = old_buff->page_offset;
-
-	/* sync the buffer for use by the device */
-	dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
-					 new_buff->page_offset,
-					 IXGBEVF_RX_BUFSZ,
-					 DMA_FROM_DEVICE);
+	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}

static inline bool ixgbevf_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+}
+
+static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer,
+				      struct page *page,
+				      const unsigned int truesize)
+{
+	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;
+
+	/* avoid re-using remote pages */
+	if (unlikely(ixgbevf_page_is_reserved(page)))
+		return false;
+
+#if (PAGE_SIZE < 8192)
+	/* if we are only owner of page we can reuse it */
+	if (unlikely(page_ref_count(page) != pagecnt_bias))
+		return false;
+
+	/* flip page offset to other buffer */
+	rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;
+
+#else
+	/* move offset up to the next cache line */
+	rx_buffer->page_offset += truesize;
+
+	if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
+		return false;
+
+#endif
+
+	/* If we have drained the page fragment pool we need to update
+	 * the pagecnt_bias and page count so that we fully restock the
+	 * number of references the driver holds.
+	 */
+	if (unlikely(pagecnt_bias == 1)) {
+		page_ref_add(page, USHRT_MAX);
+		rx_buffer->pagecnt_bias = USHRT_MAX;
+	}
+
+	return true;
}

/**
···
 **/
static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
				struct ixgbevf_rx_buffer *rx_buffer,
+				u16 size,
				union ixgbe_adv_rx_desc *rx_desc,
				struct sk_buff *skb)
{
	struct page *page = rx_buffer->page;
	unsigned char *va = page_address(page) + rx_buffer->page_offset;
-	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
	unsigned int truesize = IXGBEVF_RX_BUFSZ;
#else
···
		return true;

		/* this page cannot be reused so discard it */
-		put_page(page);
		return false;
	}

···
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			(unsigned long)va & ~PAGE_MASK, size, truesize);

-	/* avoid re-using remote pages */
-	if (unlikely(ixgbevf_page_is_reserved(page)))
-		return false;
-
-#if (PAGE_SIZE < 8192)
-	/* if we are only owner of page we can reuse it */
-	if (unlikely(page_count(page) != 1))
-		return false;
-
-	/* flip page offset to other buffer */
-	rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;
-
-#else
-	/* move offset up to the next cache line */
-	rx_buffer->page_offset += truesize;
-
-	if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
-		return false;
-
-#endif
-	/* Even if we own the page, we are not allowed to use atomic_set()
-	 * This would break get_page_unless_zero() users.
-	 */
-	page_ref_inc(page);
-
-	return true;
+	return ixgbevf_can_reuse_rx_page(rx_buffer, page, truesize);
}

static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
···
{
	struct ixgbevf_rx_buffer *rx_buffer;
	struct page *page;
+	u16 size = le16_to_cpu(rx_desc->wb.upper.length);

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	page = rx_buffer->page;
	prefetchw(page);
+
+	/* we are reusing so sync this buffer for CPU use */
+	dma_sync_single_range_for_cpu(rx_ring->dev,
+				      rx_buffer->dma,
+				      rx_buffer->page_offset,
+				      size,
+				      DMA_FROM_DEVICE);

	if (likely(!skb)) {
		void *page_addr = page_address(page) +
···
		prefetchw(skb->data);
	}

-	/* we are reusing so sync this buffer for CPU use */
-	dma_sync_single_range_for_cpu(rx_ring->dev,
-				      rx_buffer->dma,
-				      rx_buffer->page_offset,
-				      IXGBEVF_RX_BUFSZ,
-				      DMA_FROM_DEVICE);
-
	/* pull page into skb */
-	if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+	if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) {
		/* hand second half of page back to the ring */
		ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
	} else {
-		/* we are not reusing the buffer so unmap it */
-		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
-			       PAGE_SIZE, DMA_FROM_DEVICE);
+		/* We are not reusing the buffer so unmap it and free
+		 * any references we are holding to it
+		 */
+		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+				     PAGE_SIZE, DMA_FROM_DEVICE,
+				     IXGBEVF_RX_DMA_ATTR);
+		__page_frag_cache_drain(page, rx_buffer->pagecnt_bias);
	}

	/* clear contents of buffer_info */
···

		rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);

-		if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
+		if (!rx_desc->wb.upper.length)
			break;

		/* This memory barrier is needed to keep us from reading
···
		skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb);

		/* exit if we failed to retrieve a buffer */
-		if (!skb)
+		if (!skb) {
+			rx_ring->rx_stats.alloc_rx_buff_failed++;
			break;
+		}

		cleaned_count++;
···
	txdctl |= (1u << 8) |	/* HTHRESH = 1 */
		  32;		/* PTHRESH = 32 */

+	/* reinitialize tx_buffer_info */
+	memset(ring->tx_buffer_info, 0,
+	       sizeof(struct ixgbevf_tx_buffer) * ring->count);
+
	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);

	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
···
				      struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
+	union ixgbe_adv_rx_desc *rx_desc;
	u64 rdba = ring->dma;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;
···
	IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
	ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
+
+	/* initialize rx_buffer_info */
+	memset(ring->rx_buffer_info, 0,
+	       sizeof(struct ixgbevf_rx_buffer) * ring->count);
+
+	/* initialize Rx descriptor 0 */
+	rx_desc = IXGBEVF_RX_DESC(ring, 0);
+	rx_desc->wb.upper.length = 0;

	/* reset ntu and ntc to place SW in sync with hardwdare */
	ring->next_to_clean = 0;
···
 **/
static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
{
-	struct device *dev = rx_ring->dev;
-	unsigned long size;
-	unsigned int i;
+	u16 i = rx_ring->next_to_clean;

	/* Free Rx ring sk_buff */
	if (rx_ring->skb) {
···
		rx_ring->skb = NULL;
	}

-	/* ring already cleared, nothing to do */
-	if (!rx_ring->rx_buffer_info)
-		return;
-
	/* Free all the Rx ring pages */
-	for (i = 0; i < rx_ring->count; i++) {
+	while (i != rx_ring->next_to_alloc) {
		struct ixgbevf_rx_buffer *rx_buffer;

		rx_buffer = &rx_ring->rx_buffer_info[i];
-		if (rx_buffer->dma)
-			dma_unmap_page(dev, rx_buffer->dma,
-				       PAGE_SIZE, DMA_FROM_DEVICE);
-		rx_buffer->dma = 0;
-		if (rx_buffer->page)
-			__free_page(rx_buffer->page);
-		rx_buffer->page = NULL;
+
+		/* Invalidate cache lines that may have been written to by
+		 * device so that we avoid corrupting memory.
+		 */
+		dma_sync_single_range_for_cpu(rx_ring->dev,
+					      rx_buffer->dma,
+					      rx_buffer->page_offset,
+					      IXGBEVF_RX_BUFSZ,
+					      DMA_FROM_DEVICE);
+
+		/* free resources associated with mapping */
+		dma_unmap_page_attrs(rx_ring->dev,
+				     rx_buffer->dma,
+				     PAGE_SIZE,
+				     DMA_FROM_DEVICE,
+				     IXGBEVF_RX_DMA_ATTR);
+
+		__page_frag_cache_drain(rx_buffer->page,
+					rx_buffer->pagecnt_bias);
+
+		i++;
+		if (i == rx_ring->count)
+			i = 0;
	}

-	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
-	memset(rx_ring->rx_buffer_info, 0, size);
-
-	/* Zero out the descriptor ring */
-	memset(rx_ring->desc, 0, rx_ring->size);
+	rx_ring->next_to_alloc = 0;
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
}

/**
···
 **/
static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
{
-	struct ixgbevf_tx_buffer *tx_buffer_info;
-	unsigned long size;
-	unsigned int i;
+	u16 i = tx_ring->next_to_clean;
+	struct ixgbevf_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];

-	if (!tx_ring->tx_buffer_info)
-		return;
+	while (i != tx_ring->next_to_use) {
+		union ixgbe_adv_tx_desc *eop_desc, *tx_desc;

-	/* Free all the Tx ring sk_buffs */
-	for (i = 0; i < tx_ring->count; i++) {
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
+		/* Free all the Tx ring sk_buffs */
+		dev_kfree_skb_any(tx_buffer->skb);
+
+		/* unmap skb header data */
+		dma_unmap_single(tx_ring->dev,
+				 dma_unmap_addr(tx_buffer, dma),
+				 dma_unmap_len(tx_buffer, len),
+				 DMA_TO_DEVICE);
+
+		/* check for eop_desc to determine the end of the packet */
+		eop_desc = tx_buffer->next_to_watch;
+		tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
+
+		/* unmap remaining buffers */
+		while (tx_desc != eop_desc) {
+			tx_buffer++;
+			tx_desc++;
+			i++;
+			if (unlikely(i == tx_ring->count)) {
+				i = 0;
+				tx_buffer = tx_ring->tx_buffer_info;
+				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+			}
+
+			/* unmap any remaining paged data */
+			if (dma_unmap_len(tx_buffer, len))
+				dma_unmap_page(tx_ring->dev,
+					       dma_unmap_addr(tx_buffer, dma),
+					       dma_unmap_len(tx_buffer, len),
+					       DMA_TO_DEVICE);
+		}
+
+		/* move us one more past the eop_desc for start of next pkt */
+		tx_buffer++;
+		i++;
+		if (unlikely(i == tx_ring->count)) {
+			i = 0;
+			tx_buffer = tx_ring->tx_buffer_info;
+		}
	}

-	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
-	memset(tx_ring->tx_buffer_info, 0, size);
+	/* reset next_to_use and next_to_clean */
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;

-	memset(tx_ring->desc, 0, tx_ring->size);
}

/**
···
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
+	u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
+	u64 alloc_rx_page = 0, hw_csum_rx_error = 0;
	int i;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
···
			   adapter->stats.vfmprc);

	for (i = 0; i < adapter->num_rx_queues; i++) {
-		adapter->hw_csum_rx_error +=
-			adapter->rx_ring[i]->hw_csum_rx_error;
-		adapter->rx_ring[i]->hw_csum_rx_error = 0;
+		struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
+
+		hw_csum_rx_error += rx_ring->rx_stats.csum_err;
+		alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
+		alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
+		alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
	}
+
+	adapter->hw_csum_rx_error = hw_csum_rx_error;
+	adapter->alloc_rx_page_failed = alloc_rx_page_failed;
+	adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
+	adapter->alloc_rx_page = alloc_rx_page;
}

/**
···
	int size;

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
-	tx_ring->tx_buffer_info = vzalloc(size);
+	tx_ring->tx_buffer_info = vmalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

···
	int size;

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
-	rx_ring->rx_buffer_info = vzalloc(size);
+	rx_ring->rx_buffer_info = vmalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

···
			       struct ixgbevf_tx_buffer *first,
			       const u8 hdr_len)
{
-	dma_addr_t dma;
	struct sk_buff *skb = first->skb;
	struct ixgbevf_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
-	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
-	unsigned int data_len = skb->data_len;
-	unsigned int size = skb_headlen(skb);
-	unsigned int paylen = skb->len - hdr_len;
+	struct skb_frag_struct *frag;
+	dma_addr_t dma;
+	unsigned int data_len, size;
	u32 tx_flags = first->tx_flags;
-	__le32 cmd_type;
+	__le32 cmd_type = ixgbevf_tx_cmd_type(tx_flags);
	u16 i = tx_ring->next_to_use;

	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);

-	ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
-	cmd_type = ixgbevf_tx_cmd_type(tx_flags);
+	ixgbevf_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
+
+	size = skb_headlen(skb);
+	data_len = skb->data_len;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
-	if (dma_mapping_error(tx_ring->dev, dma))
-		goto dma_error;

-	/* record length, and DMA address */
-	dma_unmap_len_set(first, len, size);
-	dma_unmap_addr_set(first, dma, dma);
+	tx_buffer = first;

-	tx_desc->read.buffer_addr = cpu_to_le64(dma);
+	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+		if (dma_mapping_error(tx_ring->dev, dma))
+			goto dma_error;

-	for (;;) {
+		/* record length, and DMA address */
+		dma_unmap_len_set(tx_buffer, len, size);
+		dma_unmap_addr_set(tx_buffer, dma, dma);
+
+		tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
		while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
···
				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
				i = 0;
			}
+			tx_desc->read.olinfo_status = 0;

			dma += IXGBE_MAX_DATA_PER_TXD;
			size -= IXGBE_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
-			tx_desc->read.olinfo_status = 0;
		}

		if (likely(!data_len))
···
			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
			i = 0;
		}
+		tx_desc->read.olinfo_status = 0;

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);
-		if (dma_mapping_error(tx_ring->dev, dma))
-			goto dma_error;

		tx_buffer = &tx_ring->tx_buffer_info[i];
-		dma_unmap_len_set(tx_buffer, len, size);
-		dma_unmap_addr_set(tx_buffer, dma, dma);
-
-		tx_desc->read.buffer_addr = cpu_to_le64(dma);
-		tx_desc->read.olinfo_status = 0;
-
-		frag++;
	}

	/* write last descriptor with RS and EOP bits */
···
	return;
dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");
+	tx_buffer = &tx_ring->tx_buffer_info[i];

	/* clear dma mappings for failed tx_buffer_info map */
-	for (;;) {
+	while (tx_buffer != first) {
+		if (dma_unmap_len(tx_buffer, len))
+			dma_unmap_page(tx_ring->dev,
+				       dma_unmap_addr(tx_buffer, dma),
+				       dma_unmap_len(tx_buffer, len),
+				       DMA_TO_DEVICE);
+		dma_unmap_len_set(tx_buffer, len, 0);
+
+		if (i-- == 0)
+			i += tx_ring->count;
		tx_buffer = &tx_ring->tx_buffer_info[i];
-		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
-		if (tx_buffer == first)
-			break;
-		if (i == 0)
-			i = tx_ring->count;
-		i--;
	}
+
+	if (dma_unmap_len(tx_buffer, len))
+		dma_unmap_single(tx_ring->dev,
+				 dma_unmap_addr(tx_buffer, dma),
+				 dma_unmap_len(tx_buffer, len),
+				 DMA_TO_DEVICE);
+	dma_unmap_len_set(tx_buffer, len, 0);
+
+	dev_kfree_skb_any(tx_buffer->skb);
+	tx_buffer->skb = NULL;

	tx_ring->next_to_use = i;
}
+7 -10
drivers/net/ethernet/intel/ixgbevf/vf.c
···
	ether_addr_copy(msg_addr, addr);

	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
-					     sizeof(msgbuf) / sizeof(u32));
+					     ARRAY_SIZE(msgbuf));
	if (!ret_val) {
		msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

···
	ether_addr_copy(msg_addr, addr);

	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
-					     sizeof(msgbuf) / sizeof(u32));
-
+					     ARRAY_SIZE(msgbuf));
	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* if nacked the address was rejected, use "perm_addr" */
···
	msgbuf[1] = xcast_mode;

	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
-					 sizeof(msgbuf) / sizeof(u32));
+					 ARRAY_SIZE(msgbuf));
	if (err)
		return err;

···
	msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;

	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
-					 sizeof(msgbuf) / sizeof(u32));
+					 ARRAY_SIZE(msgbuf));
	if (err)
		goto mbx_err;

···
	msgbuf[1] = max_size;

	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
-					     sizeof(msgbuf) / sizeof(u32));
+					     ARRAY_SIZE(msgbuf));
	if (ret_val)
		return ret_val;
	if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
···
	msg[1] = api;
	msg[2] = 0;

-	err = ixgbevf_write_msg_read_ack(hw, msg, msg,
-					 sizeof(msg) / sizeof(u32));
+	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
	if (!err) {
		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;

···
	msg[0] = IXGBE_VF_GET_QUEUE;
	msg[1] = msg[2] = msg[3] = msg[4] = 0;

-	err = ixgbevf_write_msg_read_ack(hw, msg, msg,
-					 sizeof(msg) / sizeof(u32));
+	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
	if (!err) {
		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
