Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

iavf: switch to Page Pool

Now that the IAVF driver simply uses dev_alloc_page() + free_page() with
no custom recycling logic, it can easily be switched to using the Page
Pool / libeth API instead.
This allows removing all the dancing around headroom, HW buffer size,
and page order. All DMA-for-device is now done in the PP core, and
DMA-for-CPU in the libeth helper.
Use skb_mark_for_recycle() to bring back recycling and restore the
performance. Speaking of performance: it is on par with the baseline and
faster with the PP optimization series applied. Memory usage for a
1500-byte MTU, however, is now almost 2x lower (x86_64) thanks to
allocating a page only for every second descriptor.

Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>

Authored by Alexander Lobakin, committed by Tony Nguyen
5fa4caff 97cadd3d
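To illustrate what the conversion buys, below is a minimal, generic sketch of the Page Pool Rx pattern the driver moves to: the pool owns page allocation, DMA mapping, and the for-device sync, while skb_mark_for_recycle() routes freed pages back to the pool instead of the page allocator. This is not iavf code; everything prefixed "example_" is hypothetical and the pool parameters are illustrative only.

/* Minimal, generic Page Pool Rx sketch -- not iavf code. Everything named
 * "example_*" is hypothetical; only the page_pool/skb APIs are real.
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/page_pool/helpers.h>

/* Headroom mirrors the old IAVF_SKB_PAD; buffer length is what is left of an
 * order-0 page after headroom and the skb_shared_info tailroom.
 */
#define EXAMPLE_HDROOM	(NET_SKB_PAD + NET_IP_ALIGN)
#define EXAMPLE_BUF_LEN	SKB_WITH_OVERHEAD(PAGE_SIZE - EXAMPLE_HDROOM)

struct example_ring {
	struct page_pool *pp;	/* owns pages, DMA mapping and recycling */
	u32 count;		/* number of descriptors */
};

static int example_create_pool(struct example_ring *ring, struct device *dev,
			       struct napi_struct *napi)
{
	struct page_pool_params pp_params = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,
		.pool_size	= ring->count,
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.napi		= napi,
		.dma_dir	= DMA_FROM_DEVICE,
		.offset		= EXAMPLE_HDROOM,
		.max_len	= EXAMPLE_BUF_LEN,
	};

	ring->pp = page_pool_create(&pp_params);

	return PTR_ERR_OR_ZERO(ring->pp);
}

/* Refill one descriptor: the pool maps the page and syncs it for the HW */
static dma_addr_t example_alloc_buf(struct example_ring *ring,
				    struct page **page)
{
	*page = page_pool_dev_alloc_pages(ring->pp);
	if (unlikely(!*page))
		return DMA_MAPPING_ERROR;

	return page_pool_get_dma_addr(*page) + ring->pp->p.offset;
}

/* Build an skb around a completed buffer; recycling returns the page to
 * ring->pp instead of the page allocator when the skb is freed.
 */
static struct sk_buff *example_build_skb(struct example_ring *ring,
					 struct page *page, u32 size)
{
	struct sk_buff *skb;

	skb = napi_build_skb(page_address(page), PAGE_SIZE);
	if (unlikely(!skb)) {
		page_pool_put_full_page(ring->pp, page, false);
		return NULL;
	}

	skb_mark_for_recycle(skb);

	skb_reserve(skb, ring->pp->p.offset);
	__skb_put(skb, size);

	return skb;
}

In the patch itself this plumbing is hidden behind libeth_rx_fq_create() and the other libeth helpers, so the driver only keeps the page_pool and libeth_fqe pointers around.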

+110 -213
+4 -3
drivers/net/ethernet/intel/iavf/iavf_main.c
···
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright(c) 2013 - 2018 Intel Corporation. */
 
+#include <linux/net/intel/libie/rx.h>
+
 #include "iavf.h"
 #include "iavf_prototype.h"
 /* All iavf tracepoints are defined by the include below, which must
···
 MODULE_ALIAS("i40evf");
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
+MODULE_IMPORT_NS(LIBETH);
 MODULE_IMPORT_NS(LIBIE);
 MODULE_LICENSE("GPL v2");
 
···
 		rx_ring = &adapter->rx_rings[i];
 		rx_ring->queue_index = i;
 		rx_ring->netdev = adapter->netdev;
-		rx_ring->dev = &adapter->pdev->dev;
 		rx_ring->count = adapter->rx_desc_count;
 		rx_ring->itr_setting = IAVF_ITR_RX_DEF;
 	}
···
 	iavf_set_ethtool_ops(netdev);
 	netdev->watchdog_timeo = 5 * HZ;
 
-	/* MTU range: 68 - 9710 */
 	netdev->min_mtu = ETH_MIN_MTU;
-	netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;
+	netdev->max_mtu = LIBIE_MAX_MTU;
 
 	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
 		dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
+74 -181
drivers/net/ethernet/intel/iavf/iavf_txrx.c
···
  **/
 static void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
 {
-	unsigned long bi_size;
-	u16 i;
-
 	/* ring already cleared, nothing to do */
-	if (!rx_ring->rx_bi)
+	if (!rx_ring->rx_fqes)
 		return;
 
 	if (rx_ring->skb) {
···
 		rx_ring->skb = NULL;
 	}
 
-	/* Free all the Rx ring sk_buffs */
-	for (i = 0; i < rx_ring->count; i++) {
-		struct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
+	/* Free all the Rx ring buffers */
+	for (u32 i = rx_ring->next_to_clean; i != rx_ring->next_to_use; ) {
+		const struct libeth_fqe *rx_fqes = &rx_ring->rx_fqes[i];
 
-		if (!rx_bi->page)
-			continue;
+		page_pool_put_full_page(rx_ring->pp, rx_fqes->page, false);
 
-		/* Invalidate cache lines that may have been written to by
-		 * device so that we avoid corrupting memory.
-		 */
-		dma_sync_single_range_for_cpu(rx_ring->dev,
-					      rx_bi->dma,
-					      rx_bi->page_offset,
-					      IAVF_RXBUFFER_3072,
-					      DMA_FROM_DEVICE);
-
-		/* free resources associated with mapping */
-		dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
-				     iavf_rx_pg_size(rx_ring),
-				     DMA_FROM_DEVICE,
-				     IAVF_RX_DMA_ATTR);
-
-		__free_page(rx_bi->page);
-
-		rx_bi->page = NULL;
-		rx_bi->page_offset = 0;
+		if (unlikely(++i == rx_ring->count))
+			i = 0;
 	}
-
-	bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
-	memset(rx_ring->rx_bi, 0, bi_size);
-
-	/* Zero out the descriptor ring */
-	memset(rx_ring->desc, 0, rx_ring->size);
 
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
···
  **/
 void iavf_free_rx_resources(struct iavf_ring *rx_ring)
 {
+	struct libeth_fq fq = {
+		.fqes	= rx_ring->rx_fqes,
+		.pp	= rx_ring->pp,
+	};
+
 	iavf_clean_rx_ring(rx_ring);
-	kfree(rx_ring->rx_bi);
-	rx_ring->rx_bi = NULL;
 
 	if (rx_ring->desc) {
-		dma_free_coherent(rx_ring->dev, rx_ring->size,
+		dma_free_coherent(rx_ring->pp->p.dev, rx_ring->size,
 				  rx_ring->desc, rx_ring->dma);
 		rx_ring->desc = NULL;
 	}
+
+	libeth_rx_fq_destroy(&fq);
+	rx_ring->rx_fqes = NULL;
+	rx_ring->pp = NULL;
 }
 
 /**
···
  **/
 int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
 {
-	struct device *dev = rx_ring->dev;
-	int bi_size;
+	struct libeth_fq fq = {
+		.count	= rx_ring->count,
+		.buf_len = LIBIE_MAX_RX_BUF_LEN,
+		.nid	= NUMA_NO_NODE,
+	};
+	int ret;
 
-	/* warn if we are about to overwrite the pointer */
-	WARN_ON(rx_ring->rx_bi);
-	bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
-	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
-	if (!rx_ring->rx_bi)
-		goto err;
+	ret = libeth_rx_fq_create(&fq, &rx_ring->q_vector->napi);
+	if (ret)
+		return ret;
+
+	rx_ring->pp = fq.pp;
+	rx_ring->rx_fqes = fq.fqes;
+	rx_ring->truesize = fq.truesize;
+	rx_ring->rx_buf_len = fq.buf_len;
 
 	u64_stats_init(&rx_ring->syncp);
 
 	/* Round up to nearest 4K */
 	rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc);
 	rx_ring->size = ALIGN(rx_ring->size, 4096);
-	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+	rx_ring->desc = dma_alloc_coherent(fq.pp->p.dev, rx_ring->size,
 					   &rx_ring->dma, GFP_KERNEL);
 
 	if (!rx_ring->desc) {
-		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
+		dev_info(fq.pp->p.dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
 			 rx_ring->size);
 		goto err;
 	}
···
 	rx_ring->next_to_use = 0;
 
 	return 0;
+
 err:
-	kfree(rx_ring->rx_bi);
-	rx_ring->rx_bi = NULL;
+	libeth_rx_fq_destroy(&fq);
+	rx_ring->rx_fqes = NULL;
+	rx_ring->pp = NULL;
+
 	return -ENOMEM;
 }
 
···
 	 */
 	wmb();
 	writel(val, rx_ring->tail);
-}
-
-/**
- * iavf_alloc_mapped_page - recycle or make a new page
- * @rx_ring: ring to use
- * @bi: rx_buffer struct to modify
- *
- * Returns true if the page was successfully allocated or
- * reused.
- **/
-static bool iavf_alloc_mapped_page(struct iavf_ring *rx_ring,
-				   struct iavf_rx_buffer *bi)
-{
-	struct page *page = bi->page;
-	dma_addr_t dma;
-
-	/* alloc new page for storage */
-	page = dev_alloc_pages(iavf_rx_pg_order(rx_ring));
-	if (unlikely(!page)) {
-		rx_ring->rx_stats.alloc_page_failed++;
-		return false;
-	}
-
-	/* map page for use */
-	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
-				 iavf_rx_pg_size(rx_ring),
-				 DMA_FROM_DEVICE,
-				 IAVF_RX_DMA_ATTR);
-
-	/* if mapping failed free memory back to system since
-	 * there isn't much point in holding memory we can't use
-	 */
-	if (dma_mapping_error(rx_ring->dev, dma)) {
-		__free_pages(page, iavf_rx_pg_order(rx_ring));
-		rx_ring->rx_stats.alloc_page_failed++;
-		return false;
-	}
-
-	bi->dma = dma;
-	bi->page = page;
-	bi->page_offset = IAVF_SKB_PAD;
-
-	return true;
 }
 
 /**
···
  **/
 bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
 {
+	const struct libeth_fq_fp fq = {
+		.pp		= rx_ring->pp,
+		.fqes		= rx_ring->rx_fqes,
+		.truesize	= rx_ring->truesize,
+		.count		= rx_ring->count,
+	};
 	u16 ntu = rx_ring->next_to_use;
 	union iavf_rx_desc *rx_desc;
-	struct iavf_rx_buffer *bi;
 
 	/* do nothing if no valid netdev defined */
 	if (!rx_ring->netdev || !cleaned_count)
 		return false;
 
 	rx_desc = IAVF_RX_DESC(rx_ring, ntu);
-	bi = &rx_ring->rx_bi[ntu];
 
 	do {
-		if (!iavf_alloc_mapped_page(rx_ring, bi))
-			goto no_buffers;
+		dma_addr_t addr;
 
-		/* sync the buffer for use by the device */
-		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
-						 bi->page_offset,
-						 IAVF_RXBUFFER_3072,
-						 DMA_FROM_DEVICE);
+		addr = libeth_rx_alloc(&fq, ntu);
+		if (addr == DMA_MAPPING_ERROR)
+			goto no_buffers;
 
 		/* Refresh the desc even if buffer_addrs didn't change
 		 * because each write-back erases this info.
 		 */
-		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
+		rx_desc->read.pkt_addr = cpu_to_le64(addr);
 
 		rx_desc++;
-		bi++;
 		ntu++;
 		if (unlikely(ntu == rx_ring->count)) {
 			rx_desc = IAVF_RX_DESC(rx_ring, 0);
-			bi = rx_ring->rx_bi;
 			ntu = 0;
 		}
 
···
 no_buffers:
 	if (rx_ring->next_to_use != ntu)
 		iavf_release_rx_desc(rx_ring, ntu);
+
+	rx_ring->rx_stats.alloc_page_failed++;
 
 	/* make sure to come back via polling to try again after
 	 * allocation failure
···
 
 /**
  * iavf_add_rx_frag - Add contents of Rx buffer to sk_buff
- * @rx_ring: rx descriptor ring to transact packets on
- * @rx_buffer: buffer containing page to add
  * @skb: sk_buff to place the data into
+ * @rx_buffer: buffer containing page to add
  * @size: packet length from rx_desc
  *
  * This function will add the data contained in rx_buffer->page to the skb.
···
  *
  * The function will then update the page offset.
  **/
-static void iavf_add_rx_frag(struct iavf_ring *rx_ring,
-			     struct iavf_rx_buffer *rx_buffer,
-			     struct sk_buff *skb,
+static void iavf_add_rx_frag(struct sk_buff *skb,
+			     const struct libeth_fqe *rx_buffer,
 			     unsigned int size)
 {
-	unsigned int truesize = SKB_DATA_ALIGN(size + IAVF_SKB_PAD);
-
-	if (!size)
-		return;
+	u32 hr = rx_buffer->page->pp->p.offset;
 
 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
-			rx_buffer->page_offset, size, truesize);
-}
-
-/**
- * iavf_get_rx_buffer - Fetch Rx buffer and synchronize data for use
- * @rx_ring: rx descriptor ring to transact packets on
- * @size: size of buffer to add to skb
- *
- * This function will pull an Rx buffer from the ring and synchronize it
- * for use by the CPU.
- */
-static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
-						 const unsigned int size)
-{
-	struct iavf_rx_buffer *rx_buffer;
-
-	rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
-	prefetchw(rx_buffer->page);
-	if (!size)
-		return rx_buffer;
-
-	/* we are reusing so sync this buffer for CPU use */
-	dma_sync_single_range_for_cpu(rx_ring->dev,
-				      rx_buffer->dma,
-				      rx_buffer->page_offset,
-				      size,
-				      DMA_FROM_DEVICE);
-
-	return rx_buffer;
+			rx_buffer->offset + hr, size, rx_buffer->truesize);
 }
 
 /**
  * iavf_build_skb - Build skb around an existing buffer
- * @rx_ring: Rx descriptor ring to transact packets on
  * @rx_buffer: Rx buffer to pull data from
  * @size: size of buffer to add to skb
  *
  * This function builds an skb around an existing Rx buffer, taking care
  * to set up the skb correctly and avoid any memcpy overhead.
  */
-static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
-				      struct iavf_rx_buffer *rx_buffer,
+static struct sk_buff *iavf_build_skb(const struct libeth_fqe *rx_buffer,
 				      unsigned int size)
 {
-	void *va;
-	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
-				SKB_DATA_ALIGN(IAVF_SKB_PAD + size);
+	u32 hr = rx_buffer->page->pp->p.offset;
 	struct sk_buff *skb;
+	void *va;
 
-	if (!rx_buffer || !size)
-		return NULL;
 	/* prefetch first cache line of first page */
-	va = page_address(rx_buffer->page) + rx_buffer->page_offset;
-	net_prefetch(va);
+	va = page_address(rx_buffer->page) + rx_buffer->offset;
+	net_prefetch(va + hr);
 
 	/* build an skb around the page buffer */
-	skb = napi_build_skb(va - IAVF_SKB_PAD, truesize);
+	skb = napi_build_skb(va, rx_buffer->truesize);
 	if (unlikely(!skb))
 		return NULL;
 
+	skb_mark_for_recycle(skb);
+
 	/* update pointers within the skb to store the data */
-	skb_reserve(skb, IAVF_SKB_PAD);
+	skb_reserve(skb, hr);
 	__skb_put(skb, size);
 
 	return skb;
-}
-
-/**
- * iavf_put_rx_buffer - Unmap used buffer
- * @rx_ring: rx descriptor ring to transact packets on
- * @rx_buffer: rx buffer to pull data from
- *
- * This function will unmap the buffer after it's written by HW.
- */
-static void iavf_put_rx_buffer(struct iavf_ring *rx_ring,
-			       struct iavf_rx_buffer *rx_buffer)
-{
-	if (!rx_buffer)
-		return;
-
-	/* we are not reusing the buffer so unmap it */
-	dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
-			     DMA_FROM_DEVICE, IAVF_RX_DMA_ATTR);
-
-	/* clear contents of buffer_info */
-	rx_buffer->page = NULL;
 }
 
 /**
···
 	bool failure = false;
 
 	while (likely(total_rx_packets < (unsigned int)budget)) {
-		struct iavf_rx_buffer *rx_buffer;
+		struct libeth_fqe *rx_buffer;
 		union iavf_rx_desc *rx_desc;
 		unsigned int size;
 		u16 vlan_tag = 0;
···
 		size = FIELD_GET(IAVF_RXD_QW1_LENGTH_PBUF_MASK, qword);
 
 		iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
-		rx_buffer = iavf_get_rx_buffer(rx_ring, size);
+
+		rx_buffer = &rx_ring->rx_fqes[rx_ring->next_to_clean];
+		if (!libeth_rx_sync_for_cpu(rx_buffer, size))
+			goto skip_data;
 
 		/* retrieve a buffer from the ring */
 		if (skb)
-			iavf_add_rx_frag(rx_ring, rx_buffer, skb, size);
+			iavf_add_rx_frag(skb, rx_buffer, size);
 		else
-			skb = iavf_build_skb(rx_ring, rx_buffer, size);
+			skb = iavf_build_skb(rx_buffer, size);
 
 		/* exit if we failed to retrieve a buffer */
 		if (!skb) {
···
 			break;
 		}
 
-		iavf_put_rx_buffer(rx_ring, rx_buffer);
+skip_data:
 		cleaned_count++;
 
-		if (iavf_is_non_eop(rx_ring, rx_desc, skb))
+		if (iavf_is_non_eop(rx_ring, rx_desc, skb) || unlikely(!skb))
 			continue;
 
 		/* ERR_MASK will only have valid bits if EOP set, and
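Condensed from the calls introduced above, the per-queue libeth fill-queue flow the new Rx path relies on looks roughly like the sketch below. The libeth/page_pool helpers and structures are the ones used in the diff; the "example_" wrappers, error handling, and ring bookkeeping are illustrative only.

/* Sketch of the libeth fill-queue (FQ) hot path used above -- illustrative,
 * not part of the patch. Setup/teardown are libeth_rx_fq_create() and
 * libeth_rx_fq_destroy(), as shown in iavf_setup_rx_descriptors() and
 * iavf_free_rx_resources().
 */
#include <linux/dma-mapping.h>
#include <net/libeth/rx.h>

/* Refill slot @ntu: libeth_rx_alloc() takes a page from the pool, does the
 * DMA-for-device handling in the PP core, and returns the address to put
 * into the Rx descriptor.
 */
static bool example_refill_one(const struct iavf_ring *rx_ring, u32 ntu,
			       union iavf_rx_desc *rx_desc)
{
	const struct libeth_fq_fp fq = {
		.pp		= rx_ring->pp,
		.fqes		= rx_ring->rx_fqes,
		.truesize	= rx_ring->truesize,
		.count		= rx_ring->count,
	};
	dma_addr_t addr;

	addr = libeth_rx_alloc(&fq, ntu);
	if (addr == DMA_MAPPING_ERROR)
		return false;

	rx_desc->read.pkt_addr = cpu_to_le64(addr);

	return true;
}

/* Consume slot @ntc: libeth_rx_sync_for_cpu() does the DMA-for-CPU sync and
 * rejects unusable (e.g. zero-length) buffers, which the caller then skips,
 * mirroring the skip_data: label in iavf_clean_rx_irq().
 */
static struct sk_buff *example_clean_one(const struct iavf_ring *rx_ring,
					 u32 ntc, u32 size,
					 struct sk_buff *skb)
{
	struct libeth_fqe *rx_buffer = &rx_ring->rx_fqes[ntc];

	if (!libeth_rx_sync_for_cpu(rx_buffer, size))
		return skb;		/* dropped; nothing added */

	if (skb)
		iavf_add_rx_frag(skb, rx_buffer, size);
	else
		skb = iavf_build_skb(rx_buffer, size);

	return skb;
}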
+9 -25
drivers/net/ethernet/intel/iavf/iavf_txrx.h
···
 	 BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
 	 BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
 
-/* Supported Rx Buffer Sizes (a multiple of 128) */
-#define IAVF_RXBUFFER_3072	3072	/* Used for large frames w/ padding */
-#define IAVF_MAX_RXBUFFER	9728	/* largest size for single descriptor */
-
-#define IAVF_PACKET_HDR_PAD	(ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
 #define iavf_rx_desc iavf_32byte_rx_desc
-
-#define IAVF_RX_DMA_ATTR \
-	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
-
-#define IAVF_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
 
 /**
  * iavf_test_staterr - tests bits in Rx descriptor status and error fields
···
 	u32 tx_flags;
 };
 
-struct iavf_rx_buffer {
-	dma_addr_t dma;
-	struct page *page;
-	__u32 page_offset;
-};
-
 struct iavf_queue_stats {
 	u64 packets;
 	u64 bytes;
···
 struct iavf_ring {
 	struct iavf_ring *next;		/* pointer to next ring in q_vector */
 	void *desc;			/* Descriptor ring memory */
-	struct device *dev;		/* Used for DMA mapping */
+	union {
+		struct page_pool *pp;	/* Used on Rx for buffer management */
+		struct device *dev;	/* Used on Tx for DMA mapping */
+	};
 	struct net_device *netdev;	/* netdev ring maps to */
 	union {
+		struct libeth_fqe *rx_fqes;
 		struct iavf_tx_buffer *tx_bi;
-		struct iavf_rx_buffer *rx_bi;
 	};
 	u8 __iomem *tail;
+	u32 truesize;
+
 	u16 queue_index;		/* Queue number of ring */
 
 	/* high bit set means dynamic, use accessors routines to read/write.
···
 					 * iavf_clean_rx_ring_irq() is called
 					 * for this ring.
 					 */
+
+	u32 rx_buf_len;
 } ____cacheline_internodealigned_in_smp;
 
 #define IAVF_ITR_ADAPTIVE_MIN_INC	0x0002
···
 /* iterator for handling rings in ring container */
 #define iavf_for_each_ring(pos, head) \
 	for (pos = (head).ring; pos != NULL; pos = pos->next)
-
-static inline unsigned int iavf_rx_pg_order(struct iavf_ring *ring)
-{
-	return 0;
-}
-
-#define iavf_rx_pg_size(_ring) (PAGE_SIZE << iavf_rx_pg_order(_ring))
 
 bool iavf_alloc_rx_buffers(struct iavf_ring *rxr, u16 cleaned_count);
 netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+6 -4
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
···
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright(c) 2013 - 2018 Intel Corporation. */
 
+#include <linux/net/intel/libie/rx.h>
+
 #include "iavf.h"
 #include "iavf_prototype.h"
 
···
 void iavf_configure_queues(struct iavf_adapter *adapter)
 {
 	struct virtchnl_vsi_queue_config_info *vqci;
-	int i, max_frame = adapter->vf_res->max_mtu;
 	int pairs = adapter->num_active_queues;
 	struct virtchnl_queue_pair_info *vqpi;
+	u32 i, max_frame;
 	size_t len;
 
-	if (max_frame > IAVF_MAX_RXBUFFER || !max_frame)
-		max_frame = IAVF_MAX_RXBUFFER;
+	max_frame = LIBIE_MAX_RX_FRM_LEN(adapter->rx_rings->pp->p.offset);
+	max_frame = min_not_zero(adapter->vf_res->max_mtu, max_frame);
 
 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
 		/* bail because we already have a command pending */
···
 		vqpi->rxq.ring_len = adapter->rx_rings[i].count;
 		vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
 		vqpi->rxq.max_pkt_size = max_frame;
-		vqpi->rxq.databuffer_size = IAVF_RXBUFFER_3072;
+		vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
 		if (CRC_OFFLOAD_ALLOWED(adapter))
 			vqpi->rxq.crc_disable = !!(adapter->netdev->features &
 						   NETIF_F_RXFCS);
+17
include/linux/net/intel/libie/rx.h
···
 
 #include <net/libeth/rx.h>
 
+/* Rx buffer management */
+
+/* The largest size for a single descriptor as per HW */
+#define LIBIE_MAX_RX_BUF_LEN	9728U
+/* "True" HW-writeable space: minimum from SW and HW values */
+#define LIBIE_RX_BUF_LEN(hr)	min_t(u32, LIBETH_RX_PAGE_LEN(hr), \
+				      LIBIE_MAX_RX_BUF_LEN)
+
+/* The maximum frame size as per HW (S/G) */
+#define __LIBIE_MAX_RX_FRM_LEN	16382U
+/* ATST, HW can chain up to 5 Rx descriptors */
+#define LIBIE_MAX_RX_FRM_LEN(hr) \
+	min_t(u32, __LIBIE_MAX_RX_FRM_LEN, LIBIE_RX_BUF_LEN(hr) * 5)
+/* Maximum frame size minus LL overhead */
+#define LIBIE_MAX_MTU \
+	(LIBIE_MAX_RX_FRM_LEN(LIBETH_MAX_HEADROOM) - LIBETH_RX_LL_LEN)
+
 /* O(1) converting i40e/ice/iavf's 8/10-bit hardware packet type to a parsed
  * bitfield struct.
  */