Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

libeth: support native XDP and register memory model

Expand libeth's Page Pool functionality by adding native XDP support.
This means picking the appropriate headroom and DMA direction.
Also, register all the created &page_pools as XDP memory models.
A driver then can call xdp_rxq_info_attach_page_pool() when registering
its RxQ info.

Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>

Authored by Alexander Lobakin and committed by Tony Nguyen
35c64b65 6ad5ff6e

+20 -6
+15 -5
drivers/net/ethernet/intel/libeth/rx.c
···
 static bool libeth_rx_page_pool_params(struct libeth_fq *fq,
 				       struct page_pool_params *pp)
 {
-	pp->offset = LIBETH_SKB_HEADROOM;
+	pp->offset = fq->xdp ? LIBETH_XDP_HEADROOM : LIBETH_SKB_HEADROOM;
 	/* HW-writeable / syncable length per one page */
 	pp->max_len = LIBETH_RX_PAGE_LEN(pp->offset);
 
···
 		.dev = napi->dev->dev.parent,
 		.netdev = napi->dev,
 		.napi = napi,
-		.dma_dir = DMA_FROM_DEVICE,
 	};
 	struct libeth_fqe *fqes;
 	struct page_pool *pool;
-	bool ret;
+	int ret;
+
+	pp.dma_dir = fq->xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
 
 	if (!fq->hsplit)
 		ret = libeth_rx_page_pool_params(fq, &pp);
···
 		return PTR_ERR(pool);
 
 	fqes = kvcalloc_node(fq->count, sizeof(*fqes), GFP_KERNEL, fq->nid);
-	if (!fqes)
+	if (!fqes) {
+		ret = -ENOMEM;
 		goto err_buf;
+	}
+
+	ret = xdp_reg_page_pool(pool);
+	if (ret)
+		goto err_mem;
 
 	fq->fqes = fqes;
 	fq->pp = pool;
 
 	return 0;
 
+err_mem:
+	kvfree(fqes);
 err_buf:
 	page_pool_destroy(pool);
 
-	return -ENOMEM;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(libeth_rx_fq_create);
···
  */
 void libeth_rx_fq_destroy(struct libeth_fq *fq)
 {
+	xdp_unreg_page_pool(fq->pp);
 	kvfree(fq->fqes);
 	page_pool_destroy(fq->pp);
 }
+5 -1
include/net/libeth/rx.h
···
 
 /* Space reserved in front of each frame */
 #define LIBETH_SKB_HEADROOM	(NET_SKB_PAD + NET_IP_ALIGN)
+#define LIBETH_XDP_HEADROOM	(ALIGN(XDP_PACKET_HEADROOM, NET_SKB_PAD) + \
+				 NET_IP_ALIGN)
 /* Maximum headroom for worst-case calculations */
-#define LIBETH_MAX_HEADROOM	LIBETH_SKB_HEADROOM
+#define LIBETH_MAX_HEADROOM	LIBETH_XDP_HEADROOM
 /* Link layer / L2 overhead: Ethernet, 2 VLAN tags (C + S), FCS */
 #define LIBETH_RX_LL_LEN	(ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN)
 /* Maximum supported L2-L4 header length */
···
  * @count: number of descriptors/buffers the queue has
  * @type: type of the buffers this queue has
  * @hsplit: flag whether header split is enabled
+ * @xdp: flag indicating whether XDP is enabled
  * @buf_len: HW-writeable length per each buffer
  * @nid: ID of the closest NUMA node with memory
  */
···
 	/* Cold fields */
 	enum libeth_fqe_type	type:2;
 	bool			hsplit:1;
+	bool			xdp:1;
 
 	u32			buf_len;
 	int			nid;