Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xdp: page_pool related fix to cpumap

When converting an xdp_frame into an SKB and sending it into the network
stack, the underlying XDP memory model needs to release associated
resources, because the network stack doesn't have callbacks for XDP memory
models. The only memory model that needs this is page_pool, when a driver
uses the DMA-mapping feature.

Introduce page_pool_release_page(), which basically does the same as
page_pool_unmap_page(). Add xdp_release_frame() as the XDP memory model
interface for calling it, but only if the memory model matches
MEM_TYPE_PAGE_POOL, to save the function call overhead for others. Have
cpumap call xdp_release_frame() before xdp_scrub_frame().

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Jesper Dangaard Brouer and committed by
David S. Miller
6bf071bf 516a7593

+47 -1
+14 -1
include/net/page_pool.h
··· 110 110 struct page_pool *page_pool_create(const struct page_pool_params *params); 111 111 112 112 void page_pool_destroy(struct page_pool *pool); 113 - void page_pool_unmap_page(struct page_pool *pool, struct page *page); 114 113 115 114 /* Never call this directly, use helpers below */ 116 115 void __page_pool_put_page(struct page_pool *pool, ··· 130 131 struct page *page) 131 132 { 132 133 __page_pool_put_page(pool, page, true); 134 + } 135 + 136 + /* Disconnects a page (from a page_pool). API users can have a need 137 + * to disconnect a page (from a page_pool), to allow it to be used as 138 + * a regular page (that will eventually be returned to the normal 139 + * page-allocator via put_page). 140 + */ 141 + void page_pool_unmap_page(struct page_pool *pool, struct page *page); 142 + static inline void page_pool_release_page(struct page_pool *pool, 143 + struct page *page) 144 + { 145 + #ifdef CONFIG_PAGE_POOL 146 + page_pool_unmap_page(pool, page); 147 + #endif 133 148 } 134 149 135 150 static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
+15
include/net/xdp.h
··· 129 129 void xdp_return_frame_rx_napi(struct xdp_frame *xdpf); 130 130 void xdp_return_buff(struct xdp_buff *xdp); 131 131 132 + /* When sending xdp_frame into the network stack, then there is no 133 + * return point callback, which is needed to release e.g. DMA-mapping 134 + * resources with page_pool. Thus, have explicit function to release 135 + * frame resources. 136 + */ 137 + void __xdp_release_frame(void *data, struct xdp_mem_info *mem); 138 + static inline void xdp_release_frame(struct xdp_frame *xdpf) 139 + { 140 + struct xdp_mem_info *mem = &xdpf->mem; 141 + 142 + /* Curr only page_pool needs this */ 143 + if (mem->type == MEM_TYPE_PAGE_POOL) 144 + __xdp_release_frame(xdpf->data, mem); 145 + } 146 + 132 147 int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, 133 148 struct net_device *dev, u32 queue_index); 134 149 void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq);
+3
kernel/bpf/cpumap.c
··· 208 208 * - RX ring dev queue index (skb_record_rx_queue) 209 209 */ 210 210 211 + /* Until page_pool get SKB return path, release DMA here */ 212 + xdp_release_frame(xdpf); 213 + 211 214 /* Allow SKB to reuse area used by xdp_frame */ 212 215 xdp_scrub_frame(xdpf); 213 216
+15
net/core/xdp.c
··· 381 381 } 382 382 EXPORT_SYMBOL_GPL(xdp_return_buff); 383 383 384 + /* Only called for MEM_TYPE_PAGE_POOL see xdp.h */ 385 + void __xdp_release_frame(void *data, struct xdp_mem_info *mem) 386 + { 387 + struct xdp_mem_allocator *xa; 388 + struct page *page; 389 + 390 + rcu_read_lock(); 391 + xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params); 392 + page = virt_to_head_page(data); 393 + if (xa) 394 + page_pool_release_page(xa->page_pool, page); 395 + rcu_read_unlock(); 396 + } 397 + EXPORT_SYMBOL_GPL(__xdp_release_frame); 398 + 384 399 int xdp_attachment_query(struct xdp_attachment_info *info, 385 400 struct netdev_bpf *bpf) 386 401 {