Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: add get_netmem/put_netmem support

Currently net_iovs support only pp ref counts, and do not support a
page ref equivalent.

This is fine for the RX path as net_iovs are used exclusively with the
pp and only pp refcounting is needed there. The TX path however does not
use pp ref counts, thus, support for a get_page/put_page equivalent is
needed for netmem.

Support get_netmem/put_netmem. Check the type of the netmem before
passing it to page- or net_iov-specific code to obtain a page ref
equivalent.

For dmabuf net_iovs, we obtain a ref on the underlying binding. This
ensures the entire binding doesn't disappear until all the net_iovs have
been put_netmem'ed. We do not need to track the refcount of individual
dmabuf net_iovs as we don't allocate/free them from a pool similar to
what the buddy allocator does for pages.

This code is written to be extensible by other net_iov implementers.
get_netmem/put_netmem will check the type of the netmem and route it to
the correct helper:

pages -> [get|put]_page()
dmabuf net_iovs -> net_devmem_[get|put]_net_iov()
new net_iovs -> new helpers

Signed-off-by: Mina Almasry <almasrymina@google.com>
Acked-by: Stanislav Fomichev <sdf@fomichev.me>
Link: https://patch.msgid.link/20250508004830.4100853-3-almasrymina@google.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

authored by

Mina Almasry and committed by
Paolo Abeni
e9f3d61d 03e96b8c

+65 -2
+2 -2
include/linux/skbuff_ref.h
··· 17 17 */ 18 18 static inline void __skb_frag_ref(skb_frag_t *frag) 19 19 { 20 - get_page(skb_frag_page(frag)); 20 + get_netmem(skb_frag_netmem(frag)); 21 21 } 22 22 23 23 /** ··· 40 40 if (recycle && napi_pp_put_page(netmem)) 41 41 return; 42 42 #endif 43 - put_page(netmem_to_page(netmem)); 43 + put_netmem(netmem); 44 44 } 45 45 46 46 /**
+3
include/net/netmem.h
··· 273 273 return __netmem_clear_lsb(netmem)->dma_addr; 274 274 } 275 275 276 + void get_netmem(netmem_ref netmem); 277 + void put_netmem(netmem_ref netmem); 278 + 276 279 #endif /* _NET_NETMEM_H */
+10
net/core/devmem.c
··· 295 295 return ERR_PTR(err); 296 296 } 297 297 298 + void net_devmem_get_net_iov(struct net_iov *niov) 299 + { 300 + net_devmem_dmabuf_binding_get(net_devmem_iov_binding(niov)); 301 + } 302 + 303 + void net_devmem_put_net_iov(struct net_iov *niov) 304 + { 305 + net_devmem_dmabuf_binding_put(net_devmem_iov_binding(niov)); 306 + } 307 + 298 308 /*** "Dmabuf devmem memory provider" ***/ 299 309 300 310 int mp_dmabuf_devmem_init(struct page_pool *pool)
+20
net/core/devmem.h
··· 29 29 * The binding undos itself and unmaps the underlying dmabuf once all 30 30 * those refs are dropped and the binding is no longer desired or in 31 31 * use. 32 + * 33 + * net_devmem_get_net_iov() on dmabuf net_iovs will increment this 34 + * reference, making sure that the binding remains alive until all the 35 + * net_iovs are no longer used. 32 36 */ 33 37 refcount_t ref; 34 38 ··· 115 111 __net_devmem_dmabuf_binding_free(binding); 116 112 } 117 113 114 + void net_devmem_get_net_iov(struct net_iov *niov); 115 + void net_devmem_put_net_iov(struct net_iov *niov); 116 + 118 117 struct net_iov * 119 118 net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding); 120 119 void net_devmem_free_dmabuf(struct net_iov *ppiov); ··· 126 119 127 120 #else 128 121 struct net_devmem_dmabuf_binding; 122 + 123 + static inline void 124 + net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding) 125 + { 126 + } 127 + 128 + static inline void net_devmem_get_net_iov(struct net_iov *niov) 129 + { 130 + } 131 + 132 + static inline void net_devmem_put_net_iov(struct net_iov *niov) 133 + { 134 + } 129 135 130 136 static inline void 131 137 __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding)
+30
net/core/skbuff.c
··· 89 89 #include <linux/textsearch.h> 90 90 91 91 #include "dev.h" 92 + #include "devmem.h" 92 93 #include "netmem_priv.h" 93 94 #include "sock_destructor.h" 94 95 ··· 7314 7313 return false; 7315 7314 } 7316 7315 EXPORT_SYMBOL(csum_and_copy_from_iter_full); 7316 + 7317 + void get_netmem(netmem_ref netmem) 7318 + { 7319 + struct net_iov *niov; 7320 + 7321 + if (netmem_is_net_iov(netmem)) { 7322 + niov = netmem_to_net_iov(netmem); 7323 + if (net_is_devmem_iov(niov)) 7324 + net_devmem_get_net_iov(netmem_to_net_iov(netmem)); 7325 + return; 7326 + } 7327 + get_page(netmem_to_page(netmem)); 7328 + } 7329 + EXPORT_SYMBOL(get_netmem); 7330 + 7331 + void put_netmem(netmem_ref netmem) 7332 + { 7333 + struct net_iov *niov; 7334 + 7335 + if (netmem_is_net_iov(netmem)) { 7336 + niov = netmem_to_net_iov(netmem); 7337 + if (net_is_devmem_iov(niov)) 7338 + net_devmem_put_net_iov(netmem_to_net_iov(netmem)); 7339 + return; 7340 + } 7341 + 7342 + put_page(netmem_to_page(netmem)); 7343 + } 7344 + EXPORT_SYMBOL(put_netmem);