/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Device memory TCP support
 *
 * Authors:	Mina Almasry <almasrymina@google.com>
 *		Willem de Bruijn <willemb@google.com>
 *		Kaiyuan Zhang <kaiyuanz@google.com>
 *
 */
#ifndef _NET_DEVMEM_H
#define _NET_DEVMEM_H

#include <net/netmem.h>
#include <net/netdev_netlink.h>

struct netlink_ext_ack;

struct net_devmem_dmabuf_binding {
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attachment;
	struct sg_table *sgt;
	struct net_device *dev;
	struct gen_pool *chunk_pool;
	/* Protect dev */
	struct mutex lock;

	/* The user holds a ref (via the netlink API) for as long as they want
	 * the binding to remain alive. Each page pool using this binding holds
	 * a ref to keep the binding alive. The page_pool does not release the
	 * ref until all the net_iovs allocated from this binding are released
	 * back to the page_pool.
	 *
	 * The binding undoes itself and unmaps the underlying dmabuf once all
	 * those refs are dropped and the binding is no longer desired or in
	 * use.
	 *
	 * net_devmem_get_net_iov() on dmabuf net_iovs will increment this
	 * reference, making sure that the binding remains alive until all the
	 * net_iovs are no longer used. net_iovs allocated from this binding
	 * that are stuck in the TX path for any reason (such as awaiting
	 * retransmits) hold a reference to the binding until the skb holding
	 * them is freed.
	 *
	 * See the illustrative get/put sketch below this struct.
	 */
	refcount_t ref;

	/* The list of bindings currently active. Used by netlink to notify us
	 * of the user dropping the bind.
	 */
	struct list_head list;

	/* rxqs this binding is active on. */
	struct xarray bound_rxqs;

	/* ID of this binding. Globally unique among all bindings currently
	 * active.
	 */
	u32 id;

	/* Array of net_iov pointers for this binding, sorted by virtual
	 * address. This array makes it convenient to map virtual addresses to
	 * net_iovs in the TX path.
	 */
	struct net_iov **tx_vec;

	struct work_struct unbind_w;
};
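
/* Illustrative sketch of the refcounting scheme described above. This is not
 * a real caller; the "err" label is hypothetical, and only
 * net_devmem_dmabuf_binding_get()/_put(), declared below under
 * CONFIG_NET_DEVMEM, are taken from this header:
 *
 *	if (!net_devmem_dmabuf_binding_get(binding))
 *		goto err;	// binding is already being torn down
 *
 *	// ... net_iovs allocated from this binding are handed to the TX path ...
 *
 *	net_devmem_dmabuf_binding_put(binding);
 *	// The final put schedules unbind_w, which runs
 *	// __net_devmem_dmabuf_binding_free() to unmap and release the dmabuf.
 */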

#if defined(CONFIG_NET_DEVMEM)
/* Owner of the dma-buf chunks inserted into the gen pool. Each scatterlist
 * entry from the dmabuf is inserted into the genpool as a chunk, and needs
 * this owner struct to keep track of some metadata necessary to create
 * allocations from this chunk.
 */
struct dmabuf_genpool_chunk_owner {
	struct net_iov_area area;
	struct net_devmem_dmabuf_binding *binding;

	/* dma_addr of the start of the chunk. */
	dma_addr_t base_dma_addr;
};

void __net_devmem_dmabuf_binding_free(struct work_struct *wq);
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev,
		       enum dma_data_direction direction,
		       unsigned int dmabuf_fd, struct netdev_nl_sock *priv,
		       struct netlink_ext_ack *extack);
struct net_devmem_dmabuf_binding *net_devmem_lookup_dmabuf(u32 id);
void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding);
int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack);
void net_devmem_bind_tx_release(struct sock *sk);

static inline struct dmabuf_genpool_chunk_owner *
net_devmem_iov_to_chunk_owner(const struct net_iov *niov)
{
	struct net_iov_area *owner = net_iov_owner(niov);

	return container_of(owner, struct dmabuf_genpool_chunk_owner, area);
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_iov_binding(const struct net_iov *niov)
{
	return net_devmem_iov_to_chunk_owner(niov)->binding;
}

static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
{
	return net_devmem_iov_binding(niov)->id;
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	struct net_iov_area *owner = net_iov_owner(niov);

	return owner->base_virtual +
	       ((unsigned long)net_iov_idx(niov) << PAGE_SHIFT);
}

static inline bool
net_devmem_dmabuf_binding_get(struct net_devmem_dmabuf_binding *binding)
{
	return refcount_inc_not_zero(&binding->ref);
}

static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{
	if (!refcount_dec_and_test(&binding->ref))
		return;

	INIT_WORK(&binding->unbind_w, __net_devmem_dmabuf_binding_free);
	schedule_work(&binding->unbind_w);
}

void net_devmem_get_net_iov(struct net_iov *niov);
void net_devmem_put_net_iov(struct net_iov *niov);

struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding);
void net_devmem_free_dmabuf(struct net_iov *ppiov);

bool net_is_devmem_iov(struct net_iov *niov);
struct net_devmem_dmabuf_binding *
net_devmem_get_binding(struct sock *sk, unsigned int dmabuf_id);
struct net_iov *
net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding, size_t addr,
		       size_t *off, size_t *size);

#else
struct net_devmem_dmabuf_binding;

static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{
}

static inline void net_devmem_get_net_iov(struct net_iov *niov)
{
}

static inline void net_devmem_put_net_iov(struct net_iov *niov)
{
}

static inline void __net_devmem_dmabuf_binding_free(struct work_struct *wq)
{
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev,
		       enum dma_data_direction direction,
		       unsigned int dmabuf_fd,
		       struct netdev_nl_sock *priv,
		       struct netlink_ext_ack *extack)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct net_devmem_dmabuf_binding *net_devmem_lookup_dmabuf(u32 id)
{
	return NULL;
}

static inline void
net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
}

static inline int
net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				struct net_devmem_dmabuf_binding *binding,
				struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;
}

static inline struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	return NULL;
}

static inline void net_devmem_free_dmabuf(struct net_iov *ppiov)
{
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	return 0;
}

static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
{
	return 0;
}

static inline bool net_is_devmem_iov(struct net_iov *niov)
{
	return false;
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_get_binding(struct sock *sk, unsigned int dmabuf_id)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct net_iov *
net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding, size_t addr,
		       size_t *off, size_t *size)
{
	return NULL;
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_iov_binding(const struct net_iov *niov)
{
	return NULL;
}
#endif
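
/* Illustrative sketch of mapping a dmabuf virtual address range to net_iovs
 * with net_devmem_get_niov_at(), declared above under CONFIG_NET_DEVMEM. This
 * is not a real caller; the loop and variable names are hypothetical, and the
 * meaning of the out parameters (off is the offset into the returned net_iov,
 * size is the number of bytes available from that offset) is assumed from the
 * declaration rather than spelled out in this header:
 *
 *	while (len) {
 *		niov = net_devmem_get_niov_at(binding, addr, &off, &size);
 *		if (!niov)
 *			break;		// addr falls outside the binding
 *		chunk = min(len, size);
 *		// ... attach chunk bytes of niov, starting at off, to the skb ...
 *		addr += chunk;
 *		len -= chunk;
 *	}
 */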

#endif /* _NET_DEVMEM_H */