/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Device memory TCP support
 *
 * Authors:	Mina Almasry <almasrymina@google.com>
 *		Willem de Bruijn <willemb@google.com>
 *		Kaiyuan Zhang <kaiyuanz@google.com>
 *
 */
#ifndef _NET_DEVMEM_H
#define _NET_DEVMEM_H

#include <net/netmem.h>
#include <net/netdev_netlink.h>

struct netlink_ext_ack;

struct net_devmem_dmabuf_binding {
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attachment;
	struct sg_table *sgt;
	struct net_device *dev;
	struct gen_pool *chunk_pool;
	/* Protect dev */
	struct mutex lock;

	/* The user holds a ref (via the netlink API) for as long as they want
	 * the binding to remain alive. Each page pool using this binding holds
	 * a ref to keep the binding alive. The page_pool does not release the
	 * ref until all the net_iovs allocated from this binding are released
	 * back to the page_pool.
	 *
	 * The binding undoes itself and unmaps the underlying dmabuf once all
	 * those refs are dropped and the binding is no longer desired or in
	 * use.
	 *
	 * net_devmem_get_net_iov() on dmabuf net_iovs will increment this
	 * reference, making sure that the binding remains alive until all the
	 * net_iovs are no longer used. net_iovs allocated from this binding
	 * that are stuck in the TX path for any reason (such as awaiting
	 * retransmits) hold a reference to the binding until the skb holding
	 * them is freed.
	 */
	refcount_t ref;

	/* The list of bindings currently active. Used for netlink to notify us
	 * of the user dropping the bind.
	 */
	struct list_head list;

	/* rxq's this binding is active on. */
	struct xarray bound_rxqs;

	/* ID of this binding. Globally unique to all bindings currently
	 * active.
	 */
	u32 id;

	/* DMA direction, FROM_DEVICE for Rx binding, TO_DEVICE for Tx. */
	enum dma_data_direction direction;

	/* Array of net_iov pointers for this binding, sorted by virtual
	 * address. This array is convenient to map the virtual addresses to
	 * net_iovs in the TX path.
	 */
	struct net_iov **tx_vec;

	struct work_struct unbind_w;
};
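/* Illustrative sketch (not part of the original header): the lifecycle
 * described in the comment above reduces to a get/put pattern on
 * binding->ref, using the helpers declared below. A caller that needs the
 * binding to stay alive across an operation might do roughly:
 *
 *	if (!net_devmem_dmabuf_binding_get(binding))
 *		return;		(binding already torn down)
 *	... use net_iovs allocated from the binding ...
 *	net_devmem_dmabuf_binding_put(binding);
 *
 * The final put schedules unbind_w, which undoes the binding and unmaps the
 * underlying dmabuf.
 */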

#if defined(CONFIG_NET_DEVMEM)
/* Owner of the dma-buf chunks inserted into the gen pool. Each scatterlist
 * entry from the dmabuf is inserted into the genpool as a chunk, and needs
 * this owner struct to keep track of some metadata necessary to create
 * allocations from this chunk.
 */
struct dmabuf_genpool_chunk_owner {
	struct net_iov_area area;
	struct net_devmem_dmabuf_binding *binding;

	/* dma_addr of the start of the chunk. */
	dma_addr_t base_dma_addr;
};
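/* Illustrative sketch (an assumption of this edit, not a declaration from
 * the original header): with the owner above, a net_iov's DMA address can be
 * derived the same way net_iov_virtual_addr() below derives the virtual
 * address, by offsetting from the chunk's base:
 *
 *	static inline dma_addr_t example_net_iov_dma_addr(const struct net_iov *niov)
 *	{
 *		struct dmabuf_genpool_chunk_owner *owner;
 *
 *		owner = net_devmem_iov_to_chunk_owner(niov);
 *		return owner->base_dma_addr +
 *		       ((dma_addr_t)net_iov_idx(niov) << PAGE_SHIFT);
 *	}
 *
 * example_net_iov_dma_addr() is a hypothetical name used only for this
 * sketch; net_devmem_iov_to_chunk_owner() and net_iov_idx() are the helpers
 * used elsewhere in this header.
 */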

void __net_devmem_dmabuf_binding_free(struct work_struct *wq);
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev,
		       enum dma_data_direction direction,
		       unsigned int dmabuf_fd, struct netdev_nl_sock *priv,
		       struct netlink_ext_ack *extack);
struct net_devmem_dmabuf_binding *net_devmem_lookup_dmabuf(u32 id);
void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding);
int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack);
void net_devmem_bind_tx_release(struct sock *sk);
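/* Illustrative call flow (a sketch under assumptions of this edit, not a
 * contract stated by the header): a caller servicing a bind request would
 * combine the declarations above roughly as:
 *
 *	binding = net_devmem_bind_dmabuf(dev, DMA_FROM_DEVICE, dmabuf_fd,
 *					 priv, extack);
 *	if (IS_ERR(binding))
 *		return PTR_ERR(binding);
 *
 *	err = net_devmem_bind_dmabuf_to_queue(dev, rxq_idx, binding, extack);
 *	if (err)
 *		net_devmem_unbind_dmabuf(binding);
 *
 * DMA_FROM_DEVICE matches the Rx direction noted in the binding struct
 * above; all variable names here are placeholders.
 */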

static inline struct dmabuf_genpool_chunk_owner *
net_devmem_iov_to_chunk_owner(const struct net_iov *niov)
{
	struct net_iov_area *owner = net_iov_owner(niov);

	return container_of(owner, struct dmabuf_genpool_chunk_owner, area);
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_iov_binding(const struct net_iov *niov)
{
	return net_devmem_iov_to_chunk_owner(niov)->binding;
}

static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
{
	return net_devmem_iov_binding(niov)->id;
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	struct net_iov_area *owner = net_iov_owner(niov);

	return owner->base_virtual +
	       ((unsigned long)net_iov_idx(niov) << PAGE_SHIFT);
}

static inline bool
net_devmem_dmabuf_binding_get(struct net_devmem_dmabuf_binding *binding)
{
	return refcount_inc_not_zero(&binding->ref);
}

static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{
	if (!refcount_dec_and_test(&binding->ref))
		return;

	INIT_WORK(&binding->unbind_w, __net_devmem_dmabuf_binding_free);
	schedule_work(&binding->unbind_w);
}

void net_devmem_get_net_iov(struct net_iov *niov);
void net_devmem_put_net_iov(struct net_iov *niov);

struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding);
void net_devmem_free_dmabuf(struct net_iov *ppiov);

bool net_is_devmem_iov(struct net_iov *niov);
struct net_devmem_dmabuf_binding *
net_devmem_get_binding(struct sock *sk, unsigned int dmabuf_id);
struct net_iov *
net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding, size_t addr,
		       size_t *off, size_t *size);
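/* Illustrative sketch of the tx_vec mapping described in the binding struct
 * above (an assumption of this edit, not the declared implementation):
 * net_devmem_get_niov_at() resolves a virtual offset into the dmabuf to the
 * net_iov covering it, roughly:
 *
 *	niov = binding->tx_vec[addr >> PAGE_SHIFT];
 *	*off = addr & (PAGE_SIZE - 1);
 *	*size = PAGE_SIZE - *off;
 *
 * i.e. the page-sized net_iov for addr, the offset within it, and the bytes
 * remaining in it.
 */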

#else
struct net_devmem_dmabuf_binding;

static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{
}

static inline void net_devmem_get_net_iov(struct net_iov *niov)
{
}

static inline void net_devmem_put_net_iov(struct net_iov *niov)
{
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev,
		       enum dma_data_direction direction,
		       unsigned int dmabuf_fd,
		       struct netdev_nl_sock *priv,
		       struct netlink_ext_ack *extack)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct net_devmem_dmabuf_binding *net_devmem_lookup_dmabuf(u32 id)
{
	return NULL;
}

static inline void
net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
}

static inline int
net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				struct net_devmem_dmabuf_binding *binding,
				struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;
}

static inline struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	return NULL;
}

static inline void net_devmem_free_dmabuf(struct net_iov *ppiov)
{
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	return 0;
}

static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
{
	return 0;
}

static inline bool net_is_devmem_iov(struct net_iov *niov)
{
	return false;
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_get_binding(struct sock *sk, unsigned int dmabuf_id)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct net_iov *
net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding, size_t addr,
		       size_t *off, size_t *size)
{
	return NULL;
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_iov_binding(const struct net_iov *niov)
{
	return NULL;
}
#endif

#endif /* _NET_DEVMEM_H */