/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Device memory TCP support
 *
 * Authors:	Mina Almasry <almasrymina@google.com>
 *		Willem de Bruijn <willemb@google.com>
 *		Kaiyuan Zhang <kaiyuanz@google.com>
 *
 */
#ifndef _NET_DEVMEM_H
#define _NET_DEVMEM_H

#include <net/netmem.h>

struct netlink_ext_ack;
struct net_devmem_dmabuf_binding {
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attachment;
	struct sg_table *sgt;
	struct net_device *dev;
	struct gen_pool *chunk_pool;
	/* Protect dev */
	struct mutex lock;

	/* The user holds a ref (via the netlink API) for as long as they want
	 * the binding to remain alive. Each page pool using this binding holds
	 * a ref to keep the binding alive. Each allocated net_iov holds a
	 * ref.
	 *
	 * The binding undoes itself and unmaps the underlying dmabuf once all
	 * those refs are dropped and the binding is no longer desired or in
	 * use.
	 */
	refcount_t ref;

	/* The list of bindings currently active. Used for netlink to notify us
	 * of the user dropping the binding.
	 */
	struct list_head list;

	/* rxqs this binding is active on. */
	struct xarray bound_rxqs;

	/* ID of this binding. Globally unique across all bindings currently
	 * active.
	 */
	u32 id;
};

#if defined(CONFIG_NET_DEVMEM)
/* Owner of the dma-buf chunks inserted into the gen pool. Each scatterlist
 * entry from the dmabuf is inserted into the genpool as a chunk, and needs
 * this owner struct to keep track of some metadata necessary to create
 * allocations from this chunk.
 */
struct dmabuf_genpool_chunk_owner {
	struct net_iov_area area;
	struct net_devmem_dmabuf_binding *binding;

	/* dma_addr of the start of the chunk. */
	dma_addr_t base_dma_addr;
};

void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding);
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
		       struct netlink_ext_ack *extack);
void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding);
int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack);
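
/* Illustrative only: a minimal sketch of how the bind entry points above are
 * meant to be sequenced by a caller such as a netlink handler. The variables
 * dev, dmabuf_fd, rxq_idx and extack are assumed inputs, not definitions from
 * this header.
 *
 *	binding = net_devmem_bind_dmabuf(dev, dmabuf_fd, extack);
 *	if (IS_ERR(binding))
 *		return PTR_ERR(binding);
 *	err = net_devmem_bind_dmabuf_to_queue(dev, rxq_idx, binding, extack);
 *	if (err)
 *		net_devmem_unbind_dmabuf(binding);
 */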

static inline struct dmabuf_genpool_chunk_owner *
net_devmem_iov_to_chunk_owner(const struct net_iov *niov)
{
	struct net_iov_area *owner = net_iov_owner(niov);

	return container_of(owner, struct dmabuf_genpool_chunk_owner, area);
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_iov_binding(const struct net_iov *niov)
{
	return net_devmem_iov_to_chunk_owner(niov)->binding;
}

static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
{
	return net_devmem_iov_binding(niov)->id;
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	struct net_iov_area *owner = net_iov_owner(niov);

	return owner->base_virtual +
	       ((unsigned long)net_iov_idx(niov) << PAGE_SHIFT);
}
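
/* Illustrative only: the chunk owner's base_dma_addr supports the analogous
 * DMA-side lookup. This helper is a sketch, not part of this header; it
 * assumes the same page-granularity layout that net_iov_virtual_addr() uses.
 *
 *	static inline dma_addr_t example_net_iov_dma_addr(const struct net_iov *niov)
 *	{
 *		struct dmabuf_genpool_chunk_owner *owner =
 *			net_devmem_iov_to_chunk_owner(niov);
 *
 *		return owner->base_dma_addr +
 *		       ((dma_addr_t)net_iov_idx(niov) << PAGE_SHIFT);
 *	}
 */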

static inline void
net_devmem_dmabuf_binding_get(struct net_devmem_dmabuf_binding *binding)
{
	refcount_inc(&binding->ref);
}

static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{
	if (!refcount_dec_and_test(&binding->ref))
		return;

	__net_devmem_dmabuf_binding_free(binding);
}
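
/* Illustrative only: a minimal sketch of the intended get/put pairing for a
 * hypothetical consumer that must keep the binding alive while using it.
 * example_consume() is assumed, not a function from this file.
 *
 *	net_devmem_dmabuf_binding_get(binding);
 *	example_consume(binding);
 *	net_devmem_dmabuf_binding_put(binding);
 *
 * Whichever caller drops the last ref frees the binding via
 * __net_devmem_dmabuf_binding_free(), which unmaps the underlying dmabuf.
 */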

struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding);
void net_devmem_free_dmabuf(struct net_iov *ppiov);

bool net_is_devmem_iov(struct net_iov *niov);
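
/* Illustrative only: code handling a mix of host-memory and devmem frags
 * might gate binding lookups on net_is_devmem_iov(), e.g. (sketch):
 *
 *	if (net_is_devmem_iov(niov))
 *		id = net_devmem_iov_binding_id(niov);
 */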

#else
struct net_devmem_dmabuf_binding;

static inline void
__net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding)
{
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
		       struct netlink_ext_ack *extack)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void
net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
}

static inline int
net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				struct net_devmem_dmabuf_binding *binding,
				struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;
}

static inline struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	return NULL;
}

static inline void net_devmem_free_dmabuf(struct net_iov *ppiov)
{
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	return 0;
}

static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
{
	return 0;
}

static inline bool net_is_devmem_iov(struct net_iov *niov)
{
	return false;
}
#endif

#endif /* _NET_DEVMEM_H */