/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Device memory TCP support
 *
 * Authors:	Mina Almasry <almasrymina@google.com>
 *		Willem de Bruijn <willemb@google.com>
 *		Kaiyuan Zhang <kaiyuanz@google.com>
 *
 */
#ifndef _NET_DEVMEM_H
#define _NET_DEVMEM_H

#include <net/netmem.h>

struct netlink_ext_ack;

struct net_devmem_dmabuf_binding {
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attachment;
	struct sg_table *sgt;
	struct net_device *dev;
	struct gen_pool *chunk_pool;

	/* The user holds a ref (via the netlink API) for as long as they want
	 * the binding to remain alive. Each page pool using this binding holds
	 * a ref to keep the binding alive. Each allocated net_iov holds a
	 * ref.
	 *
	 * The binding undoes itself and unmaps the underlying dmabuf once all
	 * those refs are dropped and the binding is no longer desired or in
	 * use.
	 */
	refcount_t ref;

	/* Entry in the list of bindings currently active. Used by netlink to
	 * notify us of the user dropping the bind.
	 */
	struct list_head list;

	/* Rx queues this binding is active on. */
	struct xarray bound_rxqs;

	/* ID of this binding. Globally unique among all currently active
	 * bindings.
	 */
	u32 id;
};
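
/* Usage sketch (illustrative, not part of this header): a component that
 * needs the binding to stay alive takes its own reference:
 *
 *	net_devmem_dmabuf_binding_get(binding);
 *	... use binding->chunk_pool, binding->dev, ... ...
 *	net_devmem_dmabuf_binding_put(binding);
 *
 * The dmabuf is unmapped only after the user's netlink ref and all page
 * pool and net_iov refs have been dropped.
 */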

#if defined(CONFIG_NET_DEVMEM)
/* Owner of the dma-buf chunks inserted into the gen pool. Each scatterlist
 * entry from the dmabuf is inserted into the genpool as a chunk, and needs
 * this owner struct to keep track of some metadata necessary to create
 * allocations from this chunk.
 */
struct dmabuf_genpool_chunk_owner {
	struct net_iov_area area;
	struct net_devmem_dmabuf_binding *binding;

	/* dma_addr of the start of the chunk. */
	dma_addr_t base_dma_addr;
};

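/* Binding lifecycle. net_devmem_bind_dmabuf() attaches and maps the dmabuf
 * behind @dmabuf_fd to @dev and hands the initial (user) reference to the
 * caller; net_devmem_bind_dmabuf_to_queue() points an rx queue at the
 * binding; net_devmem_unbind_dmabuf() drops the user's reference.
 * __net_devmem_dmabuf_binding_free() is the teardown path, normally reached
 * via net_devmem_dmabuf_binding_put() once the last reference is gone.
 */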
void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding);
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
		       struct netlink_ext_ack *extack);
void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding);
int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack);

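/* Map a devmem net_iov back to the dmabuf_genpool_chunk_owner it was
 * allocated from.
 */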
static inline struct dmabuf_genpool_chunk_owner *
net_devmem_iov_to_chunk_owner(const struct net_iov *niov)
{
	struct net_iov_area *owner = net_iov_owner(niov);

	return container_of(owner, struct dmabuf_genpool_chunk_owner, area);
}
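
/* Convenience helpers: the dmabuf binding backing a devmem net_iov, and
 * that binding's globally unique ID.
 */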
static inline struct net_devmem_dmabuf_binding *
net_devmem_iov_binding(const struct net_iov *niov)
{
	return net_devmem_iov_to_chunk_owner(niov)->binding;
}

static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
{
	return net_devmem_iov_binding(niov)->id;
}

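/* Virtual address of a devmem net_iov: the owning area's virtual base plus
 * this niov's page-sized offset within it.
 */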
static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	struct net_iov_area *owner = net_iov_owner(niov);

	return owner->base_virtual +
	       ((unsigned long)net_iov_idx(niov) << PAGE_SHIFT);
}

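/* Reference management for the refcounting scheme described on
 * struct net_devmem_dmabuf_binding above. _put() frees the binding once
 * the last reference is dropped.
 */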
static inline void
net_devmem_dmabuf_binding_get(struct net_devmem_dmabuf_binding *binding)
{
	refcount_inc(&binding->ref);
}

static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{
	if (!refcount_dec_and_test(&binding->ref))
		return;

	__net_devmem_dmabuf_binding_free(binding);
}

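/* Allocate a single net_iov from @binding's genpool, or return one to it.
 * Allocation may fail and return NULL.
 */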
struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding);
void net_devmem_free_dmabuf(struct net_iov *ppiov);

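/* True if @niov was allocated from a dmabuf binding. */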
bool net_is_devmem_iov(struct net_iov *niov);

#else
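/* Stubs for !CONFIG_NET_DEVMEM builds: binding is reported as unsupported
 * and the remaining helpers degrade to no-ops or benign defaults.
 */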
struct net_devmem_dmabuf_binding;

static inline void
__net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding)
{
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
		       struct netlink_ext_ack *extack)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void
net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
}

static inline int
net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				struct net_devmem_dmabuf_binding *binding,
				struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;
}

static inline struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	return NULL;
}

static inline void net_devmem_free_dmabuf(struct net_iov *ppiov)
{
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	return 0;
}

static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
{
	return 0;
}

static inline bool net_is_devmem_iov(struct net_iov *niov)
{
	return false;
}
#endif

#endif /* _NET_DEVMEM_H */