// SPDX-License-Identifier: GPL-2.0

#include <net/xsk_buff_pool.h>
#include <net/xdp_sock.h>

#include "xsk_queue.h"

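/* xsk_buff_pool: buffer management for AF_XDP zero-copy sockets.
 *
 * Rough lifetime, as suggested by the functions below (the exact call
 * sites live elsewhere): xp_create() builds the pool over the umem pages,
 * xp_dma_map() maps them for a device, xp_set_fq()/xp_set_rxq_info() wire
 * up the fill ring and RX queue info, xp_alloc()/xp_free() serve the data
 * path, and xp_dma_unmap()/xp_destroy() tear everything down.
 */
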
static void xp_addr_unmap(struct xsk_buff_pool *pool)
{
	vunmap(pool->addrs);
}

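/* Map the umem pages into one contiguous kernel virtual address range so
 * that a chunk can be reached directly as pool->addrs + addr.
 */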
static int xp_addr_map(struct xsk_buff_pool *pool,
		       struct page **pages, u32 nr_pages)
{
	pool->addrs = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!pool->addrs)
		return -ENOMEM;
	return 0;
}

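/* Tear down a buffer pool: unmap the address range and free the metadata.
 * Safe to call with a NULL pool, e.g. from the xp_create() error path.
 */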
void xp_destroy(struct xsk_buff_pool *pool)
{
	if (!pool)
		return;

	xp_addr_unmap(pool);
	kvfree(pool->heads);
	kvfree(pool);
}

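/* Allocate and initialize a buffer pool for the given umem pages: one
 * xdp_buff_xsk head per chunk, all initially on the free_heads stack, plus
 * the kernel virtual mapping of the pages. Returns NULL on any failure.
 */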
struct xsk_buff_pool *xp_create(struct page **pages, u32 nr_pages, u32 chunks,
				u32 chunk_size, u32 headroom, u64 size,
				bool unaligned)
{
	struct xsk_buff_pool *pool;
	struct xdp_buff_xsk *xskb;
	int err;
	u32 i;

	pool = kvzalloc(struct_size(pool, free_heads, chunks), GFP_KERNEL);
	if (!pool)
		goto out;

	pool->heads = kvcalloc(chunks, sizeof(*pool->heads), GFP_KERNEL);
	if (!pool->heads)
		goto out;

	pool->chunk_mask = ~((u64)chunk_size - 1);
	pool->addrs_cnt = size;
	pool->heads_cnt = chunks;
	pool->free_heads_cnt = chunks;
	pool->headroom = headroom;
	pool->chunk_size = chunk_size;
	pool->unaligned = unaligned;
	pool->frame_len = chunk_size - headroom - XDP_PACKET_HEADROOM;
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0; i < pool->free_heads_cnt; i++) {
		xskb = &pool->heads[i];
		xskb->pool = pool;
		xskb->xdp.frame_sz = chunk_size - headroom;
		pool->free_heads[i] = xskb;
	}

	err = xp_addr_map(pool, pages, nr_pages);
	if (!err)
		return pool;

out:
	xp_destroy(pool);
	return NULL;
}

void xp_set_fq(struct xsk_buff_pool *pool, struct xsk_queue *fq)
{
	pool->fq = fq;
}

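/* Point every buffer's xdp.rxq at the driver-provided receive queue info. */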
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq)
{
	u32 i;

	for (i = 0; i < pool->heads_cnt; i++)
		pool->heads[i].xdp.rxq = rxq;
}
EXPORT_SYMBOL(xp_set_rxq_info);

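/* Undo xp_dma_map(): unmap every page that was mapped for DMA, then free
 * the dma_pages array and forget the device.
 */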
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
{
	dma_addr_t *dma;
	u32 i;

	if (pool->dma_pages_cnt == 0)
		return;

	for (i = 0; i < pool->dma_pages_cnt; i++) {
		dma = &pool->dma_pages[i];
		if (*dma) {
			dma_unmap_page_attrs(pool->dev, *dma, PAGE_SIZE,
					     DMA_BIDIRECTIONAL, attrs);
			*dma = 0;
		}
	}

	kvfree(pool->dma_pages);
	pool->dma_pages_cnt = 0;
	pool->dev = NULL;
}
EXPORT_SYMBOL(xp_dma_unmap);

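/* Mark each DMA page whose successor is contiguous by setting
 * XSK_NEXT_PG_CONTIG_MASK in its address. Only used in unaligned chunk
 * mode, where a frame may straddle a page boundary.
 */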
static void xp_check_dma_contiguity(struct xsk_buff_pool *pool)
{
	u32 i;

	for (i = 0; i < pool->dma_pages_cnt - 1; i++) {
		if (pool->dma_pages[i] + PAGE_SIZE == pool->dma_pages[i + 1])
			pool->dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK;
		else
			pool->dma_pages[i] &= ~XSK_NEXT_PG_CONTIG_MASK;
	}
}

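/* DMA map every umem page for the given device. Records whether the
 * mappings need explicit syncing and, in unaligned mode, which neighbouring
 * pages are DMA contiguous. Cleans up after itself on mapping failure.
 */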
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages)
{
	dma_addr_t dma;
	u32 i;

	pool->dma_pages = kvcalloc(nr_pages, sizeof(*pool->dma_pages),
				   GFP_KERNEL);
	if (!pool->dma_pages)
		return -ENOMEM;

	pool->dev = dev;
	pool->dma_pages_cnt = nr_pages;
	pool->dma_need_sync = false;

	for (i = 0; i < pool->dma_pages_cnt; i++) {
		dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL, attrs);
		if (dma_mapping_error(dev, dma)) {
			xp_dma_unmap(pool, attrs);
			return -ENOMEM;
		}
		if (dma_need_sync(dev, dma))
			pool->dma_need_sync = true;
		pool->dma_pages[i] = dma;
	}

	if (pool->unaligned)
		xp_check_dma_contiguity(pool);
	return 0;
}
EXPORT_SYMBOL(xp_dma_map);

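/* True if a chunk starting at addr would span two pages that are not DMA
 * contiguous, which a single descriptor must not do.
 */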
static bool xp_addr_crosses_non_contig_pg(struct xsk_buff_pool *pool,
					  u64 addr)
{
	return xp_desc_crosses_non_contig_pg(pool, addr, pool->chunk_size);
}

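/* Validate a fill-ring address in unaligned mode: strip the offset bits,
 * then make sure the chunk lies inside the umem and does not cross a
 * non-contiguous page boundary.
 */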
static bool xp_check_unaligned(struct xsk_buff_pool *pool, u64 *addr)
{
	*addr = xp_unaligned_extract_addr(*addr);
	if (*addr >= pool->addrs_cnt ||
	    *addr + pool->chunk_size > pool->addrs_cnt ||
	    xp_addr_crosses_non_contig_pg(pool, *addr))
		return false;
	return true;
}

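/* Validate a fill-ring address in aligned mode: round it down to the chunk
 * boundary and check that it lies inside the umem.
 */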
static bool xp_check_aligned(struct xsk_buff_pool *pool, u64 *addr)
{
	*addr = xp_aligned_extract_addr(pool, *addr);
	return *addr < pool->addrs_cnt;
}

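/* Slow path allocation: take a free head and pair it with the next valid
 * address from the fill ring, then precompute the buffer's kernel virtual
 * and (if DMA mapped) bus addresses. Invalid fill entries are skipped and
 * counted.
 */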
static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb;
	u64 addr;
	bool ok;

	if (pool->free_heads_cnt == 0)
		return NULL;

	xskb = pool->free_heads[--pool->free_heads_cnt];

	for (;;) {
		if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) {
			pool->fq->queue_empty_descs++;
			xp_release(xskb);
			return NULL;
		}

		ok = pool->unaligned ? xp_check_unaligned(pool, &addr) :
		     xp_check_aligned(pool, &addr);
		if (!ok) {
			pool->fq->invalid_descs++;
			xskq_cons_release(pool->fq);
			continue;
		}
		break;
	}
	xskq_cons_release(pool->fq);

	xskb->orig_addr = addr;
	xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
	if (pool->dma_pages_cnt) {
		xskb->frame_dma = (pool->dma_pages[addr >> PAGE_SHIFT] &
				   ~XSK_NEXT_PG_CONTIG_MASK) +
				  (addr & ~PAGE_MASK);
		xskb->dma = xskb->frame_dma + pool->headroom +
			    XDP_PACKET_HEADROOM;
	}
	return xskb;
}

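/* Allocate one buffer for the driver RX path. Buffers recycled via
 * xp_free() are reused first; otherwise fall back to __xp_alloc(). The
 * data pointers are reset and the frame is synced for the device when the
 * DMA mapping requires it.
 */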
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb;

	if (!pool->free_list_cnt) {
		xskb = __xp_alloc(pool);
		if (!xskb)
			return NULL;
	} else {
		pool->free_list_cnt--;
		xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk,
					free_list_node);
		list_del(&xskb->free_list_node);
	}

	xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
	xskb->xdp.data_meta = xskb->xdp.data;

	if (pool->dma_need_sync) {
		dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
						 pool->frame_len,
						 DMA_BIDIRECTIONAL);
	}
	return &xskb->xdp;
}
EXPORT_SYMBOL(xp_alloc);

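/* Return true if count buffers can be allocated, counting both the recycle
 * list and the entries currently available in the fill ring.
 */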
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	if (pool->free_list_cnt >= count)
		return true;
	return xskq_cons_has_entries(pool->fq, count - pool->free_list_cnt);
}
EXPORT_SYMBOL(xp_can_alloc);

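/* Return a buffer to the pool's recycle list so a later xp_alloc() can
 * reuse it without touching the fill ring.
 */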
void xp_free(struct xdp_buff_xsk *xskb)
{
	xskb->pool->free_list_cnt++;
	list_add(&xskb->free_list_node, &xskb->pool->free_list);
}
EXPORT_SYMBOL(xp_free);

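/* Kernel virtual address for a raw descriptor address, typically used when
 * resolving TX descriptors. In unaligned mode the offset encoded in the
 * upper bits is folded into the address first.
 */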
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
	return pool->addrs + addr;
}
EXPORT_SYMBOL(xp_raw_get_data);

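/* DMA address for a raw descriptor address, resolved through the per-page
 * DMA mapping table with the contiguity marker bit masked off.
 */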
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr)
{
	addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
	return (pool->dma_pages[addr >> PAGE_SHIFT] &
		~XSK_NEXT_PG_CONTIG_MASK) +
		(addr & ~PAGE_MASK);
}
EXPORT_SYMBOL(xp_raw_get_dma);

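/* Slow path DMA sync: make a received frame visible to the CPU. Only
 * needed when the mapping requires explicit syncing (dma_need_sync).
 */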
void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb)
{
	dma_sync_single_range_for_cpu(xskb->pool->dev, xskb->dma, 0,
				      xskb->pool->frame_len, DMA_BIDIRECTIONAL);
}
EXPORT_SYMBOL(xp_dma_sync_for_cpu_slow);

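/* Slow path DMA sync: hand a frame of the given size back to the device
 * before it is used for DMA again.
 */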
void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
				 size_t size)
{
	dma_sync_single_range_for_device(pool->dev, dma, 0,
					 size, DMA_BIDIRECTIONAL);
}
EXPORT_SYMBOL(xp_dma_sync_for_device_slow);