// SPDX-License-Identifier: GPL-2.0

#include <linux/netdevice.h>
#include <net/netdev_lock.h>
#include <net/xsk_buff_pool.h>
#include <net/xdp_sock.h>
#include <net/xdp_sock_drv.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

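/* Track sockets that transmit through this pool; RX-only sockets are not
 * added to the xsk_tx_list.
 */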
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
	if (!xs->tx)
		return;

	spin_lock(&pool->xsk_tx_list_lock);
	list_add_rcu(&xs->tx_list, &pool->xsk_tx_list);
	spin_unlock(&pool->xsk_tx_list_lock);
}

void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
	if (!xs->tx)
		return;

	spin_lock(&pool->xsk_tx_list_lock);
	list_del_rcu(&xs->tx_list);
	spin_unlock(&pool->xsk_tx_list_lock);
}

void xp_destroy(struct xsk_buff_pool *pool)
{
	if (!pool)
		return;

	kvfree(pool->tx_descs);
	kvfree(pool->heads);
	kvfree(pool);
}

int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
	pool->tx_descs = kvcalloc(xs->tx->nentries, sizeof(*pool->tx_descs),
				  GFP_KERNEL);
	if (!pool->tx_descs)
		return -ENOMEM;

	return 0;
}

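/* Allocate a buffer pool for @xs and tie it to @umem: one xdp_buff_xsk head
 * per umem chunk, plus a free_heads stack that is only used in unaligned
 * chunk mode.
 */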
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
						struct xdp_umem *umem)
{
	bool unaligned = umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
	struct xsk_buff_pool *pool;
	struct xdp_buff_xsk *xskb;
	u32 i, entries;

	entries = unaligned ? umem->chunks : 0;
	pool = kvzalloc(struct_size(pool, free_heads, entries), GFP_KERNEL);
	if (!pool)
		goto out;

	pool->heads = kvcalloc(umem->chunks, sizeof(*pool->heads), GFP_KERNEL);
	if (!pool->heads)
		goto out;

	if (xs->tx)
		if (xp_alloc_tx_descs(pool, xs))
			goto out;

	pool->chunk_mask = ~((u64)umem->chunk_size - 1);
	pool->addrs_cnt = umem->size;
	pool->heads_cnt = umem->chunks;
	pool->free_heads_cnt = umem->chunks;
	pool->headroom = umem->headroom;
	pool->chunk_size = umem->chunk_size;
	pool->chunk_shift = ffs(umem->chunk_size) - 1;
	pool->unaligned = unaligned;
	pool->frame_len = umem->chunk_size - umem->headroom -
			  XDP_PACKET_HEADROOM;
	pool->umem = umem;
	pool->addrs = umem->addrs;
	pool->tx_metadata_len = umem->tx_metadata_len;
	pool->tx_sw_csum = umem->flags & XDP_UMEM_TX_SW_CSUM;
	spin_lock_init(&pool->rx_lock);
	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->xskb_list);
	INIT_LIST_HEAD(&pool->xsk_tx_list);
	spin_lock_init(&pool->xsk_tx_list_lock);
	spin_lock_init(&pool->cq_prod_lock);
	spin_lock_init(&pool->cq_cached_prod_lock);
	refcount_set(&pool->users, 1);

	pool->fq = xs->fq_tmp;
	pool->cq = xs->cq_tmp;

	for (i = 0; i < pool->free_heads_cnt; i++) {
		xskb = &pool->heads[i];
		xskb->pool = pool;
		xskb->xdp.frame_sz = umem->chunk_size - umem->headroom;
		INIT_LIST_HEAD(&xskb->list_node);
		if (pool->unaligned)
			pool->free_heads[i] = xskb;
		else
			xp_init_xskb_addr(xskb, pool, (u64)i * pool->chunk_size);
	}

	return pool;

out:
	xp_destroy(pool);
	return NULL;
}

void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq)
{
	u32 i;

	for (i = 0; i < pool->heads_cnt; i++)
		pool->heads[i].xdp.rxq = rxq;
}
EXPORT_SYMBOL(xp_set_rxq_info);

void xp_fill_cb(struct xsk_buff_pool *pool, struct xsk_cb_desc *desc)
{
	u32 i;

	for (i = 0; i < pool->heads_cnt; i++) {
		struct xdp_buff_xsk *xskb = &pool->heads[i];

		memcpy(xskb->cb + desc->off, desc->src, desc->bytes);
	}
}
EXPORT_SYMBOL(xp_fill_cb);

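/* Tell the driver to tear down its zero-copy state for this queue by sending
 * an XDP_SETUP_XSK_POOL command with a NULL pool; a no-op in copy mode.
 */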
static void xp_disable_drv_zc(struct xsk_buff_pool *pool)
{
	struct netdev_bpf bpf;
	int err;

	ASSERT_RTNL();

	if (pool->umem->zc) {
		bpf.command = XDP_SETUP_XSK_POOL;
		bpf.xsk.pool = NULL;
		bpf.xsk.queue_id = pool->queue_id;

		err = pool->netdev->netdev_ops->ndo_bpf(pool->netdev, &bpf);

		if (err)
			WARN(1, "Failed to disable zero-copy!\n");
	}
}

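/* Bind the pool to a netdev queue. Copy mode always works; zero-copy is
 * negotiated with the driver via ndo_bpf and, unless XDP_ZEROCOPY was
 * explicitly requested, a zero-copy failure falls back to copy mode.
 */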
int xp_assign_dev(struct xsk_buff_pool *pool,
		  struct net_device *netdev, u16 queue_id, u16 flags)
{
	bool force_zc, force_copy;
	struct netdev_bpf bpf;
	int err = 0;

	ASSERT_RTNL();

	force_zc = flags & XDP_ZEROCOPY;
	force_copy = flags & XDP_COPY;

	if (force_zc && force_copy)
		return -EINVAL;

	if (xsk_get_pool_from_qid(netdev, queue_id))
		return -EBUSY;

	pool->netdev = netdev;
	pool->queue_id = queue_id;
	err = xsk_reg_pool_at_qid(netdev, pool, queue_id);
	if (err)
		return err;

	if (flags & XDP_USE_SG)
		pool->umem->flags |= XDP_UMEM_SG_FLAG;

	if (flags & XDP_USE_NEED_WAKEUP)
		pool->uses_need_wakeup = true;
	/* Tx needs to be explicitly woken up the first time. Also
	 * for supporting drivers that do not implement this
	 * feature. They will always have to call sendto() or poll().
	 */
	pool->cached_need_wakeup = XDP_WAKEUP_TX;

	dev_hold(netdev);

	if (force_copy)
		/* For copy-mode, we are done. */
		return 0;

	if ((netdev->xdp_features & NETDEV_XDP_ACT_XSK) != NETDEV_XDP_ACT_XSK) {
		err = -EOPNOTSUPP;
		goto err_unreg_pool;
	}

	if (netdev->xdp_zc_max_segs == 1 && (flags & XDP_USE_SG)) {
		err = -EOPNOTSUPP;
		goto err_unreg_pool;
	}

	if (dev_get_min_mp_channel_count(netdev)) {
		err = -EBUSY;
		goto err_unreg_pool;
	}

	bpf.command = XDP_SETUP_XSK_POOL;
	bpf.xsk.pool = pool;
	bpf.xsk.queue_id = queue_id;

	netdev_ops_assert_locked(netdev);
	err = netdev->netdev_ops->ndo_bpf(netdev, &bpf);
	if (err)
		goto err_unreg_pool;

	if (!pool->dma_pages) {
		WARN(1, "Driver did not DMA map zero-copy buffers");
		err = -EINVAL;
		goto err_unreg_xsk;
	}
	pool->umem->zc = true;
	pool->xdp_zc_max_segs = netdev->xdp_zc_max_segs;
	return 0;

err_unreg_xsk:
	xp_disable_drv_zc(pool);
err_unreg_pool:
	if (!force_zc)
		err = 0; /* fallback to copy mode */
	if (err) {
		xsk_clear_pool_at_qid(netdev, queue_id);
		dev_put(netdev);
	}
	return err;
}

int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
			 struct net_device *dev, u16 queue_id)
{
	u16 flags;
	struct xdp_umem *umem = umem_xs->umem;

	/* One fill and completion ring required for each queue id. */
	if (!pool->fq || !pool->cq)
		return -EINVAL;

	flags = umem->zc ? XDP_ZEROCOPY : XDP_COPY;
	if (umem_xs->pool->uses_need_wakeup)
		flags |= XDP_USE_NEED_WAKEUP;

	return xp_assign_dev(pool, dev, queue_id, flags);
}

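/* Detach the pool from its netdev queue: disable driver zero-copy, clear the
 * per-queue pool pointer and drop the device reference taken at bind time.
 */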
void xp_clear_dev(struct xsk_buff_pool *pool)
{
	struct net_device *netdev = pool->netdev;

	if (!pool->netdev)
		return;

	netdev_lock_ops(netdev);
	xp_disable_drv_zc(pool);
	xsk_clear_pool_at_qid(pool->netdev, pool->queue_id);
	pool->netdev = NULL;
	netdev_unlock_ops(netdev);
	dev_put(netdev);
}

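/* Final pool teardown, deferred to a workqueue by xp_put_pool(): detach from
 * the device under rtnl_lock, free the fill and completion rings, then drop
 * the umem and the pool itself.
 */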
static void xp_release_deferred(struct work_struct *work)
{
	struct xsk_buff_pool *pool = container_of(work, struct xsk_buff_pool,
						  work);

	rtnl_lock();
	xp_clear_dev(pool);
	rtnl_unlock();

	if (pool->fq) {
		xskq_destroy(pool->fq);
		pool->fq = NULL;
	}

	if (pool->cq) {
		xskq_destroy(pool->cq);
		pool->cq = NULL;
	}

	xdp_put_umem(pool->umem, false);
	xp_destroy(pool);
}

void xp_get_pool(struct xsk_buff_pool *pool)
{
	refcount_inc(&pool->users);
}

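/* Drop a reference to the pool. The last put schedules the actual teardown
 * via xp_release_deferred() and returns true.
 */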
bool xp_put_pool(struct xsk_buff_pool *pool)
{
	if (!pool)
		return false;

	if (refcount_dec_and_test(&pool->users)) {
		INIT_WORK(&pool->work, xp_release_deferred);
		schedule_work(&pool->work);
		return true;
	}

	return false;
}

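/* Find an existing DMA mapping of this umem for the pool's netdev, so that
 * sockets sharing a umem on the same device can share one mapping.
 */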
static struct xsk_dma_map *xp_find_dma_map(struct xsk_buff_pool *pool)
{
	struct xsk_dma_map *dma_map;

	list_for_each_entry(dma_map, &pool->umem->xsk_dma_list, list) {
		if (dma_map->netdev == pool->netdev)
			return dma_map;
	}

	return NULL;
}

static struct xsk_dma_map *xp_create_dma_map(struct device *dev, struct net_device *netdev,
					     u32 nr_pages, struct xdp_umem *umem)
{
	struct xsk_dma_map *dma_map;

	dma_map = kzalloc(sizeof(*dma_map), GFP_KERNEL);
	if (!dma_map)
		return NULL;

	dma_map->dma_pages = kvcalloc(nr_pages, sizeof(*dma_map->dma_pages), GFP_KERNEL);
	if (!dma_map->dma_pages) {
		kfree(dma_map);
		return NULL;
	}

	dma_map->netdev = netdev;
	dma_map->dev = dev;
	dma_map->dma_pages_cnt = nr_pages;
	refcount_set(&dma_map->users, 1);
	list_add(&dma_map->list, &umem->xsk_dma_list);
	return dma_map;
}

static void xp_destroy_dma_map(struct xsk_dma_map *dma_map)
{
	list_del(&dma_map->list);
	kvfree(dma_map->dma_pages);
	kfree(dma_map);
}

static void __xp_dma_unmap(struct xsk_dma_map *dma_map, unsigned long attrs)
{
	dma_addr_t *dma;
	u32 i;

	for (i = 0; i < dma_map->dma_pages_cnt; i++) {
		dma = &dma_map->dma_pages[i];
		if (*dma) {
			*dma &= ~XSK_NEXT_PG_CONTIG_MASK;
			dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE,
					     DMA_BIDIRECTIONAL, attrs);
			*dma = 0;
		}
	}

	xp_destroy_dma_map(dma_map);
}

void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
{
	struct xsk_dma_map *dma_map;

	if (!pool->dma_pages)
		return;

	dma_map = xp_find_dma_map(pool);
	if (!dma_map) {
		WARN(1, "Could not find dma_map for device");
		return;
	}

	if (refcount_dec_and_test(&dma_map->users))
		__xp_dma_unmap(dma_map, attrs);

	kvfree(pool->dma_pages);
	pool->dma_pages = NULL;
	pool->dma_pages_cnt = 0;
	pool->dev = NULL;
}
EXPORT_SYMBOL(xp_dma_unmap);

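/* Flag each page whose DMA address is contiguous with that of the next page.
 * Only used in unaligned chunk mode, where a buffer may straddle a page
 * boundary.
 */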
static void xp_check_dma_contiguity(struct xsk_dma_map *dma_map)
{
	u32 i;

	for (i = 0; i < dma_map->dma_pages_cnt - 1; i++) {
		if (dma_map->dma_pages[i] + PAGE_SIZE == dma_map->dma_pages[i + 1])
			dma_map->dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK;
		else
			dma_map->dma_pages[i] &= ~XSK_NEXT_PG_CONTIG_MASK;
	}
}

static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_map)
{
	if (!pool->unaligned) {
		u32 i;

		for (i = 0; i < pool->heads_cnt; i++) {
			struct xdp_buff_xsk *xskb = &pool->heads[i];
			u64 orig_addr;

			orig_addr = xskb->xdp.data_hard_start - pool->addrs - pool->headroom;
			xp_init_xskb_dma(xskb, pool, dma_map->dma_pages, orig_addr);
		}
	}

	pool->dma_pages = kvcalloc(dma_map->dma_pages_cnt, sizeof(*pool->dma_pages), GFP_KERNEL);
	if (!pool->dma_pages)
		return -ENOMEM;

	pool->dev = dma_map->dev;
	pool->dma_pages_cnt = dma_map->dma_pages_cnt;
	memcpy(pool->dma_pages, dma_map->dma_pages,
	       pool->dma_pages_cnt * sizeof(*pool->dma_pages));

	return 0;
}

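/* DMA map all pages of the umem for @dev, or reuse an existing mapping when
 * this umem has already been mapped for the pool's netdev by another socket.
 */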
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages)
{
	struct xsk_dma_map *dma_map;
	dma_addr_t dma;
	int err;
	u32 i;

	dma_map = xp_find_dma_map(pool);
	if (dma_map) {
		err = xp_init_dma_info(pool, dma_map);
		if (err)
			return err;

		refcount_inc(&dma_map->users);
		return 0;
	}

	dma_map = xp_create_dma_map(dev, pool->netdev, nr_pages, pool->umem);
	if (!dma_map)
		return -ENOMEM;

	for (i = 0; i < dma_map->dma_pages_cnt; i++) {
		dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL, attrs);
		if (dma_mapping_error(dev, dma)) {
			__xp_dma_unmap(dma_map, attrs);
			return -ENOMEM;
		}
		dma_map->dma_pages[i] = dma;
	}

	if (pool->unaligned)
		xp_check_dma_contiguity(dma_map);

	err = xp_init_dma_info(pool, dma_map);
	if (err) {
		__xp_dma_unmap(dma_map, attrs);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(xp_dma_map);

static bool xp_addr_crosses_non_contig_pg(struct xsk_buff_pool *pool,
					  u64 addr)
{
	return xp_desc_crosses_non_contig_pg(pool, addr, pool->chunk_size);
}

static bool xp_check_unaligned(struct xsk_buff_pool *pool, u64 *addr)
{
	*addr = xp_unaligned_extract_addr(*addr);
	if (*addr >= pool->addrs_cnt ||
	    *addr + pool->chunk_size > pool->addrs_cnt ||
	    xp_addr_crosses_non_contig_pg(pool, *addr))
		return false;
	return true;
}

static bool xp_check_aligned(struct xsk_buff_pool *pool, u64 *addr)
{
	*addr = xp_aligned_extract_addr(pool, *addr);
	return *addr < pool->addrs_cnt;
}

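/* Turn a validated fill-queue address into an xdp_buff_xsk. Aligned mode
 * indexes straight into the heads array; unaligned mode pops a head off the
 * free_heads stack and (re)initializes it for this address.
 */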
static struct xdp_buff_xsk *xp_get_xskb(struct xsk_buff_pool *pool, u64 addr)
{
	struct xdp_buff_xsk *xskb;

	if (pool->unaligned) {
		xskb = pool->free_heads[--pool->free_heads_cnt];
		xp_init_xskb_addr(xskb, pool, addr);
		if (pool->dma_pages)
			xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
	} else {
		xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
	}

	return xskb;
}

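/* Allocate one buffer straight from the fill ring, skipping and counting any
 * invalid descriptors along the way.
 */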
static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb;
	u64 addr;
	bool ok;

	if (pool->free_heads_cnt == 0)
		return NULL;

	for (;;) {
		if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) {
			pool->fq->queue_empty_descs++;
			return NULL;
		}

		ok = pool->unaligned ? xp_check_unaligned(pool, &addr) :
		     xp_check_aligned(pool, &addr);
		if (!ok) {
			pool->fq->invalid_descs++;
			xskq_cons_release(pool->fq);
			continue;
		}
		break;
	}

	xskb = xp_get_xskb(pool, addr);

	xskq_cons_release(pool->fq);
	return xskb;
}

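/* Allocate a single buffer, preferring the pool's free (recycle) list over
 * the fill ring, and reset its xdp_buff fields for a fresh frame.
 */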
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb;

	if (!pool->free_list_cnt) {
		xskb = __xp_alloc(pool);
		if (!xskb)
			return NULL;
	} else {
		pool->free_list_cnt--;
		xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk,
					list_node);
		list_del_init(&xskb->list_node);
	}

	xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
	xskb->xdp.data_meta = xskb->xdp.data;
	xskb->xdp.flags = 0;

	if (pool->dev)
		xp_dma_sync_for_device(pool, xskb->dma, pool->frame_len);

	return &xskb->xdp;
}
EXPORT_SYMBOL(xp_alloc);

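/* Batched variant of __xp_alloc(): read up to @max addresses from the fill
 * ring in one pass, dropping invalid descriptors, and store the resulting
 * buffers in @xdp. Returns the number of buffers produced.
 */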
static u32 xp_alloc_new_from_fq(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	u32 i, cached_cons, nb_entries;

	if (max > pool->free_heads_cnt)
		max = pool->free_heads_cnt;
	max = xskq_cons_nb_entries(pool->fq, max);

	cached_cons = pool->fq->cached_cons;
	nb_entries = max;
	i = max;
	while (i--) {
		struct xdp_buff_xsk *xskb;
		u64 addr;
		bool ok;

		__xskq_cons_read_addr_unchecked(pool->fq, cached_cons++, &addr);

		ok = pool->unaligned ? xp_check_unaligned(pool, &addr) :
		     xp_check_aligned(pool, &addr);
		if (unlikely(!ok)) {
			pool->fq->invalid_descs++;
			nb_entries--;
			continue;
		}

		xskb = xp_get_xskb(pool, addr);

		*xdp = &xskb->xdp;
		xdp++;
	}

	xskq_cons_release_n(pool->fq, max);
	return nb_entries;
}

static u32 xp_alloc_reused(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 nb_entries)
{
	struct xdp_buff_xsk *xskb;
	u32 i;

	nb_entries = min_t(u32, nb_entries, pool->free_list_cnt);

	i = nb_entries;
	while (i--) {
		xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk, list_node);
		list_del_init(&xskb->list_node);

		*xdp = &xskb->xdp;
		xdp++;
	}
	pool->free_list_cnt -= nb_entries;

	return nb_entries;
}

static u32 xp_alloc_slow(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
			 u32 max)
{
	int i;

	for (i = 0; i < max; i++) {
		struct xdp_buff *buff;

		buff = xp_alloc(pool);
		if (unlikely(!buff))
			return i;
		*xdp = buff;
		xdp++;
	}

	return max;
}

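/* Batch-allocate up to @max buffers, first from the free list and then from
 * the fill ring. When the DMA device needs syncing, fall back to one-by-one
 * xp_alloc(), which is the only path that syncs buffers for the device.
 */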
u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	u32 nb_entries1 = 0, nb_entries2;

	if (unlikely(pool->dev && dma_dev_need_sync(pool->dev)))
		return xp_alloc_slow(pool, xdp, max);

	if (unlikely(pool->free_list_cnt)) {
		nb_entries1 = xp_alloc_reused(pool, xdp, max);
		if (nb_entries1 == max)
			return nb_entries1;

		max -= nb_entries1;
		xdp += nb_entries1;
	}

	nb_entries2 = xp_alloc_new_from_fq(pool, xdp, max);
	if (!nb_entries2)
		pool->fq->queue_empty_descs++;

	return nb_entries1 + nb_entries2;
}
EXPORT_SYMBOL(xp_alloc_batch);

bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	u32 req_count, avail_count;

	if (pool->free_list_cnt >= count)
		return true;

	req_count = count - pool->free_list_cnt;
	avail_count = xskq_cons_nb_entries(pool->fq, req_count);
	if (!avail_count)
		pool->fq->queue_empty_descs++;

	return avail_count >= req_count;
}
EXPORT_SYMBOL(xp_can_alloc);

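/* Return a buffer to the pool's free list; buffers that are already on a
 * list are left alone.
 */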
void xp_free(struct xdp_buff_xsk *xskb)
{
	if (!list_empty(&xskb->list_node))
		return;

	xskb->pool->free_list_cnt++;
	list_add(&xskb->list_node, &xskb->pool->free_list);
}
EXPORT_SYMBOL(xp_free);

static u64 __xp_raw_get_addr(const struct xsk_buff_pool *pool, u64 addr)
{
	return pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
}

static void *__xp_raw_get_data(const struct xsk_buff_pool *pool, u64 addr)
{
	return pool->addrs + addr;
}

void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return __xp_raw_get_data(pool, __xp_raw_get_addr(pool, addr));
}
EXPORT_SYMBOL(xp_raw_get_data);

static dma_addr_t __xp_raw_get_dma(const struct xsk_buff_pool *pool, u64 addr)
{
	return (pool->dma_pages[addr >> PAGE_SHIFT] &
		~XSK_NEXT_PG_CONTIG_MASK) +
		(addr & ~PAGE_MASK);
}

dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr)
{
	return __xp_raw_get_dma(pool, __xp_raw_get_addr(pool, addr));
}
EXPORT_SYMBOL(xp_raw_get_dma);

/**
 * xp_raw_get_ctx - get &xdp_desc context
 * @pool: XSk buff pool desc address belongs to
 * @addr: desc address (from userspace)
 *
 * Helper for getting desc's DMA address and metadata pointer, if present.
 * Saves one call on hotpath, double calculation of the actual address,
 * and inline checks for metadata presence and sanity.
 *
 * Return: new &xdp_desc_ctx struct containing desc's DMA address and metadata
 * pointer, if it is present and valid (initialized to %NULL otherwise).
 */
struct xdp_desc_ctx xp_raw_get_ctx(const struct xsk_buff_pool *pool, u64 addr)
{
	struct xdp_desc_ctx ret;

	addr = __xp_raw_get_addr(pool, addr);

	ret.dma = __xp_raw_get_dma(pool, addr);
	ret.meta = __xsk_buff_get_metadata(pool, __xp_raw_get_data(pool, addr));

	return ret;
}
EXPORT_SYMBOL(xp_raw_get_ctx);
755EXPORT_SYMBOL(xp_raw_get_ctx);