Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2
3/*
4 * AF_XDP user-space access library.
5 *
6 * Copyright(c) 2018 - 2019 Intel Corporation.
7 *
8 * Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
9 */
10
11#include <errno.h>
12#include <stdlib.h>
13#include <string.h>
14#include <unistd.h>
15#include <arpa/inet.h>
16#include <asm/barrier.h>
17#include <linux/compiler.h>
18#include <linux/ethtool.h>
19#include <linux/filter.h>
20#include <linux/if_ether.h>
21#include <linux/if_link.h>
22#include <linux/if_packet.h>
23#include <linux/if_xdp.h>
24#include <linux/kernel.h>
25#include <linux/list.h>
26#include <linux/netlink.h>
27#include <linux/rtnetlink.h>
28#include <linux/sockios.h>
29#include <net/if.h>
30#include <sys/ioctl.h>
31#include <sys/mman.h>
32#include <sys/socket.h>
33#include <sys/types.h>
34
35#include <bpf/bpf.h>
36#include <bpf/libbpf.h>
37#include "xsk.h"
38#include "bpf_util.h"
39
40#ifndef SOL_XDP
41 #define SOL_XDP 283
42#endif
43
44#ifndef AF_XDP
45 #define AF_XDP 44
46#endif
47
48#ifndef PF_XDP
49 #define PF_XDP AF_XDP
50#endif
51
52#define pr_warn(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)
53
54#define XSKMAP_SIZE 1
55
/* A registered UMEM area plus the state shared by all sockets using it. */
struct xsk_umem {
	/* Fill/completion rings saved from xsk_umem__create() until the
	 * first socket consumes them (xsk_socket__create_shared() then
	 * resets both to NULL).
	 */
	struct xsk_ring_prod *fill_save;
	struct xsk_ring_cons *comp_save;
	char *umem_area;		/* start of the caller-provided buffer */
	struct xsk_umem_config config;
	int fd;				/* AF_XDP socket the umem is registered on */
	int refcount;			/* number of xsk_sockets sharing this umem */
	struct list_head ctx_list;	/* struct xsk_ctx entries, one per (ifindex, queue) */
	bool rx_ring_setup_done;	/* XDP_RX_RING already configured on umem->fd */
	bool tx_ring_setup_done;	/* XDP_TX_RING already configured on umem->fd */
};
67
/* Per-(ifindex, queue_id) state shared by all sockets bound to the same
 * umem, interface and queue.
 */
struct xsk_ctx {
	struct xsk_ring_prod *fill;	/* fill ring used by this context */
	struct xsk_ring_cons *comp;	/* completion ring used by this context */
	__u32 queue_id;
	struct xsk_umem *umem;		/* back-pointer to the owning umem */
	int refcount;			/* sockets currently referencing this context */
	int ifindex;
	struct list_head list;		/* linkage in umem->ctx_list */
};
77
/* One AF_XDP socket; rx/tx are NULL for TX-only/RX-only sockets. */
struct xsk_socket {
	struct xsk_ring_cons *rx;
	struct xsk_ring_prod *tx;
	struct xsk_ctx *ctx;		/* (ifindex, queue) context, shared via refcount */
	struct xsk_socket_config config;
	int fd;				/* equals umem->fd for the first socket on a umem */
};
85
/* rtnetlink RTM_NEWLINK request used by xsk_set_mtu(); buf provides
 * space for the appended IFLA_MTU attribute.
 */
struct nl_mtu_req {
	struct nlmsghdr nh;
	struct ifinfomsg msg;
	char buf[512];
};
91
92int xsk_umem__fd(const struct xsk_umem *umem)
93{
94 return umem ? umem->fd : -EINVAL;
95}
96
97int xsk_socket__fd(const struct xsk_socket *xsk)
98{
99 return xsk ? xsk->fd : -EINVAL;
100}
101
/* True iff @buffer starts on a page boundary. */
static bool xsk_page_aligned(void *buffer)
{
	unsigned long mask = getpagesize() - 1;

	return ((unsigned long)buffer & mask) == 0;
}
108
109static void xsk_set_umem_config(struct xsk_umem_config *cfg,
110 const struct xsk_umem_config *usr_cfg)
111{
112 if (!usr_cfg) {
113 cfg->fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
114 cfg->comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
115 cfg->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
116 cfg->frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM;
117 cfg->flags = XSK_UMEM__DEFAULT_FLAGS;
118 return;
119 }
120
121 cfg->fill_size = usr_cfg->fill_size;
122 cfg->comp_size = usr_cfg->comp_size;
123 cfg->frame_size = usr_cfg->frame_size;
124 cfg->frame_headroom = usr_cfg->frame_headroom;
125 cfg->flags = usr_cfg->flags;
126}
127
128static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
129 const struct xsk_socket_config *usr_cfg)
130{
131 if (!usr_cfg) {
132 cfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
133 cfg->tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
134 cfg->bind_flags = 0;
135 return 0;
136 }
137
138 cfg->rx_size = usr_cfg->rx_size;
139 cfg->tx_size = usr_cfg->tx_size;
140 cfg->bind_flags = usr_cfg->bind_flags;
141
142 return 0;
143}
144
/* Query the kernel's XDP_MMAP_OFFSETS for socket @fd into @off.
 * Returns 0 on success, getsockopt()'s failure value, or -EINVAL when
 * the kernel returned an unexpected option length.
 */
static int xsk_get_mmap_offsets(int fd, struct xdp_mmap_offsets *off)
{
	socklen_t optlen = sizeof(*off);
	int err;

	err = getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, off, &optlen);
	if (err)
		return err;

	return optlen == sizeof(*off) ? 0 : -EINVAL;
}
160
161static int xsk_create_umem_rings(struct xsk_umem *umem, int fd,
162 struct xsk_ring_prod *fill,
163 struct xsk_ring_cons *comp)
164{
165 struct xdp_mmap_offsets off;
166 void *map;
167 int err;
168
169 err = setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING,
170 &umem->config.fill_size,
171 sizeof(umem->config.fill_size));
172 if (err)
173 return -errno;
174
175 err = setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING,
176 &umem->config.comp_size,
177 sizeof(umem->config.comp_size));
178 if (err)
179 return -errno;
180
181 err = xsk_get_mmap_offsets(fd, &off);
182 if (err)
183 return -errno;
184
185 map = mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64),
186 PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
187 XDP_UMEM_PGOFF_FILL_RING);
188 if (map == MAP_FAILED)
189 return -errno;
190
191 fill->mask = umem->config.fill_size - 1;
192 fill->size = umem->config.fill_size;
193 fill->producer = map + off.fr.producer;
194 fill->consumer = map + off.fr.consumer;
195 fill->flags = map + off.fr.flags;
196 fill->ring = map + off.fr.desc;
197 fill->cached_cons = umem->config.fill_size;
198
199 map = mmap(NULL, off.cr.desc + umem->config.comp_size * sizeof(__u64),
200 PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
201 XDP_UMEM_PGOFF_COMPLETION_RING);
202 if (map == MAP_FAILED) {
203 err = -errno;
204 goto out_mmap;
205 }
206
207 comp->mask = umem->config.comp_size - 1;
208 comp->size = umem->config.comp_size;
209 comp->producer = map + off.cr.producer;
210 comp->consumer = map + off.cr.consumer;
211 comp->flags = map + off.cr.flags;
212 comp->ring = map + off.cr.desc;
213
214 return 0;
215
216out_mmap:
217 munmap(map, off.fr.desc + umem->config.fill_size * sizeof(__u64));
218 return err;
219}
220
/* Register @umem_area (@size bytes) as an AF_XDP umem, apply
 * @usr_config (or defaults) and create its fill/completion rings.
 * On success *umem_ptr owns the new umem and the rings are saved for
 * the first socket created on it.  Returns 0 or a negative errno.
 */
int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area,
		     __u64 size, struct xsk_ring_prod *fill,
		     struct xsk_ring_cons *comp,
		     const struct xsk_umem_config *usr_config)
{
	struct xdp_umem_reg mr;
	struct xsk_umem *umem;
	int err;

	if (!umem_area || !umem_ptr || !fill || !comp)
		return -EFAULT;
	/* NOTE(review): alignment is only rejected when size == 0 — confirm
	 * this condition is intended (vs. `!size || !xsk_page_aligned(...)`).
	 */
	if (!size && !xsk_page_aligned(umem_area))
		return -EINVAL;

	umem = calloc(1, sizeof(*umem));
	if (!umem)
		return -ENOMEM;

	umem->fd = socket(AF_XDP, SOCK_RAW | SOCK_CLOEXEC, 0);
	if (umem->fd < 0) {
		err = -errno;
		goto out_umem_alloc;
	}

	umem->umem_area = umem_area;
	INIT_LIST_HEAD(&umem->ctx_list);
	xsk_set_umem_config(&umem->config, usr_config);

	/* Register the memory area with the kernel. */
	memset(&mr, 0, sizeof(mr));
	mr.addr = (uintptr_t)umem_area;
	mr.len = size;
	mr.chunk_size = umem->config.frame_size;
	mr.headroom = umem->config.frame_headroom;
	mr.flags = umem->config.flags;

	err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
	if (err) {
		err = -errno;
		goto out_socket;
	}

	err = xsk_create_umem_rings(umem, umem->fd, fill, comp);
	if (err)
		goto out_socket;

	/* Save the caller's rings until the first socket consumes them. */
	umem->fill_save = fill;
	umem->comp_save = comp;
	*umem_ptr = umem;
	return 0;

out_socket:
	close(umem->fd);
out_umem_alloc:
	free(umem);
	return err;
}
277
278bool xsk_is_in_mode(u32 ifindex, int mode)
279{
280 LIBBPF_OPTS(bpf_xdp_query_opts, opts);
281 int ret;
282
283 ret = bpf_xdp_query(ifindex, mode, &opts);
284 if (ret) {
285 printf("XDP mode query returned error %s\n", strerror(errno));
286 return false;
287 }
288
289 if (mode == XDP_FLAGS_DRV_MODE)
290 return opts.attach_mode == XDP_ATTACHED_DRV;
291 else if (mode == XDP_FLAGS_SKB_MODE)
292 return opts.attach_mode == XDP_ATTACHED_SKB;
293
294 return false;
295}
296
/* Lifted from netlink.c in tools/lib/bpf */
/* recvmsg() wrapper that retries on EINTR/EAGAIN.  Returns the byte
 * count received or -errno on a hard failure.
 */
static int netlink_recvmsg(int sock, struct msghdr *mhdr, int flags)
{
	int len;

	for (;;) {
		len = recvmsg(sock, mhdr, flags);
		if (len >= 0)
			return len;
		if (errno != EINTR && errno != EAGAIN)
			return -errno;
	}
}
310
/* Lifted from netlink.c in tools/lib/bpf */
/* Resize iov->iov_base to @len bytes.  On success iov_len is updated;
 * on failure @iov is left untouched and -ENOMEM is returned.
 */
static int alloc_iov(struct iovec *iov, int len)
{
	void *buf = realloc(iov->iov_base, len);

	if (!buf)
		return -ENOMEM;

	iov->iov_base = buf;
	iov->iov_len = len;
	return 0;
}
324
/* Original version lifted from netlink.c in tools/lib/bpf */
/* Drain the kernel's reply to a netlink request on @sock, growing the
 * receive buffer as needed and following NLM_F_MULTI multipart
 * replies.  Returns 0 on NLMSG_DONE / clean end of stream, or a
 * negative error from recvmsg, allocation, or an NLMSG_ERROR payload.
 */
static int netlink_recv(int sock)
{
	struct iovec iov = {};
	struct msghdr mhdr = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
	};
	bool multipart = true;
	struct nlmsgerr *err;
	struct nlmsghdr *nh;
	int len, ret;

	ret = alloc_iov(&iov, 4096);
	if (ret)
		goto done;

	while (multipart) {
		multipart = false;
		/* Peek first: MSG_TRUNC makes recvmsg() report the full
		 * message size so the buffer can be grown before the real read.
		 */
		len = netlink_recvmsg(sock, &mhdr, MSG_PEEK | MSG_TRUNC);
		if (len < 0) {
			ret = len;
			goto done;
		}

		/* len >= 0 here, so this signed/unsigned compare is safe. */
		if (len > iov.iov_len) {
			ret = alloc_iov(&iov, len);
			if (ret)
				goto done;
		}

		len = netlink_recvmsg(sock, &mhdr, 0);
		if (len < 0) {
			ret = len;
			goto done;
		}

		if (len == 0)
			break;

		/* Walk every netlink message in this datagram. */
		for (nh = (struct nlmsghdr *)iov.iov_base; NLMSG_OK(nh, len);
		     nh = NLMSG_NEXT(nh, len)) {
			if (nh->nlmsg_flags & NLM_F_MULTI)
				multipart = true;
			switch (nh->nlmsg_type) {
			case NLMSG_ERROR:
				err = (struct nlmsgerr *)NLMSG_DATA(nh);
				if (!err->error)
					continue;	/* error == 0 is an ACK */
				ret = err->error;
				goto done;
			case NLMSG_DONE:
				ret = 0;
				goto done;
			default:
				break;
			}
		}
	}
	ret = 0;
done:
	free(iov.iov_base);
	return ret;
}
389
/* Set the MTU of interface @ifindex to @mtu via an rtnetlink
 * RTM_NEWLINK request carrying an IFLA_MTU attribute.  Returns 0 on
 * success or a negative error from socket()/netlink_recv().
 * NOTE(review): on send() failure a *positive* errno is returned,
 * unlike the -errno convention used elsewhere — confirm callers only
 * test for non-zero.
 */
int xsk_set_mtu(int ifindex, int mtu)
{
	struct nl_mtu_req req;
	struct rtattr *rta;
	int fd, ret;

	fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_ROUTE);
	if (fd < 0)
		return fd;

	memset(&req, 0, sizeof(req));
	req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
	req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	req.nh.nlmsg_type = RTM_NEWLINK;
	req.msg.ifi_family = AF_UNSPEC;
	req.msg.ifi_index = ifindex;
	/* Append the IFLA_MTU attribute directly after the ifinfomsg and
	 * extend nlmsg_len to cover it.
	 */
	rta = (struct rtattr *)(((char *)&req) + NLMSG_ALIGN(req.nh.nlmsg_len));
	rta->rta_type = IFLA_MTU;
	rta->rta_len = RTA_LENGTH(sizeof(unsigned int));
	req.nh.nlmsg_len = NLMSG_ALIGN(req.nh.nlmsg_len) + RTA_LENGTH(sizeof(mtu));
	memcpy(RTA_DATA(rta), &mtu, sizeof(mtu));

	ret = send(fd, &req, req.nh.nlmsg_len, 0);
	if (ret < 0) {
		close(fd);
		return errno;
	}

	/* Wait for the kernel's ACK (or error) to the request. */
	ret = netlink_recv(fd);
	close(fd);
	return ret;
}
422
423int xsk_attach_xdp_program(struct bpf_program *prog, int ifindex, u32 xdp_flags)
424{
425 int prog_fd;
426
427 prog_fd = bpf_program__fd(prog);
428 return bpf_xdp_attach(ifindex, prog_fd, xdp_flags, NULL);
429}
430
431void xsk_detach_xdp_program(int ifindex, u32 xdp_flags)
432{
433 bpf_xdp_detach(ifindex, xdp_flags, NULL);
434}
435
436void xsk_clear_xskmap(struct bpf_map *map)
437{
438 u32 index = 0;
439 int map_fd;
440
441 map_fd = bpf_map__fd(map);
442 bpf_map_delete_elem(map_fd, &index);
443}
444
445int xsk_update_xskmap(struct bpf_map *map, struct xsk_socket *xsk, u32 index)
446{
447 int map_fd, sock_fd;
448
449 map_fd = bpf_map__fd(map);
450 sock_fd = xsk_socket__fd(xsk);
451
452 return bpf_map_update_elem(map_fd, &index, &sock_fd, 0);
453}
454
455static struct xsk_ctx *xsk_get_ctx(struct xsk_umem *umem, int ifindex,
456 __u32 queue_id)
457{
458 struct xsk_ctx *ctx;
459
460 if (list_empty(&umem->ctx_list))
461 return NULL;
462
463 list_for_each_entry(ctx, &umem->ctx_list, list) {
464 if (ctx->ifindex == ifindex && ctx->queue_id == queue_id) {
465 ctx->refcount++;
466 return ctx;
467 }
468 }
469
470 return NULL;
471}
472
/* Drop a reference on @ctx; on the last reference, optionally unmap
 * its fill/completion rings (@unmap), unlink it and free it.
 */
static void xsk_put_ctx(struct xsk_ctx *ctx, bool unmap)
{
	struct xsk_umem *umem = ctx->umem;
	struct xdp_mmap_offsets off;
	int err;

	if (--ctx->refcount)
		return;

	if (!unmap)
		goto out_free;

	err = xsk_get_mmap_offsets(umem->fd, &off);
	if (err)
		goto out_free;

	/* ring points at the descriptor array inside the mapping; subtract
	 * the kernel-reported offset to recover the mmap base address.
	 */
	munmap(ctx->fill->ring - off.fr.desc, off.fr.desc + umem->config.fill_size *
	       sizeof(__u64));
	munmap(ctx->comp->ring - off.cr.desc, off.cr.desc + umem->config.comp_size *
	       sizeof(__u64));

out_free:
	list_del(&ctx->list);
	free(ctx);
}
498
/* Allocate a context for (@ifindex, @queue_id) and link it into
 * @umem's context list.  If the umem's saved rings were already
 * consumed, fresh rings are created on @xsk's fd; otherwise the saved
 * rings are copied into the caller's structs.  Returns NULL on
 * allocation or ring-creation failure.
 */
static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk,
				      struct xsk_umem *umem, int ifindex,
				      __u32 queue_id,
				      struct xsk_ring_prod *fill,
				      struct xsk_ring_cons *comp)
{
	struct xsk_ctx *ctx;
	int err;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx)
		return NULL;

	if (!umem->fill_save) {
		/* The umem's rings were consumed by an earlier socket:
		 * create new fill/completion rings on this socket's fd.
		 */
		err = xsk_create_umem_rings(umem, xsk->fd, fill, comp);
		if (err) {
			free(ctx);
			return NULL;
		}
	} else if (umem->fill_save != fill || umem->comp_save != comp) {
		/* Copy over rings to new structs. */
		memcpy(fill, umem->fill_save, sizeof(*fill));
		memcpy(comp, umem->comp_save, sizeof(*comp));
	}

	ctx->ifindex = ifindex;
	ctx->refcount = 1;	/* caller holds the initial reference */
	ctx->umem = umem;
	ctx->queue_id = queue_id;

	ctx->fill = fill;
	ctx->comp = comp;
	list_add(&ctx->list, &umem->ctx_list);
	return ctx;
}
534
/* Create an AF_XDP socket on (@ifindex, @queue_id) sharing @umem.
 * @rx/@tx may each be NULL for a TX-only/RX-only socket, but at least
 * one is required.  @fill/@comp are required only when this is the
 * first socket for this (ifindex, queue_id) pair.  On success stores
 * the new socket in *xsk_ptr and returns 0; returns a negative errno
 * on failure.
 */
int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
			      int ifindex,
			      __u32 queue_id, struct xsk_umem *umem,
			      struct xsk_ring_cons *rx,
			      struct xsk_ring_prod *tx,
			      struct xsk_ring_prod *fill,
			      struct xsk_ring_cons *comp,
			      const struct xsk_socket_config *usr_config)
{
	bool unmap, rx_setup_done = false, tx_setup_done = false;
	void *rx_map = NULL, *tx_map = NULL;
	struct sockaddr_xdp sxdp = {};
	struct xdp_mmap_offsets off;
	struct xsk_socket *xsk;
	struct xsk_ctx *ctx;
	int err;

	if (!umem || !xsk_ptr || !(rx || tx))
		return -EFAULT;

	/* If the caller passed the rings saved at umem creation, they are
	 * the umem's own rings and must not be unmapped on the error path.
	 */
	unmap = umem->fill_save != fill;

	xsk = calloc(1, sizeof(*xsk));
	if (!xsk)
		return -ENOMEM;

	err = xsk_set_xdp_socket_config(&xsk->config, usr_config);
	if (err)
		goto out_xsk_alloc;

	/* The first socket reuses the umem's fd; later ones get their own. */
	if (umem->refcount++ > 0) {
		xsk->fd = socket(AF_XDP, SOCK_RAW | SOCK_CLOEXEC, 0);
		if (xsk->fd < 0) {
			err = -errno;
			goto out_xsk_alloc;
		}
	} else {
		xsk->fd = umem->fd;
		rx_setup_done = umem->rx_ring_setup_done;
		tx_setup_done = umem->tx_ring_setup_done;
	}

	/* Reuse an existing (ifindex, queue_id) context or create one —
	 * creation needs the fill/completion rings.
	 */
	ctx = xsk_get_ctx(umem, ifindex, queue_id);
	if (!ctx) {
		if (!fill || !comp) {
			err = -EFAULT;
			goto out_socket;
		}

		ctx = xsk_create_ctx(xsk, umem, ifindex, queue_id, fill, comp);
		if (!ctx) {
			err = -ENOMEM;
			goto out_socket;
		}
	}
	xsk->ctx = ctx;

	if (rx && !rx_setup_done) {
		err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
				 &xsk->config.rx_size,
				 sizeof(xsk->config.rx_size));
		if (err) {
			err = -errno;
			goto out_put_ctx;
		}
		if (xsk->fd == umem->fd)
			umem->rx_ring_setup_done = true;
	}
	if (tx && !tx_setup_done) {
		err = setsockopt(xsk->fd, SOL_XDP, XDP_TX_RING,
				 &xsk->config.tx_size,
				 sizeof(xsk->config.tx_size));
		if (err) {
			err = -errno;
			goto out_put_ctx;
		}
		if (xsk->fd == umem->fd)
			umem->tx_ring_setup_done = true;
	}

	/* NOTE(review): xsk_get_mmap_offsets() may fail with -EINVAL
	 * without setting errno; -errno below could then be stale — confirm.
	 */
	err = xsk_get_mmap_offsets(xsk->fd, &off);
	if (err) {
		err = -errno;
		goto out_put_ctx;
	}

	if (rx) {
		/* Map the RX ring and point the ring struct at the
		 * kernel-reported offsets inside the mapping.
		 */
		rx_map = mmap(NULL, off.rx.desc +
			      xsk->config.rx_size * sizeof(struct xdp_desc),
			      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
			      xsk->fd, XDP_PGOFF_RX_RING);
		if (rx_map == MAP_FAILED) {
			err = -errno;
			goto out_put_ctx;
		}

		rx->mask = xsk->config.rx_size - 1;
		rx->size = xsk->config.rx_size;
		rx->producer = rx_map + off.rx.producer;
		rx->consumer = rx_map + off.rx.consumer;
		rx->flags = rx_map + off.rx.flags;
		rx->ring = rx_map + off.rx.desc;
		rx->cached_prod = *rx->producer;
		rx->cached_cons = *rx->consumer;
	}
	xsk->rx = rx;

	if (tx) {
		tx_map = mmap(NULL, off.tx.desc +
			      xsk->config.tx_size * sizeof(struct xdp_desc),
			      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
			      xsk->fd, XDP_PGOFF_TX_RING);
		if (tx_map == MAP_FAILED) {
			err = -errno;
			goto out_mmap_rx;
		}

		tx->mask = xsk->config.tx_size - 1;
		tx->size = xsk->config.tx_size;
		tx->producer = tx_map + off.tx.producer;
		tx->consumer = tx_map + off.tx.consumer;
		tx->flags = tx_map + off.tx.flags;
		tx->ring = tx_map + off.tx.desc;
		tx->cached_prod = *tx->producer;
		/* cached_cons is r->size bigger than the real consumer pointer
		 * See xsk_prod_nb_free
		 */
		tx->cached_cons = *tx->consumer + xsk->config.tx_size;
	}
	xsk->tx = tx;

	sxdp.sxdp_family = PF_XDP;
	sxdp.sxdp_ifindex = ctx->ifindex;
	sxdp.sxdp_queue_id = ctx->queue_id;
	if (umem->refcount > 1) {
		/* Subsequent sockets bind in shared-umem mode against the
		 * umem's fd rather than using the configured bind flags.
		 */
		sxdp.sxdp_flags |= XDP_SHARED_UMEM;
		sxdp.sxdp_shared_umem_fd = umem->fd;
	} else {
		sxdp.sxdp_flags = xsk->config.bind_flags;
	}

	err = bind(xsk->fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
	if (err) {
		err = -errno;
		goto out_mmap_tx;
	}

	*xsk_ptr = xsk;
	/* The saved rings are consumed by the first successful creation. */
	umem->fill_save = NULL;
	umem->comp_save = NULL;
	return 0;

out_mmap_tx:
	if (tx)
		munmap(tx_map, off.tx.desc +
		       xsk->config.tx_size * sizeof(struct xdp_desc));
out_mmap_rx:
	if (rx)
		munmap(rx_map, off.rx.desc +
		       xsk->config.rx_size * sizeof(struct xdp_desc));
out_put_ctx:
	xsk_put_ctx(ctx, unmap);
out_socket:
	/* Only close a private fd; if refcount drops back to 0 the fd is
	 * the umem's and must stay open.
	 */
	if (--umem->refcount)
		close(xsk->fd);
out_xsk_alloc:
	free(xsk);
	return err;
}
704
705int xsk_socket__create(struct xsk_socket **xsk_ptr, int ifindex,
706 __u32 queue_id, struct xsk_umem *umem,
707 struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
708 const struct xsk_socket_config *usr_config)
709{
710 if (!umem)
711 return -EFAULT;
712
713 return xsk_socket__create_shared(xsk_ptr, ifindex, queue_id, umem,
714 rx, tx, umem->fill_save,
715 umem->comp_save, usr_config);
716}
717
/* Destroy @umem: unmap its saved fill/completion rings (when never
 * consumed by a socket), close its fd and free it.  Returns -EBUSY
 * while sockets still reference the umem; otherwise 0 (NULL is a
 * no-op).
 */
int xsk_umem__delete(struct xsk_umem *umem)
{
	struct xdp_mmap_offsets off;
	int err;

	if (!umem)
		return 0;

	if (umem->refcount)
		return -EBUSY;

	err = xsk_get_mmap_offsets(umem->fd, &off);
	if (!err && umem->fill_save && umem->comp_save) {
		/* recover each mmap base from its descriptor-array pointer */
		munmap(umem->fill_save->ring - off.fr.desc,
		       off.fr.desc + umem->config.fill_size * sizeof(__u64));
		munmap(umem->comp_save->ring - off.cr.desc,
		       off.cr.desc + umem->config.comp_size * sizeof(__u64));
	}

	close(umem->fd);
	free(umem);

	return 0;
}
742
/* Tear down @xsk: release its context, unmap its RX/TX rings, close
 * its fd (unless shared with the umem) and free it.  NULL is a no-op.
 */
void xsk_socket__delete(struct xsk_socket *xsk)
{
	size_t desc_sz = sizeof(struct xdp_desc);
	struct xdp_mmap_offsets off;
	struct xsk_umem *umem;
	struct xsk_ctx *ctx;
	int err;

	if (!xsk)
		return;

	ctx = xsk->ctx;
	umem = ctx->umem;

	xsk_put_ctx(ctx, true);

	err = xsk_get_mmap_offsets(xsk->fd, &off);
	if (!err) {
		if (xsk->rx) {
			/* recover the mmap base from the descriptor pointer */
			munmap(xsk->rx->ring - off.rx.desc,
			       off.rx.desc + xsk->config.rx_size * desc_sz);
		}
		if (xsk->tx) {
			munmap(xsk->tx->ring - off.tx.desc,
			       off.tx.desc + xsk->config.tx_size * desc_sz);
		}
	}

	umem->refcount--;
	/* Do not close an fd that also has an associated umem connected
	 * to it.
	 */
	if (xsk->fd != umem->fd)
		close(xsk->fd);
	free(xsk);
}