// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "netlink.h"
#include "device.h"
#include "peer.h"
#include "socket.h"
#include "queueing.h"
#include "messages.h"
#include "generated/netlink.h"

#include <uapi/linux/wireguard.h>

#include <linux/if.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <crypto/utils.h>

static struct genl_family genl_family;

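/* Resolve the wg_device targeted by a request. Exactly one of
 * WGDEVICE_A_IFINDEX or WGDEVICE_A_IFNAME must be present, and the resolved
 * netdev must actually be a wireguard device, checked via its
 * rtnl_link_ops kind. On success a reference to the underlying net_device
 * is held, which the caller must drop with dev_put().
 */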
static struct wg_device *lookup_interface(struct nlattr **attrs,
                                          struct sk_buff *skb)
{
        struct net_device *dev = NULL;

        if (!attrs[WGDEVICE_A_IFINDEX] == !attrs[WGDEVICE_A_IFNAME])
                return ERR_PTR(-EBADR);
        if (attrs[WGDEVICE_A_IFINDEX])
                dev = dev_get_by_index(sock_net(skb->sk),
                                       nla_get_u32(attrs[WGDEVICE_A_IFINDEX]));
        else if (attrs[WGDEVICE_A_IFNAME])
                dev = dev_get_by_name(sock_net(skb->sk),
                                      nla_data(attrs[WGDEVICE_A_IFNAME]));
        if (!dev)
                return ERR_PTR(-ENODEV);
        if (!dev->rtnl_link_ops || !dev->rtnl_link_ops->kind ||
            strcmp(dev->rtnl_link_ops->kind, KBUILD_MODNAME)) {
                dev_put(dev);
                return ERR_PTR(-EOPNOTSUPP);
        }
        return netdev_priv(dev);
}

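/* Serialize a single allowed IP as an anonymous nested attribute containing
 * its CIDR mask, address family, and address. Returns -EMSGSIZE if the skb
 * is full, so the caller can close out this message and resume in the next.
 */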
static int get_allowedips(struct sk_buff *skb, const u8 *ip, u8 cidr,
                          int family)
{
        struct nlattr *allowedip_nest;

        allowedip_nest = nla_nest_start(skb, 0);
        if (!allowedip_nest)
                return -EMSGSIZE;

        if (nla_put_u8(skb, WGALLOWEDIP_A_CIDR_MASK, cidr) ||
            nla_put_u16(skb, WGALLOWEDIP_A_FAMILY, family) ||
            nla_put(skb, WGALLOWEDIP_A_IPADDR, family == AF_INET6 ?
                    sizeof(struct in6_addr) : sizeof(struct in_addr), ip)) {
                nla_nest_cancel(skb, allowedip_nest);
                return -EMSGSIZE;
        }

        nla_nest_end(skb, allowedip_nest);
        return 0;
}

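/* Cursor state for a multi-message dump. This lives directly in the
 * netlink_callback's scratch space (cb->args), via the DUMP_CTX() cast
 * below, so it persists across successive dumpit invocations.
 */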
struct dump_ctx {
        struct wg_device *wg;
        struct wg_peer *next_peer;
        u64 allowedips_seq;
        struct allowedips_node *next_allowedip;
};

#define DUMP_CTX(cb) ((struct dump_ctx *)(cb)->args)

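/* Emit one peer as a nested attribute. If the skb fills up partway through
 * the peer's allowed IPs, the current position is recorded in
 * ctx->next_allowedip and -EMSGSIZE is returned, so the next dump message
 * picks up where this one left off. ctx->allowedips_seq guards against the
 * allowedips trie changing between those messages: on mismatch the
 * remaining allowed IPs are skipped, and the dump's consistency sequence
 * number should then indicate to userspace that it needs to retry.
 */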
static int
get_peer(struct wg_peer *peer, struct sk_buff *skb, struct dump_ctx *ctx)
{
        struct nlattr *allowedips_nest, *peer_nest = nla_nest_start(skb, 0);
        struct allowedips_node *allowedips_node = ctx->next_allowedip;
        bool fail;

        if (!peer_nest)
                return -EMSGSIZE;

        down_read(&peer->handshake.lock);
        fail = nla_put(skb, WGPEER_A_PUBLIC_KEY, NOISE_PUBLIC_KEY_LEN,
                       peer->handshake.remote_static);
        up_read(&peer->handshake.lock);
        if (fail)
                goto err;

        if (!allowedips_node) {
                const struct __kernel_timespec last_handshake = {
                        .tv_sec = peer->walltime_last_handshake.tv_sec,
                        .tv_nsec = peer->walltime_last_handshake.tv_nsec
                };

                down_read(&peer->handshake.lock);
                fail = nla_put(skb, WGPEER_A_PRESHARED_KEY,
                               NOISE_SYMMETRIC_KEY_LEN,
                               peer->handshake.preshared_key);
                up_read(&peer->handshake.lock);
                if (fail)
                        goto err;

                if (nla_put(skb, WGPEER_A_LAST_HANDSHAKE_TIME,
                            sizeof(last_handshake), &last_handshake) ||
                    nla_put_u16(skb, WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL,
                                peer->persistent_keepalive_interval) ||
                    nla_put_u64_64bit(skb, WGPEER_A_TX_BYTES, peer->tx_bytes,
                                      WGPEER_A_UNSPEC) ||
                    nla_put_u64_64bit(skb, WGPEER_A_RX_BYTES, peer->rx_bytes,
                                      WGPEER_A_UNSPEC) ||
                    nla_put_u32(skb, WGPEER_A_PROTOCOL_VERSION, 1))
                        goto err;

                read_lock_bh(&peer->endpoint_lock);
                if (peer->endpoint.addr.sa_family == AF_INET)
                        fail = nla_put(skb, WGPEER_A_ENDPOINT,
                                       sizeof(peer->endpoint.addr4),
                                       &peer->endpoint.addr4);
                else if (peer->endpoint.addr.sa_family == AF_INET6)
                        fail = nla_put(skb, WGPEER_A_ENDPOINT,
                                       sizeof(peer->endpoint.addr6),
                                       &peer->endpoint.addr6);
                read_unlock_bh(&peer->endpoint_lock);
                if (fail)
                        goto err;
                allowedips_node =
                        list_first_entry_or_null(&peer->allowedips_list,
                                        struct allowedips_node, peer_list);
        }
        if (!allowedips_node)
                goto no_allowedips;
        if (!ctx->allowedips_seq)
                ctx->allowedips_seq = ctx->wg->peer_allowedips.seq;
        else if (ctx->allowedips_seq != ctx->wg->peer_allowedips.seq)
                goto no_allowedips;

        allowedips_nest = nla_nest_start(skb, WGPEER_A_ALLOWEDIPS);
        if (!allowedips_nest)
                goto err;

        list_for_each_entry_from(allowedips_node, &peer->allowedips_list,
                                 peer_list) {
                u8 cidr, ip[16] __aligned(__alignof(u64));
                int family;

                family = wg_allowedips_read_node(allowedips_node, ip, &cidr);
                if (get_allowedips(skb, ip, cidr, family)) {
                        nla_nest_end(skb, allowedips_nest);
                        nla_nest_end(skb, peer_nest);
                        ctx->next_allowedip = allowedips_node;
                        return -EMSGSIZE;
                }
        }
        nla_nest_end(skb, allowedips_nest);
no_allowedips:
        nla_nest_end(skb, peer_nest);
        ctx->next_allowedip = NULL;
        ctx->allowedips_seq = 0;
        return 0;
err:
        nla_nest_cancel(skb, peer_nest);
        return -EMSGSIZE;
}

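/* Dump .start callback: resolve the target interface once and stash it in
 * the per-dump cursor. The device reference taken here is released in
 * wg_get_device_done().
 */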
int wg_get_device_start(struct netlink_callback *cb)
{
        struct wg_device *wg;

        wg = lookup_interface(genl_info_dump(cb)->attrs, cb->skb);
        if (IS_ERR(wg))
                return PTR_ERR(wg);
        DUMP_CTX(cb)->wg = wg;
        return 0;
}

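/* Dump .dumpit callback, called repeatedly until it returns 0. Each call
 * produces one WG_CMD_GET_DEVICE message: the device-level attributes are
 * emitted only in the first message (when no peer cursor is set yet),
 * followed by as many peers as fit in WGDEVICE_A_PEERS. Returning skb->len
 * means "more to come"; returning 0 ends the dump. Roughly, a complete
 * reply looks like:
 *
 *   WGDEVICE_A_LISTEN_PORT, WGDEVICE_A_FWMARK, WGDEVICE_A_IFINDEX,
 *   WGDEVICE_A_IFNAME, [WGDEVICE_A_PRIVATE_KEY, WGDEVICE_A_PUBLIC_KEY]
 *   WGDEVICE_A_PEERS
 *     0: WGPEER_A_PUBLIC_KEY, WGPEER_A_ENDPOINT, ...
 *        WGPEER_A_ALLOWEDIPS
 *          0: WGALLOWEDIP_A_CIDR_MASK, WGALLOWEDIP_A_FAMILY,
 *             WGALLOWEDIP_A_IPADDR
 *     ...
 */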
int wg_get_device_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct wg_peer *peer, *next_peer_cursor;
        struct dump_ctx *ctx = DUMP_CTX(cb);
        struct wg_device *wg = ctx->wg;
        struct nlattr *peers_nest;
        int ret = -EMSGSIZE;
        bool done = true;
        void *hdr;

        rtnl_lock();
        mutex_lock(&wg->device_update_lock);
        cb->seq = wg->device_update_gen;
        next_peer_cursor = ctx->next_peer;

        hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                          &genl_family, NLM_F_MULTI, WG_CMD_GET_DEVICE);
        if (!hdr)
                goto out;
        genl_dump_check_consistent(cb, hdr);

        if (!ctx->next_peer) {
                if (nla_put_u16(skb, WGDEVICE_A_LISTEN_PORT,
                                wg->incoming_port) ||
                    nla_put_u32(skb, WGDEVICE_A_FWMARK, wg->fwmark) ||
                    nla_put_u32(skb, WGDEVICE_A_IFINDEX, wg->dev->ifindex) ||
                    nla_put_string(skb, WGDEVICE_A_IFNAME, wg->dev->name))
                        goto out;

                down_read(&wg->static_identity.lock);
                if (wg->static_identity.has_identity) {
                        if (nla_put(skb, WGDEVICE_A_PRIVATE_KEY,
                                    NOISE_PUBLIC_KEY_LEN,
                                    wg->static_identity.static_private) ||
                            nla_put(skb, WGDEVICE_A_PUBLIC_KEY,
                                    NOISE_PUBLIC_KEY_LEN,
                                    wg->static_identity.static_public)) {
                                up_read(&wg->static_identity.lock);
                                goto out;
                        }
                }
                up_read(&wg->static_identity.lock);
        }

        peers_nest = nla_nest_start(skb, WGDEVICE_A_PEERS);
        if (!peers_nest)
                goto out;
        ret = 0;
        lockdep_assert_held(&wg->device_update_lock);
        /* If the last cursor was removed in peer_remove or peer_remove_all, then
         * we just treat this the same as there being no more peers left. The
         * reason is that seq_nr should indicate to userspace that this isn't a
         * coherent dump anyway, so they'll try again.
         */
        if (list_empty(&wg->peer_list) ||
            (ctx->next_peer && ctx->next_peer->is_dead)) {
                nla_nest_cancel(skb, peers_nest);
                goto out;
        }
        peer = list_prepare_entry(ctx->next_peer, &wg->peer_list, peer_list);
        list_for_each_entry_continue(peer, &wg->peer_list, peer_list) {
                if (get_peer(peer, skb, ctx)) {
                        done = false;
                        break;
                }
                next_peer_cursor = peer;
        }
        nla_nest_end(skb, peers_nest);

out:
        if (!ret && !done && next_peer_cursor)
                wg_peer_get(next_peer_cursor);
        wg_peer_put(ctx->next_peer);
        mutex_unlock(&wg->device_update_lock);
        rtnl_unlock();

        if (ret) {
                genlmsg_cancel(skb, hdr);
                return ret;
        }
        genlmsg_end(skb, hdr);
        if (done) {
                ctx->next_peer = NULL;
                return 0;
        }
        ctx->next_peer = next_peer_cursor;
        return skb->len;

        /* At this point, we can't really deal ourselves with safely zeroing out
         * the private key material after usage. This will need an additional API
         * in the kernel for marking skbs as zero_on_free.
         */
}

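/* Dump .done callback: drop the device reference taken by
 * wg_get_device_start() and the reference on the last partially-dumped
 * peer, if any.
 */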
int wg_get_device_done(struct netlink_callback *cb)
{
        struct dump_ctx *ctx = DUMP_CTX(cb);

        if (ctx->wg)
                dev_put(ctx->wg->dev);
        wg_peer_put(ctx->next_peer);
        return 0;
}

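/* Change the listen port. Cached source addresses for all peer endpoints
 * are cleared, since the local socket is about to change. If the interface
 * is down, just record the port; the sockets are (re)created when the
 * device is next brought up.
 */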
static int set_port(struct wg_device *wg, u16 port)
{
        struct wg_peer *peer;

        if (wg->incoming_port == port)
                return 0;
        list_for_each_entry(peer, &wg->peer_list, peer_list)
                wg_socket_clear_peer_endpoint_src(peer);
        if (!netif_running(wg->dev)) {
                wg->incoming_port = port;
                return 0;
        }
        return wg_socket_init(wg, port);
}

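/* Apply one WGPEER_A_ALLOWEDIPS entry to a peer: insert by default, or
 * remove when WGALLOWEDIP_F_REMOVE_ME is set in WGALLOWEDIP_A_FLAGS. The
 * address length must match the claimed family, and the CIDR must be at
 * most 32 for AF_INET or 128 for AF_INET6; anything else yields -EINVAL.
 */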
static int set_allowedip(struct wg_peer *peer, struct nlattr **attrs)
{
        int ret = -EINVAL;
        u32 flags = 0;
        u16 family;
        u8 cidr;

        if (!attrs[WGALLOWEDIP_A_FAMILY] || !attrs[WGALLOWEDIP_A_IPADDR] ||
            !attrs[WGALLOWEDIP_A_CIDR_MASK])
                return ret;
        family = nla_get_u16(attrs[WGALLOWEDIP_A_FAMILY]);
        cidr = nla_get_u8(attrs[WGALLOWEDIP_A_CIDR_MASK]);
        if (attrs[WGALLOWEDIP_A_FLAGS])
                flags = nla_get_u32(attrs[WGALLOWEDIP_A_FLAGS]);

        if (family == AF_INET && cidr <= 32 &&
            nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in_addr)) {
                if (flags & WGALLOWEDIP_F_REMOVE_ME)
                        ret = wg_allowedips_remove_v4(&peer->device->peer_allowedips,
                                        nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr,
                                        peer, &peer->device->device_update_lock);
                else
                        ret = wg_allowedips_insert_v4(&peer->device->peer_allowedips,
                                        nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr,
                                        peer, &peer->device->device_update_lock);
        } else if (family == AF_INET6 && cidr <= 128 &&
                   nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in6_addr)) {
                if (flags & WGALLOWEDIP_F_REMOVE_ME)
                        ret = wg_allowedips_remove_v6(&peer->device->peer_allowedips,
                                        nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr,
                                        peer, &peer->device->device_update_lock);
                else
                        ret = wg_allowedips_insert_v6(&peer->device->peer_allowedips,
                                        nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr,
                                        peer, &peer->device->device_update_lock);
        }

        return ret;
}

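/* Create or update a single peer from a parsed WGPEER_A_* attribute set.
 * A valid WGPEER_A_PUBLIC_KEY is mandatory and identifies the peer;
 * WGPEER_F_UPDATE_ONLY refuses to create a missing peer, WGPEER_F_REMOVE_ME
 * deletes it, and WGPEER_F_REPLACE_ALLOWEDIPS flushes its allowed IPs
 * before any new ones are inserted. Any preshared key material passed in
 * the attribute buffer is zeroed before returning.
 */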
static int set_peer(struct wg_device *wg, struct nlattr **attrs)
{
        u8 *public_key = NULL, *preshared_key = NULL;
        struct wg_peer *peer = NULL;
        u32 flags = 0;
        int ret;

        ret = -EINVAL;
        if (attrs[WGPEER_A_PUBLIC_KEY] &&
            nla_len(attrs[WGPEER_A_PUBLIC_KEY]) == NOISE_PUBLIC_KEY_LEN)
                public_key = nla_data(attrs[WGPEER_A_PUBLIC_KEY]);
        else
                goto out;
        if (attrs[WGPEER_A_PRESHARED_KEY] &&
            nla_len(attrs[WGPEER_A_PRESHARED_KEY]) == NOISE_SYMMETRIC_KEY_LEN)
                preshared_key = nla_data(attrs[WGPEER_A_PRESHARED_KEY]);

        if (attrs[WGPEER_A_FLAGS])
                flags = nla_get_u32(attrs[WGPEER_A_FLAGS]);

        ret = -EPFNOSUPPORT;
        if (attrs[WGPEER_A_PROTOCOL_VERSION]) {
                if (nla_get_u32(attrs[WGPEER_A_PROTOCOL_VERSION]) != 1)
                        goto out;
        }

        peer = wg_pubkey_hashtable_lookup(wg->peer_hashtable,
                                          nla_data(attrs[WGPEER_A_PUBLIC_KEY]));
        ret = 0;
        if (!peer) { /* Peer doesn't exist yet. Add a new one. */
                if (flags & (WGPEER_F_REMOVE_ME | WGPEER_F_UPDATE_ONLY))
                        goto out;

                /* The peer is new, so there aren't allowed IPs to remove. */
                flags &= ~WGPEER_F_REPLACE_ALLOWEDIPS;

                down_read(&wg->static_identity.lock);
                if (wg->static_identity.has_identity &&
                    !memcmp(nla_data(attrs[WGPEER_A_PUBLIC_KEY]),
                            wg->static_identity.static_public,
                            NOISE_PUBLIC_KEY_LEN)) {
                        /* We silently ignore peers that have the same public
                         * key as the device. The reason we do it silently is
                         * that we'd like for people to be able to reuse the
                         * same set of API calls across peers.
                         */
                        up_read(&wg->static_identity.lock);
                        ret = 0;
                        goto out;
                }
                up_read(&wg->static_identity.lock);

                peer = wg_peer_create(wg, public_key, preshared_key);
                if (IS_ERR(peer)) {
                        ret = PTR_ERR(peer);
                        peer = NULL;
                        goto out;
                }
                /* Take additional reference, as though we've just been
                 * looked up.
                 */
                wg_peer_get(peer);
        }

        if (flags & WGPEER_F_REMOVE_ME) {
                wg_peer_remove(peer);
                goto out;
        }

        if (preshared_key) {
                down_write(&peer->handshake.lock);
                memcpy(&peer->handshake.preshared_key, preshared_key,
                       NOISE_SYMMETRIC_KEY_LEN);
                up_write(&peer->handshake.lock);
        }

        if (attrs[WGPEER_A_ENDPOINT]) {
                struct sockaddr *addr = nla_data(attrs[WGPEER_A_ENDPOINT]);
                size_t len = nla_len(attrs[WGPEER_A_ENDPOINT]);
                struct endpoint endpoint = { { { 0 } } };

                if (len == sizeof(struct sockaddr_in) && addr->sa_family == AF_INET) {
                        endpoint.addr4 = *(struct sockaddr_in *)addr;
                        wg_socket_set_peer_endpoint(peer, &endpoint);
                } else if (len == sizeof(struct sockaddr_in6) && addr->sa_family == AF_INET6) {
                        endpoint.addr6 = *(struct sockaddr_in6 *)addr;
                        wg_socket_set_peer_endpoint(peer, &endpoint);
                }
        }

        if (flags & WGPEER_F_REPLACE_ALLOWEDIPS)
                wg_allowedips_remove_by_peer(&wg->peer_allowedips, peer,
                                             &wg->device_update_lock);

        if (attrs[WGPEER_A_ALLOWEDIPS]) {
                struct nlattr *attr, *allowedip[WGALLOWEDIP_A_MAX + 1];
                int rem;

                nla_for_each_nested(attr, attrs[WGPEER_A_ALLOWEDIPS], rem) {
                        ret = nla_parse_nested(allowedip, WGALLOWEDIP_A_MAX,
                                               attr, NULL, NULL);
                        if (ret < 0)
                                goto out;
                        ret = set_allowedip(peer, allowedip);
                        if (ret < 0)
                                goto out;
                }
        }

        if (attrs[WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL]) {
                const u16 persistent_keepalive_interval = nla_get_u16(
                                attrs[WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL]);
                const bool send_keepalive =
                        !peer->persistent_keepalive_interval &&
                        persistent_keepalive_interval &&
                        netif_running(wg->dev);

                peer->persistent_keepalive_interval = persistent_keepalive_interval;
                if (send_keepalive)
                        wg_packet_send_keepalive(peer);
        }

        if (netif_running(wg->dev))
                wg_packet_send_staged_packets(peer);

out:
        wg_peer_put(peer);
        if (attrs[WGPEER_A_PRESHARED_KEY])
                memzero_explicit(nla_data(attrs[WGPEER_A_PRESHARED_KEY]),
                                 nla_len(attrs[WGPEER_A_PRESHARED_KEY]));
        return ret;
}

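/* WG_CMD_SET_DEVICE handler. A set request is, roughly:
 *
 *   WGDEVICE_A_IFNAME (or WGDEVICE_A_IFINDEX)
 *   [WGDEVICE_A_FLAGS, WGDEVICE_A_PRIVATE_KEY, WGDEVICE_A_LISTEN_PORT,
 *    WGDEVICE_A_FWMARK]
 *   [WGDEVICE_A_PEERS]
 *     0: (nested peer, handled by set_peer() above)
 *     ...
 *
 * Changing the listen port or fwmark requires CAP_NET_ADMIN in the netns
 * the device was created in. Every invocation bumps device_update_gen,
 * which invalidates any concurrent dump. The private key attribute, if
 * present, is zeroed from the message buffer on exit.
 */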
int wg_set_device_doit(struct sk_buff *skb, struct genl_info *info)
{
        struct wg_device *wg = lookup_interface(info->attrs, skb);
        u32 flags = 0;
        int ret;

        if (IS_ERR(wg)) {
                ret = PTR_ERR(wg);
                goto out_nodev;
        }

        rtnl_lock();
        mutex_lock(&wg->device_update_lock);

        if (info->attrs[WGDEVICE_A_FLAGS])
                flags = nla_get_u32(info->attrs[WGDEVICE_A_FLAGS]);

        if (info->attrs[WGDEVICE_A_LISTEN_PORT] || info->attrs[WGDEVICE_A_FWMARK]) {
                struct net *net;

                rcu_read_lock();
                net = rcu_dereference(wg->creating_net);
                ret = !net || !ns_capable(net->user_ns, CAP_NET_ADMIN) ? -EPERM : 0;
                rcu_read_unlock();
                if (ret)
                        goto out;
        }

        ++wg->device_update_gen;

        if (info->attrs[WGDEVICE_A_FWMARK]) {
                struct wg_peer *peer;

                wg->fwmark = nla_get_u32(info->attrs[WGDEVICE_A_FWMARK]);
                list_for_each_entry(peer, &wg->peer_list, peer_list)
                        wg_socket_clear_peer_endpoint_src(peer);
        }

        if (info->attrs[WGDEVICE_A_LISTEN_PORT]) {
                ret = set_port(wg,
                               nla_get_u16(info->attrs[WGDEVICE_A_LISTEN_PORT]));
                if (ret)
                        goto out;
        }

        if (flags & WGDEVICE_F_REPLACE_PEERS)
                wg_peer_remove_all(wg);

        if (info->attrs[WGDEVICE_A_PRIVATE_KEY] &&
            nla_len(info->attrs[WGDEVICE_A_PRIVATE_KEY]) ==
                    NOISE_PUBLIC_KEY_LEN) {
                u8 *private_key = nla_data(info->attrs[WGDEVICE_A_PRIVATE_KEY]);
                u8 public_key[NOISE_PUBLIC_KEY_LEN];
                struct wg_peer *peer, *temp;
                bool send_staged_packets;

                if (!crypto_memneq(wg->static_identity.static_private,
                                   private_key, NOISE_PUBLIC_KEY_LEN))
                        goto skip_set_private_key;

                /* We remove before setting, to prevent race, which means doing
                 * two 25519-genpub ops.
                 */
                if (curve25519_generate_public(public_key, private_key)) {
                        peer = wg_pubkey_hashtable_lookup(wg->peer_hashtable,
                                                          public_key);
                        if (peer) {
                                wg_peer_put(peer);
                                wg_peer_remove(peer);
                        }
                }

                down_write(&wg->static_identity.lock);
                send_staged_packets = !wg->static_identity.has_identity && netif_running(wg->dev);
                wg_noise_set_static_identity_private_key(&wg->static_identity, private_key);
                send_staged_packets = send_staged_packets && wg->static_identity.has_identity;

                wg_cookie_checker_precompute_device_keys(&wg->cookie_checker);
                list_for_each_entry_safe(peer, temp, &wg->peer_list, peer_list) {
                        wg_noise_precompute_static_static(peer);
                        wg_noise_expire_current_peer_keypairs(peer);
                        if (send_staged_packets)
                                wg_packet_send_staged_packets(peer);
                }
                up_write(&wg->static_identity.lock);
        }
skip_set_private_key:

        if (info->attrs[WGDEVICE_A_PEERS]) {
                struct nlattr *attr, *peer[WGPEER_A_MAX + 1];
                int rem;

                nla_for_each_nested(attr, info->attrs[WGDEVICE_A_PEERS], rem) {
                        ret = nla_parse_nested(peer, WGPEER_A_MAX, attr,
                                               NULL, NULL);
                        if (ret < 0)
                                goto out;
                        ret = set_peer(wg, peer);
                        if (ret < 0)
                                goto out;
                }
        }
        ret = 0;

out:
        mutex_unlock(&wg->device_update_lock);
        rtnl_unlock();
        dev_put(wg->dev);
out_nodev:
        if (info->attrs[WGDEVICE_A_PRIVATE_KEY])
                memzero_explicit(nla_data(info->attrs[WGDEVICE_A_PRIVATE_KEY]),
                                 nla_len(info->attrs[WGDEVICE_A_PRIVATE_KEY]));
        return ret;
}

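/* The split operation table (wireguard_nl_ops) is provided by the generated
 * netlink code included above, rather than being hand-rolled here.
 */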
static struct genl_family genl_family __ro_after_init = {
        .split_ops = wireguard_nl_ops,
        .n_split_ops = ARRAY_SIZE(wireguard_nl_ops),
        .name = WG_GENL_NAME,
        .version = WG_GENL_VERSION,
        .module = THIS_MODULE,
        .netnsok = true,
};

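/* The uAPI advertises a single WG_KEY_LEN for both public and preshared
 * keys; these assertions keep it in lockstep with the Noise key sizes used
 * internally.
 */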
int __init wg_genetlink_init(void)
{
        BUILD_BUG_ON(WG_KEY_LEN != NOISE_PUBLIC_KEY_LEN);
        BUILD_BUG_ON(WG_KEY_LEN != NOISE_SYMMETRIC_KEY_LEN);

        return genl_register_family(&genl_family);
}

void __exit wg_genetlink_uninit(void)
{
        genl_unregister_family(&genl_family);
}