/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#ifndef _WG_QUEUEING_H
#define _WG_QUEUEING_H

#include "peer.h"
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/ipv6.h>

struct wg_device;
struct wg_peer;
struct multicore_worker;
struct crypt_queue;
struct sk_buff;

/* queueing.c APIs: */
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
                         bool multicore, unsigned int len);
void wg_packet_queue_free(struct crypt_queue *queue, bool multicore);
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);

/* receive.c APIs: */
void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb);
void wg_packet_handshake_receive_worker(struct work_struct *work);
/* NAPI poll function: */
int wg_packet_rx_poll(struct napi_struct *napi, int budget);
/* Workqueue worker: */
void wg_packet_decrypt_worker(struct work_struct *work);

/* send.c APIs: */
void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer,
                                                bool is_retry);
void wg_packet_send_handshake_response(struct wg_peer *peer);
void wg_packet_send_handshake_cookie(struct wg_device *wg,
                                     struct sk_buff *initiating_skb,
                                     __le32 sender_index);
void wg_packet_send_keepalive(struct wg_peer *peer);
void wg_packet_purge_staged_packets(struct wg_peer *peer);
void wg_packet_send_staged_packets(struct wg_peer *peer);
/* Workqueue workers: */
void wg_packet_handshake_send_worker(struct work_struct *work);
void wg_packet_tx_worker(struct work_struct *work);
void wg_packet_encrypt_worker(struct work_struct *work);

enum packet_state {
        PACKET_STATE_UNCRYPTED,
        PACKET_STATE_CRYPTED,
        PACKET_STATE_DEAD
};

struct packet_cb {
        u64 nonce;
        struct noise_keypair *keypair;
        atomic_t state;
        u32 mtu;
        u8 ds;
};

#define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb))
#define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer)
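
/* A hypothetical compile-time sanity check (a sketch, not part of the
 * original file): struct packet_cb is overlaid onto skb->cb, so it must
 * not exceed that 48-byte scratch area. Assumes BUILD_BUG_ON from
 * <linux/build_bug.h>, which skbuff.h already pulls in.
 */
static inline void wg_packet_cb_size_check(void)
{
        BUILD_BUG_ON(sizeof(struct packet_cb) >
                     sizeof(((struct sk_buff *)NULL)->cb));
}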

/* Returns either the correct skb->protocol value, or 0 if invalid. */
static inline __be16 wg_examine_packet_protocol(struct sk_buff *skb)
{
        if (skb_network_header(skb) >= skb->head &&
            (skb_network_header(skb) + sizeof(struct iphdr)) <=
                    skb_tail_pointer(skb) &&
            ip_hdr(skb)->version == 4)
                return htons(ETH_P_IP);
        if (skb_network_header(skb) >= skb->head &&
            (skb_network_header(skb) + sizeof(struct ipv6hdr)) <=
                    skb_tail_pointer(skb) &&
            ipv6_hdr(skb)->version == 6)
                return htons(ETH_P_IPV6);
        return 0;
}

static inline bool wg_check_packet_protocol(struct sk_buff *skb)
{
        __be16 real_protocol = wg_examine_packet_protocol(skb);

        return real_protocol && skb->protocol == real_protocol;
}

/* Scrubs the skb of all state and metadata, so that a decrypted packet can
 * be handed to the network stack as though it were freshly received.
 */
static inline void wg_reset_packet(struct sk_buff *skb)
{
        skb_scrub_packet(skb, true);
        memset(&skb->headers_start, 0,
               offsetof(struct sk_buff, headers_end) -
                       offsetof(struct sk_buff, headers_start));
        skb->queue_mapping = 0;
        skb->nohdr = 0;
        skb->peeked = 0;
        skb->mac_len = 0;
        skb->dev = NULL;
#ifdef CONFIG_NET_SCHED
        skb->tc_index = 0;
#endif
        skb_reset_redirect(skb);
        skb->hdr_len = skb_headroom(skb);
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
        skb_probe_transport_header(skb);
        skb_reset_inner_headers(skb);
}

/* Deterministically maps id onto an online CPU, caching the choice in
 * *stored_cpu so that repeated calls for the same object stay on the same
 * CPU until that CPU goes offline.
 */
static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
{
        unsigned int cpu = *stored_cpu, cpu_index, i;

        if (unlikely(cpu == nr_cpumask_bits ||
                     !cpumask_test_cpu(cpu, cpu_online_mask))) {
                cpu_index = id % cpumask_weight(cpu_online_mask);
                cpu = cpumask_first(cpu_online_mask);
                for (i = 0; i < cpu_index; ++i)
                        cpu = cpumask_next(cpu, cpu_online_mask);
                *stored_cpu = cpu;
        }
        return cpu;
}

/* This function is racy, in the sense that next is unlocked, so it could
 * return the same CPU twice. A race-free version of this would be to instead
 * store an atomic sequence number, do an increment-and-return, and then
 * iterate through every possible CPU until we get to that index -- as
 * choose_online does, and as sketched below. However that's a bit slower,
 * and it doesn't seem like this potential race actually introduces any
 * performance loss, so we live with it.
 */
static inline int wg_cpumask_next_online(int *next)
{
        int cpu = *next;

        while (unlikely(!cpumask_test_cpu(cpu, cpu_online_mask)))
                cpu = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
        *next = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
        return cpu;
}
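
/* A sketch of the race-free alternative described above (hypothetical; not
 * part of the original file): keep an atomic sequence number, increment it,
 * and walk to that index among the online CPUs. Slightly slower, since every
 * call walks the cpumask, but two concurrent callers can never be handed the
 * same CPU because of a stale read of the cursor.
 */
static inline int wg_cpumask_next_online_atomic(atomic_t *sequence)
{
        unsigned int cpu_index, cpu, i;

        /* cpumask_weight() is re-read on each call, so a CPU hotplug event
         * between the modulo and the walk is tolerated, not locked against.
         */
        cpu_index = (unsigned int)atomic_inc_return(sequence) %
                    cpumask_weight(cpu_online_mask);
        cpu = cpumask_first(cpu_online_mask);
        for (i = 0; i < cpu_index; ++i)
                cpu = cpumask_next(cpu, cpu_online_mask);
        return cpu;
}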

static inline int wg_queue_enqueue_per_device_and_peer(
        struct crypt_queue *device_queue, struct crypt_queue *peer_queue,
        struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu)
{
        int cpu;

        atomic_set_release(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED);
        /* We first queue this up for the peer ingestion, but the consumer
         * will wait for the state to change to CRYPTED or DEAD before
         * taking any further steps with it.
         */
        if (unlikely(ptr_ring_produce_bh(&peer_queue->ring, skb)))
                return -ENOSPC;
        /* Then we queue it up in the device queue, which consumes the
         * packet as soon as it can.
         */
        cpu = wg_cpumask_next_online(next_cpu);
        if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb)))
                return -EPIPE;
        queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work);
        return 0;
}
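
/* A sketch of the consumer side of the protocol above (a hypothetical
 * helper, loosely modeled on workers such as wg_packet_tx_worker in send.c,
 * and assuming the caller is the ring's sole consumer): the per-peer worker
 * peeks at the head of its ring and only dequeues once the parallel
 * device-queue workers have advanced the packet past UNCRYPTED. This is
 * what preserves per-peer ordering while crypto runs on many CPUs.
 */
static inline struct sk_buff *wg_queue_dequeue_done(struct crypt_queue *queue,
                                                    enum packet_state *state)
{
        struct sk_buff *skb = __ptr_ring_peek(&queue->ring);

        if (!skb)
                return NULL;
        *state = atomic_read_acquire(&PACKET_CB(skb)->state);
        if (*state == PACKET_STATE_UNCRYPTED)
                return NULL;
        __ptr_ring_discard_one(&queue->ring);
        return skb;
}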

static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue,
                                             struct sk_buff *skb,
                                             enum packet_state state)
{
        /* We take a reference, because as soon as we call atomic_set, the
         * peer can be freed from below us.
         */
        struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

        atomic_set_release(&PACKET_CB(skb)->state, state);
        queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu,
                                               peer->internal_id),
                      peer->device->packet_crypt_wq, &queue->work);
        wg_peer_put(peer);
}

static inline void wg_queue_enqueue_per_peer_napi(struct sk_buff *skb,
                                                  enum packet_state state)
{
        /* We take a reference, because as soon as we call atomic_set, the
         * peer can be freed from below us.
         */
        struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

        atomic_set_release(&PACKET_CB(skb)->state, state);
        napi_schedule(&peer->napi);
        wg_peer_put(peer);
}

#ifdef DEBUG
bool wg_packet_counter_selftest(void);
#endif

#endif /* _WG_QUEUEING_H */