/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#ifndef _WG_QUEUEING_H
#define _WG_QUEUEING_H

#include "peer.h"
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip_tunnels.h>

struct wg_device;
struct wg_peer;
struct multicore_worker;
struct crypt_queue;
struct prev_queue;
struct sk_buff;

/* queueing.c APIs: */
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
			 unsigned int len);
void wg_packet_queue_free(struct crypt_queue *queue, bool purge);
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);

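/* Illustrative sketch only, not part of this header: device setup code
 * pairs init and free along roughly these lines, where MAX_QUEUED_PACKETS
 * stands in for whatever ring size the driver defines elsewhere:
 *
 *	ret = wg_packet_queue_init(&wg->encrypt_queue,
 *				   wg_packet_encrypt_worker,
 *				   MAX_QUEUED_PACKETS);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	wg_packet_queue_free(&wg->encrypt_queue, false);
 */
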
/* receive.c APIs: */
void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb);
void wg_packet_handshake_receive_worker(struct work_struct *work);
/* NAPI poll function: */
int wg_packet_rx_poll(struct napi_struct *napi, int budget);
/* Workqueue worker: */
void wg_packet_decrypt_worker(struct work_struct *work);

/* send.c APIs: */
void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer,
						bool is_retry);
void wg_packet_send_handshake_response(struct wg_peer *peer);
void wg_packet_send_handshake_cookie(struct wg_device *wg,
				     struct sk_buff *initiating_skb,
				     __le32 sender_index);
void wg_packet_send_keepalive(struct wg_peer *peer);
void wg_packet_purge_staged_packets(struct wg_peer *peer);
void wg_packet_send_staged_packets(struct wg_peer *peer);
/* Workqueue workers: */
void wg_packet_handshake_send_worker(struct work_struct *work);
void wg_packet_tx_worker(struct work_struct *work);
void wg_packet_encrypt_worker(struct work_struct *work);

enum packet_state {
	PACKET_STATE_UNCRYPTED,
	PACKET_STATE_CRYPTED,
	PACKET_STATE_DEAD
};

struct packet_cb {
	u64 nonce;
	struct noise_keypair *keypair;
	atomic_t state;
	u32 mtu;
	u8 ds;
};

#define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb))
#define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer)

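/* PACKET_CB() overlays struct packet_cb onto the skb's 48-byte control
 * buffer (skb->cb), so per-packet crypto state rides along with the skb
 * itself. A purely illustrative use, with hypothetical locals:
 *
 *	PACKET_CB(skb)->mtu = mtu;
 *	PACKET_CB(skb)->keypair = keypair;
 *	atomic_set(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED);
 */
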
static inline bool wg_check_packet_protocol(struct sk_buff *skb)
{
	__be16 real_protocol = ip_tunnel_parse_protocol(skb);

	return real_protocol && skb->protocol == real_protocol;
}

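/* Scrub an skb of nearly all state before it crosses the tunnel boundary.
 * When encapsulating, the flow hash fields are preserved across the reset
 * so the outer UDP flow inherits the inner flow's steering; otherwise they
 * are cleared along with everything else.
 */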
static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating)
{
	u8 l4_hash = skb->l4_hash;
	u8 sw_hash = skb->sw_hash;
	u32 hash = skb->hash;

	skb_scrub_packet(skb, true);
	memset(&skb->headers, 0, sizeof(skb->headers));
	if (encapsulating) {
		skb->l4_hash = l4_hash;
		skb->sw_hash = sw_hash;
		skb->hash = hash;
	}
	skb->queue_mapping = 0;
	skb->nohdr = 0;
	skb->peeked = 0;
	skb->mac_len = 0;
	skb->dev = NULL;
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#endif
	skb_reset_redirect(skb);
	skb->hdr_len = skb_headroom(skb);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_probe_transport_header(skb);
	skb_reset_inner_headers(skb);
}

static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
{
	unsigned int cpu = *stored_cpu;

	while (unlikely(cpu >= nr_cpu_ids || !cpu_online(cpu)))
		cpu = *stored_cpu = cpumask_nth(id % num_online_cpus(), cpu_online_mask);

	return cpu;
}

/* This function is racy, in the sense that it's called while last_cpu is
 * unlocked, so it could return the same CPU twice. Adding locking or using
 * atomic sequence numbers is slower though, and the consequences of racing are
 * harmless, so live with it.
 */
static inline int wg_cpumask_next_online(int *last_cpu)
{
	int cpu = cpumask_next(READ_ONCE(*last_cpu), cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(cpu_online_mask);
	WRITE_ONCE(*last_cpu, cpu);
	return cpu;
}

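/* Illustrative sketch: producers use wg_cpumask_next_online() to spread
 * work round-robin across online CPUs (names here are hypothetical):
 *
 *	int cpu = wg_cpumask_next_online(&queue->last_cpu);
 *
 *	queue_work_on(cpu, wq, &per_cpu_ptr(queue->worker, cpu)->work);
 *
 * By contrast, wg_cpumask_choose_online() keeps a stable stored CPU and
 * only picks a new one when the stored CPU is invalid or has gone offline.
 */
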
void wg_prev_queue_init(struct prev_queue *queue);

/* Multi producer */
bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb);

/* Single consumer */
struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue);

/* Single consumer */
static inline struct sk_buff *wg_prev_queue_peek(struct prev_queue *queue)
{
	if (queue->peeked)
		return queue->peeked;
	queue->peeked = wg_prev_queue_dequeue(queue);
	return queue->peeked;
}

/* Single consumer */
static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue)
{
	queue->peeked = NULL;
}

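/* Illustrative single-consumer loop, approximating how a worker drains the
 * queue: peek at the head, wait until crypto has moved the packet out of
 * UNCRYPTED, then drop the peeked entry and consume it (hypothetical
 * locals):
 *
 *	while ((skb = wg_prev_queue_peek(queue)) != NULL &&
 *	       (state = atomic_read_acquire(&PACKET_CB(skb)->state)) !=
 *		       PACKET_STATE_UNCRYPTED) {
 *		wg_prev_queue_drop_peeked(queue);
 *		// handle skb according to state: CRYPTED or DEAD
 *	}
 */
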
static inline int wg_queue_enqueue_per_device_and_peer(
	struct crypt_queue *device_queue, struct prev_queue *peer_queue,
	struct sk_buff *skb, struct workqueue_struct *wq)
{
	int cpu;

	atomic_set_release(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED);
	/* We first queue this up for the peer ingestion, but the consumer
	 * will wait for the state to change to CRYPTED or DEAD before
	 * dequeueing it.
	 */
	if (unlikely(!wg_prev_queue_enqueue(peer_queue, skb)))
		return -ENOSPC;

	/* Then we queue it up in the device queue, which consumes the
	 * packet as soon as it can.
	 */
	cpu = wg_cpumask_next_online(&device_queue->last_cpu);
	if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb)))
		return -EPIPE;
	queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work);
	return 0;
}

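/* Illustrative caller sketch: on -EPIPE the packet was accepted by the peer
 * queue but not the device queue, so it must be marked DEAD rather than
 * leaked; surrounding locking and error labels are omitted:
 *
 *	ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue,
 *						   &peer->tx_queue, skb,
 *						   wg->packet_crypt_wq);
 *	if (unlikely(ret == -EPIPE))
 *		wg_queue_enqueue_per_peer_tx(skb, PACKET_STATE_DEAD);
 */
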
static inline void wg_queue_enqueue_per_peer_tx(struct sk_buff *skb, enum packet_state state)
{
	/* We take a reference, because as soon as we call atomic_set, the
	 * peer can be freed from below us.
	 */
	struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

	atomic_set_release(&PACKET_CB(skb)->state, state);
	queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id),
		      peer->device->packet_crypt_wq, &peer->transmit_packet_work);
	wg_peer_put(peer);
}

static inline void wg_queue_enqueue_per_peer_rx(struct sk_buff *skb, enum packet_state state)
{
	/* We take a reference, because as soon as we call atomic_set, the
	 * peer can be freed from below us.
	 */
	struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

	atomic_set_release(&PACKET_CB(skb)->state, state);
	napi_schedule(&peer->napi);
	wg_peer_put(peer);
}

#ifdef DEBUG
bool wg_packet_counter_selftest(void);
#endif

#endif /* _WG_QUEUEING_H */