Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ovpn: implement packet processing

This change implements encryption/decryption and
encapsulation/decapsulation of OpenVPN packets.

Support for generic crypto state is added along with
a wrapper for the AEAD crypto kernel API.

Signed-off-by: Antonio Quartulli <antonio@openvpn.net>
Link: https://patch.msgid.link/20250415-b4-ovpn-v26-9-577f6097b964@openvpn.net
Reviewed-by: Sabrina Dubroca <sd@queasysnail.net>
Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

Authored by Antonio Quartulli and committed by Paolo Abeni
8534731d ab66abbc

+1105 -17
+4
drivers/net/Kconfig
··· 121 121 depends on IPV6 || !IPV6 122 122 select DST_CACHE 123 123 select NET_UDP_TUNNEL 124 + select CRYPTO 125 + select CRYPTO_AES 126 + select CRYPTO_GCM 127 + select CRYPTO_CHACHA20POLY1305 124 128 help 125 129 This module enhances the performance of the OpenVPN userspace software 126 130 by offloading the data channel processing to kernelspace.
+3
drivers/net/ovpn/Makefile
··· 8 8 9 9 obj-$(CONFIG_OVPN) := ovpn.o 10 10 ovpn-y += bind.o 11 + ovpn-y += crypto.o 12 + ovpn-y += crypto_aead.o 11 13 ovpn-y += main.o 12 14 ovpn-y += io.o 13 15 ovpn-y += netlink.o 14 16 ovpn-y += netlink-gen.o 15 17 ovpn-y += peer.o 18 + ovpn-y += pktid.o 16 19 ovpn-y += socket.o 17 20 ovpn-y += udp.o
+3 -6
drivers/net/ovpn/bind.c
··· 48 48 */ 49 49 void ovpn_bind_reset(struct ovpn_peer *peer, struct ovpn_bind *new) 50 50 { 51 - struct ovpn_bind *old; 51 + lockdep_assert_held(&peer->lock); 52 52 53 - spin_lock_bh(&peer->lock); 54 - old = rcu_replace_pointer(peer->bind, new, true); 55 - spin_unlock_bh(&peer->lock); 56 - 57 - kfree_rcu(old, rcu); 53 + kfree_rcu(rcu_replace_pointer(peer->bind, new, 54 + lockdep_is_held(&peer->lock)), rcu); 58 55 }
+148
drivers/net/ovpn/crypto.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* OpenVPN data channel offload 3 + * 4 + * Copyright (C) 2020-2025 OpenVPN, Inc. 5 + * 6 + * Author: James Yonan <james@openvpn.net> 7 + * Antonio Quartulli <antonio@openvpn.net> 8 + */ 9 + 10 + #include <linux/types.h> 11 + #include <linux/net.h> 12 + #include <linux/netdevice.h> 13 + #include <uapi/linux/ovpn.h> 14 + 15 + #include "ovpnpriv.h" 16 + #include "main.h" 17 + #include "pktid.h" 18 + #include "crypto_aead.h" 19 + #include "crypto.h" 20 + 21 + static void ovpn_ks_destroy_rcu(struct rcu_head *head) 22 + { 23 + struct ovpn_crypto_key_slot *ks; 24 + 25 + ks = container_of(head, struct ovpn_crypto_key_slot, rcu); 26 + ovpn_aead_crypto_key_slot_destroy(ks); 27 + } 28 + 29 + void ovpn_crypto_key_slot_release(struct kref *kref) 30 + { 31 + struct ovpn_crypto_key_slot *ks; 32 + 33 + ks = container_of(kref, struct ovpn_crypto_key_slot, refcount); 34 + call_rcu(&ks->rcu, ovpn_ks_destroy_rcu); 35 + } 36 + 37 + /* can only be invoked when all peer references have been dropped (i.e. RCU 38 + * release routine) 39 + */ 40 + void ovpn_crypto_state_release(struct ovpn_crypto_state *cs) 41 + { 42 + struct ovpn_crypto_key_slot *ks; 43 + 44 + ks = rcu_access_pointer(cs->slots[0]); 45 + if (ks) { 46 + RCU_INIT_POINTER(cs->slots[0], NULL); 47 + ovpn_crypto_key_slot_put(ks); 48 + } 49 + 50 + ks = rcu_access_pointer(cs->slots[1]); 51 + if (ks) { 52 + RCU_INIT_POINTER(cs->slots[1], NULL); 53 + ovpn_crypto_key_slot_put(ks); 54 + } 55 + } 56 + 57 + /* Reset the ovpn_crypto_state object in a way that is atomic 58 + * to RCU readers. 
59 + */ 60 + int ovpn_crypto_state_reset(struct ovpn_crypto_state *cs, 61 + const struct ovpn_peer_key_reset *pkr) 62 + { 63 + struct ovpn_crypto_key_slot *old = NULL, *new; 64 + u8 idx; 65 + 66 + if (pkr->slot != OVPN_KEY_SLOT_PRIMARY && 67 + pkr->slot != OVPN_KEY_SLOT_SECONDARY) 68 + return -EINVAL; 69 + 70 + new = ovpn_aead_crypto_key_slot_new(&pkr->key); 71 + if (IS_ERR(new)) 72 + return PTR_ERR(new); 73 + 74 + spin_lock_bh(&cs->lock); 75 + idx = cs->primary_idx; 76 + switch (pkr->slot) { 77 + case OVPN_KEY_SLOT_PRIMARY: 78 + old = rcu_replace_pointer(cs->slots[idx], new, 79 + lockdep_is_held(&cs->lock)); 80 + break; 81 + case OVPN_KEY_SLOT_SECONDARY: 82 + old = rcu_replace_pointer(cs->slots[!idx], new, 83 + lockdep_is_held(&cs->lock)); 84 + break; 85 + } 86 + spin_unlock_bh(&cs->lock); 87 + 88 + if (old) 89 + ovpn_crypto_key_slot_put(old); 90 + 91 + return 0; 92 + } 93 + 94 + void ovpn_crypto_key_slot_delete(struct ovpn_crypto_state *cs, 95 + enum ovpn_key_slot slot) 96 + { 97 + struct ovpn_crypto_key_slot *ks = NULL; 98 + u8 idx; 99 + 100 + if (slot != OVPN_KEY_SLOT_PRIMARY && 101 + slot != OVPN_KEY_SLOT_SECONDARY) { 102 + pr_warn("Invalid slot to release: %u\n", slot); 103 + return; 104 + } 105 + 106 + spin_lock_bh(&cs->lock); 107 + idx = cs->primary_idx; 108 + switch (slot) { 109 + case OVPN_KEY_SLOT_PRIMARY: 110 + ks = rcu_replace_pointer(cs->slots[idx], NULL, 111 + lockdep_is_held(&cs->lock)); 112 + break; 113 + case OVPN_KEY_SLOT_SECONDARY: 114 + ks = rcu_replace_pointer(cs->slots[!idx], NULL, 115 + lockdep_is_held(&cs->lock)); 116 + break; 117 + } 118 + spin_unlock_bh(&cs->lock); 119 + 120 + if (!ks) { 121 + pr_debug("Key slot already released: %u\n", slot); 122 + return; 123 + } 124 + 125 + pr_debug("deleting key slot %u, key_id=%u\n", slot, ks->key_id); 126 + ovpn_crypto_key_slot_put(ks); 127 + } 128 + 129 + void ovpn_crypto_key_slots_swap(struct ovpn_crypto_state *cs) 130 + { 131 + const struct ovpn_crypto_key_slot *old_primary, *old_secondary; 132 + 
u8 idx; 133 + 134 + spin_lock_bh(&cs->lock); 135 + idx = cs->primary_idx; 136 + old_primary = rcu_dereference_protected(cs->slots[idx], 137 + lockdep_is_held(&cs->lock)); 138 + old_secondary = rcu_dereference_protected(cs->slots[!idx], 139 + lockdep_is_held(&cs->lock)); 140 + /* perform real swap by switching the index of the primary key */ 141 + WRITE_ONCE(cs->primary_idx, !cs->primary_idx); 142 + 143 + pr_debug("key swapped: (old primary) %d <-> (new primary) %d\n", 144 + old_primary ? old_primary->key_id : -1, 145 + old_secondary ? old_secondary->key_id : -1); 146 + 147 + spin_unlock_bh(&cs->lock); 148 + }
+139
drivers/net/ovpn/crypto.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* OpenVPN data channel offload 3 + * 4 + * Copyright (C) 2020-2025 OpenVPN, Inc. 5 + * 6 + * Author: James Yonan <james@openvpn.net> 7 + * Antonio Quartulli <antonio@openvpn.net> 8 + */ 9 + 10 + #ifndef _NET_OVPN_OVPNCRYPTO_H_ 11 + #define _NET_OVPN_OVPNCRYPTO_H_ 12 + 13 + #include "pktid.h" 14 + #include "proto.h" 15 + 16 + /* info needed for both encrypt and decrypt directions */ 17 + struct ovpn_key_direction { 18 + const u8 *cipher_key; 19 + size_t cipher_key_size; 20 + const u8 *nonce_tail; /* only needed for GCM modes */ 21 + size_t nonce_tail_size; /* only needed for GCM modes */ 22 + }; 23 + 24 + /* all info for a particular symmetric key (primary or secondary) */ 25 + struct ovpn_key_config { 26 + enum ovpn_cipher_alg cipher_alg; 27 + u8 key_id; 28 + struct ovpn_key_direction encrypt; 29 + struct ovpn_key_direction decrypt; 30 + }; 31 + 32 + /* used to pass settings from netlink to the crypto engine */ 33 + struct ovpn_peer_key_reset { 34 + enum ovpn_key_slot slot; 35 + struct ovpn_key_config key; 36 + }; 37 + 38 + struct ovpn_crypto_key_slot { 39 + u8 key_id; 40 + 41 + struct crypto_aead *encrypt; 42 + struct crypto_aead *decrypt; 43 + u8 nonce_tail_xmit[OVPN_NONCE_TAIL_SIZE]; 44 + u8 nonce_tail_recv[OVPN_NONCE_TAIL_SIZE]; 45 + 46 + struct ovpn_pktid_recv pid_recv ____cacheline_aligned_in_smp; 47 + struct ovpn_pktid_xmit pid_xmit ____cacheline_aligned_in_smp; 48 + struct kref refcount; 49 + struct rcu_head rcu; 50 + }; 51 + 52 + struct ovpn_crypto_state { 53 + struct ovpn_crypto_key_slot __rcu *slots[2]; 54 + u8 primary_idx; 55 + 56 + /* protects primary and secondary slots */ 57 + spinlock_t lock; 58 + }; 59 + 60 + static inline bool ovpn_crypto_key_slot_hold(struct ovpn_crypto_key_slot *ks) 61 + { 62 + return kref_get_unless_zero(&ks->refcount); 63 + } 64 + 65 + static inline void ovpn_crypto_state_init(struct ovpn_crypto_state *cs) 66 + { 67 + RCU_INIT_POINTER(cs->slots[0], NULL); 68 + 
RCU_INIT_POINTER(cs->slots[1], NULL); 69 + cs->primary_idx = 0; 70 + spin_lock_init(&cs->lock); 71 + } 72 + 73 + static inline struct ovpn_crypto_key_slot * 74 + ovpn_crypto_key_id_to_slot(const struct ovpn_crypto_state *cs, u8 key_id) 75 + { 76 + struct ovpn_crypto_key_slot *ks; 77 + u8 idx; 78 + 79 + if (unlikely(!cs)) 80 + return NULL; 81 + 82 + rcu_read_lock(); 83 + idx = READ_ONCE(cs->primary_idx); 84 + ks = rcu_dereference(cs->slots[idx]); 85 + if (ks && ks->key_id == key_id) { 86 + if (unlikely(!ovpn_crypto_key_slot_hold(ks))) 87 + ks = NULL; 88 + goto out; 89 + } 90 + 91 + ks = rcu_dereference(cs->slots[!idx]); 92 + if (ks && ks->key_id == key_id) { 93 + if (unlikely(!ovpn_crypto_key_slot_hold(ks))) 94 + ks = NULL; 95 + goto out; 96 + } 97 + 98 + /* when both key slots are occupied but no matching key ID is found, ks 99 + * has to be reset to NULL to avoid carrying a stale pointer 100 + */ 101 + ks = NULL; 102 + out: 103 + rcu_read_unlock(); 104 + 105 + return ks; 106 + } 107 + 108 + static inline struct ovpn_crypto_key_slot * 109 + ovpn_crypto_key_slot_primary(const struct ovpn_crypto_state *cs) 110 + { 111 + struct ovpn_crypto_key_slot *ks; 112 + 113 + rcu_read_lock(); 114 + ks = rcu_dereference(cs->slots[cs->primary_idx]); 115 + if (unlikely(ks && !ovpn_crypto_key_slot_hold(ks))) 116 + ks = NULL; 117 + rcu_read_unlock(); 118 + 119 + return ks; 120 + } 121 + 122 + void ovpn_crypto_key_slot_release(struct kref *kref); 123 + 124 + static inline void ovpn_crypto_key_slot_put(struct ovpn_crypto_key_slot *ks) 125 + { 126 + kref_put(&ks->refcount, ovpn_crypto_key_slot_release); 127 + } 128 + 129 + int ovpn_crypto_state_reset(struct ovpn_crypto_state *cs, 130 + const struct ovpn_peer_key_reset *pkr); 131 + 132 + void ovpn_crypto_key_slot_delete(struct ovpn_crypto_state *cs, 133 + enum ovpn_key_slot slot); 134 + 135 + void ovpn_crypto_state_release(struct ovpn_crypto_state *cs); 136 + 137 + void ovpn_crypto_key_slots_swap(struct ovpn_crypto_state *cs); 138 + 139 
+ #endif /* _NET_OVPN_OVPNCRYPTO_H_ */
+366
drivers/net/ovpn/crypto_aead.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* OpenVPN data channel offload 3 + * 4 + * Copyright (C) 2020-2025 OpenVPN, Inc. 5 + * 6 + * Author: James Yonan <james@openvpn.net> 7 + * Antonio Quartulli <antonio@openvpn.net> 8 + */ 9 + 10 + #include <crypto/aead.h> 11 + #include <linux/skbuff.h> 12 + #include <net/ip.h> 13 + #include <net/ipv6.h> 14 + #include <net/udp.h> 15 + 16 + #include "ovpnpriv.h" 17 + #include "main.h" 18 + #include "io.h" 19 + #include "pktid.h" 20 + #include "crypto_aead.h" 21 + #include "crypto.h" 22 + #include "peer.h" 23 + #include "proto.h" 24 + #include "skb.h" 25 + 26 + #define OVPN_AUTH_TAG_SIZE 16 27 + #define OVPN_AAD_SIZE (OVPN_OPCODE_SIZE + OVPN_NONCE_WIRE_SIZE) 28 + 29 + #define ALG_NAME_AES "gcm(aes)" 30 + #define ALG_NAME_CHACHAPOLY "rfc7539(chacha20,poly1305)" 31 + 32 + static int ovpn_aead_encap_overhead(const struct ovpn_crypto_key_slot *ks) 33 + { 34 + return OVPN_OPCODE_SIZE + /* OP header size */ 35 + sizeof(u32) + /* Packet ID */ 36 + crypto_aead_authsize(ks->encrypt); /* Auth Tag */ 37 + } 38 + 39 + int ovpn_aead_encrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks, 40 + struct sk_buff *skb) 41 + { 42 + const unsigned int tag_size = crypto_aead_authsize(ks->encrypt); 43 + struct aead_request *req; 44 + struct sk_buff *trailer; 45 + struct scatterlist *sg; 46 + int nfrags, ret; 47 + u32 pktid, op; 48 + u8 *iv; 49 + 50 + ovpn_skb_cb(skb)->peer = peer; 51 + ovpn_skb_cb(skb)->ks = ks; 52 + 53 + /* Sample AEAD header format: 54 + * 48000001 00000005 7e7046bd 444a7e28 cc6387b1 64a4d6c1 380275a... 55 + * [ OP32 ] [seq # ] [ auth tag ] [ payload ... 
] 56 + * [4-byte 57 + * IV head] 58 + */ 59 + 60 + /* check that there's enough headroom in the skb for packet 61 + * encapsulation 62 + */ 63 + if (unlikely(skb_cow_head(skb, OVPN_HEAD_ROOM))) 64 + return -ENOBUFS; 65 + 66 + /* get number of skb frags and ensure that packet data is writable */ 67 + nfrags = skb_cow_data(skb, 0, &trailer); 68 + if (unlikely(nfrags < 0)) 69 + return nfrags; 70 + 71 + if (unlikely(nfrags + 2 > (MAX_SKB_FRAGS + 2))) 72 + return -ENOSPC; 73 + 74 + /* sg may be required by async crypto */ 75 + ovpn_skb_cb(skb)->sg = kmalloc(sizeof(*ovpn_skb_cb(skb)->sg) * 76 + (nfrags + 2), GFP_ATOMIC); 77 + if (unlikely(!ovpn_skb_cb(skb)->sg)) 78 + return -ENOMEM; 79 + 80 + sg = ovpn_skb_cb(skb)->sg; 81 + 82 + /* sg table: 83 + * 0: op, wire nonce (AD, len=OVPN_OP_SIZE_V2+OVPN_NONCE_WIRE_SIZE), 84 + * 1, 2, 3, ..., n: payload, 85 + * n+1: auth_tag (len=tag_size) 86 + */ 87 + sg_init_table(sg, nfrags + 2); 88 + 89 + /* build scatterlist to encrypt packet payload */ 90 + ret = skb_to_sgvec_nomark(skb, sg + 1, 0, skb->len); 91 + if (unlikely(nfrags != ret)) 92 + return -EINVAL; 93 + 94 + /* append auth_tag onto scatterlist */ 95 + __skb_push(skb, tag_size); 96 + sg_set_buf(sg + nfrags + 1, skb->data, tag_size); 97 + 98 + /* obtain packet ID, which is used both as a first 99 + * 4 bytes of nonce and last 4 bytes of associated data. 
100 + */ 101 + ret = ovpn_pktid_xmit_next(&ks->pid_xmit, &pktid); 102 + if (unlikely(ret < 0)) 103 + return ret; 104 + 105 + /* iv may be required by async crypto */ 106 + ovpn_skb_cb(skb)->iv = kmalloc(OVPN_NONCE_SIZE, GFP_ATOMIC); 107 + if (unlikely(!ovpn_skb_cb(skb)->iv)) 108 + return -ENOMEM; 109 + 110 + iv = ovpn_skb_cb(skb)->iv; 111 + 112 + /* concat 4 bytes packet id and 8 bytes nonce tail into 12 bytes 113 + * nonce 114 + */ 115 + ovpn_pktid_aead_write(pktid, ks->nonce_tail_xmit, iv); 116 + 117 + /* make space for packet id and push it to the front */ 118 + __skb_push(skb, OVPN_NONCE_WIRE_SIZE); 119 + memcpy(skb->data, iv, OVPN_NONCE_WIRE_SIZE); 120 + 121 + /* add packet op as head of additional data */ 122 + op = ovpn_opcode_compose(OVPN_DATA_V2, ks->key_id, peer->id); 123 + __skb_push(skb, OVPN_OPCODE_SIZE); 124 + BUILD_BUG_ON(sizeof(op) != OVPN_OPCODE_SIZE); 125 + *((__force __be32 *)skb->data) = htonl(op); 126 + 127 + /* AEAD Additional data */ 128 + sg_set_buf(sg, skb->data, OVPN_AAD_SIZE); 129 + 130 + req = aead_request_alloc(ks->encrypt, GFP_ATOMIC); 131 + if (unlikely(!req)) 132 + return -ENOMEM; 133 + 134 + ovpn_skb_cb(skb)->req = req; 135 + 136 + /* setup async crypto operation */ 137 + aead_request_set_tfm(req, ks->encrypt); 138 + aead_request_set_callback(req, 0, ovpn_encrypt_post, skb); 139 + aead_request_set_crypt(req, sg, sg, 140 + skb->len - ovpn_aead_encap_overhead(ks), iv); 141 + aead_request_set_ad(req, OVPN_AAD_SIZE); 142 + 143 + /* encrypt it */ 144 + return crypto_aead_encrypt(req); 145 + } 146 + 147 + int ovpn_aead_decrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks, 148 + struct sk_buff *skb) 149 + { 150 + const unsigned int tag_size = crypto_aead_authsize(ks->decrypt); 151 + int ret, payload_len, nfrags; 152 + unsigned int payload_offset; 153 + struct aead_request *req; 154 + struct sk_buff *trailer; 155 + struct scatterlist *sg; 156 + u8 *iv; 157 + 158 + payload_offset = OVPN_AAD_SIZE + tag_size; 159 + payload_len = 
skb->len - payload_offset; 160 + 161 + ovpn_skb_cb(skb)->payload_offset = payload_offset; 162 + ovpn_skb_cb(skb)->peer = peer; 163 + ovpn_skb_cb(skb)->ks = ks; 164 + 165 + /* sanity check on packet size, payload size must be >= 0 */ 166 + if (unlikely(payload_len < 0)) 167 + return -EINVAL; 168 + 169 + /* Prepare the skb data buffer to be accessed up until the auth tag. 170 + * This is required because this area is directly mapped into the sg 171 + * list. 172 + */ 173 + if (unlikely(!pskb_may_pull(skb, payload_offset))) 174 + return -ENODATA; 175 + 176 + /* get number of skb frags and ensure that packet data is writable */ 177 + nfrags = skb_cow_data(skb, 0, &trailer); 178 + if (unlikely(nfrags < 0)) 179 + return nfrags; 180 + 181 + if (unlikely(nfrags + 2 > (MAX_SKB_FRAGS + 2))) 182 + return -ENOSPC; 183 + 184 + /* sg may be required by async crypto */ 185 + ovpn_skb_cb(skb)->sg = kmalloc(sizeof(*ovpn_skb_cb(skb)->sg) * 186 + (nfrags + 2), GFP_ATOMIC); 187 + if (unlikely(!ovpn_skb_cb(skb)->sg)) 188 + return -ENOMEM; 189 + 190 + sg = ovpn_skb_cb(skb)->sg; 191 + 192 + /* sg table: 193 + * 0: op, wire nonce (AD, len=OVPN_OPCODE_SIZE+OVPN_NONCE_WIRE_SIZE), 194 + * 1, 2, 3, ..., n: payload, 195 + * n+1: auth_tag (len=tag_size) 196 + */ 197 + sg_init_table(sg, nfrags + 2); 198 + 199 + /* packet op is head of additional data */ 200 + sg_set_buf(sg, skb->data, OVPN_AAD_SIZE); 201 + 202 + /* build scatterlist to decrypt packet payload */ 203 + ret = skb_to_sgvec_nomark(skb, sg + 1, payload_offset, payload_len); 204 + if (unlikely(nfrags != ret)) 205 + return -EINVAL; 206 + 207 + /* append auth_tag onto scatterlist */ 208 + sg_set_buf(sg + nfrags + 1, skb->data + OVPN_AAD_SIZE, tag_size); 209 + 210 + /* iv may be required by async crypto */ 211 + ovpn_skb_cb(skb)->iv = kmalloc(OVPN_NONCE_SIZE, GFP_ATOMIC); 212 + if (unlikely(!ovpn_skb_cb(skb)->iv)) 213 + return -ENOMEM; 214 + 215 + iv = ovpn_skb_cb(skb)->iv; 216 + 217 + /* copy nonce into IV buffer */ 218 + memcpy(iv, 
skb->data + OVPN_OPCODE_SIZE, OVPN_NONCE_WIRE_SIZE); 219 + memcpy(iv + OVPN_NONCE_WIRE_SIZE, ks->nonce_tail_recv, 220 + OVPN_NONCE_TAIL_SIZE); 221 + 222 + req = aead_request_alloc(ks->decrypt, GFP_ATOMIC); 223 + if (unlikely(!req)) 224 + return -ENOMEM; 225 + 226 + ovpn_skb_cb(skb)->req = req; 227 + 228 + /* setup async crypto operation */ 229 + aead_request_set_tfm(req, ks->decrypt); 230 + aead_request_set_callback(req, 0, ovpn_decrypt_post, skb); 231 + aead_request_set_crypt(req, sg, sg, payload_len + tag_size, iv); 232 + 233 + aead_request_set_ad(req, OVPN_AAD_SIZE); 234 + 235 + /* decrypt it */ 236 + return crypto_aead_decrypt(req); 237 + } 238 + 239 + /* Initialize a struct crypto_aead object */ 240 + static struct crypto_aead *ovpn_aead_init(const char *title, 241 + const char *alg_name, 242 + const unsigned char *key, 243 + unsigned int keylen) 244 + { 245 + struct crypto_aead *aead; 246 + int ret; 247 + 248 + aead = crypto_alloc_aead(alg_name, 0, 0); 249 + if (IS_ERR(aead)) { 250 + ret = PTR_ERR(aead); 251 + pr_err("%s crypto_alloc_aead failed, err=%d\n", title, ret); 252 + aead = NULL; 253 + goto error; 254 + } 255 + 256 + ret = crypto_aead_setkey(aead, key, keylen); 257 + if (ret) { 258 + pr_err("%s crypto_aead_setkey size=%u failed, err=%d\n", title, 259 + keylen, ret); 260 + goto error; 261 + } 262 + 263 + ret = crypto_aead_setauthsize(aead, OVPN_AUTH_TAG_SIZE); 264 + if (ret) { 265 + pr_err("%s crypto_aead_setauthsize failed, err=%d\n", title, 266 + ret); 267 + goto error; 268 + } 269 + 270 + /* basic AEAD assumption */ 271 + if (crypto_aead_ivsize(aead) != OVPN_NONCE_SIZE) { 272 + pr_err("%s IV size must be %d\n", title, OVPN_NONCE_SIZE); 273 + ret = -EINVAL; 274 + goto error; 275 + } 276 + 277 + pr_debug("********* Cipher %s (%s)\n", alg_name, title); 278 + pr_debug("*** IV size=%u\n", crypto_aead_ivsize(aead)); 279 + pr_debug("*** req size=%u\n", crypto_aead_reqsize(aead)); 280 + pr_debug("*** block size=%u\n", crypto_aead_blocksize(aead)); 281 + 
pr_debug("*** auth size=%u\n", crypto_aead_authsize(aead)); 282 + pr_debug("*** alignmask=0x%x\n", crypto_aead_alignmask(aead)); 283 + 284 + return aead; 285 + 286 + error: 287 + crypto_free_aead(aead); 288 + return ERR_PTR(ret); 289 + } 290 + 291 + void ovpn_aead_crypto_key_slot_destroy(struct ovpn_crypto_key_slot *ks) 292 + { 293 + if (!ks) 294 + return; 295 + 296 + crypto_free_aead(ks->encrypt); 297 + crypto_free_aead(ks->decrypt); 298 + kfree(ks); 299 + } 300 + 301 + struct ovpn_crypto_key_slot * 302 + ovpn_aead_crypto_key_slot_new(const struct ovpn_key_config *kc) 303 + { 304 + struct ovpn_crypto_key_slot *ks = NULL; 305 + const char *alg_name; 306 + int ret; 307 + 308 + /* validate crypto alg */ 309 + switch (kc->cipher_alg) { 310 + case OVPN_CIPHER_ALG_AES_GCM: 311 + alg_name = ALG_NAME_AES; 312 + break; 313 + case OVPN_CIPHER_ALG_CHACHA20_POLY1305: 314 + alg_name = ALG_NAME_CHACHAPOLY; 315 + break; 316 + default: 317 + return ERR_PTR(-EOPNOTSUPP); 318 + } 319 + 320 + if (kc->encrypt.nonce_tail_size != OVPN_NONCE_TAIL_SIZE || 321 + kc->decrypt.nonce_tail_size != OVPN_NONCE_TAIL_SIZE) 322 + return ERR_PTR(-EINVAL); 323 + 324 + /* build the key slot */ 325 + ks = kmalloc(sizeof(*ks), GFP_KERNEL); 326 + if (!ks) 327 + return ERR_PTR(-ENOMEM); 328 + 329 + ks->encrypt = NULL; 330 + ks->decrypt = NULL; 331 + kref_init(&ks->refcount); 332 + ks->key_id = kc->key_id; 333 + 334 + ks->encrypt = ovpn_aead_init("encrypt", alg_name, 335 + kc->encrypt.cipher_key, 336 + kc->encrypt.cipher_key_size); 337 + if (IS_ERR(ks->encrypt)) { 338 + ret = PTR_ERR(ks->encrypt); 339 + ks->encrypt = NULL; 340 + goto destroy_ks; 341 + } 342 + 343 + ks->decrypt = ovpn_aead_init("decrypt", alg_name, 344 + kc->decrypt.cipher_key, 345 + kc->decrypt.cipher_key_size); 346 + if (IS_ERR(ks->decrypt)) { 347 + ret = PTR_ERR(ks->decrypt); 348 + ks->decrypt = NULL; 349 + goto destroy_ks; 350 + } 351 + 352 + memcpy(ks->nonce_tail_xmit, kc->encrypt.nonce_tail, 353 + OVPN_NONCE_TAIL_SIZE); 354 + 
memcpy(ks->nonce_tail_recv, kc->decrypt.nonce_tail, 355 + OVPN_NONCE_TAIL_SIZE); 356 + 357 + /* init packet ID generation/validation */ 358 + ovpn_pktid_xmit_init(&ks->pid_xmit); 359 + ovpn_pktid_recv_init(&ks->pid_recv); 360 + 361 + return ks; 362 + 363 + destroy_ks: 364 + ovpn_aead_crypto_key_slot_destroy(ks); 365 + return ERR_PTR(ret); 366 + }
+27
drivers/net/ovpn/crypto_aead.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* OpenVPN data channel offload 3 + * 4 + * Copyright (C) 2020-2025 OpenVPN, Inc. 5 + * 6 + * Author: James Yonan <james@openvpn.net> 7 + * Antonio Quartulli <antonio@openvpn.net> 8 + */ 9 + 10 + #ifndef _NET_OVPN_OVPNAEAD_H_ 11 + #define _NET_OVPN_OVPNAEAD_H_ 12 + 13 + #include "crypto.h" 14 + 15 + #include <asm/types.h> 16 + #include <linux/skbuff.h> 17 + 18 + int ovpn_aead_encrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks, 19 + struct sk_buff *skb); 20 + int ovpn_aead_decrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks, 21 + struct sk_buff *skb); 22 + 23 + struct ovpn_crypto_key_slot * 24 + ovpn_aead_crypto_key_slot_new(const struct ovpn_key_config *kc); 25 + void ovpn_aead_crypto_key_slot_destroy(struct ovpn_crypto_key_slot *ks); 26 + 27 + #endif /* _NET_OVPN_OVPNAEAD_H_ */
+126 -11
drivers/net/ovpn/io.c
··· 7 7 * Antonio Quartulli <antonio@openvpn.net> 8 8 */ 9 9 10 + #include <crypto/aead.h> 10 11 #include <linux/netdevice.h> 11 12 #include <linux/skbuff.h> 12 13 #include <net/gro_cells.h> ··· 16 15 #include "ovpnpriv.h" 17 16 #include "peer.h" 18 17 #include "io.h" 18 + #include "bind.h" 19 + #include "crypto.h" 20 + #include "crypto_aead.h" 19 21 #include "netlink.h" 20 22 #include "proto.h" 21 23 #include "udp.h" ··· 48 44 skb_set_queue_mapping(skb, 0); 49 45 skb_scrub_packet(skb, true); 50 46 51 - skb_reset_network_header(skb); 47 + /* network header reset in ovpn_decrypt_post() */ 52 48 skb_reset_transport_header(skb); 53 49 skb_reset_inner_headers(skb); 54 50 ··· 60 56 dev_dstats_rx_add(peer->ovpn->dev, pkt_len); 61 57 } 62 58 63 - static void ovpn_decrypt_post(struct sk_buff *skb, int ret) 59 + void ovpn_decrypt_post(void *data, int ret) 64 60 { 65 - struct ovpn_peer *peer = ovpn_skb_cb(skb)->peer; 61 + struct ovpn_crypto_key_slot *ks; 62 + unsigned int payload_offset = 0; 63 + struct sk_buff *skb = data; 64 + struct ovpn_peer *peer; 65 + __be16 proto; 66 + __be32 *pid; 67 + 68 + /* crypto is happening asynchronously. 
this function will be called 69 + * again later by the crypto callback with a proper return code 70 + */ 71 + if (unlikely(ret == -EINPROGRESS)) 72 + return; 73 + 74 + payload_offset = ovpn_skb_cb(skb)->payload_offset; 75 + ks = ovpn_skb_cb(skb)->ks; 76 + peer = ovpn_skb_cb(skb)->peer; 77 + 78 + /* crypto is done, cleanup skb CB and its members */ 79 + kfree(ovpn_skb_cb(skb)->iv); 80 + kfree(ovpn_skb_cb(skb)->sg); 81 + aead_request_free(ovpn_skb_cb(skb)->req); 66 82 67 83 if (unlikely(ret < 0)) 68 84 goto drop; 85 + 86 + /* PID sits after the op */ 87 + pid = (__force __be32 *)(skb->data + OVPN_OPCODE_SIZE); 88 + ret = ovpn_pktid_recv(&ks->pid_recv, ntohl(*pid), 0); 89 + if (unlikely(ret < 0)) { 90 + net_err_ratelimited("%s: PKT ID RX error for peer %u: %d\n", 91 + netdev_name(peer->ovpn->dev), peer->id, 92 + ret); 93 + goto drop; 94 + } 95 + 96 + /* point to encapsulated IP packet */ 97 + __skb_pull(skb, payload_offset); 98 + 99 + /* check if this is a valid datapacket that has to be delivered to the 100 + * ovpn interface 101 + */ 102 + skb_reset_network_header(skb); 103 + proto = ovpn_ip_check_protocol(skb); 104 + if (unlikely(!proto)) { 105 + /* check if null packet */ 106 + if (unlikely(!pskb_may_pull(skb, 1))) { 107 + net_info_ratelimited("%s: NULL packet received from peer %u\n", 108 + netdev_name(peer->ovpn->dev), 109 + peer->id); 110 + goto drop; 111 + } 112 + 113 + net_info_ratelimited("%s: unsupported protocol received from peer %u\n", 114 + netdev_name(peer->ovpn->dev), peer->id); 115 + goto drop; 116 + } 117 + skb->protocol = proto; 118 + 119 + /* perform Reverse Path Filtering (RPF) */ 120 + if (unlikely(!ovpn_peer_check_by_src(peer->ovpn, skb, peer))) { 121 + if (skb->protocol == htons(ETH_P_IPV6)) 122 + net_dbg_ratelimited("%s: RPF dropped packet from peer %u, src: %pI6c\n", 123 + netdev_name(peer->ovpn->dev), 124 + peer->id, &ipv6_hdr(skb)->saddr); 125 + else 126 + net_dbg_ratelimited("%s: RPF dropped packet from peer %u, src: %pI4\n", 127 + 
netdev_name(peer->ovpn->dev), 128 + peer->id, &ip_hdr(skb)->saddr); 129 + goto drop; 130 + } 69 131 70 132 ovpn_netdev_write(peer, skb); 71 133 /* skb is passed to upper layer - don't free it */ ··· 139 69 drop: 140 70 if (unlikely(skb)) 141 71 dev_dstats_rx_dropped(peer->ovpn->dev); 142 - ovpn_peer_put(peer); 72 + if (likely(peer)) 73 + ovpn_peer_put(peer); 74 + if (likely(ks)) 75 + ovpn_crypto_key_slot_put(ks); 143 76 kfree_skb(skb); 144 77 } 145 78 146 79 /* RX path entry point: decrypt packet and forward it to the device */ 147 80 void ovpn_recv(struct ovpn_peer *peer, struct sk_buff *skb) 148 81 { 149 - ovpn_skb_cb(skb)->peer = peer; 150 - ovpn_decrypt_post(skb, 0); 82 + struct ovpn_crypto_key_slot *ks; 83 + u8 key_id; 84 + 85 + /* get the key slot matching the key ID in the received packet */ 86 + key_id = ovpn_key_id_from_skb(skb); 87 + ks = ovpn_crypto_key_id_to_slot(&peer->crypto, key_id); 88 + if (unlikely(!ks)) { 89 + net_info_ratelimited("%s: no available key for peer %u, key-id: %u\n", 90 + netdev_name(peer->ovpn->dev), peer->id, 91 + key_id); 92 + dev_dstats_rx_dropped(peer->ovpn->dev); 93 + kfree_skb(skb); 94 + ovpn_peer_put(peer); 95 + return; 96 + } 97 + 98 + memset(ovpn_skb_cb(skb), 0, sizeof(struct ovpn_cb)); 99 + ovpn_decrypt_post(skb, ovpn_aead_decrypt(peer, ks, skb)); 151 100 } 152 101 153 - static void ovpn_encrypt_post(struct sk_buff *skb, int ret) 102 + void ovpn_encrypt_post(void *data, int ret) 154 103 { 155 - struct ovpn_peer *peer = ovpn_skb_cb(skb)->peer; 104 + struct ovpn_crypto_key_slot *ks; 105 + struct sk_buff *skb = data; 156 106 struct ovpn_socket *sock; 107 + struct ovpn_peer *peer; 108 + 109 + /* encryption is happening asynchronously. 
This function will be 110 + * called later by the crypto callback with a proper return value 111 + */ 112 + if (unlikely(ret == -EINPROGRESS)) 113 + return; 114 + 115 + ks = ovpn_skb_cb(skb)->ks; 116 + peer = ovpn_skb_cb(skb)->peer; 117 + 118 + /* crypto is done, cleanup skb CB and its members */ 119 + kfree(ovpn_skb_cb(skb)->iv); 120 + kfree(ovpn_skb_cb(skb)->sg); 121 + aead_request_free(ovpn_skb_cb(skb)->req); 157 122 158 123 if (unlikely(ret < 0)) 159 124 goto err; ··· 215 110 err: 216 111 if (unlikely(skb)) 217 112 dev_dstats_tx_dropped(peer->ovpn->dev); 218 - ovpn_peer_put(peer); 113 + if (likely(peer)) 114 + ovpn_peer_put(peer); 115 + if (likely(ks)) 116 + ovpn_crypto_key_slot_put(ks); 219 117 kfree_skb(skb); 220 118 } 221 119 222 120 static bool ovpn_encrypt_one(struct ovpn_peer *peer, struct sk_buff *skb) 223 121 { 224 - ovpn_skb_cb(skb)->peer = peer; 122 + struct ovpn_crypto_key_slot *ks; 123 + 124 + /* get primary key to be used for encrypting data */ 125 + ks = ovpn_crypto_key_slot_primary(&peer->crypto); 126 + if (unlikely(!ks)) 127 + return false; 225 128 226 129 /* take a reference to the peer because the crypto code may run async. 227 130 * ovpn_encrypt_post() will release it upon completion 228 131 */ 229 132 if (unlikely(!ovpn_peer_hold(peer))) { 230 133 DEBUG_NET_WARN_ON_ONCE(1); 134 + ovpn_crypto_key_slot_put(ks); 231 135 return false; 232 136 } 233 137 234 - ovpn_encrypt_post(skb, 0); 138 + memset(ovpn_skb_cb(skb), 0, sizeof(struct ovpn_cb)); 139 + ovpn_encrypt_post(skb, ovpn_aead_encrypt(peer, ks, skb)); 235 140 return true; 236 141 } 237 142
+3
drivers/net/ovpn/io.h
··· 23 23 24 24 void ovpn_recv(struct ovpn_peer *peer, struct sk_buff *skb); 25 25 26 + void ovpn_encrypt_post(void *data, int ret); 27 + void ovpn_decrypt_post(void *data, int ret); 28 + 26 29 #endif /* _NET_OVPN_OVPN_H_ */
+29
drivers/net/ovpn/peer.c
··· 12 12 13 13 #include "ovpnpriv.h" 14 14 #include "bind.h" 15 + #include "pktid.h" 16 + #include "crypto.h" 15 17 #include "io.h" 16 18 #include "main.h" 17 19 #include "netlink.h" ··· 58 56 peer->vpn_addrs.ipv6 = in6addr_any; 59 57 60 58 RCU_INIT_POINTER(peer->bind, NULL); 59 + ovpn_crypto_state_init(&peer->crypto); 61 60 spin_lock_init(&peer->lock); 62 61 kref_init(&peer->refcount); 63 62 ··· 97 94 */ 98 95 static void ovpn_peer_release(struct ovpn_peer *peer) 99 96 { 97 + ovpn_crypto_state_release(&peer->crypto); 98 + spin_lock_bh(&peer->lock); 100 99 ovpn_bind_reset(peer, NULL); 100 + spin_unlock_bh(&peer->lock); 101 101 call_rcu(&peer->rcu, ovpn_peer_release_rcu); 102 102 netdev_put(peer->ovpn->dev, &peer->dev_tracker); 103 103 } ··· 330 324 } 331 325 332 326 return peer; 327 + } 328 + 329 + /** 330 + * ovpn_peer_check_by_src - check that skb source is routed via peer 331 + * @ovpn: the openvpn instance to search 332 + * @skb: the packet to extract source address from 333 + * @peer: the peer to check against the source address 334 + * 335 + * Return: true if the peer is matching or false otherwise 336 + */ 337 + bool ovpn_peer_check_by_src(struct ovpn_priv *ovpn, struct sk_buff *skb, 338 + struct ovpn_peer *peer) 339 + { 340 + bool match = false; 341 + 342 + if (ovpn->mode == OVPN_MODE_P2P) { 343 + /* in P2P mode, no matter the destination, packets are always 344 + * sent to the single peer listening on the other side 345 + */ 346 + match = (peer == rcu_access_pointer(ovpn->peer)); 347 + } 348 + 349 + return match; 333 350 } 334 351 335 352 /**
+5
drivers/net/ovpn/peer.h
··· 12 12 13 13 #include <net/dst_cache.h> 14 14 15 + #include "crypto.h" 15 16 #include "socket.h" 16 17 17 18 /** ··· 24 23 * @vpn_addrs.ipv4: IPv4 assigned to peer on the tunnel 25 24 * @vpn_addrs.ipv6: IPv6 assigned to peer on the tunnel 26 25 * @sock: the socket being used to talk to this peer 26 + * @crypto: the crypto configuration (ciphers, keys, etc..) 27 27 * @dst_cache: cache for dst_entry used to send to peer 28 28 * @bind: remote peer binding 29 29 * @delete_reason: why peer was deleted (i.e. timeout, transport error, ..) ··· 42 40 struct in6_addr ipv6; 43 41 } vpn_addrs; 44 42 struct ovpn_socket __rcu *sock; 43 + struct ovpn_crypto_state crypto; 45 44 struct dst_cache dst_cache; 46 45 struct ovpn_bind __rcu *bind; 47 46 enum ovpn_del_peer_reason delete_reason; ··· 85 82 struct ovpn_peer *ovpn_peer_get_by_id(struct ovpn_priv *ovpn, u32 peer_id); 86 83 struct ovpn_peer *ovpn_peer_get_by_dst(struct ovpn_priv *ovpn, 87 84 struct sk_buff *skb); 85 + bool ovpn_peer_check_by_src(struct ovpn_priv *ovpn, struct sk_buff *skb, 86 + struct ovpn_peer *peer); 88 87 89 88 #endif /* _NET_OVPN_OVPNPEER_H_ */
+129
drivers/net/ovpn/pktid.c
// SPDX-License-Identifier: GPL-2.0
/* OpenVPN data channel offload
 *
 * Copyright (C) 2020-2025 OpenVPN, Inc.
 *
 * Author: Antonio Quartulli <antonio@openvpn.net>
 *	   James Yonan <james@openvpn.net>
 */

#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/types.h>

#include "ovpnpriv.h"
#include "main.h"
#include "pktid.h"

/* Initialize the transmit packet-ID state.
 *
 * The sequence starts at 1 because ID 0 is reserved as invalid on the
 * wire (ovpn_pktid_recv() rejects it).
 */
void ovpn_pktid_xmit_init(struct ovpn_pktid_xmit *pid)
{
	atomic_set(&pid->seq_num, 1);
}

/* Zero the receive replay-protection state and (re)initialize its lock */
void ovpn_pktid_recv_init(struct ovpn_pktid_recv *pr)
{
	memset(pr, 0, sizeof(*pr));
	spin_lock_init(&pr->lock);
}

/* Packet replay detection.
 * Allows ID backtrack of up to REPLAY_WINDOW_SIZE - 1.
 *
 * Validates (pkt_id, pkt_time) against the sliding-window state in @pr
 * and records the ID as seen on success.
 *
 * Return: 0 if the packet ID is acceptable (new), -EINVAL if it is zero,
 * a replay, or outside the window/floor, -ETIME if pkt_time moved
 * backwards.
 */
int ovpn_pktid_recv(struct ovpn_pktid_recv *pr, u32 pkt_id, u32 pkt_time)
{
	const unsigned long now = jiffies;
	int ret;

	/* ID must not be zero */
	if (unlikely(pkt_id == 0))
		return -EINVAL;

	spin_lock_bh(&pr->lock);

	/* expire backtracks at or below pr->id after PKTID_RECV_EXPIRE time */
	if (unlikely(time_after_eq(now, pr->expire)))
		pr->id_floor = pr->id;

	/* time changed? */
	if (unlikely(pkt_time != pr->time)) {
		if (pkt_time > pr->time) {
			/* time moved forward, accept and restart the window */
			pr->base = 0;
			pr->extent = 0;
			pr->id = 0;
			pr->time = pkt_time;
			pr->id_floor = 0;
		} else {
			/* time moved backward, reject */
			ret = -ETIME;
			goto out;
		}
	}

	if (likely(pkt_id == pr->id + 1)) {
		/* well-formed ID sequence (incremented by 1) */
		pr->base = REPLAY_INDEX(pr->base, -1);
		pr->history[pr->base / 8] |= (1 << (pr->base % 8));
		if (pr->extent < REPLAY_WINDOW_SIZE)
			++pr->extent;
		pr->id = pkt_id;
	} else if (pkt_id > pr->id) {
		/* ID jumped forward by more than one */
		const unsigned int delta = pkt_id - pr->id;

		if (delta < REPLAY_WINDOW_SIZE) {
			unsigned int i;

			/* slide the window forward and mark the new ID */
			pr->base = REPLAY_INDEX(pr->base, -delta);
			pr->history[pr->base / 8] |= (1 << (pr->base % 8));
			pr->extent += delta;
			if (pr->extent > REPLAY_WINDOW_SIZE)
				pr->extent = REPLAY_WINDOW_SIZE;
			/* clear the bits of the IDs that were skipped over */
			for (i = 1; i < delta; ++i) {
				unsigned int newb = REPLAY_INDEX(pr->base, i);

				pr->history[newb / 8] &= ~BIT(newb % 8);
			}
		} else {
			/* jumped beyond the window: reset and mark only
			 * the current ID as seen
			 */
			pr->base = 0;
			pr->extent = REPLAY_WINDOW_SIZE;
			memset(pr->history, 0, sizeof(pr->history));
			pr->history[0] = 1;
		}
		pr->id = pkt_id;
	} else {
		/* ID backtrack */
		const unsigned int delta = pr->id - pkt_id;

		/* record the largest backtrack observed so far */
		if (delta > pr->max_backtrack)
			pr->max_backtrack = delta;
		if (delta < pr->extent) {
			if (pkt_id > pr->id_floor) {
				const unsigned int ri = REPLAY_INDEX(pr->base,
								     delta);
				u8 *p = &pr->history[ri / 8];
				const u8 mask = (1 << (ri % 8));

				/* already seen: replay */
				if (*p & mask) {
					ret = -EINVAL;
					goto out;
				}
				*p |= mask;
			} else {
				/* at or below the backtrack floor */
				ret = -EINVAL;
				goto out;
			}
		} else {
			/* too old: outside the tracked window */
			ret = -EINVAL;
			goto out;
		}
	}

	pr->expire = now + PKTID_RECV_EXPIRE;
	ret = 0;

out:
	spin_unlock_bh(&pr->lock);
	return ret;
}
+86
drivers/net/ovpn/pktid.h
/* SPDX-License-Identifier: GPL-2.0-only */
/* OpenVPN data channel offload
 *
 * Copyright (C) 2020-2025 OpenVPN, Inc.
 *
 * Author: Antonio Quartulli <antonio@openvpn.net>
 *	   James Yonan <james@openvpn.net>
 */

#ifndef _NET_OVPN_OVPNPKTID_H_
#define _NET_OVPN_OVPNPKTID_H_

#include "proto.h"

/* If no packets received for this length of time, set a backtrack floor
 * at highest received packet ID thus far.
 */
#define PKTID_RECV_EXPIRE (30 * HZ)

/* Packet-ID state for transmitter */
struct ovpn_pktid_xmit {
	/* next ID to send; starts at 1, never wraps back to 0 */
	atomic_t seq_num;
};

/* replay window sizing in bytes = 2^REPLAY_WINDOW_ORDER */
#define REPLAY_WINDOW_ORDER 8

#define REPLAY_WINDOW_BYTES BIT(REPLAY_WINDOW_ORDER)
#define REPLAY_WINDOW_SIZE (REPLAY_WINDOW_BYTES * 8)
/* ring-buffer index into the window, modulo REPLAY_WINDOW_SIZE */
#define REPLAY_INDEX(base, i) (((base) + (i)) & (REPLAY_WINDOW_SIZE - 1))

/* Packet-ID state for receiver.
 * Other than lock member, can be zeroed to initialize.
 */
struct ovpn_pktid_recv {
	/* "sliding window" bitmask of recent packet IDs received */
	u8 history[REPLAY_WINDOW_BYTES];
	/* bit position of deque base in history */
	unsigned int base;
	/* extent (in bits) of deque in history */
	unsigned int extent;
	/* expiration of history in jiffies */
	unsigned long expire;
	/* highest sequence number received */
	u32 id;
	/* highest time stamp received */
	u32 time;
	/* we will only accept backtrack IDs > id_floor */
	u32 id_floor;
	/* largest ID backtrack observed so far */
	unsigned int max_backtrack;
	/* protects the entire packet-ID state */
	spinlock_t lock;
};

/* Get the next packet ID for xmit.
 *
 * Return: 0 on success (ID stored in *pktid), -ERANGE once the 32-bit
 * ID space is exhausted.
 */
static inline int ovpn_pktid_xmit_next(struct ovpn_pktid_xmit *pid, u32 *pktid)
{
	const u32 seq_num = atomic_fetch_add_unless(&pid->seq_num, 1, 0);
	/* when the 32bit space is over, we return an error because the packet
	 * ID is used to create the cipher IV and we do not want to reuse the
	 * same value more than once
	 */
	if (unlikely(!seq_num))
		return -ERANGE;

	*pktid = seq_num;

	return 0;
}

/* Write 12-byte AEAD IV to dest: 4-byte big-endian packet ID followed by
 * the OVPN_NONCE_TAIL_SIZE-byte nonce tail @nt.
 *
 * NOTE(review): the 32-bit store assumes @dest is suitably aligned —
 * confirm callers guarantee this.
 */
static inline void ovpn_pktid_aead_write(const u32 pktid,
					 const u8 nt[],
					 unsigned char *dest)
{
	*(__force __be32 *)(dest) = htonl(pktid);
	BUILD_BUG_ON(4 + OVPN_NONCE_TAIL_SIZE != OVPN_NONCE_SIZE);
	memcpy(dest + 4, nt, OVPN_NONCE_TAIL_SIZE);
}

void ovpn_pktid_xmit_init(struct ovpn_pktid_xmit *pid);
void ovpn_pktid_recv_init(struct ovpn_pktid_recv *pr);

int ovpn_pktid_recv(struct ovpn_pktid_recv *pr, u32 pkt_id, u32 pkt_time);

#endif /* _NET_OVPN_OVPNPKTID_H_ */
+32
drivers/net/ovpn/proto.h
	return FIELD_GET(OVPN_OPCODE_PEERID_MASK, opcode);
}

/**
 * ovpn_key_id_from_skb - extract key ID from the skb head
 * @skb: the packet to extract the key ID code from
 *
 * Note: this function assumes that the skb head was pulled enough
 * to access the first 4 bytes.
 *
 * NOTE(review): the 32-bit load from skb->data may be unaligned on some
 * architectures — confirm callers guarantee alignment of the header.
 *
 * Return: the key ID
 */
static inline u8 ovpn_key_id_from_skb(const struct sk_buff *skb)
{
	u32 opcode = be32_to_cpu(*(__be32 *)skb->data);

	return FIELD_GET(OVPN_OPCODE_KEYID_MASK, opcode);
}

/**
 * ovpn_opcode_compose - combine OP code, key ID and peer ID to wire format
 * @opcode: the OP code
 * @key_id: the key ID
 * @peer_id: the peer ID
 *
 * Return: a 4 bytes integer obtained combining all input values following the
 * OpenVPN wire format. This integer can then be written to the packet header.
 */
static inline u32 ovpn_opcode_compose(u8 opcode, u8 key_id, u32 peer_id)
{
	return FIELD_PREP(OVPN_OPCODE_PKTTYPE_MASK, opcode) |
	       FIELD_PREP(OVPN_OPCODE_KEYID_MASK, key_id) |
	       FIELD_PREP(OVPN_OPCODE_PEERID_MASK, peer_id);
}

#endif /* _NET_OVPN_OVPNPROTO_H_ */
+5
drivers/net/ovpn/skb.h
/* ovpn-private per-packet state carried in skb->cb */
struct ovpn_cb {
	/* peer this packet belongs to */
	struct ovpn_peer *peer;
	/* key slot used for this packet — presumably set by the
	 * encrypt/decrypt path; verify against crypto_aead.c
	 */
	struct ovpn_crypto_key_slot *ks;
	/* AEAD request for the in-flight crypto operation */
	struct aead_request *req;
	/* scatterlist describing the packet data for the AEAD op */
	struct scatterlist *sg;
	/* IV buffer for the AEAD op */
	u8 *iv;
	/* offset of the payload within the skb — TODO confirm reference
	 * point (head vs. data) from the crypto code
	 */
	unsigned int payload_offset;
};

static inline struct ovpn_cb *ovpn_skb_cb(struct sk_buff *skb)