/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tls.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <net/ip6_checksum.h>

#include "tls.h"

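/* Point @sg at the walk's current position within its scatterlist entry
 * and chain it to the entries that follow, so the remaining data can be
 * handed to the AEAD without copying it out of the original buffers.
 */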
static void chain_to_walk(struct scatterlist *sg, struct scatter_walk *walk)
{
	struct scatterlist *src = walk->sg;
	int diff = walk->offset - src->offset;

	sg_set_page(sg, sg_page(src),
		    src->length - diff, walk->offset);

	scatterwalk_crypto_chain(sg, sg_next(src), 2);
}

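/* Re-encrypt a single TLS record: copy the TLS header and explicit IV from
 * @in to @out, rebuild the AAD and nonce for @rcd_sn, then encrypt the
 * record payload from @in into @out. @in_len is decremented by the amount
 * of input consumed.
 */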
static int tls_enc_record(struct aead_request *aead_req,
			  struct crypto_aead *aead, char *aad,
			  char *iv, __be64 rcd_sn,
			  struct scatter_walk *in,
			  struct scatter_walk *out, int *in_len,
			  struct tls_prot_info *prot)
{
	unsigned char buf[TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE];
	struct scatterlist sg_in[3];
	struct scatterlist sg_out[3];
	u16 len;
	int rc;

	len = min_t(int, *in_len, ARRAY_SIZE(buf));

	scatterwalk_copychunks(buf, in, len, 0);
	scatterwalk_copychunks(buf, out, len, 1);

	*in_len -= len;
	if (!*in_len)
		return 0;

	scatterwalk_pagedone(in, 0, 1);
	scatterwalk_pagedone(out, 1, 1);

	len = buf[4] | (buf[3] << 8);
	len -= TLS_CIPHER_AES_GCM_128_IV_SIZE;

	tls_make_aad(aad, len - TLS_CIPHER_AES_GCM_128_TAG_SIZE,
		     (char *)&rcd_sn, buf[0], prot);

	memcpy(iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, buf + TLS_HEADER_SIZE,
	       TLS_CIPHER_AES_GCM_128_IV_SIZE);

	sg_init_table(sg_in, ARRAY_SIZE(sg_in));
	sg_init_table(sg_out, ARRAY_SIZE(sg_out));
	sg_set_buf(sg_in, aad, TLS_AAD_SPACE_SIZE);
	sg_set_buf(sg_out, aad, TLS_AAD_SPACE_SIZE);
	chain_to_walk(sg_in + 1, in);
	chain_to_walk(sg_out + 1, out);

	*in_len -= len;
	if (*in_len < 0) {
		*in_len += TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		/* The input buffer doesn't contain the entire record;
		 * trim len accordingly. The resulting authentication tag
		 * will contain garbage, but we don't care, so we won't
		 * include any of it in the output skb.
		 * Note that we assume the output buffer length
		 * is larger than the input buffer length + tag size.
		 */
		if (*in_len < 0)
			len += *in_len;

		*in_len = 0;
	}

	if (*in_len) {
		scatterwalk_copychunks(NULL, in, len, 2);
		scatterwalk_pagedone(in, 0, 1);
		scatterwalk_copychunks(NULL, out, len, 2);
		scatterwalk_pagedone(out, 1, 1);
	}

	len -= TLS_CIPHER_AES_GCM_128_TAG_SIZE;
	aead_request_set_crypt(aead_req, sg_in, sg_out, len, iv);

	rc = crypto_aead_encrypt(aead_req);

	return rc;
}

static void tls_init_aead_request(struct aead_request *aead_req,
				  struct crypto_aead *aead)
{
	aead_request_set_tfm(aead_req, aead);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
}

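/* Allocate an aead_request sized for @aead's private context and preset
 * the transform and the TLS AAD length on it.
 */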
static struct aead_request *tls_alloc_aead_request(struct crypto_aead *aead,
						   gfp_t flags)
{
	unsigned int req_size = sizeof(struct aead_request) +
		crypto_aead_reqsize(aead);
	struct aead_request *aead_req;

	aead_req = kzalloc(req_size, flags);
	if (aead_req)
		tls_init_aead_request(aead_req, aead);
	return aead_req;
}

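/* Re-encrypt consecutive TLS records from @sg_in into @sg_out, bumping the
 * record sequence number for each record, until @len bytes have been
 * consumed or an error occurs.
 */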
static int tls_enc_records(struct aead_request *aead_req,
			   struct crypto_aead *aead, struct scatterlist *sg_in,
			   struct scatterlist *sg_out, char *aad, char *iv,
			   u64 rcd_sn, int len, struct tls_prot_info *prot)
{
	struct scatter_walk out, in;
	int rc;

	scatterwalk_start(&in, sg_in);
	scatterwalk_start(&out, sg_out);

	do {
		rc = tls_enc_record(aead_req, aead, aad, iv,
				    cpu_to_be64(rcd_sn), &in, &out, &len, prot);
		rcd_sn++;

	} while (rc == 0 && len);

	scatterwalk_done(&in, 0, 0);
	scatterwalk_done(&out, 1, 0);

	return rc;
}

/* Can't use icsk->icsk_af_ops->send_check here because the IP addresses
 * might have been changed by NAT.
 */
static void update_chksum(struct sk_buff *skb, int headln)
{
	struct tcphdr *th = tcp_hdr(skb);
	int datalen = skb->len - headln;
	const struct ipv6hdr *ipv6h;
	const struct iphdr *iph;

	/* We only changed the payload, so if we are using CHECKSUM_PARTIAL
	 * we don't need to update anything.
	 */
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
		return;

	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);

	if (skb->sk->sk_family == AF_INET6) {
		ipv6h = ipv6_hdr(skb);
		th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
					     datalen, IPPROTO_TCP, 0);
	} else {
		iph = ip_hdr(skb);
		th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, datalen,
					       IPPROTO_TCP, 0);
	}
}

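/* Finish the fallback skb: copy the original headers and metadata into
 * @nskb, transfer socket ownership from @skb, fix up the TCP checksum and
 * account the truesize difference against sk_wmem_alloc.
 */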
static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
{
	struct sock *sk = skb->sk;
	int delta;

	skb_copy_header(nskb, skb);

	skb_put(nskb, skb->len);
	memcpy(nskb->data, skb->data, headln);

	nskb->destructor = skb->destructor;
	nskb->sk = sk;
	skb->destructor = NULL;
	skb->sk = NULL;

	update_chksum(nskb, headln);

	/* sock_efree means the skb must have gone through skb_orphan_partial() */
	if (nskb->destructor == sock_efree)
		return;

	delta = nskb->truesize - skb->truesize;
	if (likely(delta < 0))
		WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
	else if (delta)
		refcount_add(delta, &sk->sk_wmem_alloc);
}

/* This function may be called after the user socket is already
 * closed, so make sure we don't use anything freed during
 * tls_sk_proto_close here.
 */

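/* Build the input scatterlist: look up the TLS record covering the packet's
 * TCP sequence number, map the record frags that precede the packet
 * (*sync_size bytes across *resync_sgs entries, taking a page reference on
 * each) and then map the packet's own payload.
 */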
static int fill_sg_in(struct scatterlist *sg_in,
		      struct sk_buff *skb,
		      struct tls_offload_context_tx *ctx,
		      u64 *rcd_sn,
		      s32 *sync_size,
		      int *resync_sgs)
{
	int tcp_payload_offset = skb_tcp_all_headers(skb);
	int payload_len = skb->len - tcp_payload_offset;
	u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
	struct tls_record_info *record;
	unsigned long flags;
	int remaining;
	int i;

	spin_lock_irqsave(&ctx->lock, flags);
	record = tls_get_record(ctx, tcp_seq, rcd_sn);
	if (!record) {
		spin_unlock_irqrestore(&ctx->lock, flags);
		return -EINVAL;
	}

	*sync_size = tcp_seq - tls_record_start_seq(record);
	if (*sync_size < 0) {
		int is_start_marker = tls_record_is_start_marker(record);

		spin_unlock_irqrestore(&ctx->lock, flags);
		/* This should only occur if the relevant record was
		 * already acked. In that case it should be ok
		 * to drop the packet and avoid retransmission.
		 *
		 * There is a corner case where the packet contains
		 * both an acked and a non-acked record.
		 * We currently don't handle that case and rely
		 * on TCP to retransmit a packet that doesn't contain
		 * already acked payload.
		 */
		if (!is_start_marker)
			*sync_size = 0;
		return -EINVAL;
	}

	remaining = *sync_size;
	for (i = 0; remaining > 0; i++) {
		skb_frag_t *frag = &record->frags[i];

		__skb_frag_ref(frag);
		sg_set_page(sg_in + i, skb_frag_page(frag),
			    skb_frag_size(frag), skb_frag_off(frag));

		remaining -= skb_frag_size(frag);

		if (remaining < 0)
			sg_in[i].length += remaining;
	}
	*resync_sgs = i;

	spin_unlock_irqrestore(&ctx->lock, flags);
	if (skb_to_sgvec(skb, &sg_in[i], tcp_payload_offset, payload_len) < 0)
		return -EINVAL;

	return 0;
}

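/* Build the output scatterlist: bytes that only re-sync the cipher state go
 * into @dummy_buf, the packet's payload is written into @nskb, and the
 * authentication tag lands after the sync data in the scratch buffer.
 */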
static void fill_sg_out(struct scatterlist sg_out[3], void *buf,
			struct tls_context *tls_ctx,
			struct sk_buff *nskb,
			int tcp_payload_offset,
			int payload_len,
			int sync_size,
			void *dummy_buf)
{
	sg_set_buf(&sg_out[0], dummy_buf, sync_size);
	sg_set_buf(&sg_out[1], nskb->data + tcp_payload_offset, payload_len);
	/* Add room for authentication tag produced by crypto */
	dummy_buf += sync_size;
	sg_set_buf(&sg_out[2], dummy_buf, TLS_CIPHER_AES_GCM_128_TAG_SIZE);
}

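/* Allocate the fallback skb and the scratch buffer (salt/IV, AAD, sync data
 * and tag), re-encrypt the records covering this packet in software and
 * return the resulting skb, or NULL on failure.
 */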
static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
				   struct scatterlist sg_out[3],
				   struct scatterlist *sg_in,
				   struct sk_buff *skb,
				   s32 sync_size, u64 rcd_sn)
{
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	int tcp_payload_offset = skb_tcp_all_headers(skb);
	int payload_len = skb->len - tcp_payload_offset;
	void *buf, *iv, *aad, *dummy_buf;
	struct aead_request *aead_req;
	struct sk_buff *nskb = NULL;
	int buf_len;

	aead_req = tls_alloc_aead_request(ctx->aead_send, GFP_ATOMIC);
	if (!aead_req)
		return NULL;

	buf_len = TLS_CIPHER_AES_GCM_128_SALT_SIZE +
		  TLS_CIPHER_AES_GCM_128_IV_SIZE +
		  TLS_AAD_SPACE_SIZE +
		  sync_size +
		  TLS_CIPHER_AES_GCM_128_TAG_SIZE;
	buf = kmalloc(buf_len, GFP_ATOMIC);
	if (!buf)
		goto free_req;

	iv = buf;
	memcpy(iv, tls_ctx->crypto_send.aes_gcm_128.salt,
	       TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	aad = buf + TLS_CIPHER_AES_GCM_128_SALT_SIZE +
	      TLS_CIPHER_AES_GCM_128_IV_SIZE;
	dummy_buf = aad + TLS_AAD_SPACE_SIZE;

	nskb = alloc_skb(skb_headroom(skb) + skb->len, GFP_ATOMIC);
	if (!nskb)
		goto free_buf;

	skb_reserve(nskb, skb_headroom(skb));

	fill_sg_out(sg_out, buf, tls_ctx, nskb, tcp_payload_offset,
		    payload_len, sync_size, dummy_buf);

	if (tls_enc_records(aead_req, ctx->aead_send, sg_in, sg_out, aad, iv,
			    rcd_sn, sync_size + payload_len,
			    &tls_ctx->prot_info) < 0)
		goto free_nskb;

	complete_skb(nskb, skb, tcp_payload_offset);

	/* validate_xmit_skb_list assumes that if the skb wasn't segmented,
	 * nskb->prev will point to the skb itself
	 */
	nskb->prev = nskb;

free_buf:
	kfree(buf);
free_req:
	kfree(aead_req);
	return nskb;
free_nskb:
	kfree_skb(nskb);
	nskb = NULL;
	goto free_buf;
}

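/* Software fallback for a packet the device cannot encrypt: rebuild the
 * ciphertext from the TLS record queue and the skb's payload into a newly
 * allocated skb, then consume the original (or drop it on failure).
 */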
static struct sk_buff *tls_sw_fallback(struct sock *sk, struct sk_buff *skb)
{
	int tcp_payload_offset = skb_tcp_all_headers(skb);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	int payload_len = skb->len - tcp_payload_offset;
	struct scatterlist *sg_in, sg_out[3];
	struct sk_buff *nskb = NULL;
	int sg_in_max_elements;
	int resync_sgs = 0;
	s32 sync_size = 0;
	u64 rcd_sn;

	/* worst case is:
	 * MAX_SKB_FRAGS in tls_record_info
	 * MAX_SKB_FRAGS + 1 in SKB head and frags.
	 */
	sg_in_max_elements = 2 * MAX_SKB_FRAGS + 1;

	if (!payload_len)
		return skb;

	sg_in = kmalloc_array(sg_in_max_elements, sizeof(*sg_in), GFP_ATOMIC);
	if (!sg_in)
		goto free_orig;

	sg_init_table(sg_in, sg_in_max_elements);
	sg_init_table(sg_out, ARRAY_SIZE(sg_out));

	if (fill_sg_in(sg_in, skb, ctx, &rcd_sn, &sync_size, &resync_sgs)) {
		/* bypass packets sent before the kernel TLS socket option was set */
		if (sync_size < 0 && payload_len <= -sync_size)
			nskb = skb_get(skb);
		goto put_sg;
	}

	nskb = tls_enc_skb(tls_ctx, sg_out, sg_in, skb, sync_size, rcd_sn);

put_sg:
	while (resync_sgs)
		put_page(sg_page(&sg_in[--resync_sgs]));
	kfree(sg_in);
free_orig:
	if (nskb)
		consume_skb(skb);
	else
		kfree_skb(skb);
	return nskb;
}

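/* Transmit-time check for device offload: packets leaving through the
 * netdev that holds the TLS offload state (or its bonding master) go out
 * unmodified, anything else is encrypted by the software fallback.
 */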
struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
				      struct net_device *dev,
				      struct sk_buff *skb)
{
	if (dev == rcu_dereference_bh(tls_get_ctx(sk)->netdev) ||
	    netif_is_bond_master(dev))
		return skb;

	return tls_sw_fallback(sk, skb);
}
EXPORT_SYMBOL_GPL(tls_validate_xmit_skb);

struct sk_buff *tls_validate_xmit_skb_sw(struct sock *sk,
					 struct net_device *dev,
					 struct sk_buff *skb)
{
	return tls_sw_fallback(sk, skb);
}

struct sk_buff *tls_encrypt_skb(struct sk_buff *skb)
{
	return tls_sw_fallback(skb->sk, skb);
}
EXPORT_SYMBOL_GPL(tls_encrypt_skb);

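/* Set up the software fallback: allocate a gcm(aes) AEAD for the TX offload
 * context and program it with the key and authentication tag size from
 * @crypto_info.
 */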
int tls_sw_fallback_init(struct sock *sk,
			 struct tls_offload_context_tx *offload_ctx,
			 struct tls_crypto_info *crypto_info)
{
	const u8 *key;
	int rc;

	offload_ctx->aead_send =
	    crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(offload_ctx->aead_send)) {
		rc = PTR_ERR(offload_ctx->aead_send);
		pr_err_ratelimited("crypto_alloc_aead failed rc=%d\n", rc);
		offload_ctx->aead_send = NULL;
		goto err_out;
	}

	key = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->key;

	rc = crypto_aead_setkey(offload_ctx->aead_send, key,
				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(offload_ctx->aead_send,
				     TLS_CIPHER_AES_GCM_128_TAG_SIZE);
	if (rc)
		goto free_aead;

	return 0;
free_aead:
	crypto_free_aead(offload_ctx->aead_send);
err_out:
	return rc;
}