/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <crypto/aead.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/inet_connection_sock.h>
#include <net/tcp.h>
#include <net/tls.h>
#include <linux/skbuff_ref.h>

#include "tls.h"
#include "trace.h"

/* device_offload_lock is used to synchronize tls_dev_add
 * against NETDEV_DOWN notifications.
 */
static DECLARE_RWSEM(device_offload_lock);

static struct workqueue_struct *destruct_wq __read_mostly;

static LIST_HEAD(tls_device_list);
static LIST_HEAD(tls_device_down_list);
static DEFINE_SPINLOCK(tls_device_lock);

static struct page *dummy_page;

static void tls_device_free_ctx(struct tls_context *ctx)
{
	if (ctx->tx_conf == TLS_HW)
		kfree(tls_offload_ctx_tx(ctx));

	if (ctx->rx_conf == TLS_HW)
		kfree(tls_offload_ctx_rx(ctx));

	tls_ctx_free(NULL, ctx);
}

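/* TX context teardown is deferred to a work item on destruct_wq: the final
 * tls_dev_del call and kfree of the offload context then run in process
 * context rather than directly from the socket destructor (presumably so
 * that drivers may sleep in tls_dev_del).
 */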
static void tls_device_tx_del_task(struct work_struct *work)
{
	struct tls_offload_context_tx *offload_ctx =
		container_of(work, struct tls_offload_context_tx, destruct_work);
	struct tls_context *ctx = offload_ctx->ctx;
	struct net_device *netdev;

	/* Safe, because this is the destroy flow, refcount is 0, so
	 * tls_device_down can't store this field in parallel.
	 */
	netdev = rcu_dereference_protected(ctx->netdev,
					   !refcount_read(&ctx->refcount));

	netdev->tlsdev_ops->tls_dev_del(netdev, ctx, TLS_OFFLOAD_CTX_DIR_TX);
	dev_put(netdev);
	ctx->netdev = NULL;
	tls_device_free_ctx(ctx);
}

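/* Drop the caller's reference on the TLS context. The last reference unlinks
 * the context from tls_device_list / tls_device_down_list; TX contexts still
 * bound to a netdev are handed to destruct_wq (see tls_device_tx_del_task
 * above), everything else is freed synchronously.
 */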
static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
{
	struct net_device *netdev;
	unsigned long flags;
	bool async_cleanup;

	spin_lock_irqsave(&tls_device_lock, flags);
	if (unlikely(!refcount_dec_and_test(&ctx->refcount))) {
		spin_unlock_irqrestore(&tls_device_lock, flags);
		return;
	}

	list_del(&ctx->list); /* Remove from tls_device_list / tls_device_down_list */

	/* Safe, because this is the destroy flow, refcount is 0, so
	 * tls_device_down can't store this field in parallel.
	 */
	netdev = rcu_dereference_protected(ctx->netdev,
					   !refcount_read(&ctx->refcount));

	async_cleanup = netdev && ctx->tx_conf == TLS_HW;
	if (async_cleanup) {
		struct tls_offload_context_tx *offload_ctx = tls_offload_ctx_tx(ctx);

		/* queue_work inside the spinlock
		 * to make sure tls_device_down waits for that work.
		 */
		queue_work(destruct_wq, &offload_ctx->destruct_work);
	}
	spin_unlock_irqrestore(&tls_device_lock, flags);

	if (!async_cleanup)
		tls_device_free_ctx(ctx);
}

/* We assume that the socket is already connected */
static struct net_device *get_netdev_for_sock(struct sock *sk)
{
	struct dst_entry *dst = sk_dst_get(sk);
	struct net_device *netdev = NULL;

	if (likely(dst)) {
		netdev = netdev_sk_get_lowest_dev(dst->dev, sk);
		dev_hold(netdev);
	}

	dst_release(dst);

	return netdev;
}

static void destroy_record(struct tls_record_info *record)
{
	int i;

	for (i = 0; i < record->num_frags; i++)
		__skb_frag_unref(&record->frags[i], false);
	kfree(record);
}

static void delete_all_records(struct tls_offload_context_tx *offload_ctx)
{
	struct tls_record_info *info, *temp;

	list_for_each_entry_safe(info, temp, &offload_ctx->records_list, list) {
		list_del(&info->list);
		destroy_record(info);
	}

	offload_ctx->retransmit_hint = NULL;
}

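/* clean_acked_data hook, invoked with the latest acknowledged TCP sequence
 * number. Records that are now fully ACKed can never be retransmitted, so
 * they are unlinked and freed and unacked_record_sn is advanced past them.
 */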
static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_record_info *info, *temp;
	struct tls_offload_context_tx *ctx;
	u64 deleted_records = 0;
	unsigned long flags;

	if (!tls_ctx)
		return;

	ctx = tls_offload_ctx_tx(tls_ctx);

	spin_lock_irqsave(&ctx->lock, flags);
	info = ctx->retransmit_hint;
	if (info && !before(acked_seq, info->end_seq))
		ctx->retransmit_hint = NULL;

	list_for_each_entry_safe(info, temp, &ctx->records_list, list) {
		if (before(acked_seq, info->end_seq))
			break;
		list_del(&info->list);

		destroy_record(info);
		deleted_records++;
	}

	ctx->unacked_record_sn += deleted_records;
	spin_unlock_irqrestore(&ctx->lock, flags);
}

/* At this point, there should be no references on this
 * socket and no in-flight SKBs associated with this
 * socket, so it is safe to free all the resources.
 */
void tls_device_sk_destruct(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);

	tls_ctx->sk_destruct(sk);

	if (tls_ctx->tx_conf == TLS_HW) {
		if (ctx->open_record)
			destroy_record(ctx->open_record);
		delete_all_records(ctx);
		crypto_free_aead(ctx->aead_send);
		clean_acked_data_disable(inet_csk(sk));
	}

	tls_device_queue_ctx_destruction(tls_ctx);
}
EXPORT_SYMBOL_GPL(tls_device_sk_destruct);

void tls_device_free_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_free_partial_record(sk, tls_ctx);
}

void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	trace_tls_device_tx_resync_req(sk, got_seq, exp_seq);
	WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
}
EXPORT_SYMBOL_GPL(tls_offload_tx_resync_request);

static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
				 u32 seq)
{
	struct net_device *netdev;
	struct sk_buff *skb;
	int err = 0;
	u8 *rcd_sn;

	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	rcd_sn = tls_ctx->tx.rec_seq;

	trace_tls_device_tx_resync_send(sk, seq, rcd_sn);
	down_read(&device_offload_lock);
	netdev = rcu_dereference_protected(tls_ctx->netdev,
					   lockdep_is_held(&device_offload_lock));
	if (netdev)
		err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq,
							 rcd_sn,
							 TLS_OFFLOAD_CTX_DIR_TX);
	up_read(&device_offload_lock);
	if (err)
		return;

	clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
}

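/* Append @size bytes taken from @pfrag to the open record, growing the last
 * fragment when the new data is contiguous with it and otherwise starting a
 * new fragment (taking an extra page reference).
 */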
static void tls_append_frag(struct tls_record_info *record,
			    struct page_frag *pfrag,
			    int size)
{
	skb_frag_t *frag;

	frag = &record->frags[record->num_frags - 1];
	if (skb_frag_page(frag) == pfrag->page &&
	    skb_frag_off(frag) + skb_frag_size(frag) == pfrag->offset) {
		skb_frag_size_add(frag, size);
	} else {
		++frag;
		skb_frag_fill_page_desc(frag, pfrag->page, pfrag->offset,
					size);
		++record->num_frags;
		get_page(pfrag->page);
	}

	pfrag->offset += size;
	record->len += size;
}

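/* Close the open record: set its end TCP sequence number, publish it on
 * records_list for retransmission lookups, advance the record sequence
 * number, and hand the fragments to tls_push_sg() as a scatterlist.
 */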
static int tls_push_record(struct sock *sk,
			   struct tls_context *ctx,
			   struct tls_offload_context_tx *offload_ctx,
			   struct tls_record_info *record,
			   int flags)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	struct tcp_sock *tp = tcp_sk(sk);
	skb_frag_t *frag;
	int i;

	record->end_seq = tp->write_seq + record->len;
	list_add_tail_rcu(&record->list, &offload_ctx->records_list);
	offload_ctx->open_record = NULL;

	if (test_bit(TLS_TX_SYNC_SCHED, &ctx->flags))
		tls_device_resync_tx(sk, ctx, tp->write_seq);

	tls_advance_record_sn(sk, prot, &ctx->tx);

	for (i = 0; i < record->num_frags; i++) {
		frag = &record->frags[i];
		sg_unmark_end(&offload_ctx->sg_tx_data[i]);
		sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
			    skb_frag_size(frag), skb_frag_off(frag));
		sk_mem_charge(sk, skb_frag_size(frag));
		get_page(skb_frag_page(frag));
	}
	sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]);

	/* all ready, send */
	return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
}

static void tls_device_record_close(struct sock *sk,
				    struct tls_context *ctx,
				    struct tls_record_info *record,
				    struct page_frag *pfrag,
				    unsigned char record_type)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	struct page_frag dummy_tag_frag;

	/* append tag
	 * device will fill in the tag, we just need to append a placeholder
	 * use socket memory to improve coalescing (re-using a single buffer
	 * increases frag count)
	 * if we can't allocate memory now use the dummy page
	 */
	if (unlikely(pfrag->size - pfrag->offset < prot->tag_size) &&
	    !skb_page_frag_refill(prot->tag_size, pfrag, sk->sk_allocation)) {
		dummy_tag_frag.page = dummy_page;
		dummy_tag_frag.offset = 0;
		pfrag = &dummy_tag_frag;
	}
	tls_append_frag(record, pfrag, prot->tag_size);

	/* fill prepend */
	tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]),
			 record->len - prot->overhead_size,
			 record_type);
}

static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
				 struct page_frag *pfrag,
				 size_t prepend_size)
{
	struct tls_record_info *record;
	skb_frag_t *frag;

	record = kmalloc(sizeof(*record), GFP_KERNEL);
	if (!record)
		return -ENOMEM;

	frag = &record->frags[0];
	skb_frag_fill_page_desc(frag, pfrag->page, pfrag->offset,
				prepend_size);

	get_page(pfrag->page);
	pfrag->offset += prepend_size;

	record->num_frags = 1;
	record->len = prepend_size;
	offload_ctx->open_record = record;
	return 0;
}

static int tls_do_allocation(struct sock *sk,
			     struct tls_offload_context_tx *offload_ctx,
			     struct page_frag *pfrag,
			     size_t prepend_size)
{
	int ret;

	if (!offload_ctx->open_record) {
		if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
						   sk->sk_allocation))) {
			READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk);
			sk_stream_moderate_sndbuf(sk);
			return -ENOMEM;
		}

		ret = tls_create_new_record(offload_ctx, pfrag, prepend_size);
		if (ret)
			return ret;

		if (pfrag->size > pfrag->offset)
			return 0;
	}

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOMEM;

	return 0;
}

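/* Copy payload from the iterator into the record's buffer. The bulk of the
 * copy is done with the nocache variant, presumably to avoid polluting CPU
 * caches with data that will mainly be read by the NIC; short unaligned head
 * and tail pieces are copied normally to keep the nocache copy aligned to
 * SMP_CACHE_BYTES.
 */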
static int tls_device_copy_data(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t pre_copy, nocache;

	pre_copy = ~((unsigned long)addr - 1) & (SMP_CACHE_BYTES - 1);
	if (pre_copy) {
		pre_copy = min(pre_copy, bytes);
		if (copy_from_iter(addr, pre_copy, i) != pre_copy)
			return -EFAULT;
		bytes -= pre_copy;
		addr += pre_copy;
	}

	nocache = round_down(bytes, SMP_CACHE_BYTES);
	if (copy_from_iter_nocache(addr, nocache, i) != nocache)
		return -EFAULT;
	bytes -= nocache;
	addr += nocache;

	if (bytes && copy_from_iter(addr, bytes, i) != bytes)
		return -EFAULT;

	return 0;
}

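/* Main device-offload TX path. Data is accumulated into records of at most
 * TLS_MAX_PAYLOAD_SIZE bytes: MSG_SPLICE_PAGES data is referenced in place,
 * other data is copied into the socket's page frag. A record is closed and
 * pushed once it is full, runs out of fragment slots, or the caller's data
 * is exhausted (unless MSG_MORE keeps it open).
 */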
static int tls_push_data(struct sock *sk,
			 struct iov_iter *iter,
			 size_t size, int flags,
			 unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	struct tls_record_info *record;
	int tls_push_record_flags;
	struct page_frag *pfrag;
	size_t orig_size = size;
	u32 max_open_record_len;
	bool more = false;
	bool done = false;
	int copy, rc = 0;
	long timeo;

	if (flags &
	    ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
	      MSG_SPLICE_PAGES | MSG_EOR))
		return -EOPNOTSUPP;

	if ((flags & (MSG_MORE | MSG_EOR)) == (MSG_MORE | MSG_EOR))
		return -EINVAL;

	if (unlikely(sk->sk_err))
		return -sk->sk_err;

	flags |= MSG_SENDPAGE_DECRYPTED;
	tls_push_record_flags = flags | MSG_MORE;

	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	if (tls_is_partially_sent_record(tls_ctx)) {
		rc = tls_push_partial_record(sk, tls_ctx, flags);
		if (rc < 0)
			return rc;
	}

	pfrag = sk_page_frag(sk);

	/* TLS_HEADER_SIZE is not counted as part of the TLS record, and
	 * we need to leave room for an authentication tag.
	 */
	max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
			      prot->prepend_size;
	do {
		rc = tls_do_allocation(sk, ctx, pfrag, prot->prepend_size);
		if (unlikely(rc)) {
			rc = sk_stream_wait_memory(sk, &timeo);
			if (!rc)
				continue;

			record = ctx->open_record;
			if (!record)
				break;
handle_error:
			if (record_type != TLS_RECORD_TYPE_DATA) {
				/* avoid sending partial
				 * record with type !=
				 * application_data
				 */
				size = orig_size;
				destroy_record(record);
				ctx->open_record = NULL;
			} else if (record->len > prot->prepend_size) {
				goto last_record;
			}

			break;
		}

		record = ctx->open_record;

		copy = min_t(size_t, size, max_open_record_len - record->len);
		if (copy && (flags & MSG_SPLICE_PAGES)) {
			struct page_frag zc_pfrag;
			struct page **pages = &zc_pfrag.page;
			size_t off;

			rc = iov_iter_extract_pages(iter, &pages,
						    copy, 1, 0, &off);
			if (rc <= 0) {
				if (rc == 0)
					rc = -EIO;
				goto handle_error;
			}
			copy = rc;

			if (WARN_ON_ONCE(!sendpage_ok(zc_pfrag.page))) {
				iov_iter_revert(iter, copy);
				rc = -EIO;
				goto handle_error;
			}

			zc_pfrag.offset = off;
			zc_pfrag.size = copy;
			tls_append_frag(record, &zc_pfrag, copy);
		} else if (copy) {
			copy = min_t(size_t, copy, pfrag->size - pfrag->offset);

			rc = tls_device_copy_data(page_address(pfrag->page) +
						  pfrag->offset, copy,
						  iter);
			if (rc)
				goto handle_error;
			tls_append_frag(record, pfrag, copy);
		}

		size -= copy;
		if (!size) {
last_record:
			tls_push_record_flags = flags;
			if (flags & MSG_MORE) {
				more = true;
				break;
			}

			done = true;
		}

		if (done || record->len >= max_open_record_len ||
		    (record->num_frags >= MAX_SKB_FRAGS - 1)) {
			tls_device_record_close(sk, tls_ctx, record,
						pfrag, record_type);

			rc = tls_push_record(sk,
					     tls_ctx,
					     ctx,
					     record,
					     tls_push_record_flags);
			if (rc < 0)
				break;
		}
	} while (!done);

	tls_ctx->pending_open_record_frags = more;

	if (orig_size - size > 0)
		rc = orig_size - size;

	return rc;
}

int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	int rc;

	if (!tls_ctx->zerocopy_sendfile)
		msg->msg_flags &= ~MSG_SPLICE_PAGES;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

	if (unlikely(msg->msg_controllen)) {
		rc = tls_process_cmsg(sk, msg, &record_type);
		if (rc)
			goto out;
	}

	rc = tls_push_data(sk, &msg->msg_iter, size, msg->msg_flags,
			   record_type);

out:
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return rc;
}

void tls_device_splice_eof(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct iov_iter iter = {};

	if (!tls_is_partially_sent_record(tls_ctx))
		return;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

	if (tls_is_partially_sent_record(tls_ctx)) {
		iov_iter_bvec(&iter, ITER_SOURCE, NULL, 0, 0);
		tls_push_data(sk, &iter, 0, 0, TLS_RECORD_TYPE_DATA);
	}

	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
}

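/* Find the offload record containing TCP sequence number @seq, typically on
 * behalf of a driver handling a retransmission. The lookup starts from
 * retransmit_hint when it is still relevant, walks the RCU-protected
 * records_list, updates the hint, and returns the record's sequence number
 * through @p_record_sn.
 */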
struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn)
{
	u64 record_sn = context->hint_record_sn;
	struct tls_record_info *info, *last;

	info = context->retransmit_hint;
	if (!info ||
	    before(seq, info->end_seq - info->len)) {
		/* if retransmit_hint is irrelevant start
		 * from the beginning of the list
		 */
		info = list_first_entry_or_null(&context->records_list,
						struct tls_record_info, list);
		if (!info)
			return NULL;
		/* send the start_marker record if seq number is before the
		 * tls offload start marker sequence number. This record is
		 * required to handle TCP packets which are before TLS offload
		 * started.
		 *  And if it's not start marker, look if this seq number
		 * belongs to the list.
		 */
		if (likely(!tls_record_is_start_marker(info))) {
			/* we have the first record, get the last record to see
			 * if this seq number belongs to the list.
			 */
			last = list_last_entry(&context->records_list,
					       struct tls_record_info, list);

			if (!between(seq, tls_record_start_seq(info),
				     last->end_seq))
				return NULL;
		}
		record_sn = context->unacked_record_sn;
	}

	/* We just need the _rcu for the READ_ONCE() */
	rcu_read_lock();
	list_for_each_entry_from_rcu(info, &context->records_list, list) {
		if (before(seq, info->end_seq)) {
			if (!context->retransmit_hint ||
			    after(info->end_seq,
				  context->retransmit_hint->end_seq)) {
				context->hint_record_sn = record_sn;
				context->retransmit_hint = info;
			}
			*p_record_sn = record_sn;
			goto exit_rcu_unlock;
		}
		record_sn++;
	}
	info = NULL;

exit_rcu_unlock:
	rcu_read_unlock();
	return info;
}
EXPORT_SYMBOL(tls_get_record);

static int tls_device_push_pending_record(struct sock *sk, int flags)
{
	struct iov_iter iter;

	iov_iter_kvec(&iter, ITER_SOURCE, NULL, 0, 0);
	return tls_push_data(sk, &iter, 0, flags, TLS_RECORD_TYPE_DATA);
}

void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
{
	if (tls_is_partially_sent_record(ctx)) {
		gfp_t sk_allocation = sk->sk_allocation;

		WARN_ON_ONCE(sk->sk_write_pending);

		sk->sk_allocation = GFP_ATOMIC;
		tls_push_partial_record(sk, ctx,
					MSG_DONTWAIT | MSG_NOSIGNAL |
					MSG_SENDPAGE_DECRYPTED);
		sk->sk_allocation = sk_allocation;
	}
}

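/* Tell the device where the next RX record starts (TCP sequence @seq) and
 * which record sequence number to expect, so inline decryption can get back
 * in sync with the TCP stream.
 */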
static void tls_device_resync_rx(struct tls_context *tls_ctx,
				 struct sock *sk, u32 seq, u8 *rcd_sn)
{
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
	struct net_device *netdev;

	trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
	rcu_read_lock();
	netdev = rcu_dereference(tls_ctx->netdev);
	if (netdev)
		netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
						   TLS_OFFLOAD_CTX_DIR_RX);
	rcu_read_unlock();
	TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
}

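/* Asynchronous (driver-requested) RX resync. While the request is in the
 * async stage, record header sequence numbers seen by the kernel are logged
 * and counted; once the driver posts the final synchronous request, it is
 * matched against the log and the accumulated record-number delta is
 * returned so the caller can adjust the record sequence number.
 */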
static bool
tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
			   s64 resync_req, u32 *seq, u16 *rcd_delta)
{
	u32 is_async = resync_req & RESYNC_REQ_ASYNC;
	u32 req_seq = resync_req >> 32;
	u32 req_end = req_seq + ((resync_req >> 16) & 0xffff);
	u16 i;

	*rcd_delta = 0;

	if (is_async) {
		/* shouldn't get to wraparound:
		 * too long in async stage, something bad happened
		 */
		if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX))
			return false;

		/* asynchronous stage: log all headers seq such that
		 * req_seq <= seq <= end_seq, and wait for real resync request
		 */
		if (before(*seq, req_seq))
			return false;
		if (!after(*seq, req_end) &&
		    resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX)
			resync_async->log[resync_async->loglen++] = *seq;

		resync_async->rcd_delta++;

		return false;
	}

	/* synchronous stage: check against the logged entries and
	 * proceed to check the next entries if no match was found
	 */
	for (i = 0; i < resync_async->loglen; i++)
		if (req_seq == resync_async->log[i] &&
		    atomic64_try_cmpxchg(&resync_async->req, &resync_req, 0)) {
			*rcd_delta = resync_async->rcd_delta - i;
			*seq = req_seq;
			resync_async->loglen = 0;
			resync_async->rcd_delta = 0;
			return true;
		}

	resync_async->loglen = 0;
	resync_async->rcd_delta = 0;

	if (req_seq == *seq &&
	    atomic64_try_cmpxchg(&resync_async->req,
				 &resync_req, 0))
		return true;

	return false;
}

void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx;
	u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
	u32 sock_data, is_req_pending;
	struct tls_prot_info *prot;
	s64 resync_req;
	u16 rcd_delta;
	u32 req_seq;

	if (tls_ctx->rx_conf != TLS_HW)
		return;
	if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags)))
		return;

	prot = &tls_ctx->prot_info;
	rx_ctx = tls_offload_ctx_rx(tls_ctx);
	memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);

	switch (rx_ctx->resync_type) {
	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ:
		resync_req = atomic64_read(&rx_ctx->resync_req);
		req_seq = resync_req >> 32;
		seq += TLS_HEADER_SIZE - 1;
		is_req_pending = resync_req;

		if (likely(!is_req_pending) || req_seq != seq ||
		    !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
			return;
		break;
	case TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT:
		if (likely(!rx_ctx->resync_nh_do_now))
			return;

		/* head of next rec is already in, note that the sock_inq will
		 * include the currently parsed message when called from parser
		 */
		sock_data = tcp_inq(sk);
		if (sock_data > rcd_len) {
			trace_tls_device_rx_resync_nh_delay(sk, sock_data,
							    rcd_len);
			return;
		}

		rx_ctx->resync_nh_do_now = 0;
		seq += rcd_len;
		tls_bigint_increment(rcd_sn, prot->rec_seq_size);
		break;
	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC:
		resync_req = atomic64_read(&rx_ctx->resync_async->req);
		is_req_pending = resync_req;
		if (likely(!is_req_pending))
			return;

		if (!tls_device_rx_resync_async(rx_ctx->resync_async,
						resync_req, &seq, &rcd_delta))
			return;
		tls_bigint_subtract(rcd_sn, rcd_delta);
		break;
	}

	tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
}

static void tls_device_core_ctrl_rx_resync(struct tls_context *tls_ctx,
					   struct tls_offload_context_rx *ctx,
					   struct sock *sk, struct sk_buff *skb)
{
	struct strp_msg *rxm;

	/* device will request resyncs by itself based on stream scan */
	if (ctx->resync_type != TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT)
		return;
	/* already scheduled */
	if (ctx->resync_nh_do_now)
		return;
	/* seen decrypted fragments since last fully-failed record */
	if (ctx->resync_nh_reset) {
		ctx->resync_nh_reset = 0;
		ctx->resync_nh.decrypted_failed = 1;
		ctx->resync_nh.decrypted_tgt = TLS_DEVICE_RESYNC_NH_START_IVAL;
		return;
	}

	if (++ctx->resync_nh.decrypted_failed <= ctx->resync_nh.decrypted_tgt)
		return;

	/* doing resync, bump the next target in case it fails */
	if (ctx->resync_nh.decrypted_tgt < TLS_DEVICE_RESYNC_NH_MAX_IVAL)
		ctx->resync_nh.decrypted_tgt *= 2;
	else
		ctx->resync_nh.decrypted_tgt += TLS_DEVICE_RESYNC_NH_MAX_IVAL;

	rxm = strp_msg(skb);

	/* head of next rec is already in, parser will sync for us */
	if (tcp_inq(sk) > rxm->full_len) {
		trace_tls_device_rx_resync_nh_schedule(sk);
		ctx->resync_nh_do_now = 1;
	} else {
		struct tls_prot_info *prot = &tls_ctx->prot_info;
		u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];

		memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
		tls_bigint_increment(rcd_sn, prot->rec_seq_size);

		tls_device_resync_rx(tls_ctx, sk, tcp_sk(sk)->copied_seq,
				     rcd_sn);
	}
}

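/* Re-encrypt a record the device decrypted only partially. AES-GCM uses a
 * counter-mode keystream, so running the software decryption over the mixed
 * data flips every region: ciphertext becomes plaintext and device-decrypted
 * plaintext becomes ciphertext again. The output is copied back only over
 * the fragments marked as decrypted, leaving a fully-encrypted record that
 * the regular software RX path can decrypt and authenticate (the -EBADMSG
 * from this pass is expected and cleared).
 */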
static int
tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
	const struct tls_cipher_desc *cipher_desc;
	int err, offset, copy, data_len, pos;
	struct sk_buff *skb, *skb_iter;
	struct scatterlist sg[1];
	struct strp_msg *rxm;
	char *orig_buf, *buf;

	cipher_desc = get_cipher_desc(tls_ctx->crypto_recv.info.cipher_type);
	DEBUG_NET_WARN_ON_ONCE(!cipher_desc || !cipher_desc->offloadable);

	rxm = strp_msg(tls_strp_msg(sw_ctx));
	orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE + cipher_desc->iv,
			   sk->sk_allocation);
	if (!orig_buf)
		return -ENOMEM;
	buf = orig_buf;

	err = tls_strp_msg_cow(sw_ctx);
	if (unlikely(err))
		goto free_buf;

	skb = tls_strp_msg(sw_ctx);
	rxm = strp_msg(skb);
	offset = rxm->offset;

	sg_init_table(sg, 1);
	sg_set_buf(&sg[0], buf,
		   rxm->full_len + TLS_HEADER_SIZE + cipher_desc->iv);
	err = skb_copy_bits(skb, offset, buf, TLS_HEADER_SIZE + cipher_desc->iv);
	if (err)
		goto free_buf;

	/* We are interested only in the decrypted data not the auth */
	err = decrypt_skb(sk, sg);
	if (err != -EBADMSG)
		goto free_buf;
	else
		err = 0;

	data_len = rxm->full_len - cipher_desc->tag;

	if (skb_pagelen(skb) > offset) {
		copy = min_t(int, skb_pagelen(skb) - offset, data_len);

		if (skb->decrypted) {
			err = skb_store_bits(skb, offset, buf, copy);
			if (err)
				goto free_buf;
		}

		offset += copy;
		buf += copy;
	}

	pos = skb_pagelen(skb);
	skb_walk_frags(skb, skb_iter) {
		int frag_pos;

		/* Practically all frags must belong to msg if reencrypt
		 * is needed with current strparser and coalescing logic,
		 * but strparser may "get optimized", so let's be safe.
		 */
		if (pos + skb_iter->len <= offset)
			goto done_with_frag;
		if (pos >= data_len + rxm->offset)
			break;

		frag_pos = offset - pos;
		copy = min_t(int, skb_iter->len - frag_pos,
			     data_len + rxm->offset - offset);

		if (skb_iter->decrypted) {
			err = skb_store_bits(skb_iter, frag_pos, buf, copy);
			if (err)
				goto free_buf;
		}

		offset += copy;
		buf += copy;
done_with_frag:
		pos += skb_iter->len;
	}

free_buf:
	kfree(orig_buf);
	return err;
}

int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx)
{
	struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
	struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_buff *skb = tls_strp_msg(sw_ctx);
	struct strp_msg *rxm = strp_msg(skb);
	int is_decrypted, is_encrypted;

	if (!tls_strp_msg_mixed_decrypted(sw_ctx)) {
		is_decrypted = skb->decrypted;
		is_encrypted = !is_decrypted;
	} else {
		is_decrypted = 0;
		is_encrypted = 0;
	}

	trace_tls_device_decrypted(sk, tcp_sk(sk)->copied_seq - rxm->full_len,
				   tls_ctx->rx.rec_seq, rxm->full_len,
				   is_encrypted, is_decrypted);

	if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) {
		if (likely(is_encrypted || is_decrypted))
			return is_decrypted;

		/* After tls_device_down disables the offload, the next SKB will
		 * likely have initial fragments decrypted, and final ones not
		 * decrypted. We need to reencrypt that single SKB.
		 */
		return tls_device_reencrypt(sk, tls_ctx);
	}

	/* Return immediately if the record is either entirely plaintext or
	 * entirely ciphertext. Otherwise reencrypt the partially decrypted
	 * record.
	 */
	if (is_decrypted) {
		ctx->resync_nh_reset = 1;
		return is_decrypted;
	}
	if (is_encrypted) {
		tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb);
		return 0;
	}

	ctx->resync_nh_reset = 1;
	return tls_device_reencrypt(sk, tls_ctx);
}

static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
			      struct net_device *netdev)
{
	if (sk->sk_destruct != tls_device_sk_destruct) {
		refcount_set(&ctx->refcount, 1);
		dev_hold(netdev);
		RCU_INIT_POINTER(ctx->netdev, netdev);
		spin_lock_irq(&tls_device_lock);
		list_add_tail(&ctx->list, &tls_device_list);
		spin_unlock_irq(&tls_device_lock);

		ctx->sk_destruct = sk->sk_destruct;
		smp_store_release(&sk->sk_destruct, tls_device_sk_destruct);
	}
}

static struct tls_offload_context_tx *alloc_offload_ctx_tx(struct tls_context *ctx)
{
	struct tls_offload_context_tx *offload_ctx;
	__be64 rcd_sn;

	offload_ctx = kzalloc(sizeof(*offload_ctx), GFP_KERNEL);
	if (!offload_ctx)
		return NULL;

	INIT_WORK(&offload_ctx->destruct_work, tls_device_tx_del_task);
	INIT_LIST_HEAD(&offload_ctx->records_list);
	spin_lock_init(&offload_ctx->lock);
	sg_init_table(offload_ctx->sg_tx_data,
		      ARRAY_SIZE(offload_ctx->sg_tx_data));

	/* start at rec_seq - 1 to account for the start marker record */
	memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn));
	offload_ctx->unacked_record_sn = be64_to_cpu(rcd_sn) - 1;

	offload_ctx->ctx = ctx;

	return offload_ctx;
}

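/* Set up TLS_HW TX offload on an established TCP socket, called from the
 * TLS_TX setsockopt path. A typical userspace sequence (sketch only, error
 * handling and key material omitted) looks like:
 *
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *	};
 *	(fill ci.key / ci.iv / ci.salt / ci.rec_seq from the handshake)
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 *
 * The function below resolves the egress netdev, installs the key with the
 * driver's tls_dev_add, and inserts a zero-length start-marker record so
 * retransmissions of pre-offload data are left untouched.
 */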
int tls_set_device_offload(struct sock *sk)
{
	struct tls_record_info *start_marker_record;
	struct tls_offload_context_tx *offload_ctx;
	const struct tls_cipher_desc *cipher_desc;
	struct tls_crypto_info *crypto_info;
	struct tls_prot_info *prot;
	struct net_device *netdev;
	struct tls_context *ctx;
	struct sk_buff *skb;
	char *iv, *rec_seq;
	int rc;

	ctx = tls_get_ctx(sk);
	prot = &ctx->prot_info;

	if (ctx->priv_ctx_tx)
		return -EEXIST;

	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		return -EINVAL;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
		rc = -EOPNOTSUPP;
		goto release_netdev;
	}

	crypto_info = &ctx->crypto_send.info;
	if (crypto_info->version != TLS_1_2_VERSION) {
		rc = -EOPNOTSUPP;
		goto release_netdev;
	}

	cipher_desc = get_cipher_desc(crypto_info->cipher_type);
	if (!cipher_desc || !cipher_desc->offloadable) {
		rc = -EINVAL;
		goto release_netdev;
	}

	rc = init_prot_info(prot, crypto_info, cipher_desc);
	if (rc)
		goto release_netdev;

	iv = crypto_info_iv(crypto_info, cipher_desc);
	rec_seq = crypto_info_rec_seq(crypto_info, cipher_desc);

	memcpy(ctx->tx.iv + cipher_desc->salt, iv, cipher_desc->iv);
	memcpy(ctx->tx.rec_seq, rec_seq, cipher_desc->rec_seq);

	start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
	if (!start_marker_record) {
		rc = -ENOMEM;
		goto release_netdev;
	}

	offload_ctx = alloc_offload_ctx_tx(ctx);
	if (!offload_ctx) {
		rc = -ENOMEM;
		goto free_marker_record;
	}

	rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
	if (rc)
		goto free_offload_ctx;

	start_marker_record->end_seq = tcp_sk(sk)->write_seq;
	start_marker_record->len = 0;
	start_marker_record->num_frags = 0;
	list_add_tail(&start_marker_record->list, &offload_ctx->records_list);

	clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
	ctx->push_pending_record = tls_device_push_pending_record;

	/* TLS offload is greatly simplified if we don't send
	 * SKBs where only part of the payload needs to be encrypted.
	 * So mark the last skb in the write queue as end of record.
	 */
	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	/* Avoid offloading if the device is down
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event
	 *
	 * device_offload_lock is taken in tls_device's NETDEV_DOWN
	 * handler thus protecting from the device going down before
	 * ctx was added to tls_device_list.
	 */
	down_read(&device_offload_lock);
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_lock;
	}

	ctx->priv_ctx_tx = offload_ctx;
	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
					     &ctx->crypto_send.info,
					     tcp_sk(sk)->write_seq);
	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_TX,
				     tcp_sk(sk)->write_seq, rec_seq, rc);
	if (rc)
		goto release_lock;

	tls_device_attach(ctx, sk, netdev);
	up_read(&device_offload_lock);

	/* following this assignment tls_is_skb_tx_device_offloaded
	 * will return true and the context might be accessed
	 * by the netdev's xmit function.
	 */
	smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
	dev_put(netdev);

	return 0;

release_lock:
	up_read(&device_offload_lock);
	clean_acked_data_disable(inet_csk(sk));
	crypto_free_aead(offload_ctx->aead_send);
free_offload_ctx:
	kfree(offload_ctx);
	ctx->priv_ctx_tx = NULL;
free_marker_record:
	kfree(start_marker_record);
release_netdev:
	dev_put(netdev);
	return rc;
}

int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
	struct tls12_crypto_info_aes_gcm_128 *info;
	struct tls_offload_context_rx *context;
	struct net_device *netdev;
	int rc = 0;

	if (ctx->crypto_recv.info.version != TLS_1_2_VERSION)
		return -EOPNOTSUPP;

	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		return -EINVAL;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
		rc = -EOPNOTSUPP;
		goto release_netdev;
	}

	/* Avoid offloading if the device is down
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event
	 *
	 * device_offload_lock is taken in tls_device's NETDEV_DOWN
	 * handler thus protecting from the device going down before
	 * ctx was added to tls_device_list.
	 */
	down_read(&device_offload_lock);
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_lock;
	}

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context) {
		rc = -ENOMEM;
		goto release_lock;
	}
	context->resync_nh_reset = 1;

	ctx->priv_ctx_rx = context;
	rc = tls_set_sw_offload(sk, 0);
	if (rc)
		goto release_ctx;

	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
					     &ctx->crypto_recv.info,
					     tcp_sk(sk)->copied_seq);
	info = (void *)&ctx->crypto_recv.info;
	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_RX,
				     tcp_sk(sk)->copied_seq, info->rec_seq, rc);
	if (rc)
		goto free_sw_resources;

	tls_device_attach(ctx, sk, netdev);
	up_read(&device_offload_lock);

	dev_put(netdev);

	return 0;

free_sw_resources:
	up_read(&device_offload_lock);
	tls_sw_free_resources_rx(sk);
	down_read(&device_offload_lock);
release_ctx:
	ctx->priv_ctx_rx = NULL;
release_lock:
	up_read(&device_offload_lock);
release_netdev:
	dev_put(netdev);
	return rc;
}

void tls_device_offload_cleanup_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct net_device *netdev;

	down_read(&device_offload_lock);
	netdev = rcu_dereference_protected(tls_ctx->netdev,
					   lockdep_is_held(&device_offload_lock));
	if (!netdev)
		goto out;

	netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
					TLS_OFFLOAD_CTX_DIR_RX);

	if (tls_ctx->tx_conf != TLS_HW) {
		dev_put(netdev);
		rcu_assign_pointer(tls_ctx->netdev, NULL);
	} else {
		set_bit(TLS_RX_DEV_CLOSED, &tls_ctx->flags);
	}
out:
	up_read(&device_offload_lock);
	tls_sw_release_resources_rx(sk);
}

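/* NETDEV_DOWN handler. Every context bound to @netdev is switched to the
 * software fallback (tls_validate_xmit_skb_sw), its netdev pointer is
 * cleared to stop TX/RX resync, and the driver-side state is released with
 * tls_dev_del. The contexts keep their TLS_HW configuration and are moved
 * to tls_device_down_list; the RX path then treats them as degraded until
 * the sockets are destroyed.
 */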
static int tls_device_down(struct net_device *netdev)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	/* Request a write lock to block new offload attempts */
	down_write(&device_offload_lock);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
		struct net_device *ctx_netdev =
			rcu_dereference_protected(ctx->netdev,
						  lockdep_is_held(&device_offload_lock));

		if (ctx_netdev != netdev ||
		    !refcount_inc_not_zero(&ctx->refcount))
			continue;

		list_move(&ctx->list, &list);
	}
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &list, list) {
		/* Stop offloaded TX and switch to the fallback.
		 * tls_is_skb_tx_device_offloaded will return false.
		 */
		WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw);

		/* Stop the RX and TX resync.
		 * tls_dev_resync must not be called after tls_dev_del.
		 */
		rcu_assign_pointer(ctx->netdev, NULL);

		/* Start skipping the RX resync logic completely. */
		set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags);

		/* Sync with inflight packets. After this point:
		 * TX: no non-encrypted packets will be passed to the driver.
		 * RX: resync requests from the driver will be ignored.
		 */
		synchronize_net();

		/* Release the offload context on the driver side. */
		if (ctx->tx_conf == TLS_HW)
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
		if (ctx->rx_conf == TLS_HW &&
		    !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_RX);

		dev_put(netdev);

		/* Move the context to a separate list for two reasons:
		 * 1. When the context is deallocated, list_del is called.
		 * 2. It's no longer an offloaded context, so we don't want to
		 *    run offload-specific code on this context.
		 */
		spin_lock_irqsave(&tls_device_lock, flags);
		list_move_tail(&ctx->list, &tls_device_down_list);
		spin_unlock_irqrestore(&tls_device_lock, flags);

		/* Device contexts for RX and TX will be freed on sk_destruct
		 * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
		 * Now release the ref taken above.
		 */
		if (refcount_dec_and_test(&ctx->refcount)) {
			/* sk_destruct ran after tls_device_down took a ref, and
			 * it returned early. Complete the destruction here.
			 */
			list_del(&ctx->list);
			tls_device_free_ctx(ctx);
		}
	}

	up_write(&device_offload_lock);

	flush_workqueue(destruct_wq);

	return NOTIFY_DONE;
}

static int tls_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (!dev->tlsdev_ops &&
	    !(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_FEAT_CHANGE:
		if (netif_is_bond_master(dev))
			return NOTIFY_DONE;
		if ((dev->features & NETIF_F_HW_TLS_RX) &&
		    !dev->tlsdev_ops->tls_dev_resync)
			return NOTIFY_BAD;

		if (dev->tlsdev_ops &&
		    dev->tlsdev_ops->tls_dev_add &&
		    dev->tlsdev_ops->tls_dev_del)
			return NOTIFY_DONE;
		else
			return NOTIFY_BAD;
	case NETDEV_DOWN:
		return tls_device_down(dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block tls_dev_notifier = {
	.notifier_call = tls_dev_event,
};

int __init tls_device_init(void)
{
	int err;

	dummy_page = alloc_page(GFP_KERNEL);
	if (!dummy_page)
		return -ENOMEM;

	destruct_wq = alloc_workqueue("ktls_device_destruct", 0, 0);
	if (!destruct_wq) {
		err = -ENOMEM;
		goto err_free_dummy;
	}

	err = register_netdevice_notifier(&tls_dev_notifier);
	if (err)
		goto err_destroy_wq;

	return 0;

err_destroy_wq:
	destroy_workqueue(destruct_wq);
err_free_dummy:
	put_page(dummy_page);
	return err;
}

void __exit tls_device_cleanup(void)
{
	unregister_netdevice_notifier(&tls_dev_notifier);
	destroy_workqueue(destruct_wq);
	clean_acked_data_flush();
	put_page(dummy_page);
}