Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>

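/* Check whether the ring element at elem_first_coalesce is still live so a
 * new allocation may be coalesced into it. The sg ring wraps around, hence
 * the two cases: a linear region (end > start) and a wrapped one
 * (end < start).
 */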
static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
{
        if (msg->sg.end > msg->sg.start &&
            elem_first_coalesce < msg->sg.end)
                return true;

        if (msg->sg.end < msg->sg.start &&
            (elem_first_coalesce > msg->sg.start ||
             elem_first_coalesce < msg->sg.end))
                return true;

        return false;
}

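/* Grow @msg so it holds @len bytes in total, backed by page-frag memory
 * charged to @sk. New frags are coalesced into the last scatterlist element
 * when adjacent; otherwise a fresh element is consumed. Returns 0 on
 * success, -ENOMEM if memory cannot be refilled or scheduled, and -ENOSPC
 * once the sg ring is full; bytes already added stay accounted in
 * msg->sg.size.
 */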
int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
                 int elem_first_coalesce)
{
        struct page_frag *pfrag = sk_page_frag(sk);
        int ret = 0;

        len -= msg->sg.size;
        while (len > 0) {
                struct scatterlist *sge;
                u32 orig_offset;
                int use, i;

                if (!sk_page_frag_refill(sk, pfrag))
                        return -ENOMEM;

                orig_offset = pfrag->offset;
                use = min_t(int, len, pfrag->size - orig_offset);
                if (!sk_wmem_schedule(sk, use))
                        return -ENOMEM;

                i = msg->sg.end;
                sk_msg_iter_var_prev(i);
                sge = &msg->sg.data[i];

                if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
                    sg_page(sge) == pfrag->page &&
                    sge->offset + sge->length == orig_offset) {
                        sge->length += use;
                } else {
                        if (sk_msg_full(msg)) {
                                ret = -ENOSPC;
                                break;
                        }

                        sge = &msg->sg.data[msg->sg.end];
                        sg_unmark_end(sge);
                        sg_set_page(sge, pfrag->page, use, orig_offset);
                        get_page(pfrag->page);
                        sk_msg_iter_next(msg, end);
                }

                sk_mem_charge(sk, use);
                msg->sg.size += use;
                pfrag->offset += use;
                len -= use;
        }

        return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_alloc);

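/* Share @len bytes starting at @off from @src into @dst. Pages are shared
 * rather than copied (coalescing into the tail element of @dst when the
 * data is virtually contiguous) and the bytes are charged to @sk a second
 * time. Returns -ENOSPC if @src runs out of data or @dst runs out of
 * elements.
 */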
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
                 u32 off, u32 len)
{
        int i = src->sg.start;
        struct scatterlist *sge = sk_msg_elem(src, i);
        struct scatterlist *sgd = NULL;
        u32 sge_len, sge_off;

        while (off) {
                if (sge->length > off)
                        break;
                off -= sge->length;
                sk_msg_iter_var_next(i);
                if (i == src->sg.end && off)
                        return -ENOSPC;
                sge = sk_msg_elem(src, i);
        }

        while (len) {
                sge_len = sge->length - off;
                if (sge_len > len)
                        sge_len = len;

                if (dst->sg.end)
                        sgd = sk_msg_elem(dst, dst->sg.end - 1);

                if (sgd &&
                    (sg_page(sge) == sg_page(sgd)) &&
                    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
                        sgd->length += sge_len;
                        dst->sg.size += sge_len;
                } else if (!sk_msg_full(dst)) {
                        sge_off = sge->offset + off;
                        sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
                } else {
                        return -ENOSPC;
                }

                off = 0;
                len -= sge_len;
                sk_mem_charge(sk, sge_len);
                sk_msg_iter_var_next(i);
                if (i == src->sg.end && len)
                        return -ENOSPC;
                sge = sk_msg_elem(src, i);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(sk_msg_clone);

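/* Uncharge @bytes from @sk and zero the corresponding scatterlist elements
 * from the front of @msg, advancing msg->sg.start past fully consumed
 * elements. Page references are left untouched.
 */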
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
{
        int i = msg->sg.start;

        do {
                struct scatterlist *sge = sk_msg_elem(msg, i);

                if (bytes < sge->length) {
                        sge->length -= bytes;
                        sge->offset += bytes;
                        sk_mem_uncharge(sk, bytes);
                        break;
                }

                sk_mem_uncharge(sk, sge->length);
                bytes -= sge->length;
                sge->length = 0;
                sge->offset = 0;
                sk_msg_iter_var_next(i);
        } while (bytes && i != msg->sg.end);
        msg->sg.start = i;
}
EXPORT_SYMBOL_GPL(sk_msg_return_zero);

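/* Like sk_msg_return_zero(), but only uncharges memory: element lengths,
 * offsets and msg->sg.start are left as they are.
 */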
void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
{
        int i = msg->sg.start;

        do {
                struct scatterlist *sge = &msg->sg.data[i];
                int uncharge = (bytes < sge->length) ? bytes : sge->length;

                sk_mem_uncharge(sk, uncharge);
                bytes -= uncharge;
                sk_msg_iter_var_next(i);
        } while (i != msg->sg.end);
}
EXPORT_SYMBOL_GPL(sk_msg_return);

static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
                            bool charge)
{
        struct scatterlist *sge = sk_msg_elem(msg, i);
        u32 len = sge->length;

        if (charge)
                sk_mem_uncharge(sk, len);
        if (!msg->skb)
                put_page(sg_page(sge));
        memset(sge, 0, sizeof(*sge));
        return len;
}

static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
                         bool charge)
{
        struct scatterlist *sge = sk_msg_elem(msg, i);
        int freed = 0;

        while (msg->sg.size) {
                msg->sg.size -= sge->length;
                freed += sk_msg_free_elem(sk, msg, i, charge);
                sk_msg_iter_var_next(i);
                sk_msg_check_to_free(msg, i, msg->sg.size);
                sge = sk_msg_elem(msg, i);
        }
        consume_skb(msg->skb);
        sk_msg_init(msg);
        return freed;
}

int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
{
        return __sk_msg_free(sk, msg, msg->sg.start, false);
}
EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);

int sk_msg_free(struct sock *sk, struct sk_msg *msg)
{
        return __sk_msg_free(sk, msg, msg->sg.start, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free);

static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
                                  u32 bytes, bool charge)
{
        struct scatterlist *sge;
        u32 i = msg->sg.start;

        while (bytes) {
                sge = sk_msg_elem(msg, i);
                if (!sge->length)
                        break;
                if (bytes < sge->length) {
                        if (charge)
                                sk_mem_uncharge(sk, bytes);
                        sge->length -= bytes;
                        sge->offset += bytes;
                        msg->sg.size -= bytes;
                        break;
                }

                msg->sg.size -= sge->length;
                bytes -= sge->length;
                sk_msg_free_elem(sk, msg, i, charge);
                sk_msg_iter_var_next(i);
                sk_msg_check_to_free(msg, i, bytes);
        }
        msg->sg.start = i;
}

void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
{
        __sk_msg_free_partial(sk, msg, bytes, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free_partial);

void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
                                  u32 bytes)
{
        __sk_msg_free_partial(sk, msg, bytes, false);
}

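/* Trim @msg from the tail down to @len bytes total, freeing whole elements
 * where possible and shrinking the final partial element. curr and
 * copybreak are pulled back when they point into the trimmed region so
 * later copy operations resume at a valid location.
 */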
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
{
        int trim = msg->sg.size - len;
        u32 i = msg->sg.end;

        if (trim <= 0) {
                WARN_ON(trim < 0);
                return;
        }

        sk_msg_iter_var_prev(i);
        msg->sg.size = len;
        while (msg->sg.data[i].length &&
               trim >= msg->sg.data[i].length) {
                trim -= msg->sg.data[i].length;
                sk_msg_free_elem(sk, msg, i, true);
                sk_msg_iter_var_prev(i);
                if (!trim)
                        goto out;
        }

        msg->sg.data[i].length -= trim;
        sk_mem_uncharge(sk, trim);
        /* Adjust copybreak if it falls into the trimmed part of the last buf */
        if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
                msg->sg.copybreak = msg->sg.data[i].length;
out:
        sk_msg_iter_var_next(i);
        msg->sg.end = i;

        /* If we trim data a full sg elem before the curr pointer, update
         * copybreak and curr so that any future copy operations start at
         * the new copy location.
         * However, trimmed data that has not yet been used in a copy op
         * does not require an update.
         */
        if (!msg->sg.size) {
                msg->sg.curr = msg->sg.start;
                msg->sg.copybreak = 0;
        } else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
                   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
                sk_msg_iter_var_prev(i);
                msg->sg.curr = i;
                msg->sg.copybreak = msg->sg.data[i].length;
        }
}
EXPORT_SYMBOL_GPL(sk_msg_trim);

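/* Pin up to @bytes of user pages from @from and link them into @msg without
 * copying. On failure the iterator is reverted; the caller is expected to
 * trim @msg back if partially added data must also be dropped.
 */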
int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
                              struct sk_msg *msg, u32 bytes)
{
        int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
        const int to_max_pages = MAX_MSG_FRAGS;
        struct page *pages[MAX_MSG_FRAGS];
        ssize_t orig, copied, use, offset;

        orig = msg->sg.size;
        while (bytes > 0) {
                i = 0;
                maxpages = to_max_pages - num_elems;
                if (maxpages == 0) {
                        ret = -EFAULT;
                        goto out;
                }

                copied = iov_iter_get_pages(from, pages, bytes, maxpages,
                                            &offset);
                if (copied <= 0) {
                        ret = -EFAULT;
                        goto out;
                }

                iov_iter_advance(from, copied);
                bytes -= copied;
                msg->sg.size += copied;

                while (copied) {
                        use = min_t(int, copied, PAGE_SIZE - offset);
                        sg_set_page(&msg->sg.data[msg->sg.end],
                                    pages[i], use, offset);
                        sg_unmark_end(&msg->sg.data[msg->sg.end]);
                        sk_mem_charge(sk, use);

                        offset = 0;
                        copied -= use;
                        sk_msg_iter_next(msg, end);
                        num_elems++;
                        i++;
                }
                /* When zerocopy is mixed with sk_msg_*copy* operations we
                 * may have a copybreak set; in that case clear it and
                 * prefer the zerocopy remainder when possible.
                 */
                msg->sg.copybreak = 0;
                msg->sg.curr = msg->sg.end;
        }
out:
        /* Revert iov_iter updates; the msg will need to use 'trim' later
         * if it also needs to be cleared.
         */
        if (ret)
                iov_iter_revert(from, msg->sg.size - orig);
        return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);

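/* Copy up to @bytes from @from into the buffers already allocated in @msg,
 * starting at the curr/copybreak position established by earlier copies or
 * trims. Uses the nocache copy variant when the route supports it.
 */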
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
                             struct sk_msg *msg, u32 bytes)
{
        int ret = -ENOSPC, i = msg->sg.curr;
        struct scatterlist *sge;
        u32 copy, buf_size;
        void *to;

        do {
                sge = sk_msg_elem(msg, i);
                /* This is possible if a trim operation shrunk the buffer */
                if (msg->sg.copybreak >= sge->length) {
                        msg->sg.copybreak = 0;
                        sk_msg_iter_var_next(i);
                        if (i == msg->sg.end)
                                break;
                        sge = sk_msg_elem(msg, i);
                }

                buf_size = sge->length - msg->sg.copybreak;
                copy = (buf_size > bytes) ? bytes : buf_size;
                to = sg_virt(sge) + msg->sg.copybreak;
                msg->sg.copybreak += copy;
                if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
                        ret = copy_from_iter_nocache(to, copy, from);
                else
                        ret = copy_from_iter(to, copy, from);
                if (ret != copy) {
                        ret = -EFAULT;
                        goto out;
                }
                bytes -= copy;
                if (!bytes)
                        break;
                msg->sg.copybreak = 0;
                sk_msg_iter_var_next(i);
        } while (i != msg->sg.end);
out:
        msg->sg.curr = i;
        return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);

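/* Convert @skb into an sk_msg, charge it against the socket's receive
 * memory and queue it on the psock ingress list, then wake the receiver.
 * Returns the number of bytes queued, or -EAGAIN if memory cannot be
 * allocated or scheduled so the caller can retry from the backlog.
 */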
static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
{
        struct sock *sk = psock->sk;
        int copied = 0, num_sge;
        struct sk_msg *msg;

        msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
        if (unlikely(!msg))
                return -EAGAIN;
        if (!sk_rmem_schedule(sk, skb, skb->len)) {
                kfree(msg);
                return -EAGAIN;
        }

        sk_msg_init(msg);
        num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len);
        if (unlikely(num_sge < 0)) {
                kfree(msg);
                return num_sge;
        }

        sk_mem_charge(sk, skb->len);
        copied = skb->len;
        msg->sg.start = 0;
        msg->sg.size = copied;
        msg->sg.end = num_sge;
        msg->skb = skb;

        sk_psock_queue_msg(psock, msg);
        sk_psock_data_ready(sk, psock);
        return copied;
}

static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
                               u32 off, u32 len, bool ingress)
{
        if (!ingress) {
                if (!sock_writeable(psock->sk))
                        return -EAGAIN;
                return skb_send_sock_locked(psock->sk, skb, off, len);
        }
        return sk_psock_skb_ingress(psock, skb);
}

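/* Workqueue handler that drains the psock ingress_skb queue, either
 * re-injecting skbs locally or transmitting them on the socket. Partial
 * progress is parked in psock->work_state so an -EAGAIN can be resumed on
 * the next run; hard errors report EPIPE and disable transmit.
 */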
static void sk_psock_backlog(struct work_struct *work)
{
        struct sk_psock *psock = container_of(work, struct sk_psock, work);
        struct sk_psock_work_state *state = &psock->work_state;
        struct sk_buff *skb;
        bool ingress;
        u32 len, off;
        int ret;

        /* Lock sock to avoid losing sk_socket during loop. */
        lock_sock(psock->sk);
        if (state->skb) {
                skb = state->skb;
                len = state->len;
                off = state->off;
                state->skb = NULL;
                goto start;
        }

        while ((skb = skb_dequeue(&psock->ingress_skb))) {
                len = skb->len;
                off = 0;
start:
                ingress = tcp_skb_bpf_ingress(skb);
                do {
                        ret = -EIO;
                        if (likely(psock->sk->sk_socket))
                                ret = sk_psock_handle_skb(psock, skb, off,
                                                          len, ingress);
                        if (ret <= 0) {
                                if (ret == -EAGAIN) {
                                        state->skb = skb;
                                        state->len = len;
                                        state->off = off;
                                        goto end;
                                }
                                /* Hard errors break pipe and stop xmit. */
                                sk_psock_report_error(psock, ret ? -ret : EPIPE);
                                sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
                                kfree_skb(skb);
                                goto end;
                        }
                        off += ret;
                        len -= ret;
                } while (len);

                if (!ingress)
                        kfree_skb(skb);
        }
end:
        release_sock(psock->sk);
}

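/* Allocate and attach a psock to @sk, saving the protocol callbacks that
 * will be overridden so they can be restored on teardown. Fails with
 * -EINVAL if a ULP is already in place and -EBUSY if sk_user_data is
 * already taken.
 */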
struct sk_psock *sk_psock_init(struct sock *sk, int node)
{
        struct sk_psock *psock;
        struct proto *prot;

        write_lock_bh(&sk->sk_callback_lock);

        if (inet_csk_has_ulp(sk)) {
                psock = ERR_PTR(-EINVAL);
                goto out;
        }

        if (sk->sk_user_data) {
                psock = ERR_PTR(-EBUSY);
                goto out;
        }

        psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
        if (!psock) {
                psock = ERR_PTR(-ENOMEM);
                goto out;
        }

        prot = READ_ONCE(sk->sk_prot);
        psock->sk = sk;
        psock->eval = __SK_NONE;
        psock->sk_proto = prot;
        psock->saved_unhash = prot->unhash;
        psock->saved_close = prot->close;
        psock->saved_write_space = sk->sk_write_space;

        INIT_LIST_HEAD(&psock->link);
        spin_lock_init(&psock->link_lock);

        INIT_WORK(&psock->work, sk_psock_backlog);
        INIT_LIST_HEAD(&psock->ingress_msg);
        skb_queue_head_init(&psock->ingress_skb);

        sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
        refcount_set(&psock->refcnt, 1);

        rcu_assign_sk_user_data_nocopy(sk, psock);
        sock_hold(sk);

out:
        write_unlock_bh(&sk->sk_callback_lock);
        return psock;
}
EXPORT_SYMBOL_GPL(sk_psock_init);

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
{
        struct sk_psock_link *link;

        spin_lock_bh(&psock->link_lock);
        link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
                                        list);
        if (link)
                list_del(&link->list);
        spin_unlock_bh(&psock->link_lock);
        return link;
}

void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
{
        struct sk_msg *msg, *tmp;

        list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
                list_del(&msg->list);
                sk_msg_free(psock->sk, msg);
                kfree(msg);
        }
}

static void sk_psock_zap_ingress(struct sk_psock *psock)
{
        __skb_queue_purge(&psock->ingress_skb);
        __sk_psock_purge_ingress_msg(psock);
}

static void sk_psock_link_destroy(struct sk_psock *psock)
{
        struct sk_psock_link *link, *tmp;

        list_for_each_entry_safe(link, tmp, &psock->link, list) {
                list_del(&link->list);
                sk_psock_free_link(link);
        }
}

static void sk_psock_destroy_deferred(struct work_struct *gc)
{
        struct sk_psock *psock = container_of(gc, struct sk_psock, gc);

        /* No sk_callback_lock since already detached. */

        /* Parser has been stopped */
        if (psock->progs.skb_parser)
                strp_done(&psock->parser.strp);

        cancel_work_sync(&psock->work);

        psock_progs_drop(&psock->progs);

        sk_psock_link_destroy(psock);
        sk_psock_cork_free(psock);
        sk_psock_zap_ingress(psock);

        if (psock->sk_redir)
                sock_put(psock->sk_redir);
        sock_put(psock->sk);
        kfree(psock);
}

void sk_psock_destroy(struct rcu_head *rcu)
{
        struct sk_psock *psock = container_of(rcu, struct sk_psock, rcu);

        INIT_WORK(&psock->gc, sk_psock_destroy_deferred);
        schedule_work(&psock->gc);
}
EXPORT_SYMBOL_GPL(sk_psock_destroy);

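/* Detach the psock from @sk: restore the saved protocol callbacks, clear
 * sk_user_data and stop the parser or verdict hook, then free the psock
 * itself via RCU once readers are done.
 */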
void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
        sk_psock_cork_free(psock);
        sk_psock_zap_ingress(psock);

        write_lock_bh(&sk->sk_callback_lock);
        sk_psock_restore_proto(sk, psock);
        rcu_assign_sk_user_data(sk, NULL);
        if (psock->progs.skb_parser)
                sk_psock_stop_strp(sk, psock);
        else if (psock->progs.skb_verdict)
                sk_psock_stop_verdict(sk, psock);
        write_unlock_bh(&sk->sk_callback_lock);
        sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);

        call_rcu(&psock->rcu, sk_psock_destroy);
}
EXPORT_SYMBOL_GPL(sk_psock_drop);

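/* Map a BPF program's SK_PASS/SK_DROP return code onto the internal
 * __SK_* action, folding in whether a redirect target was selected.
 */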
static int sk_psock_map_verd(int verdict, bool redir)
{
        switch (verdict) {
        case SK_PASS:
                return redir ? __SK_REDIRECT : __SK_PASS;
        case SK_DROP:
        default:
                break;
        }

        return __SK_DROP;
}

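/* Run the attached msg_parser program against @msg and resolve the verdict.
 * For __SK_REDIRECT, a reference to the target socket is taken and stashed
 * in psock->sk_redir; a redirect verdict without a target degrades to
 * __SK_DROP.
 */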
int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
                         struct sk_msg *msg)
{
        struct bpf_prog *prog;
        int ret;

        rcu_read_lock();
        prog = READ_ONCE(psock->progs.msg_parser);
        if (unlikely(!prog)) {
                ret = __SK_PASS;
                goto out;
        }

        sk_msg_compute_data_pointers(msg);
        msg->sk = sk;
        ret = bpf_prog_run_pin_on_cpu(prog, msg);
        ret = sk_psock_map_verd(ret, msg->sk_redir);
        psock->apply_bytes = msg->apply_bytes;
        if (ret == __SK_REDIRECT) {
                if (psock->sk_redir)
                        sock_put(psock->sk_redir);
                psock->sk_redir = msg->sk_redir;
                if (!psock->sk_redir) {
                        ret = __SK_DROP;
                        goto out;
                }
                sock_hold(psock->sk_redir);
        }
out:
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);

static int sk_psock_bpf_run(struct sk_psock *psock, struct bpf_prog *prog,
                            struct sk_buff *skb)
{
        bpf_compute_data_end_sk_skb(skb);
        return bpf_prog_run_pin_on_cpu(prog, skb);
}

static struct sk_psock *sk_psock_from_strp(struct strparser *strp)
{
        struct sk_psock_parser *parser;

        parser = container_of(strp, struct sk_psock_parser, strp);
        return container_of(parser, struct sk_psock, parser);
}

static void sk_psock_skb_redirect(struct sk_buff *skb)
{
        struct sk_psock *psock_other;
        struct sock *sk_other;

        sk_other = tcp_skb_bpf_redirect_fetch(skb);
        /* This error indicates a buggy BPF program: it returned a redirect
         * return code but then didn't set a redirect target socket.
         */
        if (unlikely(!sk_other)) {
                kfree_skb(skb);
                return;
        }
        psock_other = sk_psock(sk_other);
        /* This error indicates the socket is being torn down or had another
         * error that caused the pipe to break. We can't send a packet on
         * a socket that is in this state so we drop the skb.
         */
        if (!psock_other || sock_flag(sk_other, SOCK_DEAD) ||
            !sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
                kfree_skb(skb);
                return;
        }

        skb_queue_tail(&psock_other->ingress_skb, skb);
        schedule_work(&psock_other->work);
}

static void sk_psock_tls_verdict_apply(struct sk_buff *skb, struct sock *sk, int verdict)
{
        switch (verdict) {
        case __SK_REDIRECT:
                skb_set_owner_r(skb, sk);
                sk_psock_skb_redirect(skb);
                break;
        case __SK_PASS:
        case __SK_DROP:
        default:
                break;
        }
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
{
        struct bpf_prog *prog;
        int ret = __SK_PASS;

        rcu_read_lock();
        prog = READ_ONCE(psock->progs.skb_verdict);
        if (likely(prog)) {
                /* We skip the full skb_set_owner_r() here because if we do
                 * a SK_PASS or SK_DROP we can skip skb memory accounting
                 * and use the TLS context.
                 */
                skb->sk = psock->sk;
                tcp_skb_bpf_redirect_clear(skb);
                ret = sk_psock_bpf_run(psock, prog, skb);
                ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
                skb->sk = NULL;
        }
        sk_psock_tls_verdict_apply(skb, psock->sk, ret);
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);

static void sk_psock_verdict_apply(struct sk_psock *psock,
                                   struct sk_buff *skb, int verdict)
{
        struct tcp_skb_cb *tcp;
        struct sock *sk_other;
        int err = -EIO;

        switch (verdict) {
        case __SK_PASS:
                sk_other = psock->sk;
                if (sock_flag(sk_other, SOCK_DEAD) ||
                    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
                        goto out_free;
                }

                tcp = TCP_SKB_CB(skb);
                tcp->bpf.flags |= BPF_F_INGRESS;

                /* If the queue is empty then we can submit directly
                 * into the msg queue. If it is not empty we have to
                 * queue work, otherwise we may get OOO data. Errors
                 * from sk_psock_skb_ingress are handled by retrying
                 * later from the workqueue.
                 */
                if (skb_queue_empty(&psock->ingress_skb)) {
                        err = sk_psock_skb_ingress(psock, skb);
                }
                if (err < 0) {
                        skb_queue_tail(&psock->ingress_skb, skb);
                        schedule_work(&psock->work);
                }
                break;
        case __SK_REDIRECT:
                sk_psock_skb_redirect(skb);
                break;
        case __SK_DROP:
        default:
out_free:
                kfree_skb(skb);
        }
}

static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
        struct sk_psock *psock;
        struct bpf_prog *prog;
        int ret = __SK_DROP;
        struct sock *sk;

        rcu_read_lock();
        sk = strp->sk;
        psock = sk_psock(sk);
        if (unlikely(!psock)) {
                kfree_skb(skb);
                goto out;
        }
        skb_set_owner_r(skb, sk);
        prog = READ_ONCE(psock->progs.skb_verdict);
        if (likely(prog)) {
                tcp_skb_bpf_redirect_clear(skb);
                ret = sk_psock_bpf_run(psock, prog, skb);
                ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
        }
        sk_psock_verdict_apply(psock, skb, ret);
out:
        rcu_read_unlock();
}

static int sk_psock_strp_read_done(struct strparser *strp, int err)
{
        return err;
}

static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
{
        struct sk_psock *psock = sk_psock_from_strp(strp);
        struct bpf_prog *prog;
        int ret = skb->len;

        rcu_read_lock();
        prog = READ_ONCE(psock->progs.skb_parser);
        if (likely(prog)) {
                skb->sk = psock->sk;
                ret = sk_psock_bpf_run(psock, prog, skb);
                skb->sk = NULL;
        }
        rcu_read_unlock();
        return ret;
}

/* Called with socket lock held. */
static void sk_psock_strp_data_ready(struct sock *sk)
{
        struct sk_psock *psock;

        rcu_read_lock();
        psock = sk_psock(sk);
        if (likely(psock)) {
                if (tls_sw_has_ctx_rx(sk)) {
                        psock->parser.saved_data_ready(sk);
                } else {
                        write_lock_bh(&sk->sk_callback_lock);
                        strp_data_ready(&psock->parser.strp);
                        write_unlock_bh(&sk->sk_callback_lock);
                }
        }
        rcu_read_unlock();
}

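/* read_sock() callback used when no stream parser is attached: clone the
 * skb (tcp_read_sock() may consume the original), run the verdict program
 * and apply the result. Returns the number of bytes accepted.
 */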
static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
                                 unsigned int offset, size_t orig_len)
{
        struct sock *sk = (struct sock *)desc->arg.data;
        struct sk_psock *psock;
        struct bpf_prog *prog;
        int ret = __SK_DROP;
        int len = skb->len;

        /* clone here so sk_eat_skb() in tcp_read_sock() does not drop our data */
        skb = skb_clone(skb, GFP_ATOMIC);
        if (!skb) {
                desc->error = -ENOMEM;
                return 0;
        }

        rcu_read_lock();
        psock = sk_psock(sk);
        if (unlikely(!psock)) {
                len = 0;
                kfree_skb(skb);
                goto out;
        }
        skb_set_owner_r(skb, sk);
        prog = READ_ONCE(psock->progs.skb_verdict);
        if (likely(prog)) {
                tcp_skb_bpf_redirect_clear(skb);
                ret = sk_psock_bpf_run(psock, prog, skb);
                ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
        }
        sk_psock_verdict_apply(psock, skb, ret);
out:
        rcu_read_unlock();
        return len;
}

static void sk_psock_verdict_data_ready(struct sock *sk)
{
        struct socket *sock = sk->sk_socket;
        read_descriptor_t desc;

        if (unlikely(!sock || !sock->ops || !sock->ops->read_sock))
                return;

        desc.arg.data = sk;
        desc.error = 0;
        desc.count = 1;

        sock->ops->read_sock(sk, &desc, sk_psock_verdict_recv);
}

static void sk_psock_write_space(struct sock *sk)
{
        struct sk_psock *psock;
        void (*write_space)(struct sock *sk) = NULL;

        rcu_read_lock();
        psock = sk_psock(sk);
        if (likely(psock)) {
                if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
                        schedule_work(&psock->work);
                write_space = psock->saved_write_space;
        }
        rcu_read_unlock();
        if (write_space)
                write_space(sk);
}

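/* Wire up the strparser callbacks for @psock. A hypothetical caller would
 * do this once after sk_psock_init() and before sk_psock_start_strp(),
 * e.g.:
 *
 *      psock = sk_psock_init(sk, NUMA_NO_NODE);
 *      if (!IS_ERR(psock) && !sk_psock_init_strp(sk, psock))
 *              sk_psock_start_strp(sk, psock);
 *
 * (illustrative sketch only; real callers also handle locking and errors.)
 */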
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
        static const struct strp_callbacks cb = {
                .rcv_msg        = sk_psock_strp_read,
                .read_sock_done = sk_psock_strp_read_done,
                .parse_msg      = sk_psock_strp_parse,
        };

        psock->parser.enabled = false;
        return strp_init(&psock->parser.strp, sk, &cb);
}

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
{
        struct sk_psock_parser *parser = &psock->parser;

        if (parser->enabled)
                return;

        parser->saved_data_ready = sk->sk_data_ready;
        sk->sk_data_ready = sk_psock_verdict_data_ready;
        sk->sk_write_space = sk_psock_write_space;
        parser->enabled = true;
}

void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
        struct sk_psock_parser *parser = &psock->parser;

        if (parser->enabled)
                return;

        parser->saved_data_ready = sk->sk_data_ready;
        sk->sk_data_ready = sk_psock_strp_data_ready;
        sk->sk_write_space = sk_psock_write_space;
        parser->enabled = true;
}

void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
        struct sk_psock_parser *parser = &psock->parser;

        if (!parser->enabled)
                return;

        sk->sk_data_ready = parser->saved_data_ready;
        parser->saved_data_ready = NULL;
        strp_stop(&parser->strp);
        parser->enabled = false;
}

void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
{
        struct sk_psock_parser *parser = &psock->parser;

        if (!parser->enabled)
                return;

        sk->sk_data_ready = parser->saved_data_ready;
        parser->saved_data_ready = NULL;
        parser->enabled = false;
}