// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>

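/* msg->sg.data[] is used as a small ring indexed by sg.start and sg.end.
 * sk_msg_try_coalesce_ok() decides whether sk_msg_alloc() may merge new bytes
 * into the ring's last element rather than consuming a fresh one, handling
 * both the linear (end > start) and wrapped (end < start) layouts;
 * elem_first_coalesce lets callers forbid coalescing into elements that
 * existed before the current operation.
 */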
static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
{
	if (msg->sg.end > msg->sg.start &&
	    elem_first_coalesce < msg->sg.end)
		return true;

	if (msg->sg.end < msg->sg.start &&
	    (elem_first_coalesce > msg->sg.start ||
	     elem_first_coalesce < msg->sg.end))
		return true;

	return false;
}

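/* sk_msg_alloc() grows @msg until msg->sg.size reaches @len, allocating from
 * the socket's page frag. Each chunk is charged to the socket's send memory
 * and either coalesced into the last scatterlist element or appended as a new
 * one. Returns -ENOMEM when memory cannot be obtained or scheduled, and
 * -ENOSPC when the scatterlist ring is full.
 */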
int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce)
{
	struct page_frag *pfrag = sk_page_frag(sk);
	int ret = 0;

	len -= msg->sg.size;
	while (len > 0) {
		struct scatterlist *sge;
		u32 orig_offset;
		int use, i;

		if (!sk_page_frag_refill(sk, pfrag))
			return -ENOMEM;

		orig_offset = pfrag->offset;
		use = min_t(int, len, pfrag->size - orig_offset);
		if (!sk_wmem_schedule(sk, use))
			return -ENOMEM;

		i = msg->sg.end;
		sk_msg_iter_var_prev(i);
		sge = &msg->sg.data[i];

		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
		    sg_page(sge) == pfrag->page &&
		    sge->offset + sge->length == orig_offset) {
			sge->length += use;
		} else {
			if (sk_msg_full(msg)) {
				ret = -ENOSPC;
				break;
			}

			sge = &msg->sg.data[msg->sg.end];
			sg_unmark_end(sge);
			sg_set_page(sge, pfrag->page, use, orig_offset);
			get_page(pfrag->page);
			sk_msg_iter_next(msg, end);
		}

		sk_mem_charge(sk, use);
		msg->sg.size += use;
		pfrag->offset += use;
		len -= use;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_alloc);

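/* sk_msg_clone() copies @len bytes starting at offset @off in @src into @dst
 * by sharing pages rather than copying data: a run is either merged into
 * @dst's last element when it is virtually contiguous with it, or added via
 * sk_msg_page_add(). The cloned bytes are charged to @sk; -ENOSPC is returned
 * if @src runs out of data or @dst runs out of scatterlist slots.
 */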
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len)
{
	int i = src->sg.start;
	struct scatterlist *sge = sk_msg_elem(src, i);
	struct scatterlist *sgd = NULL;
	u32 sge_len, sge_off;

	while (off) {
		if (sge->length > off)
			break;
		off -= sge->length;
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && off)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	while (len) {
		sge_len = sge->length - off;
		if (sge_len > len)
			sge_len = len;

		if (dst->sg.end)
			sgd = sk_msg_elem(dst, dst->sg.end - 1);

		if (sgd &&
		    (sg_page(sge) == sg_page(sgd)) &&
		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
			sgd->length += sge_len;
			dst->sg.size += sge_len;
		} else if (!sk_msg_full(dst)) {
			sge_off = sge->offset + off;
			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
		} else {
			return -ENOSPC;
		}

		off = 0;
		len -= sge_len;
		sk_mem_charge(sk, sge_len);
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && len)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_msg_clone);

void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = sk_msg_elem(msg, i);

		if (bytes < sge->length) {
			sge->length -= bytes;
			sge->offset += bytes;
			sk_mem_uncharge(sk, bytes);
			break;
		}

		sk_mem_uncharge(sk, sge->length);
		bytes -= sge->length;
		sge->length = 0;
		sge->offset = 0;
		sk_msg_iter_var_next(i);
	} while (bytes && i != msg->sg.end);
	msg->sg.start = i;
}
EXPORT_SYMBOL_GPL(sk_msg_return_zero);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = &msg->sg.data[i];
		int uncharge = (bytes < sge->length) ? bytes : sge->length;

		sk_mem_uncharge(sk, uncharge);
		bytes -= uncharge;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
}
EXPORT_SYMBOL_GPL(sk_msg_return);

static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
			    bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	u32 len = sge->length;

	/* When the skb owns the memory we free it from the consume_skb() path. */
	if (!msg->skb) {
		if (charge)
			sk_mem_uncharge(sk, len);
		put_page(sg_page(sge));
	}
	memset(sge, 0, sizeof(*sge));
	return len;
}

static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
			 bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	int freed = 0;

	while (msg->sg.size) {
		msg->sg.size -= sge->length;
		freed += sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, msg->sg.size);
		sge = sk_msg_elem(msg, i);
	}
	consume_skb(msg->skb);
	sk_msg_init(msg);
	return freed;
}

int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, false);
}
EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);

int sk_msg_free(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free);

static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
				  u32 bytes, bool charge)
{
	struct scatterlist *sge;
	u32 i = msg->sg.start;

	while (bytes) {
		sge = sk_msg_elem(msg, i);
		if (!sge->length)
			break;
		if (bytes < sge->length) {
			if (charge)
				sk_mem_uncharge(sk, bytes);
			sge->length -= bytes;
			sge->offset += bytes;
			msg->sg.size -= bytes;
			break;
		}

		msg->sg.size -= sge->length;
		bytes -= sge->length;
		sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, bytes);
	}
	msg->sg.start = i;
}

void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free_partial);

void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, false);
}

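/* sk_msg_trim() shrinks @msg so that msg->sg.size becomes @len, releasing
 * whole trailing scatterlist elements and then trimming the last partial one.
 * sg.curr and sg.copybreak are pulled back when they would land past the new
 * tail so that later copy operations start at a valid position.
 */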
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
{
	int trim = msg->sg.size - len;
	u32 i = msg->sg.end;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	sk_msg_iter_var_prev(i);
	msg->sg.size = len;
	while (msg->sg.data[i].length &&
	       trim >= msg->sg.data[i].length) {
		trim -= msg->sg.data[i].length;
		sk_msg_free_elem(sk, msg, i, true);
		sk_msg_iter_var_prev(i);
		if (!trim)
			goto out;
	}

	msg->sg.data[i].length -= trim;
	sk_mem_uncharge(sk, trim);
	/* Adjust copybreak if it falls into the trimmed part of last buf */
	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
		msg->sg.copybreak = msg->sg.data[i].length;
out:
	sk_msg_iter_var_next(i);
	msg->sg.end = i;

	/* If we trim data by a full sg elem before the curr pointer, update
	 * copybreak and curr so that any future copy operations start at
	 * the new copy location.
	 * However, trimmed data that has not yet been used in a copy op
	 * does not require an update.
	 */
	if (!msg->sg.size) {
		msg->sg.curr = msg->sg.start;
		msg->sg.copybreak = 0;
	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
		sk_msg_iter_var_prev(i);
		msg->sg.curr = i;
		msg->sg.copybreak = msg->sg.data[i].length;
	}
}
EXPORT_SYMBOL_GPL(sk_msg_trim);

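/* sk_msg_zerocopy_from_iter() maps up to @bytes from the user iterator
 * straight into @msg by taking references on the user pages with
 * iov_iter_get_pages() and pointing scatterlist elements at them, charging
 * @sk for each chunk. On failure the iterator is reverted to its original
 * position; the caller is expected to trim @msg if the partially built
 * message must also be undone.
 */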
int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes)
{
	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
	const int to_max_pages = MAX_MSG_FRAGS;
	struct page *pages[MAX_MSG_FRAGS];
	ssize_t orig, copied, use, offset;

	orig = msg->sg.size;
	while (bytes > 0) {
		i = 0;
		maxpages = to_max_pages - num_elems;
		if (maxpages == 0) {
			ret = -EFAULT;
			goto out;
		}

		copied = iov_iter_get_pages(from, pages, bytes, maxpages,
					    &offset);
		if (copied <= 0) {
			ret = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);
		bytes -= copied;
		msg->sg.size += copied;

		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);
			sg_set_page(&msg->sg.data[msg->sg.end],
				    pages[i], use, offset);
			sg_unmark_end(&msg->sg.data[msg->sg.end]);
			sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;
			sk_msg_iter_next(msg, end);
			num_elems++;
			i++;
		}
		/* When zerocopy is mixed with sk_msg_*copy* operations we
		 * may have a copybreak set; in this case clear it and prefer
		 * the zerocopy remainder when possible.
		 */
		msg->sg.copybreak = 0;
		msg->sg.curr = msg->sg.end;
	}
out:
	/* Revert iov_iter updates, msg will need to use 'trim' later if it
	 * also needs to be cleared.
	 */
	if (ret)
		iov_iter_revert(from, msg->sg.size - orig);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);

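/* sk_msg_memcopy_from_iter() copies @bytes from the user iterator into the
 * memory already attached to @msg, starting at sg.curr/sg.copybreak and
 * advancing both as buffers fill. copy_from_iter_nocache() is preferred when
 * the route supports NETIF_F_NOCACHE_COPY. Returns a negative errno on
 * failure: -EFAULT on a short copy, -ENOSPC when @msg has no room.
 */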
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes)
{
	int ret = -ENOSPC, i = msg->sg.curr;
	struct scatterlist *sge;
	u32 copy, buf_size;
	void *to;

	do {
		sge = sk_msg_elem(msg, i);
		/* This is possible if a trim operation shrunk the buffer */
		if (msg->sg.copybreak >= sge->length) {
			msg->sg.copybreak = 0;
			sk_msg_iter_var_next(i);
			if (i == msg->sg.end)
				break;
			sge = sk_msg_elem(msg, i);
		}

		buf_size = sge->length - msg->sg.copybreak;
		copy = (buf_size > bytes) ? bytes : buf_size;
		to = sg_virt(sge) + msg->sg.copybreak;
		msg->sg.copybreak += copy;
		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
			ret = copy_from_iter_nocache(to, copy, from);
		else
			ret = copy_from_iter(to, copy, from);
		if (ret != copy) {
			ret = -EFAULT;
			goto out;
		}
		bytes -= copy;
		if (!bytes)
			break;
		msg->sg.copybreak = 0;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
out:
	msg->sg.curr = i;
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);

/* Receive sk_msg from psock->ingress_msg to @msg. */
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		   int len, int flags)
{
	struct iov_iter *iter = &msg->msg_iter;
	int peek = flags & MSG_PEEK;
	struct sk_msg *msg_rx;
	int i, copied = 0;

	msg_rx = sk_psock_peek_msg(psock);
	while (copied != len) {
		struct scatterlist *sge;

		if (unlikely(!msg_rx))
			break;

		i = msg_rx->sg.start;
		do {
			struct page *page;
			int copy;

			sge = sk_msg_elem(msg_rx, i);
			copy = sge->length;
			page = sg_page(sge);
			if (copied + copy > len)
				copy = len - copied;
			copy = copy_page_to_iter(page, sge->offset, copy, iter);
			if (!copy)
				return copied ? copied : -EFAULT;

			copied += copy;
			if (likely(!peek)) {
				sge->offset += copy;
				sge->length -= copy;
				if (!msg_rx->skb)
					sk_mem_uncharge(sk, copy);
				msg_rx->sg.size -= copy;

				if (!sge->length) {
					sk_msg_iter_var_next(i);
					if (!msg_rx->skb)
						put_page(page);
				}
			} else {
				/* Let's not optimize the peek case: if
				 * copy_page_to_iter() didn't copy the entire
				 * length, just break.
				 */
				if (copy != sge->length)
					return copied;
				sk_msg_iter_var_next(i);
			}

			if (copied == len)
				break;
		} while (i != msg_rx->sg.end);

		if (unlikely(peek)) {
			msg_rx = sk_psock_next_msg(psock, msg_rx);
			if (!msg_rx)
				break;
			continue;
		}

		msg_rx->sg.start = i;
		if (!sge->length && msg_rx->sg.start == msg_rx->sg.end) {
			msg_rx = sk_psock_dequeue_msg(psock);
			kfree_sk_msg(msg_rx);
		}
		msg_rx = sk_psock_peek_msg(psock);
	}

	return copied;
}
EXPORT_SYMBOL_GPL(sk_msg_recvmsg);

static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
						  struct sk_buff *skb)
{
	struct sk_msg *msg;

	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		return NULL;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return NULL;

	msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_KERNEL);
	if (unlikely(!msg))
		return NULL;

	sk_msg_init(msg);
	return msg;
}

static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
					struct sk_psock *psock,
					struct sock *sk,
					struct sk_msg *msg)
{
	int num_sge, copied;

	/* skb_linearize() may fail with ENOMEM, but let's simply try again
	 * later if this happens. Under memory pressure we don't want to
	 * drop the skb. We need to linearize the skb so that the mapping
	 * in skb_to_sgvec() cannot error.
	 */
	if (skb_linearize(skb))
		return -EAGAIN;
	num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len);
	if (unlikely(num_sge < 0)) {
		kfree(msg);
		return num_sge;
	}

	copied = skb->len;
	msg->sg.start = 0;
	msg->sg.size = copied;
	msg->sg.end = num_sge;
	msg->skb = skb;

	sk_psock_queue_msg(psock, msg);
	sk_psock_data_ready(sk, psock);
	return copied;
}

static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb);

static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
{
	struct sock *sk = psock->sk;
	struct sk_msg *msg;

	/* If we are receiving on the same sock skb->sk is already assigned,
	 * so skip memory accounting and owner transition since it is already
	 * set correctly.
	 */
	if (unlikely(skb->sk == sk))
		return sk_psock_skb_ingress_self(psock, skb);
	msg = sk_psock_create_ingress_msg(sk, skb);
	if (!msg)
		return -EAGAIN;

	/* This will transition ownership of the data from the socket where
	 * the BPF program was run initiating the redirect to the socket
	 * we will eventually receive this data on. The data will be released
	 * from consume_skb() found in __tcp_bpf_recvmsg() after it has been
	 * copied into user buffers.
	 */
	skb_set_owner_r(skb, sk);
	return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
}

/* Puts an skb on the ingress queue of the socket already assigned to the
 * skb. In this case we do not need to check memory limits or call
 * skb_set_owner_r() because the skb is already accounted for here.
 */
static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb)
{
	struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
	struct sock *sk = psock->sk;

	if (unlikely(!msg))
		return -EAGAIN;
	sk_msg_init(msg);
	skb_set_owner_r(skb, sk);
	return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
}

static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
			       u32 off, u32 len, bool ingress)
{
	if (!ingress) {
		if (!sock_writeable(psock->sk))
			return -EAGAIN;
		return skb_send_sock(psock->sk, skb, off, len);
	}
	return sk_psock_skb_ingress(psock, skb);
}

static void sock_drop(struct sock *sk, struct sk_buff *skb)
{
	sk_drops_add(sk, skb);
	kfree_skb(skb);
}

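/* sk_psock_backlog() is the psock workqueue handler: it drains
 * psock->ingress_skb, resuming any skb saved in psock->work_state after a
 * previous -EAGAIN, and either queues each skb on the local ingress path or
 * transmits it with skb_send_sock() for egress redirects. Hard errors report
 * an error on the psock, disable TX and drop the skb.
 */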
static void sk_psock_backlog(struct work_struct *work)
{
	struct sk_psock *psock = container_of(work, struct sk_psock, work);
	struct sk_psock_work_state *state = &psock->work_state;
	struct sk_buff *skb;
	bool ingress;
	u32 len, off;
	int ret;

	mutex_lock(&psock->work_mutex);
	if (state->skb) {
		skb = state->skb;
		len = state->len;
		off = state->off;
		state->skb = NULL;
		goto start;
	}

	while ((skb = skb_dequeue(&psock->ingress_skb))) {
		len = skb->len;
		off = 0;
start:
		ingress = skb_bpf_ingress(skb);
		skb_bpf_redirect_clear(skb);
		do {
			ret = -EIO;
			if (!sock_flag(psock->sk, SOCK_DEAD))
				ret = sk_psock_handle_skb(psock, skb, off,
							  len, ingress);
			if (ret <= 0) {
				if (ret == -EAGAIN) {
					state->skb = skb;
					state->len = len;
					state->off = off;
					goto end;
				}
				/* Hard errors break pipe and stop xmit. */
				sk_psock_report_error(psock, ret ? -ret : EPIPE);
				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
				sock_drop(psock->sk, skb);
				goto end;
			}
			off += ret;
			len -= ret;
		} while (len);

		if (!ingress)
			kfree_skb(skb);
	}
end:
	mutex_unlock(&psock->work_mutex);
}

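/* sk_psock_init() allocates a psock for @sk and attaches it through
 * sk_user_data, saving the socket's original proto callbacks so they can be
 * restored on teardown; sk->sk_callback_lock is taken internally. Returns
 * ERR_PTR(-EBUSY) if sk_user_data is already in use and ERR_PTR(-ENOMEM) on
 * allocation failure.
 */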
struct sk_psock *sk_psock_init(struct sock *sk, int node)
{
	struct sk_psock *psock;
	struct proto *prot;

	write_lock_bh(&sk->sk_callback_lock);

	if (sk->sk_user_data) {
		psock = ERR_PTR(-EBUSY);
		goto out;
	}

	psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
	if (!psock) {
		psock = ERR_PTR(-ENOMEM);
		goto out;
	}

	prot = READ_ONCE(sk->sk_prot);
	psock->sk = sk;
	psock->eval = __SK_NONE;
	psock->sk_proto = prot;
	psock->saved_unhash = prot->unhash;
	psock->saved_close = prot->close;
	psock->saved_write_space = sk->sk_write_space;

	INIT_LIST_HEAD(&psock->link);
	spin_lock_init(&psock->link_lock);

	INIT_WORK(&psock->work, sk_psock_backlog);
	mutex_init(&psock->work_mutex);
	INIT_LIST_HEAD(&psock->ingress_msg);
	spin_lock_init(&psock->ingress_lock);
	skb_queue_head_init(&psock->ingress_skb);

	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
	refcount_set(&psock->refcnt, 1);

	rcu_assign_sk_user_data_nocopy(sk, psock);
	sock_hold(sk);

out:
	write_unlock_bh(&sk->sk_callback_lock);
	return psock;
}
EXPORT_SYMBOL_GPL(sk_psock_init);

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
{
	struct sk_psock_link *link;

	spin_lock_bh(&psock->link_lock);
	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
					list);
	if (link)
		list_del(&link->list);
	spin_unlock_bh(&psock->link_lock);
	return link;
}

static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
{
	struct sk_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
		list_del(&msg->list);
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
}

static void __sk_psock_zap_ingress(struct sk_psock *psock)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
		skb_bpf_redirect_clear(skb);
		sock_drop(psock->sk, skb);
	}
	__sk_psock_purge_ingress_msg(psock);
}

static void sk_psock_link_destroy(struct sk_psock *psock)
{
	struct sk_psock_link *link, *tmp;

	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		list_del(&link->list);
		sk_psock_free_link(link);
	}
}

void sk_psock_stop(struct sk_psock *psock, bool wait)
{
	spin_lock_bh(&psock->ingress_lock);
	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
	sk_psock_cork_free(psock);
	__sk_psock_zap_ingress(psock);
	spin_unlock_bh(&psock->ingress_lock);

	if (wait)
		cancel_work_sync(&psock->work);
}

static void sk_psock_done_strp(struct sk_psock *psock);

static void sk_psock_destroy(struct work_struct *work)
{
	struct sk_psock *psock = container_of(to_rcu_work(work),
					      struct sk_psock, rwork);
	/* No sk_callback_lock since already detached. */

	sk_psock_done_strp(psock);

	cancel_work_sync(&psock->work);
	mutex_destroy(&psock->work_mutex);

	psock_progs_drop(&psock->progs);

	sk_psock_link_destroy(psock);
	sk_psock_cork_free(psock);

	if (psock->sk_redir)
		sock_put(psock->sk_redir);
	sock_put(psock->sk);
	kfree(psock);
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
	sk_psock_stop(psock, false);

	write_lock_bh(&sk->sk_callback_lock);
	sk_psock_restore_proto(sk, psock);
	rcu_assign_sk_user_data(sk, NULL);
	if (psock->progs.stream_parser)
		sk_psock_stop_strp(sk, psock);
	else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
		sk_psock_stop_verdict(sk, psock);
	write_unlock_bh(&sk->sk_callback_lock);

	INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
	queue_rcu_work(system_wq, &psock->rwork);
}
EXPORT_SYMBOL_GPL(sk_psock_drop);

static int sk_psock_map_verd(int verdict, bool redir)
{
	switch (verdict) {
	case SK_PASS:
		return redir ? __SK_REDIRECT : __SK_PASS;
	case SK_DROP:
	default:
		break;
	}

	return __SK_DROP;
}

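/* sk_psock_msg_verdict() runs the attached msg_parser BPF program on @msg and
 * maps its return code to __SK_PASS, __SK_REDIRECT or __SK_DROP. On redirect
 * the target socket chosen by the program is stashed in psock->sk_redir with
 * a reference held; a redirect verdict without a target is downgraded to
 * __SK_DROP.
 */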
int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg)
{
	struct bpf_prog *prog;
	int ret;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.msg_parser);
	if (unlikely(!prog)) {
		ret = __SK_PASS;
		goto out;
	}

	sk_msg_compute_data_pointers(msg);
	msg->sk = sk;
	ret = bpf_prog_run_pin_on_cpu(prog, msg);
	ret = sk_psock_map_verd(ret, msg->sk_redir);
	psock->apply_bytes = msg->apply_bytes;
	if (ret == __SK_REDIRECT) {
		if (psock->sk_redir)
			sock_put(psock->sk_redir);
		psock->sk_redir = msg->sk_redir;
		if (!psock->sk_redir) {
			ret = __SK_DROP;
			goto out;
		}
		sock_hold(psock->sk_redir);
	}
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);

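/* sk_psock_skb_redirect() forwards @skb to the socket selected by the BPF
 * program (skb_bpf_redirect_fetch()): the skb is queued on the target
 * psock's ingress_skb list and its backlog work is scheduled. If the target
 * is missing, dead or has TX disabled, the skb is dropped and -EIO returned.
 */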
static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
{
	struct sk_psock *psock_other;
	struct sock *sk_other;

	sk_other = skb_bpf_redirect_fetch(skb);
	/* This error indicates a buggy BPF program: it returned a redirect
	 * return code, but then didn't set a redirect interface.
	 */
	if (unlikely(!sk_other)) {
		sock_drop(from->sk, skb);
		return -EIO;
	}
	psock_other = sk_psock(sk_other);
	/* This error indicates the socket is being torn down or had another
	 * error that caused the pipe to break. We can't send a packet on
	 * a socket that is in this state so we drop the skb.
	 */
	if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}
	spin_lock_bh(&psock_other->ingress_lock);
	if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
		spin_unlock_bh(&psock_other->ingress_lock);
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}

	skb_queue_tail(&psock_other->ingress_skb, skb);
	schedule_work(&psock_other->work);
	spin_unlock_bh(&psock_other->ingress_lock);
	return 0;
}

static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
				       struct sk_psock *from, int verdict)
{
	switch (verdict) {
	case __SK_REDIRECT:
		sk_psock_skb_redirect(from, skb);
		break;
	case __SK_PASS:
	case __SK_DROP:
	default:
		break;
	}
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
{
	struct bpf_prog *prog;
	int ret = __SK_PASS;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = psock->sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_tls_verdict_apply(skb, psock, ret);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);

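/* sk_psock_verdict_apply() acts on a verdict for @skb: __SK_PASS queues the
 * data on this psock's ingress (directly when the backlog queue is empty,
 * otherwise via the workqueue to preserve ordering), __SK_REDIRECT hands the
 * skb to sk_psock_skb_redirect(), and anything else drops it.
 */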
static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
				  int verdict)
{
	struct sock *sk_other;
	int err = 0;

	switch (verdict) {
	case __SK_PASS:
		err = -EIO;
		sk_other = psock->sk;
		if (sock_flag(sk_other, SOCK_DEAD) ||
		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
			goto out_free;
		}

		skb_bpf_set_ingress(skb);

		/* If the queue is empty then we can submit directly
		 * into the msg queue. If it's not empty we have to
		 * queue work, otherwise we may get OOO data. If
		 * sk_psock_skb_ingress() errors, the skb will be
		 * handled by retrying later from the workqueue.
		 */
		if (skb_queue_empty(&psock->ingress_skb)) {
			err = sk_psock_skb_ingress_self(psock, skb);
		}
		if (err < 0) {
			spin_lock_bh(&psock->ingress_lock);
			if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
				skb_queue_tail(&psock->ingress_skb, skb);
				schedule_work(&psock->work);
				err = 0;
			}
			spin_unlock_bh(&psock->ingress_lock);
			if (err < 0) {
				skb_bpf_redirect_clear(skb);
				goto out_free;
			}
		}
		break;
	case __SK_REDIRECT:
		err = sk_psock_skb_redirect(psock, skb);
		break;
	case __SK_DROP:
	default:
out_free:
		sock_drop(psock->sk, skb);
	}

	return err;
}

static void sk_psock_write_space(struct sock *sk)
{
	struct sk_psock *psock;
	void (*write_space)(struct sock *sk) = NULL;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			schedule_work(&psock->work);
		write_space = psock->saved_write_space;
	}
	rcu_read_unlock();
	if (write_space)
		write_space(sk);
}

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	struct sock *sk;

	rcu_read_lock();
	sk = strp->sk;
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		sock_drop(sk, skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_verdict_apply(psock, skb, ret);
out:
	rcu_read_unlock();
}

static int sk_psock_strp_read_done(struct strparser *strp, int err)
{
	return err;
}

static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
	struct bpf_prog *prog;
	int ret = skb->len;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_parser);
	if (likely(prog)) {
		skb->sk = psock->sk;
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		skb->sk = NULL;
	}
	rcu_read_unlock();
	return ret;
}

/* Called with socket lock held. */
static void sk_psock_strp_data_ready(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (tls_sw_has_ctx_rx(sk)) {
			psock->saved_data_ready(sk);
		} else {
			write_lock_bh(&sk->sk_callback_lock);
			strp_data_ready(&psock->strp);
			write_unlock_bh(&sk->sk_callback_lock);
		}
	}
	rcu_read_unlock();
}

int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	static const struct strp_callbacks cb = {
		.rcv_msg = sk_psock_strp_read,
		.read_sock_done = sk_psock_strp_read_done,
		.parse_msg = sk_psock_strp_parse,
	};

	return strp_init(&psock->strp, sk, &cb);
}

void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_strp_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
	strp_stop(&psock->strp);
}

static void sk_psock_done_strp(struct sk_psock *psock)
{
	/* Parser has been stopped */
	if (psock->progs.stream_parser)
		strp_done(&psock->strp);
}
#else
static void sk_psock_done_strp(struct sk_psock *psock)
{
}
#endif /* CONFIG_BPF_STREAM_PARSER */

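/* sk_psock_verdict_recv() is the read_sock() callback used when no stream
 * parser is attached: each skb is cloned (so tcp_read_sock() does not free
 * data we still need), run through the stream/skb verdict program, and the
 * verdict applied. Returns the number of bytes consumed, or 0 on error.
 */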
static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
				 unsigned int offset, size_t orig_len)
{
	struct sock *sk = (struct sock *)desc->arg.data;
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	int len = skb->len;

	/* clone here so sk_eat_skb() in tcp_read_sock does not drop our data */
	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		desc->error = -ENOMEM;
		return 0;
	}

	rcu_read_lock();
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		len = 0;
		sock_drop(sk, skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (!prog)
		prog = READ_ONCE(psock->progs.skb_verdict);
	if (likely(prog)) {
		skb->sk = sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	if (sk_psock_verdict_apply(psock, skb, ret) < 0)
		len = 0;
out:
	rcu_read_unlock();
	return len;
}

static void sk_psock_verdict_data_ready(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	read_descriptor_t desc;

	if (unlikely(!sock || !sock->ops || !sock->ops->read_sock))
		return;

	desc.arg.data = sk;
	desc.error = 0;
	desc.count = 1;

	sock->ops->read_sock(sk, &desc, sk_psock_verdict_recv);
}

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_verdict_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
{
	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
}