// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>

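/* msg->sg.data[] is used as a ring: sg.end > sg.start means the used
 * entries are contiguous, sg.end < sg.start means they wrap around the
 * end of the array. Coalescing into the last entry is only allowed when
 * that entry sits at or after elem_first_coalesce, i.e. it was appended
 * by the operation that is asking to coalesce.
 */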
static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
{
	if (msg->sg.end > msg->sg.start &&
	    elem_first_coalesce < msg->sg.end)
		return true;

	if (msg->sg.end < msg->sg.start &&
	    (elem_first_coalesce > msg->sg.start ||
	     elem_first_coalesce < msg->sg.end))
		return true;

	return false;
}

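/* Grow @msg to describe @len bytes in total, taking the new space from
 * the socket's page frag. Space is coalesced into the last sg entry
 * when it extends the same page frag, otherwise a fresh entry is used.
 * On memory failure the msg is trimmed back to its original size.
 */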
int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce)
{
	struct page_frag *pfrag = sk_page_frag(sk);
	u32 osize = msg->sg.size;
	int ret = 0;

	len -= msg->sg.size;
	while (len > 0) {
		struct scatterlist *sge;
		u32 orig_offset;
		int use, i;

		if (!sk_page_frag_refill(sk, pfrag)) {
			ret = -ENOMEM;
			goto msg_trim;
		}

		orig_offset = pfrag->offset;
		use = min_t(int, len, pfrag->size - orig_offset);
		if (!sk_wmem_schedule(sk, use)) {
			ret = -ENOMEM;
			goto msg_trim;
		}

		i = msg->sg.end;
		sk_msg_iter_var_prev(i);
		sge = &msg->sg.data[i];

		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
		    sg_page(sge) == pfrag->page &&
		    sge->offset + sge->length == orig_offset) {
			sge->length += use;
		} else {
			if (sk_msg_full(msg)) {
				ret = -ENOSPC;
				break;
			}

			sge = &msg->sg.data[msg->sg.end];
			sg_unmark_end(sge);
			sg_set_page(sge, pfrag->page, use, orig_offset);
			get_page(pfrag->page);
			sk_msg_iter_next(msg, end);
		}

		sk_mem_charge(sk, use);
		msg->sg.size += use;
		pfrag->offset += use;
		len -= use;
	}

	return ret;

msg_trim:
	sk_msg_trim(sk, msg, osize);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_alloc);

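/* Clone bytes [@off, @off + @len) of @src into @dst. The new entries
 * reference the same pages as @src (no data is copied) and the cloned
 * bytes are charged to @sk. Returns -ENOSPC when @dst runs out of sg
 * entries or the range runs past the end of @src.
 */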
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len)
{
	int i = src->sg.start;
	struct scatterlist *sge = sk_msg_elem(src, i);
	struct scatterlist *sgd = NULL;
	u32 sge_len, sge_off;

	while (off) {
		if (sge->length > off)
			break;
		off -= sge->length;
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && off)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	while (len) {
		sge_len = sge->length - off;
		if (sge_len > len)
			sge_len = len;

		if (dst->sg.end)
			sgd = sk_msg_elem(dst, dst->sg.end - 1);

		if (sgd &&
		    (sg_page(sge) == sg_page(sgd)) &&
		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
			sgd->length += sge_len;
			dst->sg.size += sge_len;
		} else if (!sk_msg_full(dst)) {
			sge_off = sge->offset + off;
			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
		} else {
			return -ENOSPC;
		}

		off = 0;
		len -= sge_len;
		sk_mem_charge(sk, sge_len);
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && len)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_msg_clone);

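/* Uncharge @bytes of @msg from @sk and zero out the fully consumed sg
 * entries, advancing msg->sg.start past them. A partially consumed
 * entry is shrunk in place.
 */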
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = sk_msg_elem(msg, i);

		if (bytes < sge->length) {
			sge->length -= bytes;
			sge->offset += bytes;
			sk_mem_uncharge(sk, bytes);
			break;
		}

		sk_mem_uncharge(sk, sge->length);
		bytes -= sge->length;
		sge->length = 0;
		sge->offset = 0;
		sk_msg_iter_var_next(i);
	} while (bytes && i != msg->sg.end);
	msg->sg.start = i;
}
EXPORT_SYMBOL_GPL(sk_msg_return_zero);

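/* Like sk_msg_return_zero(), but only adjusts the memory accounting;
 * the sg entries themselves are left untouched.
 */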
void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = &msg->sg.data[i];
		int uncharge = (bytes < sge->length) ? bytes : sge->length;

		sk_mem_uncharge(sk, uncharge);
		bytes -= uncharge;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
}
EXPORT_SYMBOL_GPL(sk_msg_return);

static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
			    bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	u32 len = sge->length;

	/* When the skb owns the memory we free it from the consume_skb path. */
	if (!msg->skb) {
		if (charge)
			sk_mem_uncharge(sk, len);
		put_page(sg_page(sge));
	}
	memset(sge, 0, sizeof(*sge));
	return len;
}

static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
			 bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	int freed = 0;

	while (msg->sg.size) {
		msg->sg.size -= sge->length;
		freed += sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, msg->sg.size);
		sge = sk_msg_elem(msg, i);
	}
	consume_skb(msg->skb);
	sk_msg_init(msg);
	return freed;
}

int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, false);
}
EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);

int sk_msg_free(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free);

static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
				  u32 bytes, bool charge)
{
	struct scatterlist *sge;
	u32 i = msg->sg.start;

	while (bytes) {
		sge = sk_msg_elem(msg, i);
		if (!sge->length)
			break;
		if (bytes < sge->length) {
			if (charge)
				sk_mem_uncharge(sk, bytes);
			sge->length -= bytes;
			sge->offset += bytes;
			msg->sg.size -= bytes;
			break;
		}

		msg->sg.size -= sge->length;
		bytes -= sge->length;
		sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, bytes);
	}
	msg->sg.start = i;
}

void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free_partial);

void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, false);
}

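/* Trim @msg down to @len bytes by releasing sg entries from the tail.
 * msg->sg.curr and copybreak are pulled back when they end up pointing
 * into trimmed space.
 */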
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
{
	int trim = msg->sg.size - len;
	u32 i = msg->sg.end;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	sk_msg_iter_var_prev(i);
	msg->sg.size = len;
	while (msg->sg.data[i].length &&
	       trim >= msg->sg.data[i].length) {
		trim -= msg->sg.data[i].length;
		sk_msg_free_elem(sk, msg, i, true);
		sk_msg_iter_var_prev(i);
		if (!trim)
			goto out;
	}

	msg->sg.data[i].length -= trim;
	sk_mem_uncharge(sk, trim);
	/* Adjust copybreak if it falls into the trimmed part of the last buf */
	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
		msg->sg.copybreak = msg->sg.data[i].length;
out:
	sk_msg_iter_var_next(i);
	msg->sg.end = i;

	/* If we trim data a full sg elem before the curr pointer, update
	 * copybreak and curr so that any future copy operations start at
	 * the new copy location.
	 * However, trimmed data that has not yet been used in a copy op
	 * does not require an update.
	 */
	if (!msg->sg.size) {
		msg->sg.curr = msg->sg.start;
		msg->sg.copybreak = 0;
	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
		sk_msg_iter_var_prev(i);
		msg->sg.curr = i;
		msg->sg.copybreak = msg->sg.data[i].length;
	}
}
EXPORT_SYMBOL_GPL(sk_msg_trim);

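/* Pin user pages from @from directly into @msg's sg entries, zero-copy.
 * On failure the iov_iter is reverted; the caller must trim @msg itself
 * if the partially filled entries should also be released.
 */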
int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes)
{
	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
	const int to_max_pages = MAX_MSG_FRAGS;
	struct page *pages[MAX_MSG_FRAGS];
	ssize_t orig, copied, use, offset;

	orig = msg->sg.size;
	while (bytes > 0) {
		i = 0;
		maxpages = to_max_pages - num_elems;
		if (maxpages == 0) {
			ret = -EFAULT;
			goto out;
		}

		copied = iov_iter_get_pages(from, pages, bytes, maxpages,
					    &offset);
		if (copied <= 0) {
			ret = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);
		bytes -= copied;
		msg->sg.size += copied;

		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);
			sg_set_page(&msg->sg.data[msg->sg.end],
				    pages[i], use, offset);
			sg_unmark_end(&msg->sg.data[msg->sg.end]);
			sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;
			sk_msg_iter_next(msg, end);
			num_elems++;
			i++;
		}
		/* When zerocopy is mixed with sk_msg_*copy* operations we
		 * may have a copybreak set; in that case clear it and prefer
		 * the zerocopy remainder when possible.
		 */
		msg->sg.copybreak = 0;
		msg->sg.curr = msg->sg.end;
	}
out:
	/* Revert iov_iter updates; msg will need to use 'trim' later if it
	 * also needs to be cleared.
	 */
	if (ret)
		iov_iter_revert(from, msg->sg.size - orig);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);

int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes)
{
	int ret = -ENOSPC, i = msg->sg.curr;
	struct scatterlist *sge;
	u32 copy, buf_size;
	void *to;

	do {
		sge = sk_msg_elem(msg, i);
		/* This is possible if a trim operation shrunk the buffer */
		if (msg->sg.copybreak >= sge->length) {
			msg->sg.copybreak = 0;
			sk_msg_iter_var_next(i);
			if (i == msg->sg.end)
				break;
			sge = sk_msg_elem(msg, i);
		}

		buf_size = sge->length - msg->sg.copybreak;
		copy = (buf_size > bytes) ? bytes : buf_size;
		to = sg_virt(sge) + msg->sg.copybreak;
		msg->sg.copybreak += copy;
		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
			ret = copy_from_iter_nocache(to, copy, from);
		else
			ret = copy_from_iter(to, copy, from);
		if (ret != copy) {
			ret = -EFAULT;
			goto out;
		}
		bytes -= copy;
		if (!bytes)
			break;
		msg->sg.copybreak = 0;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
out:
	msg->sg.curr = i;
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);

/* Receive sk_msg from psock->ingress_msg to @msg. */
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		   int len, int flags)
{
	struct iov_iter *iter = &msg->msg_iter;
	int peek = flags & MSG_PEEK;
	struct sk_msg *msg_rx;
	int i, copied = 0;

	msg_rx = sk_psock_peek_msg(psock);
	while (copied != len) {
		struct scatterlist *sge;

		if (unlikely(!msg_rx))
			break;

		i = msg_rx->sg.start;
		do {
			struct page *page;
			int copy;

			sge = sk_msg_elem(msg_rx, i);
			copy = sge->length;
			page = sg_page(sge);
			if (copied + copy > len)
				copy = len - copied;
			copy = copy_page_to_iter(page, sge->offset, copy, iter);
			if (!copy)
				return copied ? copied : -EFAULT;

			copied += copy;
			if (likely(!peek)) {
				sge->offset += copy;
				sge->length -= copy;
				if (!msg_rx->skb)
					sk_mem_uncharge(sk, copy);
				msg_rx->sg.size -= copy;

				if (!sge->length) {
					sk_msg_iter_var_next(i);
					if (!msg_rx->skb)
						put_page(page);
				}
			} else {
				/* Let's not optimize the peek case: if
				 * copy_page_to_iter() didn't copy the
				 * entire length, just break out.
				 */
				if (copy != sge->length)
					return copied;
				sk_msg_iter_var_next(i);
			}

			if (copied == len)
				break;
		} while (i != msg_rx->sg.end);

		if (unlikely(peek)) {
			msg_rx = sk_psock_next_msg(psock, msg_rx);
			if (!msg_rx)
				break;
			continue;
		}

		msg_rx->sg.start = i;
		if (!sge->length && msg_rx->sg.start == msg_rx->sg.end) {
			msg_rx = sk_psock_dequeue_msg(psock);
			kfree_sk_msg(msg_rx);
		}
		msg_rx = sk_psock_peek_msg(psock);
	}

	return copied;
}
EXPORT_SYMBOL_GPL(sk_msg_recvmsg);

bool sk_msg_is_readable(struct sock *sk)
{
	struct sk_psock *psock;
	bool empty = true;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock))
		empty = list_empty(&psock->ingress_msg);
	rcu_read_unlock();
	return !empty;
}
EXPORT_SYMBOL_GPL(sk_msg_is_readable);

static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
						  struct sk_buff *skb)
{
	struct sk_msg *msg;

	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		return NULL;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return NULL;

	msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_KERNEL);
	if (unlikely(!msg))
		return NULL;

	sk_msg_init(msg);
	return msg;
}

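/* Map @len bytes of @skb starting at @off into @msg's scatterlist and
 * queue it on the psock's ingress_msg list, waking up any reader.
 */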
static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
					u32 off, u32 len,
					struct sk_psock *psock,
					struct sock *sk,
					struct sk_msg *msg)
{
	int num_sge, copied;

	/* skb_linearize() may fail with ENOMEM, but let's simply try again
	 * later if this happens. Under memory pressure we don't want to
	 * drop the skb. We need to linearize the skb so that the mapping
	 * in skb_to_sgvec() cannot error.
	 */
	if (skb_linearize(skb))
		return -EAGAIN;
	num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
	if (unlikely(num_sge < 0))
		return num_sge;

	copied = len;
	msg->sg.start = 0;
	msg->sg.size = copied;
	msg->sg.end = num_sge;
	msg->skb = skb;

	sk_psock_queue_msg(psock, msg);
	sk_psock_data_ready(sk, psock);
	return copied;
}

static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
				     u32 off, u32 len);

static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
				u32 off, u32 len)
{
	struct sock *sk = psock->sk;
	struct sk_msg *msg;
	int err;

	/* If we are receiving on the same sock, skb->sk is already assigned;
	 * skip memory accounting and owner transition since they are already
	 * set correctly.
	 */
	if (unlikely(skb->sk == sk))
		return sk_psock_skb_ingress_self(psock, skb, off, len);
	msg = sk_psock_create_ingress_msg(sk, skb);
	if (!msg)
		return -EAGAIN;

	/* This will transition ownership of the data from the socket where
	 * the BPF program was run initiating the redirect to the socket
	 * we will eventually receive this data on. The data will be released
	 * from consume_skb() found in __tcp_bpf_recvmsg() after it has been
	 * copied into user buffers.
	 */
	skb_set_owner_r(skb, sk);
	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
	if (err < 0)
		kfree(msg);
	return err;
}

/* Puts an skb on the ingress queue of the socket already assigned to the
 * skb. In this case we do not need to check memory limits or call
 * skb_set_owner_r() because the skb is already accounted for here.
 */
static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
				     u32 off, u32 len)
{
	struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
	struct sock *sk = psock->sk;
	int err;

	if (unlikely(!msg))
		return -EAGAIN;
	sk_msg_init(msg);
	skb_set_owner_r(skb, sk);
	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
	if (err < 0)
		kfree(msg);
	return err;
}

static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
			       u32 off, u32 len, bool ingress)
{
	if (!ingress) {
		if (!sock_writeable(psock->sk))
			return -EAGAIN;
		return skb_send_sock(psock->sk, skb, off, len);
	}
	return sk_psock_skb_ingress(psock, skb, off, len);
}

static void sk_psock_skb_state(struct sk_psock *psock,
			       struct sk_psock_work_state *state,
			       struct sk_buff *skb,
			       int len, int off)
{
	spin_lock_bh(&psock->ingress_lock);
	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
		state->skb = skb;
		state->len = len;
		state->off = off;
	} else {
		sock_drop(psock->sk, skb);
	}
	spin_unlock_bh(&psock->ingress_lock);
}

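/* Workqueue handler that drains psock->ingress_skb. A partially handled
 * skb saved in work_state after -EAGAIN is resumed first; any hard error
 * reports EPIPE and disables further transmit.
 */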
static void sk_psock_backlog(struct work_struct *work)
{
	struct sk_psock *psock = container_of(work, struct sk_psock, work);
	struct sk_psock_work_state *state = &psock->work_state;
	struct sk_buff *skb = NULL;
	bool ingress;
	u32 len, off;
	int ret;

	mutex_lock(&psock->work_mutex);
	if (unlikely(state->skb)) {
		spin_lock_bh(&psock->ingress_lock);
		skb = state->skb;
		len = state->len;
		off = state->off;
		state->skb = NULL;
		spin_unlock_bh(&psock->ingress_lock);
	}
	if (skb)
		goto start;

	while ((skb = skb_dequeue(&psock->ingress_skb))) {
		len = skb->len;
		off = 0;
		if (skb_bpf_strparser(skb)) {
			struct strp_msg *stm = strp_msg(skb);

			off = stm->offset;
			len = stm->full_len;
		}
start:
		ingress = skb_bpf_ingress(skb);
		skb_bpf_redirect_clear(skb);
		do {
			ret = -EIO;
			if (!sock_flag(psock->sk, SOCK_DEAD))
				ret = sk_psock_handle_skb(psock, skb, off,
							  len, ingress);
			if (ret <= 0) {
				if (ret == -EAGAIN) {
					sk_psock_skb_state(psock, state, skb,
							   len, off);
					goto end;
				}
				/* Hard errors break pipe and stop xmit. */
				sk_psock_report_error(psock, ret ? -ret : EPIPE);
				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
				sock_drop(psock->sk, skb);
				goto end;
			}
			off += ret;
			len -= ret;
		} while (len);

		if (!ingress)
			kfree_skb(skb);
	}
end:
	mutex_unlock(&psock->work_mutex);
}

struct sk_psock *sk_psock_init(struct sock *sk, int node)
{
	struct sk_psock *psock;
	struct proto *prot;

	write_lock_bh(&sk->sk_callback_lock);

	if (sk->sk_user_data) {
		psock = ERR_PTR(-EBUSY);
		goto out;
	}

	psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
	if (!psock) {
		psock = ERR_PTR(-ENOMEM);
		goto out;
	}

	prot = READ_ONCE(sk->sk_prot);
	psock->sk = sk;
	psock->eval = __SK_NONE;
	psock->sk_proto = prot;
	psock->saved_unhash = prot->unhash;
	psock->saved_close = prot->close;
	psock->saved_write_space = sk->sk_write_space;

	INIT_LIST_HEAD(&psock->link);
	spin_lock_init(&psock->link_lock);

	INIT_WORK(&psock->work, sk_psock_backlog);
	mutex_init(&psock->work_mutex);
	INIT_LIST_HEAD(&psock->ingress_msg);
	spin_lock_init(&psock->ingress_lock);
	skb_queue_head_init(&psock->ingress_skb);

	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
	refcount_set(&psock->refcnt, 1);

	rcu_assign_sk_user_data_nocopy(sk, psock);
	sock_hold(sk);

out:
	write_unlock_bh(&sk->sk_callback_lock);
	return psock;
}
EXPORT_SYMBOL_GPL(sk_psock_init);

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
{
	struct sk_psock_link *link;

	spin_lock_bh(&psock->link_lock);
	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
					list);
	if (link)
		list_del(&link->list);
	spin_unlock_bh(&psock->link_lock);
	return link;
}

static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
{
	struct sk_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
		list_del(&msg->list);
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
}

static void __sk_psock_zap_ingress(struct sk_psock *psock)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
		skb_bpf_redirect_clear(skb);
		sock_drop(psock->sk, skb);
	}
	kfree_skb(psock->work_state.skb);
	/* We NULL the skb here to ensure that calls to sk_psock_backlog()
	 * do not pick up the freed skb.
	 */
	psock->work_state.skb = NULL;
	__sk_psock_purge_ingress_msg(psock);
}

static void sk_psock_link_destroy(struct sk_psock *psock)
{
	struct sk_psock_link *link, *tmp;

	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		list_del(&link->list);
		sk_psock_free_link(link);
	}
}

void sk_psock_stop(struct sk_psock *psock, bool wait)
{
	spin_lock_bh(&psock->ingress_lock);
	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
	sk_psock_cork_free(psock);
	__sk_psock_zap_ingress(psock);
	spin_unlock_bh(&psock->ingress_lock);

	if (wait)
		cancel_work_sync(&psock->work);
}

static void sk_psock_done_strp(struct sk_psock *psock);

static void sk_psock_destroy(struct work_struct *work)
{
	struct sk_psock *psock = container_of(to_rcu_work(work),
					      struct sk_psock, rwork);
	/* No sk_callback_lock since already detached. */

	sk_psock_done_strp(psock);

	cancel_work_sync(&psock->work);
	mutex_destroy(&psock->work_mutex);

	psock_progs_drop(&psock->progs);

	sk_psock_link_destroy(psock);
	sk_psock_cork_free(psock);

	if (psock->sk_redir)
		sock_put(psock->sk_redir);
	sock_put(psock->sk);
	kfree(psock);
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk_psock_restore_proto(sk, psock);
	rcu_assign_sk_user_data(sk, NULL);
	if (psock->progs.stream_parser)
		sk_psock_stop_strp(sk, psock);
	else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
		sk_psock_stop_verdict(sk, psock);
	write_unlock_bh(&sk->sk_callback_lock);

	sk_psock_stop(psock, false);

	INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
	queue_rcu_work(system_wq, &psock->rwork);
}
EXPORT_SYMBOL_GPL(sk_psock_drop);

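/* Map a BPF program's SK_PASS/SK_DROP return value, combined with
 * whether the program requested a redirect, onto the internal __SK_*
 * action codes.
 */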
static int sk_psock_map_verd(int verdict, bool redir)
{
	switch (verdict) {
	case SK_PASS:
		return redir ? __SK_REDIRECT : __SK_PASS;
	case SK_DROP:
	default:
		break;
	}

	return __SK_DROP;
}

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg)
{
	struct bpf_prog *prog;
	int ret;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.msg_parser);
	if (unlikely(!prog)) {
		ret = __SK_PASS;
		goto out;
	}

	sk_msg_compute_data_pointers(msg);
	msg->sk = sk;
	ret = bpf_prog_run_pin_on_cpu(prog, msg);
	ret = sk_psock_map_verd(ret, msg->sk_redir);
	psock->apply_bytes = msg->apply_bytes;
	if (ret == __SK_REDIRECT) {
		if (psock->sk_redir)
			sock_put(psock->sk_redir);
		psock->sk_redir = msg->sk_redir;
		if (!psock->sk_redir) {
			ret = __SK_DROP;
			goto out;
		}
		sock_hold(psock->sk_redir);
	}
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);

static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
{
	struct sk_psock *psock_other;
	struct sock *sk_other;

	sk_other = skb_bpf_redirect_fetch(skb);
	/* This error indicates a buggy BPF program: it returned a redirect
	 * verdict but then didn't set a redirect socket.
	 */
	if (unlikely(!sk_other)) {
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}
	psock_other = sk_psock(sk_other);
	/* This error indicates the socket is being torn down or had another
	 * error that caused the pipe to break. We can't send a packet on
	 * a socket that is in this state so we drop the skb.
	 */
	if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}
	spin_lock_bh(&psock_other->ingress_lock);
	if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
		spin_unlock_bh(&psock_other->ingress_lock);
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}

	skb_queue_tail(&psock_other->ingress_skb, skb);
	schedule_work(&psock_other->work);
	spin_unlock_bh(&psock_other->ingress_lock);
	return 0;
}

static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
				       struct sk_psock *from, int verdict)
{
	switch (verdict) {
	case __SK_REDIRECT:
		sk_psock_skb_redirect(from, skb);
		break;
	case __SK_PASS:
	case __SK_DROP:
	default:
		break;
	}
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
{
	struct bpf_prog *prog;
	int ret = __SK_PASS;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = psock->sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_tls_verdict_apply(skb, psock, ret);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);

static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
				  int verdict)
{
	struct sock *sk_other;
	int err = 0;
	u32 len, off;

	switch (verdict) {
	case __SK_PASS:
		err = -EIO;
		sk_other = psock->sk;
		if (sock_flag(sk_other, SOCK_DEAD) ||
		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
			skb_bpf_redirect_clear(skb);
			goto out_free;
		}

		skb_bpf_set_ingress(skb);

		/* If the queue is empty then we can submit directly
		 * into the msg queue. If it's not empty we have to
		 * queue work, otherwise we may get OOO data. Any
		 * sk_psock_skb_ingress errors will be handled by
		 * retrying later from the workqueue.
		 */
		if (skb_queue_empty(&psock->ingress_skb)) {
			len = skb->len;
			off = 0;
			if (skb_bpf_strparser(skb)) {
				struct strp_msg *stm = strp_msg(skb);

				off = stm->offset;
				len = stm->full_len;
			}
			err = sk_psock_skb_ingress_self(psock, skb, off, len);
		}
		if (err < 0) {
			spin_lock_bh(&psock->ingress_lock);
			if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
				skb_queue_tail(&psock->ingress_skb, skb);
				schedule_work(&psock->work);
				err = 0;
			}
			spin_unlock_bh(&psock->ingress_lock);
			if (err < 0) {
				skb_bpf_redirect_clear(skb);
				goto out_free;
			}
		}
		break;
	case __SK_REDIRECT:
		err = sk_psock_skb_redirect(psock, skb);
		break;
	case __SK_DROP:
	default:
out_free:
		sock_drop(psock->sk, skb);
	}

	return err;
}

static void sk_psock_write_space(struct sock *sk)
{
	struct sk_psock *psock;
	void (*write_space)(struct sock *sk) = NULL;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			schedule_work(&psock->work);
		write_space = psock->saved_write_space;
	}
	rcu_read_unlock();
	if (write_space)
		write_space(sk);
}

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	struct sock *sk;

	rcu_read_lock();
	sk = strp->sk;
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		sock_drop(sk, skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		if (ret == SK_PASS)
			skb_bpf_set_strparser(skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_verdict_apply(psock, skb, ret);
out:
	rcu_read_unlock();
}

static int sk_psock_strp_read_done(struct strparser *strp, int err)
{
	return err;
}

static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
	struct bpf_prog *prog;
	int ret = skb->len;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_parser);
	if (likely(prog)) {
		skb->sk = psock->sk;
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		skb->sk = NULL;
	}
	rcu_read_unlock();
	return ret;
}

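/* When TLS RX is active the strparser must not consume the (possibly
 * still encrypted) stream directly; defer to the saved ->sk_data_ready
 * and let the TLS layer feed us via sk_psock_tls_strp_read() instead.
 */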
/* Called with socket lock held. */
static void sk_psock_strp_data_ready(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (tls_sw_has_ctx_rx(sk)) {
			psock->saved_data_ready(sk);
		} else {
			write_lock_bh(&sk->sk_callback_lock);
			strp_data_ready(&psock->strp);
			write_unlock_bh(&sk->sk_callback_lock);
		}
	}
	rcu_read_unlock();
}

int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	static const struct strp_callbacks cb = {
		.rcv_msg = sk_psock_strp_read,
		.read_sock_done = sk_psock_strp_read_done,
		.parse_msg = sk_psock_strp_parse,
	};

	return strp_init(&psock->strp, sk, &cb);
}

void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_strp_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
	psock_set_prog(&psock->progs.stream_parser, NULL);

	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
	strp_stop(&psock->strp);
}

static void sk_psock_done_strp(struct sk_psock *psock)
{
	/* Parser has been stopped */
	if (psock->progs.stream_parser)
		strp_done(&psock->strp);
}
#else
static void sk_psock_done_strp(struct sk_psock *psock)
{
}
#endif /* CONFIG_BPF_STREAM_PARSER */

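/* ->read_sock() callback used in verdict-only mode, i.e. when no stream
 * parser program is attached and the verdict program runs on each skb
 * as it arrives.
 */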
static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
				 unsigned int offset, size_t orig_len)
{
	struct sock *sk = (struct sock *)desc->arg.data;
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	int len = orig_len;

	/* clone here so sk_eat_skb() in tcp_read_sock() does not drop our data */
	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		desc->error = -ENOMEM;
		return 0;
	}

	rcu_read_lock();
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		len = 0;
		sock_drop(sk, skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (!prog)
		prog = READ_ONCE(psock->progs.skb_verdict);
	if (likely(prog)) {
		skb->sk = sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	if (sk_psock_verdict_apply(psock, skb, ret) < 0)
		len = 0;
out:
	rcu_read_unlock();
	return len;
}

static void sk_psock_verdict_data_ready(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	read_descriptor_t desc;

	if (unlikely(!sock || !sock->ops || !sock->ops->read_sock))
		return;

	desc.arg.data = sk;
	desc.error = 0;
	desc.count = 1;

	sock->ops->read_sock(sk, &desc, sk_psock_verdict_recv);
}

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_verdict_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
{
	psock_set_prog(&psock->progs.stream_verdict, NULL);
	psock_set_prog(&psock->progs.skb_verdict, NULL);

	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
}