Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>
#include <trace/events/sock.h>

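/* msg->sg.data is managed as a ring of scatterlist entries. Return true when
 * the element just before sg.end may be coalesced with new data, i.e. the
 * ring is non-empty and elem_first_coalesce does not point past the last
 * used element. Both the non-wrapped (end > start) and wrapped (end < start)
 * layouts are handled.
 */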
static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
{
	if (msg->sg.end > msg->sg.start &&
	    elem_first_coalesce < msg->sg.end)
		return true;

	if (msg->sg.end < msg->sg.start &&
	    (elem_first_coalesce > msg->sg.start ||
	     elem_first_coalesce < msg->sg.end))
		return true;

	return false;
}

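/* Grow @msg until it holds @len bytes in total, backed by freshly refilled
 * page-frag memory and charged to the socket as we go. Data is appended to
 * the last scatterlist element when coalescing is permitted, otherwise a new
 * element is added. On memory pressure the message is trimmed back to its
 * original size and -ENOMEM is returned; -ENOSPC means the scatterlist ring
 * is full.
 */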
int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce)
{
	struct page_frag *pfrag = sk_page_frag(sk);
	u32 osize = msg->sg.size;
	int ret = 0;

	len -= msg->sg.size;
	while (len > 0) {
		struct scatterlist *sge;
		u32 orig_offset;
		int use, i;

		if (!sk_page_frag_refill(sk, pfrag)) {
			ret = -ENOMEM;
			goto msg_trim;
		}

		orig_offset = pfrag->offset;
		use = min_t(int, len, pfrag->size - orig_offset);
		if (!sk_wmem_schedule(sk, use)) {
			ret = -ENOMEM;
			goto msg_trim;
		}

		i = msg->sg.end;
		sk_msg_iter_var_prev(i);
		sge = &msg->sg.data[i];

		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
		    sg_page(sge) == pfrag->page &&
		    sge->offset + sge->length == orig_offset) {
			sge->length += use;
		} else {
			if (sk_msg_full(msg)) {
				ret = -ENOSPC;
				break;
			}

			sge = &msg->sg.data[msg->sg.end];
			sg_unmark_end(sge);
			sg_set_page(sge, pfrag->page, use, orig_offset);
			get_page(pfrag->page);
			sk_msg_iter_next(msg, end);
		}

		sk_mem_charge(sk, use);
		msg->sg.size += use;
		pfrag->offset += use;
		len -= use;
	}

	return ret;

msg_trim:
	sk_msg_trim(sk, msg, osize);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_alloc);

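/* Share @len bytes starting at offset @off of @src with @dst. Data pages are
 * referenced, not copied: ranges that continue the last element of @dst are
 * coalesced, otherwise a new element is added via sk_msg_page_add(). Returns
 * -ENOSPC when @src runs out of data or @dst runs out of scatterlist slots.
 */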
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len)
{
	int i = src->sg.start;
	struct scatterlist *sge = sk_msg_elem(src, i);
	struct scatterlist *sgd = NULL;
	u32 sge_len, sge_off;

	while (off) {
		if (sge->length > off)
			break;
		off -= sge->length;
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && off)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	while (len) {
		sge_len = sge->length - off;
		if (sge_len > len)
			sge_len = len;

		if (dst->sg.end)
			sgd = sk_msg_elem(dst, dst->sg.end - 1);

		if (sgd &&
		    (sg_page(sge) == sg_page(sgd)) &&
		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
			sgd->length += sge_len;
			dst->sg.size += sge_len;
		} else if (!sk_msg_full(dst)) {
			sge_off = sge->offset + off;
			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
		} else {
			return -ENOSPC;
		}

		off = 0;
		len -= sge_len;
		sk_mem_charge(sk, sge_len);
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && len)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_msg_clone);

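/* Uncharge @bytes from the socket and consume them from the front of @msg:
 * fully consumed elements are zeroed and sg.start is advanced, a partially
 * consumed element keeps its remaining data.
 */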
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = sk_msg_elem(msg, i);

		if (bytes < sge->length) {
			sge->length -= bytes;
			sge->offset += bytes;
			sk_mem_uncharge(sk, bytes);
			break;
		}

		sk_mem_uncharge(sk, sge->length);
		bytes -= sge->length;
		sge->length = 0;
		sge->offset = 0;
		sk_msg_iter_var_next(i);
	} while (bytes && i != msg->sg.end);
	msg->sg.start = i;
}
EXPORT_SYMBOL_GPL(sk_msg_return_zero);

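/* Uncharge up to @bytes of socket memory accounted to @msg without modifying
 * the scatterlist itself; walks the elements from sg.start to sg.end.
 */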
void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = &msg->sg.data[i];
		int uncharge = (bytes < sge->length) ? bytes : sge->length;

		sk_mem_uncharge(sk, uncharge);
		bytes -= uncharge;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
}
EXPORT_SYMBOL_GPL(sk_msg_return);

static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
			    bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	u32 len = sge->length;

	/* When the skb owns the memory we free it from the consume_skb path. */
	if (!msg->skb) {
		if (charge)
			sk_mem_uncharge(sk, len);
		put_page(sg_page(sge));
	}
	memset(sge, 0, sizeof(*sge));
	return len;
}

static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
			 bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	int freed = 0;

	while (msg->sg.size) {
		msg->sg.size -= sge->length;
		freed += sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, msg->sg.size);
		sge = sk_msg_elem(msg, i);
	}
	consume_skb(msg->skb);
	sk_msg_init(msg);
	return freed;
}

int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, false);
}
EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);

int sk_msg_free(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free);

static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
				  u32 bytes, bool charge)
{
	struct scatterlist *sge;
	u32 i = msg->sg.start;

	while (bytes) {
		sge = sk_msg_elem(msg, i);
		if (!sge->length)
			break;
		if (bytes < sge->length) {
			if (charge)
				sk_mem_uncharge(sk, bytes);
			sge->length -= bytes;
			sge->offset += bytes;
			msg->sg.size -= bytes;
			break;
		}

		msg->sg.size -= sge->length;
		bytes -= sge->length;
		sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, bytes);
	}
	msg->sg.start = i;
}

void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free_partial);

void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, false);
}

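/* Shrink @msg to @len bytes by releasing data from the tail. Whole trailing
 * elements are freed through sk_msg_free_elem(), a partially trimmed element
 * only has its length and the socket memory charge reduced. sg.end, sg.curr
 * and sg.copybreak are adjusted so later copies start at a valid position.
 */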
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
{
	int trim = msg->sg.size - len;
	u32 i = msg->sg.end;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	sk_msg_iter_var_prev(i);
	msg->sg.size = len;
	while (msg->sg.data[i].length &&
	       trim >= msg->sg.data[i].length) {
		trim -= msg->sg.data[i].length;
		sk_msg_free_elem(sk, msg, i, true);
		sk_msg_iter_var_prev(i);
		if (!trim)
			goto out;
	}

	msg->sg.data[i].length -= trim;
	sk_mem_uncharge(sk, trim);
	/* Adjust copybreak if it falls into the trimmed part of last buf */
	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
		msg->sg.copybreak = msg->sg.data[i].length;
out:
	sk_msg_iter_var_next(i);
	msg->sg.end = i;

	/* If we trim away a full sg elem that lies before the curr pointer,
	 * update copybreak and curr so that any future copy operations
	 * start at the new copy location.
	 * However, trimmed data that has not yet been used in a copy op
	 * does not require an update.
	 */
	if (!msg->sg.size) {
		msg->sg.curr = msg->sg.start;
		msg->sg.copybreak = 0;
	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
		sk_msg_iter_var_prev(i);
		msg->sg.curr = i;
		msg->sg.copybreak = msg->sg.data[i].length;
	}
}
EXPORT_SYMBOL_GPL(sk_msg_trim);

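/* Pin up to @bytes of user pages from @from and attach them to @msg as
 * scatterlist elements, charging the socket for each range; no data is
 * copied. On failure the iov_iter position is reverted and the caller is
 * expected to trim the partially built msg.
 */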
int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes)
{
	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
	const int to_max_pages = MAX_MSG_FRAGS;
	struct page *pages[MAX_MSG_FRAGS];
	ssize_t orig, copied, use, offset;

	orig = msg->sg.size;
	while (bytes > 0) {
		i = 0;
		maxpages = to_max_pages - num_elems;
		if (maxpages == 0) {
			ret = -EFAULT;
			goto out;
		}

		copied = iov_iter_get_pages2(from, pages, bytes, maxpages,
					     &offset);
		if (copied <= 0) {
			ret = -EFAULT;
			goto out;
		}

		bytes -= copied;
		msg->sg.size += copied;

		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);
			sg_set_page(&msg->sg.data[msg->sg.end],
				    pages[i], use, offset);
			sg_unmark_end(&msg->sg.data[msg->sg.end]);
			sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;
			sk_msg_iter_next(msg, end);
			num_elems++;
			i++;
		}
		/* When zerocopy is mixed with sk_msg_*copy* operations we
		 * may have a copybreak set; in that case clear it and prefer
		 * the zerocopy remainder when possible.
		 */
		msg->sg.copybreak = 0;
		msg->sg.curr = msg->sg.end;
	}
out:
	/* Revert the iov_iter updates; the msg will need to use 'trim' later
	 * if it also needs to be cleared.
	 */
	if (ret)
		iov_iter_revert(from, msg->sg.size - orig);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);

int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes)
{
	int ret = -ENOSPC, i = msg->sg.curr;
	u32 copy, buf_size, copied = 0;
	struct scatterlist *sge;
	void *to;

	do {
		sge = sk_msg_elem(msg, i);
		/* This is possible if a trim operation shrunk the buffer */
		if (msg->sg.copybreak >= sge->length) {
			msg->sg.copybreak = 0;
			sk_msg_iter_var_next(i);
			if (i == msg->sg.end)
				break;
			sge = sk_msg_elem(msg, i);
		}

		buf_size = sge->length - msg->sg.copybreak;
		copy = (buf_size > bytes) ? bytes : buf_size;
		to = sg_virt(sge) + msg->sg.copybreak;
		msg->sg.copybreak += copy;
		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
			ret = copy_from_iter_nocache(to, copy, from);
		else
			ret = copy_from_iter(to, copy, from);
		if (ret != copy) {
			ret = -EFAULT;
			goto out;
		}
		bytes -= copy;
		copied += copy;
		if (!bytes)
			break;
		msg->sg.copybreak = 0;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
out:
	msg->sg.curr = i;
	return (ret < 0) ? ret : copied;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);

/* Receive sk_msg from psock->ingress_msg to @msg. */
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		   int len, int flags)
{
	struct iov_iter *iter = &msg->msg_iter;
	int peek = flags & MSG_PEEK;
	struct sk_msg *msg_rx;
	int i, copied = 0;

	msg_rx = sk_psock_peek_msg(psock);
	while (copied != len) {
		struct scatterlist *sge;

		if (unlikely(!msg_rx))
			break;

		i = msg_rx->sg.start;
		do {
			struct page *page;
			int copy;

			sge = sk_msg_elem(msg_rx, i);
			copy = sge->length;
			page = sg_page(sge);
			if (copied + copy > len)
				copy = len - copied;
			if (copy)
				copy = copy_page_to_iter(page, sge->offset, copy, iter);
			if (!copy) {
				copied = copied ? copied : -EFAULT;
				goto out;
			}

			copied += copy;
			if (likely(!peek)) {
				sge->offset += copy;
				sge->length -= copy;
				if (!msg_rx->skb) {
					sk_mem_uncharge(sk, copy);
					atomic_sub(copy, &sk->sk_rmem_alloc);
				}
				msg_rx->sg.size -= copy;

				if (!sge->length) {
					sk_msg_iter_var_next(i);
					if (!msg_rx->skb)
						put_page(page);
				}
			} else {
				/* Let's not optimize the peek case: if
				 * copy_page_to_iter() didn't copy the entire
				 * length, just break.
				 */
				if (copy != sge->length)
					goto out;
				sk_msg_iter_var_next(i);
			}

			if (copied == len)
				break;
		} while ((i != msg_rx->sg.end) && !sg_is_last(sge));

		if (unlikely(peek)) {
			msg_rx = sk_psock_next_msg(psock, msg_rx);
			if (!msg_rx)
				break;
			continue;
		}

		msg_rx->sg.start = i;
		if (!sge->length && (i == msg_rx->sg.end || sg_is_last(sge))) {
			msg_rx = sk_psock_dequeue_msg(psock);
			kfree_sk_msg(msg_rx);
		}
		msg_rx = sk_psock_peek_msg(psock);
	}
out:
	return copied;
}
EXPORT_SYMBOL_GPL(sk_msg_recvmsg);

bool sk_msg_is_readable(struct sock *sk)
{
	struct sk_psock *psock;
	bool empty = true;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock))
		empty = list_empty(&psock->ingress_msg);
	rcu_read_unlock();
	return !empty;
}
EXPORT_SYMBOL_GPL(sk_msg_is_readable);

static struct sk_msg *alloc_sk_msg(gfp_t gfp)
{
	struct sk_msg *msg;

	msg = kzalloc(sizeof(*msg), gfp | __GFP_NOWARN);
	if (unlikely(!msg))
		return NULL;
	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
	return msg;
}

static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
						  struct sk_buff *skb)
{
	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		return NULL;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return NULL;

	return alloc_sk_msg(GFP_KERNEL);
}

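/* Map @len bytes of @skb starting at @off into @msg's scatterlist and queue
 * the msg on the psock ingress list, waking any reader. When @take_ref is
 * true the skb's refcount is bumped before it is attached to the msg.
 * Returns the number of bytes queued or a negative error.
 */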
static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
					u32 off, u32 len,
					struct sk_psock *psock,
					struct sock *sk,
					struct sk_msg *msg,
					bool take_ref)
{
	int num_sge, copied;

	/* skb_to_sgvec will fail when the total number of fragments in
	 * frag_list and frags exceeds MAX_MSG_FRAGS. For example, the
	 * caller may aggregate multiple skbs.
	 */
	num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
	if (num_sge < 0) {
		/* skb_linearize() may fail with ENOMEM, but let's simply try
		 * again later if this happens. Under memory pressure we don't
		 * want to drop the skb. We need to linearize the skb so that
		 * the mapping in skb_to_sgvec cannot error.
		 * Note that skb_linearize requires the skb not to be shared.
		 */
		if (skb_linearize(skb))
			return -EAGAIN;

		num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
		if (unlikely(num_sge < 0))
			return num_sge;
	}

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
	psock->ingress_bytes += len;
#endif
	copied = len;
	msg->sg.start = 0;
	msg->sg.size = copied;
	msg->sg.end = num_sge;
	msg->skb = take_ref ? skb_get(skb) : skb;

	sk_psock_queue_msg(psock, msg);
	sk_psock_data_ready(sk, psock);
	return copied;
}

static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
				     u32 off, u32 len, bool take_ref);

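/* Ingress path for skbs redirected from another socket: account the skb
 * against the receiving socket and enqueue it as a sk_msg. If the skb is
 * already owned by this socket, fall through to the _self variant which
 * skips the accounting.
 */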
static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
				u32 off, u32 len)
{
	struct sock *sk = psock->sk;
	struct sk_msg *msg;
	int err;

	/* If we are receiving on the same sock skb->sk is already assigned,
	 * so skip memory accounting and the owner transition since they are
	 * already set up correctly.
	 */
	if (unlikely(skb->sk == sk))
		return sk_psock_skb_ingress_self(psock, skb, off, len, true);
	msg = sk_psock_create_ingress_msg(sk, skb);
	if (!msg)
		return -EAGAIN;

	/* This will transition ownership of the data from the socket where
	 * the BPF program was run initiating the redirect to the socket
	 * we will eventually receive this data on. The data will be released
	 * from skb_consume found in __tcp_bpf_recvmsg() after it has been
	 * copied into user buffers.
	 */
	skb_set_owner_r(skb, sk);
	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg, true);
	if (err < 0)
		kfree(msg);
	return err;
}

/* Puts an skb on the ingress queue of the socket already assigned to the
 * skb. In this case we do not need to check memory limits or skb_set_owner_r
 * because the skb is already accounted for here.
 */
static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
				     u32 off, u32 len, bool take_ref)
{
	struct sk_msg *msg = alloc_sk_msg(GFP_ATOMIC);
	struct sock *sk = psock->sk;
	int err;

	if (unlikely(!msg))
		return -EAGAIN;
	skb_set_owner_r(skb, sk);
	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg, take_ref);
	if (err < 0)
		kfree(msg);
	return err;
}

static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
			       u32 off, u32 len, bool ingress)
{
	if (!ingress) {
		if (!sock_writeable(psock->sk))
			return -EAGAIN;
		return skb_send_sock(psock->sk, skb, off, len);
	}

	return sk_psock_skb_ingress(psock, skb, off, len);
}

static void sk_psock_skb_state(struct sk_psock *psock,
			       struct sk_psock_work_state *state,
			       int len, int off)
{
	spin_lock_bh(&psock->ingress_lock);
	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
		state->len = len;
		state->off = off;
	}
	spin_unlock_bh(&psock->ingress_lock);
}

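/* Delayed-work handler that drains psock->ingress_skb: each skb is either
 * sent out (egress) or turned into an ingress sk_msg, resuming from any
 * partial progress recorded in psock->work_state. On -EAGAIN the work is
 * rescheduled; any other error tears down the pipe.
 */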
static void sk_psock_backlog(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct sk_psock *psock = container_of(dwork, struct sk_psock, work);
	struct sk_psock_work_state *state = &psock->work_state;
	struct sk_buff *skb = NULL;
	u32 len = 0, off = 0;
	bool ingress;
	int ret;

	/* Increment the psock refcnt to synchronize with the close(fd) path
	 * in sock_map_close(), ensuring we wait for backlog thread completion
	 * before sk_socket is freed. If the refcnt increment fails, it
	 * indicates sock_map_close() completed with sk_socket potentially
	 * already freed.
	 */
	if (!sk_psock_get(psock->sk))
		return;
	mutex_lock(&psock->work_mutex);
	while ((skb = skb_peek(&psock->ingress_skb))) {
		len = skb->len;
		off = 0;
		if (skb_bpf_strparser(skb)) {
			struct strp_msg *stm = strp_msg(skb);

			off = stm->offset;
			len = stm->full_len;
		}

		/* Resume processing from the previous partial state */
		if (unlikely(state->len)) {
			len = state->len;
			off = state->off;
		}

		ingress = skb_bpf_ingress(skb);
		skb_bpf_redirect_clear(skb);
		do {
			ret = -EIO;
			if (!sock_flag(psock->sk, SOCK_DEAD))
				ret = sk_psock_handle_skb(psock, skb, off,
							  len, ingress);
			if (ret <= 0) {
				if (ret == -EAGAIN) {
					sk_psock_skb_state(psock, state, len, off);
					/* Restore redir info we cleared before */
					skb_bpf_set_redir(skb, psock->sk, ingress);
					/* Delay slightly to prioritize any
					 * other work that might be here.
					 */
					if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
						schedule_delayed_work(&psock->work, 1);
					goto end;
				}
				/* Hard errors break the pipe and stop xmit. */
				sk_psock_report_error(psock, ret ? -ret : EPIPE);
				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
				goto end;
			}
			off += ret;
			len -= ret;
		} while (len);

		/* The entire skb was sent, clear the state */
		sk_psock_skb_state(psock, state, 0, 0);
		skb = skb_dequeue(&psock->ingress_skb);
		kfree_skb(skb);
	}
end:
	mutex_unlock(&psock->work_mutex);
	sk_psock_put(psock->sk, psock);
}

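/* Allocate and attach a psock to @sk. The socket's original proto ops and
 * callbacks are saved so they can be restored when the psock is dropped,
 * and the psock is published through sk_user_data under sk_callback_lock.
 * Returns the new psock or an ERR_PTR() on failure.
 */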
struct sk_psock *sk_psock_init(struct sock *sk, int node)
{
	struct sk_psock *psock;
	struct proto *prot;

	write_lock_bh(&sk->sk_callback_lock);

	if (sk_is_inet(sk) && inet_csk_has_ulp(sk)) {
		psock = ERR_PTR(-EINVAL);
		goto out;
	}

	if (sk->sk_user_data) {
		psock = ERR_PTR(-EBUSY);
		goto out;
	}

	psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
	if (!psock) {
		psock = ERR_PTR(-ENOMEM);
		goto out;
	}

	prot = READ_ONCE(sk->sk_prot);
	psock->sk = sk;
	psock->eval = __SK_NONE;
	psock->sk_proto = prot;
	psock->saved_unhash = prot->unhash;
	psock->saved_destroy = prot->destroy;
	psock->saved_close = prot->close;
	psock->saved_write_space = sk->sk_write_space;

	INIT_LIST_HEAD(&psock->link);
	spin_lock_init(&psock->link_lock);

	INIT_DELAYED_WORK(&psock->work, sk_psock_backlog);
	mutex_init(&psock->work_mutex);
	INIT_LIST_HEAD(&psock->ingress_msg);
	spin_lock_init(&psock->ingress_lock);
	skb_queue_head_init(&psock->ingress_skb);

	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
	refcount_set(&psock->refcnt, 1);

	__rcu_assign_sk_user_data_with_flags(sk, psock,
					     SK_USER_DATA_NOCOPY |
					     SK_USER_DATA_PSOCK);
	sock_hold(sk);

out:
	write_unlock_bh(&sk->sk_callback_lock);
	return psock;
}
EXPORT_SYMBOL_GPL(sk_psock_init);

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
{
	struct sk_psock_link *link;

	spin_lock_bh(&psock->link_lock);
	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
					list);
	if (link)
		list_del(&link->list);
	spin_unlock_bh(&psock->link_lock);
	return link;
}

static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
{
	struct sk_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
		list_del(&msg->list);
		if (!msg->skb)
			atomic_sub(msg->sg.size, &psock->sk->sk_rmem_alloc);
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
}

static void __sk_psock_zap_ingress(struct sk_psock *psock)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
		skb_bpf_redirect_clear(skb);
		sock_drop(psock->sk, skb);
	}
	__sk_psock_purge_ingress_msg(psock);
}

static void sk_psock_link_destroy(struct sk_psock *psock)
{
	struct sk_psock_link *link, *tmp;

	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		list_del(&link->list);
		sk_psock_free_link(link);
	}
}

void sk_psock_stop(struct sk_psock *psock)
{
	spin_lock_bh(&psock->ingress_lock);
	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
	sk_psock_cork_free(psock);
	spin_unlock_bh(&psock->ingress_lock);
}

static void sk_psock_done_strp(struct sk_psock *psock);

static void sk_psock_destroy(struct work_struct *work)
{
	struct sk_psock *psock = container_of(to_rcu_work(work),
					      struct sk_psock, rwork);
	/* No sk_callback_lock since already detached. */

	sk_psock_done_strp(psock);

	cancel_delayed_work_sync(&psock->work);
	__sk_psock_zap_ingress(psock);
	mutex_destroy(&psock->work_mutex);

	psock_progs_drop(&psock->progs);

	sk_psock_link_destroy(psock);
	sk_psock_cork_free(psock);

	if (psock->sk_redir)
		sock_put(psock->sk_redir);
	if (psock->sk_pair)
		sock_put(psock->sk_pair);
	sock_put(psock->sk);
	kfree(psock);
}

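/* Detach the psock from @sk: restore the original proto callbacks, stop the
 * parser or verdict hooks, and defer the actual teardown to
 * sk_psock_destroy() via queue_rcu_work() so concurrent RCU readers finish
 * first.
 */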
void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk_psock_restore_proto(sk, psock);
	rcu_assign_sk_user_data(sk, NULL);
	if (psock->progs.stream_parser)
		sk_psock_stop_strp(sk, psock);
	else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
		sk_psock_stop_verdict(sk, psock);
	write_unlock_bh(&sk->sk_callback_lock);

	sk_psock_stop(psock);

	INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
	queue_rcu_work(system_wq, &psock->rwork);
}
EXPORT_SYMBOL_GPL(sk_psock_drop);

static int sk_psock_map_verd(int verdict, bool redir)
{
	switch (verdict) {
	case SK_PASS:
		return redir ? __SK_REDIRECT : __SK_PASS;
	case SK_DROP:
	default:
		break;
	}

	return __SK_DROP;
}

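/* Run the msg_parser BPF program (if any) on @msg and map its return value
 * to a __SK_* verdict. For __SK_REDIRECT the target socket chosen by the
 * program is stashed in psock->sk_redir with a reference held.
 */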
int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg)
{
	struct bpf_prog *prog;
	int ret;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.msg_parser);
	if (unlikely(!prog)) {
		ret = __SK_PASS;
		goto out;
	}

	sk_msg_compute_data_pointers(msg);
	msg->sk = sk;
	ret = bpf_prog_run_pin_on_cpu(prog, msg);
	ret = sk_psock_map_verd(ret, msg->sk_redir);
	psock->apply_bytes = msg->apply_bytes;
	if (ret == __SK_REDIRECT) {
		if (psock->sk_redir) {
			sock_put(psock->sk_redir);
			psock->sk_redir = NULL;
		}
		if (!msg->sk_redir) {
			ret = __SK_DROP;
			goto out;
		}
		psock->redir_ingress = sk_msg_to_ingress(msg);
		psock->sk_redir = msg->sk_redir;
		sock_hold(psock->sk_redir);
	}
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);

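/* Queue @skb on the ingress list of the socket chosen by a BPF redirect
 * verdict and kick that socket's backlog work. The skb is dropped when no
 * target was set, the target has no psock, it is being torn down, or its
 * TX path is disabled.
 */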
static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
{
	struct sk_psock *psock_other;
	struct sock *sk_other;

	sk_other = skb_bpf_redirect_fetch(skb);
	/* This error indicates a buggy BPF program: it returned a redirect
	 * return code, but then didn't set a redirect interface.
	 */
	if (unlikely(!sk_other)) {
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}
	psock_other = sk_psock(sk_other);
	/* This error indicates the socket is being torn down or had another
	 * error that caused the pipe to break. We can't send a packet on
	 * a socket that is in this state so we drop the skb.
	 */
	if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}
	spin_lock_bh(&psock_other->ingress_lock);
	if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
		spin_unlock_bh(&psock_other->ingress_lock);
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}

	skb_queue_tail(&psock_other->ingress_skb, skb);
	schedule_delayed_work(&psock_other->work, 0);
	spin_unlock_bh(&psock_other->ingress_lock);
	return 0;
}

static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
				       struct sk_psock *from, int verdict)
{
	switch (verdict) {
	case __SK_REDIRECT:
		sk_psock_skb_redirect(from, skb);
		break;
	case __SK_PASS:
	case __SK_DROP:
	default:
		break;
	}
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
{
	struct bpf_prog *prog;
	int ret = __SK_PASS;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = psock->sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_tls_verdict_apply(skb, psock, ret);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);

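/* Apply a __SK_* verdict to @skb: __SK_PASS queues it on this psock's own
 * ingress (directly when the backlog queue is empty, otherwise via the
 * backlog work to preserve ordering), __SK_REDIRECT hands it to the target
 * socket, and __SK_DROP (or any error) frees it.
 */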
static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
				  int verdict)
{
	struct sock *sk_other;
	int err = 0;
	u32 len, off;

	switch (verdict) {
	case __SK_PASS:
		err = -EIO;
		sk_other = psock->sk;
		if (sock_flag(sk_other, SOCK_DEAD) ||
		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			goto out_free;

		skb_bpf_set_ingress(skb);

		/* If the queue is empty then we can submit directly
		 * into the msg queue. If it is not empty we have to
		 * queue the work, otherwise we may get out-of-order data.
		 * If sk_psock_skb_ingress() errors, the skb is queued and
		 * handled by retrying later from the workqueue.
		 */
		if (skb_queue_empty(&psock->ingress_skb)) {
			len = skb->len;
			off = 0;
			if (skb_bpf_strparser(skb)) {
				struct strp_msg *stm = strp_msg(skb);

				off = stm->offset;
				len = stm->full_len;
			}
			err = sk_psock_skb_ingress_self(psock, skb, off, len, false);
		}
		if (err < 0) {
			spin_lock_bh(&psock->ingress_lock);
			if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
				skb_queue_tail(&psock->ingress_skb, skb);
				schedule_delayed_work(&psock->work, 0);
				err = 0;
			}
			spin_unlock_bh(&psock->ingress_lock);
			if (err < 0)
				goto out_free;
		}
		break;
	case __SK_REDIRECT:
		tcp_eat_skb(psock->sk, skb);
		err = sk_psock_skb_redirect(psock, skb);
		break;
	case __SK_DROP:
	default:
out_free:
		skb_bpf_redirect_clear(skb);
		tcp_eat_skb(psock->sk, skb);
		sock_drop(psock->sk, skb);
	}

	return err;
}

static void sk_psock_write_space(struct sock *sk)
{
	struct sk_psock *psock;
	void (*write_space)(struct sock *sk) = NULL;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			schedule_delayed_work(&psock->work, 0);
		write_space = psock->saved_write_space;
	}
	rcu_read_unlock();
	if (write_space)
		write_space(sk);
}

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	struct sock *sk;

	rcu_read_lock();
	sk = strp->sk;
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		sock_drop(sk, skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		skb_bpf_set_strparser(skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_verdict_apply(psock, skb, ret);
out:
	rcu_read_unlock();
}

static int sk_psock_strp_read_done(struct strparser *strp, int err)
{
	return err;
}

static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
	struct bpf_prog *prog;
	int ret = skb->len;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_parser);
	if (likely(prog)) {
		skb->sk = psock->sk;
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		skb->sk = NULL;
	}
	rcu_read_unlock();
	return ret;
}

/* Called with socket lock held. */
static void sk_psock_strp_data_ready(struct sock *sk)
{
	struct sk_psock *psock;

	trace_sk_data_ready(sk);

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (tls_sw_has_ctx_rx(sk)) {
			psock->saved_data_ready(sk);
		} else {
			read_lock_bh(&sk->sk_callback_lock);
			strp_data_ready(&psock->strp);
			read_unlock_bh(&sk->sk_callback_lock);
		}
	}
	rcu_read_unlock();
}

int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	int ret;

	static const struct strp_callbacks cb = {
		.rcv_msg = sk_psock_strp_read,
		.read_sock_done = sk_psock_strp_read_done,
		.parse_msg = sk_psock_strp_parse,
	};

	ret = strp_init(&psock->strp, sk, &cb);
	if (!ret)
		sk_psock_set_state(psock, SK_PSOCK_RX_STRP_ENABLED);

	if (sk_is_tcp(sk)) {
		psock->strp.cb.read_sock = tcp_bpf_strp_read_sock;
		psock->copied_seq = tcp_sk(sk)->copied_seq;
	}
	return ret;
}

void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_strp_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
	psock_set_prog(&psock->progs.stream_parser, NULL);

	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
	strp_stop(&psock->strp);
}

static void sk_psock_done_strp(struct sk_psock *psock)
{
	/* Parser has been stopped */
	if (sk_psock_test_state(psock, SK_PSOCK_RX_STRP_ENABLED))
		strp_done(&psock->strp);
}
#else
static void sk_psock_done_strp(struct sk_psock *psock)
{
}
#endif /* CONFIG_BPF_STREAM_PARSER */

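/* read_skb() callback for the non-strparser path: run the stream or skb
 * verdict program on @skb and apply the result. Returns the number of bytes
 * consumed, or a negative error propagated from the verdict handling.
 */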
static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
{
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	int len = skb->len;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		len = 0;
		tcp_eat_skb(sk, skb);
		sock_drop(sk, skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (!prog)
		prog = READ_ONCE(psock->progs.skb_verdict);
	if (likely(prog)) {
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
	}
	ret = sk_psock_verdict_apply(psock, skb, ret);
	if (ret < 0)
		len = ret;
out:
	rcu_read_unlock();
	return len;
}

static void sk_psock_verdict_data_ready(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	const struct proto_ops *ops;
	int copied;

	trace_sk_data_ready(sk);

	if (unlikely(!sock))
		return;
	ops = READ_ONCE(sock->ops);
	if (!ops || !ops->read_skb)
		return;
	copied = ops->read_skb(sk, sk_psock_verdict_recv);
	if (copied >= 0) {
		struct sk_psock *psock;

		rcu_read_lock();
		psock = sk_psock(sk);
		if (psock)
			sk_psock_data_ready(sk, psock);
		rcu_read_unlock();
	}
}

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_verdict_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
{
	psock_set_prog(&psock->progs.stream_verdict, NULL);
	psock_set_prog(&psock->progs.skb_verdict, NULL);

	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;