// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/skmsg.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/sock_diag.h>
#include <net/udp.h>

struct bpf_stab {
	struct bpf_map map;
	struct sock **sks;
	struct sk_psock_progs progs;
	raw_spinlock_t lock;
};

#define SOCK_CREATE_FLAG_MASK				\
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
				struct bpf_prog *old, u32 which);
static struct sk_psock_progs *sock_map_progs(struct bpf_map *map);

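/* Allocate a BPF sockmap: an array of sock pointers sized by
 * attr->max_entries. Keys are 4-byte indices; values are socket FDs
 * (u32 or u64) on update and, for syscall lookups, 8-byte socket
 * cookies. Requires CAP_NET_ADMIN.
 */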
static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
{
	struct bpf_stab *stab;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);
	if (attr->max_entries == 0 ||
	    attr->key_size != 4 ||
	    (attr->value_size != sizeof(u32) &&
	     attr->value_size != sizeof(u64)) ||
	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	stab = kzalloc(sizeof(*stab), GFP_USER | __GFP_ACCOUNT);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&stab->map, attr);
	raw_spin_lock_init(&stab->lock);

	stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries *
				       sizeof(struct sock *),
				       stab->map.numa_node);
	if (!stab->sks) {
		kfree(stab);
		return ERR_PTR(-ENOMEM);
	}

	return &stab->map;
}

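/* Attach path for BPF_PROG_ATTACH on a sockmap/sockhash: resolve the
 * map from attr->target_fd and install @prog in the slot selected by
 * attr->attach_type. The detach path below mirrors this with a NULL
 * program.
 */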
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
{
	u32 ufd = attr->target_fd;
	struct bpf_map *map;
	struct fd f;
	int ret;

	if (attr->attach_flags || attr->replace_bpf_fd)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	ret = sock_map_prog_update(map, prog, NULL, attr->attach_type);
	fdput(f);
	return ret;
}

int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	u32 ufd = attr->target_fd;
	struct bpf_prog *prog;
	struct bpf_map *map;
	struct fd f;
	int ret;

	if (attr->attach_flags || attr->replace_bpf_fd)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	prog = bpf_prog_get(attr->attach_bpf_fd);
	if (IS_ERR(prog)) {
		ret = PTR_ERR(prog);
		goto put_map;
	}

	if (prog->type != ptype) {
		ret = -EINVAL;
		goto put_prog;
	}

	ret = sock_map_prog_update(map, NULL, prog, attr->attach_type);
put_prog:
	bpf_prog_put(prog);
put_map:
	fdput(f);
	return ret;
}

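/* Pin a socket for a syscall-path map update: take the socket lock,
 * then disable preemption and enter an RCU read-side section so the
 * common update path (which asserts rcu_read_lock_held()) is safe.
 */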
static void sock_map_sk_acquire(struct sock *sk)
	__acquires(&sk->sk_lock.slock)
{
	lock_sock(sk);
	preempt_disable();
	rcu_read_lock();
}

static void sock_map_sk_release(struct sock *sk)
	__releases(&sk->sk_lock.slock)
{
	rcu_read_unlock();
	preempt_enable();
	release_sock(sk);
}

static void sock_map_add_link(struct sk_psock *psock,
			      struct sk_psock_link *link,
			      struct bpf_map *map, void *link_raw)
{
	link->link_raw = link_raw;
	link->map = map;
	spin_lock_bh(&psock->link_lock);
	list_add_tail(&link->list, &psock->link);
	spin_unlock_bh(&psock->link_lock);
}

static void sock_map_del_link(struct sock *sk,
			      struct sk_psock *psock, void *link_raw)
{
	bool strp_stop = false, verdict_stop = false;
	struct sk_psock_link *link, *tmp;

	spin_lock_bh(&psock->link_lock);
	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		if (link->link_raw == link_raw) {
			struct bpf_map *map = link->map;
			struct bpf_stab *stab = container_of(map, struct bpf_stab,
							     map);
			if (psock->saved_data_ready && stab->progs.stream_parser)
				strp_stop = true;
			if (psock->saved_data_ready && stab->progs.stream_verdict)
				verdict_stop = true;
			if (psock->saved_data_ready && stab->progs.skb_verdict)
				verdict_stop = true;
			list_del(&link->list);
			sk_psock_free_link(link);
		}
	}
	spin_unlock_bh(&psock->link_lock);
	if (strp_stop || verdict_stop) {
		write_lock_bh(&sk->sk_callback_lock);
		if (strp_stop)
			sk_psock_stop_strp(sk, psock);
		if (verdict_stop)
			sk_psock_stop_verdict(sk, psock);

		if (psock->psock_update_sk_prot)
			psock->psock_update_sk_prot(sk, psock, false);
		write_unlock_bh(&sk->sk_callback_lock);
	}
}

static void sock_map_unref(struct sock *sk, void *link_raw)
{
	struct sk_psock *psock = sk_psock(sk);

	if (likely(psock)) {
		sock_map_del_link(sk, psock, link_raw);
		sk_psock_put(sk, psock);
	}
}

static int sock_map_init_proto(struct sock *sk, struct sk_psock *psock)
{
	if (!sk->sk_prot->psock_update_sk_prot)
		return -EINVAL;
	psock->psock_update_sk_prot = sk->sk_prot->psock_update_sk_prot;
	return sk->sk_prot->psock_update_sk_prot(sk, psock, false);
}

static struct sk_psock *sock_map_psock_get_checked(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock) {
		if (sk->sk_prot->close != sock_map_close) {
			psock = ERR_PTR(-EBUSY);
			goto out;
		}

		if (!refcount_inc_not_zero(&psock->refcnt))
			psock = ERR_PTR(-EBUSY);
	}
out:
	rcu_read_unlock();
	return psock;
}

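/* Bind @sk to @map: take references on every program currently
 * attached to the map, create or reuse the socket's psock, copy the
 * program references into it, swap in the psock-aware proto ops, and
 * start the strparser or verdict data_ready path as appropriate.
 * Fails with -EBUSY if the psock already carries a conflicting
 * program.
 */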
static int sock_map_link(struct bpf_map *map, struct sock *sk)
{
	struct sk_psock_progs *progs = sock_map_progs(map);
	struct bpf_prog *stream_verdict = NULL;
	struct bpf_prog *stream_parser = NULL;
	struct bpf_prog *skb_verdict = NULL;
	struct bpf_prog *msg_parser = NULL;
	struct sk_psock *psock;
	int ret;

	stream_verdict = READ_ONCE(progs->stream_verdict);
	if (stream_verdict) {
		stream_verdict = bpf_prog_inc_not_zero(stream_verdict);
		if (IS_ERR(stream_verdict))
			return PTR_ERR(stream_verdict);
	}

	stream_parser = READ_ONCE(progs->stream_parser);
	if (stream_parser) {
		stream_parser = bpf_prog_inc_not_zero(stream_parser);
		if (IS_ERR(stream_parser)) {
			ret = PTR_ERR(stream_parser);
			goto out_put_stream_verdict;
		}
	}

	msg_parser = READ_ONCE(progs->msg_parser);
	if (msg_parser) {
		msg_parser = bpf_prog_inc_not_zero(msg_parser);
		if (IS_ERR(msg_parser)) {
			ret = PTR_ERR(msg_parser);
			goto out_put_stream_parser;
		}
	}

	skb_verdict = READ_ONCE(progs->skb_verdict);
	if (skb_verdict) {
		skb_verdict = bpf_prog_inc_not_zero(skb_verdict);
		if (IS_ERR(skb_verdict)) {
			ret = PTR_ERR(skb_verdict);
			goto out_put_msg_parser;
		}
	}

	psock = sock_map_psock_get_checked(sk);
	if (IS_ERR(psock)) {
		ret = PTR_ERR(psock);
		goto out_progs;
	}

	if (psock) {
		if ((msg_parser && READ_ONCE(psock->progs.msg_parser)) ||
		    (stream_parser && READ_ONCE(psock->progs.stream_parser)) ||
		    (skb_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
		    (skb_verdict && READ_ONCE(psock->progs.stream_verdict)) ||
		    (stream_verdict && READ_ONCE(psock->progs.skb_verdict)) ||
		    (stream_verdict && READ_ONCE(psock->progs.stream_verdict))) {
			sk_psock_put(sk, psock);
			ret = -EBUSY;
			goto out_progs;
		}
	} else {
		psock = sk_psock_init(sk, map->numa_node);
		if (IS_ERR(psock)) {
			ret = PTR_ERR(psock);
			goto out_progs;
		}
	}

	if (msg_parser)
		psock_set_prog(&psock->progs.msg_parser, msg_parser);
	if (stream_parser)
		psock_set_prog(&psock->progs.stream_parser, stream_parser);
	if (stream_verdict)
		psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
	if (skb_verdict)
		psock_set_prog(&psock->progs.skb_verdict, skb_verdict);

	/* msg_* and stream_* program references are tracked in psock after
	 * this point. Reference dec and cleanup will occur through the psock
	 * destructor.
	 */
	ret = sock_map_init_proto(sk, psock);
	if (ret < 0) {
		sk_psock_put(sk, psock);
		goto out;
	}

	write_lock_bh(&sk->sk_callback_lock);
	if (stream_parser && stream_verdict && !psock->saved_data_ready) {
		ret = sk_psock_init_strp(sk, psock);
		if (ret) {
			write_unlock_bh(&sk->sk_callback_lock);
			sk_psock_put(sk, psock);
			goto out;
		}
		sk_psock_start_strp(sk, psock);
	} else if (!stream_parser && stream_verdict && !psock->saved_data_ready) {
		sk_psock_start_verdict(sk, psock);
	} else if (!stream_verdict && skb_verdict && !psock->saved_data_ready) {
		sk_psock_start_verdict(sk, psock);
	}
	write_unlock_bh(&sk->sk_callback_lock);
	return 0;
out_progs:
	if (skb_verdict)
		bpf_prog_put(skb_verdict);
out_put_msg_parser:
	if (msg_parser)
		bpf_prog_put(msg_parser);
out_put_stream_parser:
	if (stream_parser)
		bpf_prog_put(stream_parser);
out_put_stream_verdict:
	if (stream_verdict)
		bpf_prog_put(stream_verdict);
out:
	return ret;
}

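/* Called once the map's refcount has dropped to zero: tear down every
 * slot and drop the psock link each socket holds back to this map.
 */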
static void sock_map_free(struct bpf_map *map)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	int i;

	/* After the sync no updates or deletes will be in-flight so it
	 * is safe to walk map and remove entries without risking a race
	 * in EEXIST update case.
	 */
	synchronize_rcu();
	for (i = 0; i < stab->map.max_entries; i++) {
		struct sock **psk = &stab->sks[i];
		struct sock *sk;

		sk = xchg(psk, NULL);
		if (sk) {
			lock_sock(sk);
			rcu_read_lock();
			sock_map_unref(sk, psk);
			rcu_read_unlock();
			release_sock(sk);
		}
	}

	/* wait for psock readers accessing its map link */
	synchronize_rcu();

	bpf_map_area_free(stab->sks);
	kfree(stab);
}

static void sock_map_release_progs(struct bpf_map *map)
{
	psock_progs_drop(&container_of(map, struct bpf_stab, map)->progs);
}

static struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(key >= map->max_entries))
		return NULL;
	return READ_ONCE(stab->sks[key]);
}

static void *sock_map_lookup(struct bpf_map *map, void *key)
{
	struct sock *sk;

	sk = __sock_map_lookup_elem(map, *(u32 *)key);
	if (!sk)
		return NULL;
	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
		return NULL;
	return sk;
}

static void *sock_map_lookup_sys(struct bpf_map *map, void *key)
{
	struct sock *sk;

	if (map->value_size != sizeof(u64))
		return ERR_PTR(-ENOSPC);

	sk = __sock_map_lookup_elem(map, *(u32 *)key);
	if (!sk)
		return ERR_PTR(-ENOENT);

	__sock_gen_cookie(sk);
	return &sk->sk_cookie;
}

static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
			     struct sock **psk)
{
	struct sock *sk;
	int err = 0;

	raw_spin_lock_bh(&stab->lock);
	sk = *psk;
	if (!sk_test || sk_test == sk)
		sk = xchg(psk, NULL);

	if (likely(sk))
		sock_map_unref(sk, psk);
	else
		err = -EINVAL;

	raw_spin_unlock_bh(&stab->lock);
	return err;
}

static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
				      void *link_raw)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

	__sock_map_delete(stab, sk, link_raw);
}

static int sock_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	u32 i = *(u32 *)key;
	struct sock **psk;

	if (unlikely(i >= map->max_entries))
		return -EINVAL;

	psk = &stab->sks[i];
	return __sock_map_delete(stab, NULL, psk);
}

static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	u32 i = key ? *(u32 *)key : U32_MAX;
	u32 *key_next = next;

	if (i == stab->map.max_entries - 1)
		return -ENOENT;
	if (i >= stab->map.max_entries)
		*key_next = 0;
	else
		*key_next = i + 1;
	return 0;
}

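/* Common insert path for sockmap. The slot swap honors the usual BPF
 * update flags: BPF_NOEXIST fails on an occupied slot (-EEXIST),
 * BPF_EXIST fails on an empty one (-ENOENT), BPF_ANY accepts both.
 * The new socket is written before the old one is released, so under
 * the stab lock the slot is never observed empty during a replace.
 */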
static int sock_map_update_common(struct bpf_map *map, u32 idx,
				  struct sock *sk, u64 flags)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct sk_psock_link *link;
	struct sk_psock *psock;
	struct sock *osk;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(idx >= map->max_entries))
		return -E2BIG;

	link = sk_psock_init_link();
	if (!link)
		return -ENOMEM;

	ret = sock_map_link(map, sk);
	if (ret < 0)
		goto out_free;

	psock = sk_psock(sk);
	WARN_ON_ONCE(!psock);

	raw_spin_lock_bh(&stab->lock);
	osk = stab->sks[idx];
	if (osk && flags == BPF_NOEXIST) {
		ret = -EEXIST;
		goto out_unlock;
	} else if (!osk && flags == BPF_EXIST) {
		ret = -ENOENT;
		goto out_unlock;
	}

	sock_map_add_link(psock, link, map, &stab->sks[idx]);
	stab->sks[idx] = sk;
	if (osk)
		sock_map_unref(osk, &stab->sks[idx]);
	raw_spin_unlock_bh(&stab->lock);
	return 0;
out_unlock:
	raw_spin_unlock_bh(&stab->lock);
	if (psock)
		sk_psock_put(sk, psock);
out_free:
	sk_psock_free_link(link);
	return ret;
}

static bool sock_map_op_okay(const struct bpf_sock_ops_kern *ops)
{
	return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
	       ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
	       ops->op == BPF_SOCK_OPS_TCP_LISTEN_CB;
}

static bool sock_map_redirect_allowed(const struct sock *sk)
{
	if (sk_is_tcp(sk))
		return sk->sk_state != TCP_LISTEN;
	else
		return sk->sk_state == TCP_ESTABLISHED;
}

static bool sock_map_sk_is_suitable(const struct sock *sk)
{
	return !!sk->sk_prot->psock_update_sk_prot;
}

static bool sock_map_sk_state_allowed(const struct sock *sk)
{
	if (sk_is_tcp(sk))
		return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN);
	return true;
}

static int sock_hash_update_common(struct bpf_map *map, void *key,
				   struct sock *sk, u64 flags);

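/* Syscall-path update (BPF_MAP_UPDATE_ELEM from userspace): the value
 * is a socket file descriptor, read as u32 or u64 depending on the
 * map's value_size. From userspace this is typically driven via
 * libbpf, e.g. (a sketch; idx and fd are illustrative):
 *
 *	int idx = 0, fd = accept(listen_fd, NULL, NULL);
 *	bpf_map_update_elem(map_fd, &idx, &fd, BPF_ANY);
 */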
int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
			     u64 flags)
{
	struct socket *sock;
	struct sock *sk;
	int ret;
	u64 ufd;

	if (map->value_size == sizeof(u64))
		ufd = *(u64 *)value;
	else
		ufd = *(u32 *)value;
	if (ufd > S32_MAX)
		return -EINVAL;

	sock = sockfd_lookup(ufd, &ret);
	if (!sock)
		return ret;
	sk = sock->sk;
	if (!sk) {
		ret = -EINVAL;
		goto out;
	}
	if (!sock_map_sk_is_suitable(sk)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	sock_map_sk_acquire(sk);
	if (!sock_map_sk_state_allowed(sk))
		ret = -EOPNOTSUPP;
	else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
		ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
	else
		ret = sock_hash_update_common(map, key, sk, flags);
	sock_map_sk_release(sk);
out:
	sockfd_put(sock);
	return ret;
}

static int sock_map_update_elem(struct bpf_map *map, void *key,
				void *value, u64 flags)
{
	struct sock *sk = (struct sock *)value;
	int ret;

	if (unlikely(!sk || !sk_fullsock(sk)))
		return -EINVAL;

	if (!sock_map_sk_is_suitable(sk))
		return -EOPNOTSUPP;

	local_bh_disable();
	bh_lock_sock(sk);
	if (!sock_map_sk_state_allowed(sk))
		ret = -EOPNOTSUPP;
	else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
		ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
	else
		ret = sock_hash_update_common(map, key, sk, flags);
	bh_unlock_sock(sk);
	local_bh_enable();
	return ret;
}

BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, sops,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (likely(sock_map_sk_is_suitable(sops->sk) &&
		   sock_map_op_okay(sops)))
		return sock_map_update_common(map, *(u32 *)key, sops->sk,
					      flags);
	return -EOPNOTSUPP;
}

const struct bpf_func_proto bpf_sock_map_update_proto = {
	.func		= bpf_sock_map_update,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};

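/* bpf_sk_redirect_map() helper, callable from sk_skb programs: look up
 * the target socket by @key and mark the skb for redirect. The only
 * accepted flag is BPF_F_INGRESS, which selects the target's ingress
 * queue instead of its egress path. Returns SK_PASS on success and
 * SK_DROP if the slot is empty or the socket cannot accept redirects.
 */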
BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
	   struct bpf_map *, map, u32, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_map_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;

	skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
	return SK_PASS;
}

const struct bpf_func_proto bpf_sk_redirect_map_proto = {
	.func		= bpf_sk_redirect_map,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
	   struct bpf_map *, map, u32, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_map_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;

	msg->flags = flags;
	msg->sk_redir = sk;
	return SK_PASS;
}

const struct bpf_func_proto bpf_msg_redirect_map_proto = {
	.func		= bpf_msg_redirect_map,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};

struct sock_map_seq_info {
	struct bpf_map *map;
	struct sock *sk;
	u32 index;
};

struct bpf_iter__sockmap {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
	__bpf_md_ptr(void *, key);
	__bpf_md_ptr(struct sock *, sk);
};

DEFINE_BPF_ITER_FUNC(sockmap, struct bpf_iter_meta *meta,
		     struct bpf_map *map, void *key,
		     struct sock *sk)

static void *sock_map_seq_lookup_elem(struct sock_map_seq_info *info)
{
	if (unlikely(info->index >= info->map->max_entries))
		return NULL;

	info->sk = __sock_map_lookup_elem(info->map, info->index);

	/* can't return sk directly, since that might be NULL */
	return info;
}

static void *sock_map_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	struct sock_map_seq_info *info = seq->private;

	if (*pos == 0)
		++*pos;

	/* pairs with sock_map_seq_stop */
	rcu_read_lock();
	return sock_map_seq_lookup_elem(info);
}

static void *sock_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	__must_hold(rcu)
{
	struct sock_map_seq_info *info = seq->private;

	++*pos;
	++info->index;

	return sock_map_seq_lookup_elem(info);
}

static int sock_map_seq_show(struct seq_file *seq, void *v)
	__must_hold(rcu)
{
	struct sock_map_seq_info *info = seq->private;
	struct bpf_iter__sockmap ctx = {};
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, !v);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (v) {
		ctx.key = &info->index;
		ctx.sk = info->sk;
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static void sock_map_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	if (!v)
		(void)sock_map_seq_show(seq, NULL);

	/* pairs with sock_map_seq_start */
	rcu_read_unlock();
}

static const struct seq_operations sock_map_seq_ops = {
	.start	= sock_map_seq_start,
	.next	= sock_map_seq_next,
	.stop	= sock_map_seq_stop,
	.show	= sock_map_seq_show,
};

static int sock_map_init_seq_private(void *priv_data,
				     struct bpf_iter_aux_info *aux)
{
	struct sock_map_seq_info *info = priv_data;

	info->map = aux->map;
	return 0;
}

static const struct bpf_iter_seq_info sock_map_iter_seq_info = {
	.seq_ops		= &sock_map_seq_ops,
	.init_seq_private	= sock_map_init_seq_private,
	.seq_priv_size		= sizeof(struct sock_map_seq_info),
};

static int sock_map_btf_id;
const struct bpf_map_ops sock_map_ops = {
	.map_meta_equal		= bpf_map_meta_equal,
	.map_alloc		= sock_map_alloc,
	.map_free		= sock_map_free,
	.map_get_next_key	= sock_map_get_next_key,
	.map_lookup_elem_sys_only = sock_map_lookup_sys,
	.map_update_elem	= sock_map_update_elem,
	.map_delete_elem	= sock_map_delete_elem,
	.map_lookup_elem	= sock_map_lookup,
	.map_release_uref	= sock_map_release_progs,
	.map_check_btf		= map_check_no_btf,
	.map_btf_name		= "bpf_stab",
	.map_btf_id		= &sock_map_btf_id,
	.iter_seq_info		= &sock_map_iter_seq_info,
};

struct bpf_shtab_elem {
	struct rcu_head rcu;
	u32 hash;
	struct sock *sk;
	struct hlist_node node;
	u8 key[];
};

struct bpf_shtab_bucket {
	struct hlist_head head;
	raw_spinlock_t lock;
};

struct bpf_shtab {
	struct bpf_map map;
	struct bpf_shtab_bucket *buckets;
	u32 buckets_num;
	u32 elem_size;
	struct sk_psock_progs progs;
	atomic_t count;
};

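/* sockhash internals: keys of arbitrary size are jhash'ed into one of
 * buckets_num (a power of two) hlist buckets, each guarded by its own
 * raw spinlock; lookups walk the chain under RCU.
 */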
static inline u32 sock_hash_bucket_hash(const void *key, u32 len)
{
	return jhash(key, len, 0);
}

static struct bpf_shtab_bucket *sock_hash_select_bucket(struct bpf_shtab *htab,
							 u32 hash)
{
	return &htab->buckets[hash & (htab->buckets_num - 1)];
}

static struct bpf_shtab_elem *
sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key,
			  u32 key_size)
{
	struct bpf_shtab_elem *elem;

	hlist_for_each_entry_rcu(elem, head, node) {
		if (elem->hash == hash &&
		    !memcmp(&elem->key, key, key_size))
			return elem;
	}

	return NULL;
}

static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	u32 key_size = map->key_size, hash;
	struct bpf_shtab_bucket *bucket;
	struct bpf_shtab_elem *elem;

	WARN_ON_ONCE(!rcu_read_lock_held());

	hash = sock_hash_bucket_hash(key, key_size);
	bucket = sock_hash_select_bucket(htab, hash);
	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);

	return elem ? elem->sk : NULL;
}

static void sock_hash_free_elem(struct bpf_shtab *htab,
				struct bpf_shtab_elem *elem)
{
	atomic_dec(&htab->count);
	kfree_rcu(elem, rcu);
}

static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
				       void *link_raw)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	struct bpf_shtab_elem *elem_probe, *elem = link_raw;
	struct bpf_shtab_bucket *bucket;

	WARN_ON_ONCE(!rcu_read_lock_held());
	bucket = sock_hash_select_bucket(htab, elem->hash);

	/* elem may be deleted in parallel from the map, but access here
	 * is okay since it's going away only after RCU grace period.
	 * However, we need to check whether it's still present.
	 */
	raw_spin_lock_bh(&bucket->lock);
	elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash,
					       elem->key, map->key_size);
	if (elem_probe && elem_probe == elem) {
		hlist_del_rcu(&elem->node);
		sock_map_unref(elem->sk, elem);
		sock_hash_free_elem(htab, elem);
	}
	raw_spin_unlock_bh(&bucket->lock);
}

static int sock_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	u32 hash, key_size = map->key_size;
	struct bpf_shtab_bucket *bucket;
	struct bpf_shtab_elem *elem;
	int ret = -ENOENT;

	hash = sock_hash_bucket_hash(key, key_size);
	bucket = sock_hash_select_bucket(htab, hash);

	raw_spin_lock_bh(&bucket->lock);
	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
	if (elem) {
		hlist_del_rcu(&elem->node);
		sock_map_unref(elem->sk, elem);
		sock_hash_free_elem(htab, elem);
		ret = 0;
	}
	raw_spin_unlock_bh(&bucket->lock);
	return ret;
}

static struct bpf_shtab_elem *sock_hash_alloc_elem(struct bpf_shtab *htab,
						   void *key, u32 key_size,
						   u32 hash, struct sock *sk,
						   struct bpf_shtab_elem *old)
{
	struct bpf_shtab_elem *new;

	if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
		if (!old) {
			atomic_dec(&htab->count);
			return ERR_PTR(-E2BIG);
		}
	}

	new = bpf_map_kmalloc_node(&htab->map, htab->elem_size,
				   GFP_ATOMIC | __GFP_NOWARN,
				   htab->map.numa_node);
	if (!new) {
		atomic_dec(&htab->count);
		return ERR_PTR(-ENOMEM);
	}
	memcpy(new->key, key, key_size);
	new->sk = sk;
	new->hash = hash;
	return new;
}

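/* Common insert path for sockhash, mirroring sock_map_update_common():
 * link the socket's psock to the map first, then splice the new
 * element into its bucket under the bucket lock, replacing any
 * existing element with the same key according to @flags.
 */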
static int sock_hash_update_common(struct bpf_map *map, void *key,
				   struct sock *sk, u64 flags)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	u32 key_size = map->key_size, hash;
	struct bpf_shtab_elem *elem, *elem_new;
	struct bpf_shtab_bucket *bucket;
	struct sk_psock_link *link;
	struct sk_psock *psock;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	link = sk_psock_init_link();
	if (!link)
		return -ENOMEM;

	ret = sock_map_link(map, sk);
	if (ret < 0)
		goto out_free;

	psock = sk_psock(sk);
	WARN_ON_ONCE(!psock);

	hash = sock_hash_bucket_hash(key, key_size);
	bucket = sock_hash_select_bucket(htab, hash);

	raw_spin_lock_bh(&bucket->lock);
	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
	if (elem && flags == BPF_NOEXIST) {
		ret = -EEXIST;
		goto out_unlock;
	} else if (!elem && flags == BPF_EXIST) {
		ret = -ENOENT;
		goto out_unlock;
	}

	elem_new = sock_hash_alloc_elem(htab, key, key_size, hash, sk, elem);
	if (IS_ERR(elem_new)) {
		ret = PTR_ERR(elem_new);
		goto out_unlock;
	}

	sock_map_add_link(psock, link, map, elem_new);
	/* Add new element to the head of the list, so that
	 * concurrent search will find it before old elem.
	 */
	hlist_add_head_rcu(&elem_new->node, &bucket->head);
	if (elem) {
		hlist_del_rcu(&elem->node);
		sock_map_unref(elem->sk, elem);
		sock_hash_free_elem(htab, elem);
	}
	raw_spin_unlock_bh(&bucket->lock);
	return 0;
out_unlock:
	raw_spin_unlock_bh(&bucket->lock);
	sk_psock_put(sk, psock);
out_free:
	sk_psock_free_link(link);
	return ret;
}

static int sock_hash_get_next_key(struct bpf_map *map, void *key,
				  void *key_next)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	struct bpf_shtab_elem *elem, *elem_next;
	u32 hash, key_size = map->key_size;
	struct hlist_head *head;
	int i = 0;

	if (!key)
		goto find_first_elem;
	hash = sock_hash_bucket_hash(key, key_size);
	head = &sock_hash_select_bucket(htab, hash)->head;
	elem = sock_hash_lookup_elem_raw(head, hash, key, key_size);
	if (!elem)
		goto find_first_elem;

	elem_next = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&elem->node)),
				     struct bpf_shtab_elem, node);
	if (elem_next) {
		memcpy(key_next, elem_next->key, key_size);
		return 0;
	}

	i = hash & (htab->buckets_num - 1);
	i++;
find_first_elem:
	for (; i < htab->buckets_num; i++) {
		head = &sock_hash_select_bucket(htab, i)->head;
		elem_next = hlist_entry_safe(rcu_dereference(hlist_first_rcu(head)),
					     struct bpf_shtab_elem, node);
		if (elem_next) {
			memcpy(key_next, elem_next->key, key_size);
			return 0;
		}
	}

	return -ENOENT;
}

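/* Allocate a sockhash. The bucket count is max_entries rounded up to a
 * power of two, and keys are capped at MAX_BPF_STACK bytes, matching
 * the largest key a BPF program can build on its stack.
 */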
static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
{
	struct bpf_shtab *htab;
	int i, err;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);
	if (attr->max_entries == 0 ||
	    attr->key_size == 0 ||
	    (attr->value_size != sizeof(u32) &&
	     attr->value_size != sizeof(u64)) ||
	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);
	if (attr->key_size > MAX_BPF_STACK)
		return ERR_PTR(-E2BIG);

	htab = kzalloc(sizeof(*htab), GFP_USER | __GFP_ACCOUNT);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&htab->map, attr);

	htab->buckets_num = roundup_pow_of_two(htab->map.max_entries);
	htab->elem_size = sizeof(struct bpf_shtab_elem) +
			  round_up(htab->map.key_size, 8);
	if (htab->buckets_num == 0 ||
	    htab->buckets_num > U32_MAX / sizeof(struct bpf_shtab_bucket)) {
		err = -EINVAL;
		goto free_htab;
	}

	htab->buckets = bpf_map_area_alloc(htab->buckets_num *
					   sizeof(struct bpf_shtab_bucket),
					   htab->map.numa_node);
	if (!htab->buckets) {
		err = -ENOMEM;
		goto free_htab;
	}

	for (i = 0; i < htab->buckets_num; i++) {
		INIT_HLIST_HEAD(&htab->buckets[i].head);
		raw_spin_lock_init(&htab->buckets[i].lock);
	}

	return &htab->map;
free_htab:
	kfree(htab);
	return ERR_PTR(err);
}

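/* Tear down a sockhash. Entries are moved off each bucket list under
 * the bucket lock with an extra socket reference held, then unlinked
 * outside the lock where it is safe to sleep on lock_sock().
 */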
static void sock_hash_free(struct bpf_map *map)
{
	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
	struct bpf_shtab_bucket *bucket;
	struct hlist_head unlink_list;
	struct bpf_shtab_elem *elem;
	struct hlist_node *node;
	int i;

	/* After the sync no updates or deletes will be in-flight so it
	 * is safe to walk map and remove entries without risking a race
	 * in EEXIST update case.
	 */
	synchronize_rcu();
	for (i = 0; i < htab->buckets_num; i++) {
		bucket = sock_hash_select_bucket(htab, i);

		/* We are racing with sock_hash_delete_from_link to
		 * enter the spin-lock critical section. Every socket on
		 * the list is still linked to sockhash. Since link
		 * exists, psock exists and holds a ref to socket. That
		 * lets us grab a socket ref too.
		 */
		raw_spin_lock_bh(&bucket->lock);
		hlist_for_each_entry(elem, &bucket->head, node)
			sock_hold(elem->sk);
		hlist_move_list(&bucket->head, &unlink_list);
		raw_spin_unlock_bh(&bucket->lock);

		/* Process removed entries out of atomic context to
		 * block for socket lock before deleting the psock's
		 * link to sockhash.
		 */
		hlist_for_each_entry_safe(elem, node, &unlink_list, node) {
			hlist_del(&elem->node);
			lock_sock(elem->sk);
			rcu_read_lock();
			sock_map_unref(elem->sk, elem);
			rcu_read_unlock();
			release_sock(elem->sk);
			sock_put(elem->sk);
			sock_hash_free_elem(htab, elem);
		}
	}

	/* wait for psock readers accessing its map link */
	synchronize_rcu();

	bpf_map_area_free(htab->buckets);
	kfree(htab);
}

static void *sock_hash_lookup_sys(struct bpf_map *map, void *key)
{
	struct sock *sk;

	if (map->value_size != sizeof(u64))
		return ERR_PTR(-ENOSPC);

	sk = __sock_hash_lookup_elem(map, key);
	if (!sk)
		return ERR_PTR(-ENOENT);

	__sock_gen_cookie(sk);
	return &sk->sk_cookie;
}

static void *sock_hash_lookup(struct bpf_map *map, void *key)
{
	struct sock *sk;

	sk = __sock_hash_lookup_elem(map, key);
	if (!sk)
		return NULL;
	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
		return NULL;
	return sk;
}

static void sock_hash_release_progs(struct bpf_map *map)
{
	psock_progs_drop(&container_of(map, struct bpf_shtab, map)->progs);
}

BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, sops,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (likely(sock_map_sk_is_suitable(sops->sk) &&
		   sock_map_op_okay(sops)))
		return sock_hash_update_common(map, key, sops->sk, flags);
	return -EOPNOTSUPP;
}

const struct bpf_func_proto bpf_sock_hash_update_proto = {
	.func		= bpf_sock_hash_update,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_hash_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;

	skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
	return SK_PASS;
}

const struct bpf_func_proto bpf_sk_redirect_hash_proto = {
	.func		= bpf_sk_redirect_hash,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	struct sock *sk;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	sk = __sock_hash_lookup_elem(map, key);
	if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
		return SK_DROP;

	msg->flags = flags;
	msg->sk_redir = sk;
	return SK_PASS;
}

const struct bpf_func_proto bpf_msg_redirect_hash_proto = {
	.func		= bpf_msg_redirect_hash,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};

struct sock_hash_seq_info {
	struct bpf_map *map;
	struct bpf_shtab *htab;
	u32 bucket_id;
};

static void *sock_hash_seq_find_next(struct sock_hash_seq_info *info,
				     struct bpf_shtab_elem *prev_elem)
{
	const struct bpf_shtab *htab = info->htab;
	struct bpf_shtab_bucket *bucket;
	struct bpf_shtab_elem *elem;
	struct hlist_node *node;

	/* try to find next elem in the same bucket */
	if (prev_elem) {
		node = rcu_dereference(hlist_next_rcu(&prev_elem->node));
		elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
		if (elem)
			return elem;

		/* no more elements, continue in the next bucket */
		info->bucket_id++;
	}

	for (; info->bucket_id < htab->buckets_num; info->bucket_id++) {
		bucket = &htab->buckets[info->bucket_id];
		node = rcu_dereference(hlist_first_rcu(&bucket->head));
		elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
		if (elem)
			return elem;
	}

	return NULL;
}

static void *sock_hash_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	struct sock_hash_seq_info *info = seq->private;

	if (*pos == 0)
		++*pos;

	/* pairs with sock_hash_seq_stop */
	rcu_read_lock();
	return sock_hash_seq_find_next(info, NULL);
}

static void *sock_hash_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	__must_hold(rcu)
{
	struct sock_hash_seq_info *info = seq->private;

	++*pos;
	return sock_hash_seq_find_next(info, v);
}

static int sock_hash_seq_show(struct seq_file *seq, void *v)
	__must_hold(rcu)
{
	struct sock_hash_seq_info *info = seq->private;
	struct bpf_iter__sockmap ctx = {};
	struct bpf_shtab_elem *elem = v;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, !elem);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (elem) {
		ctx.key = elem->key;
		ctx.sk = elem->sk;
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static void sock_hash_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	if (!v)
		(void)sock_hash_seq_show(seq, NULL);

	/* pairs with sock_hash_seq_start */
	rcu_read_unlock();
}

static const struct seq_operations sock_hash_seq_ops = {
	.start	= sock_hash_seq_start,
	.next	= sock_hash_seq_next,
	.stop	= sock_hash_seq_stop,
	.show	= sock_hash_seq_show,
};

static int sock_hash_init_seq_private(void *priv_data,
				      struct bpf_iter_aux_info *aux)
{
	struct sock_hash_seq_info *info = priv_data;

	info->map = aux->map;
	info->htab = container_of(aux->map, struct bpf_shtab, map);
	return 0;
}

static const struct bpf_iter_seq_info sock_hash_iter_seq_info = {
	.seq_ops		= &sock_hash_seq_ops,
	.init_seq_private	= sock_hash_init_seq_private,
	.seq_priv_size		= sizeof(struct sock_hash_seq_info),
};

static int sock_hash_map_btf_id;
const struct bpf_map_ops sock_hash_ops = {
	.map_meta_equal		= bpf_map_meta_equal,
	.map_alloc		= sock_hash_alloc,
	.map_free		= sock_hash_free,
	.map_get_next_key	= sock_hash_get_next_key,
	.map_update_elem	= sock_map_update_elem,
	.map_delete_elem	= sock_hash_delete_elem,
	.map_lookup_elem	= sock_hash_lookup,
	.map_lookup_elem_sys_only = sock_hash_lookup_sys,
	.map_release_uref	= sock_hash_release_progs,
	.map_check_btf		= map_check_no_btf,
	.map_btf_name		= "bpf_shtab",
	.map_btf_id		= &sock_hash_map_btf_id,
	.iter_seq_info		= &sock_hash_iter_seq_info,
};

static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
{
	switch (map->map_type) {
	case BPF_MAP_TYPE_SOCKMAP:
		return &container_of(map, struct bpf_stab, map)->progs;
	case BPF_MAP_TYPE_SOCKHASH:
		return &container_of(map, struct bpf_shtab, map)->progs;
	default:
		break;
	}

	return NULL;
}

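/* Map a BPF attach type to the program slot it occupies in the map's
 * sk_psock_progs. stream_verdict and skb_verdict are mutually
 * exclusive: attaching one while the other is set returns -EBUSY.
 */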
static int sock_map_prog_lookup(struct bpf_map *map, struct bpf_prog ***pprog,
				u32 which)
{
	struct sk_psock_progs *progs = sock_map_progs(map);

	if (!progs)
		return -EOPNOTSUPP;

	switch (which) {
	case BPF_SK_MSG_VERDICT:
		*pprog = &progs->msg_parser;
		break;
#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
	case BPF_SK_SKB_STREAM_PARSER:
		*pprog = &progs->stream_parser;
		break;
#endif
	case BPF_SK_SKB_STREAM_VERDICT:
		if (progs->skb_verdict)
			return -EBUSY;
		*pprog = &progs->stream_verdict;
		break;
	case BPF_SK_SKB_VERDICT:
		if (progs->stream_verdict)
			return -EBUSY;
		*pprog = &progs->skb_verdict;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
				struct bpf_prog *old, u32 which)
{
	struct bpf_prog **pprog;
	int ret;

	ret = sock_map_prog_lookup(map, &pprog, which);
	if (ret)
		return ret;

	if (old)
		return psock_replace_prog(pprog, prog, old);

	psock_set_prog(pprog, prog);
	return 0;
}

int sock_map_bpf_prog_query(const union bpf_attr *attr,
			    union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	u32 prog_cnt = 0, flags = 0, ufd = attr->target_fd;
	struct bpf_prog **pprog;
	struct bpf_prog *prog;
	struct bpf_map *map;
	struct fd f;
	u32 id = 0;
	int ret;

	if (attr->query.query_flags)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	rcu_read_lock();

	ret = sock_map_prog_lookup(map, &pprog, attr->query.attach_type);
	if (ret)
		goto end;

	prog = *pprog;
	prog_cnt = !prog ? 0 : 1;

	if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)
		goto end;

	/* we do not hold the refcnt, the bpf prog may be released
	 * asynchronously and the id would be set to 0.
	 */
	id = data_race(prog->aux->id);
	if (id == 0)
		prog_cnt = 0;

end:
	rcu_read_unlock();

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)) ||
	    (id != 0 && copy_to_user(prog_ids, &id, sizeof(u32))) ||
	    copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
		ret = -EFAULT;

	fdput(f);
	return ret;
}

static void sock_map_unlink(struct sock *sk, struct sk_psock_link *link)
{
	switch (link->map->map_type) {
	case BPF_MAP_TYPE_SOCKMAP:
		return sock_map_delete_from_link(link->map, sk,
						 link->link_raw);
	case BPF_MAP_TYPE_SOCKHASH:
		return sock_hash_delete_from_link(link->map, sk,
						  link->link_raw);
	default:
		break;
	}
}

static void sock_map_remove_links(struct sock *sk, struct sk_psock *psock)
{
	struct sk_psock_link *link;

	while ((link = sk_psock_link_pop(psock))) {
		sock_map_unlink(sk, link);
		sk_psock_free_link(link);
	}
}

void sock_map_unhash(struct sock *sk)
{
	void (*saved_unhash)(struct sock *sk);
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		rcu_read_unlock();
		if (sk->sk_prot->unhash)
			sk->sk_prot->unhash(sk);
		return;
	}

	saved_unhash = psock->saved_unhash;
	sock_map_remove_links(sk, psock);
	rcu_read_unlock();
	saved_unhash(sk);
}
EXPORT_SYMBOL_GPL(sock_map_unhash);

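/* Replacement for the socket's close() proto op while it sits in a
 * sockmap: drop every map link and the psock itself, then chain to the
 * protocol's original close so the socket is torn down normally.
 */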
void sock_map_close(struct sock *sk, long timeout)
{
	void (*saved_close)(struct sock *sk, long timeout);
	struct sk_psock *psock;

	lock_sock(sk);
	rcu_read_lock();
	psock = sk_psock_get(sk);
	if (unlikely(!psock)) {
		rcu_read_unlock();
		release_sock(sk);
		return sk->sk_prot->close(sk, timeout);
	}

	saved_close = psock->saved_close;
	sock_map_remove_links(sk, psock);
	rcu_read_unlock();
	sk_psock_stop(psock, true);
	sk_psock_put(sk, psock);
	release_sock(sk);
	saved_close(sk, timeout);
}
EXPORT_SYMBOL_GPL(sock_map_close);

static int sock_map_iter_attach_target(struct bpf_prog *prog,
				       union bpf_iter_link_info *linfo,
				       struct bpf_iter_aux_info *aux)
{
	struct bpf_map *map;
	int err = -EINVAL;

	if (!linfo->map.map_fd)
		return -EBADF;

	map = bpf_map_get_with_uref(linfo->map.map_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type != BPF_MAP_TYPE_SOCKMAP &&
	    map->map_type != BPF_MAP_TYPE_SOCKHASH)
		goto put_map;

	if (prog->aux->max_rdonly_access > map->key_size) {
		err = -EACCES;
		goto put_map;
	}

	aux->map = map;
	return 0;

put_map:
	bpf_map_put_with_uref(map);
	return err;
}

static void sock_map_iter_detach_target(struct bpf_iter_aux_info *aux)
{
	bpf_map_put_with_uref(aux->map);
}

static struct bpf_iter_reg sock_map_iter_reg = {
	.target			= "sockmap",
	.attach_target		= sock_map_iter_attach_target,
	.detach_target		= sock_map_iter_detach_target,
	.show_fdinfo		= bpf_iter_map_show_fdinfo,
	.fill_link_info		= bpf_iter_map_fill_link_info,
	.ctx_arg_info_size	= 2,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__sockmap, key),
		  PTR_TO_BUF | PTR_MAYBE_NULL | MEM_RDONLY },
		{ offsetof(struct bpf_iter__sockmap, sk),
		  PTR_TO_BTF_ID_OR_NULL },
	},
};

static int __init bpf_sockmap_iter_init(void)
{
	sock_map_iter_reg.ctx_arg_info[1].btf_id =
		btf_sock_ids[BTF_SOCK_TYPE_SOCK];
	return bpf_iter_reg_target(&sock_map_iter_reg);
}
late_initcall(bpf_sockmap_iter_init);