
Merge branch 'bpf: Add socket destroy capability'

Aditi Ghag says:

====================

This patch set adds the capability to destroy sockets from BPF. We plan to
use the capability in Cilium to force client sockets to reconnect when
their remote load-balancing backends are deleted. The other use case is
on-the-fly policy enforcement, where existing connections that newly
enforced policies disallow need to be terminated.

The use cases and further details on the selected approach were
presented at LPC 2022 -
https://lpc.events/event/16/contributions/1358/.
RFC discussion -
https://lore.kernel.org/netdev/CABG=zsBEh-P4NXk23eBJw7eajB5YJeRS7oPXnTAzs=yob4EMoQ@mail.gmail.com/T/#u.
v8 patch series -
https://lore.kernel.org/bpf/20230517175359.527917-1-aditi.ghag@isovalent.com/

v9 highlights:
Addressed review comments:
Martin:
- Rearranged the kfunc filter patch, and added the missing break
statement.
- Squashed the extended selftest/bpf patch.
Yonghong:
- Revised commit message for patch 1.

(The notes below are the same as for the v8 patch series and are still
relevant. Refer to earlier versions of the patch series for other notes.)
- I hit a snag while writing the kfunc: the verifier complained about the
  `sock_common` type passed from the TCP iterator. With kfuncs, there don't
  seem to be any options available for passing BTF type hints to the verifier
  (an equivalent of `ARG_PTR_TO_BTF_ID_SOCK_COMMON`, as was the case with the
  helper). As a result, I changed the argument type of the sock_destroy
  kfunc to `sock_common` (a condensed caller sketch follows the cover letter).
====================
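
For orientation, a condensed sketch of an iterator program invoking the
kfunc with the `sock_common` argument type discussed above. It is adapted
from the selftest programs added later in this series; the program name and
the trimmed-down filtering are illustrative, not part of the patches:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include "bpf_tracing_net.h"    /* selftest header providing AF_INET6 */

    /* The kfunc takes a struct sock_common * (see the note above). */
    int bpf_sock_destroy(struct sock_common *sk) __ksym;

    SEC("iter/tcp")
    int destroy_tcp6(struct bpf_iter__tcp *ctx)
    {
            struct sock_common *sk_common = ctx->sk_common;

            if (!sk_common || sk_common->skc_family != AF_INET6)
                    return 0;

            /* The TCP iterator holds the sock lock here, so the kfunc may
             * be invoked; it aborts the connection with ECONNABORTED.
             */
            bpf_sock_destroy(sk_common);
            return 0;
    }

    char _license[] SEC("license") = "GPL";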

Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>

+794 -79
+11 -7
include/linux/btf.h
···
 union bpf_attr;
 struct btf_show;
 struct btf_id_set;
+struct bpf_prog;
+
+typedef int (*btf_kfunc_filter_t)(const struct bpf_prog *prog, u32 kfunc_id);

 struct btf_kfunc_id_set {
         struct module *owner;
         struct btf_id_set8 *set;
+        btf_kfunc_filter_t filter;
 };

 struct btf_id_dtor_kfunc {
···
         return bsearch(&id, set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func);
 }

-struct bpf_prog;
 struct bpf_verifier_log;

 #ifdef CONFIG_BPF_SYSCALL
···
 const char *btf_name_by_offset(const struct btf *btf, u32 offset);
 struct btf *btf_parse_vmlinux(void);
 struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog);
-u32 *btf_kfunc_id_set_contains(const struct btf *btf,
-                               enum bpf_prog_type prog_type,
-                               u32 kfunc_btf_id);
-u32 *btf_kfunc_is_modify_return(const struct btf *btf, u32 kfunc_btf_id);
+u32 *btf_kfunc_id_set_contains(const struct btf *btf, u32 kfunc_btf_id,
+                               const struct bpf_prog *prog);
+u32 *btf_kfunc_is_modify_return(const struct btf *btf, u32 kfunc_btf_id,
+                                const struct bpf_prog *prog);
 int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
                               const struct btf_kfunc_id_set *s);
 int register_btf_fmodret_id_set(const struct btf_kfunc_id_set *kset);
···
         return NULL;
 }
 static inline u32 *btf_kfunc_id_set_contains(const struct btf *btf,
-                                             enum bpf_prog_type prog_type,
-                                             u32 kfunc_btf_id)
+                                             u32 kfunc_btf_id,
+                                             struct bpf_prog *prog)
+
 {
         return NULL;
 }
-1
include/net/udp.h
···
 struct udp_iter_state {
         struct seq_net_private  p;
         int                     bucket;
-        struct udp_seq_afinfo   *bpf_seq_afinfo;
 };

 void *udp_seq_start(struct seq_file *seq, loff_t *pos);
+54 -11
kernel/bpf/btf.c
···
 enum {
         BTF_KFUNC_SET_MAX_CNT = 256,
         BTF_DTOR_KFUNC_MAX_CNT = 256,
+        BTF_KFUNC_FILTER_MAX_CNT = 16,
+};
+
+struct btf_kfunc_hook_filter {
+        btf_kfunc_filter_t filters[BTF_KFUNC_FILTER_MAX_CNT];
+        u32 nr_filters;
 };

 struct btf_kfunc_set_tab {
         struct btf_id_set8 *sets[BTF_KFUNC_HOOK_MAX];
+        struct btf_kfunc_hook_filter hook_filters[BTF_KFUNC_HOOK_MAX];
 };

 struct btf_id_dtor_kfunc_tab {
···
 /* Kernel Function (kfunc) BTF ID set registration API */

 static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook,
-                                  struct btf_id_set8 *add_set)
+                                  const struct btf_kfunc_id_set *kset)
 {
+        struct btf_kfunc_hook_filter *hook_filter;
+        struct btf_id_set8 *add_set = kset->set;
         bool vmlinux_set = !btf_is_module(btf);
+        bool add_filter = !!kset->filter;
         struct btf_kfunc_set_tab *tab;
         struct btf_id_set8 *set;
         u32 set_cnt;
···
                 return 0;

         tab = btf->kfunc_set_tab;
+
+        if (tab && add_filter) {
+                u32 i;
+
+                hook_filter = &tab->hook_filters[hook];
+                for (i = 0; i < hook_filter->nr_filters; i++) {
+                        if (hook_filter->filters[i] == kset->filter) {
+                                add_filter = false;
+                                break;
+                        }
+                }
+
+                if (add_filter && hook_filter->nr_filters == BTF_KFUNC_FILTER_MAX_CNT) {
+                        ret = -E2BIG;
+                        goto end;
+                }
+        }
+
         if (!tab) {
                 tab = kzalloc(sizeof(*tab), GFP_KERNEL | __GFP_NOWARN);
                 if (!tab)
···
          */
         if (!vmlinux_set) {
                 tab->sets[hook] = add_set;
-                return 0;
+                goto do_add_filter;
         }

         /* In case of vmlinux sets, there may be more than one set being
···

         sort(set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func, NULL);

+do_add_filter:
+        if (add_filter) {
+                hook_filter = &tab->hook_filters[hook];
+                hook_filter->filters[hook_filter->nr_filters++] = kset->filter;
+        }
         return 0;
 end:
         btf_free_kfunc_set_tab(btf);
···

 static u32 *__btf_kfunc_id_set_contains(const struct btf *btf,
                                         enum btf_kfunc_hook hook,
-                                        u32 kfunc_btf_id)
+                                        u32 kfunc_btf_id,
+                                        const struct bpf_prog *prog)
 {
+        struct btf_kfunc_hook_filter *hook_filter;
         struct btf_id_set8 *set;
-        u32 *id;
+        u32 *id, i;

         if (hook >= BTF_KFUNC_HOOK_MAX)
                 return NULL;
         if (!btf->kfunc_set_tab)
                 return NULL;
+        hook_filter = &btf->kfunc_set_tab->hook_filters[hook];
+        for (i = 0; i < hook_filter->nr_filters; i++) {
+                if (hook_filter->filters[i](prog, kfunc_btf_id))
+                        return NULL;
+        }
         set = btf->kfunc_set_tab->sets[hook];
         if (!set)
                 return NULL;
···
  * protection for looking up a well-formed btf->kfunc_set_tab.
  */
 u32 *btf_kfunc_id_set_contains(const struct btf *btf,
-                               enum bpf_prog_type prog_type,
-                               u32 kfunc_btf_id)
+                               u32 kfunc_btf_id,
+                               const struct bpf_prog *prog)
 {
+        enum bpf_prog_type prog_type = resolve_prog_type(prog);
         enum btf_kfunc_hook hook;
         u32 *kfunc_flags;

-        kfunc_flags = __btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_COMMON, kfunc_btf_id);
+        kfunc_flags = __btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_COMMON, kfunc_btf_id, prog);
         if (kfunc_flags)
                 return kfunc_flags;

         hook = bpf_prog_type_to_kfunc_hook(prog_type);
-        return __btf_kfunc_id_set_contains(btf, hook, kfunc_btf_id);
+        return __btf_kfunc_id_set_contains(btf, hook, kfunc_btf_id, prog);
 }

-u32 *btf_kfunc_is_modify_return(const struct btf *btf, u32 kfunc_btf_id)
+u32 *btf_kfunc_is_modify_return(const struct btf *btf, u32 kfunc_btf_id,
+                                const struct bpf_prog *prog)
 {
-        return __btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_FMODRET, kfunc_btf_id);
+        return __btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_FMODRET, kfunc_btf_id, prog);
 }

 static int __register_btf_kfunc_id_set(enum btf_kfunc_hook hook,
···
                 goto err_out;
         }

-        ret = btf_populate_kfunc_set(btf, hook, kset->set);
+        ret = btf_populate_kfunc_set(btf, hook, kset);
+
 err_out:
         btf_put(btf);
         return ret;
+4 -3
kernel/bpf/verifier.c
···
         *kfunc_name = func_name;
         func_proto = btf_type_by_id(desc_btf, func->type);

-        kfunc_flags = btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog), func_id);
+        kfunc_flags = btf_kfunc_id_set_contains(desc_btf, func_id, env->prog);
         if (!kfunc_flags) {
                 return -EACCES;
         }
···
                  * in the fmodret id set with the KF_SLEEPABLE flag.
                  */
                 else {
-                        u32 *flags = btf_kfunc_is_modify_return(btf, btf_id);
+                        u32 *flags = btf_kfunc_is_modify_return(btf, btf_id,
+                                                                prog);

                         if (flags && (*flags & KF_SLEEPABLE))
                                 ret = 0;
···
                         return -EINVAL;
                 }
                 ret = -EINVAL;
-                if (btf_kfunc_is_modify_return(btf, btf_id) ||
+                if (btf_kfunc_is_modify_return(btf, btf_id, prog) ||
                     !check_attach_modify_return(addr, tname))
                         ret = 0;
                 if (ret) {
+63
net/core/filter.c
···
         return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &bpf_kfunc_set_xdp);
 }
 late_initcall(bpf_kfunc_init);
+
+/* Disables missing prototype warnings */
+__diag_push();
+__diag_ignore_all("-Wmissing-prototypes",
+                  "Global functions as their definitions will be in vmlinux BTF");
+
+/* bpf_sock_destroy: Destroy the given socket with ECONNABORTED error code.
+ *
+ * The function expects a non-NULL pointer to a socket, and invokes the
+ * protocol specific socket destroy handlers.
+ *
+ * The helper can only be called from BPF contexts that have acquired the socket
+ * locks.
+ *
+ * Parameters:
+ * @sock: Pointer to socket to be destroyed
+ *
+ * Return:
+ * On error, may return EPROTONOSUPPORT, EINVAL.
+ * EPROTONOSUPPORT if protocol specific destroy handler is not supported.
+ * 0 otherwise
+ */
+__bpf_kfunc int bpf_sock_destroy(struct sock_common *sock)
+{
+        struct sock *sk = (struct sock *)sock;
+
+        /* The locking semantics that allow for synchronous execution of the
+         * destroy handlers are only supported for TCP and UDP.
+         * Supporting protocols will need to acquire sock lock in the BPF context
+         * prior to invoking this kfunc.
+         */
+        if (!sk->sk_prot->diag_destroy || (sk->sk_protocol != IPPROTO_TCP &&
+                                           sk->sk_protocol != IPPROTO_UDP))
+                return -EOPNOTSUPP;
+
+        return sk->sk_prot->diag_destroy(sk, ECONNABORTED);
+}
+
+__diag_pop()
+
+BTF_SET8_START(bpf_sk_iter_kfunc_ids)
+BTF_ID_FLAGS(func, bpf_sock_destroy, KF_TRUSTED_ARGS)
+BTF_SET8_END(bpf_sk_iter_kfunc_ids)
+
+static int tracing_iter_filter(const struct bpf_prog *prog, u32 kfunc_id)
+{
+        if (btf_id_set8_contains(&bpf_sk_iter_kfunc_ids, kfunc_id) &&
+            prog->expected_attach_type != BPF_TRACE_ITER)
+                return -EACCES;
+        return 0;
+}
+
+static const struct btf_kfunc_id_set bpf_sk_iter_kfunc_set = {
+        .owner = THIS_MODULE,
+        .set   = &bpf_sk_iter_kfunc_ids,
+        .filter = tracing_iter_filter,
+};
+
+static int init_subsystem(void)
+{
+        return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_sk_iter_kfunc_set);
+}
+late_initcall(init_subsystem);
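
The kfunc filter registered above is a general mechanism. A minimal sketch
of how another kfunc set could be gated the same way through the new
`.filter` member; `my_kfunc`, `my_kfunc_ids`, and `my_filter` are
illustrative names, not part of the patches:

    /* Illustrative only: hide my_kfunc from everything except
     * BPF_TRACE_ITER programs at verification time.
     */
    BTF_SET8_START(my_kfunc_ids)
    BTF_ID_FLAGS(func, my_kfunc, KF_TRUSTED_ARGS)
    BTF_SET8_END(my_kfunc_ids)

    static int my_filter(const struct bpf_prog *prog, u32 kfunc_id)
    {
            /* Filters run for every kfunc on the hook, so check set
             * membership before rejecting; non-zero hides the kfunc.
             */
            if (btf_id_set8_contains(&my_kfunc_ids, kfunc_id) &&
                prog->expected_attach_type != BPF_TRACE_ITER)
                    return -EACCES;
            return 0;
    }

    static const struct btf_kfunc_id_set my_kfunc_set = {
            .owner  = THIS_MODULE,
            .set    = &my_kfunc_ids,
            .filter = my_filter,
    };

    /* registered via register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &my_kfunc_set) */
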
+6 -3
net/ipv4/tcp.c
···
                 return 0;
         }

-        /* Don't race with userspace socket closes such as tcp_close. */
-        lock_sock(sk);
+        /* BPF context ensures sock locking. */
+        if (!has_current_bpf_ctx())
+                /* Don't race with userspace socket closes such as tcp_close. */
+                lock_sock(sk);

         if (sk->sk_state == TCP_LISTEN) {
                 tcp_set_state(sk, TCP_CLOSE);
···
         bh_unlock_sock(sk);
         local_bh_enable();
         tcp_write_queue_purge(sk);
-        release_sock(sk);
+        if (!has_current_bpf_ctx())
+                release_sock(sk);
         return 0;
 }
 EXPORT_SYMBOL_GPL(tcp_abort);
+3 -4
net/ipv4/tcp_ipv4.c
···
         struct bpf_iter_meta meta;
         struct bpf_prog *prog;
         struct sock *sk = v;
-        bool slow;
         uid_t uid;
         int ret;

···
                 return 0;

         if (sk_fullsock(sk))
-                slow = lock_sock_fast(sk);
+                lock_sock(sk);

         if (unlikely(sk_unhashed(sk))) {
                 ret = SEQ_SKIP;
···

 unlock:
         if (sk_fullsock(sk))
-                unlock_sock_fast(sk, slow);
+                release_sock(sk);
         return ret;

 }
···
         .ctx_arg_info_size      = 1,
         .ctx_arg_info           = {
                 { offsetof(struct bpf_iter__tcp, sk_common),
-                  PTR_TO_BTF_ID_OR_NULL },
+                  PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED },
         },
         .get_func_proto         = bpf_iter_tcp_get_func_proto,
         .seq_info               = &tcp_seq_info,
+241 -50
net/ipv4/udp.c
···

 int udp_abort(struct sock *sk, int err)
 {
-        lock_sock(sk);
+        if (!has_current_bpf_ctx())
+                lock_sock(sk);

         /* udp{v6}_destroy_sock() sets it under the sk lock, avoid racing
          * with close()
···
         __udp_disconnect(sk, 0);

 out:
-        release_sock(sk);
+        if (!has_current_bpf_ctx())
+                release_sock(sk);

         return 0;
 }
···
 /* ------------------------------------------------------------------------ */
 #ifdef CONFIG_PROC_FS

-static struct udp_table *udp_get_table_afinfo(struct udp_seq_afinfo *afinfo,
-                                              struct net *net)
+static unsigned short seq_file_family(const struct seq_file *seq);
+static bool seq_sk_match(struct seq_file *seq, const struct sock *sk)
 {
+        unsigned short family = seq_file_family(seq);
+
+        /* AF_UNSPEC is used as a match all */
+        return ((family == AF_UNSPEC || family == sk->sk_family) &&
+                net_eq(sock_net(sk), seq_file_net(seq)));
+}
+
+#ifdef CONFIG_BPF_SYSCALL
+static const struct seq_operations bpf_iter_udp_seq_ops;
+#endif
+static struct udp_table *udp_get_table_seq(struct seq_file *seq,
+                                           struct net *net)
+{
+        const struct udp_seq_afinfo *afinfo;
+
+#ifdef CONFIG_BPF_SYSCALL
+        if (seq->op == &bpf_iter_udp_seq_ops)
+                return net->ipv4.udp_table;
+#endif
+
+        afinfo = pde_data(file_inode(seq->file));
         return afinfo->udp_table ? : net->ipv4.udp_table;
 }
···
 {
         struct udp_iter_state *state = seq->private;
         struct net *net = seq_file_net(seq);
-        struct udp_seq_afinfo *afinfo;
         struct udp_table *udptable;
         struct sock *sk;

-        if (state->bpf_seq_afinfo)
-                afinfo = state->bpf_seq_afinfo;
-        else
-                afinfo = pde_data(file_inode(seq->file));
-
-        udptable = udp_get_table_afinfo(afinfo, net);
+        udptable = udp_get_table_seq(seq, net);

         for (state->bucket = start; state->bucket <= udptable->mask;
              ++state->bucket) {
···

                 spin_lock_bh(&hslot->lock);
                 sk_for_each(sk, &hslot->head) {
-                        if (!net_eq(sock_net(sk), net))
-                                continue;
-                        if (afinfo->family == AF_UNSPEC ||
-                            sk->sk_family == afinfo->family)
+                        if (seq_sk_match(seq, sk))
                                 goto found;
                 }
                 spin_unlock_bh(&hslot->lock);
···
 {
         struct udp_iter_state *state = seq->private;
         struct net *net = seq_file_net(seq);
-        struct udp_seq_afinfo *afinfo;
         struct udp_table *udptable;
-
-        if (state->bpf_seq_afinfo)
-                afinfo = state->bpf_seq_afinfo;
-        else
-                afinfo = pde_data(file_inode(seq->file));

         do {
                 sk = sk_next(sk);
-        } while (sk && (!net_eq(sock_net(sk), net) ||
-                        (afinfo->family != AF_UNSPEC &&
-                         sk->sk_family != afinfo->family)));
+        } while (sk && !seq_sk_match(seq, sk));

         if (!sk) {
-                udptable = udp_get_table_afinfo(afinfo, net);
+                udptable = udp_get_table_seq(seq, net);

                 if (state->bucket <= udptable->mask)
                         spin_unlock_bh(&udptable->hash[state->bucket].lock);
···
 void udp_seq_stop(struct seq_file *seq, void *v)
 {
         struct udp_iter_state *state = seq->private;
-        struct udp_seq_afinfo *afinfo;
         struct udp_table *udptable;

-        if (state->bpf_seq_afinfo)
-                afinfo = state->bpf_seq_afinfo;
-        else
-                afinfo = pde_data(file_inode(seq->file));
-
-        udptable = udp_get_table_afinfo(afinfo, seq_file_net(seq));
+        udptable = udp_get_table_seq(seq, seq_file_net(seq));

         if (state->bucket <= udptable->mask)
                 spin_unlock_bh(&udptable->hash[state->bucket].lock);
···
         int bucket __aligned(8);
 };

+struct bpf_udp_iter_state {
+        struct udp_iter_state state;
+        unsigned int cur_sk;
+        unsigned int end_sk;
+        unsigned int max_sk;
+        int offset;
+        struct sock **batch;
+        bool st_bucket_done;
+};
+
+static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter,
+                                      unsigned int new_batch_sz);
+static struct sock *bpf_iter_udp_batch(struct seq_file *seq)
+{
+        struct bpf_udp_iter_state *iter = seq->private;
+        struct udp_iter_state *state = &iter->state;
+        struct net *net = seq_file_net(seq);
+        struct udp_table *udptable;
+        unsigned int batch_sks = 0;
+        bool resized = false;
+        struct sock *sk;
+
+        /* The current batch is done, so advance the bucket. */
+        if (iter->st_bucket_done) {
+                state->bucket++;
+                iter->offset = 0;
+        }
+
+        udptable = udp_get_table_seq(seq, net);
+
+again:
+        /* New batch for the next bucket.
+         * Iterate over the hash table to find a bucket with sockets matching
+         * the iterator attributes, and return the first matching socket from
+         * the bucket. The remaining matched sockets from the bucket are batched
+         * before releasing the bucket lock. This allows BPF programs that are
+         * called in seq_show to acquire the bucket lock if needed.
+         */
+        iter->cur_sk = 0;
+        iter->end_sk = 0;
+        iter->st_bucket_done = false;
+        batch_sks = 0;
+
+        for (; state->bucket <= udptable->mask; state->bucket++) {
+                struct udp_hslot *hslot2 = &udptable->hash2[state->bucket];
+
+                if (hlist_empty(&hslot2->head)) {
+                        iter->offset = 0;
+                        continue;
+                }
+
+                spin_lock_bh(&hslot2->lock);
+                udp_portaddr_for_each_entry(sk, &hslot2->head) {
+                        if (seq_sk_match(seq, sk)) {
+                                /* Resume from the last iterated socket at the
+                                 * offset in the bucket before iterator was stopped.
+                                 */
+                                if (iter->offset) {
+                                        --iter->offset;
+                                        continue;
+                                }
+                                if (iter->end_sk < iter->max_sk) {
+                                        sock_hold(sk);
+                                        iter->batch[iter->end_sk++] = sk;
+                                }
+                                batch_sks++;
+                        }
+                }
+                spin_unlock_bh(&hslot2->lock);
+
+                if (iter->end_sk)
+                        break;
+
+                /* Reset the current bucket's offset before moving to the next bucket. */
+                iter->offset = 0;
+        }
+
+        /* All done: no batch made. */
+        if (!iter->end_sk)
+                return NULL;
+
+        if (iter->end_sk == batch_sks) {
+                /* Batching is done for the current bucket; return the first
+                 * socket to be iterated from the batch.
+                 */
+                iter->st_bucket_done = true;
+                goto done;
+        }
+        if (!resized && !bpf_iter_udp_realloc_batch(iter, batch_sks * 3 / 2)) {
+                resized = true;
+                /* After allocating a larger batch, retry one more time to grab
+                 * the whole bucket.
+                 */
+                state->bucket--;
+                goto again;
+        }
+done:
+        return iter->batch[0];
+}
+
+static void *bpf_iter_udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+        struct bpf_udp_iter_state *iter = seq->private;
+        struct sock *sk;
+
+        /* Whenever seq_next() is called, the iter->cur_sk is
+         * done with seq_show(), so unref the iter->cur_sk.
+         */
+        if (iter->cur_sk < iter->end_sk) {
+                sock_put(iter->batch[iter->cur_sk++]);
+                ++iter->offset;
+        }
+
+        /* After updating iter->cur_sk, check if there are more sockets
+         * available in the current bucket batch.
+         */
+        if (iter->cur_sk < iter->end_sk)
+                sk = iter->batch[iter->cur_sk];
+        else
+                /* Prepare a new batch. */
+                sk = bpf_iter_udp_batch(seq);
+
+        ++*pos;
+        return sk;
+}
+
+static void *bpf_iter_udp_seq_start(struct seq_file *seq, loff_t *pos)
+{
+        /* bpf iter does not support lseek, so it always
+         * continue from where it was stop()-ped.
+         */
+        if (*pos)
+                return bpf_iter_udp_batch(seq);
+
+        return SEQ_START_TOKEN;
+}
+
 static int udp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
                              struct udp_sock *udp_sk, uid_t uid, int bucket)
 {
···
         struct bpf_prog *prog;
         struct sock *sk = v;
         uid_t uid;
+        int ret;

         if (v == SEQ_START_TOKEN)
                 return 0;

+        lock_sock(sk);
+
+        if (unlikely(sk_unhashed(sk))) {
+                ret = SEQ_SKIP;
+                goto unlock;
+        }
+
         uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
         meta.seq = seq;
         prog = bpf_iter_get_info(&meta, false);
-        return udp_prog_seq_show(prog, &meta, v, uid, state->bucket);
+        ret = udp_prog_seq_show(prog, &meta, v, uid, state->bucket);
+
+unlock:
+        release_sock(sk);
+        return ret;
+}
+
+static void bpf_iter_udp_put_batch(struct bpf_udp_iter_state *iter)
+{
+        while (iter->cur_sk < iter->end_sk)
+                sock_put(iter->batch[iter->cur_sk++]);
 }

 static void bpf_iter_udp_seq_stop(struct seq_file *seq, void *v)
 {
+        struct bpf_udp_iter_state *iter = seq->private;
         struct bpf_iter_meta meta;
         struct bpf_prog *prog;

···
                 (void)udp_prog_seq_show(prog, &meta, v, 0, 0);
         }

-        udp_seq_stop(seq, v);
+        if (iter->cur_sk < iter->end_sk) {
+                bpf_iter_udp_put_batch(iter);
+                iter->st_bucket_done = false;
+        }
 }

 static const struct seq_operations bpf_iter_udp_seq_ops = {
-        .start          = udp_seq_start,
-        .next           = udp_seq_next,
+        .start          = bpf_iter_udp_seq_start,
+        .next           = bpf_iter_udp_seq_next,
         .stop           = bpf_iter_udp_seq_stop,
         .show           = bpf_iter_udp_seq_show,
 };
 #endif
+
+static unsigned short seq_file_family(const struct seq_file *seq)
+{
+        const struct udp_seq_afinfo *afinfo;
+
+#ifdef CONFIG_BPF_SYSCALL
+        /* BPF iterator: bpf programs to filter sockets. */
+        if (seq->op == &bpf_iter_udp_seq_ops)
+                return AF_UNSPEC;
+#endif
+
+        /* Proc fs iterator */
+        afinfo = pde_data(file_inode(seq->file));
+        return afinfo->family;
+}

 const struct seq_operations udp_seq_ops = {
         .start          = udp_seq_start,
···
 DEFINE_BPF_ITER_FUNC(udp, struct bpf_iter_meta *meta,
                      struct udp_sock *udp_sk, uid_t uid, int bucket)

-static int bpf_iter_init_udp(void *priv_data, struct bpf_iter_aux_info *aux)
+static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter,
+                                      unsigned int new_batch_sz)
 {
-        struct udp_iter_state *st = priv_data;
-        struct udp_seq_afinfo *afinfo;
-        int ret;
+        struct sock **new_batch;

-        afinfo = kmalloc(sizeof(*afinfo), GFP_USER | __GFP_NOWARN);
-        if (!afinfo)
+        new_batch = kvmalloc_array(new_batch_sz, sizeof(*new_batch),
+                                   GFP_USER | __GFP_NOWARN);
+        if (!new_batch)
                 return -ENOMEM;

-        afinfo->family = AF_UNSPEC;
-        afinfo->udp_table = NULL;
-        st->bpf_seq_afinfo = afinfo;
+        bpf_iter_udp_put_batch(iter);
+        kvfree(iter->batch);
+        iter->batch = new_batch;
+        iter->max_sk = new_batch_sz;
+
+        return 0;
+}
+
+#define INIT_BATCH_SZ 16
+
+static int bpf_iter_init_udp(void *priv_data, struct bpf_iter_aux_info *aux)
+{
+        struct bpf_udp_iter_state *iter = priv_data;
+        int ret;
+
         ret = bpf_iter_init_seq_net(priv_data, aux);
         if (ret)
-                kfree(afinfo);
+                return ret;
+
+        ret = bpf_iter_udp_realloc_batch(iter, INIT_BATCH_SZ);
+        if (ret)
+                bpf_iter_fini_seq_net(priv_data);
+
         return ret;
 }

 static void bpf_iter_fini_udp(void *priv_data)
 {
-        struct udp_iter_state *st = priv_data;
+        struct bpf_udp_iter_state *iter = priv_data;

-        kfree(st->bpf_seq_afinfo);
         bpf_iter_fini_seq_net(priv_data);
+        kvfree(iter->batch);
 }

 static const struct bpf_iter_seq_info udp_seq_info = {
         .seq_ops                = &bpf_iter_udp_seq_ops,
         .init_seq_private       = bpf_iter_init_udp,
         .fini_seq_private       = bpf_iter_fini_udp,
-        .seq_priv_size          = sizeof(struct udp_iter_state),
+        .seq_priv_size          = sizeof(struct bpf_udp_iter_state),
 };

 static struct bpf_iter_reg udp_reg_info = {
···
         .ctx_arg_info_size      = 1,
         .ctx_arg_info           = {
                 { offsetof(struct bpf_iter__udp, udp_sk),
-                  PTR_TO_BTF_ID_OR_NULL },
+                  PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED },
         },
         .seq_info               = &udp_seq_info,
 };
+23
tools/testing/selftests/bpf/network_helpers.c
···
         close(token->orig_netns_fd);
         free(token);
 }
+
+int get_socket_local_port(int sock_fd)
+{
+        struct sockaddr_storage addr;
+        socklen_t addrlen = sizeof(addr);
+        int err;
+
+        err = getsockname(sock_fd, (struct sockaddr *)&addr, &addrlen);
+        if (err < 0)
+                return err;
+
+        if (addr.ss_family == AF_INET) {
+                struct sockaddr_in *sin = (struct sockaddr_in *)&addr;
+
+                return sin->sin_port;
+        } else if (addr.ss_family == AF_INET6) {
+                struct sockaddr_in6 *sin = (struct sockaddr_in6 *)&addr;
+
+                return sin->sin6_port;
+        }
+
+        return -1;
+}
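
Worth noting: get_socket_local_port() returns the port still in network
byte order (sin_port/sin6_port are not converted), which is why the tests
below assign the result straight into a __be16 BPF global. A hypothetical
usage sketch (the function name here is illustrative):

    #include <arpa/inet.h>
    #include <stdio.h>
    #include <unistd.h>
    #include "network_helpers.h"

    static void print_bound_port(void)
    {
            /* start_server binds to an ephemeral port when port is 0. */
            int fd = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
            int port;

            if (fd < 0)
                    return;
            port = get_socket_local_port(fd); /* network byte order, or < 0 on error */
            if (port >= 0)
                    printf("server bound to port %u\n", ntohs((unsigned short)port));
            close(fd);
    }
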
+1
tools/testing/selftests/bpf/network_helpers.h
···
 int make_sockaddr(int family, const char *addr_str, __u16 port,
                   struct sockaddr_storage *addr, socklen_t *len);
 char *ping_command(int family);
+int get_socket_local_port(int sock_fd);

 struct nstoken;
 /**
+221
tools/testing/selftests/bpf/prog_tests/sock_destroy.c
···
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <bpf/bpf_endian.h>
+
+#include "sock_destroy_prog.skel.h"
+#include "sock_destroy_prog_fail.skel.h"
+#include "network_helpers.h"
+
+#define TEST_NS "sock_destroy_netns"
+
+static void start_iter_sockets(struct bpf_program *prog)
+{
+        struct bpf_link *link;
+        char buf[50] = {};
+        int iter_fd, len;
+
+        link = bpf_program__attach_iter(prog, NULL);
+        if (!ASSERT_OK_PTR(link, "attach_iter"))
+                return;
+
+        iter_fd = bpf_iter_create(bpf_link__fd(link));
+        if (!ASSERT_GE(iter_fd, 0, "create_iter"))
+                goto free_link;
+
+        while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
+                ;
+        ASSERT_GE(len, 0, "read");
+
+        close(iter_fd);
+
+free_link:
+        bpf_link__destroy(link);
+}
+
+static void test_tcp_client(struct sock_destroy_prog *skel)
+{
+        int serv = -1, clien = -1, accept_serv = -1, n;
+
+        serv = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
+        if (!ASSERT_GE(serv, 0, "start_server"))
+                goto cleanup;
+
+        clien = connect_to_fd(serv, 0);
+        if (!ASSERT_GE(clien, 0, "connect_to_fd"))
+                goto cleanup;
+
+        accept_serv = accept(serv, NULL, NULL);
+        if (!ASSERT_GE(accept_serv, 0, "serv accept"))
+                goto cleanup;
+
+        n = send(clien, "t", 1, 0);
+        if (!ASSERT_EQ(n, 1, "client send"))
+                goto cleanup;
+
+        /* Run iterator program that destroys connected client sockets. */
+        start_iter_sockets(skel->progs.iter_tcp6_client);
+
+        n = send(clien, "t", 1, 0);
+        if (!ASSERT_LT(n, 0, "client_send on destroyed socket"))
+                goto cleanup;
+        ASSERT_EQ(errno, ECONNABORTED, "error code on destroyed socket");
+
+cleanup:
+        if (clien != -1)
+                close(clien);
+        if (accept_serv != -1)
+                close(accept_serv);
+        if (serv != -1)
+                close(serv);
+}
+
+static void test_tcp_server(struct sock_destroy_prog *skel)
+{
+        int serv = -1, clien = -1, accept_serv = -1, n, serv_port;
+
+        serv = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
+        if (!ASSERT_GE(serv, 0, "start_server"))
+                goto cleanup;
+        serv_port = get_socket_local_port(serv);
+        if (!ASSERT_GE(serv_port, 0, "get_sock_local_port"))
+                goto cleanup;
+        skel->bss->serv_port = (__be16) serv_port;
+
+        clien = connect_to_fd(serv, 0);
+        if (!ASSERT_GE(clien, 0, "connect_to_fd"))
+                goto cleanup;
+
+        accept_serv = accept(serv, NULL, NULL);
+        if (!ASSERT_GE(accept_serv, 0, "serv accept"))
+                goto cleanup;
+
+        n = send(clien, "t", 1, 0);
+        if (!ASSERT_EQ(n, 1, "client send"))
+                goto cleanup;
+
+        /* Run iterator program that destroys server sockets. */
+        start_iter_sockets(skel->progs.iter_tcp6_server);
+
+        n = send(clien, "t", 1, 0);
+        if (!ASSERT_LT(n, 0, "client_send on destroyed socket"))
+                goto cleanup;
+        ASSERT_EQ(errno, ECONNRESET, "error code on destroyed socket");
+
+cleanup:
+        if (clien != -1)
+                close(clien);
+        if (accept_serv != -1)
+                close(accept_serv);
+        if (serv != -1)
+                close(serv);
+}
+
+static void test_udp_client(struct sock_destroy_prog *skel)
+{
+        int serv = -1, clien = -1, n = 0;
+
+        serv = start_server(AF_INET6, SOCK_DGRAM, NULL, 0, 0);
+        if (!ASSERT_GE(serv, 0, "start_server"))
+                goto cleanup;
+
+        clien = connect_to_fd(serv, 0);
+        if (!ASSERT_GE(clien, 0, "connect_to_fd"))
+                goto cleanup;
+
+        n = send(clien, "t", 1, 0);
+        if (!ASSERT_EQ(n, 1, "client send"))
+                goto cleanup;
+
+        /* Run iterator program that destroys sockets. */
+        start_iter_sockets(skel->progs.iter_udp6_client);
+
+        n = send(clien, "t", 1, 0);
+        if (!ASSERT_LT(n, 0, "client_send on destroyed socket"))
+                goto cleanup;
+        /* UDP sockets have an overriding error code after they are disconnected,
+         * so we don't check for ECONNABORTED error code.
+         */
+
+cleanup:
+        if (clien != -1)
+                close(clien);
+        if (serv != -1)
+                close(serv);
+}
+
+static void test_udp_server(struct sock_destroy_prog *skel)
+{
+        int *listen_fds = NULL, n, i, serv_port;
+        unsigned int num_listens = 5;
+        char buf[1];
+
+        /* Start reuseport servers. */
+        listen_fds = start_reuseport_server(AF_INET6, SOCK_DGRAM,
+                                            "::1", 0, 0, num_listens);
+        if (!ASSERT_OK_PTR(listen_fds, "start_reuseport_server"))
+                goto cleanup;
+        serv_port = get_socket_local_port(listen_fds[0]);
+        if (!ASSERT_GE(serv_port, 0, "get_sock_local_port"))
+                goto cleanup;
+        skel->bss->serv_port = (__be16) serv_port;
+
+        /* Run iterator program that destroys server sockets. */
+        start_iter_sockets(skel->progs.iter_udp6_server);
+
+        for (i = 0; i < num_listens; ++i) {
+                n = read(listen_fds[i], buf, sizeof(buf));
+                if (!ASSERT_EQ(n, -1, "read") ||
+                    !ASSERT_EQ(errno, ECONNABORTED, "error code on destroyed socket"))
+                        break;
+        }
+        ASSERT_EQ(i, num_listens, "server socket");
+
+cleanup:
+        free_fds(listen_fds, num_listens);
+}
+
+void test_sock_destroy(void)
+{
+        struct sock_destroy_prog *skel;
+        struct nstoken *nstoken = NULL;
+        int cgroup_fd;
+
+        skel = sock_destroy_prog__open_and_load();
+        if (!ASSERT_OK_PTR(skel, "skel_open"))
+                return;
+
+        cgroup_fd = test__join_cgroup("/sock_destroy");
+        if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup"))
+                goto cleanup;
+
+        skel->links.sock_connect = bpf_program__attach_cgroup(
+                skel->progs.sock_connect, cgroup_fd);
+        if (!ASSERT_OK_PTR(skel->links.sock_connect, "prog_attach"))
+                goto cleanup;
+
+        SYS(cleanup, "ip netns add %s", TEST_NS);
+        SYS(cleanup, "ip -net %s link set dev lo up", TEST_NS);
+
+        nstoken = open_netns(TEST_NS);
+        if (!ASSERT_OK_PTR(nstoken, "open_netns"))
+                goto cleanup;
+
+        if (test__start_subtest("tcp_client"))
+                test_tcp_client(skel);
+        if (test__start_subtest("tcp_server"))
+                test_tcp_server(skel);
+        if (test__start_subtest("udp_client"))
+                test_udp_client(skel);
+        if (test__start_subtest("udp_server"))
+                test_udp_server(skel);
+
+        RUN_TESTS(sock_destroy_prog_fail);
+
+cleanup:
+        if (nstoken)
+                close_netns(nstoken);
+        SYS_NOFAIL("ip netns del " TEST_NS " &> /dev/null");
+        if (cgroup_fd >= 0)
+                close(cgroup_fd);
+        sock_destroy_prog__destroy(skel);
+}
+145
tools/testing/selftests/bpf/progs/sock_destroy_prog.c
···
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#include "bpf_tracing_net.h"
+
+__be16 serv_port = 0;
+
+int bpf_sock_destroy(struct sock_common *sk) __ksym;
+
+struct {
+        __uint(type, BPF_MAP_TYPE_ARRAY);
+        __uint(max_entries, 1);
+        __type(key, __u32);
+        __type(value, __u64);
+} tcp_conn_sockets SEC(".maps");
+
+struct {
+        __uint(type, BPF_MAP_TYPE_ARRAY);
+        __uint(max_entries, 1);
+        __type(key, __u32);
+        __type(value, __u64);
+} udp_conn_sockets SEC(".maps");
+
+SEC("cgroup/connect6")
+int sock_connect(struct bpf_sock_addr *ctx)
+{
+        __u64 sock_cookie = 0;
+        int key = 0;
+        __u32 keyc = 0;
+
+        if (ctx->family != AF_INET6 || ctx->user_family != AF_INET6)
+                return 1;
+
+        sock_cookie = bpf_get_socket_cookie(ctx);
+        if (ctx->protocol == IPPROTO_TCP)
+                bpf_map_update_elem(&tcp_conn_sockets, &key, &sock_cookie, 0);
+        else if (ctx->protocol == IPPROTO_UDP)
+                bpf_map_update_elem(&udp_conn_sockets, &keyc, &sock_cookie, 0);
+        else
+                return 1;
+
+        return 1;
+}
+
+SEC("iter/tcp")
+int iter_tcp6_client(struct bpf_iter__tcp *ctx)
+{
+        struct sock_common *sk_common = ctx->sk_common;
+        __u64 sock_cookie = 0;
+        __u64 *val;
+        int key = 0;
+
+        if (!sk_common)
+                return 0;
+
+        if (sk_common->skc_family != AF_INET6)
+                return 0;
+
+        sock_cookie = bpf_get_socket_cookie(sk_common);
+        val = bpf_map_lookup_elem(&tcp_conn_sockets, &key);
+        if (!val)
+                return 0;
+        /* Destroy connected client sockets. */
+        if (sock_cookie == *val)
+                bpf_sock_destroy(sk_common);
+
+        return 0;
+}
+
+SEC("iter/tcp")
+int iter_tcp6_server(struct bpf_iter__tcp *ctx)
+{
+        struct sock_common *sk_common = ctx->sk_common;
+        const struct inet_connection_sock *icsk;
+        const struct inet_sock *inet;
+        struct tcp6_sock *tcp_sk;
+        __be16 srcp;
+
+        if (!sk_common)
+                return 0;
+
+        if (sk_common->skc_family != AF_INET6)
+                return 0;
+
+        tcp_sk = bpf_skc_to_tcp6_sock(sk_common);
+        if (!tcp_sk)
+                return 0;
+
+        icsk = &tcp_sk->tcp.inet_conn;
+        inet = &icsk->icsk_inet;
+        srcp = inet->inet_sport;
+
+        /* Destroy server sockets. */
+        if (srcp == serv_port)
+                bpf_sock_destroy(sk_common);
+
+        return 0;
+}
+
+
+SEC("iter/udp")
+int iter_udp6_client(struct bpf_iter__udp *ctx)
+{
+        struct udp_sock *udp_sk = ctx->udp_sk;
+        struct sock *sk = (struct sock *) udp_sk;
+        __u64 sock_cookie = 0, *val;
+        int key = 0;
+
+        if (!sk)
+                return 0;
+
+        sock_cookie = bpf_get_socket_cookie(sk);
+        val = bpf_map_lookup_elem(&udp_conn_sockets, &key);
+        if (!val)
+                return 0;
+        /* Destroy connected client sockets. */
+        if (sock_cookie == *val)
+                bpf_sock_destroy((struct sock_common *)sk);
+
+        return 0;
+}
+
+SEC("iter/udp")
+int iter_udp6_server(struct bpf_iter__udp *ctx)
+{
+        struct udp_sock *udp_sk = ctx->udp_sk;
+        struct sock *sk = (struct sock *) udp_sk;
+        struct inet_sock *inet;
+        __be16 srcp;
+
+        if (!sk)
+                return 0;
+
+        inet = &udp_sk->inet;
+        srcp = inet->inet_sport;
+        if (srcp == serv_port)
+                bpf_sock_destroy((struct sock_common *)sk);
+
+        return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+22
tools/testing/selftests/bpf/progs/sock_destroy_prog_fail.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include "vmlinux.h" 4 + #include <bpf/bpf_tracing.h> 5 + #include <bpf/bpf_helpers.h> 6 + 7 + #include "bpf_misc.h" 8 + 9 + char _license[] SEC("license") = "GPL"; 10 + 11 + int bpf_sock_destroy(struct sock_common *sk) __ksym; 12 + 13 + SEC("tp_btf/tcp_destroy_sock") 14 + __failure __msg("calling kernel function bpf_sock_destroy is not allowed") 15 + int BPF_PROG(trace_tcp_destroy_sock, struct sock *sk) 16 + { 17 + /* should not load */ 18 + bpf_sock_destroy((struct sock_common *)sk); 19 + 20 + return 0; 21 + } 22 +