Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Pull bpf fixes from Alexei Starovoitov:

- Fix interaction between livepatch and BPF fexit programs (Song Liu)
With Steven and Masami acks.

- Fix stack ORC unwind from BPF kprobe_multi (Jiri Olsa)
With Steven and Masami acks.

- Fix out of bounds access in widen_imprecise_scalars() in the verifier
(Eduard Zingerman)

- Fix conflicts between MPTCP and BPF sockmap (Jiayuan Chen)

- Fix net_sched storage collision with BPF data_meta/data_end (Eric
Dumazet)

- Add _impl suffix to BPF kfuncs with implicit args to avoid breaking
them in bpf-next when KF_IMPLICIT_ARGS is added (Mykyta Yatsenko)

* tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
selftests/bpf: Test widen_imprecise_scalars() with different stack depth
bpf: account for current allocated stack depth in widen_imprecise_scalars()
bpf: Add bpf_prog_run_data_pointers()
selftests/bpf: Add mptcp test with sockmap
mptcp: Fix proto fallback detection with BPF
mptcp: Disallow MPTCP subflows from sockmap
selftests/bpf: Add stacktrace ips test for raw_tp
selftests/bpf: Add stacktrace ips test for kprobe_multi/kretprobe_multi
x86/fgraph,bpf: Fix stack ORC unwind from kprobe_multi return probe
Revert "perf/x86: Always store regs->ip in perf_callchain_kernel()"
bpf: add _impl suffix for bpf_stream_vprintk() kfunc
bpf: add _impl suffix for bpf_task_work_schedule* kfuncs
selftests/bpf: Add tests for livepatch + bpf trampoline
ftrace: bpf: Fix IPMODIFY + DIRECT in modify_ftrace_direct()
ftrace: Fix BPF fexit with livepatch

+761 -83
+5 -5
arch/x86/events/core.c
··· 2789 2789 return; 2790 2790 } 2791 2791 2792 - if (perf_callchain_store(entry, regs->ip)) 2793 - return; 2794 - 2795 - if (perf_hw_regs(regs)) 2792 + if (perf_hw_regs(regs)) { 2793 + if (perf_callchain_store(entry, regs->ip)) 2794 + return; 2796 2795 unwind_start(&state, current, regs, NULL); 2797 - else 2796 + } else { 2798 2797 unwind_start(&state, current, NULL, (void *)regs->sp); 2798 + } 2799 2799 2800 2800 for (; !unwind_done(&state); unwind_next_frame(&state)) { 2801 2801 addr = unwind_get_return_address(&state);
+5
arch/x86/include/asm/ftrace.h
··· 56 56 return &arch_ftrace_regs(fregs)->regs; 57 57 } 58 58 59 + #define arch_ftrace_partial_regs(regs) do { \ 60 + regs->flags &= ~X86_EFLAGS_FIXED; \ 61 + regs->cs = __KERNEL_CS; \ 62 + } while (0) 63 + 59 64 #define arch_ftrace_fill_perf_regs(fregs, _regs) do { \ 60 65 (_regs)->ip = arch_ftrace_regs(fregs)->regs.ip; \ 61 66 (_regs)->sp = arch_ftrace_regs(fregs)->regs.sp; \
+7 -1
arch/x86/kernel/ftrace_64.S
··· 354 354 UNWIND_HINT_UNDEFINED 355 355 ANNOTATE_NOENDBR 356 356 357 + /* Restore return_to_handler value that got eaten by previous ret instruction. */ 358 + subq $8, %rsp 359 + UNWIND_HINT_FUNC 360 + 357 361 /* Save ftrace_regs for function exit context */ 358 362 subq $(FRAME_SIZE), %rsp 359 363 360 364 movq %rax, RAX(%rsp) 361 365 movq %rdx, RDX(%rsp) 362 366 movq %rbp, RBP(%rsp) 367 + movq %rsp, RSP(%rsp) 363 368 movq %rsp, %rdi 364 369 365 370 call ftrace_return_to_handler ··· 373 368 movq RDX(%rsp), %rdx 374 369 movq RAX(%rsp), %rax 375 370 376 - addq $(FRAME_SIZE), %rsp 371 + addq $(FRAME_SIZE) + 8, %rsp 372 + 377 373 /* 378 374 * Jump back to the old return address. This cannot be JMP_NOSPEC rdi 379 375 * since IBT would demand that contain ENDBR, which simply isn't so for
+20
include/linux/filter.h
··· 901 901 cb->data_end = skb->data + skb_headlen(skb); 902 902 } 903 903 904 + static inline int bpf_prog_run_data_pointers( 905 + const struct bpf_prog *prog, 906 + struct sk_buff *skb) 907 + { 908 + struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb; 909 + void *save_data_meta, *save_data_end; 910 + int res; 911 + 912 + save_data_meta = cb->data_meta; 913 + save_data_end = cb->data_end; 914 + 915 + bpf_compute_data_pointers(skb); 916 + res = bpf_prog_run(prog, skb); 917 + 918 + cb->data_meta = save_data_meta; 919 + cb->data_end = save_data_end; 920 + 921 + return res; 922 + } 923 + 904 924 /* Similar to bpf_compute_data_pointers(), except that save orginal 905 925 * data in cb->data and cb->meta_data for restore. 906 926 */
+9 -1
include/linux/ftrace.h
··· 193 193 #if !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) || \ 194 194 defined(CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS) 195 195 196 + #ifndef arch_ftrace_partial_regs 197 + #define arch_ftrace_partial_regs(regs) do {} while (0) 198 + #endif 199 + 196 200 static __always_inline struct pt_regs * 197 201 ftrace_partial_regs(struct ftrace_regs *fregs, struct pt_regs *regs) 198 202 { ··· 206 202 * Since arch_ftrace_get_regs() will check some members and may return 207 203 * NULL, we can not use it. 208 204 */ 209 - return &arch_ftrace_regs(fregs)->regs; 205 + regs = &arch_ftrace_regs(fregs)->regs; 206 + 207 + /* Allow arch specific updates to regs. */ 208 + arch_ftrace_partial_regs(regs); 209 + return regs; 210 210 } 211 211 212 212 #endif /* !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS || CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS */
+15 -11
kernel/bpf/helpers.c
··· 4169 4169 } 4170 4170 4171 4171 /** 4172 - * bpf_task_work_schedule_signal - Schedule BPF callback using task_work_add with TWA_SIGNAL mode 4172 + * bpf_task_work_schedule_signal_impl - Schedule BPF callback using task_work_add with TWA_SIGNAL 4173 + * mode 4173 4174 * @task: Task struct for which callback should be scheduled 4174 4175 * @tw: Pointer to struct bpf_task_work in BPF map value for internal bookkeeping 4175 4176 * @map__map: bpf_map that embeds struct bpf_task_work in the values ··· 4179 4178 * 4180 4179 * Return: 0 if task work has been scheduled successfully, negative error code otherwise 4181 4180 */ 4182 - __bpf_kfunc int bpf_task_work_schedule_signal(struct task_struct *task, struct bpf_task_work *tw, 4183 - void *map__map, bpf_task_work_callback_t callback, 4184 - void *aux__prog) 4181 + __bpf_kfunc int bpf_task_work_schedule_signal_impl(struct task_struct *task, 4182 + struct bpf_task_work *tw, void *map__map, 4183 + bpf_task_work_callback_t callback, 4184 + void *aux__prog) 4185 4185 { 4186 4186 return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_SIGNAL); 4187 4187 } 4188 4188 4189 4189 /** 4190 - * bpf_task_work_schedule_resume - Schedule BPF callback using task_work_add with TWA_RESUME mode 4190 + * bpf_task_work_schedule_resume_impl - Schedule BPF callback using task_work_add with TWA_RESUME 4191 + * mode 4191 4192 * @task: Task struct for which callback should be scheduled 4192 4193 * @tw: Pointer to struct bpf_task_work in BPF map value for internal bookkeeping 4193 4194 * @map__map: bpf_map that embeds struct bpf_task_work in the values ··· 4198 4195 * 4199 4196 * Return: 0 if task work has been scheduled successfully, negative error code otherwise 4200 4197 */ 4201 - __bpf_kfunc int bpf_task_work_schedule_resume(struct task_struct *task, struct bpf_task_work *tw, 4202 - void *map__map, bpf_task_work_callback_t callback, 4203 - void *aux__prog) 4198 + __bpf_kfunc int bpf_task_work_schedule_resume_impl(struct 
task_struct *task, 4199 + struct bpf_task_work *tw, void *map__map, 4200 + bpf_task_work_callback_t callback, 4201 + void *aux__prog) 4204 4202 { 4205 4203 return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_RESUME); 4206 4204 } ··· 4380 4376 #if defined(CONFIG_BPF_LSM) && defined(CONFIG_CGROUPS) 4381 4377 BTF_ID_FLAGS(func, bpf_cgroup_read_xattr, KF_RCU) 4382 4378 #endif 4383 - BTF_ID_FLAGS(func, bpf_stream_vprintk, KF_TRUSTED_ARGS) 4384 - BTF_ID_FLAGS(func, bpf_task_work_schedule_signal, KF_TRUSTED_ARGS) 4385 - BTF_ID_FLAGS(func, bpf_task_work_schedule_resume, KF_TRUSTED_ARGS) 4379 + BTF_ID_FLAGS(func, bpf_stream_vprintk_impl, KF_TRUSTED_ARGS) 4380 + BTF_ID_FLAGS(func, bpf_task_work_schedule_signal_impl, KF_TRUSTED_ARGS) 4381 + BTF_ID_FLAGS(func, bpf_task_work_schedule_resume_impl, KF_TRUSTED_ARGS) 4386 4382 BTF_KFUNCS_END(common_btf_ids) 4387 4383 4388 4384 static const struct btf_kfunc_id_set common_kfunc_set = {
+2 -1
kernel/bpf/stream.c
··· 355 355 * Avoid using enum bpf_stream_id so that kfunc users don't have to pull in the 356 356 * enum in headers. 357 357 */ 358 - __bpf_kfunc int bpf_stream_vprintk(int stream_id, const char *fmt__str, const void *args, u32 len__sz, void *aux__prog) 358 + __bpf_kfunc int bpf_stream_vprintk_impl(int stream_id, const char *fmt__str, const void *args, 359 + u32 len__sz, void *aux__prog) 359 360 { 360 361 struct bpf_bprintf_data data = { 361 362 .get_bin_args = true,
-5
kernel/bpf/trampoline.c
··· 479 479 * BPF_TRAMP_F_SHARE_IPMODIFY is set, we can generate the 480 480 * trampoline again, and retry register. 481 481 */ 482 - /* reset fops->func and fops->trampoline for re-register */ 483 - tr->fops->func = NULL; 484 - tr->fops->trampoline = 0; 485 - 486 - /* free im memory and reallocate later */ 487 482 bpf_tramp_image_free(im); 488 483 goto again; 489 484 }
+10 -8
kernel/bpf/verifier.c
··· 8866 8866 struct bpf_verifier_state *cur) 8867 8867 { 8868 8868 struct bpf_func_state *fold, *fcur; 8869 - int i, fr; 8869 + int i, fr, num_slots; 8870 8870 8871 8871 reset_idmap_scratch(env); 8872 8872 for (fr = old->curframe; fr >= 0; fr--) { ··· 8879 8879 &fcur->regs[i], 8880 8880 &env->idmap_scratch); 8881 8881 8882 - for (i = 0; i < fold->allocated_stack / BPF_REG_SIZE; i++) { 8882 + num_slots = min(fold->allocated_stack / BPF_REG_SIZE, 8883 + fcur->allocated_stack / BPF_REG_SIZE); 8884 + for (i = 0; i < num_slots; i++) { 8883 8885 if (!is_spilled_reg(&fold->stack[i]) || 8884 8886 !is_spilled_reg(&fcur->stack[i])) 8885 8887 continue; ··· 12261 12259 KF_bpf_res_spin_lock_irqsave, 12262 12260 KF_bpf_res_spin_unlock_irqrestore, 12263 12261 KF___bpf_trap, 12264 - KF_bpf_task_work_schedule_signal, 12265 - KF_bpf_task_work_schedule_resume, 12262 + KF_bpf_task_work_schedule_signal_impl, 12263 + KF_bpf_task_work_schedule_resume_impl, 12266 12264 }; 12267 12265 12268 12266 BTF_ID_LIST(special_kfunc_list) ··· 12333 12331 BTF_ID(func, bpf_res_spin_lock_irqsave) 12334 12332 BTF_ID(func, bpf_res_spin_unlock_irqrestore) 12335 12333 BTF_ID(func, __bpf_trap) 12336 - BTF_ID(func, bpf_task_work_schedule_signal) 12337 - BTF_ID(func, bpf_task_work_schedule_resume) 12334 + BTF_ID(func, bpf_task_work_schedule_signal_impl) 12335 + BTF_ID(func, bpf_task_work_schedule_resume_impl) 12338 12336 12339 12337 static bool is_task_work_add_kfunc(u32 func_id) 12340 12338 { 12341 - return func_id == special_kfunc_list[KF_bpf_task_work_schedule_signal] || 12342 - func_id == special_kfunc_list[KF_bpf_task_work_schedule_resume]; 12339 + return func_id == special_kfunc_list[KF_bpf_task_work_schedule_signal_impl] || 12340 + func_id == special_kfunc_list[KF_bpf_task_work_schedule_resume_impl]; 12343 12341 } 12344 12342 12345 12343 static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
+45 -15
kernel/trace/ftrace.c
··· 1971 1971 */ 1972 1972 static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops, 1973 1973 struct ftrace_hash *old_hash, 1974 - struct ftrace_hash *new_hash) 1974 + struct ftrace_hash *new_hash, 1975 + bool update_target) 1975 1976 { 1976 1977 struct ftrace_page *pg; 1977 1978 struct dyn_ftrace *rec, *end = NULL; ··· 2007 2006 if (rec->flags & FTRACE_FL_DISABLED) 2008 2007 continue; 2009 2008 2010 - /* We need to update only differences of filter_hash */ 2009 + /* 2010 + * Unless we are updating the target of a direct function, 2011 + * we only need to update differences of filter_hash 2012 + */ 2011 2013 in_old = !!ftrace_lookup_ip(old_hash, rec->ip); 2012 2014 in_new = !!ftrace_lookup_ip(new_hash, rec->ip); 2013 - if (in_old == in_new) 2015 + if (!update_target && (in_old == in_new)) 2014 2016 continue; 2015 2017 2016 2018 if (in_new) { ··· 2024 2020 if (is_ipmodify) 2025 2021 goto rollback; 2026 2022 2027 - FTRACE_WARN_ON(rec->flags & FTRACE_FL_DIRECT); 2023 + /* 2024 + * If this is called by __modify_ftrace_direct() 2025 + * then it is only changing where the direct 2026 + * pointer is jumping to, and the record already 2027 + * points to a direct trampoline. If it isn't, 2028 + * then it is a bug to update ipmodify on a direct 2029 + * caller. 
2030 + */ 2031 + FTRACE_WARN_ON(!update_target && 2032 + (rec->flags & FTRACE_FL_DIRECT)); 2028 2033 2029 2034 /* 2030 2035 * Another ops with IPMODIFY is already ··· 2089 2076 if (ftrace_hash_empty(hash)) 2090 2077 hash = NULL; 2091 2078 2092 - return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash); 2079 + return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash, false); 2093 2080 } 2094 2081 2095 2082 /* Disabling always succeeds */ ··· 2100 2087 if (ftrace_hash_empty(hash)) 2101 2088 hash = NULL; 2102 2089 2103 - __ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH); 2090 + __ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH, false); 2104 2091 } 2105 2092 2106 2093 static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops, ··· 2114 2101 if (ftrace_hash_empty(new_hash)) 2115 2102 new_hash = NULL; 2116 2103 2117 - return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash); 2104 + return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash, false); 2118 2105 } 2119 2106 2120 2107 static void print_ip_ins(const char *fmt, const unsigned char *p) ··· 5966 5953 free_ftrace_hash(fhp); 5967 5954 } 5968 5955 5956 + static void reset_direct(struct ftrace_ops *ops, unsigned long addr) 5957 + { 5958 + struct ftrace_hash *hash = ops->func_hash->filter_hash; 5959 + 5960 + remove_direct_functions_hash(hash, addr); 5961 + 5962 + /* cleanup for possible another register call */ 5963 + ops->func = NULL; 5964 + ops->trampoline = 0; 5965 + } 5966 + 5969 5967 /** 5970 5968 * register_ftrace_direct - Call a custom trampoline directly 5971 5969 * for multiple functions registered in @ops ··· 6072 6048 ops->direct_call = addr; 6073 6049 6074 6050 err = register_ftrace_function_nolock(ops); 6051 + if (err) 6052 + reset_direct(ops, addr); 6075 6053 6076 6054 out_unlock: 6077 6055 mutex_unlock(&direct_mutex); ··· 6106 6080 int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr, 6107 6081 bool free_filters) 6108 6082 { 6109 - struct ftrace_hash *hash = 
ops->func_hash->filter_hash; 6110 6083 int err; 6111 6084 6112 6085 if (check_direct_multi(ops)) ··· 6115 6090 6116 6091 mutex_lock(&direct_mutex); 6117 6092 err = unregister_ftrace_function(ops); 6118 - remove_direct_functions_hash(hash, addr); 6093 + reset_direct(ops, addr); 6119 6094 mutex_unlock(&direct_mutex); 6120 - 6121 - /* cleanup for possible another register call */ 6122 - ops->func = NULL; 6123 - ops->trampoline = 0; 6124 6095 6125 6096 if (free_filters) 6126 6097 ftrace_free_filter(ops); ··· 6127 6106 static int 6128 6107 __modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr) 6129 6108 { 6130 - struct ftrace_hash *hash; 6109 + struct ftrace_hash *hash = ops->func_hash->filter_hash; 6131 6110 struct ftrace_func_entry *entry, *iter; 6132 6111 static struct ftrace_ops tmp_ops = { 6133 6112 .func = ftrace_stub, ··· 6148 6127 return err; 6149 6128 6150 6129 /* 6130 + * Call __ftrace_hash_update_ipmodify() here, so that we can call 6131 + * ops->ops_func for the ops. This is needed because the above 6132 + * register_ftrace_function_nolock() worked on tmp_ops. 6133 + */ 6134 + err = __ftrace_hash_update_ipmodify(ops, hash, hash, true); 6135 + if (err) 6136 + goto out; 6137 + 6138 + /* 6151 6139 * Now the ftrace_ops_list_func() is called to do the direct callers. 6152 6140 * We can safely change the direct functions attached to each entry. 6153 6141 */ 6154 6142 mutex_lock(&ftrace_lock); 6155 6143 6156 - hash = ops->func_hash->filter_hash; 6157 6144 size = 1 << hash->size_bits; 6158 6145 for (i = 0; i < size; i++) { 6159 6146 hlist_for_each_entry(iter, &hash->buckets[i], hlist) { ··· 6176 6147 6177 6148 mutex_unlock(&ftrace_lock); 6178 6149 6150 + out: 6179 6151 /* Removing the tmp_ops will add the updated direct callers to the functions */ 6180 6152 unregister_ftrace_function(&tmp_ops); 6181 6153
+4 -2
net/mptcp/protocol.c
··· 61 61 62 62 static const struct proto_ops *mptcp_fallback_tcp_ops(const struct sock *sk) 63 63 { 64 + unsigned short family = READ_ONCE(sk->sk_family); 65 + 64 66 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 65 - if (sk->sk_prot == &tcpv6_prot) 67 + if (family == AF_INET6) 66 68 return &inet6_stream_ops; 67 69 #endif 68 - WARN_ON_ONCE(sk->sk_prot != &tcp_prot); 70 + WARN_ON_ONCE(family != AF_INET); 69 71 return &inet_stream_ops; 70 72 } 71 73
+8
net/mptcp/subflow.c
··· 2144 2144 tcp_prot_override = tcp_prot; 2145 2145 tcp_prot_override.release_cb = tcp_release_cb_override; 2146 2146 tcp_prot_override.diag_destroy = tcp_abort_override; 2147 + #ifdef CONFIG_BPF_SYSCALL 2148 + /* Disable sockmap processing for subflows */ 2149 + tcp_prot_override.psock_update_sk_prot = NULL; 2150 + #endif 2147 2151 2148 2152 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 2149 2153 /* In struct mptcp_subflow_request_sock, we assume the TCP request sock ··· 2184 2180 tcpv6_prot_override = tcpv6_prot; 2185 2181 tcpv6_prot_override.release_cb = tcp_release_cb_override; 2186 2182 tcpv6_prot_override.diag_destroy = tcp_abort_override; 2183 + #ifdef CONFIG_BPF_SYSCALL 2184 + /* Disable sockmap processing for subflows */ 2185 + tcpv6_prot_override.psock_update_sk_prot = NULL; 2186 + #endif 2187 2187 #endif 2188 2188 2189 2189 mptcp_diag_subflow_init(&subflow_ulp_ops);
+2 -4
net/sched/act_bpf.c
··· 47 47 filter = rcu_dereference(prog->filter); 48 48 if (at_ingress) { 49 49 __skb_push(skb, skb->mac_len); 50 - bpf_compute_data_pointers(skb); 51 - filter_res = bpf_prog_run(filter, skb); 50 + filter_res = bpf_prog_run_data_pointers(filter, skb); 52 51 __skb_pull(skb, skb->mac_len); 53 52 } else { 54 - bpf_compute_data_pointers(skb); 55 - filter_res = bpf_prog_run(filter, skb); 53 + filter_res = bpf_prog_run_data_pointers(filter, skb); 56 54 } 57 55 if (unlikely(!skb->tstamp && skb->tstamp_type)) 58 56 skb->tstamp_type = SKB_CLOCK_REALTIME;
+2 -4
net/sched/cls_bpf.c
··· 97 97 } else if (at_ingress) { 98 98 /* It is safe to push/pull even if skb_shared() */ 99 99 __skb_push(skb, skb->mac_len); 100 - bpf_compute_data_pointers(skb); 101 - filter_res = bpf_prog_run(prog->filter, skb); 100 + filter_res = bpf_prog_run_data_pointers(prog->filter, skb); 102 101 __skb_pull(skb, skb->mac_len); 103 102 } else { 104 - bpf_compute_data_pointers(skb); 105 - filter_res = bpf_prog_run(prog->filter, skb); 103 + filter_res = bpf_prog_run_data_pointers(prog->filter, skb); 106 104 } 107 105 if (unlikely(!skb->tstamp && skb->tstamp_type)) 108 106 skb->tstamp_type = SKB_CLOCK_REALTIME;
+1 -1
tools/bpf/bpftool/Documentation/bpftool-prog.rst
··· 182 182 183 183 bpftool prog tracelog { stdout | stderr } *PROG* 184 184 Dump the BPF stream of the program. BPF programs can write to these streams 185 - at runtime with the **bpf_stream_vprintk**\ () kfunc. The kernel may write 185 + at runtime with the **bpf_stream_vprintk_impl**\ () kfunc. The kernel may write 186 186 error messages to the standard error stream. This facility should be used 187 187 only for debugging purposes. 188 188
+13 -13
tools/lib/bpf/bpf_helpers.h
··· 315 315 ___param, sizeof(___param)); \ 316 316 }) 317 317 318 - extern int bpf_stream_vprintk(int stream_id, const char *fmt__str, const void *args, 319 - __u32 len__sz, void *aux__prog) __weak __ksym; 318 + extern int bpf_stream_vprintk_impl(int stream_id, const char *fmt__str, const void *args, 319 + __u32 len__sz, void *aux__prog) __weak __ksym; 320 320 321 - #define bpf_stream_printk(stream_id, fmt, args...) \ 322 - ({ \ 323 - static const char ___fmt[] = fmt; \ 324 - unsigned long long ___param[___bpf_narg(args)]; \ 325 - \ 326 - _Pragma("GCC diagnostic push") \ 327 - _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ 328 - ___bpf_fill(___param, args); \ 329 - _Pragma("GCC diagnostic pop") \ 330 - \ 331 - bpf_stream_vprintk(stream_id, ___fmt, ___param, sizeof(___param), NULL);\ 321 + #define bpf_stream_printk(stream_id, fmt, args...) \ 322 + ({ \ 323 + static const char ___fmt[] = fmt; \ 324 + unsigned long long ___param[___bpf_narg(args)]; \ 325 + \ 326 + _Pragma("GCC diagnostic push") \ 327 + _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ 328 + ___bpf_fill(___param, args); \ 329 + _Pragma("GCC diagnostic pop") \ 330 + \ 331 + bpf_stream_vprintk_impl(stream_id, ___fmt, ___param, sizeof(___param), NULL); \ 332 332 }) 333 333 334 334 /* Use __bpf_printk when bpf_printk call has 3 or fewer fmt args
+3
tools/testing/selftests/bpf/config
··· 50 50 CONFIG_IPV6_TUNNEL=y 51 51 CONFIG_KEYS=y 52 52 CONFIG_LIRC=y 53 + CONFIG_LIVEPATCH=y 53 54 CONFIG_LWTUNNEL=y 54 55 CONFIG_MODULE_SIG=y 55 56 CONFIG_MODULE_SRCVERSION_ALL=y ··· 112 111 CONFIG_NF_NAT=y 113 112 CONFIG_PACKET=y 114 113 CONFIG_RC_CORE=y 114 + CONFIG_SAMPLES=y 115 + CONFIG_SAMPLE_LIVEPATCH=m 115 116 CONFIG_SECURITY=y 116 117 CONFIG_SECURITYFS=y 117 118 CONFIG_SYN_COOKIES=y
+107
tools/testing/selftests/bpf/prog_tests/livepatch_trampoline.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */ 3 + 4 + #include <test_progs.h> 5 + #include "testing_helpers.h" 6 + #include "livepatch_trampoline.skel.h" 7 + 8 + static int load_livepatch(void) 9 + { 10 + char path[4096]; 11 + 12 + /* CI will set KBUILD_OUTPUT */ 13 + snprintf(path, sizeof(path), "%s/samples/livepatch/livepatch-sample.ko", 14 + getenv("KBUILD_OUTPUT") ? : "../../../.."); 15 + 16 + return load_module(path, env_verbosity > VERBOSE_NONE); 17 + } 18 + 19 + static void unload_livepatch(void) 20 + { 21 + /* Disable the livepatch before unloading the module */ 22 + system("echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled"); 23 + 24 + unload_module("livepatch_sample", env_verbosity > VERBOSE_NONE); 25 + } 26 + 27 + static void read_proc_cmdline(void) 28 + { 29 + char buf[4096]; 30 + int fd, ret; 31 + 32 + fd = open("/proc/cmdline", O_RDONLY); 33 + if (!ASSERT_OK_FD(fd, "open /proc/cmdline")) 34 + return; 35 + 36 + ret = read(fd, buf, sizeof(buf)); 37 + if (!ASSERT_GT(ret, 0, "read /proc/cmdline")) 38 + goto out; 39 + 40 + ASSERT_OK(strncmp(buf, "this has been live patched", 26), "strncmp"); 41 + 42 + out: 43 + close(fd); 44 + } 45 + 46 + static void __test_livepatch_trampoline(bool fexit_first) 47 + { 48 + struct livepatch_trampoline *skel = NULL; 49 + int err; 50 + 51 + skel = livepatch_trampoline__open_and_load(); 52 + if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) 53 + goto out; 54 + 55 + skel->bss->my_pid = getpid(); 56 + 57 + if (!fexit_first) { 58 + /* fentry program is loaded first by default */ 59 + err = livepatch_trampoline__attach(skel); 60 + if (!ASSERT_OK(err, "skel_attach")) 61 + goto out; 62 + } else { 63 + /* Manually load fexit program first. 
*/ 64 + skel->links.fexit_cmdline = bpf_program__attach(skel->progs.fexit_cmdline); 65 + if (!ASSERT_OK_PTR(skel->links.fexit_cmdline, "attach_fexit")) 66 + goto out; 67 + 68 + skel->links.fentry_cmdline = bpf_program__attach(skel->progs.fentry_cmdline); 69 + if (!ASSERT_OK_PTR(skel->links.fentry_cmdline, "attach_fentry")) 70 + goto out; 71 + } 72 + 73 + read_proc_cmdline(); 74 + 75 + ASSERT_EQ(skel->bss->fentry_hit, 1, "fentry_hit"); 76 + ASSERT_EQ(skel->bss->fexit_hit, 1, "fexit_hit"); 77 + out: 78 + livepatch_trampoline__destroy(skel); 79 + } 80 + 81 + void test_livepatch_trampoline(void) 82 + { 83 + int retry_cnt = 0; 84 + 85 + retry: 86 + if (load_livepatch()) { 87 + if (retry_cnt) { 88 + ASSERT_OK(1, "load_livepatch"); 89 + goto out; 90 + } 91 + /* 92 + * Something else (previous run of the same test?) loaded 93 + * the KLP module. Unload the KLP module and retry. 94 + */ 95 + unload_livepatch(); 96 + retry_cnt++; 97 + goto retry; 98 + } 99 + 100 + if (test__start_subtest("fentry_first")) 101 + __test_livepatch_trampoline(false); 102 + 103 + if (test__start_subtest("fexit_first")) 104 + __test_livepatch_trampoline(true); 105 + out: 106 + unload_livepatch(); 107 + }
+140
tools/testing/selftests/bpf/prog_tests/mptcp.c
··· 6 6 #include <netinet/in.h> 7 7 #include <test_progs.h> 8 8 #include <unistd.h> 9 + #include <errno.h> 9 10 #include "cgroup_helpers.h" 10 11 #include "network_helpers.h" 11 12 #include "mptcp_sock.skel.h" 12 13 #include "mptcpify.skel.h" 13 14 #include "mptcp_subflow.skel.h" 15 + #include "mptcp_sockmap.skel.h" 14 16 15 17 #define NS_TEST "mptcp_ns" 16 18 #define ADDR_1 "10.0.1.1" ··· 438 436 close(cgroup_fd); 439 437 } 440 438 439 + /* Test sockmap on MPTCP server handling non-mp-capable clients. */ 440 + static void test_sockmap_with_mptcp_fallback(struct mptcp_sockmap *skel) 441 + { 442 + int listen_fd = -1, client_fd1 = -1, client_fd2 = -1; 443 + int server_fd1 = -1, server_fd2 = -1, sent, recvd; 444 + char snd[9] = "123456789"; 445 + char rcv[10]; 446 + 447 + /* start server with MPTCP enabled */ 448 + listen_fd = start_mptcp_server(AF_INET, NULL, 0, 0); 449 + if (!ASSERT_OK_FD(listen_fd, "sockmap-fb:start_mptcp_server")) 450 + return; 451 + 452 + skel->bss->trace_port = ntohs(get_socket_local_port(listen_fd)); 453 + skel->bss->sk_index = 0; 454 + /* create client without MPTCP enabled */ 455 + client_fd1 = connect_to_fd_opts(listen_fd, NULL); 456 + if (!ASSERT_OK_FD(client_fd1, "sockmap-fb:connect_to_fd")) 457 + goto end; 458 + 459 + server_fd1 = accept(listen_fd, NULL, 0); 460 + skel->bss->sk_index = 1; 461 + client_fd2 = connect_to_fd_opts(listen_fd, NULL); 462 + if (!ASSERT_OK_FD(client_fd2, "sockmap-fb:connect_to_fd")) 463 + goto end; 464 + 465 + server_fd2 = accept(listen_fd, NULL, 0); 466 + /* test normal redirect behavior: data sent by client_fd1 can be 467 + * received by client_fd2 468 + */ 469 + skel->bss->redirect_idx = 1; 470 + sent = send(client_fd1, snd, sizeof(snd), 0); 471 + if (!ASSERT_EQ(sent, sizeof(snd), "sockmap-fb:send(client_fd1)")) 472 + goto end; 473 + 474 + /* try to recv more bytes to avoid truncation check */ 475 + recvd = recv(client_fd2, rcv, sizeof(rcv), 0); 476 + if (!ASSERT_EQ(recvd, sizeof(snd), 
"sockmap-fb:recv(client_fd2)")) 477 + goto end; 478 + 479 + end: 480 + if (client_fd1 >= 0) 481 + close(client_fd1); 482 + if (client_fd2 >= 0) 483 + close(client_fd2); 484 + if (server_fd1 >= 0) 485 + close(server_fd1); 486 + if (server_fd2 >= 0) 487 + close(server_fd2); 488 + close(listen_fd); 489 + } 490 + 491 + /* Test sockmap rejection of MPTCP sockets - both server and client sides. */ 492 + static void test_sockmap_reject_mptcp(struct mptcp_sockmap *skel) 493 + { 494 + int listen_fd = -1, server_fd = -1, client_fd1 = -1; 495 + int err, zero = 0; 496 + 497 + /* start server with MPTCP enabled */ 498 + listen_fd = start_mptcp_server(AF_INET, NULL, 0, 0); 499 + if (!ASSERT_OK_FD(listen_fd, "start_mptcp_server")) 500 + return; 501 + 502 + skel->bss->trace_port = ntohs(get_socket_local_port(listen_fd)); 503 + skel->bss->sk_index = 0; 504 + /* create client with MPTCP enabled */ 505 + client_fd1 = connect_to_fd(listen_fd, 0); 506 + if (!ASSERT_OK_FD(client_fd1, "connect_to_fd client_fd1")) 507 + goto end; 508 + 509 + /* bpf_sock_map_update() called from sockops should reject MPTCP sk */ 510 + if (!ASSERT_EQ(skel->bss->helper_ret, -EOPNOTSUPP, "should reject")) 511 + goto end; 512 + 513 + server_fd = accept(listen_fd, NULL, 0); 514 + err = bpf_map_update_elem(bpf_map__fd(skel->maps.sock_map), 515 + &zero, &server_fd, BPF_NOEXIST); 516 + if (!ASSERT_EQ(err, -EOPNOTSUPP, "server should be disallowed")) 517 + goto end; 518 + 519 + /* MPTCP client should also be disallowed */ 520 + err = bpf_map_update_elem(bpf_map__fd(skel->maps.sock_map), 521 + &zero, &client_fd1, BPF_NOEXIST); 522 + if (!ASSERT_EQ(err, -EOPNOTSUPP, "client should be disallowed")) 523 + goto end; 524 + end: 525 + if (client_fd1 >= 0) 526 + close(client_fd1); 527 + if (server_fd >= 0) 528 + close(server_fd); 529 + close(listen_fd); 530 + } 531 + 532 + static void test_mptcp_sockmap(void) 533 + { 534 + struct mptcp_sockmap *skel; 535 + struct netns_obj *netns; 536 + int cgroup_fd, err; 537 + 538 + 
cgroup_fd = test__join_cgroup("/mptcp_sockmap"); 539 + if (!ASSERT_OK_FD(cgroup_fd, "join_cgroup: mptcp_sockmap")) 540 + return; 541 + 542 + skel = mptcp_sockmap__open_and_load(); 543 + if (!ASSERT_OK_PTR(skel, "skel_open_load: mptcp_sockmap")) 544 + goto close_cgroup; 545 + 546 + skel->links.mptcp_sockmap_inject = 547 + bpf_program__attach_cgroup(skel->progs.mptcp_sockmap_inject, cgroup_fd); 548 + if (!ASSERT_OK_PTR(skel->links.mptcp_sockmap_inject, "attach sockmap")) 549 + goto skel_destroy; 550 + 551 + err = bpf_prog_attach(bpf_program__fd(skel->progs.mptcp_sockmap_redirect), 552 + bpf_map__fd(skel->maps.sock_map), 553 + BPF_SK_SKB_STREAM_VERDICT, 0); 554 + if (!ASSERT_OK(err, "bpf_prog_attach stream verdict")) 555 + goto skel_destroy; 556 + 557 + netns = netns_new(NS_TEST, true); 558 + if (!ASSERT_OK_PTR(netns, "netns_new: mptcp_sockmap")) 559 + goto skel_destroy; 560 + 561 + if (endpoint_init("subflow") < 0) 562 + goto close_netns; 563 + 564 + test_sockmap_with_mptcp_fallback(skel); 565 + test_sockmap_reject_mptcp(skel); 566 + 567 + close_netns: 568 + netns_free(netns); 569 + skel_destroy: 570 + mptcp_sockmap__destroy(skel); 571 + close_cgroup: 572 + close(cgroup_fd); 573 + } 574 + 441 575 void test_mptcp(void) 442 576 { 443 577 if (test__start_subtest("base")) ··· 582 444 test_mptcpify(); 583 445 if (test__start_subtest("subflow")) 584 446 test_subflow(); 447 + if (test__start_subtest("sockmap")) 448 + test_mptcp_sockmap(); 585 449 }
+150
tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <test_progs.h> 3 + #include "stacktrace_ips.skel.h" 4 + 5 + #ifdef __x86_64__ 6 + static int check_stacktrace_ips(int fd, __u32 key, int cnt, ...) 7 + { 8 + __u64 ips[PERF_MAX_STACK_DEPTH]; 9 + struct ksyms *ksyms = NULL; 10 + int i, err = 0; 11 + va_list args; 12 + 13 + /* sorted by addr */ 14 + ksyms = load_kallsyms_local(); 15 + if (!ASSERT_OK_PTR(ksyms, "load_kallsyms_local")) 16 + return -1; 17 + 18 + /* unlikely, but... */ 19 + if (!ASSERT_LT(cnt, PERF_MAX_STACK_DEPTH, "check_max")) 20 + return -1; 21 + 22 + err = bpf_map_lookup_elem(fd, &key, ips); 23 + if (err) 24 + goto out; 25 + 26 + /* 27 + * Compare all symbols provided via arguments with stacktrace ips, 28 + * and their related symbol addresses.t 29 + */ 30 + va_start(args, cnt); 31 + 32 + for (i = 0; i < cnt; i++) { 33 + unsigned long val; 34 + struct ksym *ksym; 35 + 36 + val = va_arg(args, unsigned long); 37 + ksym = ksym_search_local(ksyms, ips[i]); 38 + if (!ASSERT_OK_PTR(ksym, "ksym_search_local")) 39 + break; 40 + ASSERT_EQ(ksym->addr, val, "stack_cmp"); 41 + } 42 + 43 + va_end(args); 44 + 45 + out: 46 + free_kallsyms_local(ksyms); 47 + return err; 48 + } 49 + 50 + static void test_stacktrace_ips_kprobe_multi(bool retprobe) 51 + { 52 + LIBBPF_OPTS(bpf_kprobe_multi_opts, opts, 53 + .retprobe = retprobe 54 + ); 55 + LIBBPF_OPTS(bpf_test_run_opts, topts); 56 + struct stacktrace_ips *skel; 57 + 58 + skel = stacktrace_ips__open_and_load(); 59 + if (!ASSERT_OK_PTR(skel, "stacktrace_ips__open_and_load")) 60 + return; 61 + 62 + if (!skel->kconfig->CONFIG_UNWINDER_ORC) { 63 + test__skip(); 64 + goto cleanup; 65 + } 66 + 67 + skel->links.kprobe_multi_test = bpf_program__attach_kprobe_multi_opts( 68 + skel->progs.kprobe_multi_test, 69 + "bpf_testmod_stacktrace_test", &opts); 70 + if (!ASSERT_OK_PTR(skel->links.kprobe_multi_test, "bpf_program__attach_kprobe_multi_opts")) 71 + goto cleanup; 72 + 73 + trigger_module_test_read(1); 74 + 75 + 
load_kallsyms(); 76 + 77 + check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 4, 78 + ksym_get_addr("bpf_testmod_stacktrace_test_3"), 79 + ksym_get_addr("bpf_testmod_stacktrace_test_2"), 80 + ksym_get_addr("bpf_testmod_stacktrace_test_1"), 81 + ksym_get_addr("bpf_testmod_test_read")); 82 + 83 + cleanup: 84 + stacktrace_ips__destroy(skel); 85 + } 86 + 87 + static void test_stacktrace_ips_raw_tp(void) 88 + { 89 + __u32 info_len = sizeof(struct bpf_prog_info); 90 + LIBBPF_OPTS(bpf_test_run_opts, topts); 91 + struct bpf_prog_info info = {}; 92 + struct stacktrace_ips *skel; 93 + __u64 bpf_prog_ksym = 0; 94 + int err; 95 + 96 + skel = stacktrace_ips__open_and_load(); 97 + if (!ASSERT_OK_PTR(skel, "stacktrace_ips__open_and_load")) 98 + return; 99 + 100 + if (!skel->kconfig->CONFIG_UNWINDER_ORC) { 101 + test__skip(); 102 + goto cleanup; 103 + } 104 + 105 + skel->links.rawtp_test = bpf_program__attach_raw_tracepoint( 106 + skel->progs.rawtp_test, 107 + "bpf_testmod_test_read"); 108 + if (!ASSERT_OK_PTR(skel->links.rawtp_test, "bpf_program__attach_raw_tracepoint")) 109 + goto cleanup; 110 + 111 + /* get bpf program address */ 112 + info.jited_ksyms = ptr_to_u64(&bpf_prog_ksym); 113 + info.nr_jited_ksyms = 1; 114 + err = bpf_prog_get_info_by_fd(bpf_program__fd(skel->progs.rawtp_test), 115 + &info, &info_len); 116 + if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd")) 117 + goto cleanup; 118 + 119 + trigger_module_test_read(1); 120 + 121 + load_kallsyms(); 122 + 123 + check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 2, 124 + bpf_prog_ksym, 125 + ksym_get_addr("bpf_trace_run2")); 126 + 127 + cleanup: 128 + stacktrace_ips__destroy(skel); 129 + } 130 + 131 + static void __test_stacktrace_ips(void) 132 + { 133 + if (test__start_subtest("kprobe_multi")) 134 + test_stacktrace_ips_kprobe_multi(false); 135 + if (test__start_subtest("kretprobe_multi")) 136 + test_stacktrace_ips_kprobe_multi(true); 137 + if 
(test__start_subtest("raw_tp")) 138 + test_stacktrace_ips_raw_tp(); 139 + } 140 + #else 141 + static void __test_stacktrace_ips(void) 142 + { 143 + test__skip(); 144 + } 145 + #endif 146 + 147 + void test_stacktrace_ips(void) 148 + { 149 + __test_stacktrace_ips(); 150 + }
+53
tools/testing/selftests/bpf/progs/iters_looping.c
	return 0;
}

/*
 * Subprogram whose allocated stack depth depends on its argument (in r1):
 * for r1 == 42 only the 8-byte iterator slot is touched, otherwise the
 * write at r10-128 grows the tracked stack depth to 128 bytes.
 */
__used
static void iterator_with_diff_stack_depth(int x)
{
	struct bpf_iter_num iter;

	asm volatile (
		"if r1 == 42 goto 0f;"
		"*(u64 *)(r10 - 128) = 0;"
		"0:"
		/* create iterator */
		"r1 = %[iter];"
		"r2 = 0;"
		"r3 = 10;"
		"call %[bpf_iter_num_new];"
		"1:"
		/* consume next item */
		"r1 = %[iter];"
		"call %[bpf_iter_num_next];"
		"if r0 == 0 goto 2f;"
		"goto 1b;"
		"2:"
		/* destroy iterator */
		"r1 = %[iter];"
		"call %[bpf_iter_num_destroy];"
		:
		: __imm_ptr(iter), ITER_HELPERS
		: __clobber_common, "r6"
	);
}

SEC("socket")
__success
__naked int widening_stack_size_bug(void *ctx)
{
	/*
	 * Depending on iterator_with_diff_stack_depth() parameter value,
	 * subprogram stack depth is either 8 or 128 bytes. Arrange values so
	 * that it is 128 on a first call and 8 on a second. This triggered a
	 * bug in verifier's widen_imprecise_scalars() logic.
	 */
	asm volatile (
		"r6 = 0;"           /* loop counter */
		"r1 = 0;"           /* first call: deep (128-byte) stack */
		"1:"
		"call iterator_with_diff_stack_depth;"
		"r1 = 42;"          /* second call: shallow (8-byte) stack */
		"r6 += 1;"
		"if r6 < 2 goto 1b;"
		"r0 = 0;"
		"exit;"
		::: __clobber_all);
}
+30
tools/testing/selftests/bpf/progs/livepatch_trampoline.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */ 3 + 4 + #include <linux/bpf.h> 5 + #include <bpf/bpf_helpers.h> 6 + #include <bpf/bpf_tracing.h> 7 + 8 + int fentry_hit; 9 + int fexit_hit; 10 + int my_pid; 11 + 12 + SEC("fentry/cmdline_proc_show") 13 + int BPF_PROG(fentry_cmdline) 14 + { 15 + if (my_pid != (bpf_get_current_pid_tgid() >> 32)) 16 + return 0; 17 + 18 + fentry_hit = 1; 19 + return 0; 20 + } 21 + 22 + SEC("fexit/cmdline_proc_show") 23 + int BPF_PROG(fexit_cmdline) 24 + { 25 + if (my_pid != (bpf_get_current_pid_tgid() >> 32)) 26 + return 0; 27 + 28 + fexit_hit = 1; 29 + return 0; 30 + }
+43
tools/testing/selftests/bpf/progs/mptcp_sockmap.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include "bpf_tracing_net.h" 4 + 5 + char _license[] SEC("license") = "GPL"; 6 + 7 + int sk_index; 8 + int redirect_idx; 9 + int trace_port; 10 + int helper_ret; 11 + struct { 12 + __uint(type, BPF_MAP_TYPE_SOCKMAP); 13 + __uint(key_size, sizeof(__u32)); 14 + __uint(value_size, sizeof(__u32)); 15 + __uint(max_entries, 100); 16 + } sock_map SEC(".maps"); 17 + 18 + SEC("sockops") 19 + int mptcp_sockmap_inject(struct bpf_sock_ops *skops) 20 + { 21 + struct bpf_sock *sk; 22 + 23 + /* only accept specified connection */ 24 + if (skops->local_port != trace_port || 25 + skops->op != BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB) 26 + return 1; 27 + 28 + sk = skops->sk; 29 + if (!sk) 30 + return 1; 31 + 32 + /* update sk handler */ 33 + helper_ret = bpf_sock_map_update(skops, &sock_map, &sk_index, BPF_NOEXIST); 34 + 35 + return 1; 36 + } 37 + 38 + SEC("sk_skb/stream_verdict") 39 + int mptcp_sockmap_redirect(struct __sk_buff *skb) 40 + { 41 + /* redirect skb to the sk under sock_map[redirect_idx] */ 42 + return bpf_sk_redirect_map(skb, &sock_map, redirect_idx, 0); 43 + }
+49
tools/testing/selftests/bpf/progs/stacktrace_ips.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

#ifndef PERF_MAX_STACK_DEPTH
#define PERF_MAX_STACK_DEPTH 127
#endif

/* one stored stacktrace: up to PERF_MAX_STACK_DEPTH instruction pointers */
typedef __u64 stack_trace_t[PERF_MAX_STACK_DEPTH];

struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(max_entries, 16384);
	__type(key, __u32);
	__type(value, stack_trace_t);
} stackmap SEC(".maps");

extern bool CONFIG_UNWINDER_ORC __kconfig __weak;

/*
 * This function is here to have CONFIG_UNWINDER_ORC
 * used and added to object BTF.
 */
int unused(void)
{
	return CONFIG_UNWINDER_ORC ? 0 : 1;
}

/* id of the last captured stacktrace; read by the userspace test */
__u32 stack_key;

SEC("kprobe.multi")
int kprobe_multi_test(struct pt_regs *ctx)
{
	stack_key = bpf_get_stackid(ctx, &stackmap, 0);
	return 0;
}

SEC("raw_tp/bpf_testmod_test_read")
int rawtp_test(void *ctx)
{
	/*
	 * NOTE(review): flags == 0 does not skip any frames, and the
	 * userspace check expects the bpf prog ksym as the top entry;
	 * the earlier "skip ebpf program entry" wording looked stale —
	 * confirm against the prog_tests side.
	 */
	stack_key = bpf_get_stackid(ctx, &stackmap, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";
+3 -3
tools/testing/selftests/bpf/progs/stream_fail.c
··· 10 10 __failure __msg("Possibly NULL pointer passed") 11 11 int stream_vprintk_null_arg(void *ctx) 12 12 { 13 - bpf_stream_vprintk(BPF_STDOUT, "", NULL, 0, NULL); 13 + bpf_stream_vprintk_impl(BPF_STDOUT, "", NULL, 0, NULL); 14 14 return 0; 15 15 } 16 16 ··· 18 18 __failure __msg("R3 type=scalar expected=") 19 19 int stream_vprintk_scalar_arg(void *ctx) 20 20 { 21 - bpf_stream_vprintk(BPF_STDOUT, "", (void *)46, 0, NULL); 21 + bpf_stream_vprintk_impl(BPF_STDOUT, "", (void *)46, 0, NULL); 22 22 return 0; 23 23 } 24 24 ··· 26 26 __failure __msg("arg#1 doesn't point to a const string") 27 27 int stream_vprintk_string_arg(void *ctx) 28 28 { 29 - bpf_stream_vprintk(BPF_STDOUT, ctx, NULL, 0, NULL); 29 + bpf_stream_vprintk_impl(BPF_STDOUT, ctx, NULL, 0, NULL); 30 30 return 0; 31 31 } 32 32
+3 -3
tools/testing/selftests/bpf/progs/task_work.c
··· 66 66 if (!work) 67 67 return 0; 68 68 69 - bpf_task_work_schedule_resume(task, &work->tw, &hmap, process_work, NULL); 69 + bpf_task_work_schedule_resume_impl(task, &work->tw, &hmap, process_work, NULL); 70 70 return 0; 71 71 } 72 72 ··· 80 80 work = bpf_map_lookup_elem(&arrmap, &key); 81 81 if (!work) 82 82 return 0; 83 - bpf_task_work_schedule_signal(task, &work->tw, &arrmap, process_work, NULL); 83 + bpf_task_work_schedule_signal_impl(task, &work->tw, &arrmap, process_work, NULL); 84 84 return 0; 85 85 } 86 86 ··· 102 102 work = bpf_map_lookup_elem(&lrumap, &key); 103 103 if (!work || work->data[0]) 104 104 return 0; 105 - bpf_task_work_schedule_resume(task, &work->tw, &lrumap, process_work, NULL); 105 + bpf_task_work_schedule_resume_impl(task, &work->tw, &lrumap, process_work, NULL); 106 106 return 0; 107 107 }
+4 -4
tools/testing/selftests/bpf/progs/task_work_fail.c
··· 53 53 work = bpf_map_lookup_elem(&arrmap, &key); 54 54 if (!work) 55 55 return 0; 56 - bpf_task_work_schedule_resume(task, &work->tw, &hmap, process_work, NULL); 56 + bpf_task_work_schedule_resume_impl(task, &work->tw, &hmap, process_work, NULL); 57 57 return 0; 58 58 } 59 59 ··· 65 65 struct bpf_task_work tw; 66 66 67 67 task = bpf_get_current_task_btf(); 68 - bpf_task_work_schedule_resume(task, &tw, &hmap, process_work, NULL); 68 + bpf_task_work_schedule_resume_impl(task, &tw, &hmap, process_work, NULL); 69 69 return 0; 70 70 } 71 71 ··· 76 76 struct task_struct *task; 77 77 78 78 task = bpf_get_current_task_btf(); 79 - bpf_task_work_schedule_resume(task, NULL, &hmap, process_work, NULL); 79 + bpf_task_work_schedule_resume_impl(task, NULL, &hmap, process_work, NULL); 80 80 return 0; 81 81 } 82 82 ··· 91 91 work = bpf_map_lookup_elem(&arrmap, &key); 92 92 if (!work) 93 93 return 0; 94 - bpf_task_work_schedule_resume(task, &work->tw, NULL, process_work, NULL); 94 + bpf_task_work_schedule_resume_impl(task, &work->tw, NULL, process_work, NULL); 95 95 return 0; 96 96 }
+2 -2
tools/testing/selftests/bpf/progs/task_work_stress.c
··· 51 51 if (!work) 52 52 return 0; 53 53 } 54 - err = bpf_task_work_schedule_signal(bpf_get_current_task_btf(), &work->tw, &hmap, 55 - process_work, NULL); 54 + err = bpf_task_work_schedule_signal_impl(bpf_get_current_task_btf(), &work->tw, &hmap, 55 + process_work, NULL); 56 56 if (err) 57 57 __sync_fetch_and_add(&schedule_error, 1); 58 58 else
+26
tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
	return a + (long)b + c + d + (long)e + f + g + h + i + j + k;
}

/* leaf of the stacktrace test chain; kprobe_multi attach point */
noinline void bpf_testmod_stacktrace_test(void)
{
	/* used for stacktrace test as attach function */
	asm volatile ("");
}

/*
 * _1 -> _2 -> _3 wrappers build a known, non-inlined call chain that the
 * stacktrace_ips selftest compares against.  The empty asm blocks keep
 * the compiler from inlining/eliding the calls.
 */
noinline void bpf_testmod_stacktrace_test_3(void)
{
	bpf_testmod_stacktrace_test();
	asm volatile ("");
}

noinline void bpf_testmod_stacktrace_test_2(void)
{
	bpf_testmod_stacktrace_test_3();
	asm volatile ("");
}

noinline void bpf_testmod_stacktrace_test_1(void)
{
	bpf_testmod_stacktrace_test_2();
	asm volatile ("");
}

int bpf_testmod_fentry_ok;

noinline ssize_t
/* ... intervening lines not visible in this diff hunk ... */
	    bpf_testmod_fentry_test11(16, (void *)17, 18, 19, (void *)20,
				      21, 22, 23, 24, 25, 26) != 231)
		goto out;

	/* populate the call chain sampled by the stacktrace selftests */
	bpf_testmod_stacktrace_test_1();

	bpf_testmod_fentry_ok = 1;
out: