Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'fix-ftrace-for-livepatch-bpf-fexit-programs'

Song Liu says:

====================
Fix ftrace for livepatch + BPF fexit programs

livepatch and BPF trampoline are two special users of ftrace. livepatch
uses ftrace with IPMODIFY flag and BPF trampoline uses ftrace direct
functions. When livepatch and BPF trampoline with fexit programs attach to
the same kernel function, BPF trampoline needs to call into the patched
version of the kernel function.

1/3 and 2/3 of this patchset fix two issues with livepatch + fexit cases,
one in the register_ftrace_direct path, the other in the
modify_ftrace_direct path.

3/3 adds selftests for both cases.
Acked-by: Steven Rostedt (Google) <rostedt@goodmis.org>
---
v4: https://patch.msgid.link/20251027175023.1521602-1-song@kernel.org

Changes v3 => v4:
1. Add helper reset_direct. (Steven)
2. Add Reviewed-by from Jiri.
3. Fix minor typo in comments.

v3: https://lore.kernel.org/bpf/20251026205445.1639632-1-song@kernel.org/

Changes v2 => v3:
1. Incorporate feedback by AI, which also fixes build error reported by
Steven and kernel test robot.

v2: https://lore.kernel.org/bpf/20251024182901.3247573-1-song@kernel.org/

Changes v1 => v2:
1. Target bpf tree. (Alexei)
2. Bring back the FTRACE_WARN_ON in __ftrace_hash_update_ipmodify
for valid code paths. (Steven)
3. Update selftests with cleaner way to find livepatch-sample.ko.
(offline discussion with Ihor)

v1: https://lore.kernel.org/bpf/20251024071257.3956031-1-song@kernel.org/
====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>

+185 -20
-5
kernel/bpf/trampoline.c
··· 479 479 * BPF_TRAMP_F_SHARE_IPMODIFY is set, we can generate the 480 480 * trampoline again, and retry register. 481 481 */ 482 - /* reset fops->func and fops->trampoline for re-register */ 483 - tr->fops->func = NULL; 484 - tr->fops->trampoline = 0; 485 - 486 - /* free im memory and reallocate later */ 487 482 bpf_tramp_image_free(im); 488 483 goto again; 489 484 }
+45 -15
kernel/trace/ftrace.c
··· 1971 1971 */ 1972 1972 static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops, 1973 1973 struct ftrace_hash *old_hash, 1974 - struct ftrace_hash *new_hash) 1974 + struct ftrace_hash *new_hash, 1975 + bool update_target) 1975 1976 { 1976 1977 struct ftrace_page *pg; 1977 1978 struct dyn_ftrace *rec, *end = NULL; ··· 2007 2006 if (rec->flags & FTRACE_FL_DISABLED) 2008 2007 continue; 2009 2008 2010 - /* We need to update only differences of filter_hash */ 2009 + /* 2010 + * Unless we are updating the target of a direct function, 2011 + * we only need to update differences of filter_hash 2012 + */ 2011 2013 in_old = !!ftrace_lookup_ip(old_hash, rec->ip); 2012 2014 in_new = !!ftrace_lookup_ip(new_hash, rec->ip); 2013 - if (in_old == in_new) 2015 + if (!update_target && (in_old == in_new)) 2014 2016 continue; 2015 2017 2016 2018 if (in_new) { ··· 2024 2020 if (is_ipmodify) 2025 2021 goto rollback; 2026 2022 2027 - FTRACE_WARN_ON(rec->flags & FTRACE_FL_DIRECT); 2023 + /* 2024 + * If this is called by __modify_ftrace_direct() 2025 + * then it is only changing where the direct 2026 + * pointer is jumping to, and the record already 2027 + * points to a direct trampoline. If it isn't, 2028 + * then it is a bug to update ipmodify on a direct 2029 + * caller. 
2030 + */ 2031 + FTRACE_WARN_ON(!update_target && 2032 + (rec->flags & FTRACE_FL_DIRECT)); 2028 2033 2029 2034 /* 2030 2035 * Another ops with IPMODIFY is already ··· 2089 2076 if (ftrace_hash_empty(hash)) 2090 2077 hash = NULL; 2091 2078 2092 - return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash); 2079 + return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash, false); 2093 2080 } 2094 2081 2095 2082 /* Disabling always succeeds */ ··· 2100 2087 if (ftrace_hash_empty(hash)) 2101 2088 hash = NULL; 2102 2089 2103 - __ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH); 2090 + __ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH, false); 2104 2091 } 2105 2092 2106 2093 static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops, ··· 2114 2101 if (ftrace_hash_empty(new_hash)) 2115 2102 new_hash = NULL; 2116 2103 2117 - return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash); 2104 + return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash, false); 2118 2105 } 2119 2106 2120 2107 static void print_ip_ins(const char *fmt, const unsigned char *p) ··· 5966 5953 free_ftrace_hash(fhp); 5967 5954 } 5968 5955 5956 + static void reset_direct(struct ftrace_ops *ops, unsigned long addr) 5957 + { 5958 + struct ftrace_hash *hash = ops->func_hash->filter_hash; 5959 + 5960 + remove_direct_functions_hash(hash, addr); 5961 + 5962 + /* cleanup for possible another register call */ 5963 + ops->func = NULL; 5964 + ops->trampoline = 0; 5965 + } 5966 + 5969 5967 /** 5970 5968 * register_ftrace_direct - Call a custom trampoline directly 5971 5969 * for multiple functions registered in @ops ··· 6072 6048 ops->direct_call = addr; 6073 6049 6074 6050 err = register_ftrace_function_nolock(ops); 6051 + if (err) 6052 + reset_direct(ops, addr); 6075 6053 6076 6054 out_unlock: 6077 6055 mutex_unlock(&direct_mutex); ··· 6106 6080 int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr, 6107 6081 bool free_filters) 6108 6082 { 6109 - struct ftrace_hash *hash = 
ops->func_hash->filter_hash; 6110 6083 int err; 6111 6084 6112 6085 if (check_direct_multi(ops)) ··· 6115 6090 6116 6091 mutex_lock(&direct_mutex); 6117 6092 err = unregister_ftrace_function(ops); 6118 - remove_direct_functions_hash(hash, addr); 6093 + reset_direct(ops, addr); 6119 6094 mutex_unlock(&direct_mutex); 6120 - 6121 - /* cleanup for possible another register call */ 6122 - ops->func = NULL; 6123 - ops->trampoline = 0; 6124 6095 6125 6096 if (free_filters) 6126 6097 ftrace_free_filter(ops); ··· 6127 6106 static int 6128 6107 __modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr) 6129 6108 { 6130 - struct ftrace_hash *hash; 6109 + struct ftrace_hash *hash = ops->func_hash->filter_hash; 6131 6110 struct ftrace_func_entry *entry, *iter; 6132 6111 static struct ftrace_ops tmp_ops = { 6133 6112 .func = ftrace_stub, ··· 6148 6127 return err; 6149 6128 6150 6129 /* 6130 + * Call __ftrace_hash_update_ipmodify() here, so that we can call 6131 + * ops->ops_func for the ops. This is needed because the above 6132 + * register_ftrace_function_nolock() worked on tmp_ops. 6133 + */ 6134 + err = __ftrace_hash_update_ipmodify(ops, hash, hash, true); 6135 + if (err) 6136 + goto out; 6137 + 6138 + /* 6151 6139 * Now the ftrace_ops_list_func() is called to do the direct callers. 6152 6140 * We can safely change the direct functions attached to each entry. 6153 6141 */ 6154 6142 mutex_lock(&ftrace_lock); 6155 6143 6156 - hash = ops->func_hash->filter_hash; 6157 6144 size = 1 << hash->size_bits; 6158 6145 for (i = 0; i < size; i++) { 6159 6146 hlist_for_each_entry(iter, &hash->buckets[i], hlist) { ··· 6176 6147 6177 6148 mutex_unlock(&ftrace_lock); 6178 6149 6150 + out: 6179 6151 /* Removing the tmp_ops will add the updated direct callers to the functions */ 6180 6152 unregister_ftrace_function(&tmp_ops); 6181 6153
+3
tools/testing/selftests/bpf/config
··· 50 50 CONFIG_IPV6_TUNNEL=y 51 51 CONFIG_KEYS=y 52 52 CONFIG_LIRC=y 53 + CONFIG_LIVEPATCH=y 53 54 CONFIG_LWTUNNEL=y 54 55 CONFIG_MODULE_SIG=y 55 56 CONFIG_MODULE_SRCVERSION_ALL=y ··· 112 111 CONFIG_NF_NAT=y 113 112 CONFIG_PACKET=y 114 113 CONFIG_RC_CORE=y 114 + CONFIG_SAMPLES=y 115 + CONFIG_SAMPLE_LIVEPATCH=m 115 116 CONFIG_SECURITY=y 116 117 CONFIG_SECURITYFS=y 117 118 CONFIG_SYN_COOKIES=y
+107
tools/testing/selftests/bpf/prog_tests/livepatch_trampoline.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */ 3 + 4 + #include <test_progs.h> 5 + #include "testing_helpers.h" 6 + #include "livepatch_trampoline.skel.h" 7 + 8 + static int load_livepatch(void) 9 + { 10 + char path[4096]; 11 + 12 + /* CI will set KBUILD_OUTPUT */ 13 + snprintf(path, sizeof(path), "%s/samples/livepatch/livepatch-sample.ko", 14 + getenv("KBUILD_OUTPUT") ? : "../../../.."); 15 + 16 + return load_module(path, env_verbosity > VERBOSE_NONE); 17 + } 18 + 19 + static void unload_livepatch(void) 20 + { 21 + /* Disable the livepatch before unloading the module */ 22 + system("echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled"); 23 + 24 + unload_module("livepatch_sample", env_verbosity > VERBOSE_NONE); 25 + } 26 + 27 + static void read_proc_cmdline(void) 28 + { 29 + char buf[4096]; 30 + int fd, ret; 31 + 32 + fd = open("/proc/cmdline", O_RDONLY); 33 + if (!ASSERT_OK_FD(fd, "open /proc/cmdline")) 34 + return; 35 + 36 + ret = read(fd, buf, sizeof(buf)); 37 + if (!ASSERT_GT(ret, 0, "read /proc/cmdline")) 38 + goto out; 39 + 40 + ASSERT_OK(strncmp(buf, "this has been live patched", 26), "strncmp"); 41 + 42 + out: 43 + close(fd); 44 + } 45 + 46 + static void __test_livepatch_trampoline(bool fexit_first) 47 + { 48 + struct livepatch_trampoline *skel = NULL; 49 + int err; 50 + 51 + skel = livepatch_trampoline__open_and_load(); 52 + if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) 53 + goto out; 54 + 55 + skel->bss->my_pid = getpid(); 56 + 57 + if (!fexit_first) { 58 + /* fentry program is loaded first by default */ 59 + err = livepatch_trampoline__attach(skel); 60 + if (!ASSERT_OK(err, "skel_attach")) 61 + goto out; 62 + } else { 63 + /* Manually load fexit program first. 
*/ 64 + skel->links.fexit_cmdline = bpf_program__attach(skel->progs.fexit_cmdline); 65 + if (!ASSERT_OK_PTR(skel->links.fexit_cmdline, "attach_fexit")) 66 + goto out; 67 + 68 + skel->links.fentry_cmdline = bpf_program__attach(skel->progs.fentry_cmdline); 69 + if (!ASSERT_OK_PTR(skel->links.fentry_cmdline, "attach_fentry")) 70 + goto out; 71 + } 72 + 73 + read_proc_cmdline(); 74 + 75 + ASSERT_EQ(skel->bss->fentry_hit, 1, "fentry_hit"); 76 + ASSERT_EQ(skel->bss->fexit_hit, 1, "fexit_hit"); 77 + out: 78 + livepatch_trampoline__destroy(skel); 79 + } 80 + 81 + void test_livepatch_trampoline(void) 82 + { 83 + int retry_cnt = 0; 84 + 85 + retry: 86 + if (load_livepatch()) { 87 + if (retry_cnt) { 88 + ASSERT_OK(1, "load_livepatch"); 89 + goto out; 90 + } 91 + /* 92 + * Something else (previous run of the same test?) loaded 93 + * the KLP module. Unload the KLP module and retry. 94 + */ 95 + unload_livepatch(); 96 + retry_cnt++; 97 + goto retry; 98 + } 99 + 100 + if (test__start_subtest("fentry_first")) 101 + __test_livepatch_trampoline(false); 102 + 103 + if (test__start_subtest("fexit_first")) 104 + __test_livepatch_trampoline(true); 105 + out: 106 + unload_livepatch(); 107 + }
+30
tools/testing/selftests/bpf/progs/livepatch_trampoline.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */ 3 + 4 + #include <linux/bpf.h> 5 + #include <bpf/bpf_helpers.h> 6 + #include <bpf/bpf_tracing.h> 7 + 8 + int fentry_hit; 9 + int fexit_hit; 10 + int my_pid; 11 + 12 + SEC("fentry/cmdline_proc_show") 13 + int BPF_PROG(fentry_cmdline) 14 + { 15 + if (my_pid != (bpf_get_current_pid_tgid() >> 32)) 16 + return 0; 17 + 18 + fentry_hit = 1; 19 + return 0; 20 + } 21 + 22 + SEC("fexit/cmdline_proc_show") 23 + int BPF_PROG(fexit_cmdline) 24 + { 25 + if (my_pid != (bpf_get_current_pid_tgid() >> 32)) 26 + return 0; 27 + 28 + fexit_hit = 1; 29 + return 0; 30 + }