Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

static_call: Add call depth tracking support

When indirect calls are switched to direct calls, it has to be ensured
that the call target is not the function itself, but the call thunk, when
call depth tracking is enabled. But static calls are set up before call
thunks have been created.

Ensure a second run through the static call patching code after call thunks
have been created. When call thunks are not enabled this has no side
effects.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220915111148.306100465@infradead.org

+44 -5
+5
arch/x86/include/asm/alternative.h
··· 91 91 extern void callthunks_patch_builtin_calls(void); 92 92 extern void callthunks_patch_module_calls(struct callthunk_sites *sites, 93 93 struct module *mod); 94 + extern void *callthunks_translate_call_dest(void *dest); 94 95 #else 95 96 static __always_inline void callthunks_patch_builtin_calls(void) {} 96 97 static __always_inline void 97 98 callthunks_patch_module_calls(struct callthunk_sites *sites, 98 99 struct module *mod) {} 100 + static __always_inline void *callthunks_translate_call_dest(void *dest) 101 + { 102 + return dest; 103 + } 99 104 #endif 100 105 101 106 #ifdef CONFIG_SMP
+18
arch/x86/kernel/callthunks.c
··· 6 6 #include <linux/kallsyms.h> 7 7 #include <linux/memory.h> 8 8 #include <linux/moduleloader.h> 9 + #include <linux/static_call.h> 9 10 10 11 #include <asm/alternative.h> 11 12 #include <asm/asm-offsets.h> ··· 272 271 pr_info("Setting up call depth tracking\n"); 273 272 mutex_lock(&text_mutex); 274 273 callthunks_setup(&cs, &builtin_coretext); 274 + static_call_force_reinit(); 275 275 thunks_initialized = true; 276 276 mutex_unlock(&text_mutex); 277 + } 278 + 279 + void *callthunks_translate_call_dest(void *dest) 280 + { 281 + void *target; 282 + 283 + lockdep_assert_held(&text_mutex); 284 + 285 + if (!thunks_initialized || skip_addr(dest)) 286 + return dest; 287 + 288 + if (!is_coretext(NULL, dest)) 289 + return dest; 290 + 291 + target = patch_dest(dest, false); 292 + return target ? : dest; 277 293 } 278 294 279 295 #ifdef CONFIG_MODULES
+1
arch/x86/kernel/static_call.c
··· 34 34 35 35 switch (type) { 36 36 case CALL: 37 + func = callthunks_translate_call_dest(func); 37 38 code = text_gen_insn(CALL_INSN_OPCODE, insn, func); 38 39 if (func == &__static_call_return0) { 39 40 emulate = code;
+2
include/linux/static_call.h
··· 162 162 163 163 extern int __init static_call_init(void); 164 164 165 + extern void static_call_force_reinit(void); 166 + 165 167 struct static_call_mod { 166 168 struct static_call_mod *next; 167 169 struct module *mod; /* for vmlinux, mod == NULL */
+18 -5
kernel/static_call_inline.c
··· 15 15 extern struct static_call_tramp_key __start_static_call_tramp_key[], 16 16 __stop_static_call_tramp_key[]; 17 17 18 - static bool static_call_initialized; 18 + static int static_call_initialized; 19 + 20 + /* 21 + * Must be called before early_initcall() to be effective. 22 + */ 23 + void static_call_force_reinit(void) 24 + { 25 + if (WARN_ON_ONCE(!static_call_initialized)) 26 + return; 27 + 28 + static_call_initialized++; 29 + } 19 30 20 31 /* mutex to protect key modules/sites */ 21 32 static DEFINE_MUTEX(static_call_mutex); ··· 486 475 { 487 476 int ret; 488 477 489 - if (static_call_initialized) 478 + /* See static_call_force_reinit(). */ 479 + if (static_call_initialized == 1) 490 480 return 0; 491 481 492 482 cpus_read_lock(); ··· 502 490 BUG(); 503 491 } 504 492 505 - static_call_initialized = true; 506 - 507 493 #ifdef CONFIG_MODULES 508 - register_module_notifier(&static_call_module_nb); 494 + if (!static_call_initialized) 495 + register_module_notifier(&static_call_module_nb); 509 496 #endif 497 + 498 + static_call_initialized = 1; 510 499 return 0; 511 500 } 512 501 early_initcall(static_call_init);