Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/retbleed: Add fine grained Kconfig knobs

Do fine-grained Kconfig for all the various retbleed parts.

NOTE: if your compiler doesn't support return thunks, this will
silently 'upgrade' your mitigation to IBPB; you might not like this.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>

Authored by Peter Zijlstra; committed by Borislav Petkov.
f43b9876 26aae8cc

+178 -69
+85 -26
arch/x86/Kconfig
··· 462 462 def_bool y 463 463 depends on X86_GOLDFISH 464 464 465 - config RETPOLINE 466 - bool "Avoid speculative indirect branches in kernel" 467 - select OBJTOOL if HAVE_OBJTOOL 468 - default y 469 - help 470 - Compile kernel with the retpoline compiler options to guard against 471 - kernel-to-user data leaks by avoiding speculative indirect 472 - branches. Requires a compiler with -mindirect-branch=thunk-extern 473 - support for full protection. The kernel may run slower. 474 - 475 - config CC_HAS_SLS 476 - def_bool $(cc-option,-mharden-sls=all) 477 - 478 - config CC_HAS_RETURN_THUNK 479 - def_bool $(cc-option,-mfunction-return=thunk-extern) 480 - 481 - config SLS 482 - bool "Mitigate Straight-Line-Speculation" 483 - depends on CC_HAS_SLS && X86_64 484 - select OBJTOOL if HAVE_OBJTOOL 485 - default n 486 - help 487 - Compile the kernel with straight-line-speculation options to guard 488 - against straight line speculation. The kernel image might be slightly 489 - larger. 490 - 491 465 config X86_CPU_RESCTRL 492 466 bool "x86 CPU resource control support" 493 467 depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD) ··· 2429 2455 source "kernel/livepatch/Kconfig" 2430 2456 2431 2457 endmenu 2458 + 2459 + config CC_HAS_SLS 2460 + def_bool $(cc-option,-mharden-sls=all) 2461 + 2462 + config CC_HAS_RETURN_THUNK 2463 + def_bool $(cc-option,-mfunction-return=thunk-extern) 2464 + 2465 + menuconfig SPECULATION_MITIGATIONS 2466 + bool "Mitigations for speculative execution vulnerabilities" 2467 + default y 2468 + help 2469 + Say Y here to enable options which enable mitigations for 2470 + speculative execution hardware vulnerabilities. 2471 + 2472 + If you say N, all mitigations will be disabled. You really 2473 + should know what you are doing to say so. 
2474 + 2475 + if SPECULATION_MITIGATIONS 2476 + 2477 + config PAGE_TABLE_ISOLATION 2478 + bool "Remove the kernel mapping in user mode" 2479 + default y 2480 + depends on (X86_64 || X86_PAE) 2481 + help 2482 + This feature reduces the number of hardware side channels by 2483 + ensuring that the majority of kernel addresses are not mapped 2484 + into userspace. 2485 + 2486 + See Documentation/x86/pti.rst for more details. 2487 + 2488 + config RETPOLINE 2489 + bool "Avoid speculative indirect branches in kernel" 2490 + select OBJTOOL if HAVE_OBJTOOL 2491 + default y 2492 + help 2493 + Compile kernel with the retpoline compiler options to guard against 2494 + kernel-to-user data leaks by avoiding speculative indirect 2495 + branches. Requires a compiler with -mindirect-branch=thunk-extern 2496 + support for full protection. The kernel may run slower. 2497 + 2498 + config RETHUNK 2499 + bool "Enable return-thunks" 2500 + depends on RETPOLINE && CC_HAS_RETURN_THUNK 2501 + select OBJTOOL if HAVE_OBJTOOL 2502 + default y 2503 + help 2504 + Compile the kernel with the return-thunks compiler option to guard 2505 + against kernel-to-user data leaks by avoiding return speculation. 2506 + Requires a compiler with -mfunction-return=thunk-extern 2507 + support for full protection. The kernel may run slower. 2508 + 2509 + config CPU_UNRET_ENTRY 2510 + bool "Enable UNRET on kernel entry" 2511 + depends on CPU_SUP_AMD && RETHUNK 2512 + default y 2513 + help 2514 + Compile the kernel with support for the retbleed=unret mitigation. 2515 + 2516 + config CPU_IBPB_ENTRY 2517 + bool "Enable IBPB on kernel entry" 2518 + depends on CPU_SUP_AMD 2519 + default y 2520 + help 2521 + Compile the kernel with support for the retbleed=ibpb mitigation. 2522 + 2523 + config CPU_IBRS_ENTRY 2524 + bool "Enable IBRS on kernel entry" 2525 + depends on CPU_SUP_INTEL 2526 + default y 2527 + help 2528 + Compile the kernel with support for the spectre_v2=ibrs mitigation. 
2529 + This mitigates both spectre_v2 and retbleed at great cost to 2530 + performance. 2531 + 2532 + config SLS 2533 + bool "Mitigate Straight-Line-Speculation" 2534 + depends on CC_HAS_SLS && X86_64 2535 + select OBJTOOL if HAVE_OBJTOOL 2536 + default n 2537 + help 2538 + Compile the kernel with straight-line-speculation options to guard 2539 + against straight line speculation. The kernel image might be slightly 2540 + larger. 2541 + 2542 + endif 2432 2543 2433 2544 config ARCH_HAS_ADD_PAGES 2434 2545 def_bool y
+6 -2
arch/x86/Makefile
··· 15 15 ifdef CONFIG_CC_IS_GCC 16 16 RETPOLINE_CFLAGS := $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register) 17 17 RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch-cs-prefix) 18 - RETPOLINE_CFLAGS += $(call cc-option,-mfunction-return=thunk-extern) 19 18 RETPOLINE_VDSO_CFLAGS := $(call cc-option,-mindirect-branch=thunk-inline -mindirect-branch-register) 20 19 endif 21 20 ifdef CONFIG_CC_IS_CLANG 22 21 RETPOLINE_CFLAGS := -mretpoline-external-thunk 23 22 RETPOLINE_VDSO_CFLAGS := -mretpoline 24 - RETPOLINE_CFLAGS += $(call cc-option,-mfunction-return=thunk-extern) 25 23 endif 24 + 25 + ifdef CONFIG_RETHUNK 26 + RETHUNK_CFLAGS := -mfunction-return=thunk-extern 27 + RETPOLINE_CFLAGS += $(RETHUNK_CFLAGS) 28 + endif 29 + 26 30 export RETPOLINE_CFLAGS 27 31 export RETPOLINE_VDSO_CFLAGS 28 32
+4
arch/x86/entry/calling.h
··· 297 297 * Assumes x86_spec_ctrl_{base,current} to have SPEC_CTRL_IBRS set. 298 298 */ 299 299 .macro IBRS_ENTER save_reg 300 + #ifdef CONFIG_CPU_IBRS_ENTRY 300 301 ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS 301 302 movl $MSR_IA32_SPEC_CTRL, %ecx 302 303 ··· 318 317 shr $32, %rdx 319 318 wrmsr 320 319 .Lend_\@: 320 + #endif 321 321 .endm 322 322 323 323 /* ··· 326 324 * regs. Must be called after the last RET. 327 325 */ 328 326 .macro IBRS_EXIT save_reg 327 + #ifdef CONFIG_CPU_IBRS_ENTRY 329 328 ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS 330 329 movl $MSR_IA32_SPEC_CTRL, %ecx 331 330 ··· 341 338 shr $32, %rdx 342 339 wrmsr 343 340 .Lend_\@: 341 + #endif 344 342 .endm 345 343 346 344 /*
+14 -4
arch/x86/include/asm/disabled-features.h
··· 54 54 # define DISABLE_RETPOLINE 0 55 55 #else 56 56 # define DISABLE_RETPOLINE ((1 << (X86_FEATURE_RETPOLINE & 31)) | \ 57 - (1 << (X86_FEATURE_RETPOLINE_LFENCE & 31)) | \ 58 - (1 << (X86_FEATURE_RETHUNK & 31)) | \ 59 - (1 << (X86_FEATURE_UNRET & 31))) 57 + (1 << (X86_FEATURE_RETPOLINE_LFENCE & 31))) 58 + #endif 59 + 60 + #ifdef CONFIG_RETHUNK 61 + # define DISABLE_RETHUNK 0 62 + #else 63 + # define DISABLE_RETHUNK (1 << (X86_FEATURE_RETHUNK & 31)) 64 + #endif 65 + 66 + #ifdef CONFIG_CPU_UNRET_ENTRY 67 + # define DISABLE_UNRET 0 68 + #else 69 + # define DISABLE_UNRET (1 << (X86_FEATURE_UNRET & 31)) 60 70 #endif 61 71 62 72 #ifdef CONFIG_INTEL_IOMMU_SVM ··· 101 91 #define DISABLED_MASK8 (DISABLE_TDX_GUEST) 102 92 #define DISABLED_MASK9 (DISABLE_SGX) 103 93 #define DISABLED_MASK10 0 104 - #define DISABLED_MASK11 (DISABLE_RETPOLINE) 94 + #define DISABLED_MASK11 (DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET) 105 95 #define DISABLED_MASK12 0 106 96 #define DISABLED_MASK13 0 107 97 #define DISABLED_MASK14 0
+2 -2
arch/x86/include/asm/linkage.h
··· 19 19 #define __ALIGN_STR __stringify(__ALIGN) 20 20 #endif 21 21 22 - #if defined(CONFIG_RETPOLINE) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO) 22 + #if defined(CONFIG_RETHUNK) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO) 23 23 #define RET jmp __x86_return_thunk 24 24 #else /* CONFIG_RETPOLINE */ 25 25 #ifdef CONFIG_SLS ··· 31 31 32 32 #else /* __ASSEMBLY__ */ 33 33 34 - #if defined(CONFIG_RETPOLINE) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO) 34 + #if defined(CONFIG_RETHUNK) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO) 35 35 #define ASM_RET "jmp __x86_return_thunk\n\t" 36 36 #else /* CONFIG_RETPOLINE */ 37 37 #ifdef CONFIG_SLS
+8 -2
arch/x86/include/asm/nospec-branch.h
··· 127 127 .Lskip_rsb_\@: 128 128 .endm 129 129 130 + #ifdef CONFIG_CPU_UNRET_ENTRY 131 + #define CALL_ZEN_UNTRAIN_RET "call zen_untrain_ret" 132 + #else 133 + #define CALL_ZEN_UNTRAIN_RET "" 134 + #endif 135 + 130 136 /* 131 137 * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the 132 138 * return thunk isn't mapped into the userspace tables (then again, AMD ··· 145 139 * where we have a stack but before any RET instruction. 146 140 */ 147 141 .macro UNTRAIN_RET 148 - #ifdef CONFIG_RETPOLINE 142 + #if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) 149 143 ANNOTATE_UNRET_END 150 144 ALTERNATIVE_2 "", \ 151 - "call zen_untrain_ret", X86_FEATURE_UNRET, \ 145 + CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \ 152 146 "call entry_ibpb", X86_FEATURE_ENTRY_IBPB 153 147 #endif 154 148 .endm
+1 -1
arch/x86/include/asm/static_call.h
··· 46 46 #define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func) \ 47 47 __ARCH_DEFINE_STATIC_CALL_TRAMP(name, ".byte 0xe9; .long " #func " - (. + 4)") 48 48 49 - #ifdef CONFIG_RETPOLINE 49 + #ifdef CONFIG_RETHUNK 50 50 #define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) \ 51 51 __ARCH_DEFINE_STATIC_CALL_TRAMP(name, "jmp __x86_return_thunk") 52 52 #else
+5
arch/x86/kernel/alternative.c
··· 508 508 } 509 509 } 510 510 511 + #ifdef CONFIG_RETHUNK 511 512 /* 512 513 * Rewrite the compiler generated return thunk tail-calls. 513 514 * ··· 570 569 } 571 570 } 572 571 } 572 + #else 573 + void __init_or_module noinline apply_returns(s32 *start, s32 *end) { } 574 + #endif /* CONFIG_RETHUNK */ 575 + 573 576 #else /* !CONFIG_RETPOLINE || !CONFIG_OBJTOOL */ 574 577 575 578 void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) { }
+2
arch/x86/kernel/cpu/amd.c
··· 864 864 865 865 void init_spectral_chicken(struct cpuinfo_x86 *c) 866 866 { 867 + #ifdef CONFIG_CPU_UNRET_ENTRY 867 868 u64 value; 868 869 869 870 /* ··· 881 880 wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value); 882 881 } 883 882 } 883 + #endif 884 884 } 885 885 886 886 static void init_amd_zn(struct cpuinfo_x86 *c)
+27 -15
arch/x86/kernel/cpu/bugs.c
··· 842 842 early_param("retbleed", retbleed_parse_cmdline); 843 843 844 844 #define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n" 845 - #define RETBLEED_COMPILER_MSG "WARNING: kernel not compiled with RETPOLINE or -mfunction-return capable compiler; falling back to IBPB!\n" 846 845 #define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n" 847 846 848 847 static void __init retbleed_select_mitigation(void) ··· 856 857 return; 857 858 858 859 case RETBLEED_CMD_UNRET: 859 - retbleed_mitigation = RETBLEED_MITIGATION_UNRET; 860 + if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) { 861 + retbleed_mitigation = RETBLEED_MITIGATION_UNRET; 862 + } else { 863 + pr_err("WARNING: kernel not compiled with CPU_UNRET_ENTRY.\n"); 864 + goto do_cmd_auto; 865 + } 860 866 break; 861 867 862 868 case RETBLEED_CMD_IBPB: 863 - retbleed_mitigation = RETBLEED_MITIGATION_IBPB; 869 + if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) { 870 + retbleed_mitigation = RETBLEED_MITIGATION_IBPB; 871 + } else { 872 + pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n"); 873 + goto do_cmd_auto; 874 + } 864 875 break; 865 876 877 + do_cmd_auto: 866 878 case RETBLEED_CMD_AUTO: 867 879 default: 868 880 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || 869 - boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) 870 - retbleed_mitigation = RETBLEED_MITIGATION_UNRET; 881 + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { 882 + if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) 883 + retbleed_mitigation = RETBLEED_MITIGATION_UNRET; 884 + else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) 885 + retbleed_mitigation = RETBLEED_MITIGATION_IBPB; 886 + } 871 887 872 888 /* 873 889 * The Intel mitigation (IBRS or eIBRS) was already selected in ··· 895 881 896 882 switch (retbleed_mitigation) { 897 883 case RETBLEED_MITIGATION_UNRET: 898 - 899 - if (!IS_ENABLED(CONFIG_RETPOLINE) || 900 - !IS_ENABLED(CONFIG_CC_HAS_RETURN_THUNK)) { 
901 - pr_err(RETBLEED_COMPILER_MSG); 902 - retbleed_mitigation = RETBLEED_MITIGATION_IBPB; 903 - goto retbleed_force_ibpb; 904 - } 905 - 906 884 setup_force_cpu_cap(X86_FEATURE_RETHUNK); 907 885 setup_force_cpu_cap(X86_FEATURE_UNRET); 908 886 ··· 906 900 break; 907 901 908 902 case RETBLEED_MITIGATION_IBPB: 909 - retbleed_force_ibpb: 910 903 setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); 911 904 mitigate_smt = true; 912 905 break; ··· 1276 1271 return SPECTRE_V2_CMD_AUTO; 1277 1272 } 1278 1273 1274 + if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_CPU_IBRS_ENTRY)) { 1275 + pr_err("%s selected but not compiled in. Switching to AUTO select\n", 1276 + mitigation_options[i].option); 1277 + return SPECTRE_V2_CMD_AUTO; 1278 + } 1279 + 1279 1280 if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { 1280 1281 pr_err("%s selected but not Intel CPU. Switching to AUTO select\n", 1281 1282 mitigation_options[i].option); ··· 1339 1328 break; 1340 1329 } 1341 1330 1342 - if (boot_cpu_has_bug(X86_BUG_RETBLEED) && 1331 + if (IS_ENABLED(CONFIG_CPU_IBRS_ENTRY) && 1332 + boot_cpu_has_bug(X86_BUG_RETBLEED) && 1343 1333 retbleed_cmd != RETBLEED_CMD_OFF && 1344 1334 boot_cpu_has(X86_FEATURE_IBRS) && 1345 1335 boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
+1 -1
arch/x86/kernel/static_call.c
··· 126 126 } 127 127 EXPORT_SYMBOL_GPL(arch_static_call_transform); 128 128 129 - #ifdef CONFIG_RETPOLINE 129 + #ifdef CONFIG_RETHUNK 130 130 /* 131 131 * This is called by apply_returns() to fix up static call trampolines, 132 132 * specifically ARCH_DEFINE_STATIC_CALL_NULL_TRAMP which is recorded as
+2 -2
arch/x86/kvm/emulate.c
··· 439 439 * 440 440 * ENDBR [4 bytes; CONFIG_X86_KERNEL_IBT] 441 441 * SETcc %al [3 bytes] 442 - * RET | JMP __x86_return_thunk [1,5 bytes; CONFIG_RETPOLINE] 442 + * RET | JMP __x86_return_thunk [1,5 bytes; CONFIG_RETHUNK] 443 443 * INT3 [1 byte; CONFIG_SLS] 444 444 */ 445 - #define RET_LENGTH (1 + (4 * IS_ENABLED(CONFIG_RETPOLINE)) + \ 445 + #define RET_LENGTH (1 + (4 * IS_ENABLED(CONFIG_RETHUNK)) + \ 446 446 IS_ENABLED(CONFIG_SLS)) 447 447 #define SETCC_LENGTH (ENDBR_INSN_SIZE + 3 + RET_LENGTH) 448 448 #define SETCC_ALIGN (4 << ((SETCC_LENGTH > 4) & 1) << ((SETCC_LENGTH > 8) & 1))
+4
arch/x86/lib/retpoline.S
··· 72 72 * This function name is magical and is used by -mfunction-return=thunk-extern 73 73 * for the compiler to generate JMPs to it. 74 74 */ 75 + #ifdef CONFIG_RETHUNK 76 + 75 77 .section .text.__x86.return_thunk 76 78 77 79 /* ··· 138 136 __EXPORT_THUNK(zen_untrain_ret) 139 137 140 138 EXPORT_SYMBOL(__x86_return_thunk) 139 + 140 + #endif /* CONFIG_RETHUNK */
+1
scripts/Makefile.lib
··· 236 236 $(if $(CONFIG_FTRACE_MCOUNT_USE_OBJTOOL), --mcount) \ 237 237 $(if $(CONFIG_UNWINDER_ORC), --orc) \ 238 238 $(if $(CONFIG_RETPOLINE), --retpoline) \ 239 + $(if $(CONFIG_RETHUNK), --rethunk) \ 239 240 $(if $(CONFIG_SLS), --sls) \ 240 241 $(if $(CONFIG_STACK_VALIDATION), --stackval) \ 241 242 $(if $(CONFIG_HAVE_STATIC_CALL_INLINE), --static-call) \
+1 -1
scripts/Makefile.vmlinux_o
··· 44 44 45 45 objtool_args := \ 46 46 $(if $(delay-objtool),$(objtool_args)) \ 47 - $(if $(CONFIG_NOINSTR_VALIDATION), --noinstr $(if $(CONFIG_RETPOLINE), --unret)) \ 47 + $(if $(CONFIG_NOINSTR_VALIDATION), --noinstr $(if $(CONFIG_CPU_UNRET_ENTRY), --unret)) \ 48 48 $(if $(CONFIG_GCOV_KERNEL), --no-unreachable) \ 49 49 --link 50 50
-11
security/Kconfig
··· 54 54 implement socket and networking access controls. 55 55 If you are unsure how to answer this question, answer N. 56 56 57 - config PAGE_TABLE_ISOLATION 58 - bool "Remove the kernel mapping in user mode" 59 - default y 60 - depends on (X86_64 || X86_PAE) && !UML 61 - help 62 - This feature reduces the number of hardware side channels by 63 - ensuring that the majority of kernel addresses are not mapped 64 - into userspace. 65 - 66 - See Documentation/x86/pti.rst for more details. 67 - 68 57 config SECURITY_INFINIBAND 69 58 bool "Infiniband Security Hooks" 70 59 depends on SECURITY && INFINIBAND
+7
tools/objtool/builtin-check.c
··· 68 68 OPT_BOOLEAN('n', "noinstr", &opts.noinstr, "validate noinstr rules"), 69 69 OPT_BOOLEAN('o', "orc", &opts.orc, "generate ORC metadata"), 70 70 OPT_BOOLEAN('r', "retpoline", &opts.retpoline, "validate and annotate retpoline usage"), 71 + OPT_BOOLEAN(0, "rethunk", &opts.rethunk, "validate and annotate rethunk usage"), 71 72 OPT_BOOLEAN(0, "unret", &opts.unret, "validate entry unret placement"), 72 73 OPT_BOOLEAN('l', "sls", &opts.sls, "validate straight-line-speculation mitigations"), 73 74 OPT_BOOLEAN('s', "stackval", &opts.stackval, "validate frame pointer rules"), ··· 125 124 opts.noinstr || 126 125 opts.orc || 127 126 opts.retpoline || 127 + opts.rethunk || 128 128 opts.sls || 129 129 opts.stackval || 130 130 opts.static_call || ··· 136 134 } 137 135 138 136 return true; 137 + } 138 + 139 + if (opts.unret && !opts.rethunk) { 140 + ERROR("--unret requires --rethunk"); 141 + return false; 139 142 } 140 143 141 144 if (opts.dump_orc)
+7 -2
tools/objtool/check.c
··· 3732 3732 continue; 3733 3733 3734 3734 if (insn->type == INSN_RETURN) { 3735 - WARN_FUNC("'naked' return found in RETPOLINE build", 3736 - insn->sec, insn->offset); 3735 + if (opts.rethunk) { 3736 + WARN_FUNC("'naked' return found in RETHUNK build", 3737 + insn->sec, insn->offset); 3738 + } else 3739 + continue; 3737 3740 } else { 3738 3741 WARN_FUNC("indirect %s found in RETPOLINE build", 3739 3742 insn->sec, insn->offset, ··· 4267 4264 if (ret < 0) 4268 4265 goto out; 4269 4266 warnings += ret; 4267 + } 4270 4268 4269 + if (opts.rethunk) { 4271 4270 ret = create_return_sites_sections(file); 4272 4271 if (ret < 0) 4273 4272 goto out;
+1
tools/objtool/include/objtool/builtin.h
··· 19 19 bool noinstr; 20 20 bool orc; 21 21 bool retpoline; 22 + bool rethunk; 22 23 bool unret; 23 24 bool sls; 24 25 bool stackval;