Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/bugs: Rename CONFIG_RETPOLINE => CONFIG_MITIGATION_RETPOLINE

Step 5/10 of the namespace unification of CPU-mitigations-related Kconfig options.

[ mingo: Converted a few more uses in comments/messages as well. ]

Suggested-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Breno Leitao <leitao@debian.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Ariel Miculas <amiculas@cisco.com>
Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lore.kernel.org/r/20231121160740.1249350-6-leitao@debian.org

Authored by Breno Leitao; committed by Ingo Molnar.
aefb2f2e ea4654e0

+62 -62
+4 -4
Documentation/admin-guide/hw-vuln/spectre.rst
··· 473 473 -mindirect-branch=thunk-extern -mindirect-branch-register options. 474 474 If the kernel is compiled with a Clang compiler, the compiler needs 475 475 to support -mretpoline-external-thunk option. The kernel config 476 - CONFIG_RETPOLINE needs to be turned on, and the CPU needs to run with 477 - the latest updated microcode. 476 + CONFIG_MITIGATION_RETPOLINE needs to be turned on, and the CPU needs 477 + to run with the latest updated microcode. 478 478 479 479 On Intel Skylake-era systems the mitigation covers most, but not all, 480 480 cases. See :ref:`[3] <spec_ref3>` for more details. ··· 609 609 Selecting 'on' will, and 'auto' may, choose a 610 610 mitigation method at run time according to the 611 611 CPU, the available microcode, the setting of the 612 - CONFIG_RETPOLINE configuration option, and the 613 - compiler with which the kernel was built. 612 + CONFIG_MITIGATION_RETPOLINE configuration option, 613 + and the compiler with which the kernel was built. 614 614 615 615 Selecting 'on' will also enable the mitigation 616 616 against user space to user space task attacks.
+2 -2
Documentation/admin-guide/kernel-parameters.txt
··· 6007 6007 Selecting 'on' will, and 'auto' may, choose a 6008 6008 mitigation method at run time according to the 6009 6009 CPU, the available microcode, the setting of the 6010 - CONFIG_RETPOLINE configuration option, and the 6011 - compiler with which the kernel was built. 6010 + CONFIG_MITIGATION_RETPOLINE configuration option, 6011 + and the compiler with which the kernel was built. 6012 6012 6013 6013 Selecting 'on' will also enable the mitigation 6014 6014 against user space to user space task attacks.
+3 -3
arch/x86/Kconfig
··· 2457 2457 2458 2458 config FINEIBT 2459 2459 def_bool y 2460 - depends on X86_KERNEL_IBT && CFI_CLANG && RETPOLINE 2460 + depends on X86_KERNEL_IBT && CFI_CLANG && MITIGATION_RETPOLINE 2461 2461 select CALL_PADDING 2462 2462 2463 2463 config HAVE_CALL_THUNKS ··· 2495 2495 2496 2496 See Documentation/arch/x86/pti.rst for more details. 2497 2497 2498 - config RETPOLINE 2498 + config MITIGATION_RETPOLINE 2499 2499 bool "Avoid speculative indirect branches in kernel" 2500 2500 select OBJTOOL if HAVE_OBJTOOL 2501 2501 default y ··· 2507 2507 2508 2508 config RETHUNK 2509 2509 bool "Enable return-thunks" 2510 - depends on RETPOLINE && CC_HAS_RETURN_THUNK 2510 + depends on MITIGATION_RETPOLINE && CC_HAS_RETURN_THUNK 2511 2511 select OBJTOOL if HAVE_OBJTOOL 2512 2512 default y if X86_64 2513 2513 help
+2 -2
arch/x86/Makefile
··· 192 192 KBUILD_CFLAGS += -fno-asynchronous-unwind-tables 193 193 194 194 # Avoid indirect branches in kernel to deal with Spectre 195 - ifdef CONFIG_RETPOLINE 195 + ifdef CONFIG_MITIGATION_RETPOLINE 196 196 KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) 197 197 # Additionally, avoid generating expensive indirect jumps which 198 198 # are subject to retpolines for small number of switch cases. ··· 301 301 302 302 archprepare: checkbin 303 303 checkbin: 304 - ifdef CONFIG_RETPOLINE 304 + ifdef CONFIG_MITIGATION_RETPOLINE 305 305 ifeq ($(RETPOLINE_CFLAGS),) 306 306 @echo "You are building kernel with non-retpoline compiler." >&2 307 307 @echo "Please update your compiler." >&2
+2 -2
arch/x86/entry/vdso/Makefile
··· 87 87 -fno-omit-frame-pointer -foptimize-sibling-calls \ 88 88 -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO 89 89 90 - ifdef CONFIG_RETPOLINE 90 + ifdef CONFIG_MITIGATION_RETPOLINE 91 91 ifneq ($(RETPOLINE_VDSO_CFLAGS),) 92 92 CFL += $(RETPOLINE_VDSO_CFLAGS) 93 93 endif ··· 164 164 KBUILD_CFLAGS_32 += -fno-omit-frame-pointer 165 165 KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING 166 166 167 - ifdef CONFIG_RETPOLINE 167 + ifdef CONFIG_MITIGATION_RETPOLINE 168 168 ifneq ($(RETPOLINE_VDSO_CFLAGS),) 169 169 KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS) 170 170 endif
+1 -1
arch/x86/include/asm/disabled-features.h
··· 50 50 # define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31)) 51 51 #endif 52 52 53 - #ifdef CONFIG_RETPOLINE 53 + #ifdef CONFIG_MITIGATION_RETPOLINE 54 54 # define DISABLE_RETPOLINE 0 55 55 #else 56 56 # define DISABLE_RETPOLINE ((1 << (X86_FEATURE_RETPOLINE & 31)) | \
+4 -4
arch/x86/include/asm/linkage.h
··· 42 42 43 43 #if defined(CONFIG_RETHUNK) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO) 44 44 #define RET jmp __x86_return_thunk 45 - #else /* CONFIG_RETPOLINE */ 45 + #else /* CONFIG_MITIGATION_RETPOLINE */ 46 46 #ifdef CONFIG_SLS 47 47 #define RET ret; int3 48 48 #else 49 49 #define RET ret 50 50 #endif 51 - #endif /* CONFIG_RETPOLINE */ 51 + #endif /* CONFIG_MITIGATION_RETPOLINE */ 52 52 53 53 #else /* __ASSEMBLY__ */ 54 54 55 55 #if defined(CONFIG_RETHUNK) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO) 56 56 #define ASM_RET "jmp __x86_return_thunk\n\t" 57 - #else /* CONFIG_RETPOLINE */ 57 + #else /* CONFIG_MITIGATION_RETPOLINE */ 58 58 #ifdef CONFIG_SLS 59 59 #define ASM_RET "ret; int3\n\t" 60 60 #else 61 61 #define ASM_RET "ret\n\t" 62 62 #endif 63 - #endif /* CONFIG_RETPOLINE */ 63 + #endif /* CONFIG_MITIGATION_RETPOLINE */ 64 64 65 65 #endif /* __ASSEMBLY__ */ 66 66
+4 -4
arch/x86/include/asm/nospec-branch.h
··· 241 241 * instruction irrespective of kCFI. 242 242 */ 243 243 .macro JMP_NOSPEC reg:req 244 - #ifdef CONFIG_RETPOLINE 244 + #ifdef CONFIG_MITIGATION_RETPOLINE 245 245 __CS_PREFIX \reg 246 246 jmp __x86_indirect_thunk_\reg 247 247 #else ··· 251 251 .endm 252 252 253 253 .macro CALL_NOSPEC reg:req 254 - #ifdef CONFIG_RETPOLINE 254 + #ifdef CONFIG_MITIGATION_RETPOLINE 255 255 __CS_PREFIX \reg 256 256 call __x86_indirect_thunk_\reg 257 257 #else ··· 378 378 379 379 #endif /* CONFIG_MITIGATION_CALL_DEPTH_TRACKING */ 380 380 381 - #ifdef CONFIG_RETPOLINE 381 + #ifdef CONFIG_MITIGATION_RETPOLINE 382 382 383 383 #define GEN(reg) \ 384 384 extern retpoline_thunk_t __x86_indirect_thunk_ ## reg; ··· 399 399 400 400 /* 401 401 * Inline asm uses the %V modifier which is only in newer GCC 402 - * which is ensured when CONFIG_RETPOLINE is defined. 402 + * which is ensured when CONFIG_MITIGATION_RETPOLINE is defined. 403 403 */ 404 404 # define CALL_NOSPEC \ 405 405 ALTERNATIVE_2( \
+3 -3
arch/x86/kernel/alternative.c
··· 544 544 return insn->opcode.bytes[0] == 0x0f && (insn->opcode.bytes[1] & 0xf0) == 0x80; 545 545 } 546 546 547 - #if defined(CONFIG_RETPOLINE) && defined(CONFIG_OBJTOOL) 547 + #if defined(CONFIG_MITIGATION_RETPOLINE) && defined(CONFIG_OBJTOOL) 548 548 549 549 /* 550 550 * CALL/JMP *%\reg ··· 844 844 void __init_or_module noinline apply_returns(s32 *start, s32 *end) { } 845 845 #endif /* CONFIG_RETHUNK */ 846 846 847 - #else /* !CONFIG_RETPOLINE || !CONFIG_OBJTOOL */ 847 + #else /* !CONFIG_MITIGATION_RETPOLINE || !CONFIG_OBJTOOL */ 848 848 849 849 void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) { } 850 850 void __init_or_module noinline apply_returns(s32 *start, s32 *end) { } 851 851 852 - #endif /* CONFIG_RETPOLINE && CONFIG_OBJTOOL */ 852 + #endif /* CONFIG_MITIGATION_RETPOLINE && CONFIG_OBJTOOL */ 853 853 854 854 #ifdef CONFIG_X86_KERNEL_IBT 855 855
+3 -3
arch/x86/kernel/cpu/bugs.c
··· 1103 1103 static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init = 1104 1104 SPECTRE_V2_USER_NONE; 1105 1105 1106 - #ifdef CONFIG_RETPOLINE 1106 + #ifdef CONFIG_MITIGATION_RETPOLINE 1107 1107 static bool spectre_v2_bad_module; 1108 1108 1109 1109 bool retpoline_module_ok(bool has_retpoline) ··· 1416 1416 cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC || 1417 1417 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE || 1418 1418 cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) && 1419 - !IS_ENABLED(CONFIG_RETPOLINE)) { 1419 + !IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) { 1420 1420 pr_err("%s selected but not compiled in. Switching to AUTO select\n", 1421 1421 mitigation_options[i].option); 1422 1422 return SPECTRE_V2_CMD_AUTO; ··· 1470 1470 1471 1471 static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void) 1472 1472 { 1473 - if (!IS_ENABLED(CONFIG_RETPOLINE)) { 1473 + if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) { 1474 1474 pr_err("Kernel not compiled with retpoline; no mitigation available!"); 1475 1475 return SPECTRE_V2_NONE; 1476 1476 }
+1 -1
arch/x86/kernel/ftrace.c
··· 307 307 } __attribute__((packed)); 308 308 }; 309 309 310 - #define RET_SIZE (IS_ENABLED(CONFIG_RETPOLINE) ? 5 : 1 + IS_ENABLED(CONFIG_SLS)) 310 + #define RET_SIZE (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) ? 5 : 1 + IS_ENABLED(CONFIG_SLS)) 311 311 312 312 static unsigned long 313 313 create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
+1 -1
arch/x86/kernel/kprobes/opt.c
··· 324 324 * However, the kernel built with retpolines or IBT has jump 325 325 * tables disabled so the check can be skipped altogether. 326 326 */ 327 - if (!IS_ENABLED(CONFIG_RETPOLINE) && 327 + if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && 328 328 !IS_ENABLED(CONFIG_X86_KERNEL_IBT) && 329 329 insn_is_indirect_jump(&insn)) 330 330 return 0;
+2 -2
arch/x86/kernel/vmlinux.lds.S
··· 132 132 LOCK_TEXT 133 133 KPROBES_TEXT 134 134 SOFTIRQENTRY_TEXT 135 - #ifdef CONFIG_RETPOLINE 135 + #ifdef CONFIG_MITIGATION_RETPOLINE 136 136 *(.text..__x86.indirect_thunk) 137 137 *(.text..__x86.return_thunk) 138 138 #endif ··· 267 267 } 268 268 #endif 269 269 270 - #ifdef CONFIG_RETPOLINE 270 + #ifdef CONFIG_MITIGATION_RETPOLINE 271 271 /* 272 272 * List of instructions that call/jmp/jcc to retpoline thunks 273 273 * __x86_indirect_thunk_*(). These instructions can be patched along
+1 -1
arch/x86/kvm/mmu/mmu.c
··· 263 263 static inline unsigned long kvm_mmu_get_guest_pgd(struct kvm_vcpu *vcpu, 264 264 struct kvm_mmu *mmu) 265 265 { 266 - if (IS_ENABLED(CONFIG_RETPOLINE) && mmu->get_guest_pgd == get_guest_cr3) 266 + if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && mmu->get_guest_pgd == get_guest_cr3) 267 267 return kvm_read_cr3(vcpu); 268 268 269 269 return mmu->get_guest_pgd(vcpu);
+1 -1
arch/x86/kvm/mmu/mmu_internal.h
··· 312 312 if (!prefetch) 313 313 vcpu->stat.pf_taken++; 314 314 315 - if (IS_ENABLED(CONFIG_RETPOLINE) && fault.is_tdp) 315 + if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && fault.is_tdp) 316 316 r = kvm_tdp_page_fault(vcpu, &fault); 317 317 else 318 318 r = vcpu->arch.mmu->page_fault(vcpu, &fault);
+1 -1
arch/x86/kvm/svm/svm.c
··· 3455 3455 if (!svm_check_exit_valid(exit_code)) 3456 3456 return svm_handle_invalid_exit(vcpu, exit_code); 3457 3457 3458 - #ifdef CONFIG_RETPOLINE 3458 + #ifdef CONFIG_MITIGATION_RETPOLINE 3459 3459 if (exit_code == SVM_EXIT_MSR) 3460 3460 return msr_interception(vcpu); 3461 3461 else if (exit_code == SVM_EXIT_VINTR)
+2 -2
arch/x86/kvm/svm/vmenter.S
··· 207 207 7: vmload %_ASM_AX 208 208 8: 209 209 210 - #ifdef CONFIG_RETPOLINE 210 + #ifdef CONFIG_MITIGATION_RETPOLINE 211 211 /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */ 212 212 FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE 213 213 #endif ··· 344 344 /* Pop @svm to RDI, guest registers have been saved already. */ 345 345 pop %_ASM_DI 346 346 347 - #ifdef CONFIG_RETPOLINE 347 + #ifdef CONFIG_MITIGATION_RETPOLINE 348 348 /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */ 349 349 FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE 350 350 #endif
+1 -1
arch/x86/kvm/vmx/vmx.c
··· 6544 6544 6545 6545 if (exit_reason.basic >= kvm_vmx_max_exit_handlers) 6546 6546 goto unexpected_vmexit; 6547 - #ifdef CONFIG_RETPOLINE 6547 + #ifdef CONFIG_MITIGATION_RETPOLINE 6548 6548 if (exit_reason.basic == EXIT_REASON_MSR_WRITE) 6549 6549 return kvm_emulate_wrmsr(vcpu); 6550 6550 else if (exit_reason.basic == EXIT_REASON_PREEMPTION_TIMER)
+1 -1
arch/x86/lib/Makefile
··· 49 49 lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o 50 50 lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o 51 51 lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o 52 - lib-$(CONFIG_RETPOLINE) += retpoline.o 52 + lib-$(CONFIG_MITIGATION_RETPOLINE) += retpoline.o 53 53 54 54 obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o 55 55 obj-y += iomem.o
+1 -1
arch/x86/net/bpf_jit_comp.c
··· 469 469 emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip); 470 470 } else { 471 471 EMIT2(0xFF, 0xE0 + reg); /* jmp *%\reg */ 472 - if (IS_ENABLED(CONFIG_RETPOLINE) || IS_ENABLED(CONFIG_SLS)) 472 + if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) || IS_ENABLED(CONFIG_SLS)) 473 473 EMIT1(0xCC); /* int3 */ 474 474 } 475 475
+1 -1
arch/x86/net/bpf_jit_comp32.c
··· 1273 1273 u8 *prog = *pprog; 1274 1274 int cnt = 0; 1275 1275 1276 - #ifdef CONFIG_RETPOLINE 1276 + #ifdef CONFIG_MITIGATION_RETPOLINE 1277 1277 EMIT1_off32(0xE9, (u8 *)__x86_indirect_thunk_edx - (ip + 5)); 1278 1278 #else 1279 1279 EMIT2(0xFF, 0xE2);
+1 -1
arch/x86/purgatory/Makefile
··· 61 61 PURGATORY_CFLAGS_REMOVE += -fstack-protector-strong 62 62 endif 63 63 64 - ifdef CONFIG_RETPOLINE 64 + ifdef CONFIG_MITIGATION_RETPOLINE 65 65 PURGATORY_CFLAGS_REMOVE += $(RETPOLINE_CFLAGS) 66 66 endif 67 67
+1 -1
include/linux/compiler-gcc.h
··· 35 35 (typeof(ptr)) (__ptr + (off)); \ 36 36 }) 37 37 38 - #ifdef CONFIG_RETPOLINE 38 + #ifdef CONFIG_MITIGATION_RETPOLINE 39 39 #define __noretpoline __attribute__((__indirect_branch__("keep"))) 40 40 #endif 41 41
+1 -1
include/linux/indirect_call_wrapper.h
··· 2 2 #ifndef _LINUX_INDIRECT_CALL_WRAPPER_H 3 3 #define _LINUX_INDIRECT_CALL_WRAPPER_H 4 4 5 - #ifdef CONFIG_RETPOLINE 5 + #ifdef CONFIG_MITIGATION_RETPOLINE 6 6 7 7 /* 8 8 * INDIRECT_CALL_$NR - wrapper for indirect calls with $NR known builtin
+1 -1
include/linux/module.h
··· 885 885 static inline void module_bug_cleanup(struct module *mod) {} 886 886 #endif /* CONFIG_GENERIC_BUG */ 887 887 888 - #ifdef CONFIG_RETPOLINE 888 + #ifdef CONFIG_MITIGATION_RETPOLINE 889 889 extern bool retpoline_module_ok(bool has_retpoline); 890 890 #else 891 891 static inline bool retpoline_module_ok(bool has_retpoline)
+1 -1
include/net/netfilter/nf_tables_core.h
··· 93 93 extern const struct nft_set_type nft_set_pipapo_type; 94 94 extern const struct nft_set_type nft_set_pipapo_avx2_type; 95 95 96 - #ifdef CONFIG_RETPOLINE 96 + #ifdef CONFIG_MITIGATION_RETPOLINE 97 97 bool nft_rhash_lookup(const struct net *net, const struct nft_set *set, 98 98 const u32 *key, const struct nft_set_ext **ext); 99 99 bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
+1 -1
include/net/tc_wrapper.h
··· 4 4 5 5 #include <net/pkt_cls.h> 6 6 7 - #if IS_ENABLED(CONFIG_RETPOLINE) 7 + #if IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) 8 8 9 9 #include <linux/cpufeature.h> 10 10 #include <linux/static_key.h>
+1 -1
kernel/trace/ring_buffer.c
··· 1156 1156 u64 ts; 1157 1157 1158 1158 /* Skip retpolines :-( */ 1159 - if (IS_ENABLED(CONFIG_RETPOLINE) && likely(buffer->clock == trace_clock_local)) 1159 + if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && likely(buffer->clock == trace_clock_local)) 1160 1160 ts = trace_clock_local(); 1161 1161 else 1162 1162 ts = buffer->clock();
+1 -1
net/netfilter/Makefile
··· 101 101 endif 102 102 103 103 ifdef CONFIG_NFT_CT 104 - ifdef CONFIG_RETPOLINE 104 + ifdef CONFIG_MITIGATION_RETPOLINE 105 105 nf_tables-objs += nft_ct_fast.o 106 106 endif 107 107 endif
+3 -3
net/netfilter/nf_tables_core.c
··· 21 21 #include <net/netfilter/nf_log.h> 22 22 #include <net/netfilter/nft_meta.h> 23 23 24 - #if defined(CONFIG_RETPOLINE) && defined(CONFIG_X86) 24 + #if defined(CONFIG_MITIGATION_RETPOLINE) && defined(CONFIG_X86) 25 25 26 26 static struct static_key_false nf_tables_skip_direct_calls; 27 27 ··· 207 207 struct nft_regs *regs, 208 208 struct nft_pktinfo *pkt) 209 209 { 210 - #ifdef CONFIG_RETPOLINE 210 + #ifdef CONFIG_MITIGATION_RETPOLINE 211 211 unsigned long e; 212 212 213 213 if (nf_skip_indirect_calls()) ··· 236 236 X(e, nft_objref_map_eval); 237 237 #undef X 238 238 indirect_call: 239 - #endif /* CONFIG_RETPOLINE */ 239 + #endif /* CONFIG_MITIGATION_RETPOLINE */ 240 240 expr->ops->eval(expr, regs, pkt); 241 241 } 242 242
+2 -2
net/netfilter/nft_ct.c
··· 751 751 return false; 752 752 } 753 753 754 - #ifdef CONFIG_RETPOLINE 754 + #ifdef CONFIG_MITIGATION_RETPOLINE 755 755 static const struct nft_expr_ops nft_ct_get_fast_ops = { 756 756 .type = &nft_ct_type, 757 757 .size = NFT_EXPR_SIZE(sizeof(struct nft_ct)), ··· 796 796 return ERR_PTR(-EINVAL); 797 797 798 798 if (tb[NFTA_CT_DREG]) { 799 - #ifdef CONFIG_RETPOLINE 799 + #ifdef CONFIG_MITIGATION_RETPOLINE 800 800 u32 k = ntohl(nla_get_be32(tb[NFTA_CT_KEY])); 801 801 802 802 switch (k) {
+1 -1
net/netfilter/nft_lookup.c
··· 24 24 struct nft_set_binding binding; 25 25 }; 26 26 27 - #ifdef CONFIG_RETPOLINE 27 + #ifdef CONFIG_MITIGATION_RETPOLINE 28 28 bool nft_set_do_lookup(const struct net *net, const struct nft_set *set, 29 29 const u32 *key, const struct nft_set_ext **ext) 30 30 {
+1 -1
net/sched/sch_api.c
··· 2353 2353 .exit = psched_net_exit, 2354 2354 }; 2355 2355 2356 - #if IS_ENABLED(CONFIG_RETPOLINE) 2356 + #if IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) 2357 2357 DEFINE_STATIC_KEY_FALSE(tc_skip_wrapper); 2358 2358 #endif 2359 2359
+1 -1
scripts/Makefile.lib
··· 262 262 objtool-args-$(CONFIG_HAVE_OBJTOOL_NOP_MCOUNT) += --mnop 263 263 endif 264 264 objtool-args-$(CONFIG_UNWINDER_ORC) += --orc 265 - objtool-args-$(CONFIG_RETPOLINE) += --retpoline 265 + objtool-args-$(CONFIG_MITIGATION_RETPOLINE) += --retpoline 266 266 objtool-args-$(CONFIG_RETHUNK) += --rethunk 267 267 objtool-args-$(CONFIG_SLS) += --sls 268 268 objtool-args-$(CONFIG_STACK_VALIDATION) += --stackval
+1 -1
scripts/generate_rust_target.rs
··· 155 155 "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", 156 156 ); 157 157 let mut features = "-3dnow,-3dnowa,-mmx,+soft-float".to_string(); 158 - if cfg.has("RETPOLINE") { 158 + if cfg.has("MITIGATION_RETPOLINE") { 159 159 features += ",+retpoline-external-thunk"; 160 160 } 161 161 ts.push("features", features);
+1 -1
scripts/mod/modpost.c
··· 1843 1843 1844 1844 buf_printf(b, 1845 1845 "\n" 1846 - "#ifdef CONFIG_RETPOLINE\n" 1846 + "#ifdef CONFIG_MITIGATION_RETPOLINE\n" 1847 1847 "MODULE_INFO(retpoline, \"Y\");\n" 1848 1848 "#endif\n"); 1849 1849
+1 -1
tools/arch/x86/include/asm/disabled-features.h
··· 50 50 # define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31)) 51 51 #endif 52 52 53 - #ifdef CONFIG_RETPOLINE 53 + #ifdef CONFIG_MITIGATION_RETPOLINE 54 54 # define DISABLE_RETPOLINE 0 55 55 #else 56 56 # define DISABLE_RETPOLINE ((1 << (X86_FEATURE_RETPOLINE & 31)) | \
+1 -1
tools/objtool/arch/x86/special.c
··· 83 83 * TODO: Once we have DWARF CFI and smarter instruction decoding logic, 84 84 * ensure the same register is used in the mov and jump instructions. 85 85 * 86 - * NOTE: RETPOLINE made it harder still to decode dynamic jumps. 86 + * NOTE: MITIGATION_RETPOLINE made it harder still to decode dynamic jumps. 87 87 */ 88 88 struct reloc *arch_find_switch_table(struct objtool_file *file, 89 89 struct instruction *insn)
+1 -1
tools/objtool/check.c
··· 3984 3984 } else 3985 3985 continue; 3986 3986 } else { 3987 - WARN_INSN(insn, "indirect %s found in RETPOLINE build", 3987 + WARN_INSN(insn, "indirect %s found in MITIGATION_RETPOLINE build", 3988 3988 insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call"); 3989 3989 } 3990 3990