/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_

#include <linux/static_key.h>
#include <linux/objtool.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
#include <asm/unwind_hints.h>
#include <asm/percpu.h>
#include <asm/current.h>

/*
 * Call depth tracking for Intel SKL CPUs to address the RSB underflow
 * issue in software.
 *
 * The tracking does not use a counter. It uses arithmetic shift
 * right on call entry and logical shift left on return.
 *
 * The depth tracking variable is initialized to 0x8000.... when the call
 * depth is zero. The arithmetic shift right sign extends the MSB and
 * saturates after the 12th call. The shift count is 5 for both directions
 * so the tracking covers 12 nested calls.
 *
 *  Call
 *   0: 0x8000000000000000	0x0000000000000000
 *   1: 0xfc00000000000000	0xf000000000000000
 *  ...
 *  11: 0xfffffffffffffff8	0xfffffffffffffc00
 *  12: 0xffffffffffffffff	0xffffffffffffffe0
 *
 * After a return buffer fill the depth is credited 12 calls before the
 * next stuffing has to take place.
 *
 * There is an inaccuracy for situations like this:
 *
 *	10 calls
 *	 5 returns
 *	 3 calls
 *	 4 returns
 *	 3 calls
 *	 ....
 *
 * The shift count might cause this to be off by one in either direction,
 * but there is still a cushion vs. the RSB depth. The algorithm does not
 * claim to be perfect and it can be speculated around by the CPU, but it
 * is considered that it obfuscates the problem enough to make exploitation
 * extremely difficult.
 */
#define RET_DEPTH_SHIFT			5
#define RSB_RET_STUFF_LOOPS		16
#define RET_DEPTH_INIT			0x8000000000000000ULL
#define RET_DEPTH_INIT_FROM_CALL	0xfc00000000000000ULL
#define RET_DEPTH_CREDIT		0xffffffffffffffffULL
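
/*
 * Illustrative C model of the tracking, using the constants above. This is
 * a sketch only, not kernel code, and the variable name is hypothetical:
 *
 *	u64 depth = RET_DEPTH_INIT;			call depth zero
 *
 *	on call entry:	depth = (u64)((s64)depth >> RET_DEPTH_SHIFT);
 *	on return:	depth <<= RET_DEPTH_SHIFT;
 *	on RSB stuff:	depth = RET_DEPTH_CREDIT;
 *
 * This mirrors the sarq in INCREMENT_CALL_DEPTH, the movq $-1 in
 * CREDIT_CALL_DEPTH and the logical shift left done by the return thunk.
 */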

#ifdef CONFIG_CALL_THUNKS_DEBUG
# define CALL_THUNKS_DEBUG_INC_CALLS				\
	incq	%gs:__x86_call_count;
# define CALL_THUNKS_DEBUG_INC_RETS				\
	incq	%gs:__x86_ret_count;
# define CALL_THUNKS_DEBUG_INC_STUFFS				\
	incq	%gs:__x86_stuffs_count;
# define CALL_THUNKS_DEBUG_INC_CTXSW				\
	incq	%gs:__x86_ctxsw_count;
#else
# define CALL_THUNKS_DEBUG_INC_CALLS
# define CALL_THUNKS_DEBUG_INC_RETS
# define CALL_THUNKS_DEBUG_INC_STUFFS
# define CALL_THUNKS_DEBUG_INC_CTXSW
#endif

#if defined(CONFIG_CALL_DEPTH_TRACKING) && !defined(COMPILE_OFFSETS)

#include <asm/asm-offsets.h>

#define CREDIT_CALL_DEPTH					\
	movq	$-1, PER_CPU_VAR(pcpu_hot + X86_call_depth);

#define ASM_CREDIT_CALL_DEPTH					\
	movq	$-1, PER_CPU_VAR(pcpu_hot + X86_call_depth);

#define RESET_CALL_DEPTH					\
	mov	$0x80, %rax;					\
	shl	$56, %rax;					\
	movq	%rax, PER_CPU_VAR(pcpu_hot + X86_call_depth);

#define RESET_CALL_DEPTH_FROM_CALL				\
	mov	$0xfc, %rax;					\
	shl	$56, %rax;					\
	movq	%rax, PER_CPU_VAR(pcpu_hot + X86_call_depth);	\
	CALL_THUNKS_DEBUG_INC_CALLS

#define INCREMENT_CALL_DEPTH					\
	sarq	$5, %gs:pcpu_hot + X86_call_depth;		\
	CALL_THUNKS_DEBUG_INC_CALLS

#define ASM_INCREMENT_CALL_DEPTH				\
	sarq	$5, PER_CPU_VAR(pcpu_hot + X86_call_depth);	\
	CALL_THUNKS_DEBUG_INC_CALLS

#else
#define CREDIT_CALL_DEPTH
#define ASM_CREDIT_CALL_DEPTH
#define RESET_CALL_DEPTH
#define INCREMENT_CALL_DEPTH
#define ASM_INCREMENT_CALL_DEPTH
#define RESET_CALL_DEPTH_FROM_CALL
#endif

/*
 * Fill the CPU return stack buffer.
 *
 * Each entry in the RSB, if used for a speculative 'ret', contains an
 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
 *
 * This is required in various cases for retpoline and IBRS-based
 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
 * eliminate potentially bogus entries from the RSB, and sometimes
 * purely to ensure that it doesn't get empty, which on some CPUs would
 * allow predictions from other (unwanted!) sources to be used.
 *
 * We define a CPP macro such that it can be used from both .S files and
 * inline assembly. It's possible to do a .macro and then include that
 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
 */

#define RETPOLINE_THUNK_SIZE	32
#define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */

/*
 * Common helper for __FILL_RETURN_BUFFER and __FILL_ONE_RETURN.
 */
#define __FILL_RETURN_SLOT				\
	ANNOTATE_INTRA_FUNCTION_CALL;			\
	call	772f;					\
	int3;						\
772:

/*
 * Stuff the entire RSB.
 *
 * Google experimented with loop-unrolling and this turned out to be
 * the optimal version - two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 */
#ifdef CONFIG_X86_64
#define __FILL_RETURN_BUFFER(reg, nr)			\
	mov	$(nr/2), reg;				\
771:							\
	__FILL_RETURN_SLOT				\
	__FILL_RETURN_SLOT				\
	add	$(BITS_PER_LONG/8) * 2, %_ASM_SP;	\
	dec	reg;					\
	jnz	771b;					\
	/* barrier for jnz misprediction */		\
	lfence;						\
	ASM_CREDIT_CALL_DEPTH				\
	CALL_THUNKS_DEBUG_INC_CTXSW
#else
/*
 * i386 doesn't unconditionally have LFENCE, as such it can't
 * do a loop.
 */
#define __FILL_RETURN_BUFFER(reg, nr)			\
	.rept nr;					\
	__FILL_RETURN_SLOT;				\
	.endr;						\
	add	$(BITS_PER_LONG/8) * nr, %_ASM_SP;
#endif

/*
 * Stuff a single RSB slot.
 *
 * To mitigate Post-Barrier RSB speculation, one CALL instruction must be
 * forced to retire before letting a RET instruction execute.
 *
 * On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed
 * before this point.
 */
#define __FILL_ONE_RETURN				\
	__FILL_RETURN_SLOT				\
	add	$(BITS_PER_LONG/8), %_ASM_SP;		\
	lfence;

#ifdef __ASSEMBLY__

/*
 * This should be used immediately before an indirect jump/call. It tells
 * objtool the subsequent indirect jump/call is vouched safe for retpoline
 * builds.
 */
.macro ANNOTATE_RETPOLINE_SAFE
.Lannotate_\@:
	.pushsection .discard.retpoline_safe
	_ASM_PTR .Lannotate_\@
	.popsection
.endm
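
/*
 * Illustrative use (hypothetical snippet): place the annotation directly
 * before the indirect branch it vouches for, e.g.
 *
 *	ANNOTATE_RETPOLINE_SAFE
 *	call	*%rdi
 */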

/*
 * (ab)use RETPOLINE_SAFE on RET to annotate away 'bare' RET instructions
 * vs RETBleed validation.
 */
#define ANNOTATE_UNRET_SAFE ANNOTATE_RETPOLINE_SAFE

/*
 * Abuse ANNOTATE_RETPOLINE_SAFE on a NOP to indicate UNRET_END, should
 * eventually turn into its own annotation.
 */
.macro ANNOTATE_UNRET_END
#ifdef CONFIG_DEBUG_ENTRY
	ANNOTATE_RETPOLINE_SAFE
	nop
#endif
.endm

/*
 * Equivalent to -mindirect-branch-cs-prefix; emit the 5 byte jmp/call
 * to the retpoline thunk with a CS prefix when the register requires
 * a REX prefix byte to encode. Also see apply_retpolines().
 */
.macro __CS_PREFIX reg:req
	.irp rs,r8,r9,r10,r11,r12,r13,r14,r15
	.ifc \reg,\rs
	.byte 0x2e
	.endif
	.endr
.endm

/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
 */
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	__CS_PREFIX \reg
	jmp	__x86_indirect_thunk_\reg
#else
	jmp	*%\reg
	int3
#endif
.endm

.macro CALL_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	__CS_PREFIX \reg
	call	__x86_indirect_thunk_\reg
#else
	call	*%\reg
#endif
.endm
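
/*
 * Illustrative use from .S code (the "handler" symbol is hypothetical):
 *
 *	movq	handler(%rip), %r11
 *	CALL_NOSPEC r11
 *
 * With CONFIG_RETPOLINE=y this emits a CS-prefixed call to
 * __x86_indirect_thunk_r11 (prefixed because %r11 needs a REX byte);
 * otherwise it emits a plain "call *%r11".
 */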

/*
 * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
 * monstrosity above, manually.
 */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2=ALT_NOT(X86_FEATURE_ALWAYS)
	ALTERNATIVE_2 "jmp .Lskip_rsb_\@", \
		__stringify(__FILL_RETURN_BUFFER(\reg,\nr)), \ftr, \
		__stringify(nop;nop;__FILL_ONE_RETURN), \ftr2

.Lskip_rsb_\@:
.endm
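
/*
 * Illustrative use, modelled on the context switch path: overwrite all RSB
 * entries (using a caller-chosen scratch register) when the corresponding
 * feature bit is set:
 *
 *	FILL_RETURN_BUFFER %rbx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
 */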

#ifdef CONFIG_CPU_UNRET_ENTRY
#define CALL_ZEN_UNTRAIN_RET	"call zen_untrain_ret"
#else
#define CALL_ZEN_UNTRAIN_RET	""
#endif

/*
 * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
 * return thunk isn't mapped into the userspace tables (then again, AMD
 * typically has NO_MELTDOWN).
 *
 * While zen_untrain_ret() doesn't clobber anything but requires stack,
 * entry_ibpb() will clobber AX, CX, DX.
 *
 * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
 * where we have a stack but before any RET instruction.
 */
.macro UNTRAIN_RET
#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
	defined(CONFIG_CALL_DEPTH_TRACKING)
	ANNOTATE_UNRET_END
	ALTERNATIVE_3 "",						\
		      CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET,		\
		      "call entry_ibpb", X86_FEATURE_ENTRY_IBPB,	\
		      __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
#endif
.endm
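
/*
 * Illustrative placement (hypothetical entry stub, not actual entry code),
 * after the CR3 switch, once a stack is available and before any RET:
 *
 *	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
 *	...
 *	UNTRAIN_RET
 */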

.macro UNTRAIN_RET_FROM_CALL
#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
	defined(CONFIG_CALL_DEPTH_TRACKING)
	ANNOTATE_UNRET_END
	ALTERNATIVE_3 "",						\
		      CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET,		\
		      "call entry_ibpb", X86_FEATURE_ENTRY_IBPB,	\
		      __stringify(RESET_CALL_DEPTH_FROM_CALL), X86_FEATURE_CALL_DEPTH
#endif
.endm


.macro CALL_DEPTH_ACCOUNT
#ifdef CONFIG_CALL_DEPTH_TRACKING
	ALTERNATIVE "",							\
		__stringify(ASM_INCREMENT_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
#endif
.endm

#else /* __ASSEMBLY__ */

#define ANNOTATE_RETPOLINE_SAFE					\
	"999:\n\t"						\
	".pushsection .discard.retpoline_safe\n\t"		\
	_ASM_PTR " 999b\n\t"					\
	".popsection\n\t"

typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
extern retpoline_thunk_t __x86_indirect_thunk_array[];
extern retpoline_thunk_t __x86_indirect_call_thunk_array[];
extern retpoline_thunk_t __x86_indirect_jump_thunk_array[];

extern void __x86_return_thunk(void);
extern void zen_untrain_ret(void);
extern void entry_ibpb(void);

#ifdef CONFIG_CALL_THUNKS
extern void (*x86_return_thunk)(void);
#else
#define x86_return_thunk	(&__x86_return_thunk)
#endif

#ifdef CONFIG_CALL_DEPTH_TRACKING
extern void __x86_return_skl(void);

static inline void x86_set_skl_return_thunk(void)
{
	x86_return_thunk = &__x86_return_skl;
}

#define CALL_DEPTH_ACCOUNT					\
	ALTERNATIVE("",						\
		    __stringify(INCREMENT_CALL_DEPTH),		\
		    X86_FEATURE_CALL_DEPTH)

#ifdef CONFIG_CALL_THUNKS_DEBUG
DECLARE_PER_CPU(u64, __x86_call_count);
DECLARE_PER_CPU(u64, __x86_ret_count);
DECLARE_PER_CPU(u64, __x86_stuffs_count);
DECLARE_PER_CPU(u64, __x86_ctxsw_count);
#endif
#else
static inline void x86_set_skl_return_thunk(void) {}

#define CALL_DEPTH_ACCOUNT ""

#endif

#ifdef CONFIG_RETPOLINE

#define GEN(reg) \
	extern retpoline_thunk_t __x86_indirect_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN

#define GEN(reg)						\
	extern retpoline_thunk_t __x86_indirect_call_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN

#define GEN(reg)						\
	extern retpoline_thunk_t __x86_indirect_jump_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN

#ifdef CONFIG_X86_64

/*
 * Inline asm uses the %V modifier which is only in newer GCC
 * which is ensured when CONFIG_RETPOLINE is defined.
 */
# define CALL_NOSPEC						\
	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"call __x86_indirect_thunk_%V[thunk_target]\n",		\
	X86_FEATURE_RETPOLINE,					\
	"lfence;\n"						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "r" (addr)

#else /* CONFIG_X86_32 */
/*
 * For i386 we use the original ret-equivalent retpoline, because
 * otherwise we'll run out of registers. We don't care about CET
 * here, anyway.
 */
# define CALL_NOSPEC						\
	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"       jmp    904f;\n"					\
	"       .align 16\n"					\
	"901:	call   903f;\n"					\
	"902:	pause;\n"					\
	"	lfence;\n"					\
	"       jmp    902b;\n"					\
	"       .align 16\n"					\
	"903:	lea    4(%%esp), %%esp;\n"			\
	"       pushl  %[thunk_target];\n"			\
	"       ret;\n"						\
	"       .align 16\n"					\
	"904:	call   901b;\n",				\
	X86_FEATURE_RETPOLINE,					\
	"lfence;\n"						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
#else /* No retpoline for C / inline asm */
# define CALL_NOSPEC "call *%[thunk_target]\n"
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
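
/*
 * Illustrative use from C (schematic only; "func" and the output constraint
 * are hypothetical, and a real call site must spell out the full clobber
 * list for the target's calling convention):
 *
 *	asm volatile(CALL_NOSPEC
 *		     : "=a" (ret)
 *		     : THUNK_TARGET(func)
 *		     : "memory");
 */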

/* The Spectre V2 mitigation variants */
enum spectre_v2_mitigation {
	SPECTRE_V2_NONE,
	SPECTRE_V2_RETPOLINE,
	SPECTRE_V2_LFENCE,
	SPECTRE_V2_EIBRS,
	SPECTRE_V2_EIBRS_RETPOLINE,
	SPECTRE_V2_EIBRS_LFENCE,
	SPECTRE_V2_IBRS,
};

/* The indirect branch speculation control variants */
enum spectre_v2_user_mitigation {
	SPECTRE_V2_USER_NONE,
	SPECTRE_V2_USER_STRICT,
	SPECTRE_V2_USER_STRICT_PREFERRED,
	SPECTRE_V2_USER_PRCTL,
	SPECTRE_V2_USER_SECCOMP,
};

/* The Speculative Store Bypass disable variants */
enum ssb_mitigation {
	SPEC_STORE_BYPASS_NONE,
	SPEC_STORE_BYPASS_DISABLE,
	SPEC_STORE_BYPASS_PRCTL,
	SPEC_STORE_BYPASS_SECCOMP,
};

extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];

static __always_inline
void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
{
	asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
		: : "c" (msr),
		    "a" ((u32)val),
		    "d" ((u32)(val >> 32)),
		    [feature] "i" (feature)
		: "memory");
}

static inline void indirect_branch_prediction_barrier(void)
{
	u64 val = PRED_CMD_IBPB;

	alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
}

/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;
DECLARE_PER_CPU(u64, x86_spec_ctrl_current);
extern void update_spec_ctrl_cond(u64 val);
extern u64 spec_ctrl_current(void);

/*
 * With retpoline, we must use IBRS to restrict branch prediction
 * before calling into firmware.
 *
 * (Implemented as CPP macros due to header hell.)
 */
#define firmware_restrict_branch_speculation_start()			\
do {									\
	preempt_disable();						\
	alternative_msr_write(MSR_IA32_SPEC_CTRL,			\
			      spec_ctrl_current() | SPEC_CTRL_IBRS,	\
			      X86_FEATURE_USE_IBRS_FW);			\
	alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,		\
			      X86_FEATURE_USE_IBPB_FW);			\
} while (0)

#define firmware_restrict_branch_speculation_end()			\
do {									\
	alternative_msr_write(MSR_IA32_SPEC_CTRL,			\
			      spec_ctrl_current(),			\
			      X86_FEATURE_USE_IBRS_FW);			\
	preempt_enable();						\
} while (0)
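
/*
 * Illustrative (hypothetical) call site; the bracket disables preemption,
 * so the firmware call in between must not sleep:
 *
 *	firmware_restrict_branch_speculation_start();
 *	status = invoke_firmware_service();	(hypothetical firmware call)
 *	firmware_restrict_branch_speculation_end();
 */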

DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

DECLARE_STATIC_KEY_FALSE(mds_user_clear);
DECLARE_STATIC_KEY_FALSE(mds_idle_clear);

DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);

#include <asm/segment.h>

/**
 * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * This uses the otherwise unused and obsolete VERW instruction in
 * combination with microcode which triggers a CPU buffer flush when the
 * instruction is executed.
 */
static __always_inline void mds_clear_cpu_buffers(void)
{
	static const u16 ds = __KERNEL_DS;

	/*
	 * Has to be the memory-operand variant because only that
	 * guarantees the CPU buffer flush functionality according to
	 * documentation. The register-operand variant does not.
	 * Works with any segment selector, but a valid writable
	 * data segment is the fastest variant.
	 *
	 * "cc" clobber is required because VERW modifies ZF.
	 */
	asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}

/**
 * mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static __always_inline void mds_user_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_user_clear))
		mds_clear_cpu_buffers();
}

/**
 * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static __always_inline void mds_idle_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_idle_clear))
		mds_clear_cpu_buffers();
}

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */