/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
struct io_bitmap;
struct vm86;

#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <uapi/asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeatures.h>
#include <asm/cpuid.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>
#include <asm/fpu/types.h>
#include <asm/unwind_hints.h>
#include <asm/vmxfeatures.h>
#include <asm/vdso/processor.h>

#include <linux/personality.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/mem_encrypt.h>

/*
 * We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN 0

#define HBP_NUM 4

/*
 * These alignment constraints are for performance in the vSMP case,
 * but in the task_struct case we must also meet hardware imposed
 * alignment requirements of the FPU state:
 */
#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN __alignof__(union fpregs_state)
# define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif

enum tlb_infos {
	ENTRIES,
	NR_INFO
};

extern u16 __read_mostly tlb_lli_4k[NR_INFO];
extern u16 __read_mostly tlb_lli_2m[NR_INFO];
extern u16 __read_mostly tlb_lli_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_4k[NR_INFO];
extern u16 __read_mostly tlb_lld_2m[NR_INFO];
extern u16 __read_mostly tlb_lld_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_1g[NR_INFO];

/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head_32.S, so think twice
 * before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8 x86; /* CPU family */
	__u8 x86_vendor; /* CPU vendor */
	__u8 x86_model;
	__u8 x86_stepping;
#ifdef CONFIG_X86_64
	/* Number of 4K pages in DTLB/ITLB combined: */
	int x86_tlbsize;
#endif
#ifdef CONFIG_X86_VMX_FEATURE_NAMES
	__u32 vmx_capability[NVMXINTS];
#endif
	__u8 x86_virt_bits;
	__u8 x86_phys_bits;
	/* CPUID returned core id bits: */
	__u8 x86_coreid_bits;
	__u8 cu_id;
	/* Max extended CPUID function supported: */
	__u32 extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
	int cpuid_level;
	/*
	 * Align to size of unsigned long because the x86_capability array
	 * is passed to bitops, which require the alignment. The unnamed
	 * union enforces that the array is aligned to the size of
	 * unsigned long.
	 */
	union {
		__u32 x86_capability[NCAPINTS + NBUGINTS];
		unsigned long x86_capability_alignment;
	};
	char x86_vendor_id[16];
	char x86_model_id[64];
	/* in KB - valid for CPUs which support this call: */
	unsigned int x86_cache_size;
	int x86_cache_alignment; /* In bytes */
	/* Cache QoS architectural values, valid only on the BSP: */
	int x86_cache_max_rmid; /* max index */
	int x86_cache_occ_scale; /* scale to bytes */
	int x86_cache_mbm_width_offset;
	int x86_power;
	unsigned long loops_per_jiffy;
	/* Protected processor identification number */
	u64 ppin;
	/* cpuid returned max cores value: */
	u16 x86_max_cores;
	u16 apicid;
	u16 initial_apicid;
	u16 x86_clflush_size;
	/* number of cores as seen by the OS: */
	u16 booted_cores;
	/* Physical processor id: */
	u16 phys_proc_id;
	/* Logical processor id: */
	u16 logical_proc_id;
	/* Core id: */
	u16 cpu_core_id;
	u16 cpu_die_id;
	u16 logical_die_id;
	/* Index into per_cpu list: */
	u16 cpu_index;
	/* Is SMT active on this core? */
	bool smt_active;
	u32 microcode;
	/* Address space bits used by the cache internally */
	u8 x86_cache_bits;
	unsigned initialized : 1;
} __randomize_layout;

#define X86_VENDOR_INTEL 0
#define X86_VENDOR_CYRIX 1
#define X86_VENDOR_AMD 2
#define X86_VENDOR_UMC 3
#define X86_VENDOR_CENTAUR 5
#define X86_VENDOR_TRANSMETA 7
#define X86_VENDOR_NSC 8
#define X86_VENDOR_HYGON 9
#define X86_VENDOR_ZHAOXIN 10
#define X86_VENDOR_VORTEX 11
#define X86_VENDOR_NUM 12

#define X86_VENDOR_UNKNOWN 0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 new_cpu_data;

extern __u32 cpu_caps_cleared[NCAPINTS + NBUGINTS];
extern __u32 cpu_caps_set[NCAPINTS + NBUGINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu) per_cpu(cpu_info, cpu)
#else
#define cpu_info boot_cpu_data
#define cpu_data(cpu) boot_cpu_data
#endif

extern const struct seq_operations cpuinfo_op;

#define cache_line_size() (boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);

static inline unsigned long long l1tf_pfn_limit(void)
{
	return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
}
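
/*
 * Worked example (illustrative, not part of the original header): with
 * x86_cache_bits == 46 and PAGE_SHIFT == 12, l1tf_pfn_limit() returns
 * BIT_ULL(33). Only PFNs below 2^33 (physical addresses in the lower half
 * of the 46-bit, cache-visible address space) are treated as safe for
 * L1TF purposes.
 */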

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);

/*
 * Friendlier CR3 helpers.
 */
static inline unsigned long read_cr3_pa(void)
{
	return __read_cr3() & CR3_ADDR_MASK;
}

static inline unsigned long native_read_cr3_pa(void)
{
	return __native_read_cr3() & CR3_ADDR_MASK;
}

static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__sme_pa(pgdir));
}
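
/*
 * Usage sketch (illustrative): read_cr3_pa() masks off the PCID and flag
 * bits and returns only the physical address of the current PGD, so the
 * active page-table root can be reached with something like:
 *
 *	pgd_t *pgd = __va(read_cr3_pa());
 *
 * load_cr3() goes the other way and additionally applies the SME
 * encryption mask via __sme_pa() before writing CR3.
 */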

/*
 * Note that while the legacy 'TSS' name comes from 'Task State Segment',
 * on modern x86 CPUs the TSS also holds information important to 64-bit mode,
 * unrelated to the task-switch mechanism:
 */
#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short back_link, __blh;
	unsigned long sp0;
	unsigned short ss0, __ss0h;
	unsigned long sp1;

	/*
	 * We don't use ring 1, so ss1 is a convenient scratch space in
	 * the same cacheline as sp0. We use ss1 to cache the value in
	 * MSR_IA32_SYSENTER_CS. When we context switch
	 * MSR_IA32_SYSENTER_CS, we first check if the new value being
	 * written matches ss1, and, if it's not, then we wrmsr the new
	 * value and update ss1.
	 *
	 * The only reason we context switch MSR_IA32_SYSENTER_CS is
	 * that we set it to zero in vm86 tasks to avoid corrupting the
	 * stack if we were to go through the sysenter path from vm86
	 * mode.
	 */
	unsigned short ss1; /* MSR_IA32_SYSENTER_CS */

	unsigned short __ss1h;
	unsigned long sp2;
	unsigned short ss2, __ss2h;
	unsigned long __cr3;
	unsigned long ip;
	unsigned long flags;
	unsigned long ax;
	unsigned long cx;
	unsigned long dx;
	unsigned long bx;
	unsigned long sp;
	unsigned long bp;
	unsigned long si;
	unsigned long di;
	unsigned short es, __esh;
	unsigned short cs, __csh;
	unsigned short ss, __ssh;
	unsigned short ds, __dsh;
	unsigned short fs, __fsh;
	unsigned short gs, __gsh;
	unsigned short ldt, __ldth;
	unsigned short trace;
	unsigned short io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
	u32 reserved1;
	u64 sp0;
	u64 sp1;

	/*
	 * Since Linux does not use ring 2, the 'sp2' slot is unused by
	 * hardware. entry_SYSCALL_64 uses it as scratch space to stash
	 * the user RSP value.
	 */
	u64 sp2;

	u64 reserved2;
	u64 ist[7];
	u32 reserved3;
	u32 reserved4;
	u16 reserved5;
	u16 io_bitmap_base;

} __attribute__((packed));
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS 65536
#define IO_BITMAP_BYTES (IO_BITMAP_BITS / BITS_PER_BYTE)
#define IO_BITMAP_LONGS (IO_BITMAP_BYTES / sizeof(long))
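
/*
 * Size arithmetic, spelled out as an aside: 65536 bits cover the full
 * 16-bit I/O port space, so IO_BITMAP_BYTES is 65536 / 8 = 8192 and
 * IO_BITMAP_LONGS is 8192 / sizeof(long), i.e. 1024 longs on 64-bit
 * kernels and 2048 on 32-bit ones.
 */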

#define IO_BITMAP_OFFSET_VALID_MAP \
	(offsetof(struct tss_struct, io_bitmap.bitmap) - \
	 offsetof(struct tss_struct, x86_tss))

#define IO_BITMAP_OFFSET_VALID_ALL \
	(offsetof(struct tss_struct, io_bitmap.mapall) - \
	 offsetof(struct tss_struct, x86_tss))

#ifdef CONFIG_X86_IOPL_IOPERM
/*
 * The extra sizeof(unsigned long) accounts for the additional "long" at
 * the end of the I/O bitmap. The limit is inclusive, i.e. it addresses
 * the last valid byte.
 */
# define __KERNEL_TSS_LIMIT \
	(IO_BITMAP_OFFSET_VALID_ALL + IO_BITMAP_BYTES + \
	 sizeof(unsigned long) - 1)
#else
# define __KERNEL_TSS_LIMIT \
	(offsetof(struct tss_struct, x86_tss) + sizeof(struct x86_hw_tss) - 1)
#endif

/* Base offset outside of TSS_LIMIT so unprivileged I/O causes #GP */
#define IO_BITMAP_OFFSET_INVALID (__KERNEL_TSS_LIMIT + 1)
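
/*
 * How these offsets are used (a sketch of the mechanism): the CPU consults
 * the I/O bitmap at x86_tss.io_bitmap_base whenever CPL > IOPL. Pointing
 * the base at IO_BITMAP_OFFSET_VALID_MAP enables the per-task ioperm()
 * bitmap, IO_BITMAP_OFFSET_VALID_ALL selects the all-zero 'mapall' bitmap
 * that emulates iopl(3), and IO_BITMAP_OFFSET_INVALID places the bitmap
 * outside the TSS limit so any user space I/O instruction raises #GP.
 */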

struct entry_stack {
	char stack[PAGE_SIZE];
};

struct entry_stack_page {
	struct entry_stack stack;
} __aligned(PAGE_SIZE);

/*
 * All IO bitmap related data stored in the TSS:
 */
struct x86_io_bitmap {
	/* The sequence number of the last active bitmap. */
	u64 prev_sequence;

	/*
	 * Store the dirty size of the last io bitmap offender. The next
	 * one will have to do the cleanup as the switch out to a non io
	 * bitmap user will just set x86_tss.io_bitmap_base to a value
	 * outside of the TSS limit. So for sane tasks there is no need to
	 * actually touch the io_bitmap at all.
	 */
	unsigned int prev_max;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long bitmap[IO_BITMAP_LONGS + 1];

	/*
	 * Special I/O bitmap to emulate IOPL(3). All bytes zero,
	 * except the additional byte at the end.
	 */
	unsigned long mapall[IO_BITMAP_LONGS + 1];
};

struct tss_struct {
	/*
	 * The fixed hardware portion. This must not cross a page boundary
	 * at risk of violating the SDM's advice and potentially triggering
	 * errata.
	 */
	struct x86_hw_tss x86_tss;

	struct x86_io_bitmap io_bitmap;
} __aligned(PAGE_SIZE);

DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw);

/* Per CPU interrupt stacks */
struct irq_stack {
	char stack[IRQ_STACK_SIZE];
} __aligned(IRQ_STACK_SIZE);

#ifdef CONFIG_X86_64
struct fixed_percpu_data {
	/*
	 * GCC hardcodes the stack canary as %gs:40. Since the
	 * irq_stack is the object at %gs:0, we reserve the bottom
	 * 48 bytes of the irq stack for the canary.
	 *
	 * Once we are willing to require -mstack-protector-guard-symbol=
	 * support for x86_64 stackprotector, we can get rid of this.
	 */
	char gs_base[40];
	unsigned long stack_canary;
};
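
/*
 * Layout note (illustrative): gs_base[40] pads the structure so that
 * stack_canary lands exactly at offset 40 from the per-CPU GS base,
 * matching the %gs:40 slot that GCC emits for stack-protector checks.
 * A hypothetical compile-time check of that assumption would read:
 *
 *	static_assert(offsetof(struct fixed_percpu_data, stack_canary) == 40);
 */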

DECLARE_PER_CPU_FIRST(struct fixed_percpu_data, fixed_percpu_data) __visible;
DECLARE_INIT_PER_CPU(fixed_percpu_data);

static inline unsigned long cpu_kernelmode_gs_base(int cpu)
{
	return (unsigned long)per_cpu(fixed_percpu_data.gs_base, cpu);
}

extern asmlinkage void ignore_sysret(void);

/* Save actual FS/GS selectors and bases to current->thread */
void current_save_fsgs(void);
#else /* X86_64 */
#ifdef CONFIG_STACKPROTECTOR
DECLARE_PER_CPU(unsigned long, __stack_chk_guard);
#endif
#endif /* !X86_64 */

struct perf_event;

struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
#ifdef CONFIG_X86_32
	unsigned long sp0;
#endif
	unsigned long sp;
#ifdef CONFIG_X86_32
	unsigned long sysenter_cs;
#else
	unsigned short es;
	unsigned short ds;
	unsigned short fsindex;
	unsigned short gsindex;
#endif

#ifdef CONFIG_X86_64
	unsigned long fsbase;
	unsigned long gsbase;
#else
	/*
	 * XXX: this could presumably be unsigned short. Alternatively,
	 * 32-bit kernels could be taught to use fsindex instead.
	 */
	unsigned long fs;
	unsigned long gs;
#endif

	/* Save middle states of ptrace breakpoints */
	struct perf_event *ptrace_bps[HBP_NUM];
	/* Debug status used for traps, single steps, etc... */
	unsigned long virtual_dr6;
	/* Keep track of the exact dr7 value set by the user */
	unsigned long ptrace_dr7;
	/* Fault info: */
	unsigned long cr2;
	unsigned long trap_nr;
	unsigned long error_code;
#ifdef CONFIG_VM86
	/* Virtual 86 mode info */
	struct vm86 *vm86;
#endif
	/* IO permissions: */
	struct io_bitmap *io_bitmap;

	/*
	 * IOPL. Privilege level dependent I/O permission which is
	 * emulated via the I/O bitmap to prevent user space from disabling
	 * interrupts.
	 */
	unsigned long iopl_emul;

	unsigned int iopl_warn:1;
	unsigned int sig_on_uaccess_err:1;

	/*
	 * Protection Keys Register for Userspace. Loaded immediately on
	 * context switch. Store it in thread_struct to avoid a lookup in
	 * the task's FPU xstate buffer. This value is only valid when a
	 * task is scheduled out. For 'current' the authoritative source of
	 * PKRU is the hardware itself.
	 */
	u32 pkru;

	/* Floating point and extended processor state */
	struct fpu fpu;
	/*
	 * WARNING: 'fpu' is dynamically-sized. It *MUST* be at
	 * the end.
	 */
};

extern void fpu_thread_struct_whitelist(unsigned long *offset, unsigned long *size);

static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	fpu_thread_struct_whitelist(offset, size);
}

static inline void
native_load_sp0(unsigned long sp0)
{
	this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
}

static __always_inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}

static __always_inline unsigned long current_top_of_stack(void)
{
	/*
	 * We can't read directly from tss.sp0: sp0 on x86_32 is special in
	 * and around vm86 mode and sp0 on x86_64 is special because of the
	 * entry trampoline.
	 */
	return this_cpu_read_stable(pcpu_hot.top_of_stack);
}

static __always_inline bool on_thread_stack(void)
{
	return (unsigned long)(current_top_of_stack() -
			       current_stack_pointer) < THREAD_SIZE;
}
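
/*
 * A note on the comparison above: the subtraction is evaluated as an
 * unsigned value, so a stack pointer above the top of the thread stack
 * wraps around to a huge number and fails the '< THREAD_SIZE' test.
 * Only a stack pointer within THREAD_SIZE bytes below the top, i.e. one
 * that actually lies on the thread stack, makes this return true.
 */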

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else

static inline void load_sp0(unsigned long sp0)
{
	native_load_sp0(sp0);
}

#endif /* CONFIG_PARAVIRT_XXL */

unsigned long __get_wchan(struct task_struct *p);

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void amd_e400_c1e_apic_setup(void);

extern unsigned long boot_option_idle_override;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
			 IDLE_POLL};

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);


/* Defined in head.S */
extern struct desc_ptr early_gdt_descr;

extern void switch_gdt_and_percpu_base(int);
extern void load_direct_gdt(int);
extern void load_fixmap_gdt(int);
extern void cpu_init(void);
extern void cpu_init_secondary(void);
extern void cpu_init_exception_handling(void);
extern void cr4_init(void);

static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}
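
/*
 * Usage sketch (illustrative): callers normally read-modify-write the MSR,
 * e.g. block-stepping support sets DEBUGCTLMSR_BTF roughly like this:
 *
 *	unsigned long debugctl = get_debugctlmsr();
 *
 *	debugctl |= DEBUGCTLMSR_BTF;
 *	update_debugctlmsr(debugctl);
 *
 * The family check above only matters when CONFIG_X86_DEBUGCTLMSR is not
 * set; pre-family-6 CPUs are assumed not to have the MSR at all.
 */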

extern void set_task_blockstep(struct task_struct *task, bool on);

/* Boot loader type from the setup header: */
extern int bootloader_type;
extern int bootloader_version;

extern char ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH ""
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH "prefetcht0 %P1"
#endif

/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6,
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchnta %P1",
			  X86_FEATURE_XMM,
			  "m" (*(const char *)x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static __always_inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchw %P1",
			  X86_FEATURE_3DNOWPREFETCH,
			  "m" (*(const char *)x));
}

static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}
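
/*
 * How the alternatives above behave, summarized as an aside: at boot the
 * alternatives framework patches the prefetch instruction in place. On
 * CPUs with X86_FEATURE_XMM, prefetch() uses 'prefetchnta'; on CPUs with
 * X86_FEATURE_3DNOWPREFETCH, prefetchw() uses 'prefetchw'; otherwise both
 * fall back to BASE_PREFETCH ('prefetcht0' on 64-bit, nothing on 32-bit).
 * Prefetches are pure hints and never fault, so passing a speculative or
 * even unmapped pointer is harmless.
 */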

#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
			   TOP_OF_KERNEL_STACK_PADDING)

#define task_top_of_stack(task) ((unsigned long)(task_pt_regs(task) + 1))

#define task_pt_regs(task) \
({ \
	unsigned long __ptr = (unsigned long)task_stack_page(task); \
	__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING; \
	((struct pt_regs *)__ptr) - 1; \
})
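
/*
 * Stack layout implied by task_pt_regs(), sketched for illustration:
 *
 *	task_stack_page(task)                      <- lowest address
 *	  ...
 *	struct pt_regs                             <- task_pt_regs(task)
 *	TOP_OF_KERNEL_STACK_PADDING bytes
 *	task_stack_page(task) + THREAD_SIZE        <- highest address
 *
 * The user-mode register frame sits just below the (possibly zero-sized)
 * padding at the very top of the kernel stack, and task_top_of_stack()
 * evaluates to the address immediately past that frame.
 */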

#ifdef CONFIG_X86_32
#define INIT_THREAD { \
	.sp0 = TOP_OF_INIT_STACK, \
	.sysenter_cs = __KERNEL_CS, \
}

#define KSTK_ESP(task) (task_pt_regs(task)->sp)

#else
#define INIT_THREAD { }

extern unsigned long KSTK_ESP(struct task_struct *task);

#endif /* CONFIG_X86_64 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
			 unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap().
 */
#define __TASK_UNMAPPED_BASE(task_size) (PAGE_ALIGN(task_size / 3))
#define TASK_UNMAPPED_BASE __TASK_UNMAPPED_BASE(TASK_SIZE_LOW)
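
/*
 * Worked example (illustrative numbers): for a classic 32-bit 3 GiB user
 * address space, task_size / 3 is 1 GiB, so the mmap search base starts
 * at the page-aligned 0x40000000. On 64-bit, TASK_SIZE_LOW is used so
 * that the default base stays below the 47-bit boundary even for tasks
 * that may later opt into the larger 5-level paging address space.
 */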

#define KSTK_EIP(task) (task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr) get_tsc_mode((adr))
#define SET_TSC_CTL(val) set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

DECLARE_PER_CPU(u64, msr_misc_features_shadow);

extern u16 get_llc_id(unsigned int cpu);

#ifdef CONFIG_CPU_SUP_AMD
extern u32 amd_get_nodes_per_socket(void);
extern u32 amd_get_highest_perf(void);
#else
static inline u32 amd_get_nodes_per_socket(void) { return 0; }
static inline u32 amd_get_highest_perf(void) { return 0; }
#endif

extern unsigned long arch_align_stack(unsigned long sp);
void free_init_pages(const char *what, unsigned long begin, unsigned long end);
extern void free_kernel_image_pages(const char *what, void *begin, void *end);

void default_idle(void);
#ifdef CONFIG_XEN
bool xen_set_default_idle(void);
#else
#define xen_set_default_idle 0
#endif

void __noreturn stop_this_cpu(void *dummy);
void microcode_check(void);

enum l1tf_mitigations {
	L1TF_MITIGATION_OFF,
	L1TF_MITIGATION_FLUSH_NOWARN,
	L1TF_MITIGATION_FLUSH,
	L1TF_MITIGATION_FLUSH_NOSMT,
	L1TF_MITIGATION_FULL,
	L1TF_MITIGATION_FULL_FORCE
};

extern enum l1tf_mitigations l1tf_mitigation;

enum mds_mitigations {
	MDS_MITIGATION_OFF,
	MDS_MITIGATION_FULL,
	MDS_MITIGATION_VMWERV,
};

#ifdef CONFIG_X86_SGX
int arch_memory_failure(unsigned long pfn, int flags);
#define arch_memory_failure arch_memory_failure

bool arch_is_platform_page(u64 paddr);
#define arch_is_platform_page arch_is_platform_page
#endif

#endif /* _ASM_X86_PROCESSOR_H */