/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
struct io_bitmap;
struct vm86;

#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <uapi/asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>
#include <asm/fpu/types.h>
#include <asm/unwind_hints.h>
#include <asm/vmxfeatures.h>
#include <asm/vdso/processor.h>

#include <linux/personality.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/mem_encrypt.h>

/*
 * We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN 0

#define HBP_NUM 4

/*
 * These alignment constraints are for performance in the vSMP case,
 * but in the task_struct case we must also meet hardware imposed
 * alignment requirements of the FPU state:
 */
#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN __alignof__(union fpregs_state)
# define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif

enum tlb_infos {
        ENTRIES,
        NR_INFO
};

extern u16 __read_mostly tlb_lli_4k[NR_INFO];
extern u16 __read_mostly tlb_lli_2m[NR_INFO];
extern u16 __read_mostly tlb_lli_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_4k[NR_INFO];
extern u16 __read_mostly tlb_lld_2m[NR_INFO];
extern u16 __read_mostly tlb_lld_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_1g[NR_INFO];

/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head_32.S, so think twice
 * before touching them. [mj]
 */

struct cpuinfo_x86 {
        __u8 x86;               /* CPU family */
        __u8 x86_vendor;        /* CPU vendor */
        __u8 x86_model;
        __u8 x86_stepping;
#ifdef CONFIG_X86_64
        /* Number of 4K pages in DTLB/ITLB combined: */
        int x86_tlbsize;
#endif
#ifdef CONFIG_X86_VMX_FEATURE_NAMES
        __u32 vmx_capability[NVMXINTS];
#endif
        __u8 x86_virt_bits;
        __u8 x86_phys_bits;
        /* CPUID returned core id bits: */
        __u8 x86_coreid_bits;
        __u8 cu_id;
        /* Max extended CPUID function supported: */
        __u32 extended_cpuid_level;
        /* Maximum supported CPUID level, -1=no CPUID: */
        int cpuid_level;
        /*
         * Align to size of unsigned long because the x86_capability array
         * is passed to bitops which require the alignment. Use unnamed
         * union to enforce the array is aligned to size of unsigned long.
         */
        union {
                __u32 x86_capability[NCAPINTS + NBUGINTS];
                unsigned long x86_capability_alignment;
        };
        char x86_vendor_id[16];
        char x86_model_id[64];
        /* in KB - valid for CPUS which support this call: */
        unsigned int x86_cache_size;
        int x86_cache_alignment;        /* In bytes */
        /* Cache QoS architectural values: */
        int x86_cache_max_rmid;         /* max index */
        int x86_cache_occ_scale;        /* scale to bytes */
        int x86_power;
        unsigned long loops_per_jiffy;
        /* cpuid returned max cores value: */
        u16 x86_max_cores;
        u16 apicid;
        u16 initial_apicid;
        u16 x86_clflush_size;
        /* number of cores as seen by the OS: */
        u16 booted_cores;
        /* Physical processor id: */
        u16 phys_proc_id;
        /* Logical processor id: */
        u16 logical_proc_id;
        /* Core id: */
        u16 cpu_core_id;
        u16 cpu_die_id;
        u16 logical_die_id;
        /* Index into per_cpu list: */
        u16 cpu_index;
        u32 microcode;
        /* Address space bits used by the cache internally */
        u8 x86_cache_bits;
        unsigned initialized : 1;
} __randomize_layout;

struct cpuid_regs {
        u32 eax, ebx, ecx, edx;
};

enum cpuid_regs_idx {
        CPUID_EAX = 0,
        CPUID_EBX,
        CPUID_ECX,
        CPUID_EDX,
};

#define X86_VENDOR_INTEL 0
#define X86_VENDOR_CYRIX 1
#define X86_VENDOR_AMD 2
#define X86_VENDOR_UMC 3
#define X86_VENDOR_CENTAUR 5
#define X86_VENDOR_TRANSMETA 7
#define X86_VENDOR_NSC 8
#define X86_VENDOR_HYGON 9
#define X86_VENDOR_ZHAOXIN 10
#define X86_VENDOR_NUM 11

#define X86_VENDOR_UNKNOWN 0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 new_cpu_data;

extern __u32 cpu_caps_cleared[NCAPINTS + NBUGINTS];
extern __u32 cpu_caps_set[NCAPINTS + NBUGINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu) per_cpu(cpu_info, cpu)
#else
#define cpu_info boot_cpu_data
#define cpu_data(cpu) boot_cpu_data
#endif

extern const struct seq_operations cpuinfo_op;

#define cache_line_size() (boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);

static inline unsigned long long l1tf_pfn_limit(void)
{
        return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
}
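
/*
 * Worked example (illustrative): with boot_cpu_data.x86_cache_bits == 36
 * and 4K pages (PAGE_SHIFT == 12), l1tf_pfn_limit() returns
 * BIT_ULL(36 - 1 - 12) == 1ULL << 23; 2^23 page frames of 4KB each cover
 * the first 32GB of physical memory.
 */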

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);

#ifdef CONFIG_X86_32
extern int have_cpuid_p(void);
#else
static inline int have_cpuid_p(void)
{
        return 1;
}
#endif
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
                                unsigned int *ecx, unsigned int *edx)
{
        /* ecx is often an input as well as an output. */
        asm volatile("cpuid"
            : "=a" (*eax),
              "=b" (*ebx),
              "=c" (*ecx),
              "=d" (*edx)
            : "0" (*eax), "2" (*ecx)
            : "memory");
}
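
/*
 * Example usage (an illustrative sketch, not part of this header's API):
 * CPUID leaf 0 returns the vendor string in EBX, EDX, ECX order, e.g.
 * "GenuineIntel":
 *
 *      unsigned int eax = 0, ebx, ecx = 0, edx;
 *      char vendor[13];
 *
 *      native_cpuid(&eax, &ebx, &ecx, &edx);
 *      memcpy(vendor + 0, &ebx, 4);
 *      memcpy(vendor + 4, &edx, 4);
 *      memcpy(vendor + 8, &ecx, 4);
 *      vendor[12] = '\0';
 */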

#define native_cpuid_reg(reg)                                           \
static inline unsigned int native_cpuid_##reg(unsigned int op)          \
{                                                                       \
        unsigned int eax = op, ebx, ecx = 0, edx;                       \
                                                                        \
        native_cpuid(&eax, &ebx, &ecx, &edx);                           \
                                                                        \
        return reg;                                                     \
}

/*
 * Native CPUID functions returning a single datum.
 */
native_cpuid_reg(eax)
native_cpuid_reg(ebx)
native_cpuid_reg(ecx)
native_cpuid_reg(edx)
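
/*
 * For example, native_cpuid_eax(0) yields the highest supported basic
 * CPUID leaf, and native_cpuid_eax(1) the family/model/stepping word.
 */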

/*
 * Friendlier CR3 helpers.
 */
static inline unsigned long read_cr3_pa(void)
{
        return __read_cr3() & CR3_ADDR_MASK;
}

static inline unsigned long native_read_cr3_pa(void)
{
        return __native_read_cr3() & CR3_ADDR_MASK;
}

static inline void load_cr3(pgd_t *pgdir)
{
        write_cr3(__sme_pa(pgdir));
}
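
/*
 * Illustrative use of the helpers above: read_cr3_pa() masks off the
 * PCID/flag bits, so the result is a plain physical address and the
 * current page-table root can be reached with something like:
 *
 *      pgd_t *pgd = __va(read_cr3_pa());
 */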

/*
 * Note that while the legacy 'TSS' name comes from 'Task State Segment',
 * on modern x86 CPUs the TSS also holds information important to 64-bit mode,
 * unrelated to the task-switch mechanism:
 */
#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
        unsigned short back_link, __blh;
        unsigned long sp0;
        unsigned short ss0, __ss0h;
        unsigned long sp1;

        /*
         * We don't use ring 1, so ss1 is a convenient scratch space in
         * the same cacheline as sp0. We use ss1 to cache the value in
         * MSR_IA32_SYSENTER_CS. When we context switch
         * MSR_IA32_SYSENTER_CS, we first check if the new value being
         * written matches ss1, and, if it's not, then we wrmsr the new
         * value and update ss1.
         *
         * The only reason we context switch MSR_IA32_SYSENTER_CS is
         * that we set it to zero in vm86 tasks to avoid corrupting the
         * stack if we were to go through the sysenter path from vm86
         * mode.
         */
        unsigned short ss1;     /* MSR_IA32_SYSENTER_CS */

        unsigned short __ss1h;
        unsigned long sp2;
        unsigned short ss2, __ss2h;
        unsigned long __cr3;
        unsigned long ip;
        unsigned long flags;
        unsigned long ax;
        unsigned long cx;
        unsigned long dx;
        unsigned long bx;
        unsigned long sp;
        unsigned long bp;
        unsigned long si;
        unsigned long di;
        unsigned short es, __esh;
        unsigned short cs, __csh;
        unsigned short ss, __ssh;
        unsigned short ds, __dsh;
        unsigned short fs, __fsh;
        unsigned short gs, __gsh;
        unsigned short ldt, __ldth;
        unsigned short trace;
        unsigned short io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
        u32 reserved1;
        u64 sp0;

        /*
         * We store cpu_current_top_of_stack in sp1 so it's always accessible.
         * Linux does not use ring 1, so sp1 is not otherwise needed.
         */
        u64 sp1;

        /*
         * Since Linux does not use ring 2, the 'sp2' slot is unused by
         * hardware. entry_SYSCALL_64 uses it as scratch space to stash
         * the user RSP value.
         */
        u64 sp2;

        u64 reserved2;
        u64 ist[7];
        u32 reserved3;
        u32 reserved4;
        u16 reserved5;
        u16 io_bitmap_base;

} __attribute__((packed));
#endif

/*
 * IO-bitmap sizes:
 */
#define IO_BITMAP_BITS 65536
#define IO_BITMAP_BYTES (IO_BITMAP_BITS / BITS_PER_BYTE)
#define IO_BITMAP_LONGS (IO_BITMAP_BYTES / sizeof(long))
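
/*
 * For reference: with BITS_PER_BYTE == 8 this works out to
 * IO_BITMAP_BYTES == 8192, and on a 64-bit kernel (sizeof(long) == 8)
 * IO_BITMAP_LONGS == 1024 - one bit for each of the 65536 I/O ports.
 */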

#define IO_BITMAP_OFFSET_VALID_MAP                              \
        (offsetof(struct tss_struct, io_bitmap.bitmap) -        \
         offsetof(struct tss_struct, x86_tss))

#define IO_BITMAP_OFFSET_VALID_ALL                              \
        (offsetof(struct tss_struct, io_bitmap.mapall) -        \
         offsetof(struct tss_struct, x86_tss))

#ifdef CONFIG_X86_IOPL_IOPERM
/*
 * sizeof(unsigned long) coming from an extra "long" at the end of the
 * iobitmap. The limit is inclusive, i.e. the last valid byte.
 */
# define __KERNEL_TSS_LIMIT                                     \
        (IO_BITMAP_OFFSET_VALID_ALL + IO_BITMAP_BYTES +         \
         sizeof(unsigned long) - 1)
#else
# define __KERNEL_TSS_LIMIT                                     \
        (offsetof(struct tss_struct, x86_tss) + sizeof(struct x86_hw_tss) - 1)
#endif

/* Base offset outside of TSS_LIMIT so unprivileged IO causes #GP */
#define IO_BITMAP_OFFSET_INVALID (__KERNEL_TSS_LIMIT + 1)

struct entry_stack {
        unsigned long words[64];
};

struct entry_stack_page {
        struct entry_stack stack;
} __aligned(PAGE_SIZE);

/*
 * All IO bitmap related data stored in the TSS:
 */
struct x86_io_bitmap {
        /* The sequence number of the last active bitmap. */
        u64 prev_sequence;

        /*
         * Store the dirty size of the last io bitmap offender. The next
         * one will have to do the cleanup as the switch out to a non io
         * bitmap user will just set x86_tss.io_bitmap_base to a value
         * outside of the TSS limit. So for sane tasks there is no need to
         * actually touch the io_bitmap at all.
         */
        unsigned int prev_max;

        /*
         * The extra 1 is there because the CPU will access an
         * additional byte beyond the end of the IO permission
         * bitmap. The extra byte must be all 1 bits, and must
         * be within the limit.
         */
        unsigned long bitmap[IO_BITMAP_LONGS + 1];

        /*
         * Special I/O bitmap to emulate IOPL(3). All bytes zero,
         * except the additional byte at the end.
         */
        unsigned long mapall[IO_BITMAP_LONGS + 1];
};

struct tss_struct {
        /*
         * The fixed hardware portion. This must not cross a page boundary
         * at risk of violating the SDM's advice and potentially triggering
         * errata.
         */
        struct x86_hw_tss x86_tss;

        struct x86_io_bitmap io_bitmap;
} __aligned(PAGE_SIZE);

DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw);

/* Per CPU interrupt stacks */
struct irq_stack {
        char stack[IRQ_STACK_SIZE];
} __aligned(IRQ_STACK_SIZE);

DECLARE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);

#ifdef CONFIG_X86_32
DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
#else
/* The RO copy can't be accessed with this_cpu_xyz(), so use the RW copy. */
#define cpu_current_top_of_stack cpu_tss_rw.x86_tss.sp1
#endif

#ifdef CONFIG_X86_64
struct fixed_percpu_data {
        /*
         * GCC hardcodes the stack canary as %gs:40. Since the
         * irq_stack is the object at %gs:0, we reserve the bottom
         * 48 bytes of the irq stack for the canary.
         */
        char gs_base[40];
        unsigned long stack_canary;
};

DECLARE_PER_CPU_FIRST(struct fixed_percpu_data, fixed_percpu_data) __visible;
DECLARE_INIT_PER_CPU(fixed_percpu_data);

static inline unsigned long cpu_kernelmode_gs_base(int cpu)
{
        return (unsigned long)per_cpu(fixed_percpu_data.gs_base, cpu);
}

DECLARE_PER_CPU(unsigned int, irq_count);
extern asmlinkage void ignore_sysret(void);

#if IS_ENABLED(CONFIG_KVM)
/* Save actual FS/GS selectors and bases to current->thread */
void save_fsgs_for_kvm(void);
#endif
#else /* X86_64 */
#ifdef CONFIG_STACKPROTECTOR
/*
 * Make sure the stack canary segment base is cache-line aligned:
 * "For Intel Atom processors, avoid non zero segment base address
 * that is not aligned to cache line boundary at all cost."
 * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
 */
struct stack_canary {
        char __pad[20];         /* canary at %gs:20 */
        unsigned long canary;
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
/* Per CPU softirq stack pointer */
DECLARE_PER_CPU(struct irq_stack *, softirq_stack_ptr);
#endif /* X86_64 */

extern unsigned int fpu_kernel_xstate_size;
extern unsigned int fpu_user_xstate_size;

struct perf_event;

typedef struct {
        unsigned long seg;
} mm_segment_t;

struct thread_struct {
        /* Cached TLS descriptors: */
        struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
#ifdef CONFIG_X86_32
        unsigned long sp0;
#endif
        unsigned long sp;
#ifdef CONFIG_X86_32
        unsigned long sysenter_cs;
#else
        unsigned short es;
        unsigned short ds;
        unsigned short fsindex;
        unsigned short gsindex;
#endif

#ifdef CONFIG_X86_64
        unsigned long fsbase;
        unsigned long gsbase;
#else
        /*
         * XXX: this could presumably be unsigned short. Alternatively,
         * 32-bit kernels could be taught to use fsindex instead.
         */
        unsigned long fs;
        unsigned long gs;
#endif

        /* Save middle states of ptrace breakpoints */
        struct perf_event *ptrace_bps[HBP_NUM];
        /* Debug status used for traps, single steps, etc... */
        unsigned long debugreg6;
        /* Keep track of the exact dr7 value set by the user */
        unsigned long ptrace_dr7;
        /* Fault info: */
        unsigned long cr2;
        unsigned long trap_nr;
        unsigned long error_code;
#ifdef CONFIG_VM86
        /* Virtual 86 mode info */
        struct vm86 *vm86;
#endif
        /* IO permissions: */
        struct io_bitmap *io_bitmap;

        /*
         * IOPL. Privilege level dependent I/O permission which is
         * emulated via the I/O bitmap to prevent user space from disabling
         * interrupts.
         */
        unsigned long iopl_emul;

        mm_segment_t addr_limit;

        unsigned int sig_on_uaccess_err:1;

        /* Floating point and extended processor state */
        struct fpu fpu;
        /*
         * WARNING: 'fpu' is dynamically-sized. It *MUST* be at
         * the end.
         */
};

/* Whitelist the FPU state from the task_struct for hardened usercopy. */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
                                                unsigned long *size)
{
        *offset = offsetof(struct thread_struct, fpu.state);
        *size = fpu_kernel_xstate_size;
}

/*
 * Thread-synchronous status.
 *
 * This is different from the flags in that nobody else
 * ever touches our thread-synchronous status, so we don't
 * have to worry about atomic accesses.
 */
#define TS_COMPAT 0x0002        /* 32bit syscall active (64BIT) */

static inline void
native_load_sp0(unsigned long sp0)
{
        this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
}

static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
        asm volatile("swapgs" ::: "memory");
#endif
}

static inline unsigned long current_top_of_stack(void)
{
        /*
         * We can't read directly from tss.sp0: sp0 on x86_32 is special in
         * and around vm86 mode and sp0 on x86_64 is special because of the
         * entry trampoline.
         */
        return this_cpu_read_stable(cpu_current_top_of_stack);
}

static inline bool on_thread_stack(void)
{
        return (unsigned long)(current_top_of_stack() -
                               current_stack_pointer) < THREAD_SIZE;
}

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#define __cpuid native_cpuid

static inline void load_sp0(unsigned long sp0)
{
        native_load_sp0(sp0);
}

#endif /* CONFIG_PARAVIRT_XXL */

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function
 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
                         unsigned int *eax, unsigned int *ebx,
                         unsigned int *ecx, unsigned int *edx)
{
        *eax = op;
        *ecx = 0;
        __cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
                               unsigned int *eax, unsigned int *ebx,
                               unsigned int *ecx, unsigned int *edx)
{
        *eax = op;
        *ecx = count;
        __cpuid(eax, ebx, ecx, edx);
}
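
/*
 * Typical use (an illustrative sketch): sub-leaves of CPUID leaf 4
 * enumerate the cache hierarchy on Intel CPUs; a cache type of 0 in
 * EAX[4:0] terminates the list:
 *
 *      unsigned int eax, ebx, ecx, edx, index = 0;
 *
 *      do {
 *              cpuid_count(4, index++, &eax, &ebx, &ecx, &edx);
 *      } while (eax & 0x1f);
 */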

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);

        return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);

        return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);

        return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);

        return edx;
}
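
/*
 * For example, cpuid_edx(1) returns the classic feature word; bit 25
 * set there means the CPU supports SSE.
 */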

/*
 * This function forces the icache and prefetched instruction stream to
 * catch up with reality in two very specific cases:
 *
 * a) Text was modified using one virtual address and is about to be executed
 *    from the same physical page at a different virtual address.
 *
 * b) Text was modified on a different CPU, may subsequently be
 *    executed on this CPU, and you want to make sure the new version
 *    gets executed. This generally means you're calling this in an IPI.
 *
 * If you're calling this for a different reason, you're probably doing
 * it wrong.
 */
static inline void sync_core(void)
{
        /*
         * There are quite a few ways to do this. IRET-to-self is nice
         * because it works on every CPU, at any CPL (so it's compatible
         * with paravirtualization), and it never exits to a hypervisor.
         * The only down sides are that it's a bit slow (it seems to be
         * a bit more than 2x slower than the fastest options) and that
         * it unmasks NMIs. The "push %cs" is needed because, in
         * paravirtual environments, __KERNEL_CS may not be a valid CS
         * value when we do IRET directly.
         *
         * In case NMI unmasking or performance ever becomes a problem,
         * the next best option appears to be MOV-to-CR2 and an
         * unconditional jump. That sequence also works on all CPUs,
         * but it will fault at CPL3 (i.e. Xen PV).
         *
         * CPUID is the conventional way, but it's nasty: it doesn't
         * exist on some 486-like CPUs, and it usually exits to a
         * hypervisor.
         *
         * Like all of Linux's memory ordering operations, this is a
         * compiler barrier as well.
         */
#ifdef CONFIG_X86_32
        asm volatile (
                "pushfl\n\t"
                "pushl %%cs\n\t"
                "pushl $1f\n\t"
                "iret\n\t"
                "1:"
                : ASM_CALL_CONSTRAINT : : "memory");
#else
        unsigned int tmp;

        asm volatile (
                UNWIND_HINT_SAVE
                "mov %%ss, %0\n\t"
                "pushq %q0\n\t"
                "pushq %%rsp\n\t"
                "addq $8, (%%rsp)\n\t"
                "pushfq\n\t"
                "mov %%cs, %0\n\t"
                "pushq %q0\n\t"
                "pushq $1f\n\t"
                "iretq\n\t"
                UNWIND_HINT_RESTORE
                "1:"
                : "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
#endif
}
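
/*
 * Typical use (a sketch, not a recipe from this header): after patching
 * kernel text on one CPU, make every other CPU execute sync_core()
 * before the modified instructions may run there, e.g. via
 * on_each_cpu() with a callback that simply calls sync_core().
 */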

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void amd_e400_c1e_apic_setup(void);

extern unsigned long boot_option_idle_override;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
                         IDLE_POLL};

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);


/* Defined in head.S */
extern struct desc_ptr early_gdt_descr;

extern void switch_to_new_gdt(int);
extern void load_direct_gdt(int);
extern void load_fixmap_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);
extern void cr4_init(void);

static inline unsigned long get_debugctlmsr(void)
{
        unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
        if (boot_cpu_data.x86 < 6)
                return 0;
#endif
        rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

        return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
        if (boot_cpu_data.x86 < 6)
                return;
#endif
        wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}
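
/*
 * These helpers are typically used to toggle individual DEBUGCTL bits.
 * For instance (an illustrative sketch of what set_task_blockstep()
 * ultimately does for the current task):
 *
 *      update_debugctlmsr(get_debugctlmsr() | DEBUGCTLMSR_BTF);
 */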

extern void set_task_blockstep(struct task_struct *task, bool on);

/* Boot loader type from the setup header: */
extern int bootloader_type;
extern int bootloader_version;

extern char ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH ""
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH "prefetcht0 %P1"
#endif

/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
        alternative_input(BASE_PREFETCH, "prefetchnta %P1",
                          X86_FEATURE_XMM,
                          "m" (*(const char *)x));
}

/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static inline void prefetchw(const void *x)
{
        alternative_input(BASE_PREFETCH, "prefetchw %P1",
                          X86_FEATURE_3DNOWPREFETCH,
                          "m" (*(const char *)x));
}

static inline void spin_lock_prefetch(const void *x)
{
        prefetchw(x);
}

#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
                           TOP_OF_KERNEL_STACK_PADDING)

#define task_top_of_stack(task) ((unsigned long)(task_pt_regs(task) + 1))

#define task_pt_regs(task) \
({                                                                      \
        unsigned long __ptr = (unsigned long)task_stack_page(task);     \
        __ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;             \
        ((struct pt_regs *)__ptr) - 1;                                  \
})
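
/*
 * Layout note: task_pt_regs() points at the pt_regs saved at the top of
 * the task's kernel stack, just below TOP_OF_KERNEL_STACK_PADDING, so
 * by construction:
 *
 *      task_top_of_stack(task) == (unsigned long)(task_pt_regs(task) + 1)
 */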

#ifdef CONFIG_X86_32
/*
 * User space process size: 3GB (default).
 */
#define IA32_PAGE_OFFSET PAGE_OFFSET
#define TASK_SIZE PAGE_OFFSET
#define TASK_SIZE_LOW TASK_SIZE
#define TASK_SIZE_MAX TASK_SIZE
#define DEFAULT_MAP_WINDOW TASK_SIZE
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX STACK_TOP

#define INIT_THREAD {                           \
        .sp0 = TOP_OF_INIT_STACK,               \
        .sysenter_cs = __KERNEL_CS,             \
        .addr_limit = KERNEL_DS,                \
}

#define KSTK_ESP(task) (task_pt_regs(task)->sp)

#else
/*
 * User space process size. This is the first address outside the user range.
 * There are a few constraints that determine this:
 *
 * On Intel CPUs, if a SYSCALL instruction is at the highest canonical
 * address, then that syscall will enter the kernel with a
 * non-canonical return address, and SYSRET will explode dangerously.
 * We avoid this particular problem by preventing anything executable
 * from being mapped at the maximum canonical address.
 *
 * On AMD CPUs in the Ryzen family, there's a nasty bug in which the
 * CPUs malfunction if they execute code from the highest canonical page.
 * They'll speculate right off the end of the canonical space, and
 * bad things happen. This is worked around in the same way as the
 * Intel problem.
 *
 * With page table isolation enabled, we map the LDT in ... [stay tuned]
 */
#define TASK_SIZE_MAX ((1UL << __VIRTUAL_MASK_SHIFT) - PAGE_SIZE)

#define DEFAULT_MAP_WINDOW ((1UL << 47) - PAGE_SIZE)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
                          0xc0000000 : 0xFFFFe000)

#define TASK_SIZE_LOW (test_thread_flag(TIF_ADDR32) ? \
                       IA32_PAGE_OFFSET : DEFAULT_MAP_WINDOW)
#define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
                   IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
                             IA32_PAGE_OFFSET : TASK_SIZE_MAX)

#define STACK_TOP TASK_SIZE_LOW
#define STACK_TOP_MAX TASK_SIZE_MAX

#define INIT_THREAD {                           \
        .addr_limit = KERNEL_DS,                \
}

extern unsigned long KSTK_ESP(struct task_struct *task);

#endif /* CONFIG_X86_64 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
                         unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define __TASK_UNMAPPED_BASE(task_size) (PAGE_ALIGN(task_size / 3))
#define TASK_UNMAPPED_BASE __TASK_UNMAPPED_BASE(TASK_SIZE_LOW)

#define KSTK_EIP(task) (task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr) get_tsc_mode((adr))
#define SET_TSC_CTL(val) set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

DECLARE_PER_CPU(u64, msr_misc_features_shadow);

#ifdef CONFIG_CPU_SUP_AMD
extern u16 amd_get_nb_id(int cpu);
extern u32 amd_get_nodes_per_socket(void);
#else
static inline u16 amd_get_nb_id(int cpu) { return 0; }
static inline u32 amd_get_nodes_per_socket(void) { return 0; }
#endif

static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
{
        uint32_t base, eax, signature[3];

        for (base = 0x40000000; base < 0x40010000; base += 0x100) {
                cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);

                if (!memcmp(sig, signature, 12) &&
                    (leaves == 0 || ((eax - base) >= leaves)))
                        return base;
        }

        return 0;
}
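
/*
 * Example (illustrative): a guest can probe for KVM this way, since
 * KVM's signature leaf advertises the string "KVMKVMKVM\0\0\0":
 *
 *      if (hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0))
 *              pr_info("running as a KVM guest\n");
 */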

extern unsigned long arch_align_stack(unsigned long sp);
void free_init_pages(const char *what, unsigned long begin, unsigned long end);
extern void free_kernel_image_pages(const char *what, void *begin, void *end);

void default_idle(void);
#ifdef CONFIG_XEN
bool xen_set_default_idle(void);
#else
#define xen_set_default_idle 0
#endif

void stop_this_cpu(void *dummy);
void microcode_check(void);

enum l1tf_mitigations {
        L1TF_MITIGATION_OFF,
        L1TF_MITIGATION_FLUSH_NOWARN,
        L1TF_MITIGATION_FLUSH,
        L1TF_MITIGATION_FLUSH_NOSMT,
        L1TF_MITIGATION_FULL,
        L1TF_MITIGATION_FULL_FORCE
};

extern enum l1tf_mitigations l1tf_mitigation;

enum mds_mitigations {
        MDS_MITIGATION_OFF,
        MDS_MITIGATION_FULL,
        MDS_MITIGATION_VMWERV,
};

#endif /* _ASM_X86_PROCESSOR_H */