/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_CPU_ENTRY_AREA_H
#define _ASM_X86_CPU_ENTRY_AREA_H

#include <linux/percpu-defs.h>
#include <asm/processor.h>
#include <asm/intel_ds.h>

#ifdef CONFIG_X86_64

/* Macro to enforce the same ordering and stack sizes */
#define ESTACKS_MEMBERS(guardsize, db2_holesize)	\
	char	DF_stack_guard[guardsize];		\
	char	DF_stack[EXCEPTION_STKSZ];		\
	char	NMI_stack_guard[guardsize];		\
	char	NMI_stack[EXCEPTION_STKSZ];		\
	char	DB2_stack_guard[guardsize];		\
	char	DB2_stack[db2_holesize];		\
	char	DB1_stack_guard[guardsize];		\
	char	DB1_stack[EXCEPTION_STKSZ];		\
	char	DB_stack_guard[guardsize];		\
	char	DB_stack[EXCEPTION_STKSZ];		\
	char	MCE_stack_guard[guardsize];		\
	char	MCE_stack[EXCEPTION_STKSZ];		\
	char	IST_top_guard[guardsize];		\

/* The exception stacks' physical storage. No guard pages required */
struct exception_stacks {
	ESTACKS_MEMBERS(0, 0)
};

/* The effective cpu entry area mapping with guard pages. */
struct cea_exception_stacks {
	ESTACKS_MEMBERS(PAGE_SIZE, EXCEPTION_STKSZ)
};

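/*
 * The two structs above share one member list so that setup code can
 * walk them in lockstep: each <name>_stack of exception_stacks is
 * mapped at the matching offset inside cea_exception_stacks, while the
 * guard pages and the DB2 hole are left unmapped, so an overflowing
 * stack faults instead of silently corrupting its neighbour.  A minimal
 * sketch of one such per-stack mapping loop (illustrative only; the
 * real code lives in arch/x86/mm/cpu_entry_area.c, and 'cea_vaddr' /
 * 'stack_pa' are hypothetical locals holding the CEA address of one
 * stack and the physical address of its backing store):
 *
 *	for (i = 0; i < EXCEPTION_STKSZ / PAGE_SIZE; i++)
 *		cea_set_pte(cea_vaddr + i * PAGE_SIZE,
 *			    stack_pa + i * PAGE_SIZE, PAGE_KERNEL);
 */
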
/*
 * The exception stack ordering in [cea_]exception_stacks
 */
enum exception_stack_ordering {
	ESTACK_DF,
	ESTACK_NMI,
	ESTACK_DB2,
	ESTACK_DB1,
	ESTACK_DB,
	ESTACK_MCE,
	N_EXCEPTION_STACKS
};

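/*
 * The enum order above must match the member order generated by
 * ESTACKS_MEMBERS(), since table-driven users index per-stack data by
 * the enum while addresses come from the struct layout.  A hypothetical
 * (not upstream) lookup table in that style:
 *
 *	static const unsigned long estack_offs[N_EXCEPTION_STACKS] = {
 *		[ESTACK_DF]  = CEA_ESTACK_OFFS(DF),
 *		[ESTACK_NMI] = CEA_ESTACK_OFFS(NMI),
 *		[ESTACK_DB]  = CEA_ESTACK_OFFS(DB),
 *		[ESTACK_MCE] = CEA_ESTACK_OFFS(MCE),
 *	};
 */
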
#define CEA_ESTACK_SIZE(st)					\
	sizeof(((struct cea_exception_stacks *)0)->st## _stack)

#define CEA_ESTACK_BOT(ceastp, st)				\
	((unsigned long)&(ceastp)->st## _stack)

#define CEA_ESTACK_TOP(ceastp, st)				\
	(CEA_ESTACK_BOT(ceastp, st) + CEA_ESTACK_SIZE(st))

#define CEA_ESTACK_OFFS(st)					\
	offsetof(struct cea_exception_stacks, st## _stack)

#define CEA_ESTACK_PAGES					\
	(sizeof(struct cea_exception_stacks) / PAGE_SIZE)

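/*
 * Example usage (illustrative only; 'cea' is a hypothetical pointer to
 * some CPU's cea_exception_stacks):
 *
 *	unsigned long bot = CEA_ESTACK_BOT(cea, NMI);
 *	unsigned long top = CEA_ESTACK_TOP(cea, NMI);
 *
 * top - bot equals CEA_ESTACK_SIZE(NMI), i.e. EXCEPTION_STKSZ; the
 * stack grows down from 'top' towards 'bot'.
 */
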
#endif

#ifdef CONFIG_X86_32
struct doublefault_stack {
	unsigned long stack[(PAGE_SIZE - sizeof(struct x86_hw_tss)) / sizeof(unsigned long)];
	struct x86_hw_tss tss;
} __aligned(PAGE_SIZE);
#endif

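/*
 * The array bound above is chosen so that stack plus TSS fill exactly
 * one page: the stack takes whatever PAGE_SIZE leaves over after
 * sizeof(struct x86_hw_tss).  A hypothetical build-time check (not part
 * of this header) expressing that invariant:
 *
 *	BUILD_BUG_ON(sizeof(struct doublefault_stack) != PAGE_SIZE);
 */
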
/*
 * cpu_entry_area is a percpu region that contains things needed by the CPU
 * and early entry/exit code.  Real types aren't used for all fields here
 * to avoid circular header dependencies.
 *
 * Every field is a virtual alias of some other allocated backing store.
 * There is no direct allocation of a struct cpu_entry_area.
 */
struct cpu_entry_area {
	char gdt[PAGE_SIZE];

	/*
	 * The GDT is just below entry_stack and thus serves (on x86_64) as
	 * a read-only guard page.  On 32-bit the GDT must be writeable, so
	 * it needs an extra guard page.
	 */
#ifdef CONFIG_X86_32
	char guard_entry_stack[PAGE_SIZE];
#endif
	struct entry_stack_page entry_stack_page;

#ifdef CONFIG_X86_32
	char guard_doublefault_stack[PAGE_SIZE];
	struct doublefault_stack doublefault_stack;
#endif

	/*
	 * On x86_64, the TSS is mapped RO.  On x86_32, it's mapped RW because
	 * we need task switches to work, and task switches write to the TSS.
	 */
	struct tss_struct tss;

#ifdef CONFIG_X86_64
	/*
	 * Exception stacks used for IST entries with guard pages.
	 */
	struct cea_exception_stacks estacks;
#endif
	/*
	 * Per CPU debug store for Intel performance monitoring.  Wastes a
	 * full page at the moment.
	 */
	struct debug_store cpu_debug_store;
	/*
	 * The actual PEBS/BTS buffers must be mapped to user space.
	 * Reserve enough fixmap PTEs.
	 */
	struct debug_store_buffers cpu_debug_buffers;
};

#define CPU_ENTRY_AREA_SIZE		(sizeof(struct cpu_entry_area))
#define CPU_ENTRY_AREA_ARRAY_SIZE	(CPU_ENTRY_AREA_SIZE * NR_CPUS)

/* Total size includes the readonly IDT mapping page as well: */
#define CPU_ENTRY_AREA_TOTAL_SIZE	(CPU_ENTRY_AREA_ARRAY_SIZE + PAGE_SIZE)

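/*
 * Worked example of the arithmetic above (illustrative; actual numbers
 * depend on the config): with 4 KiB pages and, say, NR_CPUS == 64,
 *
 *	CPU_ENTRY_AREA_TOTAL_SIZE
 *		== sizeof(struct cpu_entry_area) * 64 + 4096
 *
 * i.e. one fixed-size slot per possible CPU plus the single shared
 * read-only IDT page.
 */
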
DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);

extern void setup_cpu_entry_areas(void);
extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);

/* Single page reserved for the readonly IDT mapping: */
#define CPU_ENTRY_AREA_RO_IDT		CPU_ENTRY_AREA_BASE
#define CPU_ENTRY_AREA_PER_CPU		(CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)

#define CPU_ENTRY_AREA_RO_IDT_VADDR	((void *)CPU_ENTRY_AREA_RO_IDT)

#define CPU_ENTRY_AREA_MAP_SIZE					\
	(CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_ARRAY_SIZE - CPU_ENTRY_AREA_BASE)

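/*
 * Since CPU_ENTRY_AREA_PER_CPU is CPU_ENTRY_AREA_BASE + PAGE_SIZE, the
 * expression above reduces to PAGE_SIZE + CPU_ENTRY_AREA_ARRAY_SIZE,
 * which is exactly CPU_ENTRY_AREA_TOTAL_SIZE: the IDT page followed by
 * the per-CPU array.
 */
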
extern struct cpu_entry_area *get_cpu_entry_area(int cpu);

static inline struct entry_stack *cpu_entry_stack(int cpu)
{
	return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
}

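/*
 * Example usage (illustrative only; 'cpu' is a hypothetical local):
 *
 *	int cpu = smp_processor_id();
 *	struct entry_stack *es = cpu_entry_stack(cpu);
 */
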
#define __this_cpu_ist_top_va(name)				\
	CEA_ESTACK_TOP(__this_cpu_read(cea_exception_stacks), name)

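/*
 * Example (illustrative): __this_cpu_ist_top_va(DF) yields the top of
 * the current CPU's #DF IST stack, the same value the TSS IST slot is
 * programmed with.  As with any __this_cpu_*() access, callers must
 * not be preemptible.
 */
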
#endif