/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SPECIAL_INSNS_H
#define _ASM_X86_SPECIAL_INSNS_H


#ifdef __KERNEL__

#include <asm/nops.h>
#include <asm/processor-flags.h>
#include <linux/irqflags.h>
#include <linux/jump_label.h>

/*
 * Volatile isn't enough to prevent the compiler from reordering the
 * read/write functions for the control registers and messing everything up.
 * A memory clobber would solve the problem, but would prevent reordering of
 * all loads/stores around it, which can hurt performance. The solution is to
 * use a variable and mimic reads and writes to it to enforce serialization.
 */
extern unsigned long __force_order;

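/*
 * Illustrative sketch (not part of the original header) of the hazard the
 * dummy variable guards against: two asm volatile statements with no shared
 * operands may legally be reordered by the compiler. The identifiers below
 * (next_cr3, cr0) are hypothetical caller locals.
 */
#if 0
	/*
	 * Without __force_order, nothing ties these together, so the
	 * compiler could hoist the CR0 read above the CR3 write:
	 */
	native_write_cr3(next_cr3);	/* "m" (__force_order) input */
	cr0 = native_read_cr0();	/* "=m" (__force_order) output */
	/*
	 * Because the write consumes __force_order and the read appears to
	 * overwrite it, the pair now carries a dependency and stays in
	 * program order.
	 */
#endif
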
void native_write_cr0(unsigned long val);

static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static __always_inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static __always_inline void native_write_cr2(unsigned long val)
{
	asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
}

static inline unsigned long __native_read_cr3(void)
{
	unsigned long val;
	asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr3(unsigned long val)
{
	asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr4(void)
{
	unsigned long val;
#ifdef CONFIG_X86_32
	/*
	 * This could fault if CR4 does not exist. Non-existent CR4
	 * is functionally equivalent to CR4 == 0. Keep it simple and pretend
	 * that CR4 == 0 on CPUs that don't have CR4.
	 */
	asm volatile("1: mov %%cr4, %0\n"
		     "2:\n"
		     _ASM_EXTABLE(1b, 2b)
		     : "=r" (val), "=m" (__force_order) : "0" (0));
#else
	/* CR4 always exists on x86_64. */
	asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
#endif
	return val;
}
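/*
 * Usage sketch (illustrative, not part of the original header): thanks to
 * the exception-table fixup above, callers can test CR4 feature bits
 * without caring whether the CPU has CR4 at all. example_pae_enabled() is
 * a hypothetical name; X86_CR4_PAE comes from <asm/processor-flags.h>.
 */
#if 0
static bool example_pae_enabled(void)
{
	/* On a CR4-less 32-bit CPU the read yields 0, so PAE reads as off. */
	return !!(native_read_cr4() & X86_CR4_PAE);
}
#endif
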

void native_write_cr4(unsigned long val);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static inline u32 rdpkru(void)
{
	u32 ecx = 0;
	u32 edx, pkru;

	/*
	 * "rdpkru" instruction. Places PKRU contents into EAX,
	 * clears EDX and requires that ecx = 0.
	 */
	asm volatile(".byte 0x0f,0x01,0xee\n\t"
		     : "=a" (pkru), "=d" (edx)
		     : "c" (ecx));
	return pkru;
}
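/*
 * Illustrative sketch (not part of the original header): PKRU holds sixteen
 * two-bit fields, one per protection key k -- bit 2k is AD (access disable)
 * and bit 2k+1 is WD (write disable). example_key_write_disabled() is a
 * hypothetical name.
 */
#if 0
static bool example_key_write_disabled(int pkey)
{
	return !!(rdpkru() & (1u << (2 * pkey + 1)));
}
#endif
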

static inline void wrpkru(u32 pkru)
{
	u32 ecx = 0, edx = 0;

	/*
	 * "wrpkru" instruction. Loads the contents of EAX into PKRU;
	 * requires that ecx = edx = 0.
	 */
	asm volatile(".byte 0x0f,0x01,0xef\n\t"
		     : : "a" (pkru), "c"(ecx), "d"(edx));
}

static inline void __write_pkru(u32 pkru)
{
	/*
	 * WRPKRU is relatively expensive compared to RDPKRU.
	 * Avoid WRPKRU when it would not change the value.
	 */
	if (pkru == rdpkru())
		return;

	wrpkru(pkru);
}
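/*
 * Usage sketch (illustrative, not part of the original header):
 * read-modify-write PKRU through __write_pkru() so that an unchanged
 * value skips the expensive WRPKRU. The key number 1 is arbitrary.
 */
#if 0
	u32 pkru = rdpkru();

	__write_pkru(pkru | (1u << (2 * 1 + 1)));	/* write-disable key 1 */
#endif
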

#else
static inline u32 rdpkru(void)
{
	return 0;
}

static inline void __write_pkru(u32 pkru)
{
}
#endif

static inline void native_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}

extern asmlinkage void asm_load_gs_index(unsigned int selector);

static inline void native_load_gs_index(unsigned int selector)
{
	unsigned long flags;

	local_irq_save(flags);
	asm_load_gs_index(selector);
	local_irq_restore(flags);
}

static inline unsigned long __read_cr4(void)
{
	return native_read_cr4();
}

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else

static inline unsigned long read_cr0(void)
{
	return native_read_cr0();
}

static inline void write_cr0(unsigned long x)
{
	native_write_cr0(x);
}

static __always_inline unsigned long read_cr2(void)
{
	return native_read_cr2();
}

static __always_inline void write_cr2(unsigned long x)
{
	native_write_cr2(x);
}

/*
 * Careful! CR3 contains more than just an address. You probably want
 * read_cr3_pa() instead.
 */
static inline unsigned long __read_cr3(void)
{
	return __native_read_cr3();
}
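/*
 * Illustrative sketch (not part of the original header): the read_cr3_pa()
 * helper recommended above masks off the non-address bits of CR3 (the PCID
 * in the low 12 bits and the NOFLUSH bit) before the value is used as a
 * physical address, roughly:
 */
#if 0
	unsigned long pa = __read_cr3() & CR3_ADDR_MASK;
#endif
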

static inline void write_cr3(unsigned long x)
{
	native_write_cr3(x);
}

static inline void __write_cr4(unsigned long x)
{
	native_write_cr4(x);
}

static inline void wbinvd(void)
{
	native_wbinvd();
}

#ifdef CONFIG_X86_64

static inline void load_gs_index(unsigned int selector)
{
	native_load_gs_index(selector);
}

#endif

#endif /* CONFIG_PARAVIRT_XXL */

static inline void clflush(volatile void *__p)
{
	asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
}

static inline void clflushopt(volatile void *__p)
{
	alternative_io(".byte " __stringify(NOP_DS_PREFIX) "; clflush %P0",
		       ".byte 0x66; clflush %P0",
		       X86_FEATURE_CLFLUSHOPT,
		       "+m" (*(volatile char __force *)__p));
}

static inline void clwb(volatile void *__p)
{
	volatile struct { char x[64]; } *p = __p;

	asm volatile(ALTERNATIVE_2(
		".byte " __stringify(NOP_DS_PREFIX) "; clflush (%[pax])",
		".byte 0x66; clflush (%[pax])", /* clflushopt (%%rax) */
		X86_FEATURE_CLFLUSHOPT,
		".byte 0x66, 0x0f, 0xae, 0x30",  /* clwb (%%rax) */
		X86_FEATURE_CLWB)
		: [p] "+m" (*p)
		: [pax] "a" (p));
}
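/*
 * Usage sketch (illustrative, not part of the original header): flushing an
 * arbitrary buffer means walking it one cache line at a time, in the style
 * of the kernel's clflush_cache_range(). example_flush_range() is a
 * hypothetical name; boot_cpu_data.x86_clflush_size holds the line size
 * (usually 64 bytes) and mb() comes from <asm/barrier.h>.
 */
#if 0
static void example_flush_range(void *addr, unsigned int size)
{
	unsigned long clsize = boot_cpu_data.x86_clflush_size;
	void *end = addr + size;
	void *p = (void *)((unsigned long)addr & ~(clsize - 1));

	mb();				/* order earlier stores first */
	for (; p < end; p += clsize)
		clflushopt(p);
	mb();				/* ensure the flushes are visible */
}
#endif
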

#define nop() asm volatile ("nop")

#endif /* __KERNEL__ */

#endif /* _ASM_X86_SPECIAL_INSNS_H */