/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * tools/testing/selftests/kvm/include/x86_64/processor.h
 *
 * Copyright (C) 2018, Google LLC.
 */

#ifndef SELFTEST_KVM_PROCESSOR_H
#define SELFTEST_KVM_PROCESSOR_H

#include <assert.h>
#include <stdint.h>
#include <syscall.h>

#include <asm/msr-index.h>
#include <asm/prctl.h>

#include <linux/kvm_para.h>
#include <linux/stringify.h>

#include "kvm_util.h"
#include "ucall_common.h"

extern bool host_cpu_is_intel;
extern bool host_cpu_is_amd;

/* Forced emulation prefix, used to invoke the emulator unconditionally. */
#define KVM_FEP "ud2; .byte 'k', 'v', 'm';"

#define NMI_VECTOR		0x02

#define X86_EFLAGS_FIXED	(1u << 1)

#define X86_CR4_VME		(1ul << 0)
#define X86_CR4_PVI		(1ul << 1)
#define X86_CR4_TSD		(1ul << 2)
#define X86_CR4_DE		(1ul << 3)
#define X86_CR4_PSE		(1ul << 4)
#define X86_CR4_PAE		(1ul << 5)
#define X86_CR4_MCE		(1ul << 6)
#define X86_CR4_PGE		(1ul << 7)
#define X86_CR4_PCE		(1ul << 8)
#define X86_CR4_OSFXSR		(1ul << 9)
#define X86_CR4_OSXMMEXCPT	(1ul << 10)
#define X86_CR4_UMIP		(1ul << 11)
#define X86_CR4_LA57		(1ul << 12)
#define X86_CR4_VMXE		(1ul << 13)
#define X86_CR4_SMXE		(1ul << 14)
#define X86_CR4_FSGSBASE	(1ul << 16)
#define X86_CR4_PCIDE		(1ul << 17)
#define X86_CR4_OSXSAVE		(1ul << 18)
#define X86_CR4_SMEP		(1ul << 20)
#define X86_CR4_SMAP		(1ul << 21)
#define X86_CR4_PKE		(1ul << 22)

struct xstate_header {
	u64				xstate_bv;
	u64				xcomp_bv;
	u64				reserved[6];
} __attribute__((packed));

struct xstate {
	u8 i387[512];
	struct xstate_header header;
	u8 extended_state_area[0];
} __attribute__ ((packed, aligned (64)));

#define XFEATURE_MASK_FP		BIT_ULL(0)
#define XFEATURE_MASK_SSE		BIT_ULL(1)
#define XFEATURE_MASK_YMM		BIT_ULL(2)
#define XFEATURE_MASK_BNDREGS		BIT_ULL(3)
#define XFEATURE_MASK_BNDCSR		BIT_ULL(4)
#define XFEATURE_MASK_OPMASK		BIT_ULL(5)
#define XFEATURE_MASK_ZMM_Hi256		BIT_ULL(6)
#define XFEATURE_MASK_Hi16_ZMM		BIT_ULL(7)
#define XFEATURE_MASK_PT		BIT_ULL(8)
#define XFEATURE_MASK_PKRU		BIT_ULL(9)
#define XFEATURE_MASK_PASID		BIT_ULL(10)
#define XFEATURE_MASK_CET_USER		BIT_ULL(11)
#define XFEATURE_MASK_CET_KERNEL	BIT_ULL(12)
#define XFEATURE_MASK_LBR		BIT_ULL(15)
#define XFEATURE_MASK_XTILE_CFG		BIT_ULL(17)
#define XFEATURE_MASK_XTILE_DATA	BIT_ULL(18)

#define XFEATURE_MASK_AVX512		(XFEATURE_MASK_OPMASK | \
					 XFEATURE_MASK_ZMM_Hi256 | \
					 XFEATURE_MASK_Hi16_ZMM)
#define XFEATURE_MASK_XTILE		(XFEATURE_MASK_XTILE_DATA | \
					 XFEATURE_MASK_XTILE_CFG)

/* Note, these are ordered alphabetically to match kvm_cpuid_entry2. Eww. */
enum cpuid_output_regs {
	KVM_CPUID_EAX,
	KVM_CPUID_EBX,
	KVM_CPUID_ECX,
	KVM_CPUID_EDX
};

/*
 * Pack the information into a 64-bit value so that each X86_FEATURE_XXX can be
 * passed by value with no overhead.
 */
struct kvm_x86_cpu_feature {
	u32	function;
	u16	index;
	u8	reg;
	u8	bit;
};
#define KVM_X86_CPU_FEATURE(fn, idx, gpr, __bit)				\
({										\
	struct kvm_x86_cpu_feature feature = {					\
		.function = fn,							\
		.index = idx,							\
		.reg = KVM_CPUID_##gpr,						\
		.bit = __bit,							\
	};									\
										\
	kvm_static_assert((fn & 0xc0000000) == 0 ||				\
			  (fn & 0xc0000000) == 0x40000000 ||			\
			  (fn & 0xc0000000) == 0x80000000 ||			\
			  (fn & 0xc0000000) == 0xc0000000);			\
	kvm_static_assert(idx < BIT(sizeof(feature.index) * BITS_PER_BYTE));	\
	feature;								\
})
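
/*
 * Illustrative example (not part of the upstream header): a one-off feature
 * definition follows the same pattern as the X86_FEATURE_* lists below, e.g.
 * SSE3, enumerated in CPUID.0x1.0:ECX[0], would be:
 *
 *	#define X86_FEATURE_SSE3	KVM_X86_CPU_FEATURE(0x1, 0, ECX, 0)
 *
 * The kvm_static_assert()s reject, at compile time, any function that isn't
 * in the basic, KVM, extended, or Centaur CPUID ranges.
 */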

/*
 * Basic Leaves, a.k.a. Intel defined
 */
#define X86_FEATURE_MWAIT		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 3)
#define X86_FEATURE_VMX			KVM_X86_CPU_FEATURE(0x1, 0, ECX, 5)
#define X86_FEATURE_SMX			KVM_X86_CPU_FEATURE(0x1, 0, ECX, 6)
#define X86_FEATURE_PDCM		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 15)
#define X86_FEATURE_PCID		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 17)
#define X86_FEATURE_X2APIC		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 21)
#define X86_FEATURE_MOVBE		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 22)
#define X86_FEATURE_TSC_DEADLINE_TIMER	KVM_X86_CPU_FEATURE(0x1, 0, ECX, 24)
#define X86_FEATURE_XSAVE		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 26)
#define X86_FEATURE_OSXSAVE		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 27)
#define X86_FEATURE_RDRAND		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 30)
#define X86_FEATURE_HYPERVISOR		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 31)
#define X86_FEATURE_PAE			KVM_X86_CPU_FEATURE(0x1, 0, EDX, 6)
#define X86_FEATURE_MCE			KVM_X86_CPU_FEATURE(0x1, 0, EDX, 7)
#define X86_FEATURE_APIC		KVM_X86_CPU_FEATURE(0x1, 0, EDX, 9)
#define X86_FEATURE_CLFLUSH		KVM_X86_CPU_FEATURE(0x1, 0, EDX, 19)
#define X86_FEATURE_XMM			KVM_X86_CPU_FEATURE(0x1, 0, EDX, 25)
#define X86_FEATURE_XMM2		KVM_X86_CPU_FEATURE(0x1, 0, EDX, 26)
#define X86_FEATURE_FSGSBASE		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 0)
#define X86_FEATURE_TSC_ADJUST		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 1)
#define X86_FEATURE_SGX			KVM_X86_CPU_FEATURE(0x7, 0, EBX, 2)
#define X86_FEATURE_HLE			KVM_X86_CPU_FEATURE(0x7, 0, EBX, 4)
#define X86_FEATURE_SMEP		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 7)
#define X86_FEATURE_INVPCID		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 10)
#define X86_FEATURE_RTM			KVM_X86_CPU_FEATURE(0x7, 0, EBX, 11)
#define X86_FEATURE_MPX			KVM_X86_CPU_FEATURE(0x7, 0, EBX, 14)
#define X86_FEATURE_SMAP		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 20)
#define X86_FEATURE_PCOMMIT		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 22)
#define X86_FEATURE_CLFLUSHOPT		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 23)
#define X86_FEATURE_CLWB		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 24)
#define X86_FEATURE_UMIP		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 2)
#define X86_FEATURE_PKU			KVM_X86_CPU_FEATURE(0x7, 0, ECX, 3)
#define X86_FEATURE_OSPKE		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 4)
#define X86_FEATURE_LA57		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 16)
#define X86_FEATURE_RDPID		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 22)
#define X86_FEATURE_SGX_LC		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 30)
#define X86_FEATURE_SHSTK		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 7)
#define X86_FEATURE_IBT			KVM_X86_CPU_FEATURE(0x7, 0, EDX, 20)
#define X86_FEATURE_AMX_TILE		KVM_X86_CPU_FEATURE(0x7, 0, EDX, 24)
#define X86_FEATURE_SPEC_CTRL		KVM_X86_CPU_FEATURE(0x7, 0, EDX, 26)
#define X86_FEATURE_ARCH_CAPABILITIES	KVM_X86_CPU_FEATURE(0x7, 0, EDX, 29)
#define X86_FEATURE_PKS			KVM_X86_CPU_FEATURE(0x7, 0, ECX, 31)
#define X86_FEATURE_XTILECFG		KVM_X86_CPU_FEATURE(0xD, 0, EAX, 17)
#define X86_FEATURE_XTILEDATA		KVM_X86_CPU_FEATURE(0xD, 0, EAX, 18)
#define X86_FEATURE_XSAVES		KVM_X86_CPU_FEATURE(0xD, 1, EAX, 3)
#define X86_FEATURE_XFD			KVM_X86_CPU_FEATURE(0xD, 1, EAX, 4)
#define X86_FEATURE_XTILEDATA_XFD	KVM_X86_CPU_FEATURE(0xD, 18, ECX, 2)

/*
 * Extended Leaves, a.k.a. AMD defined
 */
#define X86_FEATURE_SVM			KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 2)
#define X86_FEATURE_NX			KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 20)
#define X86_FEATURE_GBPAGES		KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 26)
#define X86_FEATURE_RDTSCP		KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 27)
#define X86_FEATURE_LM			KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 29)
#define X86_FEATURE_INVTSC		KVM_X86_CPU_FEATURE(0x80000007, 0, EDX, 8)
#define X86_FEATURE_RDPRU		KVM_X86_CPU_FEATURE(0x80000008, 0, EBX, 4)
#define X86_FEATURE_AMD_IBPB		KVM_X86_CPU_FEATURE(0x80000008, 0, EBX, 12)
#define X86_FEATURE_NPT			KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 0)
#define X86_FEATURE_LBRV		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 1)
#define X86_FEATURE_NRIPS		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 3)
#define X86_FEATURE_TSCRATEMSR		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 4)
#define X86_FEATURE_PAUSEFILTER		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 10)
#define X86_FEATURE_PFTHRESHOLD		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 12)
#define X86_FEATURE_VGIF		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 16)
#define X86_FEATURE_SEV			KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 1)
#define X86_FEATURE_SEV_ES		KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 3)

/*
 * KVM defined paravirt features.
 */
#define X86_FEATURE_KVM_CLOCKSOURCE	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 0)
#define X86_FEATURE_KVM_NOP_IO_DELAY	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 1)
#define X86_FEATURE_KVM_MMU_OP		KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 2)
#define X86_FEATURE_KVM_CLOCKSOURCE2	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 3)
#define X86_FEATURE_KVM_ASYNC_PF	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 4)
#define X86_FEATURE_KVM_STEAL_TIME	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 5)
#define X86_FEATURE_KVM_PV_EOI		KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 6)
#define X86_FEATURE_KVM_PV_UNHALT	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 7)
/* Bit 8 apparently isn't used?!?! */
#define X86_FEATURE_KVM_PV_TLB_FLUSH	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 9)
#define X86_FEATURE_KVM_ASYNC_PF_VMEXIT	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 10)
#define X86_FEATURE_KVM_PV_SEND_IPI	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 11)
#define X86_FEATURE_KVM_POLL_CONTROL	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 12)
#define X86_FEATURE_KVM_PV_SCHED_YIELD	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 13)
#define X86_FEATURE_KVM_ASYNC_PF_INT	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 14)
#define X86_FEATURE_KVM_MSI_EXT_DEST_ID	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 15)
#define X86_FEATURE_KVM_HC_MAP_GPA_RANGE	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 16)
#define X86_FEATURE_KVM_MIGRATION_CONTROL	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 17)

/*
 * Same idea as X86_FEATURE_XXX, but X86_PROPERTY_XXX retrieves a multi-bit
 * value/property as opposed to a single-bit feature.  Again, pack the info
 * into a 64-bit value to pass by value with no overhead.
 */
struct kvm_x86_cpu_property {
	u32	function;
	u8	index;
	u8	reg;
	u8	lo_bit;
	u8	hi_bit;
};
#define KVM_X86_CPU_PROPERTY(fn, idx, gpr, low_bit, high_bit)			\
({										\
	struct kvm_x86_cpu_property property = {				\
		.function = fn,							\
		.index = idx,							\
		.reg = KVM_CPUID_##gpr,						\
		.lo_bit = low_bit,						\
		.hi_bit = high_bit,						\
	};									\
										\
	kvm_static_assert(low_bit < high_bit);					\
	kvm_static_assert((fn & 0xc0000000) == 0 ||				\
			  (fn & 0xc0000000) == 0x40000000 ||			\
			  (fn & 0xc0000000) == 0x80000000 ||			\
			  (fn & 0xc0000000) == 0xc0000000);			\
	kvm_static_assert(idx < BIT(sizeof(property.index) * BITS_PER_BYTE));	\
	property;								\
})
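
/*
 * Illustrative example (not part of the upstream header): properties are read
 * via the this_cpu_property()/kvm_cpu_property() helpers defined later in
 * this header, e.g. to grab the guest physical address width:
 *
 *	uint32_t maxphyaddr = this_cpu_property(X86_PROPERTY_MAX_PHY_ADDR);
 */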

#define X86_PROPERTY_MAX_BASIC_LEAF		KVM_X86_CPU_PROPERTY(0, 0, EAX, 0, 31)
#define X86_PROPERTY_PMU_VERSION		KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 0, 7)
#define X86_PROPERTY_PMU_NR_GP_COUNTERS		KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 8, 15)
#define X86_PROPERTY_PMU_GP_COUNTERS_BIT_WIDTH	KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 16, 23)
#define X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH	KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 24, 31)
#define X86_PROPERTY_PMU_EVENTS_MASK		KVM_X86_CPU_PROPERTY(0xa, 0, EBX, 0, 7)
#define X86_PROPERTY_PMU_FIXED_COUNTERS_BITMASK	KVM_X86_CPU_PROPERTY(0xa, 0, ECX, 0, 31)
#define X86_PROPERTY_PMU_NR_FIXED_COUNTERS	KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 0, 4)
#define X86_PROPERTY_PMU_FIXED_COUNTERS_BIT_WIDTH	KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 5, 12)

#define X86_PROPERTY_SUPPORTED_XCR0_LO		KVM_X86_CPU_PROPERTY(0xd, 0, EAX, 0, 31)
#define X86_PROPERTY_XSTATE_MAX_SIZE_XCR0	KVM_X86_CPU_PROPERTY(0xd, 0, EBX, 0, 31)
#define X86_PROPERTY_XSTATE_MAX_SIZE		KVM_X86_CPU_PROPERTY(0xd, 0, ECX, 0, 31)
#define X86_PROPERTY_SUPPORTED_XCR0_HI		KVM_X86_CPU_PROPERTY(0xd, 0, EDX, 0, 31)

#define X86_PROPERTY_XSTATE_TILE_SIZE		KVM_X86_CPU_PROPERTY(0xd, 18, EAX, 0, 31)
#define X86_PROPERTY_XSTATE_TILE_OFFSET		KVM_X86_CPU_PROPERTY(0xd, 18, EBX, 0, 31)
#define X86_PROPERTY_AMX_MAX_PALETTE_TABLES	KVM_X86_CPU_PROPERTY(0x1d, 0, EAX, 0, 31)
#define X86_PROPERTY_AMX_TOTAL_TILE_BYTES	KVM_X86_CPU_PROPERTY(0x1d, 1, EAX, 0, 15)
#define X86_PROPERTY_AMX_BYTES_PER_TILE		KVM_X86_CPU_PROPERTY(0x1d, 1, EAX, 16, 31)
#define X86_PROPERTY_AMX_BYTES_PER_ROW		KVM_X86_CPU_PROPERTY(0x1d, 1, EBX, 0, 15)
#define X86_PROPERTY_AMX_NR_TILE_REGS		KVM_X86_CPU_PROPERTY(0x1d, 1, EBX, 16, 31)
#define X86_PROPERTY_AMX_MAX_ROWS		KVM_X86_CPU_PROPERTY(0x1d, 1, ECX, 0, 15)

#define X86_PROPERTY_MAX_KVM_LEAF		KVM_X86_CPU_PROPERTY(0x40000000, 0, EAX, 0, 31)

#define X86_PROPERTY_MAX_EXT_LEAF		KVM_X86_CPU_PROPERTY(0x80000000, 0, EAX, 0, 31)
#define X86_PROPERTY_MAX_PHY_ADDR		KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 0, 7)
#define X86_PROPERTY_MAX_VIRT_ADDR		KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 8, 15)
#define X86_PROPERTY_SEV_C_BIT			KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 0, 5)
#define X86_PROPERTY_PHYS_ADDR_REDUCTION	KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 6, 11)

#define X86_PROPERTY_MAX_CENTAUR_LEAF		KVM_X86_CPU_PROPERTY(0xC0000000, 0, EAX, 0, 31)

/*
 * Intel's architectural PMU events are bizarre.  They have a "feature" bit
 * that indicates the feature is _not_ supported, and a property that states
 * the length of the bit mask of unsupported features.  A feature is supported
 * if the size of the bit mask is larger than the "unavailable" bit, and said
 * bit is not set.  Fixed counters have similarly bizarre enumeration, but
 * inverted relative to arch events for general purpose counters.  Fixed
 * counters are supported if a feature flag is set **OR** the total number of
 * fixed counters is greater than the index of the counter.
 *
 * Wrap the events for general purpose and fixed counters to simplify checking
 * whether or not a given architectural event is supported.
 */
struct kvm_x86_pmu_feature {
	struct kvm_x86_cpu_feature f;
};
#define KVM_X86_PMU_FEATURE(__reg, __bit)				\
({									\
	struct kvm_x86_pmu_feature feature = {				\
		.f = KVM_X86_CPU_FEATURE(0xa, 0, __reg, __bit),		\
	};								\
									\
	kvm_static_assert(KVM_CPUID_##__reg == KVM_CPUID_EBX ||		\
			  KVM_CPUID_##__reg == KVM_CPUID_ECX);		\
	feature;							\
})
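
/*
 * Illustrative example (not part of the upstream header): the wrappers below
 * are consumed by this_pmu_has()/kvm_pmu_has(), defined later in this header,
 * which hide the inverted EBX vs. ECX semantics, e.g.
 *
 *	if (this_pmu_has(X86_PMU_FEATURE_BRANCH_INSNS_RETIRED))
 *		<program a GP counter with the branches-retired arch event>
 */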

#define X86_PMU_FEATURE_CPU_CYCLES			KVM_X86_PMU_FEATURE(EBX, 0)
#define X86_PMU_FEATURE_INSNS_RETIRED			KVM_X86_PMU_FEATURE(EBX, 1)
#define X86_PMU_FEATURE_REFERENCE_CYCLES		KVM_X86_PMU_FEATURE(EBX, 2)
#define X86_PMU_FEATURE_LLC_REFERENCES			KVM_X86_PMU_FEATURE(EBX, 3)
#define X86_PMU_FEATURE_LLC_MISSES			KVM_X86_PMU_FEATURE(EBX, 4)
#define X86_PMU_FEATURE_BRANCH_INSNS_RETIRED		KVM_X86_PMU_FEATURE(EBX, 5)
#define X86_PMU_FEATURE_BRANCHES_MISPREDICTED		KVM_X86_PMU_FEATURE(EBX, 6)
#define X86_PMU_FEATURE_TOPDOWN_SLOTS			KVM_X86_PMU_FEATURE(EBX, 7)

#define X86_PMU_FEATURE_INSNS_RETIRED_FIXED		KVM_X86_PMU_FEATURE(ECX, 0)
#define X86_PMU_FEATURE_CPU_CYCLES_FIXED		KVM_X86_PMU_FEATURE(ECX, 1)
#define X86_PMU_FEATURE_REFERENCE_TSC_CYCLES_FIXED	KVM_X86_PMU_FEATURE(ECX, 2)
#define X86_PMU_FEATURE_TOPDOWN_SLOTS_FIXED		KVM_X86_PMU_FEATURE(ECX, 3)

static inline unsigned int x86_family(unsigned int eax)
{
	unsigned int x86;

	x86 = (eax >> 8) & 0xf;

	if (x86 == 0xf)
		x86 += (eax >> 20) & 0xff;

	return x86;
}

static inline unsigned int x86_model(unsigned int eax)
{
	return ((eax >> 12) & 0xf0) | ((eax >> 4) & 0x0f);
}
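
/*
 * E.g. FMS 0x806ec decodes to family 0x6 (EAX[11:8], which isn't 0xf, so the
 * extended family bits are ignored) and model 0x8e (the extended model bits
 * in EAX[19:16] prepended to the base model in EAX[7:4]).
 */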

/* Page table bitfield declarations */
#define PTE_PRESENT_MASK	BIT_ULL(0)
#define PTE_WRITABLE_MASK	BIT_ULL(1)
#define PTE_USER_MASK		BIT_ULL(2)
#define PTE_ACCESSED_MASK	BIT_ULL(5)
#define PTE_DIRTY_MASK		BIT_ULL(6)
#define PTE_LARGE_MASK		BIT_ULL(7)
#define PTE_GLOBAL_MASK		BIT_ULL(8)
#define PTE_NX_MASK		BIT_ULL(63)

#define PHYSICAL_PAGE_MASK	GENMASK_ULL(51, 12)

#define PAGE_SHIFT		12
#define PAGE_SIZE		(1ULL << PAGE_SHIFT)
#define PAGE_MASK		(~(PAGE_SIZE-1) & PHYSICAL_PAGE_MASK)

#define HUGEPAGE_SHIFT(x)	(PAGE_SHIFT + (((x) - 1) * 9))
#define HUGEPAGE_SIZE(x)	(1UL << HUGEPAGE_SHIFT(x))
#define HUGEPAGE_MASK(x)	(~(HUGEPAGE_SIZE(x) - 1) & PHYSICAL_PAGE_MASK)

#define PTE_GET_PA(pte)		((pte) & PHYSICAL_PAGE_MASK)
#define PTE_GET_PFN(pte)	(PTE_GET_PA(pte) >> PAGE_SHIFT)
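
/*
 * Illustrative example (not part of the upstream header): composing a 4KiB
 * PTE is the inverse of PTE_GET_PA(), e.g. a present, writable,
 * supervisor-only mapping of PFN 0x1234 would be:
 *
 *	uint64_t pte = (0x1234ULL << PAGE_SHIFT) | PTE_PRESENT_MASK |
 *		       PTE_WRITABLE_MASK;
 */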

/* General Registers in 64-Bit Mode */
struct gpr64_regs {
	u64 rax;
	u64 rcx;
	u64 rdx;
	u64 rbx;
	u64 rsp;
	u64 rbp;
	u64 rsi;
	u64 rdi;
	u64 r8;
	u64 r9;
	u64 r10;
	u64 r11;
	u64 r12;
	u64 r13;
	u64 r14;
	u64 r15;
};

struct desc64 {
	uint16_t limit0;
	uint16_t base0;
	unsigned base1:8, type:4, s:1, dpl:2, p:1;
	unsigned limit1:4, avl:1, l:1, db:1, g:1, base2:8;
	uint32_t base3;
	uint32_t zero1;
} __attribute__((packed));

struct desc_ptr {
	uint16_t size;
	uint64_t address;
} __attribute__((packed));

struct kvm_x86_state {
	struct kvm_xsave *xsave;
	struct kvm_vcpu_events events;
	struct kvm_mp_state mp_state;
	struct kvm_regs regs;
	struct kvm_xcrs xcrs;
	struct kvm_sregs sregs;
	struct kvm_debugregs debugregs;
	union {
		struct kvm_nested_state nested;
		char nested_[16384];
	};
	struct kvm_msrs msrs;
};

static inline uint64_t get_desc64_base(const struct desc64 *desc)
{
	return ((uint64_t)desc->base3 << 32) |
	       (desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
}

static inline uint64_t rdtsc(void)
{
	uint32_t eax, edx;
	uint64_t tsc_val;
	/*
	 * The lfence is to wait (on Intel CPUs) until all previous
	 * instructions have been executed.  If software requires RDTSC to be
	 * executed prior to execution of any subsequent instruction, it can
	 * execute LFENCE immediately after RDTSC.
	 */
	__asm__ __volatile__("lfence; rdtsc; lfence" : "=a"(eax), "=d"(edx));
	tsc_val = ((uint64_t)edx) << 32 | eax;
	return tsc_val;
}
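
/*
 * Illustrative example (not part of the upstream header): the LFENCEs make
 * rdtsc() suitable for bracketing a region of interest, e.g.
 *
 *	uint64_t start = rdtsc();
 *	<code under test>
 *	uint64_t cycles = rdtsc() - start;
 */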

static inline uint64_t rdtscp(uint32_t *aux)
{
	uint32_t eax, edx;

	__asm__ __volatile__("rdtscp" : "=a"(eax), "=d"(edx), "=c"(*aux));
	return ((uint64_t)edx) << 32 | eax;
}

static inline uint64_t rdmsr(uint32_t msr)
{
	uint32_t a, d;

	__asm__ __volatile__("rdmsr" : "=a"(a), "=d"(d) : "c"(msr) : "memory");

	return a | ((uint64_t) d << 32);
}

static inline void wrmsr(uint32_t msr, uint64_t value)
{
	uint32_t a = value;
	uint32_t d = value >> 32;

	__asm__ __volatile__("wrmsr" :: "a"(a), "d"(d), "c"(msr) : "memory");
}
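
/*
 * Illustrative example (not part of the upstream header): rdmsr() and wrmsr()
 * compose into the usual read-modify-write idiom, e.g. to set a single bit
 * without clobbering its siblings:
 *
 *	wrmsr(msr, rdmsr(msr) | BIT_ULL(bit));
 */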

static inline uint16_t inw(uint16_t port)
{
	uint16_t tmp;

	__asm__ __volatile__("in %%dx, %%ax"
			     : /* output */ "=a" (tmp)
			     : /* input */ "d" (port));

	return tmp;
}

static inline uint16_t get_es(void)
{
	uint16_t es;

	__asm__ __volatile__("mov %%es, %[es]"
			     : /* output */ [es]"=rm"(es));
	return es;
}

static inline uint16_t get_cs(void)
{
	uint16_t cs;

	__asm__ __volatile__("mov %%cs, %[cs]"
			     : /* output */ [cs]"=rm"(cs));
	return cs;
}

static inline uint16_t get_ss(void)
{
	uint16_t ss;

	__asm__ __volatile__("mov %%ss, %[ss]"
			     : /* output */ [ss]"=rm"(ss));
	return ss;
}

static inline uint16_t get_ds(void)
{
	uint16_t ds;

	__asm__ __volatile__("mov %%ds, %[ds]"
			     : /* output */ [ds]"=rm"(ds));
	return ds;
}

static inline uint16_t get_fs(void)
{
	uint16_t fs;

	__asm__ __volatile__("mov %%fs, %[fs]"
			     : /* output */ [fs]"=rm"(fs));
	return fs;
}

static inline uint16_t get_gs(void)
{
	uint16_t gs;

	__asm__ __volatile__("mov %%gs, %[gs]"
			     : /* output */ [gs]"=rm"(gs));
	return gs;
}

static inline uint16_t get_tr(void)
{
	uint16_t tr;

	__asm__ __volatile__("str %[tr]"
			     : /* output */ [tr]"=rm"(tr));
	return tr;
}

static inline uint64_t get_cr0(void)
{
	uint64_t cr0;

	__asm__ __volatile__("mov %%cr0, %[cr0]"
			     : /* output */ [cr0]"=r"(cr0));
	return cr0;
}

static inline uint64_t get_cr3(void)
{
	uint64_t cr3;

	__asm__ __volatile__("mov %%cr3, %[cr3]"
			     : /* output */ [cr3]"=r"(cr3));
	return cr3;
}

static inline uint64_t get_cr4(void)
{
	uint64_t cr4;

	__asm__ __volatile__("mov %%cr4, %[cr4]"
			     : /* output */ [cr4]"=r"(cr4));
	return cr4;
}

static inline void set_cr4(uint64_t val)
{
	__asm__ __volatile__("mov %0, %%cr4" : : "r" (val) : "memory");
}

static inline u64 xgetbv(u32 index)
{
	u32 eax, edx;

	__asm__ __volatile__("xgetbv;"
			     : "=a" (eax), "=d" (edx)
			     : "c" (index));
	return eax | ((u64)edx << 32);
}

static inline void xsetbv(u32 index, u64 value)
{
	u32 eax = value;
	u32 edx = value >> 32;

	__asm__ __volatile__("xsetbv" :: "a" (eax), "d" (edx), "c" (index));
}
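
/*
 * Illustrative example (not part of the upstream header): XSETBV #UDs unless
 * CR4.OSXSAVE is set, so enabling AVX state in XCR0 from within the guest
 * looks like:
 *
 *	set_cr4(get_cr4() | X86_CR4_OSXSAVE);
 *	xsetbv(0, XFEATURE_MASK_FP | XFEATURE_MASK_SSE | XFEATURE_MASK_YMM);
 */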

static inline void wrpkru(u32 pkru)
{
	/* Note, ECX and EDX are architecturally required to be '0'. */
	asm volatile(".byte 0x0f,0x01,0xef\n\t"
		     : : "a" (pkru), "c"(0), "d"(0));
}

static inline struct desc_ptr get_gdt(void)
{
	struct desc_ptr gdt;
	__asm__ __volatile__("sgdt %[gdt]"
			     : /* output */ [gdt]"=m"(gdt));
	return gdt;
}

static inline struct desc_ptr get_idt(void)
{
	struct desc_ptr idt;
	__asm__ __volatile__("sidt %[idt]"
			     : /* output */ [idt]"=m"(idt));
	return idt;
}

static inline void outl(uint16_t port, uint32_t value)
{
	__asm__ __volatile__("outl %%eax, %%dx" : : "d"(port), "a"(value));
}

static inline void __cpuid(uint32_t function, uint32_t index,
			   uint32_t *eax, uint32_t *ebx,
			   uint32_t *ecx, uint32_t *edx)
{
	*eax = function;
	*ecx = index;

	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx)
	    : "memory");
}

static inline void cpuid(uint32_t function,
			 uint32_t *eax, uint32_t *ebx,
			 uint32_t *ecx, uint32_t *edx)
{
	return __cpuid(function, 0, eax, ebx, ecx, edx);
}

static inline uint32_t this_cpu_fms(void)
{
	uint32_t eax, ebx, ecx, edx;

	cpuid(1, &eax, &ebx, &ecx, &edx);
	return eax;
}

static inline uint32_t this_cpu_family(void)
{
	return x86_family(this_cpu_fms());
}

static inline uint32_t this_cpu_model(void)
{
	return x86_model(this_cpu_fms());
}

static inline bool this_cpu_vendor_string_is(const char *vendor)
{
	const uint32_t *chunk = (const uint32_t *)vendor;
	uint32_t eax, ebx, ecx, edx;

	cpuid(0, &eax, &ebx, &ecx, &edx);
	return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]);
}

static inline bool this_cpu_is_intel(void)
{
	return this_cpu_vendor_string_is("GenuineIntel");
}

/*
 * Exclude early K5 samples with a vendor string of "AMDisbetter!"
 */
static inline bool this_cpu_is_amd(void)
{
	return this_cpu_vendor_string_is("AuthenticAMD");
}

static inline uint32_t __this_cpu_has(uint32_t function, uint32_t index,
				      uint8_t reg, uint8_t lo, uint8_t hi)
{
	uint32_t gprs[4];

	__cpuid(function, index,
		&gprs[KVM_CPUID_EAX], &gprs[KVM_CPUID_EBX],
		&gprs[KVM_CPUID_ECX], &gprs[KVM_CPUID_EDX]);

	return (gprs[reg] & GENMASK(hi, lo)) >> lo;
}

static inline bool this_cpu_has(struct kvm_x86_cpu_feature feature)
{
	return __this_cpu_has(feature.function, feature.index,
			      feature.reg, feature.bit, feature.bit);
}

static inline uint32_t this_cpu_property(struct kvm_x86_cpu_property property)
{
	return __this_cpu_has(property.function, property.index,
			      property.reg, property.lo_bit, property.hi_bit);
}

static __always_inline bool this_cpu_has_p(struct kvm_x86_cpu_property property)
{
	uint32_t max_leaf;

	switch (property.function & 0xc0000000) {
	case 0:
		max_leaf = this_cpu_property(X86_PROPERTY_MAX_BASIC_LEAF);
		break;
	case 0x40000000:
		max_leaf = this_cpu_property(X86_PROPERTY_MAX_KVM_LEAF);
		break;
	case 0x80000000:
		max_leaf = this_cpu_property(X86_PROPERTY_MAX_EXT_LEAF);
		break;
	case 0xc0000000:
		max_leaf = this_cpu_property(X86_PROPERTY_MAX_CENTAUR_LEAF);
	}
	return max_leaf >= property.function;
}

static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature)
{
	uint32_t nr_bits;

	if (feature.f.reg == KVM_CPUID_EBX) {
		nr_bits = this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
		return nr_bits > feature.f.bit && !this_cpu_has(feature.f);
	}

	GUEST_ASSERT(feature.f.reg == KVM_CPUID_ECX);
	nr_bits = this_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
	return nr_bits > feature.f.bit || this_cpu_has(feature.f);
}

static __always_inline uint64_t this_cpu_supported_xcr0(void)
{
	if (!this_cpu_has_p(X86_PROPERTY_SUPPORTED_XCR0_LO))
		return 0;

	return this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_LO) |
	       ((uint64_t)this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
}

typedef u32 __attribute__((vector_size(16))) sse128_t;
#define __sse128_u	union { sse128_t vec; u64 as_u64[2]; u32 as_u32[4]; }
#define sse128_lo(x)	({ __sse128_u t; t.vec = x; t.as_u64[0]; })
#define sse128_hi(x)	({ __sse128_u t; t.vec = x; t.as_u64[1]; })
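
/*
 * Illustrative example (not part of the upstream header): given
 *
 *	sse128_t v = (sse128_t){ 0x11111111, 0x22222222, 0x33333333, 0x44444444 };
 *
 * sse128_lo(v) yields 0x2222222211111111 and sse128_hi(v) yields
 * 0x4444444433333333 (x86 is little-endian).
 */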

static inline void read_sse_reg(int reg, sse128_t *data)
{
	switch (reg) {
	case 0:
		asm("movdqa %%xmm0, %0" : "=m"(*data));
		break;
	case 1:
		asm("movdqa %%xmm1, %0" : "=m"(*data));
		break;
	case 2:
		asm("movdqa %%xmm2, %0" : "=m"(*data));
		break;
	case 3:
		asm("movdqa %%xmm3, %0" : "=m"(*data));
		break;
	case 4:
		asm("movdqa %%xmm4, %0" : "=m"(*data));
		break;
	case 5:
		asm("movdqa %%xmm5, %0" : "=m"(*data));
		break;
	case 6:
		asm("movdqa %%xmm6, %0" : "=m"(*data));
		break;
	case 7:
		asm("movdqa %%xmm7, %0" : "=m"(*data));
		break;
	default:
		BUG();
	}
}

static inline void write_sse_reg(int reg, const sse128_t *data)
{
	switch (reg) {
	case 0:
		asm("movdqa %0, %%xmm0" : : "m"(*data));
		break;
	case 1:
		asm("movdqa %0, %%xmm1" : : "m"(*data));
		break;
	case 2:
		asm("movdqa %0, %%xmm2" : : "m"(*data));
		break;
	case 3:
		asm("movdqa %0, %%xmm3" : : "m"(*data));
		break;
	case 4:
		asm("movdqa %0, %%xmm4" : : "m"(*data));
		break;
	case 5:
		asm("movdqa %0, %%xmm5" : : "m"(*data));
		break;
	case 6:
		asm("movdqa %0, %%xmm6" : : "m"(*data));
		break;
	case 7:
		asm("movdqa %0, %%xmm7" : : "m"(*data));
		break;
	default:
		BUG();
	}
}

static inline void cpu_relax(void)
{
	asm volatile("rep; nop" ::: "memory");
}

#define ud2()			\
	__asm__ __volatile__(	\
		"ud2\n"		\
		)

#define hlt()			\
	__asm__ __volatile__(	\
		"hlt\n"		\
		)

struct kvm_x86_state *vcpu_save_state(struct kvm_vcpu *vcpu);
void vcpu_load_state(struct kvm_vcpu *vcpu, struct kvm_x86_state *state);
void kvm_x86_state_cleanup(struct kvm_x86_state *state);

const struct kvm_msr_list *kvm_get_msr_index_list(void);
const struct kvm_msr_list *kvm_get_feature_msr_index_list(void);
bool kvm_msr_is_in_save_restore_list(uint32_t msr_index);
uint64_t kvm_get_feature_msr(uint64_t msr_index);

static inline void vcpu_msrs_get(struct kvm_vcpu *vcpu,
				 struct kvm_msrs *msrs)
{
	int r = __vcpu_ioctl(vcpu, KVM_GET_MSRS, msrs);

	TEST_ASSERT(r == msrs->nmsrs,
		    "KVM_GET_MSRS failed, r: %i (failed on MSR %x)",
		    r, r < 0 || r >= msrs->nmsrs ? -1 : msrs->entries[r].index);
}
static inline void vcpu_msrs_set(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs)
{
	int r = __vcpu_ioctl(vcpu, KVM_SET_MSRS, msrs);

	TEST_ASSERT(r == msrs->nmsrs,
		    "KVM_SET_MSRS failed, r: %i (failed on MSR %x)",
		    r, r < 0 || r >= msrs->nmsrs ? -1 : msrs->entries[r].index);
}
static inline void vcpu_debugregs_get(struct kvm_vcpu *vcpu,
				      struct kvm_debugregs *debugregs)
{
	vcpu_ioctl(vcpu, KVM_GET_DEBUGREGS, debugregs);
}
static inline void vcpu_debugregs_set(struct kvm_vcpu *vcpu,
				      struct kvm_debugregs *debugregs)
{
	vcpu_ioctl(vcpu, KVM_SET_DEBUGREGS, debugregs);
}
static inline void vcpu_xsave_get(struct kvm_vcpu *vcpu,
				  struct kvm_xsave *xsave)
{
	vcpu_ioctl(vcpu, KVM_GET_XSAVE, xsave);
}
static inline void vcpu_xsave2_get(struct kvm_vcpu *vcpu,
				   struct kvm_xsave *xsave)
{
	vcpu_ioctl(vcpu, KVM_GET_XSAVE2, xsave);
}
static inline void vcpu_xsave_set(struct kvm_vcpu *vcpu,
				  struct kvm_xsave *xsave)
{
	vcpu_ioctl(vcpu, KVM_SET_XSAVE, xsave);
}
static inline void vcpu_xcrs_get(struct kvm_vcpu *vcpu,
				 struct kvm_xcrs *xcrs)
{
	vcpu_ioctl(vcpu, KVM_GET_XCRS, xcrs);
}
static inline void vcpu_xcrs_set(struct kvm_vcpu *vcpu, struct kvm_xcrs *xcrs)
{
	vcpu_ioctl(vcpu, KVM_SET_XCRS, xcrs);
}

const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
					       uint32_t function, uint32_t index);
const struct kvm_cpuid2 *kvm_get_supported_cpuid(void);
const struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void);
const struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vcpu *vcpu);

static inline uint32_t kvm_cpu_fms(void)
{
	return get_cpuid_entry(kvm_get_supported_cpuid(), 0x1, 0)->eax;
}

static inline uint32_t kvm_cpu_family(void)
{
	return x86_family(kvm_cpu_fms());
}

static inline uint32_t kvm_cpu_model(void)
{
	return x86_model(kvm_cpu_fms());
}

bool kvm_cpuid_has(const struct kvm_cpuid2 *cpuid,
		   struct kvm_x86_cpu_feature feature);

static inline bool kvm_cpu_has(struct kvm_x86_cpu_feature feature)
{
	return kvm_cpuid_has(kvm_get_supported_cpuid(), feature);
}

uint32_t kvm_cpuid_property(const struct kvm_cpuid2 *cpuid,
			    struct kvm_x86_cpu_property property);

static inline uint32_t kvm_cpu_property(struct kvm_x86_cpu_property property)
{
	return kvm_cpuid_property(kvm_get_supported_cpuid(), property);
}

static __always_inline bool kvm_cpu_has_p(struct kvm_x86_cpu_property property)
{
	uint32_t max_leaf;

	switch (property.function & 0xc0000000) {
	case 0:
		max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_BASIC_LEAF);
		break;
	case 0x40000000:
		max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_KVM_LEAF);
		break;
	case 0x80000000:
		max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_EXT_LEAF);
		break;
	case 0xc0000000:
		max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_CENTAUR_LEAF);
	}
	return max_leaf >= property.function;
}

static inline bool kvm_pmu_has(struct kvm_x86_pmu_feature feature)
{
	uint32_t nr_bits;

	if (feature.f.reg == KVM_CPUID_EBX) {
		nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
		return nr_bits > feature.f.bit && !kvm_cpu_has(feature.f);
	}

	TEST_ASSERT_EQ(feature.f.reg, KVM_CPUID_ECX);
	nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
	return nr_bits > feature.f.bit || kvm_cpu_has(feature.f);
}

static __always_inline uint64_t kvm_cpu_supported_xcr0(void)
{
	if (!kvm_cpu_has_p(X86_PROPERTY_SUPPORTED_XCR0_LO))
		return 0;

	return kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_LO) |
	       ((uint64_t)kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
}

static inline size_t kvm_cpuid2_size(int nr_entries)
{
	return sizeof(struct kvm_cpuid2) +
	       sizeof(struct kvm_cpuid_entry2) * nr_entries;
}

/*
 * Allocate a struct kvm_cpuid2 instance, with the 0-length array of entries
 * sized to hold @nr_entries.  The caller is responsible for freeing the
 * struct.
 */
static inline struct kvm_cpuid2 *allocate_kvm_cpuid2(int nr_entries)
{
	struct kvm_cpuid2 *cpuid;

	cpuid = malloc(kvm_cpuid2_size(nr_entries));
	TEST_ASSERT(cpuid, "-ENOMEM when allocating kvm_cpuid2");

	cpuid->nent = nr_entries;

	return cpuid;
}

void vcpu_init_cpuid(struct kvm_vcpu *vcpu, const struct kvm_cpuid2 *cpuid);
void vcpu_set_hv_cpuid(struct kvm_vcpu *vcpu);

static inline struct kvm_cpuid_entry2 *__vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu,
							       uint32_t function,
							       uint32_t index)
{
	return (struct kvm_cpuid_entry2 *)get_cpuid_entry(vcpu->cpuid,
							  function, index);
}

static inline struct kvm_cpuid_entry2 *vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu,
							     uint32_t function)
{
	return __vcpu_get_cpuid_entry(vcpu, function, 0);
}

static inline int __vcpu_set_cpuid(struct kvm_vcpu *vcpu)
{
	int r;

	TEST_ASSERT(vcpu->cpuid, "Must do vcpu_init_cpuid() first");
	r = __vcpu_ioctl(vcpu, KVM_SET_CPUID2, vcpu->cpuid);
	if (r)
		return r;

	/* On success, refresh the cache to pick up adjustments made by KVM. */
	vcpu_ioctl(vcpu, KVM_GET_CPUID2, vcpu->cpuid);
	return 0;
}

static inline void vcpu_set_cpuid(struct kvm_vcpu *vcpu)
{
	TEST_ASSERT(vcpu->cpuid, "Must do vcpu_init_cpuid() first");
	vcpu_ioctl(vcpu, KVM_SET_CPUID2, vcpu->cpuid);

	/* Refresh the cache to pick up adjustments made by KVM. */
	vcpu_ioctl(vcpu, KVM_GET_CPUID2, vcpu->cpuid);
}

void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu,
			     struct kvm_x86_cpu_property property,
			     uint32_t value);
void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, uint8_t maxphyaddr);

void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function);

static inline bool vcpu_cpuid_has(struct kvm_vcpu *vcpu,
				  struct kvm_x86_cpu_feature feature)
{
	struct kvm_cpuid_entry2 *entry;

	entry = __vcpu_get_cpuid_entry(vcpu, feature.function, feature.index);
	return *((&entry->eax) + feature.reg) & BIT(feature.bit);
}

void vcpu_set_or_clear_cpuid_feature(struct kvm_vcpu *vcpu,
				     struct kvm_x86_cpu_feature feature,
				     bool set);

static inline void vcpu_set_cpuid_feature(struct kvm_vcpu *vcpu,
					  struct kvm_x86_cpu_feature feature)
{
	vcpu_set_or_clear_cpuid_feature(vcpu, feature, true);
}

static inline void vcpu_clear_cpuid_feature(struct kvm_vcpu *vcpu,
					    struct kvm_x86_cpu_feature feature)
{
	vcpu_set_or_clear_cpuid_feature(vcpu, feature, false);
}

uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index);
int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value);

/*
 * Assert on MSR access(es) and pretty print the MSR name when possible.
 * Note, the caller provides the stringified name so that the name of the
 * macro is printed, not the value the macro resolves to (due to macro
 * expansion).
 */
#define TEST_ASSERT_MSR(cond, fmt, msr, str, args...)			\
do {									\
	if (__builtin_constant_p(msr)) {				\
		TEST_ASSERT(cond, fmt, str, args);			\
	} else if (!(cond)) {						\
		char buf[16];						\
									\
		snprintf(buf, sizeof(buf), "MSR 0x%x", msr);		\
		TEST_ASSERT(cond, fmt, buf, args);			\
	}								\
} while (0)

/*
 * Returns true if KVM should return the last written value when reading an MSR
 * from userspace, e.g. the MSR isn't a command MSR, doesn't emulate state that
 * is changing, etc.  This is NOT an exhaustive list!  The intent is to filter
 * out MSRs that are not durable _and_ that a selftest wants to write.
 */
static inline bool is_durable_msr(uint32_t msr)
{
	return msr != MSR_IA32_TSC;
}

#define vcpu_set_msr(vcpu, msr, val)							\
do {											\
	uint64_t r, v = val;								\
											\
	TEST_ASSERT_MSR(_vcpu_set_msr(vcpu, msr, v) == 1,				\
			"KVM_SET_MSRS failed on %s, value = 0x%lx", msr, #msr, v);	\
	if (!is_durable_msr(msr))							\
		break;									\
	r = vcpu_get_msr(vcpu, msr);							\
	TEST_ASSERT_MSR(r == v, "Set %s to '0x%lx', got back '0x%lx'", msr, #msr, v, r);\
} while (0)

void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);
void kvm_init_vm_address_properties(struct kvm_vm *vm);
bool vm_is_unrestricted_guest(struct kvm_vm *vm);

struct ex_regs {
	uint64_t rax, rcx, rdx, rbx;
	uint64_t rbp, rsi, rdi;
	uint64_t r8, r9, r10, r11;
	uint64_t r12, r13, r14, r15;
	uint64_t vector;
	uint64_t error_code;
	uint64_t rip;
	uint64_t cs;
	uint64_t rflags;
};

struct idt_entry {
	uint16_t offset0;
	uint16_t selector;
	uint16_t ist : 3;
	uint16_t : 5;
	uint16_t type : 4;
	uint16_t : 1;
	uint16_t dpl : 2;
	uint16_t p : 1;
	uint16_t offset1;
	uint32_t offset2;
	uint32_t reserved;
};

void vm_install_exception_handler(struct kvm_vm *vm, int vector,
				  void (*handler)(struct ex_regs *));

/* If a toddler were to say "abracadabra". */
#define KVM_EXCEPTION_MAGIC 0xabacadabaULL

/*
 * KVM selftest exception fixup uses registers to coordinate with the exception
 * handler, versus the kernel's in-memory tables and KVM-Unit-Tests's in-memory
 * per-CPU data.  Using only registers avoids having to map memory into the
 * guest, doesn't require a valid, stable GS.base, and reduces the risk of
 * recursive faults when accessing memory in the handler.  The downside to
 * using registers is that it restricts what registers can be used by the
 * actual instruction.  But, selftests are 64-bit only, making register
 * pressure a minor concern.  Use r9-r11 as they are volatile, i.e. don't need
 * to be saved by the callee, and except for r11 are not implicit parameters
 * to any instructions.  Ideally, fixup would use r8-r10 and thus avoid
 * implicit parameters entirely, but Hyper-V's hypercall ABI uses r8 and
 * testing Hyper-V is higher priority than testing non-faulting
 * SYSCALL/SYSRET.
 *
 * Note, the fixup handler deliberately does not handle #DE, i.e. the vector
 * is guaranteed to be non-zero on fault.
 *
 * REGISTER INPUTS:
 * r9  = MAGIC
 * r10 = RIP
 * r11 = new RIP on fault
 *
 * REGISTER OUTPUTS:
 * r9  = exception vector (non-zero)
 * r10 = error code
 */
#define __KVM_ASM_SAFE(insn, fep)				\
	"mov $" __stringify(KVM_EXCEPTION_MAGIC) ", %%r9\n\t"	\
	"lea 1f(%%rip), %%r10\n\t"				\
	"lea 2f(%%rip), %%r11\n\t"				\
	fep "1: " insn "\n\t"					\
	"xor %%r9, %%r9\n\t"					\
	"2:\n\t"						\
	"mov %%r9b, %[vector]\n\t"				\
	"mov %%r10, %[error_code]\n\t"

#define KVM_ASM_SAFE(insn) __KVM_ASM_SAFE(insn, "")
#define KVM_ASM_SAFE_FEP(insn) __KVM_ASM_SAFE(insn, KVM_FEP)

#define KVM_ASM_SAFE_OUTPUTS(v, ec)	[vector] "=qm"(v), [error_code] "=rm"(ec)
#define KVM_ASM_SAFE_CLOBBERS	"r9", "r10", "r11"

#define kvm_asm_safe(insn, inputs...)					\
({									\
	uint64_t ign_error_code;					\
	uint8_t vector;							\
									\
	asm volatile(KVM_ASM_SAFE(insn)					\
		     : KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code)	\
		     : inputs						\
		     : KVM_ASM_SAFE_CLOBBERS);				\
	vector;								\
})
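
/*
 * Illustrative example (not part of the upstream header): expecting a fault
 * from an instruction, e.g. a WRMSR to a non-existent MSR should #GP
 * (GP_VECTOR is assumed to come from the selftests' common definitions):
 *
 *	uint8_t vector = kvm_asm_safe("wrmsr", "c"(msr), "a"(lo), "d"(hi));
 *
 *	GUEST_ASSERT(vector == GP_VECTOR);
 */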

#define kvm_asm_safe_ec(insn, error_code, inputs...)			\
({									\
	uint8_t vector;							\
									\
	asm volatile(KVM_ASM_SAFE(insn)					\
		     : KVM_ASM_SAFE_OUTPUTS(vector, error_code)		\
		     : inputs						\
		     : KVM_ASM_SAFE_CLOBBERS);				\
	vector;								\
})

#define kvm_asm_safe_fep(insn, inputs...)				\
({									\
	uint64_t ign_error_code;					\
	uint8_t vector;							\
									\
	asm volatile(KVM_ASM_SAFE_FEP(insn)				\
		     : KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code)	\
		     : inputs						\
		     : KVM_ASM_SAFE_CLOBBERS);				\
	vector;								\
})

#define kvm_asm_safe_ec_fep(insn, error_code, inputs...)		\
({									\
	uint8_t vector;							\
									\
	asm volatile(KVM_ASM_SAFE_FEP(insn)				\
		     : KVM_ASM_SAFE_OUTPUTS(vector, error_code)		\
		     : inputs						\
		     : KVM_ASM_SAFE_CLOBBERS);				\
	vector;								\
})

#define BUILD_READ_U64_SAFE_HELPER(insn, _fep, _FEP)			\
static inline uint8_t insn##_safe ##_fep(uint32_t idx, uint64_t *val)	\
{									\
	uint64_t error_code;						\
	uint8_t vector;							\
	uint32_t a, d;							\
									\
	asm volatile(KVM_ASM_SAFE##_FEP(#insn)				\
		     : "=a"(a), "=d"(d),				\
		       KVM_ASM_SAFE_OUTPUTS(vector, error_code)		\
		     : "c"(idx)						\
		     : KVM_ASM_SAFE_CLOBBERS);				\
									\
	*val = (uint64_t)a | ((uint64_t)d << 32);			\
	return vector;							\
}

/*
 * Generate {insn}_safe() and {insn}_safe_fep() helpers for instructions that
 * use ECX as an input index, and EDX:EAX as a 64-bit output.
 */
#define BUILD_READ_U64_SAFE_HELPERS(insn)		\
	BUILD_READ_U64_SAFE_HELPER(insn, , )		\
	BUILD_READ_U64_SAFE_HELPER(insn, _fep, _FEP)	\

BUILD_READ_U64_SAFE_HELPERS(rdmsr)
BUILD_READ_U64_SAFE_HELPERS(rdpmc)
BUILD_READ_U64_SAFE_HELPERS(xgetbv)
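
/*
 * Illustrative example (not part of the upstream header): the generated
 * helpers return the fault vector, '0' on success, and the value read via an
 * out-param, e.g.
 *
 *	uint64_t val;
 *	uint8_t vector = rdmsr_safe(msr, &val);
 *
 *	GUEST_ASSERT(!vector);
 */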

static inline uint8_t wrmsr_safe(uint32_t msr, uint64_t val)
{
	return kvm_asm_safe("wrmsr", "a"(val & -1u), "d"(val >> 32), "c"(msr));
}

static inline uint8_t xsetbv_safe(uint32_t index, uint64_t value)
{
	u32 eax = value;
	u32 edx = value >> 32;

	return kvm_asm_safe("xsetbv", "a" (eax), "d" (edx), "c" (index));
}

bool kvm_is_tdp_enabled(void);

static inline bool kvm_is_pmu_enabled(void)
{
	return get_kvm_param_bool("enable_pmu");
}

static inline bool kvm_is_forced_emulation_enabled(void)
{
	return !!get_kvm_param_integer("force_emulation_prefix");
}

uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr,
				    int *level);
uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr);

uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
		       uint64_t a3);
uint64_t __xen_hypercall(uint64_t nr, uint64_t a0, void *a1);
void xen_hypercall(uint64_t nr, uint64_t a0, void *a1);

static inline uint64_t __kvm_hypercall_map_gpa_range(uint64_t gpa,
						     uint64_t size, uint64_t flags)
{
	return kvm_hypercall(KVM_HC_MAP_GPA_RANGE, gpa, size >> PAGE_SHIFT, flags, 0);
}

static inline void kvm_hypercall_map_gpa_range(uint64_t gpa, uint64_t size,
					       uint64_t flags)
{
	uint64_t ret = __kvm_hypercall_map_gpa_range(gpa, size, flags);

	GUEST_ASSERT(!ret);
}

void __vm_xsave_require_permission(uint64_t xfeature, const char *name);

#define vm_xsave_require_permission(xfeature)	\
	__vm_xsave_require_permission(xfeature, #xfeature)

enum pg_level {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
	PG_LEVEL_512G,
	PG_LEVEL_NUM
};

#define PG_LEVEL_SHIFT(_level) ((_level - 1) * 9 + 12)
#define PG_LEVEL_SIZE(_level) (1ull << PG_LEVEL_SHIFT(_level))

#define PG_SIZE_4K PG_LEVEL_SIZE(PG_LEVEL_4K)
#define PG_SIZE_2M PG_LEVEL_SIZE(PG_LEVEL_2M)
#define PG_SIZE_1G PG_LEVEL_SIZE(PG_LEVEL_1G)
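
/*
 * E.g. PG_LEVEL_SHIFT(PG_LEVEL_2M) == (2 - 1) * 9 + 12 == 21, i.e.
 * PG_SIZE_2M == 2MiB, matching x86-64's 512 entries per paging structure.
 */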

void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level);
void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
		    uint64_t nr_bytes, int level);

/*
 * Basic CPU control in CR0
 */
#define X86_CR0_PE	(1UL<<0) /* Protection Enable */
#define X86_CR0_MP	(1UL<<1) /* Monitor Coprocessor */
#define X86_CR0_EM	(1UL<<2) /* Emulation */
#define X86_CR0_TS	(1UL<<3) /* Task Switched */
#define X86_CR0_ET	(1UL<<4) /* Extension Type */
#define X86_CR0_NE	(1UL<<5) /* Numeric Error */
#define X86_CR0_WP	(1UL<<16) /* Write Protect */
#define X86_CR0_AM	(1UL<<18) /* Alignment Mask */
#define X86_CR0_NW	(1UL<<29) /* Not Write-through */
#define X86_CR0_CD	(1UL<<30) /* Cache Disable */
#define X86_CR0_PG	(1UL<<31) /* Paging */

#define PFERR_PRESENT_BIT 0
#define PFERR_WRITE_BIT 1
#define PFERR_USER_BIT 2
#define PFERR_RSVD_BIT 3
#define PFERR_FETCH_BIT 4
#define PFERR_PK_BIT 5
#define PFERR_SGX_BIT 15
#define PFERR_GUEST_FINAL_BIT 32
#define PFERR_GUEST_PAGE_BIT 33
#define PFERR_IMPLICIT_ACCESS_BIT 48

#define PFERR_PRESENT_MASK	BIT(PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK	BIT(PFERR_WRITE_BIT)
#define PFERR_USER_MASK		BIT(PFERR_USER_BIT)
#define PFERR_RSVD_MASK		BIT(PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK	BIT(PFERR_FETCH_BIT)
#define PFERR_PK_MASK		BIT(PFERR_PK_BIT)
#define PFERR_SGX_MASK		BIT(PFERR_SGX_BIT)
#define PFERR_GUEST_FINAL_MASK	BIT_ULL(PFERR_GUEST_FINAL_BIT)
#define PFERR_GUEST_PAGE_MASK	BIT_ULL(PFERR_GUEST_PAGE_BIT)
#define PFERR_IMPLICIT_ACCESS	BIT_ULL(PFERR_IMPLICIT_ACCESS_BIT)

bool sys_clocksource_is_based_on_tsc(void);

#endif /* SELFTEST_KVM_PROCESSOR_H */