/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2018, Google LLC.
 */

#ifndef SELFTEST_KVM_PROCESSOR_H
#define SELFTEST_KVM_PROCESSOR_H

#include <assert.h>
#include <stdint.h>
#include <syscall.h>

#include <asm/msr-index.h>
#include <asm/prctl.h>

#include <linux/kvm_para.h>
#include <linux/stringify.h>

#include "kvm_util.h"
#include "ucall_common.h"

extern bool host_cpu_is_intel;
extern bool host_cpu_is_amd;
extern uint64_t guest_tsc_khz;

#ifndef MAX_NR_CPUID_ENTRIES
#define MAX_NR_CPUID_ENTRIES 100
#endif

#define NONCANONICAL 0xaaaaaaaaaaaaaaaaull

/* Forced emulation prefix, used to invoke the emulator unconditionally. */
#define KVM_FEP "ud2; .byte 'k', 'v', 'm';"

#define NMI_VECTOR	0x02

const char *ex_str(int vector);

#define X86_EFLAGS_FIXED	(1u << 1)

#define X86_CR4_VME		(1ul << 0)
#define X86_CR4_PVI		(1ul << 1)
#define X86_CR4_TSD		(1ul << 2)
#define X86_CR4_DE		(1ul << 3)
#define X86_CR4_PSE		(1ul << 4)
#define X86_CR4_PAE		(1ul << 5)
#define X86_CR4_MCE		(1ul << 6)
#define X86_CR4_PGE		(1ul << 7)
#define X86_CR4_PCE		(1ul << 8)
#define X86_CR4_OSFXSR		(1ul << 9)
#define X86_CR4_OSXMMEXCPT	(1ul << 10)
#define X86_CR4_UMIP		(1ul << 11)
#define X86_CR4_LA57		(1ul << 12)
#define X86_CR4_VMXE		(1ul << 13)
#define X86_CR4_SMXE		(1ul << 14)
#define X86_CR4_FSGSBASE	(1ul << 16)
#define X86_CR4_PCIDE		(1ul << 17)
#define X86_CR4_OSXSAVE		(1ul << 18)
#define X86_CR4_SMEP		(1ul << 20)
#define X86_CR4_SMAP		(1ul << 21)
#define X86_CR4_PKE		(1ul << 22)

struct xstate_header {
	u64 xstate_bv;
	u64 xcomp_bv;
	u64 reserved[6];
} __attribute__((packed));

struct xstate {
	u8 i387[512];
	struct xstate_header header;
	u8 extended_state_area[0];
} __attribute__ ((packed, aligned (64)));

#define XFEATURE_MASK_FP		BIT_ULL(0)
#define XFEATURE_MASK_SSE		BIT_ULL(1)
#define XFEATURE_MASK_YMM		BIT_ULL(2)
#define XFEATURE_MASK_BNDREGS		BIT_ULL(3)
#define XFEATURE_MASK_BNDCSR		BIT_ULL(4)
#define XFEATURE_MASK_OPMASK		BIT_ULL(5)
#define XFEATURE_MASK_ZMM_Hi256		BIT_ULL(6)
#define XFEATURE_MASK_Hi16_ZMM		BIT_ULL(7)
#define XFEATURE_MASK_PT		BIT_ULL(8)
#define XFEATURE_MASK_PKRU		BIT_ULL(9)
#define XFEATURE_MASK_PASID		BIT_ULL(10)
#define XFEATURE_MASK_CET_USER		BIT_ULL(11)
#define XFEATURE_MASK_CET_KERNEL	BIT_ULL(12)
#define XFEATURE_MASK_LBR		BIT_ULL(15)
#define XFEATURE_MASK_XTILE_CFG		BIT_ULL(17)
#define XFEATURE_MASK_XTILE_DATA	BIT_ULL(18)

#define XFEATURE_MASK_AVX512		(XFEATURE_MASK_OPMASK | \
					 XFEATURE_MASK_ZMM_Hi256 | \
					 XFEATURE_MASK_Hi16_ZMM)
#define XFEATURE_MASK_XTILE		(XFEATURE_MASK_XTILE_DATA | \
					 XFEATURE_MASK_XTILE_CFG)
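/*
 * Sample usage (hypothetical helper, not required by any test): the composite
 * masks exist because multi-register features are all-or-nothing, e.g. AVX-512
 * state is valid only if all three of its XSAVE components are enabled.
 */
static inline bool xcr0_has_avx512(uint64_t xcr0)
{
	/* All of OPMASK, ZMM_Hi256 and Hi16_ZMM must be set, not just one. */
	return (xcr0 & XFEATURE_MASK_AVX512) == XFEATURE_MASK_AVX512;
}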
/* Note, these are ordered alphabetically to match kvm_cpuid_entry2.  Eww. */
enum cpuid_output_regs {
	KVM_CPUID_EAX,
	KVM_CPUID_EBX,
	KVM_CPUID_ECX,
	KVM_CPUID_EDX
};

/*
 * Pack the information into a 64-bit value so that each X86_FEATURE_XXX can be
 * passed by value with no overhead.
 */
struct kvm_x86_cpu_feature {
	u32 function;
	u16 index;
	u8 reg;
	u8 bit;
};
#define KVM_X86_CPU_FEATURE(fn, idx, gpr, __bit)				\
({										\
	struct kvm_x86_cpu_feature feature = {					\
		.function = fn,							\
		.index = idx,							\
		.reg = KVM_CPUID_##gpr,						\
		.bit = __bit,							\
	};									\
										\
	kvm_static_assert((fn & 0xc0000000) == 0 ||				\
			  (fn & 0xc0000000) == 0x40000000 ||			\
			  (fn & 0xc0000000) == 0x80000000 ||			\
			  (fn & 0xc0000000) == 0xc0000000);			\
	kvm_static_assert(idx < BIT(sizeof(feature.index) * BITS_PER_BYTE));	\
	feature;								\
})

/*
 * Basic Leaves, a.k.a. Intel defined
 */
#define X86_FEATURE_MWAIT		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 3)
#define X86_FEATURE_VMX			KVM_X86_CPU_FEATURE(0x1, 0, ECX, 5)
#define X86_FEATURE_SMX			KVM_X86_CPU_FEATURE(0x1, 0, ECX, 6)
#define X86_FEATURE_PDCM		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 15)
#define X86_FEATURE_PCID		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 17)
#define X86_FEATURE_X2APIC		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 21)
#define X86_FEATURE_MOVBE		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 22)
#define X86_FEATURE_TSC_DEADLINE_TIMER	KVM_X86_CPU_FEATURE(0x1, 0, ECX, 24)
#define X86_FEATURE_XSAVE		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 26)
#define X86_FEATURE_OSXSAVE		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 27)
#define X86_FEATURE_RDRAND		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 30)
#define X86_FEATURE_HYPERVISOR		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 31)
#define X86_FEATURE_PAE			KVM_X86_CPU_FEATURE(0x1, 0, EDX, 6)
#define X86_FEATURE_MCE			KVM_X86_CPU_FEATURE(0x1, 0, EDX, 7)
#define X86_FEATURE_APIC		KVM_X86_CPU_FEATURE(0x1, 0, EDX, 9)
#define X86_FEATURE_CLFLUSH		KVM_X86_CPU_FEATURE(0x1, 0, EDX, 19)
#define X86_FEATURE_XMM			KVM_X86_CPU_FEATURE(0x1, 0, EDX, 25)
#define X86_FEATURE_XMM2		KVM_X86_CPU_FEATURE(0x1, 0, EDX, 26)
#define X86_FEATURE_FSGSBASE		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 0)
#define X86_FEATURE_TSC_ADJUST		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 1)
#define X86_FEATURE_SGX			KVM_X86_CPU_FEATURE(0x7, 0, EBX, 2)
#define X86_FEATURE_HLE			KVM_X86_CPU_FEATURE(0x7, 0, EBX, 4)
#define X86_FEATURE_SMEP		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 7)
#define X86_FEATURE_INVPCID		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 10)
#define X86_FEATURE_RTM			KVM_X86_CPU_FEATURE(0x7, 0, EBX, 11)
#define X86_FEATURE_MPX			KVM_X86_CPU_FEATURE(0x7, 0, EBX, 14)
#define X86_FEATURE_SMAP		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 20)
#define X86_FEATURE_PCOMMIT		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 22)
#define X86_FEATURE_CLFLUSHOPT		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 23)
#define X86_FEATURE_CLWB		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 24)
#define X86_FEATURE_UMIP		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 2)
#define X86_FEATURE_PKU			KVM_X86_CPU_FEATURE(0x7, 0, ECX, 3)
#define X86_FEATURE_OSPKE		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 4)
#define X86_FEATURE_LA57		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 16)
#define X86_FEATURE_RDPID		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 22)
#define X86_FEATURE_SGX_LC		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 30)
#define X86_FEATURE_SHSTK		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 7)
#define X86_FEATURE_IBT			KVM_X86_CPU_FEATURE(0x7, 0, EDX, 20)
#define X86_FEATURE_AMX_TILE		KVM_X86_CPU_FEATURE(0x7, 0, EDX, 24)
#define X86_FEATURE_SPEC_CTRL		KVM_X86_CPU_FEATURE(0x7, 0, EDX, 26)
#define X86_FEATURE_ARCH_CAPABILITIES	KVM_X86_CPU_FEATURE(0x7, 0, EDX, 29)
#define X86_FEATURE_PKS			KVM_X86_CPU_FEATURE(0x7, 0, ECX, 31)
#define X86_FEATURE_XTILECFG		KVM_X86_CPU_FEATURE(0xD, 0, EAX, 17)
#define X86_FEATURE_XTILEDATA		KVM_X86_CPU_FEATURE(0xD, 0, EAX, 18)
#define X86_FEATURE_XSAVES		KVM_X86_CPU_FEATURE(0xD, 1, EAX, 3)
#define X86_FEATURE_XFD			KVM_X86_CPU_FEATURE(0xD, 1, EAX, 4)
#define X86_FEATURE_XTILEDATA_XFD	KVM_X86_CPU_FEATURE(0xD, 18, ECX, 2)
/*
 * Extended Leaves, a.k.a. AMD defined
 */
#define X86_FEATURE_SVM			KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 2)
#define X86_FEATURE_PERFCTR_CORE	KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 23)
#define X86_FEATURE_PERFCTR_NB		KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 24)
#define X86_FEATURE_PERFCTR_LLC		KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 28)
#define X86_FEATURE_NX			KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 20)
#define X86_FEATURE_GBPAGES		KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 26)
#define X86_FEATURE_RDTSCP		KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 27)
#define X86_FEATURE_LM			KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 29)
#define X86_FEATURE_INVTSC		KVM_X86_CPU_FEATURE(0x80000007, 0, EDX, 8)
#define X86_FEATURE_RDPRU		KVM_X86_CPU_FEATURE(0x80000008, 0, EBX, 4)
#define X86_FEATURE_AMD_IBPB		KVM_X86_CPU_FEATURE(0x80000008, 0, EBX, 12)
#define X86_FEATURE_NPT			KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 0)
#define X86_FEATURE_LBRV		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 1)
#define X86_FEATURE_NRIPS		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 3)
#define X86_FEATURE_TSCRATEMSR		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 4)
#define X86_FEATURE_PAUSEFILTER		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 10)
#define X86_FEATURE_PFTHRESHOLD		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 12)
#define X86_FEATURE_VGIF		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 16)
#define X86_FEATURE_IDLE_HLT		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 30)
#define X86_FEATURE_SEV			KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 1)
#define X86_FEATURE_SEV_ES		KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 3)
#define X86_FEATURE_SEV_SNP		KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 4)
#define X86_FEATURE_PERFMON_V2		KVM_X86_CPU_FEATURE(0x80000022, 0, EAX, 0)
#define X86_FEATURE_LBR_PMC_FREEZE	KVM_X86_CPU_FEATURE(0x80000022, 0, EAX, 2)

/*
 * KVM defined paravirt features.
 */
#define X86_FEATURE_KVM_CLOCKSOURCE	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 0)
#define X86_FEATURE_KVM_NOP_IO_DELAY	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 1)
#define X86_FEATURE_KVM_MMU_OP		KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 2)
#define X86_FEATURE_KVM_CLOCKSOURCE2	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 3)
#define X86_FEATURE_KVM_ASYNC_PF	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 4)
#define X86_FEATURE_KVM_STEAL_TIME	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 5)
#define X86_FEATURE_KVM_PV_EOI		KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 6)
#define X86_FEATURE_KVM_PV_UNHALT	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 7)
/* Bit 8 apparently isn't used?!?! */
#define X86_FEATURE_KVM_PV_TLB_FLUSH	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 9)
#define X86_FEATURE_KVM_ASYNC_PF_VMEXIT	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 10)
#define X86_FEATURE_KVM_PV_SEND_IPI	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 11)
#define X86_FEATURE_KVM_POLL_CONTROL	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 12)
#define X86_FEATURE_KVM_PV_SCHED_YIELD	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 13)
#define X86_FEATURE_KVM_ASYNC_PF_INT	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 14)
#define X86_FEATURE_KVM_MSI_EXT_DEST_ID	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 15)
#define X86_FEATURE_KVM_HC_MAP_GPA_RANGE	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 16)
#define X86_FEATURE_KVM_MIGRATION_CONTROL	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 17)
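/*
 * Sample usage (hypothetical test code): each X86_FEATURE_* literal is passed
 * by value to the query helpers defined later in this header, e.g.
 *
 *	if (this_cpu_has(X86_FEATURE_XSAVE))		// in guest code
 *		...
 *
 *	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));	// in host code; assumes
 *							// TEST_REQUIRE() from
 *							// test_util.h
 */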
/*
 * Same idea as X86_FEATURE_XXX, but X86_PROPERTY_XXX retrieves a multi-bit
 * value/property as opposed to a single-bit feature.  Again, pack the info
 * into a 64-bit value to pass by value with no overhead.
 */
struct kvm_x86_cpu_property {
	u32 function;
	u8 index;
	u8 reg;
	u8 lo_bit;
	u8 hi_bit;
};
#define KVM_X86_CPU_PROPERTY(fn, idx, gpr, low_bit, high_bit)			\
({										\
	struct kvm_x86_cpu_property property = {				\
		.function = fn,							\
		.index = idx,							\
		.reg = KVM_CPUID_##gpr,						\
		.lo_bit = low_bit,						\
		.hi_bit = high_bit,						\
	};									\
										\
	kvm_static_assert(low_bit < high_bit);					\
	kvm_static_assert((fn & 0xc0000000) == 0 ||				\
			  (fn & 0xc0000000) == 0x40000000 ||			\
			  (fn & 0xc0000000) == 0x80000000 ||			\
			  (fn & 0xc0000000) == 0xc0000000);			\
	kvm_static_assert(idx < BIT(sizeof(property.index) * BITS_PER_BYTE));	\
	property;								\
})

#define X86_PROPERTY_MAX_BASIC_LEAF		KVM_X86_CPU_PROPERTY(0, 0, EAX, 0, 31)
#define X86_PROPERTY_PMU_VERSION		KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 0, 7)
#define X86_PROPERTY_PMU_NR_GP_COUNTERS		KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 8, 15)
#define X86_PROPERTY_PMU_GP_COUNTERS_BIT_WIDTH	KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 16, 23)
#define X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH	KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 24, 31)
#define X86_PROPERTY_PMU_EVENTS_MASK		KVM_X86_CPU_PROPERTY(0xa, 0, EBX, 0, 12)
#define X86_PROPERTY_PMU_FIXED_COUNTERS_BITMASK	KVM_X86_CPU_PROPERTY(0xa, 0, ECX, 0, 31)
#define X86_PROPERTY_PMU_NR_FIXED_COUNTERS	KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 0, 4)
#define X86_PROPERTY_PMU_FIXED_COUNTERS_BIT_WIDTH	KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 5, 12)

#define X86_PROPERTY_SUPPORTED_XCR0_LO		KVM_X86_CPU_PROPERTY(0xd, 0, EAX, 0, 31)
#define X86_PROPERTY_XSTATE_MAX_SIZE_XCR0	KVM_X86_CPU_PROPERTY(0xd, 0, EBX, 0, 31)
#define X86_PROPERTY_XSTATE_MAX_SIZE		KVM_X86_CPU_PROPERTY(0xd, 0, ECX, 0, 31)
#define X86_PROPERTY_SUPPORTED_XCR0_HI		KVM_X86_CPU_PROPERTY(0xd, 0, EDX, 0, 31)

#define X86_PROPERTY_XSTATE_TILE_SIZE		KVM_X86_CPU_PROPERTY(0xd, 18, EAX, 0, 31)
#define X86_PROPERTY_XSTATE_TILE_OFFSET		KVM_X86_CPU_PROPERTY(0xd, 18, EBX, 0, 31)
#define X86_PROPERTY_AMX_MAX_PALETTE_TABLES	KVM_X86_CPU_PROPERTY(0x1d, 0, EAX, 0, 31)
#define X86_PROPERTY_AMX_TOTAL_TILE_BYTES	KVM_X86_CPU_PROPERTY(0x1d, 1, EAX, 0, 15)
#define X86_PROPERTY_AMX_BYTES_PER_TILE		KVM_X86_CPU_PROPERTY(0x1d, 1, EAX, 16, 31)
#define X86_PROPERTY_AMX_BYTES_PER_ROW		KVM_X86_CPU_PROPERTY(0x1d, 1, EBX, 0, 15)
#define X86_PROPERTY_AMX_NR_TILE_REGS		KVM_X86_CPU_PROPERTY(0x1d, 1, EBX, 16, 31)
#define X86_PROPERTY_AMX_MAX_ROWS		KVM_X86_CPU_PROPERTY(0x1d, 1, ECX, 0, 15)

#define X86_PROPERTY_MAX_KVM_LEAF		KVM_X86_CPU_PROPERTY(0x40000000, 0, EAX, 0, 31)

#define X86_PROPERTY_MAX_EXT_LEAF		KVM_X86_CPU_PROPERTY(0x80000000, 0, EAX, 0, 31)
#define X86_PROPERTY_MAX_PHY_ADDR		KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 0, 7)
#define X86_PROPERTY_MAX_VIRT_ADDR		KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 8, 15)
#define X86_PROPERTY_GUEST_MAX_PHY_ADDR		KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 16, 23)
#define X86_PROPERTY_SEV_C_BIT			KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 0, 5)
#define X86_PROPERTY_PHYS_ADDR_REDUCTION	KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 6, 11)
#define X86_PROPERTY_NR_PERFCTR_CORE		KVM_X86_CPU_PROPERTY(0x80000022, 0, EBX, 0, 3)
#define X86_PROPERTY_NR_PERFCTR_NB		KVM_X86_CPU_PROPERTY(0x80000022, 0, EBX, 10, 15)

#define X86_PROPERTY_MAX_CENTAUR_LEAF		KVM_X86_CPU_PROPERTY(0xC0000000, 0, EAX, 0, 31)
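/*
 * Sample usage (hypothetical test code): properties are queried the same way
 * features are, but return the whole bit range, e.g. guest code can derive its
 * effective physical-address width directly from CPUID:
 *
 *	int paddr_bits = this_cpu_property(X86_PROPERTY_MAX_PHY_ADDR);
 *	uint64_t gpa_mask = GENMASK_ULL(paddr_bits - 1, PAGE_SHIFT);
 */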
/*
 * Intel's architectural PMU events are bizarre.  They have a "feature" bit
 * that indicates the feature is _not_ supported, and a property that states
 * the length of the bit mask of unsupported features.  A feature is supported
 * if the size of the bit mask is larger than the "unavailable" bit, and said
 * bit is not set.  Fixed counters also have bizarre enumeration, but inverted
 * from arch events for general purpose counters.  Fixed counters are supported
 * if a feature flag is set **OR** the total number of fixed counters is
 * greater than the index of the counter.
 *
 * Wrap the events for general purpose and fixed counters to simplify checking
 * whether or not a given architectural event is supported.
 */
struct kvm_x86_pmu_feature {
	struct kvm_x86_cpu_feature f;
};
#define KVM_X86_PMU_FEATURE(__reg, __bit)				\
({									\
	struct kvm_x86_pmu_feature feature = {				\
		.f = KVM_X86_CPU_FEATURE(0xa, 0, __reg, __bit),		\
	};								\
									\
	kvm_static_assert(KVM_CPUID_##__reg == KVM_CPUID_EBX ||		\
			  KVM_CPUID_##__reg == KVM_CPUID_ECX);		\
	feature;							\
})

#define X86_PMU_FEATURE_CPU_CYCLES			KVM_X86_PMU_FEATURE(EBX, 0)
#define X86_PMU_FEATURE_INSNS_RETIRED			KVM_X86_PMU_FEATURE(EBX, 1)
#define X86_PMU_FEATURE_REFERENCE_CYCLES		KVM_X86_PMU_FEATURE(EBX, 2)
#define X86_PMU_FEATURE_LLC_REFERENCES			KVM_X86_PMU_FEATURE(EBX, 3)
#define X86_PMU_FEATURE_LLC_MISSES			KVM_X86_PMU_FEATURE(EBX, 4)
#define X86_PMU_FEATURE_BRANCH_INSNS_RETIRED		KVM_X86_PMU_FEATURE(EBX, 5)
#define X86_PMU_FEATURE_BRANCHES_MISPREDICTED		KVM_X86_PMU_FEATURE(EBX, 6)
#define X86_PMU_FEATURE_TOPDOWN_SLOTS			KVM_X86_PMU_FEATURE(EBX, 7)
#define X86_PMU_FEATURE_TOPDOWN_BE_BOUND		KVM_X86_PMU_FEATURE(EBX, 8)
#define X86_PMU_FEATURE_TOPDOWN_BAD_SPEC		KVM_X86_PMU_FEATURE(EBX, 9)
#define X86_PMU_FEATURE_TOPDOWN_FE_BOUND		KVM_X86_PMU_FEATURE(EBX, 10)
#define X86_PMU_FEATURE_TOPDOWN_RETIRING		KVM_X86_PMU_FEATURE(EBX, 11)
#define X86_PMU_FEATURE_LBR_INSERTS			KVM_X86_PMU_FEATURE(EBX, 12)

#define X86_PMU_FEATURE_INSNS_RETIRED_FIXED		KVM_X86_PMU_FEATURE(ECX, 0)
#define X86_PMU_FEATURE_CPU_CYCLES_FIXED		KVM_X86_PMU_FEATURE(ECX, 1)
#define X86_PMU_FEATURE_REFERENCE_TSC_CYCLES_FIXED	KVM_X86_PMU_FEATURE(ECX, 2)
#define X86_PMU_FEATURE_TOPDOWN_SLOTS_FIXED		KVM_X86_PMU_FEATURE(ECX, 3)

static inline unsigned int x86_family(unsigned int eax)
{
	unsigned int x86;

	x86 = (eax >> 8) & 0xf;

	if (x86 == 0xf)
		x86 += (eax >> 20) & 0xff;

	return x86;
}

static inline unsigned int x86_model(unsigned int eax)
{
	return ((eax >> 12) & 0xf0) | ((eax >> 4) & 0x0f);
}
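/*
 * Worked example (hypothetical helper): for CPUID.0x1.EAX = 0x000806ec, the
 * family is (0x806ec >> 8) & 0xf = 6 (not 0xf, so no extended family), and the
 * model is ((0x806ec >> 12) & 0xf0) | ((0x806ec >> 4) & 0x0f) = 0x8e.
 */
static inline void check_fms_decode(void)
{
	GUEST_ASSERT(x86_family(0x000806ec) == 0x6);
	GUEST_ASSERT(x86_model(0x000806ec) == 0x8e);
}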
/* Page table bitfield declarations */
#define PTE_PRESENT_MASK	BIT_ULL(0)
#define PTE_WRITABLE_MASK	BIT_ULL(1)
#define PTE_USER_MASK		BIT_ULL(2)
#define PTE_ACCESSED_MASK	BIT_ULL(5)
#define PTE_DIRTY_MASK		BIT_ULL(6)
#define PTE_LARGE_MASK		BIT_ULL(7)
#define PTE_GLOBAL_MASK		BIT_ULL(8)
#define PTE_NX_MASK		BIT_ULL(63)

#define PHYSICAL_PAGE_MASK	GENMASK_ULL(51, 12)

#define PAGE_SHIFT		12
#define PAGE_SIZE		(1ULL << PAGE_SHIFT)
#define PAGE_MASK		(~(PAGE_SIZE-1) & PHYSICAL_PAGE_MASK)

#define HUGEPAGE_SHIFT(x)	(PAGE_SHIFT + (((x) - 1) * 9))
#define HUGEPAGE_SIZE(x)	(1UL << HUGEPAGE_SHIFT(x))
#define HUGEPAGE_MASK(x)	(~(HUGEPAGE_SIZE(x) - 1) & PHYSICAL_PAGE_MASK)

#define PTE_GET_PA(pte)		((pte) & PHYSICAL_PAGE_MASK)
#define PTE_GET_PFN(pte)	(PTE_GET_PA(pte) >> PAGE_SHIFT)

/* General Registers in 64-Bit Mode */
struct gpr64_regs {
	u64 rax;
	u64 rcx;
	u64 rdx;
	u64 rbx;
	u64 rsp;
	u64 rbp;
	u64 rsi;
	u64 rdi;
	u64 r8;
	u64 r9;
	u64 r10;
	u64 r11;
	u64 r12;
	u64 r13;
	u64 r14;
	u64 r15;
};

struct desc64 {
	uint16_t limit0;
	uint16_t base0;
	unsigned base1:8, type:4, s:1, dpl:2, p:1;
	unsigned limit1:4, avl:1, l:1, db:1, g:1, base2:8;
	uint32_t base3;
	uint32_t zero1;
} __attribute__((packed));

struct desc_ptr {
	uint16_t size;
	uint64_t address;
} __attribute__((packed));

struct kvm_x86_state {
	struct kvm_xsave *xsave;
	struct kvm_vcpu_events events;
	struct kvm_mp_state mp_state;
	struct kvm_regs regs;
	struct kvm_xcrs xcrs;
	struct kvm_sregs sregs;
	struct kvm_debugregs debugregs;
	union {
		struct kvm_nested_state nested;
		char nested_[16384];
	};
	struct kvm_msrs msrs;
};

static inline uint64_t get_desc64_base(const struct desc64 *desc)
{
	return ((uint64_t)desc->base3 << 32) |
	       (desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
}
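/*
 * Sample usage (hypothetical helper): the PTE_* masks decode raw page table
 * entries, e.g. as returned by vm_get_page_table_entry() later in this header.
 */
static inline bool pte_is_writable_page(uint64_t pte)
{
	/* A PTE maps a writable page iff it is both present and writable. */
	return (pte & (PTE_PRESENT_MASK | PTE_WRITABLE_MASK)) ==
	       (PTE_PRESENT_MASK | PTE_WRITABLE_MASK);
}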
static inline uint64_t rdtsc(void)
{
	uint32_t eax, edx;
	uint64_t tsc_val;
	/*
	 * The lfence is to wait (on Intel CPUs) until all previous
	 * instructions have been executed.  If software requires RDTSC to be
	 * executed prior to execution of any subsequent instruction, it can
	 * execute LFENCE immediately after RDTSC.
	 */
	__asm__ __volatile__("lfence; rdtsc; lfence" : "=a"(eax), "=d"(edx));
	tsc_val = ((uint64_t)edx) << 32 | eax;
	return tsc_val;
}

static inline uint64_t rdtscp(uint32_t *aux)
{
	uint32_t eax, edx;

	__asm__ __volatile__("rdtscp" : "=a"(eax), "=d"(edx), "=c"(*aux));
	return ((uint64_t)edx) << 32 | eax;
}

static inline uint64_t rdmsr(uint32_t msr)
{
	uint32_t a, d;

	__asm__ __volatile__("rdmsr" : "=a"(a), "=d"(d) : "c"(msr) : "memory");

	return a | ((uint64_t) d << 32);
}

static inline void wrmsr(uint32_t msr, uint64_t value)
{
	uint32_t a = value;
	uint32_t d = value >> 32;

	__asm__ __volatile__("wrmsr" :: "a"(a), "d"(d), "c"(msr) : "memory");
}


static inline uint16_t inw(uint16_t port)
{
	uint16_t tmp;

	__asm__ __volatile__("in %%dx, %%ax"
		: /* output */ "=a" (tmp)
		: /* input */ "d" (port));

	return tmp;
}

static inline uint16_t get_es(void)
{
	uint16_t es;

	__asm__ __volatile__("mov %%es, %[es]"
			     : /* output */ [es]"=rm"(es));
	return es;
}

static inline uint16_t get_cs(void)
{
	uint16_t cs;

	__asm__ __volatile__("mov %%cs, %[cs]"
			     : /* output */ [cs]"=rm"(cs));
	return cs;
}

static inline uint16_t get_ss(void)
{
	uint16_t ss;

	__asm__ __volatile__("mov %%ss, %[ss]"
			     : /* output */ [ss]"=rm"(ss));
	return ss;
}

static inline uint16_t get_ds(void)
{
	uint16_t ds;

	__asm__ __volatile__("mov %%ds, %[ds]"
			     : /* output */ [ds]"=rm"(ds));
	return ds;
}

static inline uint16_t get_fs(void)
{
	uint16_t fs;

	__asm__ __volatile__("mov %%fs, %[fs]"
			     : /* output */ [fs]"=rm"(fs));
	return fs;
}

static inline uint16_t get_gs(void)
{
	uint16_t gs;

	__asm__ __volatile__("mov %%gs, %[gs]"
			     : /* output */ [gs]"=rm"(gs));
	return gs;
}

static inline uint16_t get_tr(void)
{
	uint16_t tr;

	__asm__ __volatile__("str %[tr]"
			     : /* output */ [tr]"=rm"(tr));
	return tr;
}

static inline uint64_t get_cr0(void)
{
	uint64_t cr0;

	__asm__ __volatile__("mov %%cr0, %[cr0]"
			     : /* output */ [cr0]"=r"(cr0));
	return cr0;
}

static inline uint64_t get_cr3(void)
{
	uint64_t cr3;

	__asm__ __volatile__("mov %%cr3, %[cr3]"
			     : /* output */ [cr3]"=r"(cr3));
	return cr3;
}

static inline uint64_t get_cr4(void)
{
	uint64_t cr4;

	__asm__ __volatile__("mov %%cr4, %[cr4]"
			     : /* output */ [cr4]"=r"(cr4));
	return cr4;
}

static inline void set_cr4(uint64_t val)
{
	__asm__ __volatile__("mov %0, %%cr4" : : "r" (val) : "memory");
}

static inline void set_idt(const struct desc_ptr *idt_desc)
{
	__asm__ __volatile__("lidt %0"::"m"(*idt_desc));
}

static inline u64 xgetbv(u32 index)
{
	u32 eax, edx;

	__asm__ __volatile__("xgetbv;"
		     : "=a" (eax), "=d" (edx)
		     : "c" (index));
	return eax | ((u64)edx << 32);
}

static inline void xsetbv(u32 index, u64 value)
{
	u32 eax = value;
	u32 edx = value >> 32;

	__asm__ __volatile__("xsetbv" :: "a" (eax), "d" (edx), "c" (index));
}
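/*
 * Sample usage (hypothetical guest helper): before XSETBV can be executed, the
 * guest must set CR4.OSXSAVE.  This sketch enables x87/SSE/AVX state, assuming
 * the vCPU actually supports XSAVE and AVX.
 */
static inline void guest_enable_avx_xstate(void)
{
	set_cr4(get_cr4() | X86_CR4_OSXSAVE);
	xsetbv(0, XFEATURE_MASK_FP | XFEATURE_MASK_SSE | XFEATURE_MASK_YMM);
}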
static inline void wrpkru(u32 pkru)
{
	/* Note, ECX and EDX are architecturally required to be '0'. */
	asm volatile(".byte 0x0f,0x01,0xef\n\t"
		     : : "a" (pkru), "c"(0), "d"(0));
}

static inline struct desc_ptr get_gdt(void)
{
	struct desc_ptr gdt;
	__asm__ __volatile__("sgdt %[gdt]"
			     : /* output */ [gdt]"=m"(gdt));
	return gdt;
}

static inline struct desc_ptr get_idt(void)
{
	struct desc_ptr idt;
	__asm__ __volatile__("sidt %[idt]"
			     : /* output */ [idt]"=m"(idt));
	return idt;
}

static inline void outl(uint16_t port, uint32_t value)
{
	__asm__ __volatile__("outl %%eax, %%dx" : : "d"(port), "a"(value));
}

static inline void __cpuid(uint32_t function, uint32_t index,
			   uint32_t *eax, uint32_t *ebx,
			   uint32_t *ecx, uint32_t *edx)
{
	*eax = function;
	*ecx = index;

	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx)
	    : "memory");
}

static inline void cpuid(uint32_t function,
			 uint32_t *eax, uint32_t *ebx,
			 uint32_t *ecx, uint32_t *edx)
{
	return __cpuid(function, 0, eax, ebx, ecx, edx);
}

static inline uint32_t this_cpu_fms(void)
{
	uint32_t eax, ebx, ecx, edx;

	cpuid(1, &eax, &ebx, &ecx, &edx);
	return eax;
}

static inline uint32_t this_cpu_family(void)
{
	return x86_family(this_cpu_fms());
}

static inline uint32_t this_cpu_model(void)
{
	return x86_model(this_cpu_fms());
}

static inline bool this_cpu_vendor_string_is(const char *vendor)
{
	const uint32_t *chunk = (const uint32_t *)vendor;
	uint32_t eax, ebx, ecx, edx;

	cpuid(0, &eax, &ebx, &ecx, &edx);
	return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]);
}

static inline bool this_cpu_is_intel(void)
{
	return this_cpu_vendor_string_is("GenuineIntel");
}

/*
 * Exclude early K5 samples with a vendor string of "AMDisbetter!"
 */
static inline bool this_cpu_is_amd(void)
{
	return this_cpu_vendor_string_is("AuthenticAMD");
}
static inline uint32_t __this_cpu_has(uint32_t function, uint32_t index,
				      uint8_t reg, uint8_t lo, uint8_t hi)
{
	uint32_t gprs[4];

	__cpuid(function, index,
		&gprs[KVM_CPUID_EAX], &gprs[KVM_CPUID_EBX],
		&gprs[KVM_CPUID_ECX], &gprs[KVM_CPUID_EDX]);

	return (gprs[reg] & GENMASK(hi, lo)) >> lo;
}

static inline bool this_cpu_has(struct kvm_x86_cpu_feature feature)
{
	return __this_cpu_has(feature.function, feature.index,
			      feature.reg, feature.bit, feature.bit);
}

static inline uint32_t this_cpu_property(struct kvm_x86_cpu_property property)
{
	return __this_cpu_has(property.function, property.index,
			      property.reg, property.lo_bit, property.hi_bit);
}

static __always_inline bool this_cpu_has_p(struct kvm_x86_cpu_property property)
{
	uint32_t max_leaf;

	switch (property.function & 0xc0000000) {
	case 0:
		max_leaf = this_cpu_property(X86_PROPERTY_MAX_BASIC_LEAF);
		break;
	case 0x40000000:
		max_leaf = this_cpu_property(X86_PROPERTY_MAX_KVM_LEAF);
		break;
	case 0x80000000:
		max_leaf = this_cpu_property(X86_PROPERTY_MAX_EXT_LEAF);
		break;
	case 0xc0000000:
		max_leaf = this_cpu_property(X86_PROPERTY_MAX_CENTAUR_LEAF);
	}
	return max_leaf >= property.function;
}

static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature)
{
	uint32_t nr_bits;

	if (feature.f.reg == KVM_CPUID_EBX) {
		nr_bits = this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
		return nr_bits > feature.f.bit && !this_cpu_has(feature.f);
	}

	GUEST_ASSERT(feature.f.reg == KVM_CPUID_ECX);
	nr_bits = this_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
	return nr_bits > feature.f.bit || this_cpu_has(feature.f);
}

static __always_inline uint64_t this_cpu_supported_xcr0(void)
{
	if (!this_cpu_has_p(X86_PROPERTY_SUPPORTED_XCR0_LO))
		return 0;

	return this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_LO) |
	       ((uint64_t)this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
}
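/*
 * Sample usage (hypothetical guest helper): combine the property and PMU
 * queries, e.g. instructions-retired is countable either as a general purpose
 * arch event or via its dedicated fixed counter.
 */
static inline bool guest_can_count_insns_retired(void)
{
	if (!this_cpu_has_p(X86_PROPERTY_PMU_VERSION) ||
	    !this_cpu_property(X86_PROPERTY_PMU_VERSION))
		return false;

	return this_pmu_has(X86_PMU_FEATURE_INSNS_RETIRED) ||
	       this_pmu_has(X86_PMU_FEATURE_INSNS_RETIRED_FIXED);
}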
typedef u32 __attribute__((vector_size(16))) sse128_t;
#define __sse128_u	union { sse128_t vec; u64 as_u64[2]; u32 as_u32[4]; }
#define sse128_lo(x)	({ __sse128_u t; t.vec = x; t.as_u64[0]; })
#define sse128_hi(x)	({ __sse128_u t; t.vec = x; t.as_u64[1]; })

static inline void read_sse_reg(int reg, sse128_t *data)
{
	switch (reg) {
	case 0:
		asm("movdqa %%xmm0, %0" : "=m"(*data));
		break;
	case 1:
		asm("movdqa %%xmm1, %0" : "=m"(*data));
		break;
	case 2:
		asm("movdqa %%xmm2, %0" : "=m"(*data));
		break;
	case 3:
		asm("movdqa %%xmm3, %0" : "=m"(*data));
		break;
	case 4:
		asm("movdqa %%xmm4, %0" : "=m"(*data));
		break;
	case 5:
		asm("movdqa %%xmm5, %0" : "=m"(*data));
		break;
	case 6:
		asm("movdqa %%xmm6, %0" : "=m"(*data));
		break;
	case 7:
		asm("movdqa %%xmm7, %0" : "=m"(*data));
		break;
	default:
		BUG();
	}
}

static inline void write_sse_reg(int reg, const sse128_t *data)
{
	switch (reg) {
	case 0:
		asm("movdqa %0, %%xmm0" : : "m"(*data));
		break;
	case 1:
		asm("movdqa %0, %%xmm1" : : "m"(*data));
		break;
	case 2:
		asm("movdqa %0, %%xmm2" : : "m"(*data));
		break;
	case 3:
		asm("movdqa %0, %%xmm3" : : "m"(*data));
		break;
	case 4:
		asm("movdqa %0, %%xmm4" : : "m"(*data));
		break;
	case 5:
		asm("movdqa %0, %%xmm5" : : "m"(*data));
		break;
	case 6:
		asm("movdqa %0, %%xmm6" : : "m"(*data));
		break;
	case 7:
		asm("movdqa %0, %%xmm7" : : "m"(*data));
		break;
	default:
		BUG();
	}
}

static inline void cpu_relax(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static inline void udelay(unsigned long usec)
{
	uint64_t start, now, cycles;

	GUEST_ASSERT(guest_tsc_khz);
	cycles = guest_tsc_khz / 1000 * usec;

	/*
	 * Deliberately don't PAUSE, a.k.a. cpu_relax(), so that the delay is
	 * as accurate as possible, e.g. doesn't trigger PAUSE-Loop VM-Exits.
	 */
	start = rdtsc();
	do {
		now = rdtsc();
	} while (now - start < cycles);
}

#define ud2()			\
	__asm__ __volatile__(	\
		"ud2\n"		\
	)

#define hlt()			\
	__asm__ __volatile__(	\
		"hlt\n"		\
	)
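/*
 * Sample usage (hypothetical guest helper): the sse128_t accessors move XMM
 * registers to/from memory, e.g. to verify that an emulated instruction did
 * not corrupt vector state.
 */
static inline void check_xmm_roundtrip(int reg)
{
	sse128_t in = { 0x01234567, 0x89abcdef, 0xdeadbeef, 0xcafebabe };
	sse128_t out;

	write_sse_reg(reg, &in);
	read_sse_reg(reg, &out);
	GUEST_ASSERT(sse128_lo(in) == sse128_lo(out) &&
		     sse128_hi(in) == sse128_hi(out));
}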
struct kvm_x86_state *vcpu_save_state(struct kvm_vcpu *vcpu);
void vcpu_load_state(struct kvm_vcpu *vcpu, struct kvm_x86_state *state);
void kvm_x86_state_cleanup(struct kvm_x86_state *state);

const struct kvm_msr_list *kvm_get_msr_index_list(void);
const struct kvm_msr_list *kvm_get_feature_msr_index_list(void);
bool kvm_msr_is_in_save_restore_list(uint32_t msr_index);
uint64_t kvm_get_feature_msr(uint64_t msr_index);

static inline void vcpu_msrs_get(struct kvm_vcpu *vcpu,
				 struct kvm_msrs *msrs)
{
	int r = __vcpu_ioctl(vcpu, KVM_GET_MSRS, msrs);

	TEST_ASSERT(r == msrs->nmsrs,
		    "KVM_GET_MSRS failed, r: %i (failed on MSR %x)",
		    r, r < 0 || r >= msrs->nmsrs ? -1 : msrs->entries[r].index);
}
static inline void vcpu_msrs_set(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs)
{
	int r = __vcpu_ioctl(vcpu, KVM_SET_MSRS, msrs);

	TEST_ASSERT(r == msrs->nmsrs,
		    "KVM_SET_MSRS failed, r: %i (failed on MSR %x)",
		    r, r < 0 || r >= msrs->nmsrs ? -1 : msrs->entries[r].index);
}
static inline void vcpu_debugregs_get(struct kvm_vcpu *vcpu,
				      struct kvm_debugregs *debugregs)
{
	vcpu_ioctl(vcpu, KVM_GET_DEBUGREGS, debugregs);
}
static inline void vcpu_debugregs_set(struct kvm_vcpu *vcpu,
				      struct kvm_debugregs *debugregs)
{
	vcpu_ioctl(vcpu, KVM_SET_DEBUGREGS, debugregs);
}
static inline void vcpu_xsave_get(struct kvm_vcpu *vcpu,
				  struct kvm_xsave *xsave)
{
	vcpu_ioctl(vcpu, KVM_GET_XSAVE, xsave);
}
static inline void vcpu_xsave2_get(struct kvm_vcpu *vcpu,
				   struct kvm_xsave *xsave)
{
	vcpu_ioctl(vcpu, KVM_GET_XSAVE2, xsave);
}
static inline void vcpu_xsave_set(struct kvm_vcpu *vcpu,
				  struct kvm_xsave *xsave)
{
	vcpu_ioctl(vcpu, KVM_SET_XSAVE, xsave);
}
static inline void vcpu_xcrs_get(struct kvm_vcpu *vcpu,
				 struct kvm_xcrs *xcrs)
{
	vcpu_ioctl(vcpu, KVM_GET_XCRS, xcrs);
}
static inline void vcpu_xcrs_set(struct kvm_vcpu *vcpu, struct kvm_xcrs *xcrs)
{
	vcpu_ioctl(vcpu, KVM_SET_XCRS, xcrs);
}

const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
					       uint32_t function, uint32_t index);
const struct kvm_cpuid2 *kvm_get_supported_cpuid(void);

static inline uint32_t kvm_cpu_fms(void)
{
	return get_cpuid_entry(kvm_get_supported_cpuid(), 0x1, 0)->eax;
}

static inline uint32_t kvm_cpu_family(void)
{
	return x86_family(kvm_cpu_fms());
}

static inline uint32_t kvm_cpu_model(void)
{
	return x86_model(kvm_cpu_fms());
}

bool kvm_cpuid_has(const struct kvm_cpuid2 *cpuid,
		   struct kvm_x86_cpu_feature feature);

static inline bool kvm_cpu_has(struct kvm_x86_cpu_feature feature)
{
	return kvm_cpuid_has(kvm_get_supported_cpuid(), feature);
}

uint32_t kvm_cpuid_property(const struct kvm_cpuid2 *cpuid,
			    struct kvm_x86_cpu_property property);

static inline uint32_t kvm_cpu_property(struct kvm_x86_cpu_property property)
{
	return kvm_cpuid_property(kvm_get_supported_cpuid(), property);
}

static __always_inline bool kvm_cpu_has_p(struct kvm_x86_cpu_property property)
{
	uint32_t max_leaf;

	switch (property.function & 0xc0000000) {
	case 0:
		max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_BASIC_LEAF);
		break;
	case 0x40000000:
		max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_KVM_LEAF);
		break;
	case 0x80000000:
		max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_EXT_LEAF);
		break;
	case 0xc0000000:
		max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_CENTAUR_LEAF);
	}
	return max_leaf >= property.function;
}

static inline bool kvm_pmu_has(struct kvm_x86_pmu_feature feature)
{
	uint32_t nr_bits;

	if (feature.f.reg == KVM_CPUID_EBX) {
		nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
		return nr_bits > feature.f.bit && !kvm_cpu_has(feature.f);
	}

	TEST_ASSERT_EQ(feature.f.reg, KVM_CPUID_ECX);
	nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
	return nr_bits > feature.f.bit || kvm_cpu_has(feature.f);
}

static __always_inline uint64_t kvm_cpu_supported_xcr0(void)
{
	if (!kvm_cpu_has_p(X86_PROPERTY_SUPPORTED_XCR0_LO))
		return 0;

	return kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_LO) |
	       ((uint64_t)kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
}
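/*
 * Sample usage (hypothetical host-side snippet): gate a test on KVM support,
 * e.g. at the top of main(); assumes TEST_REQUIRE() from test_util.h is
 * available.
 */
static inline void require_xsave_for_test(void)
{
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XSAVE));
	TEST_REQUIRE(kvm_cpu_supported_xcr0() & XFEATURE_MASK_YMM);
}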
static inline size_t kvm_cpuid2_size(int nr_entries)
{
	return sizeof(struct kvm_cpuid2) +
	       sizeof(struct kvm_cpuid_entry2) * nr_entries;
}

/*
 * Allocate a "struct kvm_cpuid2" instance, with the 0-length array of
 * entries sized to hold @nr_entries.  The caller is responsible for freeing
 * the struct.
 */
static inline struct kvm_cpuid2 *allocate_kvm_cpuid2(int nr_entries)
{
	struct kvm_cpuid2 *cpuid;

	cpuid = malloc(kvm_cpuid2_size(nr_entries));
	TEST_ASSERT(cpuid, "-ENOMEM when allocating kvm_cpuid2");

	cpuid->nent = nr_entries;

	return cpuid;
}

void vcpu_init_cpuid(struct kvm_vcpu *vcpu, const struct kvm_cpuid2 *cpuid);

static inline void vcpu_get_cpuid(struct kvm_vcpu *vcpu)
{
	vcpu_ioctl(vcpu, KVM_GET_CPUID2, vcpu->cpuid);
}

static inline struct kvm_cpuid_entry2 *__vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu,
							      uint32_t function,
							      uint32_t index)
{
	TEST_ASSERT(vcpu->cpuid, "Must do vcpu_init_cpuid() first (or equivalent)");

	vcpu_get_cpuid(vcpu);

	return (struct kvm_cpuid_entry2 *)get_cpuid_entry(vcpu->cpuid,
							  function, index);
}

static inline struct kvm_cpuid_entry2 *vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu,
							    uint32_t function)
{
	return __vcpu_get_cpuid_entry(vcpu, function, 0);
}

static inline int __vcpu_set_cpuid(struct kvm_vcpu *vcpu)
{
	int r;

	TEST_ASSERT(vcpu->cpuid, "Must do vcpu_init_cpuid() first");
	r = __vcpu_ioctl(vcpu, KVM_SET_CPUID2, vcpu->cpuid);
	if (r)
		return r;

	/* On success, refresh the cache to pick up adjustments made by KVM. */
	vcpu_get_cpuid(vcpu);
	return 0;
}

static inline void vcpu_set_cpuid(struct kvm_vcpu *vcpu)
{
	TEST_ASSERT(vcpu->cpuid, "Must do vcpu_init_cpuid() first");
	vcpu_ioctl(vcpu, KVM_SET_CPUID2, vcpu->cpuid);

	/* Refresh the cache to pick up adjustments made by KVM. */
	vcpu_get_cpuid(vcpu);
}

void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu,
			     struct kvm_x86_cpu_property property,
			     uint32_t value);
void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, uint8_t maxphyaddr);

void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function);

static inline bool vcpu_cpuid_has(struct kvm_vcpu *vcpu,
				  struct kvm_x86_cpu_feature feature)
{
	struct kvm_cpuid_entry2 *entry;

	entry = __vcpu_get_cpuid_entry(vcpu, feature.function, feature.index);
	return *((&entry->eax) + feature.reg) & BIT(feature.bit);
}

void vcpu_set_or_clear_cpuid_feature(struct kvm_vcpu *vcpu,
				     struct kvm_x86_cpu_feature feature,
				     bool set);

static inline void vcpu_set_cpuid_feature(struct kvm_vcpu *vcpu,
					  struct kvm_x86_cpu_feature feature)
{
	vcpu_set_or_clear_cpuid_feature(vcpu, feature, true);
}

static inline void vcpu_clear_cpuid_feature(struct kvm_vcpu *vcpu,
					    struct kvm_x86_cpu_feature feature)
{
	vcpu_set_or_clear_cpuid_feature(vcpu, feature, false);
}
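/*
 * Sample usage (hypothetical host-side snippet): toggle a CPUID feature and
 * verify the vCPU's cached CPUID reflects it, assuming the vCPU's CPUID was
 * initialized (vcpu_init_cpuid() or the selftest default at vCPU creation).
 */
static inline void disable_xsave_in_guest_cpuid(struct kvm_vcpu *vcpu)
{
	vcpu_clear_cpuid_feature(vcpu, X86_FEATURE_XSAVE);
	TEST_ASSERT(!vcpu_cpuid_has(vcpu, X86_FEATURE_XSAVE),
		    "XSAVE should be cleared in the vCPU's CPUID");
}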
uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index);
int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value);

/*
 * Assert on MSR accesses and pretty print the MSR name when possible.
 * Note, the caller provides the stringified name so that the name of the
 * macro is printed, not the value the macro resolves to (due to macro
 * expansion).
 */
#define TEST_ASSERT_MSR(cond, fmt, msr, str, args...)		\
do {								\
	if (__builtin_constant_p(msr)) {			\
		TEST_ASSERT(cond, fmt, str, args);		\
	} else if (!(cond)) {					\
		char buf[16];					\
								\
		snprintf(buf, sizeof(buf), "MSR 0x%x", msr);	\
		TEST_ASSERT(cond, fmt, buf, args);		\
	}							\
} while (0)

/*
 * Returns true if KVM should return the last written value when reading an MSR
 * from userspace, e.g. the MSR isn't a command MSR, doesn't emulate state that
 * is changing, etc.  This is NOT an exhaustive list!  The intent is to filter
 * out MSRs that are not durable _and_ that a selftest wants to write.
 */
static inline bool is_durable_msr(uint32_t msr)
{
	return msr != MSR_IA32_TSC;
}

#define vcpu_set_msr(vcpu, msr, val)							\
do {											\
	uint64_t r, v = val;								\
											\
	TEST_ASSERT_MSR(_vcpu_set_msr(vcpu, msr, v) == 1,				\
			"KVM_SET_MSRS failed on %s, value = 0x%lx", msr, #msr, v);	\
	if (!is_durable_msr(msr))							\
		break;									\
	r = vcpu_get_msr(vcpu, msr);							\
	TEST_ASSERT_MSR(r == v, "Set %s to '0x%lx', got back '0x%lx'", msr, #msr, v, r);\
} while (0)

void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);
void kvm_init_vm_address_properties(struct kvm_vm *vm);

struct ex_regs {
	uint64_t rax, rcx, rdx, rbx;
	uint64_t rbp, rsi, rdi;
	uint64_t r8, r9, r10, r11;
	uint64_t r12, r13, r14, r15;
	uint64_t vector;
	uint64_t error_code;
	uint64_t rip;
	uint64_t cs;
	uint64_t rflags;
};

struct idt_entry {
	uint16_t offset0;
	uint16_t selector;
	uint16_t ist : 3;
	uint16_t : 5;
	uint16_t type : 4;
	uint16_t : 1;
	uint16_t dpl : 2;
	uint16_t p : 1;
	uint16_t offset1;
	uint32_t offset2;
	uint32_t reserved;
};

void vm_install_exception_handler(struct kvm_vm *vm, int vector,
				  void (*handler)(struct ex_regs *));
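/*
 * Sample usage (hypothetical test code): install a guest #GP handler that
 * skips the faulting instruction.  The two-byte skip assumes the fault comes
 * from a two-byte opcode such as WRMSR (0f 30); GP_VECTOR_EXAMPLE is a
 * hypothetical define, #GP's vector is 13.
 *
 * Host side:	vm_install_exception_handler(vm, GP_VECTOR_EXAMPLE,
 *					     guest_gp_handler);
 */
#define GP_VECTOR_EXAMPLE 13

static inline void guest_gp_handler(struct ex_regs *regs)
{
	GUEST_ASSERT(regs->vector == GP_VECTOR_EXAMPLE);
	regs->rip += 2;
}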
/*
 * Exception fixup morphs #DE to an arbitrary magic vector so that '0' can be
 * used to signal "no exception".
 */
#define KVM_MAGIC_DE_VECTOR 0xff

/* If a toddler were to say "abracadabra". */
#define KVM_EXCEPTION_MAGIC 0xabacadabaULL

/*
 * KVM selftest exception fixup uses registers to coordinate with the exception
 * handler, versus the kernel's in-memory tables and KVM-Unit-Tests's in-memory
 * per-CPU data.  Using only registers avoids having to map memory into the
 * guest, doesn't require a valid, stable GS.base, and reduces the risk of
 * recursive faults when accessing memory in the handler.  The downside to
 * using registers is that it restricts what registers can be used by the
 * actual instruction.  But, selftests are 64-bit only, making register
 * pressure a minor concern.  Use r9-r11 as they are volatile, i.e. don't
 * need to be saved by the callee, and except for r11 are not implicit
 * parameters to any instructions.  Ideally, fixup would use r8-r10 and thus
 * avoid implicit parameters entirely, but Hyper-V's hypercall ABI uses r8 and
 * testing Hyper-V is higher priority than testing non-faulting SYSCALL/SYSRET.
 *
 * Note, the fixup handler deliberately does not handle #DE, i.e. the vector
 * is guaranteed to be non-zero on fault.
 *
 * REGISTER INPUTS:
 * r9  = MAGIC
 * r10 = RIP
 * r11 = new RIP on fault
 *
 * REGISTER OUTPUTS:
 * r9  = exception vector (non-zero)
 * r10 = error code
 */
#define __KVM_ASM_SAFE(insn, fep)				\
	"mov $" __stringify(KVM_EXCEPTION_MAGIC) ", %%r9\n\t"	\
	"lea 1f(%%rip), %%r10\n\t"				\
	"lea 2f(%%rip), %%r11\n\t"				\
	fep "1: " insn "\n\t"					\
	"xor %%r9, %%r9\n\t"					\
	"2:\n\t"						\
	"mov %%r9b, %[vector]\n\t"				\
	"mov %%r10, %[error_code]\n\t"

#define KVM_ASM_SAFE(insn) __KVM_ASM_SAFE(insn, "")
#define KVM_ASM_SAFE_FEP(insn) __KVM_ASM_SAFE(insn, KVM_FEP)

#define KVM_ASM_SAFE_OUTPUTS(v, ec)	[vector] "=qm"(v), [error_code] "=rm"(ec)
#define KVM_ASM_SAFE_CLOBBERS		"r9", "r10", "r11"

#define kvm_asm_safe(insn, inputs...)					\
({									\
	uint64_t ign_error_code;					\
	uint8_t vector;							\
									\
	asm volatile(KVM_ASM_SAFE(insn)					\
		     : KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code)	\
		     : inputs						\
		     : KVM_ASM_SAFE_CLOBBERS);				\
	vector;								\
})

#define kvm_asm_safe_ec(insn, error_code, inputs...)			\
({									\
	uint8_t vector;							\
									\
	asm volatile(KVM_ASM_SAFE(insn)					\
		     : KVM_ASM_SAFE_OUTPUTS(vector, error_code)		\
		     : inputs						\
		     : KVM_ASM_SAFE_CLOBBERS);				\
	vector;								\
})

#define kvm_asm_safe_fep(insn, inputs...)				\
({									\
	uint64_t ign_error_code;					\
	uint8_t vector;							\
									\
	asm volatile(KVM_ASM_SAFE_FEP(insn)				\
		     : KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code)	\
		     : inputs						\
		     : KVM_ASM_SAFE_CLOBBERS);				\
	vector;								\
})

#define kvm_asm_safe_ec_fep(insn, error_code, inputs...)		\
({									\
	uint8_t vector;							\
									\
	asm volatile(KVM_ASM_SAFE_FEP(insn)				\
		     : KVM_ASM_SAFE_OUTPUTS(vector, error_code)		\
		     : inputs						\
		     : KVM_ASM_SAFE_CLOBBERS);				\
	vector;								\
})
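/*
 * Sample usage (hypothetical helper): probe an instruction that has no
 * dedicated *_safe() wrapper below, e.g. a CR4 write that may #GP on reserved
 * bits; returns the vector, or '0' if no fault occurred.
 */
static inline uint8_t set_cr4_safe(uint64_t val)
{
	return kvm_asm_safe("mov %[val], %%cr4", [val]"r"(val));
}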
#define BUILD_READ_U64_SAFE_HELPER(insn, _fep, _FEP)			\
static inline uint8_t insn##_safe##_fep(uint32_t idx, uint64_t *val)	\
{									\
	uint64_t error_code;						\
	uint8_t vector;							\
	uint32_t a, d;							\
									\
	asm volatile(KVM_ASM_SAFE##_FEP(#insn)				\
		     : "=a"(a), "=d"(d),				\
		       KVM_ASM_SAFE_OUTPUTS(vector, error_code)		\
		     : "c"(idx)						\
		     : KVM_ASM_SAFE_CLOBBERS);				\
									\
	*val = (uint64_t)a | ((uint64_t)d << 32);			\
	return vector;							\
}

/*
 * Generate {insn}_safe() and {insn}_safe_fep() helpers for instructions that
 * use ECX as an input index, and EDX:EAX as a 64-bit output.
 */
#define BUILD_READ_U64_SAFE_HELPERS(insn)				\
	BUILD_READ_U64_SAFE_HELPER(insn, , )				\
	BUILD_READ_U64_SAFE_HELPER(insn, _fep, _FEP)			\

BUILD_READ_U64_SAFE_HELPERS(rdmsr)
BUILD_READ_U64_SAFE_HELPERS(rdpmc)
BUILD_READ_U64_SAFE_HELPERS(xgetbv)

static inline uint8_t wrmsr_safe(uint32_t msr, uint64_t val)
{
	return kvm_asm_safe("wrmsr", "a"(val & -1u), "d"(val >> 32), "c"(msr));
}

static inline uint8_t xsetbv_safe(uint32_t index, uint64_t value)
{
	u32 eax = value;
	u32 edx = value >> 32;

	return kvm_asm_safe("xsetbv", "a" (eax), "d" (edx), "c" (index));
}

bool kvm_is_tdp_enabled(void);

static inline bool get_kvm_intel_param_bool(const char *param)
{
	return kvm_get_module_param_bool("kvm_intel", param);
}

static inline bool get_kvm_amd_param_bool(const char *param)
{
	return kvm_get_module_param_bool("kvm_amd", param);
}

static inline int get_kvm_intel_param_integer(const char *param)
{
	return kvm_get_module_param_integer("kvm_intel", param);
}

static inline int get_kvm_amd_param_integer(const char *param)
{
	return kvm_get_module_param_integer("kvm_amd", param);
}

static inline bool kvm_is_pmu_enabled(void)
{
	return get_kvm_param_bool("enable_pmu");
}

static inline bool kvm_is_forced_emulation_enabled(void)
{
	return !!get_kvm_param_integer("force_emulation_prefix");
}

static inline bool kvm_is_unrestricted_guest_enabled(void)
{
	return get_kvm_intel_param_bool("unrestricted_guest");
}

static inline bool kvm_is_ignore_msrs(void)
{
	return get_kvm_param_bool("ignore_msrs");
}

uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr,
				    int *level);
uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr);

uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
		       uint64_t a3);
uint64_t __xen_hypercall(uint64_t nr, uint64_t a0, void *a1);
void xen_hypercall(uint64_t nr, uint64_t a0, void *a1);

static inline uint64_t __kvm_hypercall_map_gpa_range(uint64_t gpa,
						     uint64_t size, uint64_t flags)
{
	return kvm_hypercall(KVM_HC_MAP_GPA_RANGE, gpa, size >> PAGE_SHIFT, flags, 0);
}

static inline void kvm_hypercall_map_gpa_range(uint64_t gpa, uint64_t size,
					       uint64_t flags)
{
	uint64_t ret = __kvm_hypercall_map_gpa_range(gpa, size, flags);

	GUEST_ASSERT(!ret);
}
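/*
 * Sample usage (hypothetical guest snippet): request conversion of a page to
 * shared/decrypted state; KVM_MAP_GPA_RANGE_DECRYPTED and
 * KVM_MAP_GPA_RANGE_PAGE_SZ_4K are assumed to come from <linux/kvm_para.h>
 * (included above).
 */
static inline void share_page_with_host(uint64_t gpa)
{
	kvm_hypercall_map_gpa_range(gpa, PAGE_SIZE,
				    KVM_MAP_GPA_RANGE_DECRYPTED |
				    KVM_MAP_GPA_RANGE_PAGE_SZ_4K);
}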
/*
 * Execute HLT in an STI interrupt shadow to ensure that a pending IRQ that's
 * intended to be a wake event arrives *after* HLT is executed.  Modern CPUs,
 * except for a few oddballs that KVM is unlikely to run on, block IRQs for one
 * instruction after STI, *if* RFLAGS.IF=0 before STI.  Note, Intel CPUs may
 * block other events beyond regular IRQs, e.g. may block NMIs and SMIs too.
 */
static inline void safe_halt(void)
{
	asm volatile("sti; hlt");
}

/*
 * Enable interrupts and ensure that interrupts are evaluated upon return from
 * this function, i.e. execute a nop to consume the STI interrupt shadow.
 */
static inline void sti_nop(void)
{
	asm volatile ("sti; nop");
}

/*
 * Enable interrupts for one instruction (nop), to allow the CPU to process all
 * interrupts that are already pending.
 */
static inline void sti_nop_cli(void)
{
	asm volatile ("sti; nop; cli");
}

static inline void sti(void)
{
	asm volatile("sti");
}

static inline void cli(void)
{
	asm volatile ("cli");
}

void __vm_xsave_require_permission(uint64_t xfeature, const char *name);

#define vm_xsave_require_permission(xfeature)	\
	__vm_xsave_require_permission(xfeature, #xfeature)

enum pg_level {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
	PG_LEVEL_512G,
	PG_LEVEL_256T
};

#define PG_LEVEL_SHIFT(_level)	((_level - 1) * 9 + 12)
#define PG_LEVEL_SIZE(_level)	(1ull << PG_LEVEL_SHIFT(_level))

#define PG_SIZE_4K		PG_LEVEL_SIZE(PG_LEVEL_4K)
#define PG_SIZE_2M		PG_LEVEL_SIZE(PG_LEVEL_2M)
#define PG_SIZE_1G		PG_LEVEL_SIZE(PG_LEVEL_1G)

void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level);
void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
		    uint64_t nr_bytes, int level);

/*
 * Basic CPU control in CR0
 */
#define X86_CR0_PE	(1UL<<0)  /* Protection Enable */
#define X86_CR0_MP	(1UL<<1)  /* Monitor Coprocessor */
#define X86_CR0_EM	(1UL<<2)  /* Emulation */
#define X86_CR0_TS	(1UL<<3)  /* Task Switched */
#define X86_CR0_ET	(1UL<<4)  /* Extension Type */
#define X86_CR0_NE	(1UL<<5)  /* Numeric Error */
#define X86_CR0_WP	(1UL<<16) /* Write Protect */
#define X86_CR0_AM	(1UL<<18) /* Alignment Mask */
#define X86_CR0_NW	(1UL<<29) /* Not Write-through */
#define X86_CR0_CD	(1UL<<30) /* Cache Disable */
#define X86_CR0_PG	(1UL<<31) /* Paging */

#define PFERR_PRESENT_BIT	0
#define PFERR_WRITE_BIT		1
#define PFERR_USER_BIT		2
#define PFERR_RSVD_BIT		3
#define PFERR_FETCH_BIT		4
#define PFERR_PK_BIT		5
#define PFERR_SGX_BIT		15
#define PFERR_GUEST_FINAL_BIT	32
#define PFERR_GUEST_PAGE_BIT	33
#define PFERR_IMPLICIT_ACCESS_BIT 48

#define PFERR_PRESENT_MASK	BIT(PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK	BIT(PFERR_WRITE_BIT)
#define PFERR_USER_MASK		BIT(PFERR_USER_BIT)
#define PFERR_RSVD_MASK		BIT(PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK	BIT(PFERR_FETCH_BIT)
#define PFERR_PK_MASK		BIT(PFERR_PK_BIT)
#define PFERR_SGX_MASK		BIT(PFERR_SGX_BIT)
#define PFERR_GUEST_FINAL_MASK	BIT_ULL(PFERR_GUEST_FINAL_BIT)
#define PFERR_GUEST_PAGE_MASK	BIT_ULL(PFERR_GUEST_PAGE_BIT)
#define PFERR_IMPLICIT_ACCESS	BIT_ULL(PFERR_IMPLICIT_ACCESS_BIT)

bool sys_clocksource_is_based_on_tsc(void);

#endif /* SELFTEST_KVM_PROCESSOR_H */