/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * tools/testing/selftests/kvm/include/x86_64/processor.h
 *
 * Copyright (C) 2018, Google LLC.
 */

#ifndef SELFTEST_KVM_PROCESSOR_H
#define SELFTEST_KVM_PROCESSOR_H

#include <assert.h>
#include <stdint.h>
#include <syscall.h>

#include <asm/msr-index.h>
#include <asm/prctl.h>

#include <linux/stringify.h>

#include "../kvm_util.h"

#define NMI_VECTOR		0x02

#define X86_EFLAGS_FIXED	(1u << 1)

#define X86_CR4_VME		(1ul << 0)
#define X86_CR4_PVI		(1ul << 1)
#define X86_CR4_TSD		(1ul << 2)
#define X86_CR4_DE		(1ul << 3)
#define X86_CR4_PSE		(1ul << 4)
#define X86_CR4_PAE		(1ul << 5)
#define X86_CR4_MCE		(1ul << 6)
#define X86_CR4_PGE		(1ul << 7)
#define X86_CR4_PCE		(1ul << 8)
#define X86_CR4_OSFXSR		(1ul << 9)
#define X86_CR4_OSXMMEXCPT	(1ul << 10)
#define X86_CR4_UMIP		(1ul << 11)
#define X86_CR4_LA57		(1ul << 12)
#define X86_CR4_VMXE		(1ul << 13)
#define X86_CR4_SMXE		(1ul << 14)
#define X86_CR4_FSGSBASE	(1ul << 16)
#define X86_CR4_PCIDE		(1ul << 17)
#define X86_CR4_OSXSAVE		(1ul << 18)
#define X86_CR4_SMEP		(1ul << 20)
#define X86_CR4_SMAP		(1ul << 21)
#define X86_CR4_PKE		(1ul << 22)

/* Note, these are ordered alphabetically to match kvm_cpuid_entry2. Eww. */
enum cpuid_output_regs {
	KVM_CPUID_EAX,
	KVM_CPUID_EBX,
	KVM_CPUID_ECX,
	KVM_CPUID_EDX
};

/*
 * Pack the information into a 64-bit value so that each X86_FEATURE_XXX can be
 * passed by value with no overhead.
 */
struct kvm_x86_cpu_feature {
	u32	function;
	u16	index;
	u8	reg;
	u8	bit;
};
#define KVM_X86_CPU_FEATURE(fn, idx, gpr, __bit)				\
({										\
	struct kvm_x86_cpu_feature feature = {					\
		.function = fn,							\
		.index = idx,							\
		.reg = KVM_CPUID_##gpr,						\
		.bit = __bit,							\
	};									\
										\
	kvm_static_assert((fn & 0xc0000000) == 0 ||				\
			  (fn & 0xc0000000) == 0x40000000 ||			\
			  (fn & 0xc0000000) == 0x80000000 ||			\
			  (fn & 0xc0000000) == 0xc0000000);			\
	kvm_static_assert(idx < BIT(sizeof(feature.index) * BITS_PER_BYTE));	\
	feature;								\
})
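
/*
 * Illustrative sketch (editor's addition, not part of the upstream header):
 * because the struct packs into 64 bits, descriptors built with
 * KVM_X86_CPU_FEATURE() are cheap to construct and pass by value.  Note the
 * macro is a GNU C statement expression, so the X86_FEATURE_* "constants"
 * below can only be used in function context, not in static initializers.
 * The leaf/register/bit here are arbitrary, purely for illustration.
 */
static inline struct kvm_x86_cpu_feature kvm_x86_cpu_feature_example(void)
{
	/* CPUID.0x7.0:EBX[0], i.e. FSGSBASE, returned by value. */
	return KVM_X86_CPU_FEATURE(0x7, 0, EBX, 0);
}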

/*
 * Basic Leafs, a.k.a. Intel defined
 */
#define X86_FEATURE_MWAIT		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 3)
#define X86_FEATURE_VMX			KVM_X86_CPU_FEATURE(0x1, 0, ECX, 5)
#define X86_FEATURE_SMX			KVM_X86_CPU_FEATURE(0x1, 0, ECX, 6)
#define X86_FEATURE_PDCM		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 15)
#define X86_FEATURE_PCID		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 17)
#define X86_FEATURE_X2APIC		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 21)
#define X86_FEATURE_MOVBE		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 22)
#define X86_FEATURE_TSC_DEADLINE_TIMER	KVM_X86_CPU_FEATURE(0x1, 0, ECX, 24)
#define X86_FEATURE_XSAVE		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 26)
#define X86_FEATURE_OSXSAVE		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 27)
#define X86_FEATURE_RDRAND		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 30)
#define X86_FEATURE_HYPERVISOR		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 31)
#define X86_FEATURE_PAE			KVM_X86_CPU_FEATURE(0x1, 0, EDX, 6)
#define X86_FEATURE_MCE			KVM_X86_CPU_FEATURE(0x1, 0, EDX, 7)
#define X86_FEATURE_APIC		KVM_X86_CPU_FEATURE(0x1, 0, EDX, 9)
#define X86_FEATURE_CLFLUSH		KVM_X86_CPU_FEATURE(0x1, 0, EDX, 19)
#define X86_FEATURE_XMM			KVM_X86_CPU_FEATURE(0x1, 0, EDX, 25)
#define X86_FEATURE_XMM2		KVM_X86_CPU_FEATURE(0x1, 0, EDX, 26)
#define X86_FEATURE_FSGSBASE		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 0)
#define X86_FEATURE_TSC_ADJUST		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 1)
#define X86_FEATURE_SGX			KVM_X86_CPU_FEATURE(0x7, 0, EBX, 2)
#define X86_FEATURE_HLE			KVM_X86_CPU_FEATURE(0x7, 0, EBX, 4)
#define X86_FEATURE_SMEP		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 7)
#define X86_FEATURE_INVPCID		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 10)
#define X86_FEATURE_RTM			KVM_X86_CPU_FEATURE(0x7, 0, EBX, 11)
#define X86_FEATURE_MPX			KVM_X86_CPU_FEATURE(0x7, 0, EBX, 14)
#define X86_FEATURE_SMAP		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 20)
#define X86_FEATURE_PCOMMIT		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 22)
#define X86_FEATURE_CLFLUSHOPT		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 23)
#define X86_FEATURE_CLWB		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 24)
#define X86_FEATURE_UMIP		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 2)
#define X86_FEATURE_PKU			KVM_X86_CPU_FEATURE(0x7, 0, ECX, 3)
#define X86_FEATURE_LA57		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 16)
#define X86_FEATURE_RDPID		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 22)
#define X86_FEATURE_SGX_LC		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 30)
#define X86_FEATURE_SHSTK		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 7)
#define X86_FEATURE_IBT			KVM_X86_CPU_FEATURE(0x7, 0, EDX, 20)
#define X86_FEATURE_AMX_TILE		KVM_X86_CPU_FEATURE(0x7, 0, EDX, 24)
#define X86_FEATURE_SPEC_CTRL		KVM_X86_CPU_FEATURE(0x7, 0, EDX, 26)
#define X86_FEATURE_ARCH_CAPABILITIES	KVM_X86_CPU_FEATURE(0x7, 0, EDX, 29)
#define X86_FEATURE_PKS			KVM_X86_CPU_FEATURE(0x7, 0, ECX, 31)
#define X86_FEATURE_XTILECFG		KVM_X86_CPU_FEATURE(0xD, 0, EAX, 17)
#define X86_FEATURE_XTILEDATA		KVM_X86_CPU_FEATURE(0xD, 0, EAX, 18)
#define X86_FEATURE_XSAVES		KVM_X86_CPU_FEATURE(0xD, 1, EAX, 3)
#define X86_FEATURE_XFD			KVM_X86_CPU_FEATURE(0xD, 1, EAX, 4)

/*
 * Extended Leafs, a.k.a. AMD defined
 */
#define X86_FEATURE_SVM			KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 2)
#define X86_FEATURE_NX			KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 20)
#define X86_FEATURE_GBPAGES		KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 26)
#define X86_FEATURE_RDTSCP		KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 27)
#define X86_FEATURE_LM			KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 29)
#define X86_FEATURE_RDPRU		KVM_X86_CPU_FEATURE(0x80000008, 0, EBX, 4)
#define X86_FEATURE_AMD_IBPB		KVM_X86_CPU_FEATURE(0x80000008, 0, EBX, 12)
#define X86_FEATURE_NPT			KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 0)
#define X86_FEATURE_LBRV		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 1)
#define X86_FEATURE_NRIPS		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 3)
#define X86_FEATURE_TSCRATEMSR		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 4)
#define X86_FEATURE_PAUSEFILTER		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 10)
#define X86_FEATURE_PFTHRESHOLD		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 12)
#define X86_FEATURE_VGIF		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 16)
#define X86_FEATURE_SEV			KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 1)
#define X86_FEATURE_SEV_ES		KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 3)

/*
 * KVM defined paravirt features.
 */
#define X86_FEATURE_KVM_CLOCKSOURCE	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 0)
#define X86_FEATURE_KVM_NOP_IO_DELAY	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 1)
#define X86_FEATURE_KVM_MMU_OP		KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 2)
#define X86_FEATURE_KVM_CLOCKSOURCE2	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 3)
#define X86_FEATURE_KVM_ASYNC_PF	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 4)
#define X86_FEATURE_KVM_STEAL_TIME	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 5)
#define X86_FEATURE_KVM_PV_EOI		KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 6)
#define X86_FEATURE_KVM_PV_UNHALT	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 7)
/* Bit 8 apparently isn't used?!?! */
#define X86_FEATURE_KVM_PV_TLB_FLUSH	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 9)
#define X86_FEATURE_KVM_ASYNC_PF_VMEXIT	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 10)
#define X86_FEATURE_KVM_PV_SEND_IPI	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 11)
#define X86_FEATURE_KVM_POLL_CONTROL	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 12)
#define X86_FEATURE_KVM_PV_SCHED_YIELD	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 13)
#define X86_FEATURE_KVM_ASYNC_PF_INT	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 14)
#define X86_FEATURE_KVM_MSI_EXT_DEST_ID	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 15)
#define X86_FEATURE_KVM_HC_MAP_GPA_RANGE	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 16)
#define X86_FEATURE_KVM_MIGRATION_CONTROL	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 17)

/*
 * Same idea as X86_FEATURE_XXX, but X86_PROPERTY_XXX retrieves a multi-bit
 * value/property as opposed to a single-bit feature.  Again, pack the info
 * into a 64-bit value to pass by value with no overhead.
 */
struct kvm_x86_cpu_property {
	u32	function;
	u8	index;
	u8	reg;
	u8	lo_bit;
	u8	hi_bit;
};
#define KVM_X86_CPU_PROPERTY(fn, idx, gpr, low_bit, high_bit)			\
({										\
	struct kvm_x86_cpu_property property = {				\
		.function = fn,							\
		.index = idx,							\
		.reg = KVM_CPUID_##gpr,						\
		.lo_bit = low_bit,						\
		.hi_bit = high_bit,						\
	};									\
										\
	kvm_static_assert(low_bit < high_bit);					\
	kvm_static_assert((fn & 0xc0000000) == 0 ||				\
			  (fn & 0xc0000000) == 0x40000000 ||			\
			  (fn & 0xc0000000) == 0x80000000 ||			\
			  (fn & 0xc0000000) == 0xc0000000);			\
	kvm_static_assert(idx < BIT(sizeof(property.index) * BITS_PER_BYTE));	\
	property;								\
})

#define X86_PROPERTY_MAX_BASIC_LEAF		KVM_X86_CPU_PROPERTY(0, 0, EAX, 0, 31)
#define X86_PROPERTY_PMU_VERSION		KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 0, 7)
#define X86_PROPERTY_PMU_NR_GP_COUNTERS		KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 8, 15)
#define X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH	KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 24, 31)

#define X86_PROPERTY_XSTATE_MAX_SIZE_XCR0	KVM_X86_CPU_PROPERTY(0xd, 0, EBX, 0, 31)
#define X86_PROPERTY_XSTATE_MAX_SIZE		KVM_X86_CPU_PROPERTY(0xd, 0, ECX, 0, 31)
#define X86_PROPERTY_XSTATE_TILE_SIZE		KVM_X86_CPU_PROPERTY(0xd, 18, EAX, 0, 31)
#define X86_PROPERTY_XSTATE_TILE_OFFSET		KVM_X86_CPU_PROPERTY(0xd, 18, EBX, 0, 31)
#define X86_PROPERTY_AMX_TOTAL_TILE_BYTES	KVM_X86_CPU_PROPERTY(0x1d, 1, EAX, 0, 15)
#define X86_PROPERTY_AMX_BYTES_PER_TILE		KVM_X86_CPU_PROPERTY(0x1d, 1, EAX, 16, 31)
#define X86_PROPERTY_AMX_BYTES_PER_ROW		KVM_X86_CPU_PROPERTY(0x1d, 1, EBX, 0, 15)
#define X86_PROPERTY_AMX_NR_TILE_REGS		KVM_X86_CPU_PROPERTY(0x1d, 1, EBX, 16, 31)
#define X86_PROPERTY_AMX_MAX_ROWS		KVM_X86_CPU_PROPERTY(0x1d, 1, ECX, 0, 15)

#define X86_PROPERTY_MAX_KVM_LEAF		KVM_X86_CPU_PROPERTY(0x40000000, 0, EAX, 0, 31)

#define X86_PROPERTY_MAX_EXT_LEAF		KVM_X86_CPU_PROPERTY(0x80000000, 0, EAX, 0, 31)
#define X86_PROPERTY_MAX_PHY_ADDR		KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 0, 7)
#define X86_PROPERTY_MAX_VIRT_ADDR		KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 8, 15)
#define X86_PROPERTY_PHYS_ADDR_REDUCTION	KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 6, 11)

#define X86_PROPERTY_MAX_CENTAUR_LEAF		KVM_X86_CPU_PROPERTY(0xC0000000, 0, EAX, 0, 31)
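
/*
 * Illustrative sketch (editor's addition, not part of the upstream header):
 * a property names a contiguous bit range in one CPUID output register; its
 * value is recovered by masking and shifting, which is exactly what
 * __this_cpu_has() below does for both features and properties.
 */
static inline uint32_t kvm_x86_property_extract_example(uint32_t reg_val,
							struct kvm_x86_cpu_property prop)
{
	/* E.g. reg_val = CPUID.0x80000008:EAX, prop = X86_PROPERTY_MAX_PHY_ADDR. */
	return (reg_val & GENMASK(prop.hi_bit, prop.lo_bit)) >> prop.lo_bit;
}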

/*
 * Intel's architectural PMU events are bizarre.  They have a "feature" bit
 * that indicates the feature is _not_ supported, and a property that states
 * the length of the bit mask of unsupported features.  A feature is supported
 * if the size of the bit mask is larger than the "unavailable" bit, and said
 * bit is not set.
 *
 * Wrap the "unavailable" feature to simplify checking whether or not a given
 * architectural event is supported.
 */
struct kvm_x86_pmu_feature {
	struct kvm_x86_cpu_feature anti_feature;
};
#define KVM_X86_PMU_FEATURE(name, __bit)					\
({										\
	struct kvm_x86_pmu_feature feature = {					\
		.anti_feature = KVM_X86_CPU_FEATURE(0xa, 0, EBX, __bit),	\
	};									\
										\
	feature;								\
})

#define X86_PMU_FEATURE_BRANCH_INSNS_RETIRED	KVM_X86_PMU_FEATURE(BRANCH_INSNS_RETIRED, 5)
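
/*
 * Illustrative sketch (editor's addition, not part of the upstream header):
 * given raw CPUID.0xA output, an architectural event is supported iff the
 * EBX "unavailable events" bit vector (length in EAX[31:24]) is long enough
 * to cover the event's bit and that bit is clear.  this_pmu_has() and
 * kvm_pmu_has() below compute exactly this via the accessors.
 */
static inline bool pmu_arch_event_supported_example(uint32_t cpuid_0xa_eax,
						    uint32_t cpuid_0xa_ebx,
						    uint8_t event_bit)
{
	uint32_t nr_bits = (cpuid_0xa_eax >> 24) & 0xff;

	return nr_bits > event_bit && !(cpuid_0xa_ebx & BIT(event_bit));
}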

static inline unsigned int x86_family(unsigned int eax)
{
	unsigned int x86;

	x86 = (eax >> 8) & 0xf;

	if (x86 == 0xf)
		x86 += (eax >> 20) & 0xff;

	return x86;
}

static inline unsigned int x86_model(unsigned int eax)
{
	return ((eax >> 12) & 0xf0) | ((eax >> 4) & 0x0f);
}
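
/*
 * Illustrative sketch (editor's addition, not part of the upstream header):
 * decoding a raw CPUID.0x1 EAX (family/model/stepping) value with the helpers
 * above, e.g. fms == 0x000806ea yields family 0x6 and model 0x8e; the
 * extended family is only folded in when the base family is 0xf.
 */
static inline void x86_fms_decode_example(unsigned int fms,
					  unsigned int *family,
					  unsigned int *model)
{
	*family = x86_family(fms);
	*model = x86_model(fms);
}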

/* Page table bitfield declarations */
#define PTE_PRESENT_MASK	BIT_ULL(0)
#define PTE_WRITABLE_MASK	BIT_ULL(1)
#define PTE_USER_MASK		BIT_ULL(2)
#define PTE_ACCESSED_MASK	BIT_ULL(5)
#define PTE_DIRTY_MASK		BIT_ULL(6)
#define PTE_LARGE_MASK		BIT_ULL(7)
#define PTE_GLOBAL_MASK		BIT_ULL(8)
#define PTE_NX_MASK		BIT_ULL(63)

#define PHYSICAL_PAGE_MASK	GENMASK_ULL(51, 12)

#define PAGE_SHIFT		12
#define PAGE_SIZE		(1ULL << PAGE_SHIFT)
#define PAGE_MASK		(~(PAGE_SIZE-1) & PHYSICAL_PAGE_MASK)

#define HUGEPAGE_SHIFT(x)	(PAGE_SHIFT + (((x) - 1) * 9))
#define HUGEPAGE_SIZE(x)	(1UL << HUGEPAGE_SHIFT(x))
#define HUGEPAGE_MASK(x)	(~(HUGEPAGE_SIZE(x) - 1) & PHYSICAL_PAGE_MASK)

#define PTE_GET_PA(pte)		((pte) & PHYSICAL_PAGE_MASK)
#define PTE_GET_PFN(pte)	(PTE_GET_PA(pte) >> PAGE_SHIFT)

/* General Registers in 64-Bit Mode */
struct gpr64_regs {
	u64 rax;
	u64 rcx;
	u64 rdx;
	u64 rbx;
	u64 rsp;
	u64 rbp;
	u64 rsi;
	u64 rdi;
	u64 r8;
	u64 r9;
	u64 r10;
	u64 r11;
	u64 r12;
	u64 r13;
	u64 r14;
	u64 r15;
};

struct desc64 {
	uint16_t limit0;
	uint16_t base0;
	unsigned base1:8, type:4, s:1, dpl:2, p:1;
	unsigned limit1:4, avl:1, l:1, db:1, g:1, base2:8;
	uint32_t base3;
	uint32_t zero1;
} __attribute__((packed));

struct desc_ptr {
	uint16_t size;
	uint64_t address;
} __attribute__((packed));

struct kvm_x86_state {
	struct kvm_xsave *xsave;
	struct kvm_vcpu_events events;
	struct kvm_mp_state mp_state;
	struct kvm_regs regs;
	struct kvm_xcrs xcrs;
	struct kvm_sregs sregs;
	struct kvm_debugregs debugregs;
	union {
		struct kvm_nested_state nested;
		char nested_[16384];
	};
	struct kvm_msrs msrs;
};

static inline uint64_t get_desc64_base(const struct desc64 *desc)
{
	return ((uint64_t)desc->base3 << 32) |
	       (desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
}

static inline uint64_t rdtsc(void)
{
	uint32_t eax, edx;
	uint64_t tsc_val;
	/*
	 * The lfence is to wait (on Intel CPUs) until all previous
	 * instructions have been executed.  If software requires RDTSC to be
	 * executed prior to execution of any subsequent instruction, it can
	 * execute LFENCE immediately after RDTSC
	 */
	__asm__ __volatile__("lfence; rdtsc; lfence" : "=a"(eax), "=d"(edx));
	tsc_val = ((uint64_t)edx) << 32 | eax;
	return tsc_val;
}

static inline uint64_t rdtscp(uint32_t *aux)
{
	uint32_t eax, edx;

	__asm__ __volatile__("rdtscp" : "=a"(eax), "=d"(edx), "=c"(*aux));
	return ((uint64_t)edx) << 32 | eax;
}

static inline uint64_t rdmsr(uint32_t msr)
{
	uint32_t a, d;

	__asm__ __volatile__("rdmsr" : "=a"(a), "=d"(d) : "c"(msr) : "memory");

	return a | ((uint64_t) d << 32);
}

static inline void wrmsr(uint32_t msr, uint64_t value)
{
	uint32_t a = value;
	uint32_t d = value >> 32;

	__asm__ __volatile__("wrmsr" :: "a"(a), "d"(d), "c"(msr) : "memory");
}

static inline uint16_t inw(uint16_t port)
{
	uint16_t tmp;

	__asm__ __volatile__("in %%dx, %%ax"
			     : /* output */ "=a" (tmp)
			     : /* input */ "d" (port));

	return tmp;
}

static inline uint16_t get_es(void)
{
	uint16_t es;

	__asm__ __volatile__("mov %%es, %[es]"
			     : /* output */ [es]"=rm"(es));
	return es;
}

static inline uint16_t get_cs(void)
{
	uint16_t cs;

	__asm__ __volatile__("mov %%cs, %[cs]"
			     : /* output */ [cs]"=rm"(cs));
	return cs;
}

static inline uint16_t get_ss(void)
{
	uint16_t ss;

	__asm__ __volatile__("mov %%ss, %[ss]"
			     : /* output */ [ss]"=rm"(ss));
	return ss;
}

static inline uint16_t get_ds(void)
{
	uint16_t ds;

	__asm__ __volatile__("mov %%ds, %[ds]"
			     : /* output */ [ds]"=rm"(ds));
	return ds;
}

static inline uint16_t get_fs(void)
{
	uint16_t fs;

	__asm__ __volatile__("mov %%fs, %[fs]"
			     : /* output */ [fs]"=rm"(fs));
	return fs;
}

static inline uint16_t get_gs(void)
{
	uint16_t gs;

	__asm__ __volatile__("mov %%gs, %[gs]"
			     : /* output */ [gs]"=rm"(gs));
	return gs;
}

static inline uint16_t get_tr(void)
{
	uint16_t tr;

	__asm__ __volatile__("str %[tr]"
			     : /* output */ [tr]"=rm"(tr));
	return tr;
}

static inline uint64_t get_cr0(void)
{
	uint64_t cr0;

	__asm__ __volatile__("mov %%cr0, %[cr0]"
			     : /* output */ [cr0]"=r"(cr0));
	return cr0;
}

static inline uint64_t get_cr3(void)
{
	uint64_t cr3;

	__asm__ __volatile__("mov %%cr3, %[cr3]"
			     : /* output */ [cr3]"=r"(cr3));
	return cr3;
}

static inline uint64_t get_cr4(void)
{
	uint64_t cr4;

	__asm__ __volatile__("mov %%cr4, %[cr4]"
			     : /* output */ [cr4]"=r"(cr4));
	return cr4;
}

static inline void set_cr4(uint64_t val)
{
	__asm__ __volatile__("mov %0, %%cr4" : : "r" (val) : "memory");
}

static inline struct desc_ptr get_gdt(void)
{
	struct desc_ptr gdt;
	__asm__ __volatile__("sgdt %[gdt]"
			     : /* output */ [gdt]"=m"(gdt));
	return gdt;
}

static inline struct desc_ptr get_idt(void)
{
	struct desc_ptr idt;
	__asm__ __volatile__("sidt %[idt]"
			     : /* output */ [idt]"=m"(idt));
	return idt;
}

static inline void outl(uint16_t port, uint32_t value)
{
	__asm__ __volatile__("outl %%eax, %%dx" : : "d"(port), "a"(value));
}

static inline void __cpuid(uint32_t function, uint32_t index,
			   uint32_t *eax, uint32_t *ebx,
			   uint32_t *ecx, uint32_t *edx)
{
	*eax = function;
	*ecx = index;

	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx)
	    : "memory");
}

static inline void cpuid(uint32_t function,
			 uint32_t *eax, uint32_t *ebx,
			 uint32_t *ecx, uint32_t *edx)
{
	return __cpuid(function, 0, eax, ebx, ecx, edx);
}

static inline uint32_t this_cpu_fms(void)
{
	uint32_t eax, ebx, ecx, edx;

	cpuid(1, &eax, &ebx, &ecx, &edx);
	return eax;
}

static inline uint32_t this_cpu_family(void)
{
	return x86_family(this_cpu_fms());
}

static inline uint32_t this_cpu_model(void)
{
	return x86_model(this_cpu_fms());
}

static inline uint32_t __this_cpu_has(uint32_t function, uint32_t index,
				      uint8_t reg, uint8_t lo, uint8_t hi)
{
	uint32_t gprs[4];

	__cpuid(function, index,
		&gprs[KVM_CPUID_EAX], &gprs[KVM_CPUID_EBX],
		&gprs[KVM_CPUID_ECX], &gprs[KVM_CPUID_EDX]);

	return (gprs[reg] & GENMASK(hi, lo)) >> lo;
}

static inline bool this_cpu_has(struct kvm_x86_cpu_feature feature)
{
	return __this_cpu_has(feature.function, feature.index,
			      feature.reg, feature.bit, feature.bit);
}

static inline uint32_t this_cpu_property(struct kvm_x86_cpu_property property)
{
	return __this_cpu_has(property.function, property.index,
			      property.reg, property.lo_bit, property.hi_bit);
}

static __always_inline bool this_cpu_has_p(struct kvm_x86_cpu_property property)
{
	uint32_t max_leaf;

	switch (property.function & 0xc0000000) {
	case 0:
		max_leaf = this_cpu_property(X86_PROPERTY_MAX_BASIC_LEAF);
		break;
	case 0x40000000:
		max_leaf = this_cpu_property(X86_PROPERTY_MAX_KVM_LEAF);
		break;
	case 0x80000000:
		max_leaf = this_cpu_property(X86_PROPERTY_MAX_EXT_LEAF);
		break;
	case 0xc0000000:
		max_leaf = this_cpu_property(X86_PROPERTY_MAX_CENTAUR_LEAF);
	}
	return max_leaf >= property.function;
}

static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature)
{
	uint32_t nr_bits = this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);

	return nr_bits > feature.anti_feature.bit &&
	       !this_cpu_has(feature.anti_feature);
}
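
/*
 * Illustrative sketch (editor's addition, not part of the upstream header):
 * typical guest-side usage of the accessors above.  The specific feature,
 * property, and 36-bit threshold are arbitrary examples.
 */
static inline bool this_cpu_checks_example(void)
{
	if (!this_cpu_has(X86_FEATURE_XSAVE))
		return false;

	return this_cpu_has_p(X86_PROPERTY_MAX_PHY_ADDR) &&
	       this_cpu_property(X86_PROPERTY_MAX_PHY_ADDR) >= 36;
}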

typedef u32 __attribute__((vector_size(16))) sse128_t;
#define __sse128_u		union { sse128_t vec; u64 as_u64[2]; u32 as_u32[4]; }
#define sse128_lo(x)		({ __sse128_u t; t.vec = x; t.as_u64[0]; })
#define sse128_hi(x)		({ __sse128_u t; t.vec = x; t.as_u64[1]; })

static inline void read_sse_reg(int reg, sse128_t *data)
{
	switch (reg) {
	case 0:
		asm("movdqa %%xmm0, %0" : "=m"(*data));
		break;
	case 1:
		asm("movdqa %%xmm1, %0" : "=m"(*data));
		break;
	case 2:
		asm("movdqa %%xmm2, %0" : "=m"(*data));
		break;
	case 3:
		asm("movdqa %%xmm3, %0" : "=m"(*data));
		break;
	case 4:
		asm("movdqa %%xmm4, %0" : "=m"(*data));
		break;
	case 5:
		asm("movdqa %%xmm5, %0" : "=m"(*data));
		break;
	case 6:
		asm("movdqa %%xmm6, %0" : "=m"(*data));
		break;
	case 7:
		asm("movdqa %%xmm7, %0" : "=m"(*data));
		break;
	default:
		BUG();
	}
}

static inline void write_sse_reg(int reg, const sse128_t *data)
{
	switch (reg) {
	case 0:
		asm("movdqa %0, %%xmm0" : : "m"(*data));
		break;
	case 1:
		asm("movdqa %0, %%xmm1" : : "m"(*data));
		break;
	case 2:
		asm("movdqa %0, %%xmm2" : : "m"(*data));
		break;
	case 3:
		asm("movdqa %0, %%xmm3" : : "m"(*data));
		break;
	case 4:
		asm("movdqa %0, %%xmm4" : : "m"(*data));
		break;
	case 5:
		asm("movdqa %0, %%xmm5" : : "m"(*data));
		break;
	case 6:
		asm("movdqa %0, %%xmm6" : : "m"(*data));
		break;
	case 7:
		asm("movdqa %0, %%xmm7" : : "m"(*data));
		break;
	default:
		BUG();
	}
}

static inline void cpu_relax(void)
{
	asm volatile("rep; nop" ::: "memory");
}

#define ud2()			\
	__asm__ __volatile__(	\
		"ud2\n"		\
		)

#define hlt()			\
	__asm__ __volatile__(	\
		"hlt\n"		\
		)

bool is_intel_cpu(void);
bool is_amd_cpu(void);

struct kvm_x86_state *vcpu_save_state(struct kvm_vcpu *vcpu);
void vcpu_load_state(struct kvm_vcpu *vcpu, struct kvm_x86_state *state);
void kvm_x86_state_cleanup(struct kvm_x86_state *state);

const struct kvm_msr_list *kvm_get_msr_index_list(void);
const struct kvm_msr_list *kvm_get_feature_msr_index_list(void);
bool kvm_msr_is_in_save_restore_list(uint32_t msr_index);
uint64_t kvm_get_feature_msr(uint64_t msr_index);

static inline void vcpu_msrs_get(struct kvm_vcpu *vcpu,
				 struct kvm_msrs *msrs)
{
	int r = __vcpu_ioctl(vcpu, KVM_GET_MSRS, msrs);

	TEST_ASSERT(r == msrs->nmsrs,
		    "KVM_GET_MSRS failed, r: %i (failed on MSR %x)",
		    r, r < 0 || r >= msrs->nmsrs ? -1 : msrs->entries[r].index);
}
static inline void vcpu_msrs_set(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs)
{
	int r = __vcpu_ioctl(vcpu, KVM_SET_MSRS, msrs);

	TEST_ASSERT(r == msrs->nmsrs,
		    "KVM_SET_MSRS failed, r: %i (failed on MSR %x)",
		    r, r < 0 || r >= msrs->nmsrs ? -1 : msrs->entries[r].index);
}
static inline void vcpu_debugregs_get(struct kvm_vcpu *vcpu,
				      struct kvm_debugregs *debugregs)
{
	vcpu_ioctl(vcpu, KVM_GET_DEBUGREGS, debugregs);
}
static inline void vcpu_debugregs_set(struct kvm_vcpu *vcpu,
				      struct kvm_debugregs *debugregs)
{
	vcpu_ioctl(vcpu, KVM_SET_DEBUGREGS, debugregs);
}
static inline void vcpu_xsave_get(struct kvm_vcpu *vcpu,
				  struct kvm_xsave *xsave)
{
	vcpu_ioctl(vcpu, KVM_GET_XSAVE, xsave);
}
static inline void vcpu_xsave2_get(struct kvm_vcpu *vcpu,
				   struct kvm_xsave *xsave)
{
	vcpu_ioctl(vcpu, KVM_GET_XSAVE2, xsave);
}
static inline void vcpu_xsave_set(struct kvm_vcpu *vcpu,
				  struct kvm_xsave *xsave)
{
	vcpu_ioctl(vcpu, KVM_SET_XSAVE, xsave);
}
static inline void vcpu_xcrs_get(struct kvm_vcpu *vcpu,
				 struct kvm_xcrs *xcrs)
{
	vcpu_ioctl(vcpu, KVM_GET_XCRS, xcrs);
}
static inline void vcpu_xcrs_set(struct kvm_vcpu *vcpu, struct kvm_xcrs *xcrs)
{
	vcpu_ioctl(vcpu, KVM_SET_XCRS, xcrs);
}

const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
					       uint32_t function, uint32_t index);
const struct kvm_cpuid2 *kvm_get_supported_cpuid(void);
const struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void);
const struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vcpu *vcpu);

static inline uint32_t kvm_cpu_fms(void)
{
	return get_cpuid_entry(kvm_get_supported_cpuid(), 0x1, 0)->eax;
}

static inline uint32_t kvm_cpu_family(void)
{
	return x86_family(kvm_cpu_fms());
}

static inline uint32_t kvm_cpu_model(void)
{
	return x86_model(kvm_cpu_fms());
}

bool kvm_cpuid_has(const struct kvm_cpuid2 *cpuid,
		   struct kvm_x86_cpu_feature feature);

static inline bool kvm_cpu_has(struct kvm_x86_cpu_feature feature)
{
	return kvm_cpuid_has(kvm_get_supported_cpuid(), feature);
}

uint32_t kvm_cpuid_property(const struct kvm_cpuid2 *cpuid,
			    struct kvm_x86_cpu_property property);

static inline uint32_t kvm_cpu_property(struct kvm_x86_cpu_property property)
{
	return kvm_cpuid_property(kvm_get_supported_cpuid(), property);
}

static __always_inline bool kvm_cpu_has_p(struct kvm_x86_cpu_property property)
{
	uint32_t max_leaf;

	switch (property.function & 0xc0000000) {
	case 0:
		max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_BASIC_LEAF);
		break;
	case 0x40000000:
		max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_KVM_LEAF);
		break;
	case 0x80000000:
		max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_EXT_LEAF);
		break;
	case 0xc0000000:
		max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_CENTAUR_LEAF);
	}
	return max_leaf >= property.function;
}

static inline bool kvm_pmu_has(struct kvm_x86_pmu_feature feature)
{
	uint32_t nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);

	return nr_bits > feature.anti_feature.bit &&
	       !kvm_cpu_has(feature.anti_feature);
}

static inline size_t kvm_cpuid2_size(int nr_entries)
{
	return sizeof(struct kvm_cpuid2) +
	       sizeof(struct kvm_cpuid_entry2) * nr_entries;
}

/*
 * Allocate a struct kvm_cpuid2 instance, with the 0-length array of
 * entries sized to hold @nr_entries.  The caller is responsible for freeing
 * the struct.
 */
static inline struct kvm_cpuid2 *allocate_kvm_cpuid2(int nr_entries)
{
	struct kvm_cpuid2 *cpuid;

	cpuid = malloc(kvm_cpuid2_size(nr_entries));
	TEST_ASSERT(cpuid, "-ENOMEM when allocating kvm_cpuid2");

	cpuid->nent = nr_entries;

	return cpuid;
}
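
/*
 * Illustrative sketch (editor's addition, not part of the upstream header):
 * per the comment above, the allocation is a plain malloc() under the hood,
 * so the caller frees the struct (and only the struct) when done.
 */
static inline void kvm_cpuid2_alloc_free_example(void)
{
	struct kvm_cpuid2 *cpuid = allocate_kvm_cpuid2(1);

	/* ... fill cpuid->entries[0] and pass @cpuid to an ioctl wrapper ... */

	free(cpuid);
}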

void vcpu_init_cpuid(struct kvm_vcpu *vcpu, const struct kvm_cpuid2 *cpuid);
void vcpu_set_hv_cpuid(struct kvm_vcpu *vcpu);

static inline struct kvm_cpuid_entry2 *__vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu,
							       uint32_t function,
							       uint32_t index)
{
	return (struct kvm_cpuid_entry2 *)get_cpuid_entry(vcpu->cpuid,
							  function, index);
}

static inline struct kvm_cpuid_entry2 *vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu,
							     uint32_t function)
{
	return __vcpu_get_cpuid_entry(vcpu, function, 0);
}

static inline int __vcpu_set_cpuid(struct kvm_vcpu *vcpu)
{
	int r;

	TEST_ASSERT(vcpu->cpuid, "Must do vcpu_init_cpuid() first");
	r = __vcpu_ioctl(vcpu, KVM_SET_CPUID2, vcpu->cpuid);
	if (r)
		return r;

	/* On success, refresh the cache to pick up adjustments made by KVM. */
	vcpu_ioctl(vcpu, KVM_GET_CPUID2, vcpu->cpuid);
	return 0;
}

static inline void vcpu_set_cpuid(struct kvm_vcpu *vcpu)
{
	TEST_ASSERT(vcpu->cpuid, "Must do vcpu_init_cpuid() first");
	vcpu_ioctl(vcpu, KVM_SET_CPUID2, vcpu->cpuid);

	/* Refresh the cache to pick up adjustments made by KVM. */
	vcpu_ioctl(vcpu, KVM_GET_CPUID2, vcpu->cpuid);
}

void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, uint8_t maxphyaddr);

void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function);
void vcpu_set_or_clear_cpuid_feature(struct kvm_vcpu *vcpu,
				     struct kvm_x86_cpu_feature feature,
				     bool set);

static inline void vcpu_set_cpuid_feature(struct kvm_vcpu *vcpu,
					  struct kvm_x86_cpu_feature feature)
{
	vcpu_set_or_clear_cpuid_feature(vcpu, feature, true);
}

static inline void vcpu_clear_cpuid_feature(struct kvm_vcpu *vcpu,
					    struct kvm_x86_cpu_feature feature)
{
	vcpu_set_or_clear_cpuid_feature(vcpu, feature, false);
}

uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index);
int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value);

static inline void vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index,
				uint64_t msr_value)
{
	int r = _vcpu_set_msr(vcpu, msr_index, msr_value);

	TEST_ASSERT(r == 1, KVM_IOCTL_ERROR(KVM_SET_MSRS, r));
}

void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);
bool vm_is_unrestricted_guest(struct kvm_vm *vm);

struct ex_regs {
	uint64_t rax, rcx, rdx, rbx;
	uint64_t rbp, rsi, rdi;
	uint64_t r8, r9, r10, r11;
	uint64_t r12, r13, r14, r15;
	uint64_t vector;
	uint64_t error_code;
	uint64_t rip;
	uint64_t cs;
	uint64_t rflags;
};

struct idt_entry {
	uint16_t offset0;
	uint16_t selector;
	uint16_t ist : 3;
	uint16_t : 5;
	uint16_t type : 4;
	uint16_t : 1;
	uint16_t dpl : 2;
	uint16_t p : 1;
	uint16_t offset1;
	uint32_t offset2; uint32_t reserved;
};

void vm_init_descriptor_tables(struct kvm_vm *vm);
void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu);
void vm_install_exception_handler(struct kvm_vm *vm, int vector,
				  void (*handler)(struct ex_regs *));

/* If a toddler were to say "abracadabra". */
#define KVM_EXCEPTION_MAGIC 0xabacadabaULL

/*
 * KVM selftest exception fixup uses registers to coordinate with the exception
 * handler, versus the kernel's in-memory tables and KVM-Unit-Tests's in-memory
 * per-CPU data.  Using only registers avoids having to map memory into the
 * guest, doesn't require a valid, stable GS.base, and reduces the risk of
 * recursive faults when accessing memory in the handler.  The downside to
 * using registers is that it restricts what registers can be used by the
 * actual instruction.  But, selftests are 64-bit only, making register
 * pressure a minor concern.  Use r9-r11 as they are volatile, i.e. don't need
 * to be saved by the callee, and except for r11 are not implicit parameters
 * to any instructions.  Ideally, fixup would use r8-r10 and thus avoid
 * implicit parameters entirely, but Hyper-V's hypercall ABI uses r8 and
 * testing Hyper-V is higher priority than testing non-faulting SYSCALL/SYSRET.
 *
 * Note, the fixup handler deliberately does not handle #DE, i.e. the vector
 * is guaranteed to be non-zero on fault.
 *
 * REGISTER INPUTS:
 * r9  = MAGIC
 * r10 = RIP
 * r11 = new RIP on fault
 *
 * REGISTER OUTPUTS:
 * r9  = exception vector (non-zero)
 * r10 = error code
 */
#define KVM_ASM_SAFE(insn)					\
	"mov $" __stringify(KVM_EXCEPTION_MAGIC) ", %%r9\n\t"	\
	"lea 1f(%%rip), %%r10\n\t"				\
	"lea 2f(%%rip), %%r11\n\t"				\
	"1: " insn "\n\t"					\
	"xor %%r9, %%r9\n\t"					\
	"2:\n\t"						\
	"mov %%r9b, %[vector]\n\t"				\
	"mov %%r10, %[error_code]\n\t"

#define KVM_ASM_SAFE_OUTPUTS(v, ec)	[vector] "=qm"(v), [error_code] "=rm"(ec)
#define KVM_ASM_SAFE_CLOBBERS		"r9", "r10", "r11"

#define kvm_asm_safe(insn, inputs...)					\
({									\
	uint64_t ign_error_code;					\
	uint8_t vector;							\
									\
	asm volatile(KVM_ASM_SAFE(insn)					\
		     : KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code)	\
		     : inputs						\
		     : KVM_ASM_SAFE_CLOBBERS);				\
	vector;								\
})

#define kvm_asm_safe_ec(insn, error_code, inputs...)			\
({									\
	uint8_t vector;							\
									\
	asm volatile(KVM_ASM_SAFE(insn)					\
		     : KVM_ASM_SAFE_OUTPUTS(vector, error_code)		\
		     : inputs						\
		     : KVM_ASM_SAFE_CLOBBERS);				\
	vector;								\
})

static inline uint8_t rdmsr_safe(uint32_t msr, uint64_t *val)
{
	uint64_t error_code;
	uint8_t vector;
	uint32_t a, d;

	asm volatile(KVM_ASM_SAFE("rdmsr")
		     : "=a"(a), "=d"(d), KVM_ASM_SAFE_OUTPUTS(vector, error_code)
		     : "c"(msr)
		     : KVM_ASM_SAFE_CLOBBERS);

	*val = (uint64_t)a | ((uint64_t)d << 32);
	return vector;
}

static inline uint8_t wrmsr_safe(uint32_t msr, uint64_t val)
{
	return kvm_asm_safe("wrmsr", "a"(val & -1u), "d"(val >> 32), "c"(msr));
}
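
/*
 * Illustrative sketch (editor's addition, not part of the upstream header):
 * kvm_asm_safe() evaluates to the vector, 0 if the instruction did not fault;
 * kvm_asm_safe_ec() additionally reports the error code (r10, per the
 * REGISTER OUTPUTS comment above), e.g. for asserting on a #GP's error code.
 */
static inline uint8_t wrmsr_safe_ec_example(uint32_t msr, uint64_t val,
					    uint64_t *error_code)
{
	return kvm_asm_safe_ec("wrmsr", *error_code,
			       "a"(val & -1u), "d"(val >> 32), "c"(msr));
}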

bool kvm_is_tdp_enabled(void);

uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr,
				    int *level);
uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr);

uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
		       uint64_t a3);

void __vm_xsave_require_permission(int bit, const char *name);

#define vm_xsave_require_permission(perm)	\
	__vm_xsave_require_permission(perm, #perm)

enum pg_level {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
	PG_LEVEL_512G,
	PG_LEVEL_NUM
};

#define PG_LEVEL_SHIFT(_level) ((_level - 1) * 9 + 12)
#define PG_LEVEL_SIZE(_level) (1ull << PG_LEVEL_SHIFT(_level))

#define PG_SIZE_4K PG_LEVEL_SIZE(PG_LEVEL_4K)
#define PG_SIZE_2M PG_LEVEL_SIZE(PG_LEVEL_2M)
#define PG_SIZE_1G PG_LEVEL_SIZE(PG_LEVEL_1G)

void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level);
void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
		    uint64_t nr_bytes, int level);

/*
 * Basic CPU control in CR0
 */
#define X86_CR0_PE	(1UL<<0)  /* Protection Enable */
#define X86_CR0_MP	(1UL<<1)  /* Monitor Coprocessor */
#define X86_CR0_EM	(1UL<<2)  /* Emulation */
#define X86_CR0_TS	(1UL<<3)  /* Task Switched */
#define X86_CR0_ET	(1UL<<4)  /* Extension Type */
#define X86_CR0_NE	(1UL<<5)  /* Numeric Error */
#define X86_CR0_WP	(1UL<<16) /* Write Protect */
#define X86_CR0_AM	(1UL<<18) /* Alignment Mask */
#define X86_CR0_NW	(1UL<<29) /* Not Write-through */
#define X86_CR0_CD	(1UL<<30) /* Cache Disable */
#define X86_CR0_PG	(1UL<<31) /* Paging */

#define XSTATE_XTILE_CFG_BIT		17
#define XSTATE_XTILE_DATA_BIT		18

#define XSTATE_XTILE_CFG_MASK		(1ULL << XSTATE_XTILE_CFG_BIT)
#define XSTATE_XTILE_DATA_MASK		(1ULL << XSTATE_XTILE_DATA_BIT)
#define XFEATURE_XTILE_MASK		(XSTATE_XTILE_CFG_MASK | \
					 XSTATE_XTILE_DATA_MASK)

#define PFERR_PRESENT_BIT 0
#define PFERR_WRITE_BIT 1
#define PFERR_USER_BIT 2
#define PFERR_RSVD_BIT 3
#define PFERR_FETCH_BIT 4
#define PFERR_PK_BIT 5
#define PFERR_SGX_BIT 15
#define PFERR_GUEST_FINAL_BIT 32
#define PFERR_GUEST_PAGE_BIT 33
#define PFERR_IMPLICIT_ACCESS_BIT 48

#define PFERR_PRESENT_MASK	BIT(PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK	BIT(PFERR_WRITE_BIT)
#define PFERR_USER_MASK		BIT(PFERR_USER_BIT)
#define PFERR_RSVD_MASK		BIT(PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK	BIT(PFERR_FETCH_BIT)
#define PFERR_PK_MASK		BIT(PFERR_PK_BIT)
#define PFERR_SGX_MASK		BIT(PFERR_SGX_BIT)
#define PFERR_GUEST_FINAL_MASK	BIT_ULL(PFERR_GUEST_FINAL_BIT)
#define PFERR_GUEST_PAGE_MASK	BIT_ULL(PFERR_GUEST_PAGE_BIT)
#define PFERR_IMPLICIT_ACCESS	BIT_ULL(PFERR_IMPLICIT_ACCESS_BIT)

#endif /* SELFTEST_KVM_PROCESSOR_H */