Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

selftests: KVM: AMD Nested test infrastructure

Add the basic infrastructure needed to test AMD nested SVM.
This is largely copied from the KVM unit test infrastructure.

Signed-off-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Authored by Eric Auger and committed by Paolo Bonzini · 20ba262f 1ecaabed

5 files changed: +517 -1
+1 -1
tools/testing/selftests/kvm/Makefile
@@ -8,7 +8,7 @@
 UNAME_M := $(shell uname -m)
 
 LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c
-LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/ucall.c
+LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/svm.c lib/x86_64/ucall.c
 LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c
 LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c
 
+20
tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -56,6 +56,26 @@
 	R15,
 };
 
+/* General Registers in 64-Bit Mode */
+struct gpr64_regs {
+	u64 rax;
+	u64 rcx;
+	u64 rdx;
+	u64 rbx;
+	u64 rsp;
+	u64 rbp;
+	u64 rsi;
+	u64 rdi;
+	u64 r8;
+	u64 r9;
+	u64 r10;
+	u64 r11;
+	u64 r12;
+	u64 r13;
+	u64 r14;
+	u64 r15;
+};
+
 struct desc64 {
 	uint16_t limit0;
 	uint16_t base0;
+297
tools/testing/selftests/kvm/include/x86_64/svm.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * tools/testing/selftests/kvm/include/x86_64/svm.h
 * This is a copy of arch/x86/include/asm/svm.h
 *
 */

#ifndef SELFTEST_KVM_SVM_H
#define SELFTEST_KVM_SVM_H

enum {
	INTERCEPT_INTR,
	INTERCEPT_NMI,
	INTERCEPT_SMI,
	INTERCEPT_INIT,
	INTERCEPT_VINTR,
	INTERCEPT_SELECTIVE_CR0,
	INTERCEPT_STORE_IDTR,
	INTERCEPT_STORE_GDTR,
	INTERCEPT_STORE_LDTR,
	INTERCEPT_STORE_TR,
	INTERCEPT_LOAD_IDTR,
	INTERCEPT_LOAD_GDTR,
	INTERCEPT_LOAD_LDTR,
	INTERCEPT_LOAD_TR,
	INTERCEPT_RDTSC,
	INTERCEPT_RDPMC,
	INTERCEPT_PUSHF,
	INTERCEPT_POPF,
	INTERCEPT_CPUID,
	INTERCEPT_RSM,
	INTERCEPT_IRET,
	INTERCEPT_INTn,
	INTERCEPT_INVD,
	INTERCEPT_PAUSE,
	INTERCEPT_HLT,
	INTERCEPT_INVLPG,
	INTERCEPT_INVLPGA,
	INTERCEPT_IOIO_PROT,
	INTERCEPT_MSR_PROT,
	INTERCEPT_TASK_SWITCH,
	INTERCEPT_FERR_FREEZE,
	INTERCEPT_SHUTDOWN,
	INTERCEPT_VMRUN,
	INTERCEPT_VMMCALL,
	INTERCEPT_VMLOAD,
	INTERCEPT_VMSAVE,
	INTERCEPT_STGI,
	INTERCEPT_CLGI,
	INTERCEPT_SKINIT,
	INTERCEPT_RDTSCP,
	INTERCEPT_ICEBP,
	INTERCEPT_WBINVD,
	INTERCEPT_MONITOR,
	INTERCEPT_MWAIT,
	INTERCEPT_MWAIT_COND,
	INTERCEPT_XSETBV,
	INTERCEPT_RDPRU,
};


struct __attribute__ ((__packed__)) vmcb_control_area {
	u32 intercept_cr;
	u32 intercept_dr;
	u32 intercept_exceptions;
	u64 intercept;
	u8 reserved_1[40];
	u16 pause_filter_thresh;
	u16 pause_filter_count;
	u64 iopm_base_pa;
	u64 msrpm_base_pa;
	u64 tsc_offset;
	u32 asid;
	u8 tlb_ctl;
	u8 reserved_2[3];
	u32 int_ctl;
	u32 int_vector;
	u32 int_state;
	u8 reserved_3[4];
	u32 exit_code;
	u32 exit_code_hi;
	u64 exit_info_1;
	u64 exit_info_2;
	u32 exit_int_info;
	u32 exit_int_info_err;
	u64 nested_ctl;
	u64 avic_vapic_bar;
	u8 reserved_4[8];
	u32 event_inj;
	u32 event_inj_err;
	u64 nested_cr3;
	u64 virt_ext;
	u32 clean;
	u32 reserved_5;
	u64 next_rip;
	u8 insn_len;
	u8 insn_bytes[15];
	u64 avic_backing_page;	/* Offset 0xe0 */
	u8 reserved_6[8];	/* Offset 0xe8 */
	u64 avic_logical_id;	/* Offset 0xf0 */
	u64 avic_physical_id;	/* Offset 0xf8 */
	u8 reserved_7[768];
};


#define TLB_CONTROL_DO_NOTHING 0
#define TLB_CONTROL_FLUSH_ALL_ASID 1
#define TLB_CONTROL_FLUSH_ASID 3
#define TLB_CONTROL_FLUSH_ASID_LOCAL 7

#define V_TPR_MASK 0x0f

#define V_IRQ_SHIFT 8
#define V_IRQ_MASK (1 << V_IRQ_SHIFT)

#define V_GIF_SHIFT 9
#define V_GIF_MASK (1 << V_GIF_SHIFT)

#define V_INTR_PRIO_SHIFT 16
#define V_INTR_PRIO_MASK (0x0f << V_INTR_PRIO_SHIFT)

#define V_IGN_TPR_SHIFT 20
#define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT)

#define V_INTR_MASKING_SHIFT 24
#define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)

#define V_GIF_ENABLE_SHIFT 25
#define V_GIF_ENABLE_MASK (1 << V_GIF_ENABLE_SHIFT)

#define AVIC_ENABLE_SHIFT 31
#define AVIC_ENABLE_MASK (1 << AVIC_ENABLE_SHIFT)

#define LBR_CTL_ENABLE_MASK BIT_ULL(0)
#define VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK BIT_ULL(1)

#define SVM_INTERRUPT_SHADOW_MASK 1

#define SVM_IOIO_STR_SHIFT 2
#define SVM_IOIO_REP_SHIFT 3
#define SVM_IOIO_SIZE_SHIFT 4
#define SVM_IOIO_ASIZE_SHIFT 7

#define SVM_IOIO_TYPE_MASK 1
#define SVM_IOIO_STR_MASK (1 << SVM_IOIO_STR_SHIFT)
#define SVM_IOIO_REP_MASK (1 << SVM_IOIO_REP_SHIFT)
#define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT)
#define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT)

#define SVM_VM_CR_VALID_MASK 0x001fULL
#define SVM_VM_CR_SVM_LOCK_MASK 0x0008ULL
#define SVM_VM_CR_SVM_DIS_MASK 0x0010ULL

#define SVM_NESTED_CTL_NP_ENABLE BIT(0)
#define SVM_NESTED_CTL_SEV_ENABLE BIT(1)

struct __attribute__ ((__packed__)) vmcb_seg {
	u16 selector;
	u16 attrib;
	u32 limit;
	u64 base;
};

struct __attribute__ ((__packed__)) vmcb_save_area {
	struct vmcb_seg es;
	struct vmcb_seg cs;
	struct vmcb_seg ss;
	struct vmcb_seg ds;
	struct vmcb_seg fs;
	struct vmcb_seg gs;
	struct vmcb_seg gdtr;
	struct vmcb_seg ldtr;
	struct vmcb_seg idtr;
	struct vmcb_seg tr;
	u8 reserved_1[43];
	u8 cpl;
	u8 reserved_2[4];
	u64 efer;
	u8 reserved_3[112];
	u64 cr4;
	u64 cr3;
	u64 cr0;
	u64 dr7;
	u64 dr6;
	u64 rflags;
	u64 rip;
	u8 reserved_4[88];
	u64 rsp;
	u8 reserved_5[24];
	u64 rax;
	u64 star;
	u64 lstar;
	u64 cstar;
	u64 sfmask;
	u64 kernel_gs_base;
	u64 sysenter_cs;
	u64 sysenter_esp;
	u64 sysenter_eip;
	u64 cr2;
	u8 reserved_6[32];
	u64 g_pat;
	u64 dbgctl;
	u64 br_from;
	u64 br_to;
	u64 last_excp_from;
	u64 last_excp_to;
};

struct __attribute__ ((__packed__)) vmcb {
	struct vmcb_control_area control;
	struct vmcb_save_area save;
};

#define SVM_CPUID_FUNC 0x8000000a

#define SVM_VM_CR_SVM_DISABLE 4

#define SVM_SELECTOR_S_SHIFT 4
#define SVM_SELECTOR_DPL_SHIFT 5
#define SVM_SELECTOR_P_SHIFT 7
#define SVM_SELECTOR_AVL_SHIFT 8
#define SVM_SELECTOR_L_SHIFT 9
#define SVM_SELECTOR_DB_SHIFT 10
#define SVM_SELECTOR_G_SHIFT 11

#define SVM_SELECTOR_TYPE_MASK (0xf)
#define SVM_SELECTOR_S_MASK (1 << SVM_SELECTOR_S_SHIFT)
#define SVM_SELECTOR_DPL_MASK (3 << SVM_SELECTOR_DPL_SHIFT)
#define SVM_SELECTOR_P_MASK (1 << SVM_SELECTOR_P_SHIFT)
#define SVM_SELECTOR_AVL_MASK (1 << SVM_SELECTOR_AVL_SHIFT)
#define SVM_SELECTOR_L_MASK (1 << SVM_SELECTOR_L_SHIFT)
#define SVM_SELECTOR_DB_MASK (1 << SVM_SELECTOR_DB_SHIFT)
#define SVM_SELECTOR_G_MASK (1 << SVM_SELECTOR_G_SHIFT)

#define SVM_SELECTOR_WRITE_MASK (1 << 1)
#define SVM_SELECTOR_READ_MASK SVM_SELECTOR_WRITE_MASK
#define SVM_SELECTOR_CODE_MASK (1 << 3)

#define INTERCEPT_CR0_READ 0
#define INTERCEPT_CR3_READ 3
#define INTERCEPT_CR4_READ 4
#define INTERCEPT_CR8_READ 8
#define INTERCEPT_CR0_WRITE (16 + 0)
#define INTERCEPT_CR3_WRITE (16 + 3)
#define INTERCEPT_CR4_WRITE (16 + 4)
#define INTERCEPT_CR8_WRITE (16 + 8)

#define INTERCEPT_DR0_READ 0
#define INTERCEPT_DR1_READ 1
#define INTERCEPT_DR2_READ 2
#define INTERCEPT_DR3_READ 3
#define INTERCEPT_DR4_READ 4
#define INTERCEPT_DR5_READ 5
#define INTERCEPT_DR6_READ 6
#define INTERCEPT_DR7_READ 7
#define INTERCEPT_DR0_WRITE (16 + 0)
#define INTERCEPT_DR1_WRITE (16 + 1)
#define INTERCEPT_DR2_WRITE (16 + 2)
#define INTERCEPT_DR3_WRITE (16 + 3)
#define INTERCEPT_DR4_WRITE (16 + 4)
#define INTERCEPT_DR5_WRITE (16 + 5)
#define INTERCEPT_DR6_WRITE (16 + 6)
#define INTERCEPT_DR7_WRITE (16 + 7)

#define SVM_EVTINJ_VEC_MASK 0xff

#define SVM_EVTINJ_TYPE_SHIFT 8
#define SVM_EVTINJ_TYPE_MASK (7 << SVM_EVTINJ_TYPE_SHIFT)

#define SVM_EVTINJ_TYPE_INTR (0 << SVM_EVTINJ_TYPE_SHIFT)
#define SVM_EVTINJ_TYPE_NMI (2 << SVM_EVTINJ_TYPE_SHIFT)
#define SVM_EVTINJ_TYPE_EXEPT (3 << SVM_EVTINJ_TYPE_SHIFT)
#define SVM_EVTINJ_TYPE_SOFT (4 << SVM_EVTINJ_TYPE_SHIFT)

#define SVM_EVTINJ_VALID (1 << 31)
#define SVM_EVTINJ_VALID_ERR (1 << 11)

#define SVM_EXITINTINFO_VEC_MASK SVM_EVTINJ_VEC_MASK
#define SVM_EXITINTINFO_TYPE_MASK SVM_EVTINJ_TYPE_MASK

#define SVM_EXITINTINFO_TYPE_INTR SVM_EVTINJ_TYPE_INTR
#define SVM_EXITINTINFO_TYPE_NMI SVM_EVTINJ_TYPE_NMI
#define SVM_EXITINTINFO_TYPE_EXEPT SVM_EVTINJ_TYPE_EXEPT
#define SVM_EXITINTINFO_TYPE_SOFT SVM_EVTINJ_TYPE_SOFT

#define SVM_EXITINTINFO_VALID SVM_EVTINJ_VALID
#define SVM_EXITINTINFO_VALID_ERR SVM_EVTINJ_VALID_ERR

#define SVM_EXITINFOSHIFT_TS_REASON_IRET 36
#define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
#define SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE 44

#define SVM_EXITINFO_REG_MASK 0x0F

#define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP)

#endif /* SELFTEST_KVM_SVM_H */
+38
tools/testing/selftests/kvm/include/x86_64/svm_util.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * tools/testing/selftests/kvm/include/x86_64/svm_utils.h
 * Header for nested SVM testing
 *
 * Copyright (C) 2020, Red Hat, Inc.
 */

#ifndef SELFTEST_KVM_SVM_UTILS_H
#define SELFTEST_KVM_SVM_UTILS_H

#include <stdint.h>
#include "svm.h"
#include "processor.h"

#define CPUID_SVM_BIT 2
#define CPUID_SVM BIT_ULL(CPUID_SVM_BIT)

#define SVM_EXIT_VMMCALL 0x081

struct svm_test_data {
	/* VMCB */
	struct vmcb *vmcb; /* gva */
	void *vmcb_hva;
	uint64_t vmcb_gpa;

	/* host state-save area */
	struct vmcb_save_area *save_area; /* gva */
	void *save_area_hva;
	uint64_t save_area_gpa;
};

struct svm_test_data *vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva);
void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp);
void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa);
void nested_svm_check_supported(void);

#endif /* SELFTEST_KVM_SVM_UTILS_H */
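To see how these declarations are meant to be driven, here is a minimal, hypothetical sketch of L1 guest code exercising generic_svm_setup() and run_guest(). The function names l1_guest_code/l2_guest_code, the L2_GUEST_STACK_SIZE constant, and the overall flow are illustrative and not part of this patch; GUEST_ASSERT() and GUEST_DONE() are the existing selftest ucall helpers.

/* Hypothetical L1 guest code; assumes the selftest ucall helpers. */
#define L2_GUEST_STACK_SIZE 64

static void l2_guest_code(struct svm_test_data *svm)
{
	/* Trigger a VMMCALL vmexit back to L1. */
	__asm__ __volatile__("vmmcall");
}

static void l1_guest_code(struct svm_test_data *svm)
{
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	struct vmcb *vmcb = svm->vmcb;

	/* Enable EFER.SVME, snapshot L1 state into the VMCB, set L2 rip/rsp. */
	generic_svm_setup(svm, l2_guest_code,
			  &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	/* VMRUN into L2; returns on the first intercepted exit. */
	run_guest(vmcb, svm->vmcb_gpa);

	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
	GUEST_DONE();
}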
+161
tools/testing/selftests/kvm/lib/x86_64/svm.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * tools/testing/selftests/kvm/lib/x86_64/svm.c
 * Helpers used for nested SVM testing
 * Largely inspired from KVM unit test svm.c
 *
 * Copyright (C) 2020, Red Hat, Inc.
 */

#include "test_util.h"
#include "kvm_util.h"
#include "../kvm_util_internal.h"
#include "processor.h"
#include "svm_util.h"

struct gpr64_regs guest_regs;
u64 rflags;

/* Allocate memory regions for nested SVM tests.
 *
 * Input Args:
 *   vm - The VM to allocate guest-virtual addresses in.
 *
 * Output Args:
 *   p_svm_gva - The guest virtual address for the struct svm_test_data.
 *
 * Return:
 *   Pointer to structure with the addresses of the SVM areas.
 */
struct svm_test_data *
vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva)
{
	vm_vaddr_t svm_gva = vm_vaddr_alloc(vm, getpagesize(),
					    0x10000, 0, 0);
	struct svm_test_data *svm = addr_gva2hva(vm, svm_gva);

	svm->vmcb = (void *)vm_vaddr_alloc(vm, getpagesize(),
					   0x10000, 0, 0);
	svm->vmcb_hva = addr_gva2hva(vm, (uintptr_t)svm->vmcb);
	svm->vmcb_gpa = addr_gva2gpa(vm, (uintptr_t)svm->vmcb);

	svm->save_area = (void *)vm_vaddr_alloc(vm, getpagesize(),
						0x10000, 0, 0);
	svm->save_area_hva = addr_gva2hva(vm, (uintptr_t)svm->save_area);
	svm->save_area_gpa = addr_gva2gpa(vm, (uintptr_t)svm->save_area);

	*p_svm_gva = svm_gva;
	return svm;
}

static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
			 u64 base, u32 limit, u32 attr)
{
	seg->selector = selector;
	seg->attrib = attr;
	seg->limit = limit;
	seg->base = base;
}

void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp)
{
	struct vmcb *vmcb = svm->vmcb;
	uint64_t vmcb_gpa = svm->vmcb_gpa;
	struct vmcb_save_area *save = &vmcb->save;
	struct vmcb_control_area *ctrl = &vmcb->control;
	u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
		| SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
	u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
		| SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
	uint64_t efer;

	efer = rdmsr(MSR_EFER);
	wrmsr(MSR_EFER, efer | EFER_SVME);
	wrmsr(MSR_VM_HSAVE_PA, svm->save_area_gpa);

	memset(vmcb, 0, sizeof(*vmcb));
	asm volatile ("vmsave\n\t" : : "a" (vmcb_gpa) : "memory");
	vmcb_set_seg(&save->es, get_es(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->cs, get_cs(), 0, -1U, code_seg_attr);
	vmcb_set_seg(&save->ss, get_ss(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->ds, get_ds(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->gdtr, 0, get_gdt().address, get_gdt().size, 0);
	vmcb_set_seg(&save->idtr, 0, get_idt().address, get_idt().size, 0);

	ctrl->asid = 1;
	save->cpl = 0;
	save->efer = rdmsr(MSR_EFER);
	asm volatile ("mov %%cr4, %0" : "=r"(save->cr4) : : "memory");
	asm volatile ("mov %%cr3, %0" : "=r"(save->cr3) : : "memory");
	asm volatile ("mov %%cr0, %0" : "=r"(save->cr0) : : "memory");
	asm volatile ("mov %%dr7, %0" : "=r"(save->dr7) : : "memory");
	asm volatile ("mov %%dr6, %0" : "=r"(save->dr6) : : "memory");
	asm volatile ("mov %%cr2, %0" : "=r"(save->cr2) : : "memory");
	save->g_pat = rdmsr(MSR_IA32_CR_PAT);
	save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
	ctrl->intercept = (1ULL << INTERCEPT_VMRUN) |
			  (1ULL << INTERCEPT_VMMCALL);

	vmcb->save.rip = (u64)guest_rip;
	vmcb->save.rsp = (u64)guest_rsp;
	guest_regs.rdi = (u64)svm;
}

/*
 * save/restore 64-bit general registers except rax, rip, rsp
 * which are directly handed through the VMCB guest processor state
 */
#define SAVE_GPR_C				\
	"xchg %%rbx, guest_regs+0x20\n\t"	\
	"xchg %%rcx, guest_regs+0x10\n\t"	\
	"xchg %%rdx, guest_regs+0x18\n\t"	\
	"xchg %%rbp, guest_regs+0x30\n\t"	\
	"xchg %%rsi, guest_regs+0x38\n\t"	\
	"xchg %%rdi, guest_regs+0x40\n\t"	\
	"xchg %%r8, guest_regs+0x48\n\t"	\
	"xchg %%r9, guest_regs+0x50\n\t"	\
	"xchg %%r10, guest_regs+0x58\n\t"	\
	"xchg %%r11, guest_regs+0x60\n\t"	\
	"xchg %%r12, guest_regs+0x68\n\t"	\
	"xchg %%r13, guest_regs+0x70\n\t"	\
	"xchg %%r14, guest_regs+0x78\n\t"	\
	"xchg %%r15, guest_regs+0x80\n\t"

#define LOAD_GPR_C SAVE_GPR_C

/*
 * selftests do not use interrupts so we dropped clgi/sti/cli/stgi
 * for now. registers involved in LOAD/SAVE_GPR_C are eventually
 * unmodified so they do not need to be in the clobber list.
 */
void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa)
{
	asm volatile (
		"vmload\n\t"
		"mov rflags, %%r15\n\t"	// rflags
		"mov %%r15, 0x170(%[vmcb])\n\t"
		"mov guest_regs, %%r15\n\t"	// rax
		"mov %%r15, 0x1f8(%[vmcb])\n\t"
		LOAD_GPR_C
		"vmrun\n\t"
		SAVE_GPR_C
		"mov 0x170(%[vmcb]), %%r15\n\t"	// rflags
		"mov %%r15, rflags\n\t"
		"mov 0x1f8(%[vmcb]), %%r15\n\t"	// rax
		"mov %%r15, guest_regs\n\t"
		"vmsave\n\t"
		: : [vmcb] "r" (vmcb), [vmcb_gpa] "a" (vmcb_gpa)
		: "r15", "memory");
}

void nested_svm_check_supported(void)
{
	struct kvm_cpuid_entry2 *entry =
		kvm_get_supported_cpuid_entry(0x80000001);

	if (!(entry->ecx & CPUID_SVM)) {
		fprintf(stderr, "nested SVM not enabled, skipping test\n");
		exit(KSFT_SKIP);
	}
}
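Finally, a hedged sketch of the host-side wiring such a test would need, assuming the pre-existing selftest helpers vm_create_default(), vcpu_args_set(), vcpu_run() and kvm_vm_free(). The VCPU_ID value, the missing ucall-handling loop, and the overall test flow are illustrative only and not part of this patch.

/* Hypothetical host-side usage; a real test would also loop on get_ucall(). */
#define VCPU_ID 5

int main(int argc, char *argv[])
{
	vm_vaddr_t svm_gva;
	struct kvm_vm *vm;

	/* Skip the test unless KVM reports nested SVM support. */
	nested_svm_check_supported();

	/* Create a VM whose vCPU starts executing l1_guest_code. */
	vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);

	/* Allocate the VMCB and host save area, hand their gva to the guest. */
	vcpu_alloc_svm(vm, &svm_gva);
	vcpu_args_set(vm, VCPU_ID, 1, svm_gva);

	/* Run until the guest signals completion via ucall. */
	vcpu_run(vm, VCPU_ID);

	kvm_vm_free(vm);
	return 0;
}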