Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: selftests: Extend vmx_close_while_nested_test to cover SVM

Add SVM L1 code to run the nested guest, and allow the test to run with
SVM as well as VMX.

Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
Link: https://patch.msgid.link/20251021074736.1324328-4-yosry.ahmed@linux.dev
[sean: rename to "nested_close_kvm_test" to provide nested_* sorting]
Signed-off-by: Sean Christopherson <seanjc@google.com>

Authored by Yosry Ahmed; committed by Sean Christopherson.
0a9eb2af 9e4ce7a8

+34 -10
+1 -1
tools/testing/selftests/kvm/Makefile.kvm
··· 88 88 TEST_GEN_PROGS_x86 += x86/kvm_buslock_test 89 89 TEST_GEN_PROGS_x86 += x86/monitor_mwait_test 90 90 TEST_GEN_PROGS_x86 += x86/msrs_test 91 + TEST_GEN_PROGS_x86 += x86/nested_close_kvm_test 91 92 TEST_GEN_PROGS_x86 += x86/nested_emulation_test 92 93 TEST_GEN_PROGS_x86 += x86/nested_exceptions_test 93 94 TEST_GEN_PROGS_x86 += x86/platform_info_test ··· 112 111 TEST_GEN_PROGS_x86 += x86/userspace_io_test 113 112 TEST_GEN_PROGS_x86 += x86/userspace_msr_exit_test 114 113 TEST_GEN_PROGS_x86 += x86/vmx_apic_access_test 115 - TEST_GEN_PROGS_x86 += x86/vmx_close_while_nested_test 116 114 TEST_GEN_PROGS_x86 += x86/vmx_dirty_log_test 117 115 TEST_GEN_PROGS_x86 += x86/vmx_exception_with_invalid_guest_state 118 116 TEST_GEN_PROGS_x86 += x86/vmx_msrs_test
+33 -9
tools/testing/selftests/kvm/x86/vmx_close_while_nested_test.c tools/testing/selftests/kvm/x86/nested_close_kvm_test.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 2 /* 3 - * vmx_close_while_nested 4 - * 5 3 * Copyright (C) 2019, Red Hat, Inc. 6 4 * 7 5 * Verify that nothing bad happens if a KVM user exits with open ··· 10 12 #include "kvm_util.h" 11 13 #include "processor.h" 12 14 #include "vmx.h" 15 + #include "svm_util.h" 13 16 14 17 #include <string.h> 15 18 #include <sys/ioctl.h> ··· 21 22 PORT_L0_EXIT = 0x2000, 22 23 }; 23 24 25 + #define L2_GUEST_STACK_SIZE 64 26 + 24 27 static void l2_guest_code(void) 25 28 { 26 29 /* Exit to L0 */ ··· 30 29 : : [port] "d" (PORT_L0_EXIT) : "rax"); 31 30 } 32 31 33 - static void l1_guest_code(struct vmx_pages *vmx_pages) 32 + static void l1_vmx_code(struct vmx_pages *vmx_pages) 34 33 { 35 - #define L2_GUEST_STACK_SIZE 64 36 34 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; 37 35 38 36 GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages)); ··· 45 45 GUEST_ASSERT(0); 46 46 } 47 47 48 + static void l1_svm_code(struct svm_test_data *svm) 49 + { 50 + unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; 51 + 52 + /* Prepare the VMCB for L2 execution. */ 53 + generic_svm_setup(svm, l2_guest_code, 54 + &l2_guest_stack[L2_GUEST_STACK_SIZE]); 55 + 56 + run_guest(svm->vmcb, svm->vmcb_gpa); 57 + GUEST_ASSERT(0); 58 + } 59 + 60 + static void l1_guest_code(void *data) 61 + { 62 + if (this_cpu_has(X86_FEATURE_VMX)) 63 + l1_vmx_code(data); 64 + else 65 + l1_svm_code(data); 66 + } 67 + 48 68 int main(int argc, char *argv[]) 49 69 { 50 - vm_vaddr_t vmx_pages_gva; 70 + vm_vaddr_t guest_gva; 51 71 struct kvm_vcpu *vcpu; 52 72 struct kvm_vm *vm; 53 73 54 - TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX)); 74 + TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX) || 75 + kvm_cpu_has(X86_FEATURE_SVM)); 55 76 56 77 vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code); 57 78 58 - /* Allocate VMX pages and shared descriptors (vmx_pages). 
*/ 59 - vcpu_alloc_vmx(vm, &vmx_pages_gva); 60 - vcpu_args_set(vcpu, 1, vmx_pages_gva); 79 + if (kvm_cpu_has(X86_FEATURE_VMX)) 80 + vcpu_alloc_vmx(vm, &guest_gva); 81 + else 82 + vcpu_alloc_svm(vm, &guest_gva); 83 + 84 + vcpu_args_set(vcpu, 1, guest_gva); 61 85 62 86 for (;;) { 63 87 volatile struct kvm_run *run = vcpu->run;