Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: x86: hyper-v: Make Hyper-V emulation enablement conditional

Hyper-V emulation is enabled in KVM unconditionally. This is bad at least
from a security standpoint as it is an extra attack surface. Ideally, there
should be a per-VM capability explicitly enabled by the VMM but currently it
is not the case and we can't mandate one without breaking backwards
compatibility. We can, however, check guest-visible CPUIDs and only enable
Hyper-V emulation when the "Hv#1" interface was exposed in
HYPERV_CPUID_INTERFACE.

Note, VMMs are free to act in any sequence they like, e.g. they can try
to set MSRs first and CPUIDs later, so we still need to allow the host
to read/write Hyper-V specific MSRs unconditionally.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20210126134816.1880136-14-vkuznets@redhat.com>
[Add selftest vcpu_set_hv_cpuid API to avoid breaking xen_vmcall_test. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

authored by

Vitaly Kuznetsov and committed by
Paolo Bonzini
8f014550 4592b7ea

+69 -46
+1
arch/x86/include/asm/kvm_host.h
··· 736 736 /* used for guest single stepping over the given code position */ 737 737 unsigned long singlestep_rip; 738 738 739 + bool hyperv_enabled; 739 740 struct kvm_vcpu_hv *hyperv; 740 741 struct kvm_vcpu_xen xen; 741 742
+2
arch/x86/kvm/cpuid.c
··· 179 179 vcpu->arch.cr4_guest_rsvd_bits = 180 180 __cr4_reserved_bits(guest_cpuid_has, vcpu); 181 181 182 + kvm_hv_set_cpuid(vcpu); 183 + 182 184 /* Invoke the vendor callback only after the above state is updated. */ 183 185 static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu); 184 186
+23 -4
arch/x86/kvm/hyperv.c
··· 37 37 #include "trace.h" 38 38 #include "irq.h" 39 39 40 + /* "Hv#1" signature */ 41 + #define HYPERV_CPUID_SIGNATURE_EAX 0x31237648 42 + 40 43 #define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, 64) 41 44 42 45 static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer, ··· 1476 1473 { 1477 1474 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); 1478 1475 1476 + if (!host && !vcpu->arch.hyperv_enabled) 1477 + return 1; 1478 + 1479 1479 if (kvm_hv_msr_partition_wide(msr)) { 1480 1480 int r; 1481 1481 ··· 1493 1487 int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) 1494 1488 { 1495 1489 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); 1490 + 1491 + if (!host && !vcpu->arch.hyperv_enabled) 1492 + return 1; 1496 1493 1497 1494 if (kvm_hv_msr_partition_wide(msr)) { 1498 1495 int r; ··· 1710 1701 return HV_STATUS_SUCCESS; 1711 1702 } 1712 1703 1713 - bool kvm_hv_hypercall_enabled(struct kvm *kvm) 1704 + void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu) 1714 1705 { 1715 - return to_kvm_hv(kvm)->hv_guest_os_id != 0; 1706 + struct kvm_cpuid_entry2 *entry; 1707 + 1708 + entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE, 0); 1709 + if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX) 1710 + vcpu->arch.hyperv_enabled = true; 1711 + else 1712 + vcpu->arch.hyperv_enabled = false; 1713 + } 1714 + 1715 + bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu) 1716 + { 1717 + return vcpu->arch.hyperv_enabled && to_kvm_hv(vcpu->kvm)->hv_guest_os_id; 1716 1718 } 1717 1719 1718 1720 static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result) ··· 2056 2036 break; 2057 2037 2058 2038 case HYPERV_CPUID_INTERFACE: 2059 - memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12); 2060 - ent->eax = signature[0]; 2039 + ent->eax = HYPERV_CPUID_SIGNATURE_EAX; 2061 2040 break; 2062 2041 2063 2042 case HYPERV_CPUID_VERSION:
+2 -1
arch/x86/kvm/hyperv.h
··· 92 92 int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host); 93 93 int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host); 94 94 95 - bool kvm_hv_hypercall_enabled(struct kvm *kvm); 95 + bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu); 96 96 int kvm_hv_hypercall(struct kvm_vcpu *vcpu); 97 97 98 98 void kvm_hv_irq_routing_update(struct kvm *kvm); ··· 141 141 142 142 void kvm_hv_init_vm(struct kvm *kvm); 143 143 void kvm_hv_destroy_vm(struct kvm *kvm); 144 + void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu); 144 145 int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args); 145 146 int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, 146 147 struct kvm_cpuid_entry2 __user *entries);
+1 -1
arch/x86/kvm/x86.c
··· 8162 8162 if (kvm_xen_hypercall_enabled(vcpu->kvm)) 8163 8163 return kvm_xen_hypercall(vcpu); 8164 8164 8165 - if (kvm_hv_hypercall_enabled(vcpu->kvm)) 8165 + if (kvm_hv_hypercall_enabled(vcpu)) 8166 8166 return kvm_hv_hypercall(vcpu); 8167 8167 8168 8168 nr = kvm_rax_read(vcpu);
+1 -1
arch/x86/kvm/xen.c
··· 369 369 370 370 /* Hyper-V hypercalls get bit 31 set in EAX */ 371 371 if ((input & 0x80000000) && 372 - kvm_hv_hypercall_enabled(vcpu->kvm)) 372 + kvm_hv_hypercall_enabled(vcpu)) 373 373 return kvm_hv_hypercall(vcpu); 374 374 375 375 longmode = is_64_bit_mode(vcpu);
+1
tools/testing/selftests/kvm/include/x86_64/processor.h
··· 407 407 uint64_t a3); 408 408 409 409 struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void); 410 + void vcpu_set_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid); 410 411 struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid); 411 412 412 413 /*
+35
tools/testing/selftests/kvm/lib/x86_64/processor.c
··· 1323 1323 return cpuid; 1324 1324 } 1325 1325 1326 + void vcpu_set_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid) 1327 + { 1328 + static struct kvm_cpuid2 *cpuid_full; 1329 + struct kvm_cpuid2 *cpuid_sys, *cpuid_hv; 1330 + int i, nent = 0; 1331 + 1332 + if (!cpuid_full) { 1333 + cpuid_sys = kvm_get_supported_cpuid(); 1334 + cpuid_hv = kvm_get_supported_hv_cpuid(); 1335 + 1336 + cpuid_full = malloc(sizeof(*cpuid_full) + 1337 + (cpuid_sys->nent + cpuid_hv->nent) * 1338 + sizeof(struct kvm_cpuid_entry2)); 1339 + if (!cpuid_full) { 1340 + perror("malloc"); 1341 + abort(); 1342 + } 1343 + 1344 + /* Need to skip KVM CPUID leaves 0x400000xx */ 1345 + for (i = 0; i < cpuid_sys->nent; i++) { 1346 + if (cpuid_sys->entries[i].function >= 0x40000000 && 1347 + cpuid_sys->entries[i].function < 0x40000100) 1348 + continue; 1349 + cpuid_full->entries[nent] = cpuid_sys->entries[i]; 1350 + nent++; 1351 + } 1352 + 1353 + memcpy(&cpuid_full->entries[nent], cpuid_hv->entries, 1354 + cpuid_hv->nent * sizeof(struct kvm_cpuid_entry2)); 1355 + cpuid_full->nent = nent + cpuid_hv->nent; 1356 + } 1357 + 1358 + vcpu_set_cpuid(vm, vcpuid, cpuid_full); 1359 + } 1360 + 1326 1361 struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid) 1327 1362 { 1328 1363 static struct kvm_cpuid2 *cpuid;
+2 -38
tools/testing/selftests/kvm/x86_64/evmcs_test.c
··· 78 78 GUEST_ASSERT(vmlaunch()); 79 79 } 80 80 81 - struct kvm_cpuid2 *guest_get_cpuid(void) 82 - { 83 - static struct kvm_cpuid2 *cpuid_full; 84 - struct kvm_cpuid2 *cpuid_sys, *cpuid_hv; 85 - int i, nent = 0; 86 - 87 - if (cpuid_full) 88 - return cpuid_full; 89 - 90 - cpuid_sys = kvm_get_supported_cpuid(); 91 - cpuid_hv = kvm_get_supported_hv_cpuid(); 92 - 93 - cpuid_full = malloc(sizeof(*cpuid_full) + 94 - (cpuid_sys->nent + cpuid_hv->nent) * 95 - sizeof(struct kvm_cpuid_entry2)); 96 - if (!cpuid_full) { 97 - perror("malloc"); 98 - abort(); 99 - } 100 - 101 - /* Need to skip KVM CPUID leaves 0x400000xx */ 102 - for (i = 0; i < cpuid_sys->nent; i++) { 103 - if (cpuid_sys->entries[i].function >= 0x40000000 && 104 - cpuid_sys->entries[i].function < 0x40000100) 105 - continue; 106 - cpuid_full->entries[nent] = cpuid_sys->entries[i]; 107 - nent++; 108 - } 109 - 110 - memcpy(&cpuid_full->entries[nent], cpuid_hv->entries, 111 - cpuid_hv->nent * sizeof(struct kvm_cpuid_entry2)); 112 - cpuid_full->nent = nent + cpuid_hv->nent; 113 - 114 - return cpuid_full; 115 - } 116 - 117 81 int main(int argc, char *argv[]) 118 82 { 119 83 vm_vaddr_t vmx_pages_gva = 0; ··· 99 135 exit(KSFT_SKIP); 100 136 } 101 137 102 - vcpu_set_cpuid(vm, VCPU_ID, guest_get_cpuid()); 138 + vcpu_set_hv_cpuid(vm, VCPU_ID); 103 139 vcpu_enable_evmcs(vm, VCPU_ID); 104 140 105 141 run = vcpu_state(vm, VCPU_ID); ··· 143 179 /* Restore state in a new VM. */ 144 180 kvm_vm_restart(vm, O_RDWR); 145 181 vm_vcpu_add(vm, VCPU_ID); 146 - vcpu_set_cpuid(vm, VCPU_ID, guest_get_cpuid()); 182 + vcpu_set_hv_cpuid(vm, VCPU_ID); 147 183 vcpu_enable_evmcs(vm, VCPU_ID); 148 184 vcpu_load_state(vm, VCPU_ID, state); 149 185 run = vcpu_state(vm, VCPU_ID);
+1 -1
tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
··· 92 92 } 93 93 94 94 vm = vm_create_default(VCPU_ID, 0, (void *) guest_code); 95 - vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); 95 + vcpu_set_hv_cpuid(vm, VCPU_ID); 96 96 97 97 struct kvm_xen_hvm_config hvmc = { 98 98 .flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL,