Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
"Bugfixes and strengthening the validity checks on inputs from new
userspace APIs.

Now I know why I shouldn't prepare pull requests on the weekend; it's
hard to concentrate if your son is shouting about his latest Minecraft
builds in your ear. Fortunately all the patches were ready and I just
had to check the test results..."

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
KVM: SVM: Fix disable pause loop exit/pause filtering capability on SVM
KVM: LAPIC: Prevent setting the tscdeadline timer if the lapic is hw disabled
KVM: arm64: Don't inherit exec permission across page-table levels
KVM: arm64: Prevent vcpu_has_ptrauth from generating OOL functions
KVM: nVMX: check for invalid hdr.vmx.flags
KVM: nVMX: check for required but missing VMCS12 in KVM_SET_NESTED_STATE
selftests: kvm: do not set guest mode flag

Changed files
+72 -24
arch
arm64
include
kvm
x86
tools
testing
selftests
+8 -3
arch/arm64/include/asm/kvm_host.h
··· 380 380 #define vcpu_has_sve(vcpu) (system_supports_sve() && \ 381 381 ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE)) 382 382 383 - #define vcpu_has_ptrauth(vcpu) ((system_supports_address_auth() || \ 384 - system_supports_generic_auth()) && \ 385 - ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)) 383 + #ifdef CONFIG_ARM64_PTR_AUTH 384 + #define vcpu_has_ptrauth(vcpu) \ 385 + ((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) || \ 386 + cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) && \ 387 + (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH) 388 + #else 389 + #define vcpu_has_ptrauth(vcpu) false 390 + #endif 386 391 387 392 #define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs) 388 393
+6 -5
arch/arm64/kvm/mmu.c
··· 1326 1326 return true; 1327 1327 } 1328 1328 1329 - static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr) 1329 + static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr, unsigned long sz) 1330 1330 { 1331 1331 pud_t *pudp; 1332 1332 pmd_t *pmdp; ··· 1338 1338 return false; 1339 1339 1340 1340 if (pudp) 1341 - return kvm_s2pud_exec(pudp); 1341 + return sz <= PUD_SIZE && kvm_s2pud_exec(pudp); 1342 1342 else if (pmdp) 1343 - return kvm_s2pmd_exec(pmdp); 1343 + return sz <= PMD_SIZE && kvm_s2pmd_exec(pmdp); 1344 1344 else 1345 - return kvm_s2pte_exec(ptep); 1345 + return sz == PAGE_SIZE && kvm_s2pte_exec(ptep); 1346 1346 } 1347 1347 1348 1348 static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, ··· 1958 1958 * execute permissions, and we preserve whatever we have. 1959 1959 */ 1960 1960 needs_exec = exec_fault || 1961 - (fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa)); 1961 + (fault_status == FSC_PERM && 1962 + stage2_is_exec(kvm, fault_ipa, vma_pagesize)); 1962 1963 1963 1964 if (vma_pagesize == PUD_SIZE) { 1964 1965 pud_t new_pud = kvm_pfn_pud(pfn, mem_type);
+1 -1
arch/x86/kvm/lapic.c
··· 2195 2195 { 2196 2196 struct kvm_lapic *apic = vcpu->arch.apic; 2197 2197 2198 - if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) || 2198 + if (!kvm_apic_present(vcpu) || apic_lvtt_oneshot(apic) || 2199 2199 apic_lvtt_period(apic)) 2200 2200 return; 2201 2201
+6 -3
arch/x86/kvm/svm/svm.c
··· 1090 1090 svm->nested.vmcb = 0; 1091 1091 svm->vcpu.arch.hflags = 0; 1092 1092 1093 - if (pause_filter_count) { 1093 + if (!kvm_pause_in_guest(svm->vcpu.kvm)) { 1094 1094 control->pause_filter_count = pause_filter_count; 1095 1095 if (pause_filter_thresh) 1096 1096 control->pause_filter_thresh = pause_filter_thresh; ··· 2693 2693 struct kvm_vcpu *vcpu = &svm->vcpu; 2694 2694 bool in_kernel = (svm_get_cpl(vcpu) == 0); 2695 2695 2696 - if (pause_filter_thresh) 2696 + if (!kvm_pause_in_guest(vcpu->kvm)) 2697 2697 grow_ple_window(vcpu); 2698 2698 2699 2699 kvm_vcpu_on_spin(vcpu, in_kernel); ··· 3780 3780 3781 3781 static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu) 3782 3782 { 3783 - if (pause_filter_thresh) 3783 + if (!kvm_pause_in_guest(vcpu->kvm)) 3784 3784 shrink_ple_window(vcpu); 3785 3785 } 3786 3786 ··· 3958 3958 3959 3959 static int svm_vm_init(struct kvm *kvm) 3960 3960 { 3961 + if (!pause_filter_count || !pause_filter_thresh) 3962 + kvm->arch.pause_in_guest = true; 3963 + 3961 3964 if (avic) { 3962 3965 int ret = avic_vm_init(kvm); 3963 3966 if (ret)
+13 -3
arch/x86/kvm/vmx/nested.c
··· 6079 6079 ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON)) 6080 6080 return -EINVAL; 6081 6081 6082 + if (kvm_state->hdr.vmx.flags & ~KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) 6083 + return -EINVAL; 6084 + 6082 6085 /* 6083 6086 * SMM temporarily disables VMX, so we cannot be in guest mode, 6084 6087 * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags ··· 6111 6108 if (ret) 6112 6109 return ret; 6113 6110 6114 - /* Empty 'VMXON' state is permitted */ 6115 - if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) 6116 - return 0; 6111 + /* Empty 'VMXON' state is permitted if no VMCS loaded */ 6112 + if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) { 6113 + /* See vmx_has_valid_vmcs12. */ 6114 + if ((kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE) || 6115 + (kvm_state->flags & KVM_STATE_NESTED_EVMCS) || 6116 + (kvm_state->hdr.vmx.vmcs12_pa != -1ull)) 6117 + return -EINVAL; 6118 + else 6119 + return 0; 6120 + } 6117 6121 6118 6122 if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) { 6119 6123 if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa ||
+5
arch/x86/kvm/vmx/nested.h
··· 47 47 return to_vmx(vcpu)->nested.cached_shadow_vmcs12; 48 48 } 49 49 50 + /* 51 + * Note: the same condition is checked against the state provided by userspace 52 + * in vmx_set_nested_state; if it is satisfied, the nested state must include 53 + * the VMCS12. 54 + */ 50 55 static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu) 51 56 { 52 57 struct vcpu_vmx *vmx = to_vmx(vcpu);
+33 -9
tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
··· 76 76 void set_default_vmx_state(struct kvm_nested_state *state, int size) 77 77 { 78 78 memset(state, 0, size); 79 - state->flags = KVM_STATE_NESTED_GUEST_MODE | 80 - KVM_STATE_NESTED_RUN_PENDING; 81 79 if (have_evmcs) 82 - state->flags |= KVM_STATE_NESTED_EVMCS; 80 + state->flags = KVM_STATE_NESTED_EVMCS; 83 81 state->format = 0; 84 82 state->size = size; 85 83 state->hdr.vmx.vmxon_pa = 0x1000; ··· 146 148 state->hdr.vmx.smm.flags = 1; 147 149 test_nested_state_expect_einval(vm, state); 148 150 151 + /* Invalid flags are rejected. */ 152 + set_default_vmx_state(state, state_sz); 153 + state->hdr.vmx.flags = ~0; 154 + test_nested_state_expect_einval(vm, state); 155 + 149 156 /* It is invalid to have vmxon_pa == -1ull and vmcs_pa != -1ull. */ 150 157 set_default_vmx_state(state, state_sz); 151 158 state->hdr.vmx.vmxon_pa = -1ull; ··· 188 185 state->hdr.vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE; 189 186 test_nested_state_expect_einval(vm, state); 190 187 191 - /* Size must be large enough to fit kvm_nested_state and vmcs12. */ 188 + /* 189 + * Size must be large enough to fit kvm_nested_state and vmcs12 190 + * if VMCS12 physical address is set 191 + */ 192 192 set_default_vmx_state(state, state_sz); 193 193 state->size = sizeof(*state); 194 + state->flags = 0; 195 + test_nested_state_expect_einval(vm, state); 196 + 197 + set_default_vmx_state(state, state_sz); 198 + state->size = sizeof(*state); 199 + state->flags = 0; 200 + state->hdr.vmx.vmcs12_pa = -1; 194 201 test_nested_state(vm, state); 202 + 203 + /* 204 + * KVM_SET_NESTED_STATE succeeds with invalid VMCS 205 + * contents but L2 not running. 206 + */ 207 + set_default_vmx_state(state, state_sz); 208 + state->flags = 0; 209 + test_nested_state(vm, state); 210 + 211 + /* Invalid flags are rejected, even if no VMCS loaded. */ 212 + set_default_vmx_state(state, state_sz); 213 + state->size = sizeof(*state); 214 + state->flags = 0; 215 + state->hdr.vmx.vmcs12_pa = -1; 216 + state->hdr.vmx.flags = ~0; 217 + test_nested_state_expect_einval(vm, state); 195 218 196 219 /* vmxon_pa cannot be the same address as vmcs_pa. */ 197 220 set_default_vmx_state(state, state_sz); 198 221 state->hdr.vmx.vmxon_pa = 0; 199 222 state->hdr.vmx.vmcs12_pa = 0; 200 - test_nested_state_expect_einval(vm, state); 201 - 202 - /* The revision id for vmcs12 must be VMCS12_REVISION. */ 203 - set_default_vmx_state(state, state_sz); 204 - set_revision_id_for_vmcs12(state, 0); 205 223 test_nested_state_expect_einval(vm, state); 206 224 207 225 /*