Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
"ARM64:

- Fix pKVM error path on init, making sure we do not change critical
system registers as we're about to fail

- Make sure that the host's vector length is capped at a value
common to all CPUs

- Fix kvm_has_feat*() handling of "negative" features, as the current
code is pretty broken

- Promote Joey to the status of official reviewer, while James steps
down -- hopefully only temporarily

x86:

- Fix compilation with KVM_INTEL=KVM_AMD=n

- Fix disabling KVM_X86_QUIRK_SLOT_ZAP_ALL when shadow MMU is in use

Selftests:

- Fix compilation on non-x86 architectures"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
x86/reboot: emergency callbacks are now registered by common KVM code
KVM: x86: leave kvm.ko out of the build if no vendor module is requested
KVM: x86/mmu: fix KVM_X86_QUIRK_SLOT_ZAP_ALL for shadow MMU
KVM: arm64: Fix kvm_has_feat*() handling of negative features
KVM: selftests: Fix build on architectures other than x86_64
KVM: arm64: Another reviewer reshuffle
KVM: arm64: Constrain the host to the maximum shared SVE VL with pKVM
KVM: arm64: Fix __pkvm_init_vcpu cptr_el2 error path

Changed files
+91 -43
+1 -1
MAINTAINERS
···
 KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
 M: Marc Zyngier <maz@kernel.org>
 M: Oliver Upton <oliver.upton@linux.dev>
-R: James Morse <james.morse@arm.com>
+R: Joey Gouly <joey.gouly@arm.com>
 R: Suzuki K Poulose <suzuki.poulose@arm.com>
 R: Zenghui Yu <yuzenghui@huawei.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+13 -12
arch/arm64/include/asm/kvm_host.h
···
                 sign_extend64(__val, id##_##fld##_WIDTH - 1); \
         })
 
-#define expand_field_sign(id, fld, val) \
-        (id##_##fld##_SIGNED ? \
-         __expand_field_sign_signed(id, fld, val) : \
-         __expand_field_sign_unsigned(id, fld, val))
-
 #define get_idreg_field_unsigned(kvm, id, fld) \
         ({ \
                 u64 __val = kvm_read_vm_id_reg((kvm), SYS_##id); \
···
 #define get_idreg_field_enum(kvm, id, fld) \
         get_idreg_field_unsigned(kvm, id, fld)
 
-#define get_idreg_field(kvm, id, fld) \
+#define kvm_cmp_feat_signed(kvm, id, fld, op, limit) \
+        (get_idreg_field_signed((kvm), id, fld) op __expand_field_sign_signed(id, fld, limit))
+
+#define kvm_cmp_feat_unsigned(kvm, id, fld, op, limit) \
+        (get_idreg_field_unsigned((kvm), id, fld) op __expand_field_sign_unsigned(id, fld, limit))
+
+#define kvm_cmp_feat(kvm, id, fld, op, limit) \
         (id##_##fld##_SIGNED ? \
-         get_idreg_field_signed(kvm, id, fld) : \
-         get_idreg_field_unsigned(kvm, id, fld))
+         kvm_cmp_feat_signed(kvm, id, fld, op, limit) : \
+         kvm_cmp_feat_unsigned(kvm, id, fld, op, limit))
 
 #define kvm_has_feat(kvm, id, fld, limit) \
-        (get_idreg_field((kvm), id, fld) >= expand_field_sign(id, fld, limit))
+        kvm_cmp_feat(kvm, id, fld, >=, limit)
 
 #define kvm_has_feat_enum(kvm, id, fld, val) \
-        (get_idreg_field_unsigned((kvm), id, fld) == __expand_field_sign_unsigned(id, fld, val))
+        kvm_cmp_feat_unsigned(kvm, id, fld, ==, val)
 
 #define kvm_has_feat_range(kvm, id, fld, min, max) \
-        (get_idreg_field((kvm), id, fld) >= expand_field_sign(id, fld, min) && \
-         get_idreg_field((kvm), id, fld) <= expand_field_sign(id, fld, max))
+        (kvm_cmp_feat(kvm, id, fld, >=, min) && \
+         kvm_cmp_feat(kvm, id, fld, <=, max))
 
 /* Check for a given level of PAuth support */
 #define kvm_has_pauth(k, l) \
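The failure mode behind this hunk is worth spelling out: the old get_idreg_field() and expand_field_sign() helpers were ?: expressions with one s64 arm and one u64 arm, and C's usual arithmetic conversions give such an expression type u64, so a sign-extended "negative" field value ended up in an unsigned comparison. The new kvm_cmp_feat_{signed,unsigned}() helpers pick the signedness first and compare second. The stand-alone sketch below only illustrates the effect with made-up field/limit values; it is user-space C, not kernel code.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int field_is_signed = 1;
        int64_t field_signed = -1;      /* sign-extended field value, e.g. 0xf read as -1 */
        uint64_t field_unsigned = 0xf;  /* the same bits read as unsigned */
        uint64_t limit = 1;             /* the level being tested for */

        /* Old shape: the ?: mixes an s64 arm and a u64 arm, so its type is
         * u64 and the sign-extended -1 becomes 0xffffffffffffffff before
         * the comparison takes place. */
        uint64_t old_style = field_is_signed ? field_signed : field_unsigned;
        printf("old: %d\n", old_style >= limit);        /* 1: feature wrongly reported */

        /* New shape: choose the comparison's signedness, then compare. */
        int new_style = field_is_signed ?
                        (field_signed >= (int64_t)limit) :
                        (field_unsigned >= limit);
        printf("new: %d\n", new_style);                 /* 0: feature correctly absent */

        return 0;
}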
+1 -1
arch/arm64/kvm/hyp/include/hyp/switch.h
···
         struct cpu_sve_state *sve_state = *host_data_ptr(sve_state);
 
         sve_state->zcr_el1 = read_sysreg_el1(SYS_ZCR);
-        write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
+        write_sysreg_s(sve_vq_from_vl(kvm_host_sve_max_vl) - 1, SYS_ZCR_EL2);
         __sve_save_state(sve_state->sve_regs + sve_ffr_offset(kvm_host_sve_max_vl),
                          &sve_state->fpsr,
                          true);
+7 -5
arch/arm64/kvm/hyp/nvhe/hyp-main.c
···
          */
         sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
         __sve_save_state(vcpu_sve_pffr(vcpu), &vcpu->arch.ctxt.fp_regs.fpsr, true);
-        write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
+        write_sysreg_s(sve_vq_from_vl(kvm_host_sve_max_vl) - 1, SYS_ZCR_EL2);
 }
 
 static void __hyp_sve_restore_host(void)
···
          * the host. The layout of the data when saving the sve state depends
          * on the VL, so use a consistent (i.e., the maximum) host VL.
          *
-         * Setting ZCR_EL2 to ZCR_ELx_LEN_MASK sets the effective length
-         * supported by the system (or limited at EL3).
+         * Note that this constrains the PE to the maximum shared VL
+         * that was discovered, if we wish to use larger VLs this will
+         * need to be revisited.
          */
-        write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
+        write_sysreg_s(sve_vq_from_vl(kvm_host_sve_max_vl) - 1, SYS_ZCR_EL2);
         __sve_restore_state(sve_state->sve_regs + sve_ffr_offset(kvm_host_sve_max_vl),
                             &sve_state->fpsr,
                             true);
···
         case ESR_ELx_EC_SVE:
                 cpacr_clear_set(0, CPACR_ELx_ZEN);
                 isb();
-                sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
+                sve_cond_update_zcr_vq(sve_vq_from_vl(kvm_host_sve_max_vl) - 1,
+                                       SYS_ZCR_EL2);
                 break;
         case ESR_ELx_EC_IABT_LOW:
         case ESR_ELx_EC_DABT_LOW:
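The replacement value is not arbitrary: ZCR_ELx.LEN encodes the vector length as "number of 128-bit quadwords minus one", and sve_vq_from_vl() converts a VL in bytes into quadwords, so sve_vq_from_vl(kvm_host_sve_max_vl) - 1 caps the effective VL at the largest length shared by all CPUs rather than the current PE's own maximum (which is what the all-ones ZCR_ELx_LEN_MASK used to request). A stand-alone sketch of the arithmetic; the 512-bit example VL is made up, and the helper only mirrors the kernel's sve_vq_from_vl():

#include <stdio.h>

#define SVE_VQ_BYTES            16                      /* one quadword = 128 bits */
#define sve_vq_from_vl(vl)      ((vl) / SVE_VQ_BYTES)   /* mirrors the kernel helper */

int main(void)
{
        unsigned int kvm_host_sve_max_vl = 64;  /* assumed: 512-bit VL shared by all CPUs */

        /* ZCR_ELx.LEN holds "quadwords - 1"; writing this caps the effective
         * VL at the shared maximum instead of whatever this PE supports. */
        unsigned int zcr_len = sve_vq_from_vl(kvm_host_sve_max_vl) - 1;

        printf("ZCR_EL2.LEN = %u -> effective VL = %u bytes\n",
               zcr_len, (zcr_len + 1) * SVE_VQ_BYTES);
        return 0;
}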
+4 -2
arch/arm64/kvm/hyp/nvhe/pkvm.c
···
 unlock:
         hyp_spin_unlock(&vm_table_lock);
 
-        if (ret)
+        if (ret) {
                 unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));
+                return ret;
+        }
 
         hyp_vcpu->vcpu.arch.cptr_el2 = kvm_get_reset_cptr_el2(&hyp_vcpu->vcpu);
 
-        return ret;
+        return 0;
 }
 
 static void
+2 -2
arch/x86/include/asm/reboot.h
···
 #define MRR_APM 1
 
 typedef void (cpu_emergency_virt_cb)(void);
-#if IS_ENABLED(CONFIG_KVM_INTEL) || IS_ENABLED(CONFIG_KVM_AMD)
+#if IS_ENABLED(CONFIG_KVM_X86)
 void cpu_emergency_register_virt_callback(cpu_emergency_virt_cb *callback);
 void cpu_emergency_unregister_virt_callback(cpu_emergency_virt_cb *callback);
 void cpu_emergency_disable_virtualization(void);
···
 static inline void cpu_emergency_register_virt_callback(cpu_emergency_virt_cb *callback) {}
 static inline void cpu_emergency_unregister_virt_callback(cpu_emergency_virt_cb *callback) {}
 static inline void cpu_emergency_disable_virtualization(void) {}
-#endif /* CONFIG_KVM_INTEL || CONFIG_KVM_AMD */
+#endif /* CONFIG_KVM_X86 */
 
 typedef void (*nmi_shootdown_cb)(int, struct pt_regs*);
 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
+2 -2
arch/x86/kernel/reboot.c
···
 
 static inline void nmi_shootdown_cpus_on_restart(void);
 
-#if IS_ENABLED(CONFIG_KVM_INTEL) || IS_ENABLED(CONFIG_KVM_AMD)
+#if IS_ENABLED(CONFIG_KVM_X86)
 /* RCU-protected callback to disable virtualization prior to reboot. */
 static cpu_emergency_virt_cb __rcu *cpu_emergency_virt_callback;
 
···
 }
 #else
 static void emergency_reboot_disable_virtualization(void) { }
-#endif /* CONFIG_KVM_INTEL || CONFIG_KVM_AMD */
+#endif /* CONFIG_KVM_X86 */
 
 void __attribute__((weak)) mach_reboot_fixups(void)
 {
+6 -3
arch/x86/kvm/Kconfig
···
 
 if VIRTUALIZATION
 
-config KVM
-        tristate "Kernel-based Virtual Machine (KVM) support"
+config KVM_X86
+        def_tristate KVM if KVM_INTEL || KVM_AMD
         depends on X86_LOCAL_APIC
         select KVM_COMMON
         select KVM_GENERIC_MMU_NOTIFIER
···
         select HAVE_KVM_PM_NOTIFIER if PM
         select KVM_GENERIC_HARDWARE_ENABLING
         select KVM_GENERIC_PRE_FAULT_MEMORY
+        select KVM_GENERIC_PRIVATE_MEM if KVM_SW_PROTECTED_VM
         select KVM_WERROR if WERROR
+
+config KVM
+        tristate "Kernel-based Virtual Machine (KVM) support"
         help
           Support hosting fully virtualized guest machines using hardware
           virtualization extensions. You will need a fairly recent
···
         bool "Enable support for KVM software-protected VMs"
         depends on EXPERT
         depends on KVM && X86_64
-        select KVM_GENERIC_PRIVATE_MEM
         help
           Enable support for KVM software-protected VMs. Currently, software-
           protected VMs are purely a development and testing vehicle for
+1 -1
arch/x86/kvm/Makefile
···
 kvm-amd-y += svm/svm_onhyperv.o
 endif
 
-obj-$(CONFIG_KVM) += kvm.o
+obj-$(CONFIG_KVM_X86) += kvm.o
 obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
 obj-$(CONFIG_KVM_AMD) += kvm-amd.o
 
+46 -14
arch/x86/kvm/mmu/mmu.c
···
         if (is_obsolete_sp((_kvm), (_sp))) { \
         } else
 
-#define for_each_gfn_valid_sp_with_gptes(_kvm, _sp, _gfn) \
+#define for_each_gfn_valid_sp(_kvm, _sp, _gfn) \
         for_each_valid_sp(_kvm, _sp, \
           &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)]) \
-                if ((_sp)->gfn != (_gfn) || !sp_has_gptes(_sp)) {} else
+                if ((_sp)->gfn != (_gfn)) {} else
+
+#define for_each_gfn_valid_sp_with_gptes(_kvm, _sp, _gfn) \
+        for_each_gfn_valid_sp(_kvm, _sp, _gfn) \
+                if (!sp_has_gptes(_sp)) {} else
 
 static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
···
         kvm_mmu_zap_all(kvm);
 }
 
-/*
- * Zapping leaf SPTEs with memslot range when a memslot is moved/deleted.
- *
- * Zapping non-leaf SPTEs, a.k.a. not-last SPTEs, isn't required, worst
- * case scenario we'll have unused shadow pages lying around until they
- * are recycled due to age or when the VM is destroyed.
- */
-static void kvm_mmu_zap_memslot_leafs(struct kvm *kvm, struct kvm_memory_slot *slot)
+static void kvm_mmu_zap_memslot_pages_and_flush(struct kvm *kvm,
+                                                struct kvm_memory_slot *slot,
+                                                bool flush)
+{
+        LIST_HEAD(invalid_list);
+        unsigned long i;
+
+        if (list_empty(&kvm->arch.active_mmu_pages))
+                goto out_flush;
+
+        /*
+         * Since accounting information is stored in struct kvm_arch_memory_slot,
+         * shadow pages deletion (e.g. unaccount_shadowed()) requires that all
+         * gfns with a shadow page have a corresponding memslot. Do so before
+         * the memslot goes away.
+         */
+        for (i = 0; i < slot->npages; i++) {
+                struct kvm_mmu_page *sp;
+                gfn_t gfn = slot->base_gfn + i;
+
+                for_each_gfn_valid_sp(kvm, sp, gfn)
+                        kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
+
+                if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
+                        kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
+                        flush = false;
+                        cond_resched_rwlock_write(&kvm->mmu_lock);
+                }
+        }
+
+out_flush:
+        kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
+}
+
+static void kvm_mmu_zap_memslot(struct kvm *kvm,
+                                struct kvm_memory_slot *slot)
 {
         struct kvm_gfn_range range = {
                 .slot = slot,
···
                 .end = slot->base_gfn + slot->npages,
                 .may_block = true,
         };
+        bool flush;
 
         write_lock(&kvm->mmu_lock);
-        if (kvm_unmap_gfn_range(kvm, &range))
-                kvm_flush_remote_tlbs_memslot(kvm, slot);
-
+        flush = kvm_unmap_gfn_range(kvm, &range);
+        kvm_mmu_zap_memslot_pages_and_flush(kvm, slot, flush);
         write_unlock(&kvm->mmu_lock);
 }
···
         if (kvm_memslot_flush_zap_all(kvm))
                 kvm_mmu_zap_all_fast(kvm);
         else
-                kvm_mmu_zap_memslot_leafs(kvm, slot);
+                kvm_mmu_zap_memslot(kvm, slot);
 }
 
 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
+2
tools/testing/selftests/kvm/memslot_modification_stress_test.c
···
         case 'i':
                 p.nr_iterations = atoi_positive("Number of iterations", optarg);
                 break;
+#ifdef __x86_64__
         case 'q':
                 p.disable_slot_zap_quirk = true;
 
                 TEST_REQUIRE(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) &
                              KVM_X86_QUIRK_SLOT_ZAP_ALL);
                 break;
+#endif
         case 'h':
         default:
                 help(argv[0]);
+6
tools/testing/selftests/kvm/memslot_perf_test.c
···
 static sem_t vcpu_ready;
 
 static bool map_unmap_verify;
+#ifdef __x86_64__
 static bool disable_slot_zap_quirk;
+#endif
 
 static bool verbose;
 #define pr_info_v(...) \
···
         uint32_t guest_page_size = data->vm->page_size;
         uint64_t movesrcgpa, movetestgpa;
 
+#ifdef __x86_64__
         if (disable_slot_zap_quirk)
                 vm_enable_cap(data->vm, KVM_CAP_DISABLE_QUIRKS2, KVM_X86_QUIRK_SLOT_ZAP_ALL);
+#endif
 
         movesrcgpa = vm_slot2gpa(data, data->nslots - 1);
 
···
         case 'd':
                 map_unmap_verify = true;
                 break;
+#ifdef __x86_64__
         case 'q':
                 disable_slot_zap_quirk = true;
                 TEST_REQUIRE(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) &
                              KVM_X86_QUIRK_SLOT_ZAP_ALL);
                 break;
+#endif
         case 's':
                 targs->nslots = atoi_paranoid(optarg);
                 if (targs->nslots <= 1 && targs->nslots != -1) {