Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
"ARM:
- fix fault on page table writes during instruction fetch

s390:
- doc improvement

x86:
- The obvious patches are always the ones that turn out to be
completely broken. /me hangs his head in shame"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
Revert "KVM: Check the allocation of pv cpu mask"
KVM: arm64: Remove S1PTW check from kvm_vcpu_dabt_iswrite()
KVM: arm64: Assume write fault on S1PTW permission fault on instruction fetch
docs: kvm: add documentation for KVM_CAP_S390_DIAG318

Changed files: +37 -25
Documentation/virt/kvm/api.rst (+20)

···
 is supported, than the other should as well and vice versa. For arm64
 see Documentation/virt/kvm/devices/vcpu.rst "KVM_ARM_VCPU_PVTIME_CTRL".
 For x86 see Documentation/virt/kvm/msr.rst "MSR_KVM_STEAL_TIME".
+
+8.25 KVM_CAP_S390_DIAG318
+-------------------------
+
+:Architectures: s390
+
+This capability enables a guest to set information about its control program
+(i.e. guest kernel type and version). The information is helpful during
+system/firmware service events, providing additional data about the guest
+environments running on the machine.
+
+The information is associated with the DIAGNOSE 0x318 instruction, which sets
+an 8-byte value consisting of a one-byte Control Program Name Code (CPNC) and
+a 7-byte Control Program Version Code (CPVC). The CPNC determines what
+environment the control program is running in (e.g. Linux, z/VM...), and the
+CPVC is used for information specific to OS (e.g. Linux version, Linux
+distribution...)
+
+If this capability is available, then the CPNC and CPVC can be synchronized
+between KVM and userspace via the sync regs mechanism (KVM_SYNC_DIAG318).
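To make the layout concrete, here is a minimal user-space sketch of packing the 8-byte DIAGNOSE 0x318 value described above. The helper name and the sample CPNC/CPVC values are invented for illustration; real encodings are defined by the control program, not by this patch.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: pack a DIAG 0x318 value from a one-byte CPNC and
 * a 7-byte CPVC, assuming the CPNC occupies the high-order byte. */
static uint64_t diag318_pack(uint8_t cpnc, uint64_t cpvc)
{
	return ((uint64_t)cpnc << 56) | (cpvc & 0x00ffffffffffffffULL);
}

int main(void)
{
	/* Made-up sample: CPNC 0x04 with an arbitrary 7-byte CPVC. */
	uint64_t v = diag318_pack(0x04, 0x0000000509000000ULL);

	printf("DIAG 0x318 value: 0x%016llx\n", (unsigned long long)v);
	return 0;
}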
arch/arm64/include/asm/kvm_emulate.h (+11 -3)

···
 	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
 }
 
-static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
 {
 	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
 }
 
+/* Always check for S1PTW *before* using this. */
 static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR) ||
-		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
 }
 
 static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
···
 static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
 {
 	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
+}
+
+static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
 }
 
 static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
···
 
 static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
 {
+	if (kvm_vcpu_abt_iss1tw(vcpu))
+		return true;
+
 	if (kvm_vcpu_trap_is_iabt(vcpu))
 		return false;
 
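For reference, here is a stand-alone sketch of the fault classification these helpers implement after the patch. The ESR_ELx bit positions are the architected ones (WNR is bit 6, S1PTW is bit 7), but the functions are reimplemented for illustration rather than copied from kvm_emulate.h.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Architected ESR_ELx syndrome bits: */
#define ESR_WNR		(UINT64_C(1) << 6)	/* Write-not-Read */
#define ESR_S1PTW	(UINT64_C(1) << 7)	/* Stage-1 page table walk */

/* Mirrors of the patched helpers, for illustration only. */
static bool abt_iss1tw(uint64_t esr)   { return esr & ESR_S1PTW; }
static bool dabt_iswrite(uint64_t esr) { return esr & ESR_WNR; }

/* An abort taken on a stage-1 walk is now always treated as a write,
 * since the walker may need to update AF/DBM bits, even when the walk
 * was for an instruction fetch; this keeps an S1PTW permission fault
 * on exec from faulting forever. */
static bool is_write_fault(uint64_t esr, bool is_iabt)
{
	if (abt_iss1tw(esr))
		return true;
	if (is_iabt)
		return false;
	return dabt_iswrite(esr);
}

/* Symmetrically, an S1PTW abort no longer counts as an exec fault, so
 * a single fault can never be both write and exec (the VM_BUG_ON in
 * mmu.c below relies on this). */
static bool is_exec_fault(uint64_t esr, bool is_iabt)
{
	return is_iabt && !abt_iss1tw(esr);
}

int main(void)
{
	uint64_t esr = ESR_S1PTW;	/* S1PTW abort on an instruction fetch */

	printf("write=%d exec=%d\n",
	       is_write_fault(esr, true),	/* 1 */
	       is_exec_fault(esr, true));	/* 0 */
	return 0;
}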
arch/arm64/kvm/hyp/include/hyp/switch.h (+1 -1)

···
 		kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
 		kvm_vcpu_dabt_isvalid(vcpu) &&
 		!kvm_vcpu_abt_issea(vcpu) &&
-		!kvm_vcpu_dabt_iss1tw(vcpu);
+		!kvm_vcpu_abt_iss1tw(vcpu);
 
 	if (valid) {
 		int ret = __vgic_v2_perform_cpuif_access(vcpu);
arch/arm64/kvm/mmu.c (+2 -2)

···
 	struct kvm_s2_mmu *mmu = vcpu->arch.hw_mmu;
 
 	write_fault = kvm_is_write_fault(vcpu);
-	exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
+	exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
 	VM_BUG_ON(write_fault && exec_fault);
 
 	if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
···
 		goto out;
 	}
 
-	if (kvm_vcpu_dabt_iss1tw(vcpu)) {
+	if (kvm_vcpu_abt_iss1tw(vcpu)) {
 		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 		ret = 1;
 		goto out_unlock;
arch/x86/kernel/kvm.c (+3 -19)

···
 	}
 
 	if (pv_tlb_flush_supported()) {
+		pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
 		pv_ops.mmu.tlb_remove_table = tlb_remove_table;
 		pr_info("KVM setup pv remote TLB flush\n");
 	}
···
 }
 arch_initcall(activate_jump_labels);
 
-static void kvm_free_pv_cpu_mask(void)
-{
-	unsigned int cpu;
-
-	for_each_possible_cpu(cpu)
-		free_cpumask_var(per_cpu(__pv_cpu_mask, cpu));
-}
-
 static __init int kvm_alloc_cpumask(void)
 {
 	int cpu;
···
 
 	if (alloc)
 		for_each_possible_cpu(cpu) {
-			if (!zalloc_cpumask_var_node(
-				per_cpu_ptr(&__pv_cpu_mask, cpu),
-				GFP_KERNEL, cpu_to_node(cpu))) {
-				goto zalloc_cpumask_fail;
-			}
+			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
+						GFP_KERNEL, cpu_to_node(cpu));
 		}
 
-	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
-	pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
 	return 0;
-
-zalloc_cpumask_fail:
-	kvm_free_pv_cpu_mask();
-	return -ENOMEM;
 }
 arch_initcall(kvm_alloc_cpumask);
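Part of what this revert undoes is moving the pv_ops.mmu.flush_tlb_others assignment back from kvm_alloc_cpumask() (an arch_initcall) into the early guest setup path: assignments to pv_ops generally have to land before the paravirt call sites are patched early in boot, or they never take effect. A simplified, stand-alone analogy of that failure mode (all names invented, not kernel code):

#include <stdio.h>

static void native_flush(void) { puts("native flush"); }
static void kvm_flush(void)    { puts("kvm pv flush"); }

/* Stand-in for a pv_ops slot, defaulting to the native implementation. */
static void (*pv_flush)(void) = native_flush;

/* Stand-in for a patched call site: it captures the pointer's value at
 * patch time instead of reading pv_flush on every call. */
static void (*patched_site)(void);

static void patch_alternatives(void)
{
	patched_site = pv_flush;	/* snapshot taken here */
}

int main(void)
{
	patch_alternatives();	/* happens early, before "initcalls" */
	pv_flush = kvm_flush;	/* the too-late initcall-time assignment */
	patched_site();		/* still prints "native flush" */
	return 0;
}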