Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
"ARM:
- fix fault on page table writes during instruction fetch

s390:
- doc improvement

x86:
- The obvious patches are always the ones that turn out to be
completely broken. /me hangs his head in shame"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
Revert "KVM: Check the allocation of pv cpu mask"
KVM: arm64: Remove S1PTW check from kvm_vcpu_dabt_iswrite()
KVM: arm64: Assume write fault on S1PTW permission fault on instruction fetch
docs: kvm: add documentation for KVM_CAP_S390_DIAG318

+37 -25
+20
Documentation/virt/kvm/api.rst
··· 6173 is supported, then the other should as well and vice versa. For arm64 6174 see Documentation/virt/kvm/devices/vcpu.rst "KVM_ARM_VCPU_PVTIME_CTRL". 6175 For x86 see Documentation/virt/kvm/msr.rst "MSR_KVM_STEAL_TIME".
··· 6173 is supported, then the other should as well and vice versa. For arm64 6174 see Documentation/virt/kvm/devices/vcpu.rst "KVM_ARM_VCPU_PVTIME_CTRL". 6175 For x86 see Documentation/virt/kvm/msr.rst "MSR_KVM_STEAL_TIME". 6176 + 6177 + 8.25 KVM_CAP_S390_DIAG318 6178 + ------------------------- 6179 + 6180 + :Architectures: s390 6181 + 6182 + This capability enables a guest to set information about its control program 6183 + (i.e. guest kernel type and version). The information is helpful during 6184 + system/firmware service events, providing additional data about the guest 6185 + environments running on the machine. 6186 + 6187 + The information is associated with the DIAGNOSE 0x318 instruction, which sets 6188 + an 8-byte value consisting of a one-byte Control Program Name Code (CPNC) and 6189 + a 7-byte Control Program Version Code (CPVC). The CPNC determines what 6190 + environment the control program is running in (e.g. Linux, z/VM...), and the 6191 + CPVC is used for information specific to OS (e.g. Linux version, Linux 6192 + distribution...) 6193 + 6194 + If this capability is available, then the CPNC and CPVC can be synchronized 6195 + between KVM and userspace via the sync regs mechanism (KVM_SYNC_DIAG318).
+11 -3
arch/arm64/include/asm/kvm_emulate.h
··· 298 return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT; 299 } 300 301 - static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu) 302 { 303 return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW); 304 } 305 306 static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu) 307 { 308 - return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR) || 309 - kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */ 310 } 311 312 static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu) ··· 333 static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu) 334 { 335 return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW; 336 } 337 338 static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu) ··· 377 378 static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu) 379 { 380 if (kvm_vcpu_trap_is_iabt(vcpu)) 381 return false; 382
··· 298 return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT; 299 } 300 301 + static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu) 302 { 303 return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW); 304 } 305 306 + /* Always check for S1PTW *before* using this. */ 307 static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu) 308 { 309 + return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR; 310 } 311 312 static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu) ··· 333 static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu) 334 { 335 return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW; 336 + } 337 + 338 + static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu) 339 + { 340 + return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu); 341 } 342 343 static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu) ··· 372 373 static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu) 374 { 375 + if (kvm_vcpu_abt_iss1tw(vcpu)) 376 + return true; 377 + 378 if (kvm_vcpu_trap_is_iabt(vcpu)) 379 return false; 380
+1 -1
arch/arm64/kvm/hyp/include/hyp/switch.h
··· 449 kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT && 450 kvm_vcpu_dabt_isvalid(vcpu) && 451 !kvm_vcpu_abt_issea(vcpu) && 452 - !kvm_vcpu_dabt_iss1tw(vcpu); 453 454 if (valid) { 455 int ret = __vgic_v2_perform_cpuif_access(vcpu);
··· 449 kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT && 450 kvm_vcpu_dabt_isvalid(vcpu) && 451 !kvm_vcpu_abt_issea(vcpu) && 452 + !kvm_vcpu_abt_iss1tw(vcpu); 453 454 if (valid) { 455 int ret = __vgic_v2_perform_cpuif_access(vcpu);
+2 -2
arch/arm64/kvm/mmu.c
··· 1849 struct kvm_s2_mmu *mmu = vcpu->arch.hw_mmu; 1850 1851 write_fault = kvm_is_write_fault(vcpu); 1852 - exec_fault = kvm_vcpu_trap_is_iabt(vcpu); 1853 VM_BUG_ON(write_fault && exec_fault); 1854 1855 if (fault_status == FSC_PERM && !write_fault && !exec_fault) { ··· 2131 goto out; 2132 } 2133 2134 - if (kvm_vcpu_dabt_iss1tw(vcpu)) { 2135 kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu)); 2136 ret = 1; 2137 goto out_unlock;
··· 1849 struct kvm_s2_mmu *mmu = vcpu->arch.hw_mmu; 1850 1851 write_fault = kvm_is_write_fault(vcpu); 1852 + exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu); 1853 VM_BUG_ON(write_fault && exec_fault); 1854 1855 if (fault_status == FSC_PERM && !write_fault && !exec_fault) { ··· 2131 goto out; 2132 } 2133 2134 + if (kvm_vcpu_abt_iss1tw(vcpu)) { 2135 kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu)); 2136 ret = 1; 2137 goto out_unlock;
+3 -19
arch/x86/kernel/kvm.c
··· 652 } 653 654 if (pv_tlb_flush_supported()) { 655 pv_ops.mmu.tlb_remove_table = tlb_remove_table; 656 pr_info("KVM setup pv remote TLB flush\n"); 657 } ··· 765 } 766 arch_initcall(activate_jump_labels); 767 768 - static void kvm_free_pv_cpu_mask(void) 769 - { 770 - unsigned int cpu; 771 - 772 - for_each_possible_cpu(cpu) 773 - free_cpumask_var(per_cpu(__pv_cpu_mask, cpu)); 774 - } 775 - 776 static __init int kvm_alloc_cpumask(void) 777 { 778 int cpu; ··· 783 784 if (alloc) 785 for_each_possible_cpu(cpu) { 786 - if (!zalloc_cpumask_var_node( 787 - per_cpu_ptr(&__pv_cpu_mask, cpu), 788 - GFP_KERNEL, cpu_to_node(cpu))) { 789 - goto zalloc_cpumask_fail; 790 - } 791 } 792 793 - apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself; 794 - pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others; 795 return 0; 796 - 797 - zalloc_cpumask_fail: 798 - kvm_free_pv_cpu_mask(); 799 - return -ENOMEM; 800 } 801 arch_initcall(kvm_alloc_cpumask); 802
··· 652 } 653 654 if (pv_tlb_flush_supported()) { 655 + pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others; 656 pv_ops.mmu.tlb_remove_table = tlb_remove_table; 657 pr_info("KVM setup pv remote TLB flush\n"); 658 } ··· 764 } 765 arch_initcall(activate_jump_labels); 766 767 static __init int kvm_alloc_cpumask(void) 768 { 769 int cpu; ··· 790 791 if (alloc) 792 for_each_possible_cpu(cpu) { 793 + zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu), 794 + GFP_KERNEL, cpu_to_node(cpu)); 795 } 796 797 return 0; 798 } 799 arch_initcall(kvm_alloc_cpumask); 800