Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:

- PAE and PKU bugfixes for x86

- selftests fix for new binutils

- MMU notifier fix for arm64

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
KVM: arm64: Only reschedule if MMU_NOTIFIER_RANGE_BLOCKABLE is not set
KVM: Pass MMU notifier range flags to kvm_unmap_hva_range()
kvm: x86: Toggling CR4.PKE does not load PDPTEs in PAE mode
kvm: x86: Toggling CR4.SMAP does not load PDPTEs in PAE mode
KVM: x86: fix access code passed to gva_to_gpa
selftests: kvm: Use a shorter encoding to clear RAX

12 files changed, 36 insertions(+), 18 deletions(-)
+1 -1
arch/arm64/include/asm/kvm_host.h
@@ -473,7 +473,7 @@
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 int kvm_unmap_hva_range(struct kvm *kvm,
-			unsigned long start, unsigned long end);
+			unsigned long start, unsigned long end, unsigned flags);
 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
+14 -5
arch/arm64/kvm/mmu.c
@@ -343,7 +343,8 @@
  * destroying the VM), otherwise another faulting VCPU may come in and mess
  * with things behind our backs.
  */
-static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
+static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size,
+				 bool may_block)
 {
 	struct kvm *kvm = mmu->kvm;
 	pgd_t *pgd;
@@ -370,9 +369,14 @@
 		 * If the range is too large, release the kvm->mmu_lock
 		 * to prevent starvation and lockup detector warnings.
 		 */
-		if (next != end)
+		if (may_block && next != end)
 			cond_resched_lock(&kvm->mmu_lock);
 	} while (pgd++, addr = next, addr != end);
+}
+
+static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
+{
+	__unmap_stage2_range(mmu, start, size, true);
 }
 
 static void stage2_flush_ptes(struct kvm_s2_mmu *mmu, pmd_t *pmd,
@@ -2214,18 +2208,21 @@
 
 static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
 {
-	unmap_stage2_range(&kvm->arch.mmu, gpa, size);
+	unsigned flags = *(unsigned *)data;
+	bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE;
+
+	__unmap_stage2_range(&kvm->arch.mmu, gpa, size, may_block);
 	return 0;
 }
 
 int kvm_unmap_hva_range(struct kvm *kvm,
-			unsigned long start, unsigned long end)
+			unsigned long start, unsigned long end, unsigned flags)
 {
 	if (!kvm->arch.mmu.pgd)
 		return 0;
 
 	trace_kvm_unmap_hva_range(start, end);
-	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
+	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags);
 	return 0;
 }
 
+1 -1
arch/mips/include/asm/kvm_host.h
@@ -969,7 +969,7 @@
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 int kvm_unmap_hva_range(struct kvm *kvm,
-			unsigned long start, unsigned long end);
+			unsigned long start, unsigned long end, unsigned flags);
 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
+2 -1
arch/mips/kvm/mmu.c
@@ -486,7 +486,8 @@
 	return 1;
 }
 
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+			unsigned flags)
 {
 	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
 
+2 -1
arch/powerpc/include/asm/kvm_host.h
@@ -58,7 +58,8 @@
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 
 extern int kvm_unmap_hva_range(struct kvm *kvm,
-			       unsigned long start, unsigned long end);
+			       unsigned long start, unsigned long end,
+			       unsigned flags);
 extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 extern int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+2 -1
arch/powerpc/kvm/book3s.c
@@ -834,7 +834,8 @@
 	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change);
 }
 
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+			unsigned flags)
 {
 	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
 }
+2 -1
arch/powerpc/kvm/e500_mmu_host.c
@@ -734,7 +734,8 @@
 	return 0;
 }
 
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+			unsigned flags)
 {
 	/* kvm_unmap_hva flushes everything anyways */
 	kvm_unmap_hva(kvm, start);
+2 -1
arch/x86/include/asm/kvm_host.h
@@ -1596,7 +1596,8 @@
 	_ASM_EXTABLE(666b, 667b)
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+			unsigned flags);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+2 -1
arch/x86/kvm/mmu/mmu.c
@@ -1916,7 +1916,8 @@
 	return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
 }
 
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+			unsigned flags)
 {
 	return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
 }
+4 -2
arch/x86/kvm/x86.c
@@ -975,7 +975,7 @@
 {
 	unsigned long old_cr4 = kvm_read_cr4(vcpu);
 	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
-				   X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
+				   X86_CR4_SMEP;
 
 	if (kvm_valid_cr4(vcpu, cr4))
 		return 1;
@@ -10751,9 +10751,11 @@
 void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code)
 {
 	struct x86_exception fault;
+	u32 access = error_code &
+		(PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK);
 
 	if (!(error_code & PFERR_PRESENT_MASK) ||
-	    vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, error_code, &fault) != UNMAPPED_GVA) {
+	    vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, &fault) != UNMAPPED_GVA) {
 		/*
 		 * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page
 		 * tables probably do not match the TLB. Just proceed
+2 -2
tools/testing/selftests/kvm/x86_64/debug_regs.c
@@ -40,11 +40,11 @@
 
 	/* Single step test, covers 2 basic instructions and 2 emulated */
 	asm volatile("ss_start: "
-		     "xor %%rax,%%rax\n\t"
+		     "xor %%eax,%%eax\n\t"
		     "cpuid\n\t"
		     "movl $0x1a0,%%ecx\n\t"
		     "rdmsr\n\t"
-		     : : : "rax", "ecx");
+		     : : : "eax", "ebx", "ecx", "edx");
 
	/* DR6.BD test */
	asm volatile("bd_start: mov %%dr0, %%rax" : : : "rax");
+2 -1
virt/kvm/kvm_main.c
@@ -482,7 +482,8 @@
 	 * count is also read inside the mmu_lock critical section.
 	 */
 	kvm->mmu_notifier_count++;
-	need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end);
+	need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end,
+					     range->flags);
 	need_tlb_flush |= kvm->tlbs_dirty;
 	/* we've to flush the tlb before the pages can be freed */
 	if (need_tlb_flush)