Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: Pass MMU notifier range flags to kvm_unmap_hva_range()

The 'flags' field of 'struct mmu_notifier_range' is used to indicate
whether invalidate_range_{start,end}() are permitted to block. In the
case of kvm_mmu_notifier_invalidate_range_start(), this field is not
forwarded on to the architecture-specific implementation of
kvm_unmap_hva_range() and therefore the backend cannot sensibly decide
whether or not to block.

Add an extra 'flags' parameter to kvm_unmap_hva_range() so that
architectures are aware as to whether or not they are permitted to block.

Cc: <stable@vger.kernel.org>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
Cc: James Morse <james.morse@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
Message-Id: <20200811102725.7121-2-will@kernel.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Authored by Will Deacon and committed by Paolo Bonzini
fdfe7cbd cb957adb

+17 -10
+1 -1
arch/arm64/include/asm/kvm_host.h
@@ -473,7 +473,7 @@
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 int kvm_unmap_hva_range(struct kvm *kvm,
-			unsigned long start, unsigned long end);
+			unsigned long start, unsigned long end, unsigned flags);
 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
+1 -1
arch/arm64/kvm/mmu.c
@@ -2213,7 +2213,7 @@
 }
 
 int kvm_unmap_hva_range(struct kvm *kvm,
-			unsigned long start, unsigned long end)
+			unsigned long start, unsigned long end, unsigned flags)
 {
 	if (!kvm->arch.mmu.pgd)
 		return 0;
+1 -1
arch/mips/include/asm/kvm_host.h
@@ -969,7 +969,7 @@
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 int kvm_unmap_hva_range(struct kvm *kvm,
-			unsigned long start, unsigned long end);
+			unsigned long start, unsigned long end, unsigned flags);
 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
+2 -1
arch/mips/kvm/mmu.c
@@ -486,7 +486,8 @@
 	return 1;
 }
 
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+			unsigned flags)
 {
 	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
 
+2 -1
arch/powerpc/include/asm/kvm_host.h
@@ -58,7 +58,8 @@
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 
 extern int kvm_unmap_hva_range(struct kvm *kvm,
-			       unsigned long start, unsigned long end);
+			       unsigned long start, unsigned long end,
+			       unsigned flags);
 extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 extern int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+2 -1
arch/powerpc/kvm/book3s.c
@@ -834,7 +834,8 @@
 	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change);
 }
 
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+			unsigned flags)
 {
 	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
 }
+2 -1
arch/powerpc/kvm/e500_mmu_host.c
@@ -734,7 +734,8 @@
 	return 0;
 }
 
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+			unsigned flags)
 {
 	/* kvm_unmap_hva flushes everything anyways */
 	kvm_unmap_hva(kvm, start);
+2 -1
arch/x86/include/asm/kvm_host.h
@@ -1596,7 +1596,8 @@
 	_ASM_EXTABLE(666b, 667b)
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+			unsigned flags);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+2 -1
arch/x86/kvm/mmu/mmu.c
@@ -1916,7 +1916,8 @@
 	return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
 }
 
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
+			unsigned flags)
 {
 	return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
 }
+2 -1
virt/kvm/kvm_main.c
@@ -482,7 +482,8 @@
 	 * count is also read inside the mmu_lock critical section.
 	 */
 	kvm->mmu_notifier_count++;
-	need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end);
+	need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end,
+					     range->flags);
 	need_tlb_flush |= kvm->tlbs_dirty;
 	/* we've to flush the tlb before the pages can be freed */
 	if (need_tlb_flush)