Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: PPC: Book3S HV: Fix conversion to gfn-based MMU notifier callbacks

Commit b1c5356e873c ("KVM: PPC: Convert to the gfn-based MMU notifier
callbacks") causes unmap_gfn_range and age_gfn callbacks to only work
on the first gfn in the range. It also makes the aging callbacks call
into both radix and hash aging functions for radix guests. Fix this.

Add warnings for the single-gfn calls that have been converted to range
callbacks, in case they ever receive ranges greater than 1.

Fixes: b1c5356e873c ("KVM: PPC: Convert to the gfn-based MMU notifier callbacks")
Reported-by: Bharata B Rao <bharata@linux.ibm.com>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Tested-by: Bharata B Rao <bharata@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210505121509.1470207-1-npiggin@gmail.com

Authored by Nicholas Piggin and committed by Michael Ellerman
32b48bf8 c6b05f4e

+37 -18
+1 -1
arch/powerpc/include/asm/kvm_book3s.h
··· 210 210 unsigned int lpid); 211 211 extern int kvmppc_radix_init(void); 212 212 extern void kvmppc_radix_exit(void); 213 - extern bool kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, 213 + extern void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, 214 214 unsigned long gfn); 215 215 extern bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, 216 216 unsigned long gfn);
+34 -14
arch/powerpc/kvm/book3s_64_mmu_hv.c
··· 795 795 } 796 796 } 797 797 798 - static bool kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, 798 + static void kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, 799 799 unsigned long gfn) 800 800 { 801 801 unsigned long i; ··· 829 829 unlock_rmap(rmapp); 830 830 __unlock_hpte(hptep, be64_to_cpu(hptep[0])); 831 831 } 832 - return false; 833 832 } 834 833 835 834 bool kvm_unmap_gfn_range_hv(struct kvm *kvm, struct kvm_gfn_range *range) 836 835 { 837 - if (kvm_is_radix(kvm)) 838 - return kvm_unmap_radix(kvm, range->slot, range->start); 836 + gfn_t gfn; 839 837 840 - return kvm_unmap_rmapp(kvm, range->slot, range->start); 838 + if (kvm_is_radix(kvm)) { 839 + for (gfn = range->start; gfn < range->end; gfn++) 840 + kvm_unmap_radix(kvm, range->slot, gfn); 841 + } else { 842 + for (gfn = range->start; gfn < range->end; gfn++) 843 + kvm_unmap_rmapp(kvm, range->slot, range->start); 844 + } 845 + 846 + return false; 841 847 } 842 848 843 849 void kvmppc_core_flush_memslot_hv(struct kvm *kvm, ··· 930 924 931 925 bool kvm_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range) 932 926 { 933 - if (kvm_is_radix(kvm)) 934 - kvm_age_radix(kvm, range->slot, range->start); 927 + gfn_t gfn; 928 + bool ret = false; 935 929 936 - return kvm_age_rmapp(kvm, range->slot, range->start); 930 + if (kvm_is_radix(kvm)) { 931 + for (gfn = range->start; gfn < range->end; gfn++) 932 + ret |= kvm_age_radix(kvm, range->slot, gfn); 933 + } else { 934 + for (gfn = range->start; gfn < range->end; gfn++) 935 + ret |= kvm_age_rmapp(kvm, range->slot, gfn); 936 + } 937 + 938 + return ret; 937 939 } 938 940 939 941 static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, ··· 979 965 980 966 bool kvm_test_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range) 981 967 { 982 - if (kvm_is_radix(kvm)) 983 - kvm_test_age_radix(kvm, range->slot, range->start); 968 + WARN_ON(range->start + 1 != range->end); 984 969 985 - return kvm_test_age_rmapp(kvm, range->slot, range->start); 970 + if (kvm_is_radix(kvm)) 971 + return kvm_test_age_radix(kvm, range->slot, range->start); 972 + else 973 + return kvm_test_age_rmapp(kvm, range->slot, range->start); 986 974 } 987 975 988 976 bool kvm_set_spte_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range) 989 977 { 990 - if (kvm_is_radix(kvm)) 991 - return kvm_unmap_radix(kvm, range->slot, range->start); 978 + WARN_ON(range->start + 1 != range->end); 992 979 993 - return kvm_unmap_rmapp(kvm, range->slot, range->start); 980 + if (kvm_is_radix(kvm)) 981 + kvm_unmap_radix(kvm, range->slot, range->start); 982 + else 983 + kvm_unmap_rmapp(kvm, range->slot, range->start); 984 + 985 + return false; 994 986 } 995 987 996 988 static int vcpus_running(struct kvm *kvm)
+2 -3
arch/powerpc/kvm/book3s_64_mmu_radix.c
··· 993 993 } 994 994 995 995 /* Called with kvm->mmu_lock held */ 996 - bool kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, 996 + void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, 997 997 unsigned long gfn) 998 998 { 999 999 pte_t *ptep; ··· 1002 1002 1003 1003 if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) { 1004 1004 uv_page_inval(kvm->arch.lpid, gpa, PAGE_SHIFT); 1005 - return false; 1005 + return; 1006 1006 } 1007 1007 1008 1008 ptep = find_kvm_secondary_pte(kvm, gpa, &shift); 1009 1009 if (ptep && pte_present(*ptep)) 1010 1010 kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot, 1011 1011 kvm->arch.lpid); 1012 - return false; 1013 1012 } 1014 1013 1015 1014 /* Called with kvm->mmu_lock held */