Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ARM: KVM: Unmap IPA on memslot delete/move

Currently, when a KVM memory region is deleted or moved after a
KVM_SET_USER_MEMORY_REGION ioctl, the corresponding
intermediate physical address (IPA) range is not unmapped.

This patch corrects this and unmaps the region's IPA range
in kvm_arch_commit_memory_region using unmap_stage2_range.

Signed-off-by: Eric Auger <eric.auger@linaro.org>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>

Authored by Eric Auger and committed by Christoffer Dall
df6ce24f 4f853a71

+46 -37
arch/arm/kvm/arm.c
··· 155 155 return VM_FAULT_SIGBUS; 156 156 } 157 157 158 - void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, 159 - struct kvm_memory_slot *dont) 160 - { 161 - } 162 - 163 - int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, 164 - unsigned long npages) 165 - { 166 - return 0; 167 - } 168 158 169 159 /** 170 160 * kvm_arch_destroy_vm - destroy the VM data structure ··· 215 225 return -EINVAL; 216 226 } 217 227 218 - void kvm_arch_memslots_updated(struct kvm *kvm) 219 - { 220 - } 221 - 222 - int kvm_arch_prepare_memory_region(struct kvm *kvm, 223 - struct kvm_memory_slot *memslot, 224 - struct kvm_userspace_memory_region *mem, 225 - enum kvm_mr_change change) 226 - { 227 - return 0; 228 - } 229 - 230 - void kvm_arch_commit_memory_region(struct kvm *kvm, 231 - struct kvm_userspace_memory_region *mem, 232 - const struct kvm_memory_slot *old, 233 - enum kvm_mr_change change) 234 - { 235 - } 236 - 237 - void kvm_arch_flush_shadow_all(struct kvm *kvm) 238 - { 239 - } 240 - 241 - void kvm_arch_flush_shadow_memslot(struct kvm *kvm, 242 - struct kvm_memory_slot *slot) 243 - { 244 - } 245 228 246 229 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) 247 230 {
+46
arch/arm/kvm/mmu.c
··· 1111 1111 free_hyp_pgds(); 1112 1112 return err; 1113 1113 } 1114 + 1115 + void kvm_arch_commit_memory_region(struct kvm *kvm, 1116 + struct kvm_userspace_memory_region *mem, 1117 + const struct kvm_memory_slot *old, 1118 + enum kvm_mr_change change) 1119 + { 1120 + gpa_t gpa = old->base_gfn << PAGE_SHIFT; 1121 + phys_addr_t size = old->npages << PAGE_SHIFT; 1122 + if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { 1123 + spin_lock(&kvm->mmu_lock); 1124 + unmap_stage2_range(kvm, gpa, size); 1125 + spin_unlock(&kvm->mmu_lock); 1126 + } 1127 + } 1128 + 1129 + int kvm_arch_prepare_memory_region(struct kvm *kvm, 1130 + struct kvm_memory_slot *memslot, 1131 + struct kvm_userspace_memory_region *mem, 1132 + enum kvm_mr_change change) 1133 + { 1134 + return 0; 1135 + } 1136 + 1137 + void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, 1138 + struct kvm_memory_slot *dont) 1139 + { 1140 + } 1141 + 1142 + int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, 1143 + unsigned long npages) 1144 + { 1145 + return 0; 1146 + } 1147 + 1148 + void kvm_arch_memslots_updated(struct kvm *kvm) 1149 + { 1150 + } 1151 + 1152 + void kvm_arch_flush_shadow_all(struct kvm *kvm) 1153 + { 1154 + } 1155 + 1156 + void kvm_arch_flush_shadow_memslot(struct kvm *kvm, 1157 + struct kvm_memory_slot *slot) 1158 + { 1159 + }