Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

KVM: arm64: Return guest memory from EL2 via dedicated teardown memcache

Rather than relying on the host to free the previously-donated pKVM
hypervisor VM pages explicitly on teardown, introduce a dedicated
teardown memcache which allows the host to reclaim guest memory
resources without having to keep track of all of the allocations made by
the pKVM hypervisor at EL2.

Tested-by: Vincent Donnefort <vdonnefort@google.com>
Co-developed-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
[maz: dropped __maybe_unused from unmap_donated_memory_noclear()]
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221110190259.26861-21-will@kernel.org

Authored by Quentin Perret and committed by Marc Zyngier
f41dff4e 60dfe093
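
As context for the diffs below: a hyp memcache is a stack of free pages threaded through the free pages themselves, with the head tracked by physical address so it can be walked on either side of the EL1/EL2 boundary after an address translation. What follows is a minimal userspace sketch of the idea, not the kernel implementation; the identity virt/phys helpers stand in for hyp_virt_to_phys() and friends, and all names here are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

/* Mirrors the shape of kvm_hyp_memcache: a PA head plus a page count. */
struct hyp_memcache {
        uintptr_t head;
        unsigned long nr_pages;
};

/* Identity mapping stands in for virt<->phys translation. */
static uintptr_t virt_to_phys(void *va) { return (uintptr_t)va; }
static void *phys_to_virt(uintptr_t pa) { return (void *)pa; }

/* Push: the first word of each free page stores the previous head,
 * so the cache needs no storage of its own. */
static void memcache_push(struct hyp_memcache *mc, void *page)
{
        *(uintptr_t *)page = mc->head;
        mc->head = virt_to_phys(page);
        mc->nr_pages++;
}

/* Pop: recover the next link from the page itself. */
static void *memcache_pop(struct hyp_memcache *mc)
{
        void *page;

        if (!mc->nr_pages)
                return NULL;
        page = phys_to_virt(mc->head);
        mc->head = *(uintptr_t *)page;
        mc->nr_pages--;
        return page;
}

int main(void)
{
        struct hyp_memcache mc = { 0 };
        void *page;

        /* "EL2" pushes donated pages; "the host" pops and frees them. */
        for (int i = 0; i < 4; i++)
                memcache_push(&mc, aligned_alloc(PAGE_SIZE, PAGE_SIZE));
        while ((page = memcache_pop(&mc)))
                free(page);
        printf("pages left: %lu\n", mc.nr_pages);       /* 0 */
        return 0;
}

Because each free page stores the link to the next, the cache can hold an arbitrary number of pages at fixed cost, which is exactly what lets the host stop tracking individual EL2 allocations.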

+40 -44
+1 -6
arch/arm64/include/asm/kvm_host.h
···
 
 struct kvm_protected_vm {
 	pkvm_handle_t handle;
-
-	struct {
-		void *pgd;
-		void *vm;
-		void *vcpus[KVM_MAX_VCPUS];
-	} hyp_donations;
+	struct kvm_hyp_memcache teardown_mc;
 };
 
 struct kvm_arch {
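
The kvm_hyp_memcache type replacing the hyp_donations bookkeeping was introduced earlier in this series; paraphrased from memory (treat this as a sketch, not the authoritative definition), it is just a physical-address head plus a page count:

struct kvm_hyp_memcache {
        phys_addr_t head;       /* PA of the most recently pushed page */
        unsigned long nr_pages; /* pages currently in the cache */
};

Two words replace a fixed-size array of KVM_MAX_VCPUS pointers plus the pgd/vm pointers.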
+1 -1
arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
···
 
 int hyp_pin_shared_mem(void *from, void *to);
 void hyp_unpin_shared_mem(void *from, void *to);
-void reclaim_guest_pages(struct pkvm_hyp_vm *vm);
+void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc);
 int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
 		    struct kvm_hyp_memcache *host_mc);
 
+11 -6
arch/arm64/kvm/hyp/nvhe/mem_protect.c
···
 	return 0;
 }
 
-void reclaim_guest_pages(struct pkvm_hyp_vm *vm)
+void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc)
 {
-	void *pgd = vm->pgt.pgd;
-	unsigned long nr_pages;
+	void *addr;
 
-	nr_pages = kvm_pgtable_stage2_pgd_size(vm->kvm.arch.vtcr) >> PAGE_SHIFT;
-
+	/* Dump all pgtable pages in the hyp_pool */
 	guest_lock_component(vm);
 	kvm_pgtable_stage2_destroy(&vm->pgt);
 	vm->kvm.arch.mmu.pgd_phys = 0ULL;
 	guest_unlock_component(vm);
 
-	WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(pgd), nr_pages));
+	/* Drain the hyp_pool into the memcache */
+	addr = hyp_alloc_pages(&vm->pool, 0);
+	while (addr) {
+		memset(hyp_virt_to_page(addr), 0, sizeof(struct hyp_page));
+		push_hyp_memcache(mc, addr, hyp_virt_to_phys);
+		WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(addr), 1));
+		addr = hyp_alloc_pages(&vm->pool, 0);
+	}
 }
 
 int __pkvm_prot_finalize(void)
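
The hunk above replaces the single bulk donation of the pgd with a drain loop: every page-table page still held in the VM's private allocator pool is popped, has its hyp_page metadata scrubbed, is pushed onto the memcache, and is donated back to the host one page at a time. A sketch of the same pattern, building on the memcache model above (toy_pool and pool_alloc_page() are hypothetical stand-ins for the hypervisor's hyp_pool and hyp_alloc_pages()):

/* Hypothetical stand-in for the per-VM hyp_pool. */
struct toy_pool {
        void *pages[16];
        unsigned int nr;
};

static void *pool_alloc_page(struct toy_pool *pool)
{
        return pool->nr ? pool->pages[--pool->nr] : NULL;
}

/* Mirrors the drain loop in reclaim_guest_pages(): every page left in
 * the pool ends up on the teardown memcache; in the kernel each page
 * is also donated back to the host via __pkvm_hyp_donate_host(pfn, 1). */
static void drain_pool(struct toy_pool *pool, struct hyp_memcache *mc)
{
        void *addr;

        while ((addr = pool_alloc_page(pool)))
                memcache_push(mc, addr);
}

Draining via the allocator is what removes the need to know how many pages the stage-2 page-table actually consumed: the loop simply runs until the pool is empty.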
+21 -6
arch/arm64/kvm/hyp/nvhe/pkvm.c
···
 	__unmap_donated_memory(va, size);
 }
 
-static void __maybe_unused unmap_donated_memory_noclear(void *va, size_t size)
+static void unmap_donated_memory_noclear(void *va, size_t size)
 {
 	if (!va)
 		return;
···
 	return ret;
 }
 
+static void
+teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr, size_t size)
+{
+	size = PAGE_ALIGN(size);
+	memset(addr, 0, size);
+
+	for (void *start = addr; start < addr + size; start += PAGE_SIZE)
+		push_hyp_memcache(mc, start, hyp_virt_to_phys);
+
+	unmap_donated_memory_noclear(addr, size);
+}
+
 int __pkvm_teardown_vm(pkvm_handle_t handle)
 {
+	struct kvm_hyp_memcache *mc;
 	struct pkvm_hyp_vm *hyp_vm;
 	struct kvm *host_kvm;
 	unsigned int idx;
···
 		goto err_unlock;
 	}
 
+	host_kvm = hyp_vm->host_kvm;
+
 	/* Ensure the VMID is clean before it can be reallocated */
 	__kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);
 	remove_vm_table_entry(handle);
 	hyp_spin_unlock(&vm_table_lock);
 
 	/* Reclaim guest pages (including page-table pages) */
-	reclaim_guest_pages(hyp_vm);
+	mc = &host_kvm->arch.pkvm.teardown_mc;
+	reclaim_guest_pages(hyp_vm, mc);
 	unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->nr_vcpus);
 
-	/* Return the metadata pages to the host */
+	/* Push the metadata pages to the teardown memcache */
 	for (idx = 0; idx < hyp_vm->nr_vcpus; ++idx) {
 		struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];
 
-		unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));
+		teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu));
 	}
 
-	host_kvm = hyp_vm->host_kvm;
 	vm_size = pkvm_get_hyp_vm_size(hyp_vm->kvm.created_vcpus);
-	unmap_donated_memory(hyp_vm, vm_size);
+	teardown_donated_memory(mc, hyp_vm, vm_size);
 	hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
 	return 0;
 
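
The new teardown_donated_memory() scrubs each donated metadata buffer before its pages are handed back, so no hypervisor state leaks to the host, then pushes the buffer onto the memcache page by page. A sketch of the same steps against the userspace model above (teardown_buffer() is illustrative, not the kernel function):

#include <string.h>     /* memset */

static void teardown_buffer(struct hyp_memcache *mc, void *addr, size_t size)
{
        /* Round up to a whole number of pages, as PAGE_ALIGN() does. */
        size = (size + PAGE_SIZE - 1) & ~(size_t)(PAGE_SIZE - 1);
        memset(addr, 0, size);  /* scrub hypervisor state first */

        for (char *p = addr; p < (char *)addr + size; p += PAGE_SIZE)
                memcache_push(mc, p);
}

After the memset, the only non-zero word in each page is the memcache link written by the push, which is presumably why the kernel can follow up with unmap_donated_memory_noclear() rather than clearing the mapping again.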
+6 -25
arch/arm64/kvm/pkvm.c
···
 	handle = ret;
 
 	host_kvm->arch.pkvm.handle = handle;
-	host_kvm->arch.pkvm.hyp_donations.pgd = pgd;
-	host_kvm->arch.pkvm.hyp_donations.vm = hyp_vm;
 
 	/* Donate memory for the vcpus at hyp and initialize it. */
 	hyp_vcpu_sz = PAGE_ALIGN(PKVM_HYP_VCPU_SIZE);
···
 			goto destroy_vm;
 		}
 
-		host_kvm->arch.pkvm.hyp_donations.vcpus[idx] = hyp_vcpu;
-
 		ret = kvm_call_hyp_nvhe(__pkvm_init_vcpu, handle, host_vcpu,
 					hyp_vcpu);
-		if (ret)
+		if (ret) {
+			free_pages_exact(hyp_vcpu, hyp_vcpu_sz);
 			goto destroy_vm;
+		}
 	}
 
 	return 0;
···
 
 void pkvm_destroy_hyp_vm(struct kvm *host_kvm)
 {
-	unsigned long idx, nr_vcpus = host_kvm->created_vcpus;
-	size_t pgd_sz, hyp_vm_sz;
-
-	if (host_kvm->arch.pkvm.handle)
+	if (host_kvm->arch.pkvm.handle) {
 		WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_vm,
 					  host_kvm->arch.pkvm.handle));
-
-	host_kvm->arch.pkvm.handle = 0;
-
-	for (idx = 0; idx < nr_vcpus; ++idx) {
-		void *hyp_vcpu = host_kvm->arch.pkvm.hyp_donations.vcpus[idx];
-
-		if (!hyp_vcpu)
-			break;
-
-		free_pages_exact(hyp_vcpu, PAGE_ALIGN(PKVM_HYP_VCPU_SIZE));
 	}
 
-	hyp_vm_sz = PAGE_ALIGN(size_add(PKVM_HYP_VM_SIZE,
-					size_mul(sizeof(void *), nr_vcpus)));
-	pgd_sz = kvm_pgtable_stage2_pgd_size(host_kvm->arch.vtcr);
-
-	free_pages_exact(host_kvm->arch.pkvm.hyp_donations.vm, hyp_vm_sz);
-	free_pages_exact(host_kvm->arch.pkvm.hyp_donations.pgd, pgd_sz);
+	host_kvm->arch.pkvm.handle = 0;
+	free_hyp_memcache(&host_kvm->arch.pkvm.teardown_mc);
 }
 
 int pkvm_init_host_vm(struct kvm *host_kvm)
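
On the host side, all of the per-allocation bookkeeping in pkvm_destroy_hyp_vm() collapses into a single free_hyp_memcache() call. In terms of the userspace model above, that amounts to popping until the cache is empty and freeing each page (the kernel frees back to the page allocator with free_pages_exact()); memcache_free_all() here is a hypothetical stand-in:

/* Approximates what free_hyp_memcache() does for the host: walk the
 * cache, translate each PA back to a VA, and free every page, no
 * matter which EL2 code path originally received it. */
static void memcache_free_all(struct hyp_memcache *mc)
{
        void *page;

        while ((page = memcache_pop(mc)))
                free(page);
}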