Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: s390: Simplify and move pv code

All functions in kvm/gmap.c fit better in kvm/pv.c instead.
Move and rename them appropriately, then delete the now empty
kvm/gmap.c and kvm/gmap.h.

Reviewed-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
Reviewed-by: Steffen Eiden <seiden@linux.ibm.com>
Reviewed-by: Christoph Schlameuss <schlameuss@linux.ibm.com>
Acked-by: Janosch Frank <frankja@linux.ibm.com>
Link: https://lore.kernel.org/r/20250528095502.226213-5-imbrenda@linux.ibm.com
Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Message-ID: <20250528095502.226213-5-imbrenda@linux.ibm.com>

+133 -182
+6 -6
arch/s390/kernel/uv.c
··· 136 136 { 137 137 int rc; 138 138 139 - /* See gmap_make_secure(): large folios cannot be secure */ 139 + /* Large folios cannot be secure */ 140 140 if (unlikely(folio_test_large(folio))) 141 141 return 0; 142 142 ··· 185 185 { 186 186 int rc; 187 187 188 - /* See gmap_make_secure(): large folios cannot be secure */ 188 + /* Large folios cannot be secure */ 189 189 if (unlikely(folio_test_large(folio))) 190 190 return 0; 191 191 ··· 462 462 463 463 /* 464 464 * To be called with the folio locked or with an extra reference! This will 465 - * prevent gmap_make_secure from touching the folio concurrently. Having 2 466 - * parallel arch_make_folio_accessible is fine, as the UV calls will become a 467 - * no-op if the folio is already exported. 465 + * prevent kvm_s390_pv_make_secure() from touching the folio concurrently. 466 + * Having 2 parallel arch_make_folio_accessible is fine, as the UV calls will 467 + * become a no-op if the folio is already exported. 468 468 */ 469 469 int arch_make_folio_accessible(struct folio *folio) 470 470 { 471 471 int rc = 0; 472 472 473 - /* See gmap_make_secure(): large folios cannot be secure */ 473 + /* Large folios cannot be secure */ 474 474 if (unlikely(folio_test_large(folio))) 475 475 return 0; 476 476
+1 -1
arch/s390/kvm/Makefile
··· 8 8 ccflags-y := -Ivirt/kvm -Iarch/s390/kvm 9 9 10 10 kvm-y += kvm-s390.o intercept.o interrupt.o priv.o sigp.o 11 - kvm-y += diag.o gaccess.o guestdbg.o vsie.o pv.o gmap.o gmap-vsie.o 11 + kvm-y += diag.o gaccess.o guestdbg.o vsie.o pv.o gmap-vsie.o 12 12 13 13 kvm-$(CONFIG_VFIO_PCI_ZDEV_KVM) += pci.o 14 14 obj-$(CONFIG_KVM) += kvm.o
+2 -1
arch/s390/kvm/gaccess.c
··· 16 16 #include <asm/gmap.h> 17 17 #include <asm/dat-bits.h> 18 18 #include "kvm-s390.h" 19 - #include "gmap.h" 20 19 #include "gaccess.h" 20 + 21 + #define GMAP_SHADOW_FAKE_TABLE 1ULL 21 22 22 23 /* 23 24 * vaddress union in order to easily decode a virtual address into its
-1
arch/s390/kvm/gmap-vsie.c
··· 22 22 #include <asm/uv.h> 23 23 24 24 #include "kvm-s390.h" 25 - #include "gmap.h" 26 25 27 26 /** 28 27 * gmap_find_shadow - find a specific asce in the list of shadow tables
-121
arch/s390/kvm/gmap.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - /* 3 - * Guest memory management for KVM/s390 4 - * 5 - * Copyright IBM Corp. 2008, 2020, 2024 6 - * 7 - * Author(s): Claudio Imbrenda <imbrenda@linux.ibm.com> 8 - * Martin Schwidefsky <schwidefsky@de.ibm.com> 9 - * David Hildenbrand <david@redhat.com> 10 - * Janosch Frank <frankja@linux.vnet.ibm.com> 11 - */ 12 - 13 - #include <linux/compiler.h> 14 - #include <linux/kvm.h> 15 - #include <linux/kvm_host.h> 16 - #include <linux/pgtable.h> 17 - #include <linux/pagemap.h> 18 - 19 - #include <asm/lowcore.h> 20 - #include <asm/gmap.h> 21 - #include <asm/uv.h> 22 - 23 - #include "gmap.h" 24 - 25 - /** 26 - * gmap_make_secure() - make one guest page secure 27 - * @gmap: the guest gmap 28 - * @gaddr: the guest address that needs to be made secure 29 - * @uvcb: the UVCB specifying which operation needs to be performed 30 - * 31 - * Context: needs to be called with kvm->srcu held. 32 - * Return: 0 on success, < 0 in case of error. 33 - */ 34 - int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb) 35 - { 36 - struct kvm *kvm = gmap->private; 37 - unsigned long vmaddr; 38 - 39 - lockdep_assert_held(&kvm->srcu); 40 - 41 - vmaddr = gfn_to_hva(kvm, gpa_to_gfn(gaddr)); 42 - if (kvm_is_error_hva(vmaddr)) 43 - return -EFAULT; 44 - return make_hva_secure(gmap->mm, vmaddr, uvcb); 45 - } 46 - 47 - int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr) 48 - { 49 - struct uv_cb_cts uvcb = { 50 - .header.cmd = UVC_CMD_CONV_TO_SEC_STOR, 51 - .header.len = sizeof(uvcb), 52 - .guest_handle = gmap->guest_handle, 53 - .gaddr = gaddr, 54 - }; 55 - 56 - return gmap_make_secure(gmap, gaddr, &uvcb); 57 - } 58 - 59 - /** 60 - * __gmap_destroy_page() - Destroy a guest page. 61 - * @gmap: the gmap of the guest 62 - * @page: the page to destroy 63 - * 64 - * An attempt will be made to destroy the given guest page. If the attempt 65 - * fails, an attempt is made to export the page. If both attempts fail, an 66 - * appropriate error is returned. 67 - * 68 - * Context: must be called holding the mm lock for gmap->mm 69 - */ 70 - static int __gmap_destroy_page(struct gmap *gmap, struct page *page) 71 - { 72 - struct folio *folio = page_folio(page); 73 - int rc; 74 - 75 - /* 76 - * See gmap_make_secure(): large folios cannot be secure. Small 77 - * folio implies FW_LEVEL_PTE. 78 - */ 79 - if (folio_test_large(folio)) 80 - return -EFAULT; 81 - 82 - rc = uv_destroy_folio(folio); 83 - /* 84 - * Fault handlers can race; it is possible that two CPUs will fault 85 - * on the same secure page. One CPU can destroy the page, reboot, 86 - * re-enter secure mode and import it, while the second CPU was 87 - * stuck at the beginning of the handler. At some point the second 88 - * CPU will be able to progress, and it will not be able to destroy 89 - * the page. In that case we do not want to terminate the process, 90 - * we instead try to export the page. 91 - */ 92 - if (rc) 93 - rc = uv_convert_from_secure_folio(folio); 94 - 95 - return rc; 96 - } 97 - 98 - /** 99 - * gmap_destroy_page() - Destroy a guest page. 100 - * @gmap: the gmap of the guest 101 - * @gaddr: the guest address to destroy 102 - * 103 - * An attempt will be made to destroy the given guest page. If the attempt 104 - * fails, an attempt is made to export the page. If both attempts fail, an 105 - * appropriate error is returned. 106 - * 107 - * Context: may sleep. 108 - */ 109 - int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr) 110 - { 111 - struct page *page; 112 - int rc = 0; 113 - 114 - mmap_read_lock(gmap->mm); 115 - page = gfn_to_page(gmap->private, gpa_to_gfn(gaddr)); 116 - if (page) 117 - rc = __gmap_destroy_page(gmap, page); 118 - kvm_release_page_clean(page); 119 - mmap_read_unlock(gmap->mm); 120 - return rc; 121 - }
-39
arch/s390/kvm/gmap.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* 3 - * KVM guest address space mapping code 4 - * 5 - * Copyright IBM Corp. 2007, 2016, 2025 6 - * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> 7 - * Claudio Imbrenda <imbrenda@linux.ibm.com> 8 - */ 9 - 10 - #ifndef ARCH_KVM_S390_GMAP_H 11 - #define ARCH_KVM_S390_GMAP_H 12 - 13 - #define GMAP_SHADOW_FAKE_TABLE 1ULL 14 - 15 - int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb); 16 - int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr); 17 - int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr); 18 - struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce, int edat_level); 19 - 20 - /** 21 - * gmap_shadow_valid - check if a shadow guest address space matches the 22 - * given properties and is still valid 23 - * @sg: pointer to the shadow guest address space structure 24 - * @asce: ASCE for which the shadow table is requested 25 - * @edat_level: edat level to be used for the shadow translation 26 - * 27 - * Returns 1 if the gmap shadow is still valid and matches the given 28 - * properties, the caller can continue using it. Returns 0 otherwise, the 29 - * caller has to request a new shadow gmap in this case. 30 - * 31 - */ 32 - static inline int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level) 33 - { 34 - if (sg->removed) 35 - return 0; 36 - return sg->orig_asce == asce && sg->edat_level == edat_level; 37 - } 38 - 39 - #endif
+3 -7
arch/s390/kvm/intercept.c
··· 16 16 #include <asm/irq.h> 17 17 #include <asm/sysinfo.h> 18 18 #include <asm/uv.h> 19 - #include <asm/gmap.h> 20 19 21 20 #include "kvm-s390.h" 22 21 #include "gaccess.h" 23 22 #include "trace.h" 24 23 #include "trace-s390.h" 25 - #include "gmap.h" 26 24 27 25 u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu) 28 26 { ··· 544 546 guest_uvcb->header.cmd); 545 547 return 0; 546 548 } 547 - rc = gmap_make_secure(vcpu->arch.gmap, uvcb.gaddr, &uvcb); 549 + rc = kvm_s390_pv_make_secure(vcpu->kvm, uvcb.gaddr, &uvcb); 548 550 /* 549 551 * If the unpin did not succeed, the guest will exit again for the UVC 550 552 * and we will retry the unpin. ··· 652 654 break; 653 655 case ICPT_PV_PREF: 654 656 rc = 0; 655 - gmap_convert_to_secure(vcpu->arch.gmap, 656 - kvm_s390_get_prefix(vcpu)); 657 - gmap_convert_to_secure(vcpu->arch.gmap, 658 - kvm_s390_get_prefix(vcpu) + PAGE_SIZE); 657 + kvm_s390_pv_convert_to_secure(vcpu->kvm, kvm_s390_get_prefix(vcpu)); 658 + kvm_s390_pv_convert_to_secure(vcpu->kvm, kvm_s390_get_prefix(vcpu) + PAGE_SIZE); 659 659 break; 660 660 default: 661 661 return -EOPNOTSUPP;
+2 -3
arch/s390/kvm/kvm-s390.c
··· 53 53 #include "kvm-s390.h" 54 54 #include "gaccess.h" 55 55 #include "pci.h" 56 - #include "gmap.h" 57 56 58 57 #define CREATE_TRACE_POINTS 59 58 #include "trace.h" ··· 4975 4976 * previous protected guest. The old pages need to be destroyed 4976 4977 * so the new guest can use them. 4977 4978 */ 4978 - if (gmap_destroy_page(vcpu->arch.gmap, gaddr)) { 4979 + if (kvm_s390_pv_destroy_page(vcpu->kvm, gaddr)) { 4979 4980 /* 4980 4981 * Either KVM messed up the secure guest mapping or the 4981 4982 * same page is mapped into multiple secure guests. ··· 4997 4998 * guest has not been imported yet. Try to import the page into 4998 4999 * the protected guest. 4999 5000 */ 5000 - rc = gmap_convert_to_secure(vcpu->arch.gmap, gaddr); 5001 + rc = kvm_s390_pv_convert_to_secure(vcpu->kvm, gaddr); 5001 5002 if (rc == -EINVAL) 5002 5003 send_sig(SIGSEGV, current, 0); 5003 5004 if (rc != -ENXIO)
+42
arch/s390/kvm/kvm-s390.h
··· 308 308 u64 *gaddr, u64 buff_user_len, u16 *rc, u16 *rrc); 309 309 int kvm_s390_pv_dump_complete(struct kvm *kvm, void __user *buff_user, 310 310 u16 *rc, u16 *rrc); 311 + int kvm_s390_pv_destroy_page(struct kvm *kvm, unsigned long gaddr); 312 + int kvm_s390_pv_convert_to_secure(struct kvm *kvm, unsigned long gaddr); 313 + int kvm_s390_pv_make_secure(struct kvm *kvm, unsigned long gaddr, void *uvcb); 311 314 312 315 static inline u64 kvm_s390_pv_get_handle(struct kvm *kvm) 313 316 { ··· 320 317 static inline u64 kvm_s390_pv_cpu_get_handle(struct kvm_vcpu *vcpu) 321 318 { 322 319 return vcpu->arch.pv.handle; 320 + } 321 + 322 + /** 323 + * __kvm_s390_pv_destroy_page() - Destroy a guest page. 324 + * @page: the page to destroy 325 + * 326 + * An attempt will be made to destroy the given guest page. If the attempt 327 + * fails, an attempt is made to export the page. If both attempts fail, an 328 + * appropriate error is returned. 329 + * 330 + * Context: must be called holding the mm lock for gmap->mm 331 + */ 332 + static inline int __kvm_s390_pv_destroy_page(struct page *page) 333 + { 334 + struct folio *folio = page_folio(page); 335 + int rc; 336 + 337 + /* Large folios cannot be secure. Small folio implies FW_LEVEL_PTE. */ 338 + if (folio_test_large(folio)) 339 + return -EFAULT; 340 + 341 + rc = uv_destroy_folio(folio); 342 + /* 343 + * Fault handlers can race; it is possible that two CPUs will fault 344 + * on the same secure page. One CPU can destroy the page, reboot, 345 + * re-enter secure mode and import it, while the second CPU was 346 + * stuck at the beginning of the handler. At some point the second 347 + * CPU will be able to progress, and it will not be able to destroy 348 + * the page. In that case we do not want to terminate the process, 349 + * we instead try to export the page. 350 + */ 351 + if (rc) 352 + rc = uv_convert_from_secure_folio(folio); 353 + 354 + return rc; 323 355 } 324 356 325 357 /* implemented in interrupt.c */ ··· 436 398 unsigned long end); 437 399 void kvm_s390_vsie_init(struct kvm *kvm); 438 400 void kvm_s390_vsie_destroy(struct kvm *kvm); 401 + int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level); 402 + 403 + /* implemented in gmap-vsie.c */ 404 + struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce, int edat_level); 439 405 440 406 /* implemented in sigp.c */ 441 407 int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
+59 -2
arch/s390/kvm/pv.c
··· 17 17 #include <linux/sched/mm.h> 18 18 #include <linux/mmu_notifier.h> 19 19 #include "kvm-s390.h" 20 - #include "gmap.h" 21 20 22 21 bool kvm_s390_pv_is_protected(struct kvm *kvm) 23 22 { ··· 31 32 return !!kvm_s390_pv_cpu_get_handle(vcpu); 32 33 } 33 34 EXPORT_SYMBOL_GPL(kvm_s390_pv_cpu_is_protected); 35 + 36 + /** 37 + * kvm_s390_pv_make_secure() - make one guest page secure 38 + * @kvm: the guest 39 + * @gaddr: the guest address that needs to be made secure 40 + * @uvcb: the UVCB specifying which operation needs to be performed 41 + * 42 + * Context: needs to be called with kvm->srcu held. 43 + * Return: 0 on success, < 0 in case of error. 44 + */ 45 + int kvm_s390_pv_make_secure(struct kvm *kvm, unsigned long gaddr, void *uvcb) 46 + { 47 + unsigned long vmaddr; 48 + 49 + lockdep_assert_held(&kvm->srcu); 50 + 51 + vmaddr = gfn_to_hva(kvm, gpa_to_gfn(gaddr)); 52 + if (kvm_is_error_hva(vmaddr)) 53 + return -EFAULT; 54 + return make_hva_secure(kvm->mm, vmaddr, uvcb); 55 + } 56 + 57 + int kvm_s390_pv_convert_to_secure(struct kvm *kvm, unsigned long gaddr) 58 + { 59 + struct uv_cb_cts uvcb = { 60 + .header.cmd = UVC_CMD_CONV_TO_SEC_STOR, 61 + .header.len = sizeof(uvcb), 62 + .guest_handle = kvm_s390_pv_get_handle(kvm), 63 + .gaddr = gaddr, 64 + }; 65 + 66 + return kvm_s390_pv_make_secure(kvm, gaddr, &uvcb); 67 + } 68 + 69 + /** 70 + * kvm_s390_pv_destroy_page() - Destroy a guest page. 71 + * @kvm: the guest 72 + * @gaddr: the guest address to destroy 73 + * 74 + * An attempt will be made to destroy the given guest page. If the attempt 75 + * fails, an attempt is made to export the page. If both attempts fail, an 76 + * appropriate error is returned. 77 + * 78 + * Context: may sleep. 79 + */ 80 + int kvm_s390_pv_destroy_page(struct kvm *kvm, unsigned long gaddr) 81 + { 82 + struct page *page; 83 + int rc = 0; 84 + 85 + mmap_read_lock(kvm->mm); 86 + page = gfn_to_page(kvm, gpa_to_gfn(gaddr)); 87 + if (page) 88 + rc = __kvm_s390_pv_destroy_page(page); 89 + kvm_release_page_clean(page); 90 + mmap_read_unlock(kvm->mm); 91 + return rc; 92 + } 34 93 35 94 /** 36 95 * struct pv_vm_to_be_destroyed - Represents a protected VM that needs to ··· 695 638 .tweak[0] = tweak, 696 639 .tweak[1] = offset, 697 640 }; 698 - int ret = gmap_make_secure(kvm->arch.gmap, addr, &uvcb); 641 + int ret = kvm_s390_pv_make_secure(kvm, addr, &uvcb); 699 642 unsigned long vmaddr; 700 643 bool unlocked; 701 644
+18 -1
arch/s390/kvm/vsie.c
··· 23 23 #include <asm/facility.h> 24 24 #include "kvm-s390.h" 25 25 #include "gaccess.h" 26 - #include "gmap.h" 27 26 28 27 enum vsie_page_flags { 29 28 VSIE_PAGE_IN_USE = 0, ··· 66 67 struct kvm_s390_crypto_cb crycb; /* 0x0700 */ 67 68 __u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE]; /* 0x0800 */ 68 69 }; 70 + 71 + /** 72 + * gmap_shadow_valid() - check if a shadow guest address space matches the 73 + * given properties and is still valid 74 + * @sg: pointer to the shadow guest address space structure 75 + * @asce: ASCE for which the shadow table is requested 76 + * @edat_level: edat level to be used for the shadow translation 77 + * 78 + * Returns 1 if the gmap shadow is still valid and matches the given 79 + * properties, the caller can continue using it. Returns 0 otherwise; the 80 + * caller has to request a new shadow gmap in this case. 81 + */ 82 + int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level) 83 + { 84 + if (sg->removed) 85 + return 0; 86 + return sg->orig_asce == asce && sg->edat_level == edat_level; 87 + } 69 88 70 89 /* trigger a validity icpt for the given scb */ 71 90 static int set_validity_icpt(struct kvm_s390_sie_block *scb,