Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: s390: Refactor kvm_is_error_gpa() into kvm_is_gpa_in_memslot()

Rename kvm_is_error_gpa() to kvm_is_gpa_in_memslot() and invert the
polarity accordingly in order to (a) free up kvm_is_error_gpa() to match
with kvm_is_error_{hva,page}(), and (b) make it more obvious that the
helper is doing a memslot lookup, i.e. not simply checking for INVALID_GPA.

No functional change intended.

Link: https://lore.kernel.org/r/20240215152916.1158-9-paul@xen.org
Signed-off-by: Sean Christopherson <seanjc@google.com>

+15 -15
+1 -1
arch/s390/kvm/diag.c
··· 102 102 parm.token_addr & 7 || parm.zarch != 0x8000000000000000ULL) 103 103 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 104 104 105 - if (kvm_is_error_gpa(vcpu->kvm, parm.token_addr)) 105 + if (!kvm_is_gpa_in_memslot(vcpu->kvm, parm.token_addr)) 106 106 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 107 107 108 108 vcpu->arch.pfault_token = parm.token_addr;
+7 -7
arch/s390/kvm/gaccess.c
··· 664 664 case ASCE_TYPE_REGION1: { 665 665 union region1_table_entry rfte; 666 666 667 - if (kvm_is_error_gpa(vcpu->kvm, ptr)) 667 + if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr)) 668 668 return PGM_ADDRESSING; 669 669 if (deref_table(vcpu->kvm, ptr, &rfte.val)) 670 670 return -EFAULT; ··· 682 682 case ASCE_TYPE_REGION2: { 683 683 union region2_table_entry rste; 684 684 685 - if (kvm_is_error_gpa(vcpu->kvm, ptr)) 685 + if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr)) 686 686 return PGM_ADDRESSING; 687 687 if (deref_table(vcpu->kvm, ptr, &rste.val)) 688 688 return -EFAULT; ··· 700 700 case ASCE_TYPE_REGION3: { 701 701 union region3_table_entry rtte; 702 702 703 - if (kvm_is_error_gpa(vcpu->kvm, ptr)) 703 + if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr)) 704 704 return PGM_ADDRESSING; 705 705 if (deref_table(vcpu->kvm, ptr, &rtte.val)) 706 706 return -EFAULT; ··· 728 728 case ASCE_TYPE_SEGMENT: { 729 729 union segment_table_entry ste; 730 730 731 - if (kvm_is_error_gpa(vcpu->kvm, ptr)) 731 + if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr)) 732 732 return PGM_ADDRESSING; 733 733 if (deref_table(vcpu->kvm, ptr, &ste.val)) 734 734 return -EFAULT; ··· 748 748 ptr = ste.fc0.pto * (PAGE_SIZE / 2) + vaddr.px * 8; 749 749 } 750 750 } 751 - if (kvm_is_error_gpa(vcpu->kvm, ptr)) 751 + if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr)) 752 752 return PGM_ADDRESSING; 753 753 if (deref_table(vcpu->kvm, ptr, &pte.val)) 754 754 return -EFAULT; ··· 770 770 *prot = PROT_TYPE_IEP; 771 771 return PGM_PROTECTION; 772 772 } 773 - if (kvm_is_error_gpa(vcpu->kvm, raddr.addr)) 773 + if (!kvm_is_gpa_in_memslot(vcpu->kvm, raddr.addr)) 774 774 return PGM_ADDRESSING; 775 775 *gpa = raddr.addr; 776 776 return 0; ··· 957 957 return rc; 958 958 } else { 959 959 gpa = kvm_s390_real_to_abs(vcpu, ga); 960 - if (kvm_is_error_gpa(vcpu->kvm, gpa)) { 960 + if (!kvm_is_gpa_in_memslot(vcpu->kvm, gpa)) { 961 961 rc = PGM_ADDRESSING; 962 962 prot = PROT_NONE; 963 963 }
+2 -2
arch/s390/kvm/kvm-s390.c
··· 2878 2878 2879 2879 srcu_idx = srcu_read_lock(&kvm->srcu); 2880 2880 2881 - if (kvm_is_error_gpa(kvm, mop->gaddr)) { 2881 + if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) { 2882 2882 r = PGM_ADDRESSING; 2883 2883 goto out_unlock; 2884 2884 } ··· 2940 2940 2941 2941 srcu_idx = srcu_read_lock(&kvm->srcu); 2942 2942 2943 - if (kvm_is_error_gpa(kvm, mop->gaddr)) { 2943 + if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) { 2944 2944 r = PGM_ADDRESSING; 2945 2945 goto out_unlock; 2946 2946 }
+2 -2
arch/s390/kvm/priv.c
··· 149 149 * first page, since address is 8k aligned and memory pieces are always 150 150 * at least 1MB aligned and have at least a size of 1MB. 151 151 */ 152 - if (kvm_is_error_gpa(vcpu->kvm, address)) 152 + if (!kvm_is_gpa_in_memslot(vcpu->kvm, address)) 153 153 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 154 154 155 155 kvm_s390_set_prefix(vcpu, address); ··· 464 464 return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); 465 465 addr = kvm_s390_real_to_abs(vcpu, addr); 466 466 467 - if (kvm_is_error_gpa(vcpu->kvm, addr)) 467 + if (!kvm_is_gpa_in_memslot(vcpu->kvm, addr)) 468 468 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 469 469 /* 470 470 * We don't expect errors on modern systems, and do not care
+1 -1
arch/s390/kvm/sigp.c
··· 172 172 * first page, since address is 8k aligned and memory pieces are always 173 173 * at least 1MB aligned and have at least a size of 1MB. 174 174 */ 175 - if (kvm_is_error_gpa(vcpu->kvm, irq.u.prefix.address)) { 175 + if (!kvm_is_gpa_in_memslot(vcpu->kvm, irq.u.prefix.address)) { 176 176 *reg &= 0xffffffff00000000UL; 177 177 *reg |= SIGP_STATUS_INVALID_PARAMETER; 178 178 return SIGP_CC_STATUS_STORED;
+2 -2
include/linux/kvm_host.h
··· 1779 1779 return (hpa_t)pfn << PAGE_SHIFT; 1780 1780 } 1781 1781 1782 - static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa) 1782 + static inline bool kvm_is_gpa_in_memslot(struct kvm *kvm, gpa_t gpa) 1783 1783 { 1784 1784 unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa)); 1785 1785 1786 - return kvm_is_error_hva(hva); 1786 + return !kvm_is_error_hva(hva); 1787 1787 } 1788 1788 1789 1789 static inline void kvm_gpc_mark_dirty_in_slot(struct gfn_to_pfn_cache *gpc)