Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'kvm-s390-next-5.16-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

KVM: s390: Fixes and Features for 5.16

- SIGP Fixes
- initial preparations for lazy destroy of secure VMs
- storage key improvements/fixes
- Log the guest CPNC

+200 -77
+6 -3
arch/s390/include/asm/pgtable.h
··· 1074 1074 pte_t res; 1075 1075 1076 1076 res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID)); 1077 + /* At this point the reference through the mapping is still present */ 1077 1078 if (mm_is_protected(mm) && pte_present(res)) 1078 - uv_convert_from_secure(pte_val(res) & PAGE_MASK); 1079 + uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK); 1079 1080 return res; 1080 1081 } 1081 1082 ··· 1092 1091 pte_t res; 1093 1092 1094 1093 res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID)); 1094 + /* At this point the reference through the mapping is still present */ 1095 1095 if (mm_is_protected(vma->vm_mm) && pte_present(res)) 1096 - uv_convert_from_secure(pte_val(res) & PAGE_MASK); 1096 + uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK); 1097 1097 return res; 1098 1098 } 1099 1099 ··· 1118 1116 } else { 1119 1117 res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID)); 1120 1118 } 1119 + /* At this point the reference through the mapping is still present */ 1121 1120 if (mm_is_protected(mm) && pte_present(res)) 1122 - uv_convert_from_secure(pte_val(res) & PAGE_MASK); 1121 + uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK); 1123 1122 return res; 1124 1123 } 1125 1124
+13 -2
arch/s390/include/asm/uv.h
··· 18 18 #include <asm/page.h> 19 19 #include <asm/gmap.h> 20 20 21 + #define UVC_CC_OK 0 22 + #define UVC_CC_ERROR 1 23 + #define UVC_CC_BUSY 2 24 + #define UVC_CC_PARTIAL 3 25 + 21 26 #define UVC_RC_EXECUTED 0x0001 22 27 #define UVC_RC_INV_CMD 0x0002 23 28 #define UVC_RC_INV_STATE 0x0003 ··· 356 351 } 357 352 358 353 int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb); 359 - int uv_destroy_page(unsigned long paddr); 354 + int uv_destroy_owned_page(unsigned long paddr); 360 355 int uv_convert_from_secure(unsigned long paddr); 356 + int uv_convert_owned_from_secure(unsigned long paddr); 361 357 int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr); 362 358 363 359 void setup_uv(void); ··· 366 360 #define is_prot_virt_host() 0 367 361 static inline void setup_uv(void) {} 368 362 369 - static inline int uv_destroy_page(unsigned long paddr) 363 + static inline int uv_destroy_owned_page(unsigned long paddr) 370 364 { 371 365 return 0; 372 366 } 373 367 374 368 static inline int uv_convert_from_secure(unsigned long paddr) 369 + { 370 + return 0; 371 + } 372 + 373 + static inline int uv_convert_owned_from_secure(unsigned long paddr) 375 374 { 376 375 return 0; 377 376 }
+57 -8
arch/s390/kernel/uv.c
··· 100 100 * 101 101 * @paddr: Absolute host address of page to be destroyed 102 102 */ 103 - int uv_destroy_page(unsigned long paddr) 103 + static int uv_destroy_page(unsigned long paddr) 104 104 { 105 105 struct uv_cb_cfs uvcb = { 106 106 .header.cmd = UVC_CMD_DESTR_SEC_STOR, ··· 121 121 } 122 122 123 123 /* 124 + * The caller must already hold a reference to the page 125 + */ 126 + int uv_destroy_owned_page(unsigned long paddr) 127 + { 128 + struct page *page = phys_to_page(paddr); 129 + int rc; 130 + 131 + get_page(page); 132 + rc = uv_destroy_page(paddr); 133 + if (!rc) 134 + clear_bit(PG_arch_1, &page->flags); 135 + put_page(page); 136 + return rc; 137 + } 138 + 139 + /* 124 140 * Requests the Ultravisor to encrypt a guest page and make it 125 141 * accessible to the host for paging (export). 126 142 * ··· 153 137 if (uv_call(0, (u64)&uvcb)) 154 138 return -EINVAL; 155 139 return 0; 140 + } 141 + 142 + /* 143 + * The caller must already hold a reference to the page 144 + */ 145 + int uv_convert_owned_from_secure(unsigned long paddr) 146 + { 147 + struct page *page = phys_to_page(paddr); 148 + int rc; 149 + 150 + get_page(page); 151 + rc = uv_convert_from_secure(paddr); 152 + if (!rc) 153 + clear_bit(PG_arch_1, &page->flags); 154 + put_page(page); 155 + return rc; 156 + } 157 157 158 158 /* ··· 197 165 { 198 166 pte_t entry = READ_ONCE(*ptep); 199 167 struct page *page; 200 - int expected, rc = 0; 168 + int expected, cc = 0; 201 169 202 170 if (!pte_present(entry)) 203 171 return -ENXIO; ··· 213 181 if (!page_ref_freeze(page, expected)) 214 182 return -EBUSY; 215 183 set_bit(PG_arch_1, &page->flags); 216 - rc = uv_call(0, (u64)uvcb); 184 + /* 185 + * If the UVC does not succeed or fail immediately, we don't want to 186 + * loop for long, or we might get stall notifications. 187 + * On the other hand, this is a complex scenario and we are holding a lot of 188 + * locks, so we can't easily sleep and reschedule. We try only once, 189 + * and if the UVC returned busy or partial completion, we return 190 + * -EAGAIN and we let the callers deal with it. 191 + */ 192 + cc = __uv_call(0, (u64)uvcb); 217 193 page_ref_unfreeze(page, expected); 218 - /* Return -ENXIO if the page was not mapped, -EINVAL otherwise */ 219 - if (rc) 220 - rc = uvcb->rc == 0x10a ? -ENXIO : -EINVAL; 221 - return rc; 194 + /* 195 + * Return -ENXIO if the page was not mapped, -EINVAL for other errors. 196 + * If busy or partially completed, return -EAGAIN. 197 + */ 198 + if (cc == UVC_CC_OK) 199 + return 0; 200 + else if (cc == UVC_CC_BUSY || cc == UVC_CC_PARTIAL) 201 + return -EAGAIN; 202 + return uvcb->rc == 0x10a ? -ENXIO : -EINVAL; 222 203 } 223 204 224 205 /* ··· 257 212 uaddr = __gmap_translate(gmap, gaddr); 258 213 if (IS_ERR_VALUE(uaddr)) 259 214 goto out; 260 - vma = find_vma(gmap->mm, uaddr); 215 + vma = vma_lookup(gmap->mm, uaddr); 261 216 if (!vma) 262 217 goto out; 263 218 /* ··· 284 239 mmap_read_unlock(gmap->mm); 285 240 286 241 if (rc == -EAGAIN) { 242 + /* 243 + * If we are here because the UVC returned busy or partial 244 + * completion, this is just a useless check, but it is safe. 245 + */ 287 246 wait_on_page_writeback(page); 288 247 } else if (rc == -EBUSY) { 289 248 /*
+5
arch/s390/kvm/intercept.c
··· 518 518 */ 519 519 if (rc == -EINVAL) 520 520 return 0; 521 + /* 522 + * If we got -EAGAIN here, we simply return it. It will eventually 523 + * get propagated all the way to userspace, which should then try 524 + * again. 525 + */ 521 526 return rc; 522 527 } 523 528
+3 -2
arch/s390/kvm/interrupt.c
··· 3053 3053 int vcpu_idx, online_vcpus = atomic_read(&kvm->online_vcpus); 3054 3054 struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int; 3055 3055 struct kvm_vcpu *vcpu; 3056 + u8 vcpu_isc_mask; 3056 3057 3057 3058 for_each_set_bit(vcpu_idx, kvm->arch.idle_mask, online_vcpus) { 3058 3059 vcpu = kvm_get_vcpu(kvm, vcpu_idx); 3059 3060 if (psw_ioint_disabled(vcpu)) 3060 3061 continue; 3061 - deliverable_mask &= (u8)(vcpu->arch.sie_block->gcr[6] >> 24); 3062 - if (deliverable_mask) { 3062 + vcpu_isc_mask = (u8)(vcpu->arch.sie_block->gcr[6] >> 24); 3063 + if (deliverable_mask & vcpu_isc_mask) { 3063 3064 /* lately kicked but not yet running */ 3064 3065 if (test_and_set_bit(vcpu_idx, gi->kicked_mask)) 3065 3066 return;
+5 -3
arch/s390/kvm/kvm-s390.c
··· 2487 2487 case KVM_S390_PV_COMMAND: { 2488 2488 struct kvm_pv_cmd args; 2489 2489 2490 - /* protvirt means user sigp */ 2491 - kvm->arch.user_cpu_state_ctrl = 1; 2490 + /* protvirt means user cpu state */ 2491 + kvm_s390_set_user_cpu_state_ctrl(kvm); 2492 2492 r = 0; 2493 2493 if (!is_prot_virt_host()) { 2494 2494 r = -EINVAL; ··· 3363 3363 3364 3364 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) 3365 3365 { 3366 + clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask); 3366 3367 return kvm_s390_vcpu_has_irq(vcpu, 0); 3367 3368 } 3368 3369 ··· 3802 3801 vcpu_load(vcpu); 3803 3802 3804 3803 /* user space knows about this interface - let it control the state */ 3805 - vcpu->kvm->arch.user_cpu_state_ctrl = 1; 3804 + kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm); 3806 3805 3807 3806 switch (mp_state->mp_state) { 3808 3807 case KVM_MP_STATE_STOPPED: ··· 4255 4254 if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) { 4256 4255 vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318; 4257 4256 vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc; 4257 + VCPU_EVENT(vcpu, 3, "setting cpnc to %d", vcpu->arch.diag318_info.cpnc); 4258 4258 } 4259 4259 /* 4260 4260 * If userspace sets the riccb (e.g. after migration) to a valid state,
+9
arch/s390/kvm/kvm-s390.h
··· 208 208 return kvm->arch.user_cpu_state_ctrl != 0; 209 209 } 210 210 211 + static inline void kvm_s390_set_user_cpu_state_ctrl(struct kvm *kvm) 212 + { 213 + if (kvm->arch.user_cpu_state_ctrl) 214 + return; 215 + 216 + VM_EVENT(kvm, 3, "%s", "ENABLE: Userspace CPU state control"); 217 + kvm->arch.user_cpu_state_ctrl = 1; 218 + } 219 + 211 220 /* implemented in pv.c */ 212 221 int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc); 213 222 int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
+2
arch/s390/kvm/priv.c
··· 397 397 mmap_read_unlock(current->mm); 398 398 if (rc == -EFAULT) 399 399 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 400 + if (rc == -EAGAIN) 401 + continue; 400 402 if (rc < 0) 401 403 return rc; 402 404 start += PAGE_SIZE;
+10 -11
arch/s390/kvm/pv.c
··· 16 16 17 17 int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc) 18 18 { 19 - int cc = 0; 19 + int cc; 20 20 21 - if (kvm_s390_pv_cpu_get_handle(vcpu)) { 22 - cc = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu), 23 - UVC_CMD_DESTROY_SEC_CPU, rc, rrc); 21 + if (!kvm_s390_pv_cpu_get_handle(vcpu)) 22 + return 0; 24 23 25 - KVM_UV_EVENT(vcpu->kvm, 3, 26 - "PROTVIRT DESTROY VCPU %d: rc %x rrc %x", 27 - vcpu->vcpu_id, *rc, *rrc); 28 - WARN_ONCE(cc, "protvirt destroy cpu failed rc %x rrc %x", 29 - *rc, *rrc); 30 - } 24 + cc = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu), UVC_CMD_DESTROY_SEC_CPU, rc, rrc); 25 + 26 + KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT DESTROY VCPU %d: rc %x rrc %x", 27 + vcpu->vcpu_id, *rc, *rrc); 28 + WARN_ONCE(cc, "protvirt destroy cpu failed rc %x rrc %x", *rc, *rrc); 29 + 31 30 /* Intended memory leak for something that should never happen. */ 32 31 if (!cc) 33 32 free_pages(vcpu->arch.pv.stor_base, ··· 195 196 uvcb.conf_base_stor_origin = (u64)kvm->arch.pv.stor_base; 196 197 uvcb.conf_virt_stor_origin = (u64)kvm->arch.pv.stor_var; 197 198 198 - cc = uv_call(0, (u64)&uvcb); 199 + cc = uv_call_sched(0, (u64)&uvcb); 199 200 *rc = uvcb.header.rc; 200 201 *rrc = uvcb.header.rrc; 201 202 KVM_UV_EVENT(kvm, 3, "PROTVIRT CREATE VM: handle %llx len %llx rc %x rrc %x",
+1 -13
arch/s390/kvm/sigp.c
··· 151 151 static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter, 152 152 u64 *status_reg) 153 153 { 154 - unsigned int i; 155 - struct kvm_vcpu *v; 156 - bool all_stopped = true; 157 - 158 - kvm_for_each_vcpu(i, v, vcpu->kvm) { 159 - if (v == vcpu) 160 - continue; 161 - if (!is_vcpu_stopped(v)) 162 - all_stopped = false; 163 - } 164 - 165 154 *status_reg &= 0xffffffff00000000UL; 166 155 167 156 /* Reject set arch order, with czam we're always in z/Arch mode. */ 168 - *status_reg |= (all_stopped ? SIGP_STATUS_INVALID_PARAMETER : 169 - SIGP_STATUS_INCORRECT_STATE); 157 + *status_reg |= SIGP_STATUS_INVALID_PARAMETER; 170 158 return SIGP_CC_STATUS_STORED; 171 159 } 172 160
+12 -3
arch/s390/mm/gmap.c
··· 672 672 */ 673 673 void __gmap_zap(struct gmap *gmap, unsigned long gaddr) 674 674 { 675 + struct vm_area_struct *vma; 675 676 unsigned long vmaddr; 676 677 spinlock_t *ptl; 677 678 pte_t *ptep; ··· 682 681 gaddr >> PMD_SHIFT); 683 682 if (vmaddr) { 684 683 vmaddr |= gaddr & ~PMD_MASK; 684 + 685 + vma = vma_lookup(gmap->mm, vmaddr); 686 + if (!vma || is_vm_hugetlb_page(vma)) 687 + return; 688 + 685 689 /* Get pointer to the page table entry */ 686 690 ptep = get_locked_pte(gmap->mm, vmaddr, &ptl); 687 - if (likely(ptep)) 691 + if (likely(ptep)) { 688 692 ptep_zap_unused(gmap->mm, vmaddr, ptep, 0); 689 - pte_unmap_unlock(ptep, ptl); 693 + pte_unmap_unlock(ptep, ptl); 694 + } 690 695 } 691 696 } 692 697 EXPORT_SYMBOL_GPL(__gmap_zap); ··· 2684 2677 { 2685 2678 pte_t pte = READ_ONCE(*ptep); 2686 2679 2680 + /* There is a reference through the mapping */ 2687 2681 if (pte_present(pte)) 2688 - WARN_ON_ONCE(uv_destroy_page(pte_val(pte) & PAGE_MASK)); 2682 + WARN_ON_ONCE(uv_destroy_owned_page(pte_val(pte) & PAGE_MASK)); 2683 + 2689 2684 return 0; 2690 2685 } 2691 2686
+77 -32
arch/s390/mm/pgtable.c
··· 429 429 } 430 430 431 431 #ifdef CONFIG_PGSTE 432 - static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr) 432 + static int pmd_lookup(struct mm_struct *mm, unsigned long addr, pmd_t **pmdp) 433 433 { 434 + struct vm_area_struct *vma; 434 435 pgd_t *pgd; 435 436 p4d_t *p4d; 436 437 pud_t *pud; 437 - pmd_t *pmd; 438 + 439 + /* We need a valid VMA, otherwise this is clearly a fault. */ 440 + vma = vma_lookup(mm, addr); 441 + if (!vma) 442 + return -EFAULT; 438 443 439 444 pgd = pgd_offset(mm, addr); 440 - p4d = p4d_alloc(mm, pgd, addr); 441 - if (!p4d) 442 - return NULL; 443 - pud = pud_alloc(mm, p4d, addr); 444 - if (!pud) 445 - return NULL; 446 - pmd = pmd_alloc(mm, pud, addr); 447 - return pmd; 445 + if (!pgd_present(*pgd)) 446 + return -ENOENT; 447 + 448 + p4d = p4d_offset(pgd, addr); 449 + if (!p4d_present(*p4d)) 450 + return -ENOENT; 451 + 452 + pud = pud_offset(p4d, addr); 453 + if (!pud_present(*pud)) 454 + return -ENOENT; 455 + 456 + /* Large PUDs are not supported yet. */ 457 + if (pud_large(*pud)) 458 + return -EFAULT; 459 + 460 + *pmdp = pmd_offset(pud, addr); 461 + return 0; 448 462 } 449 463 #endif 450 464 ··· 792 778 pmd_t *pmdp; 793 779 pte_t *ptep; 794 780 795 - pmdp = pmd_alloc_map(mm, addr); 796 - if (unlikely(!pmdp)) 781 + /* 782 + * If we don't have a PTE table and if there is no huge page mapped, 783 + * we can ignore attempts to set the key to 0, because it already is 0. 784 + */ 785 + switch (pmd_lookup(mm, addr, &pmdp)) { 786 + case -ENOENT: 787 + return key ? -EFAULT : 0; 788 + case 0: 789 + break; 790 + default: 797 791 return -EFAULT; 792 + } 798 793 799 794 ptl = pmd_lock(mm, pmdp); 800 795 if (!pmd_present(*pmdp)) { 801 796 spin_unlock(ptl); 802 - return -EFAULT; 797 + return key ? -EFAULT : 0; 803 798 } 804 799 805 800 if (pmd_large(*pmdp)) { ··· 824 801 } 825 802 spin_unlock(ptl); 826 803 827 - ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl); 828 - if (unlikely(!ptep)) 829 - return -EFAULT; 830 - 804 + ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); 831 805 new = old = pgste_get_lock(ptep); 832 806 pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT | 833 807 PGSTE_ACC_BITS | PGSTE_FP_BIT); ··· 901 881 pte_t *ptep; 902 882 int cc = 0; 903 883 904 - pmdp = pmd_alloc_map(mm, addr); 905 - if (unlikely(!pmdp)) 884 + /* 885 + * If we don't have a PTE table and if there is no huge page mapped, 886 + * the storage key is 0 and there is nothing for us to do. 887 + */ 888 + switch (pmd_lookup(mm, addr, &pmdp)) { 889 + case -ENOENT: 890 + return 0; 891 + case 0: 892 + break; 893 + default: 906 894 return -EFAULT; 895 + } 907 896 908 897 ptl = pmd_lock(mm, pmdp); 909 898 if (!pmd_present(*pmdp)) { 910 899 spin_unlock(ptl); 911 - return -EFAULT; 900 + return 0; 912 901 } 913 902 914 903 if (pmd_large(*pmdp)) { ··· 929 900 } 930 901 spin_unlock(ptl); 931 902 932 - ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl); 933 - if (unlikely(!ptep)) 934 - return -EFAULT; 935 - 903 + ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); 936 904 new = old = pgste_get_lock(ptep); 937 905 /* Reset guest reference bit only */ 938 906 pgste_val(new) &= ~PGSTE_GR_BIT; ··· 961 935 pmd_t *pmdp; 962 936 pte_t *ptep; 963 937 964 - pmdp = pmd_alloc_map(mm, addr); 965 - if (unlikely(!pmdp)) 938 + /* 939 + * If we don't have a PTE table and if there is no huge page mapped, 940 + * the storage key is 0. 941 + */ 942 + *key = 0; 943 + 944 + switch (pmd_lookup(mm, addr, &pmdp)) { 945 + case -ENOENT: 946 + return 0; 947 + case 0: 948 + break; 949 + default: 966 950 return -EFAULT; 951 + } 967 952 968 953 ptl = pmd_lock(mm, pmdp); 969 954 if (!pmd_present(*pmdp)) { 970 - /* Not yet mapped memory has a zero key */ 971 955 spin_unlock(ptl); 972 - *key = 0; 973 956 return 0; 974 957 } ··· 991 956 } 992 957 spin_unlock(ptl); 993 958 994 - ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl); 995 - if (unlikely(!ptep)) 996 - return -EFAULT; 997 - 959 + ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); 998 960 pgste = pgste_get_lock(ptep); 999 961 *key = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56; 1000 962 paddr = pte_val(*ptep) & PAGE_MASK; ··· 1020 988 int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc, 1021 989 unsigned long *oldpte, unsigned long *oldpgste) 1022 990 { 991 + struct vm_area_struct *vma; 1023 992 unsigned long pgstev; 1024 993 spinlock_t *ptl; 1025 994 pgste_t pgste; ··· 1030 997 WARN_ON_ONCE(orc > ESSA_MAX); 1031 998 if (unlikely(orc > ESSA_MAX)) 1032 999 return -EINVAL; 1000 + 1001 + vma = vma_lookup(mm, hva); 1002 + if (!vma || is_vm_hugetlb_page(vma)) 1003 + return -EFAULT; 1033 1004 ptep = get_locked_pte(mm, hva, &ptl); 1034 1005 if (unlikely(!ptep)) 1035 1006 return -EFAULT; ··· 1126 1089 int set_pgste_bits(struct mm_struct *mm, unsigned long hva, 1127 1090 unsigned long bits, unsigned long value) 1128 1091 { 1092 + struct vm_area_struct *vma; 1129 1093 spinlock_t *ptl; 1130 1094 pgste_t new; 1131 1095 pte_t *ptep; 1132 1096 1097 + vma = vma_lookup(mm, hva); 1098 + if (!vma || is_vm_hugetlb_page(vma)) 1099 + return -EFAULT; 1133 1100 ptep = get_locked_pte(mm, hva, &ptl); 1134 1101 if (unlikely(!ptep)) 1135 1102 return -EFAULT; ··· 1158 1117 */ 1159 1118 int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep) 1160 1119 { 1120 + struct vm_area_struct *vma; 1161 1121 spinlock_t *ptl; 1162 1122 pte_t *ptep; 1163 1123 1124 + vma = vma_lookup(mm, hva); 1125 + if (!vma || is_vm_hugetlb_page(vma)) 1126 + return -EFAULT; 1164 1127 ptep = get_locked_pte(mm, hva, &ptl); 1165 1128 if (unlikely(!ptep)) 1166 1129 return -EFAULT;