Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: arm64: Convert translation level parameter to s8

With the introduction of FEAT_LPA2, the Arm ARM adds a new level of
translation, level -1, so levels can now be in the range [-1;3]. 3 is
always the last level and the first level is determined based on the
number of VA bits in use.

Convert level variables to use a signed type in preparation for
supporting this new level -1.

Since the last level is always anchored at 3, and the first level varies
to suit the number of VA/IPA bits, take the opportunity to replace
KVM_PGTABLE_MAX_LEVELS with the 2 macros KVM_PGTABLE_FIRST_LEVEL and
KVM_PGTABLE_LAST_LEVEL. This removes the assumption from the code that
levels run from 0 to KVM_PGTABLE_MAX_LEVELS - 1, which will soon no
longer be true.

Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20231127111737.1897081-9-ryan.roberts@arm.com

authored by

Ryan Roberts and committed by
Marc Zyngier
419edf48 bd412e2a

+71 -61
+1 -1
arch/arm64/include/asm/kvm_emulate.h
··· 409 409 return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE; 410 410 } 411 411 412 - static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *vcpu) 412 + static __always_inline s8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *vcpu) 413 413 { 414 414 return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_LEVEL; 415 415 }
+16 -15
arch/arm64/include/asm/kvm_pgtable.h
··· 11 11 #include <linux/kvm_host.h> 12 12 #include <linux/types.h> 13 13 14 - #define KVM_PGTABLE_MAX_LEVELS 4U 14 + #define KVM_PGTABLE_FIRST_LEVEL 0 15 + #define KVM_PGTABLE_LAST_LEVEL 3 15 16 16 17 /* 17 18 * The largest supported block sizes for KVM (no 52-bit PA support): ··· 21 20 * - 64K (level 2): 512MB 22 21 */ 23 22 #ifdef CONFIG_ARM64_4K_PAGES 24 - #define KVM_PGTABLE_MIN_BLOCK_LEVEL 1U 23 + #define KVM_PGTABLE_MIN_BLOCK_LEVEL 1 25 24 #else 26 - #define KVM_PGTABLE_MIN_BLOCK_LEVEL 2U 25 + #define KVM_PGTABLE_MIN_BLOCK_LEVEL 2 27 26 #endif 28 27 29 28 #define kvm_lpa2_is_enabled() system_supports_lpa2() ··· 104 103 return __phys_to_pfn(kvm_pte_to_phys(pte)); 105 104 } 106 105 107 - static inline u64 kvm_granule_shift(u32 level) 106 + static inline u64 kvm_granule_shift(s8 level) 108 107 { 109 - /* Assumes KVM_PGTABLE_MAX_LEVELS is 4 */ 108 + /* Assumes KVM_PGTABLE_LAST_LEVEL is 3 */ 110 109 return ARM64_HW_PGTABLE_LEVEL_SHIFT(level); 111 110 } 112 111 113 - static inline u64 kvm_granule_size(u32 level) 112 + static inline u64 kvm_granule_size(s8 level) 114 113 { 115 114 return BIT(kvm_granule_shift(level)); 116 115 } 117 116 118 - static inline bool kvm_level_supports_block_mapping(u32 level) 117 + static inline bool kvm_level_supports_block_mapping(s8 level) 119 118 { 120 119 return level >= KVM_PGTABLE_MIN_BLOCK_LEVEL; 121 120 } 122 121 123 122 static inline u32 kvm_supported_block_sizes(void) 124 123 { 125 - u32 level = KVM_PGTABLE_MIN_BLOCK_LEVEL; 124 + s8 level = KVM_PGTABLE_MIN_BLOCK_LEVEL; 126 125 u32 r = 0; 127 126 128 - for (; level < KVM_PGTABLE_MAX_LEVELS; level++) 127 + for (; level <= KVM_PGTABLE_LAST_LEVEL; level++) 129 128 r |= BIT(kvm_granule_shift(level)); 130 129 131 130 return r; ··· 170 169 void* (*zalloc_page)(void *arg); 171 170 void* (*zalloc_pages_exact)(size_t size); 172 171 void (*free_pages_exact)(void *addr, size_t size); 173 - void (*free_unlinked_table)(void *addr, u32 level); 172 + void (*free_unlinked_table)(void *addr, s8 level); 174 173 void (*get_page)(void *addr); 175 174 void (*put_page)(void *addr); 176 175 int (*page_count)(void *addr); ··· 266 265 u64 start; 267 266 u64 addr; 268 267 u64 end; 269 - u32 level; 268 + s8 level; 270 269 enum kvm_pgtable_walk_flags flags; 271 270 }; 272 271 ··· 369 368 */ 370 369 struct kvm_pgtable { 371 370 u32 ia_bits; 372 - u32 start_level; 371 + s8 start_level; 373 372 kvm_pteref_t pgd; 374 373 struct kvm_pgtable_mm_ops *mm_ops; 375 374 ··· 503 502 * The page-table is assumed to be unreachable by any hardware walkers prior to 504 503 * freeing and therefore no TLB invalidation is performed. 505 504 */ 506 - void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level); 505 + void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level); 507 506 508 507 /** 509 508 * kvm_pgtable_stage2_create_unlinked() - Create an unlinked stage-2 paging structure. ··· 527 526 * an ERR_PTR(error) on failure. 528 527 */ 529 528 kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt, 530 - u64 phys, u32 level, 529 + u64 phys, s8 level, 531 530 enum kvm_pgtable_prot prot, 532 531 void *mc, bool force_pte); 533 532 ··· 753 752 * Return: 0 on success, negative error code on failure. 754 753 */ 755 754 int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr, 756 - kvm_pte_t *ptep, u32 *level); 755 + kvm_pte_t *ptep, s8 *level); 757 756 758 757 /** 759 758 * kvm_pgtable_stage2_pte_prot() - Retrieve the protection attributes of a
+3 -2
arch/arm64/include/asm/kvm_pkvm.h
··· 56 56 57 57 static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages) 58 58 { 59 - unsigned long total = 0, i; 59 + unsigned long total = 0; 60 + int i; 60 61 61 62 /* Provision the worst case scenario */ 62 - for (i = 0; i < KVM_PGTABLE_MAX_LEVELS; i++) { 63 + for (i = KVM_PGTABLE_FIRST_LEVEL; i <= KVM_PGTABLE_LAST_LEVEL; i++) { 63 64 nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE); 64 65 total += nr_pages; 65 66 }
+3 -3
arch/arm64/kvm/hyp/nvhe/mem_protect.c
··· 91 91 hyp_put_page(&host_s2_pool, addr); 92 92 } 93 93 94 - static void host_s2_free_unlinked_table(void *addr, u32 level) 94 + static void host_s2_free_unlinked_table(void *addr, s8 level) 95 95 { 96 96 kvm_pgtable_stage2_free_unlinked(&host_mmu.mm_ops, addr, level); 97 97 } ··· 443 443 { 444 444 struct kvm_mem_range cur; 445 445 kvm_pte_t pte; 446 - u32 level; 446 + s8 level; 447 447 int ret; 448 448 449 449 hyp_assert_lock_held(&host_mmu.lock); ··· 462 462 cur.start = ALIGN_DOWN(addr, granule); 463 463 cur.end = cur.start + granule; 464 464 level++; 465 - } while ((level < KVM_PGTABLE_MAX_LEVELS) && 465 + } while ((level <= KVM_PGTABLE_LAST_LEVEL) && 466 466 !(kvm_level_supports_block_mapping(level) && 467 467 range_included(&cur, range))); 468 468
+2 -2
arch/arm64/kvm/hyp/nvhe/mm.c
··· 260 260 * https://lore.kernel.org/kvm/20221017115209.2099-1-will@kernel.org/T/#mf10dfbaf1eaef9274c581b81c53758918c1d0f03 261 261 */ 262 262 dsb(ishst); 263 - __tlbi_level(vale2is, __TLBI_VADDR(addr, 0), (KVM_PGTABLE_MAX_LEVELS - 1)); 263 + __tlbi_level(vale2is, __TLBI_VADDR(addr, 0), KVM_PGTABLE_LAST_LEVEL); 264 264 dsb(ish); 265 265 isb(); 266 266 } ··· 275 275 { 276 276 struct hyp_fixmap_slot *slot = per_cpu_ptr(&fixmap_slots, (u64)ctx->arg); 277 277 278 - if (!kvm_pte_valid(ctx->old) || ctx->level != KVM_PGTABLE_MAX_LEVELS - 1) 278 + if (!kvm_pte_valid(ctx->old) || ctx->level != KVM_PGTABLE_LAST_LEVEL) 279 279 return -EINVAL; 280 280 281 281 slot->addr = ctx->addr;
+1 -1
arch/arm64/kvm/hyp/nvhe/setup.c
··· 181 181 if (!kvm_pte_valid(ctx->old)) 182 182 return 0; 183 183 184 - if (ctx->level != (KVM_PGTABLE_MAX_LEVELS - 1)) 184 + if (ctx->level != KVM_PGTABLE_LAST_LEVEL) 185 185 return -EINVAL; 186 186 187 187 phys = kvm_pte_to_phys(ctx->old);
+36 -30
arch/arm64/kvm/hyp/pgtable.c
··· 101 101 return IS_ALIGNED(ctx->addr, granule); 102 102 } 103 103 104 - static u32 kvm_pgtable_idx(struct kvm_pgtable_walk_data *data, u32 level) 104 + static u32 kvm_pgtable_idx(struct kvm_pgtable_walk_data *data, s8 level) 105 105 { 106 106 u64 shift = kvm_granule_shift(level); 107 107 u64 mask = BIT(PAGE_SHIFT - 3) - 1; ··· 117 117 return (addr & mask) >> shift; 118 118 } 119 119 120 - static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level) 120 + static u32 kvm_pgd_pages(u32 ia_bits, s8 start_level) 121 121 { 122 122 struct kvm_pgtable pgt = { 123 123 .ia_bits = ia_bits, ··· 127 127 return kvm_pgd_page_idx(&pgt, -1ULL) + 1; 128 128 } 129 129 130 - static bool kvm_pte_table(kvm_pte_t pte, u32 level) 130 + static bool kvm_pte_table(kvm_pte_t pte, s8 level) 131 131 { 132 - if (level == KVM_PGTABLE_MAX_LEVELS - 1) 132 + if (level == KVM_PGTABLE_LAST_LEVEL) 133 133 return false; 134 134 135 135 if (!kvm_pte_valid(pte)) ··· 157 157 return pte; 158 158 } 159 159 160 - static kvm_pte_t kvm_init_valid_leaf_pte(u64 pa, kvm_pte_t attr, u32 level) 160 + static kvm_pte_t kvm_init_valid_leaf_pte(u64 pa, kvm_pte_t attr, s8 level) 161 161 { 162 162 kvm_pte_t pte = kvm_phys_to_pte(pa); 163 - u64 type = (level == KVM_PGTABLE_MAX_LEVELS - 1) ? KVM_PTE_TYPE_PAGE : 164 - KVM_PTE_TYPE_BLOCK; 163 + u64 type = (level == KVM_PGTABLE_LAST_LEVEL) ? KVM_PTE_TYPE_PAGE : 164 + KVM_PTE_TYPE_BLOCK; 165 165 166 166 pte |= attr & (KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI); 167 167 pte |= FIELD_PREP(KVM_PTE_TYPE, type); ··· 206 206 } 207 207 208 208 static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data, 209 - struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, u32 level); 209 + struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, s8 level); 210 210 211 211 static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data, 212 212 struct kvm_pgtable_mm_ops *mm_ops, 213 - kvm_pteref_t pteref, u32 level) 213 + kvm_pteref_t pteref, s8 level) 214 214 { 215 215 enum kvm_pgtable_walk_flags flags = data->walker->flags; 216 216 kvm_pte_t *ptep = kvm_dereference_pteref(data->walker, pteref); ··· 275 275 } 276 276 277 277 static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data, 278 - struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, u32 level) 278 + struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, s8 level) 279 279 { 280 280 u32 idx; 281 281 int ret = 0; 282 282 283 - if (WARN_ON_ONCE(level >= KVM_PGTABLE_MAX_LEVELS)) 283 + if (WARN_ON_ONCE(level < KVM_PGTABLE_FIRST_LEVEL || 284 + level > KVM_PGTABLE_LAST_LEVEL)) 284 285 return -EINVAL; 285 286 286 287 for (idx = kvm_pgtable_idx(data, level); idx < PTRS_PER_PTE; ++idx) { ··· 344 343 345 344 struct leaf_walk_data { 346 345 kvm_pte_t pte; 347 - u32 level; 346 + s8 level; 348 347 }; 349 348 350 349 static int leaf_walker(const struct kvm_pgtable_visit_ctx *ctx, ··· 359 358 } 360 359 361 360 int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr, 362 - kvm_pte_t *ptep, u32 *level) 361 + kvm_pte_t *ptep, s8 *level) 363 362 { 364 363 struct leaf_walk_data data; 365 364 struct kvm_pgtable_walker walker = { ··· 472 471 if (hyp_map_walker_try_leaf(ctx, data)) 473 472 return 0; 474 473 475 - if (WARN_ON(ctx->level == KVM_PGTABLE_MAX_LEVELS - 1)) 474 + if (WARN_ON(ctx->level == KVM_PGTABLE_LAST_LEVEL)) 476 475 return -EINVAL; 477 476 478 477 childp = (kvm_pte_t *)mm_ops->zalloc_page(NULL); ··· 568 567 int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits, 569 568 struct kvm_pgtable_mm_ops *mm_ops) 570 569 { 571 - u64 levels = ARM64_HW_PGTABLE_LEVELS(va_bits); 570 + s8 start_level = KVM_PGTABLE_LAST_LEVEL + 1 - 571 + ARM64_HW_PGTABLE_LEVELS(va_bits); 572 + 573 + if (start_level < KVM_PGTABLE_FIRST_LEVEL || 574 + start_level > KVM_PGTABLE_LAST_LEVEL) 575 + return -EINVAL; 572 576 573 577 pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_page(NULL); 574 578 if (!pgt->pgd) 575 579 return -ENOMEM; 576 580 577 581 pgt->ia_bits = va_bits; 578 - pgt->start_level = KVM_PGTABLE_MAX_LEVELS - levels; 582 + pgt->start_level = start_level; 579 583 pgt->mm_ops = mm_ops; 580 584 pgt->mmu = NULL; 581 585 pgt->force_pte_cb = NULL; ··· 634 628 u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift) 635 629 { 636 630 u64 vtcr = VTCR_EL2_FLAGS; 637 - u8 lvls; 631 + s8 lvls; 638 632 639 633 vtcr |= kvm_get_parange(mmfr0) << VTCR_EL2_PS_SHIFT; 640 634 vtcr |= VTCR_EL2_T0SZ(phys_shift); ··· 917 911 { 918 912 u64 phys = stage2_map_walker_phys_addr(ctx, data); 919 913 920 - if (data->force_pte && (ctx->level < (KVM_PGTABLE_MAX_LEVELS - 1))) 914 + if (data->force_pte && ctx->level < KVM_PGTABLE_LAST_LEVEL) 921 915 return false; 922 916 923 917 return kvm_block_mapping_supported(ctx, phys); ··· 996 990 if (ret != -E2BIG) 997 991 return ret; 998 992 999 - if (WARN_ON(ctx->level == KVM_PGTABLE_MAX_LEVELS - 1)) 993 + if (WARN_ON(ctx->level == KVM_PGTABLE_LAST_LEVEL)) 1000 994 return -EINVAL; 1001 995 1002 996 if (!data->memcache) ··· 1166 1160 kvm_pte_t attr_set; 1167 1161 kvm_pte_t attr_clr; 1168 1162 kvm_pte_t pte; 1169 - u32 level; 1163 + s8 level; 1170 1164 }; 1171 1165 1172 1166 static int stage2_attr_walker(const struct kvm_pgtable_visit_ctx *ctx, ··· 1209 1203 static int stage2_update_leaf_attrs(struct kvm_pgtable *pgt, u64 addr, 1210 1204 u64 size, kvm_pte_t attr_set, 1211 1205 kvm_pte_t attr_clr, kvm_pte_t *orig_pte, 1212 - u32 *level, enum kvm_pgtable_walk_flags flags) 1206 + s8 *level, enum kvm_pgtable_walk_flags flags) 1213 1207 { 1214 1208 int ret; 1215 1209 kvm_pte_t attr_mask = KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI; ··· 1311 1305 enum kvm_pgtable_prot prot) 1312 1306 { 1313 1307 int ret; 1314 - u32 level; 1308 + s8 level; 1315 1309 kvm_pte_t set = 0, clr = 0; 1316 1310 1317 1311 if (prot & KVM_PTE_LEAF_ATTR_HI_SW) ··· 1364 1358 } 1365 1359 1366 1360 kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt, 1367 - u64 phys, u32 level, 1361 + u64 phys, s8 level, 1368 1362 enum kvm_pgtable_prot prot, 1369 1363 void *mc, bool force_pte) 1370 1364 { ··· 1422 1416 * fully populated tree up to the PTE entries. Note that @level is 1423 1417 * interpreted as in "level @level entry". 1424 1418 */ 1425 - static int stage2_block_get_nr_page_tables(u32 level) 1419 + static int stage2_block_get_nr_page_tables(s8 level) 1426 1420 { 1427 1421 switch (level) { 1428 1422 case 1: ··· 1433 1427 return 0; 1434 1428 default: 1435 1429 WARN_ON_ONCE(level < KVM_PGTABLE_MIN_BLOCK_LEVEL || 1436 - level >= KVM_PGTABLE_MAX_LEVELS); 1430 + level > KVM_PGTABLE_LAST_LEVEL); 1437 1431 return -EINVAL; 1438 1432 }; 1439 1433 } ··· 1446 1440 struct kvm_s2_mmu *mmu; 1447 1441 kvm_pte_t pte = ctx->old, new, *childp; 1448 1442 enum kvm_pgtable_prot prot; 1449 - u32 level = ctx->level; 1443 + s8 level = ctx->level; 1450 1444 bool force_pte; 1451 1445 int nr_pages; 1452 1446 u64 phys; 1453 1447 1454 1448 /* No huge-pages exist at the last level */ 1455 - if (level == KVM_PGTABLE_MAX_LEVELS - 1) 1449 + if (level == KVM_PGTABLE_LAST_LEVEL) 1456 1450 return 0; 1457 1451 1458 1452 /* We only split valid block mappings */ ··· 1529 1523 u64 vtcr = mmu->vtcr; 1530 1524 u32 ia_bits = VTCR_EL2_IPA(vtcr); 1531 1525 u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr); 1532 - u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0; 1526 + s8 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0; 1533 1527 1534 1528 pgd_sz = kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE; 1535 1529 pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_pages_exact(pgd_sz); ··· 1552 1546 { 1553 1547 u32 ia_bits = VTCR_EL2_IPA(vtcr); 1554 1548 u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr); 1555 - u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0; 1549 + s8 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0; 1556 1550 1557 1551 return kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE; 1558 1552 } ··· 1588 1582 pgt->pgd = NULL; 1589 1583 } 1590 1584 1591 - void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level) 1585 + void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level) 1592 1586 { 1593 1587 kvm_pteref_t ptep = (kvm_pteref_t)pgtable; 1594 1588 struct kvm_pgtable_walker walker = {
+9 -7
arch/arm64/kvm/mmu.c
··· 223 223 { 224 224 struct page *page = container_of(head, struct page, rcu_head); 225 225 void *pgtable = page_to_virt(page); 226 - u32 level = page_private(page); 226 + s8 level = page_private(page); 227 227 228 228 kvm_pgtable_stage2_free_unlinked(&kvm_s2_mm_ops, pgtable, level); 229 229 } 230 230 231 - static void stage2_free_unlinked_table(void *addr, u32 level) 231 + static void stage2_free_unlinked_table(void *addr, s8 level) 232 232 { 233 233 struct page *page = virt_to_page(addr); 234 234 ··· 804 804 struct kvm_pgtable pgt = { 805 805 .pgd = (kvm_pteref_t)kvm->mm->pgd, 806 806 .ia_bits = vabits_actual, 807 - .start_level = (KVM_PGTABLE_MAX_LEVELS - 808 - CONFIG_PGTABLE_LEVELS), 807 + .start_level = (KVM_PGTABLE_LAST_LEVEL - 808 + CONFIG_PGTABLE_LEVELS + 1), 809 809 .mm_ops = &kvm_user_mm_ops, 810 810 }; 811 811 unsigned long flags; 812 812 kvm_pte_t pte = 0; /* Keep GCC quiet... */ 813 - u32 level = ~0; 813 + s8 level = S8_MAX; 814 814 int ret; 815 815 816 816 /* ··· 829 829 * Not seeing an error, but not updating level? Something went 830 830 * deeply wrong... 831 831 */ 832 - if (WARN_ON(level >= KVM_PGTABLE_MAX_LEVELS)) 832 + if (WARN_ON(level > KVM_PGTABLE_LAST_LEVEL)) 833 + return -EFAULT; 834 + if (WARN_ON(level < KVM_PGTABLE_FIRST_LEVEL)) 833 835 return -EFAULT; 834 836 835 837 /* Oops, the userspace PTs are gone... Replay the fault */ ··· 1390 1388 gfn_t gfn; 1391 1389 kvm_pfn_t pfn; 1392 1390 bool logging_active = memslot_is_logging(memslot); 1393 - unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu); 1391 + s8 fault_level = kvm_vcpu_trap_get_fault_level(vcpu); 1394 1392 long vma_pagesize, fault_granule; 1395 1393 enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R; 1396 1394 struct kvm_pgtable *pgt;