Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'kvm-arm64/nv-xnx-haf' into kvmarm/next

* kvm-arm64/nv-xnx-haf: (22 commits)
: Support for FEAT_XNX and FEAT_HAF in nested
:
: Add support for a couple of MMU-related features that weren't
: implemented by KVM's software page table walk:
:
: - FEAT_XNX: Allows the hypervisor to describe execute permissions
: separately for EL0 and EL1
:
: - FEAT_HAF: Hardware update of the Access Flag, which in the context of
: nested means software walkers must also set the Access Flag.
:
: The series also adds some basic support for testing KVM's emulation of
: the AT instruction, including the implementation detail that AT sets the
: Access Flag in KVM.
KVM: arm64: at: Update AF on software walk only if VM has FEAT_HAFDBS
KVM: arm64: at: Use correct HA bit in TCR_EL2 when regime is EL2
KVM: arm64: Document KVM_PGTABLE_PROT_{UX,PX}
KVM: arm64: Fix spelling mistake "Unexpeced" -> "Unexpected"
KVM: arm64: Add break to default case in kvm_pgtable_stage2_pte_prot()
KVM: arm64: Add endian casting to kvm_swap_s[12]_desc()
KVM: arm64: Fix compilation when CONFIG_ARM64_USE_LSE_ATOMICS=n
KVM: arm64: selftests: Add test for AT emulation
KVM: arm64: nv: Expose hardware access flag management to NV guests
KVM: arm64: nv: Implement HW access flag management in stage-2 SW PTW
KVM: arm64: Implement HW access flag management in stage-1 SW PTW
KVM: arm64: Propagate PTW errors up to AT emulation
KVM: arm64: Add helper for swapping guest descriptor
KVM: arm64: nv: Use pgtable definitions in stage-2 walk
KVM: arm64: Handle endianness in read helper for emulated PTW
KVM: arm64: nv: Stop passing vCPU through void ptr in S2 PTW
KVM: arm64: Call helper for reading descriptors directly
KVM: arm64: nv: Advertise support for FEAT_XNX
KVM: arm64: Teach ptdump about FEAT_XNX permissions
KVM: arm64: nv: Forward FEAT_XNX permissions to the shadow stage-2
...

Signed-off-by: Oliver Upton <oupton@kernel.org>

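A note for orientation before the per-file changes: FEAT_XNX widens the stage-2 execute-never control from the single XN bit (bit 54) to the two-bit field desc[54:53], so EL1 and EL0 execute permission can differ. The table and decode below are a standalone plain-C illustration of that encoding, not the kernel's code; the in-tree equivalents are kvm_s2_trans_exec_el0()/kvm_s2_trans_exec_el1() in the kvm_nested.h hunk further down.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * Illustration only. Stage-2 XN[1:0] = desc[54:53]:
 *
 *   XN[1:0]   EL1 exec   EL0 exec
 *   0b00      yes        yes
 *   0b01      no         yes
 *   0b10      no         no
 *   0b11      yes        no
 *
 * Without FEAT_XNX only XN[1] (bit 54) is defined, and it applies to
 * both exception levels.
 */
static unsigned int s2_xn(uint64_t desc, bool has_xnx)
{
    unsigned int xn = (desc >> 53) & 0x3;

    if (!has_xnx)
        xn &= 0x2;    /* legacy behaviour: only bit 54 counts */
    return xn;
}

static bool s2_exec_el1(uint64_t desc, bool has_xnx)
{
    unsigned int xn = s2_xn(desc, has_xnx);

    return xn == 0x0 || xn == 0x3;
}

static bool s2_exec_el0(uint64_t desc, bool has_xnx)
{
    unsigned int xn = s2_xn(desc, has_xnx);

    return xn == 0x0 || xn == 0x1;
}

int main(void)
{
    /* XN = 0b11: executable at EL1 only, but only if FEAT_XNX exists */
    uint64_t desc = 3ull << 53;

    assert(s2_exec_el1(desc, true) && !s2_exec_el0(desc, true));
    assert(!s2_exec_el1(desc, false) && !s2_exec_el0(desc, false));
    return 0;
}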
+607 -95
+1
arch/arm64/include/asm/kvm_arm.h
···
  #define TCR_EL2_DS (1UL << 32)
  #define TCR_EL2_RES1 ((1U << 31) | (1 << 23))
  #define TCR_EL2_HPD (1 << 24)
+ #define TCR_EL2_HA (1 << 21)
  #define TCR_EL2_TBI (1 << 20)
  #define TCR_EL2_PS_SHIFT 16
  #define TCR_EL2_PS_MASK (7 << TCR_EL2_PS_SHIFT)
+3 -3
arch/arm64/include/asm/kvm_asm.h
···
  extern int __kvm_tlbi_s1e2(struct kvm_s2_mmu *mmu, u64 va, u64 sys_encoding);

  extern void __kvm_timer_set_cntvoff(u64 cntvoff);
- extern void __kvm_at_s1e01(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);
- extern void __kvm_at_s1e2(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);
- extern void __kvm_at_s12(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);
+ extern int __kvm_at_s1e01(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);
+ extern int __kvm_at_s1e2(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);
+ extern int __kvm_at_s12(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);

  extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+38 -2
arch/arm64/include/asm/kvm_nested.h
···
      return trans->writable;
  }

- static inline bool kvm_s2_trans_executable(struct kvm_s2_trans *trans)
+ static inline bool kvm_has_xnx(struct kvm *kvm)
  {
-     return !(trans->desc & BIT(54));
+     return cpus_have_final_cap(ARM64_HAS_XNX) &&
+            kvm_has_feat(kvm, ID_AA64MMFR1_EL1, XNX, IMP);
+ }
+
+ static inline bool kvm_s2_trans_exec_el0(struct kvm *kvm, struct kvm_s2_trans *trans)
+ {
+     u8 xn = FIELD_GET(KVM_PTE_LEAF_ATTR_HI_S2_XN, trans->desc);
+
+     if (!kvm_has_xnx(kvm))
+         xn &= FIELD_PREP(KVM_PTE_LEAF_ATTR_HI_S2_XN, 0b10);
+
+     switch (xn) {
+     case 0b00:
+     case 0b01:
+         return true;
+     default:
+         return false;
+     }
+ }
+
+ static inline bool kvm_s2_trans_exec_el1(struct kvm *kvm, struct kvm_s2_trans *trans)
+ {
+     u8 xn = FIELD_GET(KVM_PTE_LEAF_ATTR_HI_S2_XN, trans->desc);
+
+     if (!kvm_has_xnx(kvm))
+         xn &= FIELD_PREP(KVM_PTE_LEAF_ATTR_HI_S2_XN, 0b10);
+
+     switch (xn) {
+     case 0b00:
+     case 0b11:
+         return true;
+     default:
+         return false;
+     }
  }

  extern int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
···
      bool be;
      bool s2;
      bool pa52bit;
+     bool ha;
  };

  struct s1_walk_result {
···
      BUG_ON(__c >= NR_CPUS); \
      (FIX_VNCR - __c); \
  })
+
+ int __kvm_at_swap_desc(struct kvm *kvm, gpa_t ipa, u64 old, u64 new);

  #endif /* __ARM64_KVM_NESTED_H */
+12 -7
arch/arm64/include/asm/kvm_pgtable.h
···

  #define KVM_PTE_LEAF_ATTR_HI_S1_XN BIT(54)

- #define KVM_PTE_LEAF_ATTR_HI_S2_XN BIT(54)
+ #define KVM_PTE_LEAF_ATTR_HI_S2_XN GENMASK(54, 53)

  #define KVM_PTE_LEAF_ATTR_HI_S1_GP BIT(50)

···

  /**
   * enum kvm_pgtable_prot - Page-table permissions and attributes.
-  * @KVM_PGTABLE_PROT_X: Execute permission.
+  * @KVM_PGTABLE_PROT_UX: Unprivileged execute permission.
+  * @KVM_PGTABLE_PROT_PX: Privileged execute permission.
+  * @KVM_PGTABLE_PROT_X: Privileged and unprivileged execute permission.
   * @KVM_PGTABLE_PROT_W: Write permission.
   * @KVM_PGTABLE_PROT_R: Read permission.
   * @KVM_PGTABLE_PROT_DEVICE: Device attributes.
···
   * @KVM_PGTABLE_PROT_SW3: Software bit 3.
   */
  enum kvm_pgtable_prot {
-     KVM_PGTABLE_PROT_X = BIT(0),
-     KVM_PGTABLE_PROT_W = BIT(1),
-     KVM_PGTABLE_PROT_R = BIT(2),
+     KVM_PGTABLE_PROT_PX = BIT(0),
+     KVM_PGTABLE_PROT_UX = BIT(1),
+     KVM_PGTABLE_PROT_X = KVM_PGTABLE_PROT_PX |
+                          KVM_PGTABLE_PROT_UX,
+     KVM_PGTABLE_PROT_W = BIT(2),
+     KVM_PGTABLE_PROT_R = BIT(3),

-     KVM_PGTABLE_PROT_DEVICE = BIT(3),
-     KVM_PGTABLE_PROT_NORMAL_NC = BIT(4),
+     KVM_PGTABLE_PROT_DEVICE = BIT(4),
+     KVM_PGTABLE_PROT_NORMAL_NC = BIT(5),

      KVM_PGTABLE_PROT_SW0 = BIT(55),
      KVM_PGTABLE_PROT_SW1 = BIT(56),
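One point worth calling out in this hunk: KVM_PGTABLE_PROT_X is now defined as the union of the two new bits, so existing callers requesting PROT_X keep their old meaning while new callers can request one exception level at a time. A minimal self-contained model of the new layout (the PROT_* names and values here are illustrative stand-ins mirroring the hunk, not the kernel's enum):

#include <stdio.h>

enum prot {
    PROT_PX = 1u << 0,    /* privileged (EL1) execute */
    PROT_UX = 1u << 1,    /* unprivileged (EL0) execute */
    PROT_X  = PROT_PX | PROT_UX,
    PROT_W  = 1u << 2,
    PROT_R  = 1u << 3,
};

int main(void)
{
    enum prot p = PROT_R | PROT_UX;    /* readable, EL0-executable only */

    printf("EL1 exec: %d, EL0 exec: %d\n",
           !!(p & PROT_PX), !!(p & PROT_UX));
    return 0;
}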
+7
arch/arm64/kernel/cpufeature.c
···
      .capability = ARM64_HAS_GICV5_LEGACY,
      .matches = test_has_gicv5_legacy,
  },
+ {
+     .desc = "XNX",
+     .capability = ARM64_HAS_XNX,
+     .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+     .matches = has_cpuid_feature,
+     ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, XNX, IMP)
+ },
  {},
  };
+176 -20
arch/arm64/kvm/at.c
···

      wi->baddr &= GENMASK_ULL(wi->max_oa_bits - 1, x);

+     wi->ha = kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, HAFDBS, AF);
+     wi->ha &= (wi->regime == TR_EL2 ?
+                FIELD_GET(TCR_EL2_HA, tcr) :
+                FIELD_GET(TCR_HA, tcr));
+
      return 0;

  addrsz:
···
      return -EFAULT;
  }

+ static int kvm_read_s1_desc(struct kvm_vcpu *vcpu, u64 pa, u64 *desc,
+                             struct s1_walk_info *wi)
+ {
+     u64 val;
+     int r;
+
+     r = kvm_read_guest(vcpu->kvm, pa, &val, sizeof(val));
+     if (r)
+         return r;
+
+     if (wi->be)
+         *desc = be64_to_cpu((__force __be64)val);
+     else
+         *desc = le64_to_cpu((__force __le64)val);
+
+     return 0;
+ }
+
+ static int kvm_swap_s1_desc(struct kvm_vcpu *vcpu, u64 pa, u64 old, u64 new,
+                             struct s1_walk_info *wi)
+ {
+     if (wi->be) {
+         old = (__force u64)cpu_to_be64(old);
+         new = (__force u64)cpu_to_be64(new);
+     } else {
+         old = (__force u64)cpu_to_le64(old);
+         new = (__force u64)cpu_to_le64(new);
+     }
+
+     return __kvm_at_swap_desc(vcpu->kvm, pa, old, new);
+ }
+
  static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
                     struct s1_walk_result *wr, u64 va)
  {
-     u64 va_top, va_bottom, baddr, desc;
+     u64 va_top, va_bottom, baddr, desc, new_desc, ipa;
      int level, stride, ret;

      level = wi->sl;
···
      va_top = get_ia_size(wi) - 1;

      while (1) {
-         u64 index, ipa;
+         u64 index;

          va_bottom = (3 - level) * stride + wi->pgshift;
          index = (va & GENMASK_ULL(va_top, va_bottom)) >> (va_bottom - 3);
···
              return ret;
          }

-         ret = kvm_read_guest(vcpu->kvm, ipa, &desc, sizeof(desc));
+         ret = kvm_read_s1_desc(vcpu, ipa, &desc, wi);
          if (ret) {
              fail_s1_walk(wr, ESR_ELx_FSC_SEA_TTW(level), false);
              return ret;
          }

-         if (wi->be)
-             desc = be64_to_cpu((__force __be64)desc);
-         else
-             desc = le64_to_cpu((__force __le64)desc);
+         new_desc = desc;

          /* Invalid descriptor */
          if (!(desc & BIT(0)))
···
      baddr = desc_to_oa(wi, desc);
      if (check_output_size(baddr & GENMASK(52, va_bottom), wi))
          goto addrsz;
+
+     if (wi->ha)
+         new_desc |= PTE_AF;
+
+     if (new_desc != desc) {
+         ret = kvm_swap_s1_desc(vcpu, ipa, desc, new_desc, wi);
+         if (ret)
+             return ret;
+
+         desc = new_desc;
+     }

      if (!(desc & PTE_AF)) {
          fail_s1_walk(wr, ESR_ELx_FSC_ACCESS_L(level), false);
···
      wr->pr &= !pan;
  }

- static u64 handle_at_slow(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
+ static int handle_at_slow(struct kvm_vcpu *vcpu, u32 op, u64 vaddr, u64 *par)
  {
      struct s1_walk_result wr = {};
      struct s1_walk_info wi = {};
···

      srcu_read_unlock(&vcpu->kvm->srcu, idx);

+     /*
+      * Race to update a descriptor -- restart the walk.
+      */
+     if (ret == -EAGAIN)
+         return ret;
      if (ret)
          goto compute_par;
···
          fail_s1_walk(&wr, ESR_ELx_FSC_PERM_L(wr.level), false);

  compute_par:
-     return compute_par_s1(vcpu, &wi, &wr);
+     *par = compute_par_s1(vcpu, &wi, &wr);
+     return 0;
  }

  /*
···
             !(par & SYS_PAR_EL1_S));
  }

- void __kvm_at_s1e01(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
+ int __kvm_at_s1e01(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
  {
      u64 par = __kvm_at_s1e01_fast(vcpu, op, vaddr);
+     int ret;

      /*
       * If PAR_EL1 reports that AT failed on a S1 permission or access
···
       */
      if ((par & SYS_PAR_EL1_F) &&
          !par_check_s1_perm_fault(par) &&
-         !par_check_s1_access_fault(par))
-         par = handle_at_slow(vcpu, op, vaddr);
+         !par_check_s1_access_fault(par)) {
+         ret = handle_at_slow(vcpu, op, vaddr, &par);
+         if (ret)
+             return ret;
+     }

      vcpu_write_sys_reg(vcpu, par, PAR_EL1);
+     return 0;
  }

- void __kvm_at_s1e2(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
+ int __kvm_at_s1e2(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
  {
      u64 par;
+     int ret;

      /*
       * We've trapped, so everything is live on the CPU. As we will be
···
      }

      /* We failed the translation, let's replay it in slow motion */
-     if ((par & SYS_PAR_EL1_F) && !par_check_s1_perm_fault(par))
-         par = handle_at_slow(vcpu, op, vaddr);
+     if ((par & SYS_PAR_EL1_F) && !par_check_s1_perm_fault(par)) {
+         ret = handle_at_slow(vcpu, op, vaddr, &par);
+         if (ret)
+             return ret;
+     }

      vcpu_write_sys_reg(vcpu, par, PAR_EL1);
+     return 0;
  }

- void __kvm_at_s12(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
+ int __kvm_at_s12(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
  {
      struct kvm_s2_trans out = {};
      u64 ipa, par;
···
          break;
      default:
          WARN_ON_ONCE(1);
-         return;
+         return 0;
      }

      __kvm_at_s1e01(vcpu, op, vaddr);
      par = vcpu_read_sys_reg(vcpu, PAR_EL1);
      if (par & SYS_PAR_EL1_F)
-         return;
+         return 0;

      /*
       * If we only have a single stage of translation (EL2&0), exit
···
       */
      if (compute_translation_regime(vcpu, op) == TR_EL20 ||
          !(vcpu_read_sys_reg(vcpu, HCR_EL2) & (HCR_VM | HCR_DC)))
-         return;
+         return 0;

      /* Do the stage-2 translation */
      ipa = (par & GENMASK_ULL(47, 12)) | (vaddr & GENMASK_ULL(11, 0));
      out.esr = 0;
      ret = kvm_walk_nested_s2(vcpu, ipa, &out);
      if (ret < 0)
-         return;
+         return ret;

      /* Check the access permission */
      if (!out.esr &&
···

      par = compute_par_s12(vcpu, par, &out);
      vcpu_write_sys_reg(vcpu, par, PAR_EL1);
+     return 0;
  }

  /*
···
          /* Any other error... */
          return ret;
      }
+ }
+
+ #ifdef CONFIG_ARM64_LSE_ATOMICS
+ static int __lse_swap_desc(u64 __user *ptep, u64 old, u64 new)
+ {
+     u64 tmp = old;
+     int ret = 0;
+
+     uaccess_enable_privileged();
+
+     asm volatile(__LSE_PREAMBLE
+     "1: cas %[old], %[new], %[addr]\n"
+     "2:\n"
+     _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w[ret])
+     : [old] "+r" (old), [addr] "+Q" (*ptep), [ret] "+r" (ret)
+     : [new] "r" (new)
+     : "memory");
+
+     uaccess_disable_privileged();
+
+     if (ret)
+         return ret;
+     if (tmp != old)
+         return -EAGAIN;
+
+     return ret;
+ }
+ #else
+ static int __lse_swap_desc(u64 __user *ptep, u64 old, u64 new)
+ {
+     return -EINVAL;
+ }
+ #endif
+
+ static int __llsc_swap_desc(u64 __user *ptep, u64 old, u64 new)
+ {
+     int ret = 1;
+     u64 tmp;
+
+     uaccess_enable_privileged();
+
+     asm volatile("prfm pstl1strm, %[addr]\n"
+     "1: ldxr %[tmp], %[addr]\n"
+     "   sub %[tmp], %[tmp], %[old]\n"
+     "   cbnz %[tmp], 3f\n"
+     "2: stlxr %w[ret], %[new], %[addr]\n"
+     "3:\n"
+     _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %w[ret])
+     _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %w[ret])
+     : [ret] "+r" (ret), [addr] "+Q" (*ptep), [tmp] "=&r" (tmp)
+     : [old] "r" (old), [new] "r" (new)
+     : "memory");
+
+     uaccess_disable_privileged();
+
+     /* STLXR didn't update the descriptor, or the compare failed */
+     if (ret == 1)
+         return -EAGAIN;
+
+     return ret;
+ }
+
+ int __kvm_at_swap_desc(struct kvm *kvm, gpa_t ipa, u64 old, u64 new)
+ {
+     struct kvm_memory_slot *slot;
+     unsigned long hva;
+     u64 __user *ptep;
+     bool writable;
+     int offset;
+     gfn_t gfn;
+     int r;
+
+     lockdep_assert(srcu_read_lock_held(&kvm->srcu));
+
+     gfn = ipa >> PAGE_SHIFT;
+     offset = offset_in_page(ipa);
+     slot = gfn_to_memslot(kvm, gfn);
+     hva = gfn_to_hva_memslot_prot(slot, gfn, &writable);
+     if (kvm_is_error_hva(hva))
+         return -EINVAL;
+     if (!writable)
+         return -EPERM;
+
+     ptep = (u64 __user *)hva + offset;
+     if (cpus_have_final_cap(ARM64_HAS_LSE_ATOMICS))
+         r = __lse_swap_desc(ptep, old, new);
+     else
+         r = __llsc_swap_desc(ptep, old, new);
+
+     if (r < 0)
+         return r;
+
+     mark_page_dirty_in_slot(kvm, slot, gfn);
+     return 0;
  }
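The swap helpers added above give AT emulation a compare-and-swap on guest descriptors: LSE CAS where available, an LDXR/STLXR sequence otherwise, with -EAGAIN meaning the descriptor changed under the walker. A userspace model of the same contract using the GCC/Clang atomic builtins (illustration only; swap_desc is a hypothetical name):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int swap_desc(uint64_t *ptep, uint64_t old, uint64_t new)
{
    /*
     * Atomically replace *ptep with 'new' only if it still holds 'old'.
     * On failure the builtin refreshes 'old' with the current value,
     * which we discard: the caller is expected to restart the walk.
     */
    if (!__atomic_compare_exchange_n(ptep, &old, new, false,
                                     __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
        return -EAGAIN;

    return 0;
}

int main(void)
{
    uint64_t desc = 0x3;    /* valid descriptor, access flag (bit 10) clear */

    /* succeeds: descriptor still holds the expected value */
    printf("set AF: %d\n", swap_desc(&desc, 0x3, 0x3 | (1 << 10)));
    /* fails with -EAGAIN: 'old' is stale now that AF is set */
    printf("stale:  %d\n", swap_desc(&desc, 0x3, 0x0));
    return 0;
}

On -EAGAIN the kernel restarts rather than retries in place: handle_at_slow() propagates it up, and the mmu.c hunk below converts it into a return to the guest.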
+51 -8
arch/arm64/kvm/hyp/pgtable.c
···

  #define KVM_S2_MEMATTR(pgt, attr) PAGE_S2_MEMATTR(attr, stage2_has_fwb(pgt))

+ static int stage2_set_xn_attr(enum kvm_pgtable_prot prot, kvm_pte_t *attr)
+ {
+     bool px, ux;
+     u8 xn;
+
+     px = prot & KVM_PGTABLE_PROT_PX;
+     ux = prot & KVM_PGTABLE_PROT_UX;
+
+     if (!cpus_have_final_cap(ARM64_HAS_XNX) && px != ux)
+         return -EINVAL;
+
+     if (px && ux)
+         xn = 0b00;
+     else if (!px && ux)
+         xn = 0b01;
+     else if (!px && !ux)
+         xn = 0b10;
+     else
+         xn = 0b11;
+
+     *attr &= ~KVM_PTE_LEAF_ATTR_HI_S2_XN;
+     *attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_HI_S2_XN, xn);
+     return 0;
+ }
+
  static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot,
                                  kvm_pte_t *ptep)
  {
      kvm_pte_t attr;
      u32 sh = KVM_PTE_LEAF_ATTR_LO_S2_SH_IS;
+     int r;

      switch (prot & (KVM_PGTABLE_PROT_DEVICE |
                      KVM_PGTABLE_PROT_NORMAL_NC)) {
···
          attr = KVM_S2_MEMATTR(pgt, NORMAL);
      }

-     if (!(prot & KVM_PGTABLE_PROT_X))
-         attr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;
+     r = stage2_set_xn_attr(prot, &attr);
+     if (r)
+         return r;

      if (prot & KVM_PGTABLE_PROT_R)
          attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;
···
          prot |= KVM_PGTABLE_PROT_R;
      if (pte & KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W)
          prot |= KVM_PGTABLE_PROT_W;
-     if (!(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN))
-         prot |= KVM_PGTABLE_PROT_X;
+
+     switch (FIELD_GET(KVM_PTE_LEAF_ATTR_HI_S2_XN, pte)) {
+     case 0b00:
+         prot |= KVM_PGTABLE_PROT_PX | KVM_PGTABLE_PROT_UX;
+         break;
+     case 0b01:
+         prot |= KVM_PGTABLE_PROT_UX;
+         break;
+     case 0b11:
+         prot |= KVM_PGTABLE_PROT_PX;
+         break;
+     default:
+         break;
+     }

      return prot;
  }
···
  int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
                                     enum kvm_pgtable_prot prot, enum kvm_pgtable_walk_flags flags)
  {
-     int ret;
+     kvm_pte_t xn = 0, set = 0, clr = 0;
      s8 level;
-     kvm_pte_t set = 0, clr = 0;
+     int ret;

      if (prot & KVM_PTE_LEAF_ATTR_HI_SW)
          return -EINVAL;
···
      if (prot & KVM_PGTABLE_PROT_W)
          set |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;

-     if (prot & KVM_PGTABLE_PROT_X)
-         clr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;
+     ret = stage2_set_xn_attr(prot, &xn);
+     if (ret)
+         return ret;
+
+     set |= xn & KVM_PTE_LEAF_ATTR_HI_S2_XN;
+     clr |= ~xn & KVM_PTE_LEAF_ATTR_HI_S2_XN;

      ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level, flags);
      if (!ret || ret == -EAGAIN)
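A small subtlety in the kvm_pgtable_stage2_relax_perms() change: XN is now a two-bit field, so the code derives both a set mask and a clear mask from the encoded value rather than clearing a single bit. A minimal sketch of that masked update, with illustrative constants; the final line approximates what stage2_update_leaf_attrs() applies:

#include <assert.h>
#include <stdint.h>

#define XN_MASK (3ull << 53)

int main(void)
{
    uint64_t pte = 2ull << 53;    /* currently XN = 0b10 (never execute) */
    uint64_t xn  = 1ull << 53;    /* want    XN = 0b01 (EL0 execute)    */
    uint64_t set = xn & XN_MASK;
    uint64_t clr = ~xn & XN_MASK;

    /* clear the bits that must drop, then set the bits that must rise */
    pte = (pte & ~clr) | set;
    assert((pte & XN_MASK) == (1ull << 53));
    return 0;
}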
+23 -5
arch/arm64/kvm/mmu.c
···
          *prot |= kvm_encode_nested_level(nested);
  }

+ static void adjust_nested_exec_perms(struct kvm *kvm,
+                                      struct kvm_s2_trans *nested,
+                                      enum kvm_pgtable_prot *prot)
+ {
+     if (!kvm_s2_trans_exec_el0(kvm, nested))
+         *prot &= ~KVM_PGTABLE_PROT_UX;
+     if (!kvm_s2_trans_exec_el1(kvm, nested))
+         *prot &= ~KVM_PGTABLE_PROT_PX;
+ }
+
  #define KVM_PGTABLE_WALK_MEMABORT_FLAGS (KVM_PGTABLE_WALK_HANDLE_FAULT | KVM_PGTABLE_WALK_SHARED)

  static int gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
···
      if (writable)
          prot |= KVM_PGTABLE_PROT_W;

-     if (exec_fault ||
-         (cpus_have_final_cap(ARM64_HAS_CACHE_DIC) &&
-          (!nested || kvm_s2_trans_executable(nested))))
+     if (exec_fault || cpus_have_final_cap(ARM64_HAS_CACHE_DIC))
          prot |= KVM_PGTABLE_PROT_X;
+
+     if (nested)
+         adjust_nested_exec_perms(kvm, nested, &prot);

      kvm_fault_lock(kvm);
      if (mmu_invalidate_retry(kvm, mmu_seq)) {
···
          prot |= KVM_PGTABLE_PROT_NORMAL_NC;
      else
          prot |= KVM_PGTABLE_PROT_DEVICE;
- } else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC) &&
-            (!nested || kvm_s2_trans_executable(nested))) {
+ } else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC)) {
      prot |= KVM_PGTABLE_PROT_X;
  }
+
+ if (nested)
+     adjust_nested_exec_perms(kvm, nested, &prot);

  /*
   * Under the premise of getting a FSC_PERM fault, we just need to relax
···
      u32 esr;

      ret = kvm_walk_nested_s2(vcpu, fault_ipa, &nested_trans);
+     if (ret == -EAGAIN) {
+         ret = 1;
+         goto out_unlock;
+     }
+
      if (ret) {
          esr = kvm_s2_trans_esr(&nested_trans);
          kvm_inject_s2_fault(vcpu, esr);
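The -EAGAIN handling added to the abort path is worth spelling out: a failed compare-and-swap during the nested walk is not an error, and returning 1 from the fault handler sends the vCPU back to re-execute the faulting access, which restarts the walk from scratch. A toy, self-contained model of that contract (all names here are hypothetical):

#include <errno.h>
#include <stdio.h>

static int racy = 1;

/* Stub walker: the first attempt observes a racing descriptor update */
static int walk_nested_s2(unsigned long ipa)
{
    if (racy) {
        racy = 0;
        return -EAGAIN;
    }
    return 0;
}

static int handle_abort(unsigned long ipa)
{
    int ret = walk_nested_s2(ipa);

    if (ret == -EAGAIN)
        return 1;    /* back to the guest; the access is replayed */
    return ret;
}

int main(void)
{
    while (handle_abort(0x80000000UL) == 1)
        ;    /* the "guest" replays the faulting access */
    puts("translation completed after replay");
    return 0;
}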
+84 -39
arch/arm64/kvm/nested.c
···
  }

  struct s2_walk_info {
-     int (*read_desc)(phys_addr_t pa, u64 *desc, void *data);
-     void *data;
-     u64 baddr;
-     unsigned int max_oa_bits;
-     unsigned int pgshift;
-     unsigned int sl;
-     unsigned int t0sz;
-     bool be;
+     u64 baddr;
+     unsigned int max_oa_bits;
+     unsigned int pgshift;
+     unsigned int sl;
+     unsigned int t0sz;
+     bool be;
+     bool ha;
  };

  static u32 compute_fsc(int level, u32 fsc)
···
      return 0;
  }

+ static int read_guest_s2_desc(struct kvm_vcpu *vcpu, phys_addr_t pa, u64 *desc,
+                               struct s2_walk_info *wi)
+ {
+     u64 val;
+     int r;
+
+     r = kvm_read_guest(vcpu->kvm, pa, &val, sizeof(val));
+     if (r)
+         return r;
+
+     /*
+      * Handle reversed descriptors if endianness differs between the
+      * host and the guest hypervisor.
+      */
+     if (wi->be)
+         *desc = be64_to_cpu((__force __be64)val);
+     else
+         *desc = le64_to_cpu((__force __le64)val);
+
+     return 0;
+ }
+
+ static int swap_guest_s2_desc(struct kvm_vcpu *vcpu, phys_addr_t pa, u64 old, u64 new,
+                               struct s2_walk_info *wi)
+ {
+     if (wi->be) {
+         old = (__force u64)cpu_to_be64(old);
+         new = (__force u64)cpu_to_be64(new);
+     } else {
+         old = (__force u64)cpu_to_le64(old);
+         new = (__force u64)cpu_to_le64(new);
+     }
+
+     return __kvm_at_swap_desc(vcpu->kvm, pa, old, new);
+ }
+
  /*
   * This is essentially a C-version of the pseudo code from the ARM ARM
   * AArch64.TranslationTableWalk function. I strongly recommend looking at
···
   *
   * Must be called with the kvm->srcu read lock held
   */
- static int walk_nested_s2_pgd(phys_addr_t ipa,
+ static int walk_nested_s2_pgd(struct kvm_vcpu *vcpu, phys_addr_t ipa,
                                struct s2_walk_info *wi, struct kvm_s2_trans *out)
  {
      int first_block_level, level, stride, input_size, base_lower_bound;
      phys_addr_t base_addr;
      unsigned int addr_top, addr_bottom;
-     u64 desc; /* page table entry */
+     u64 desc, new_desc; /* page table entry */
      int ret;
      phys_addr_t paddr;
···
             >> (addr_bottom - 3);

      paddr = base_addr | index;
-     ret = wi->read_desc(paddr, &desc, wi->data);
+     ret = read_guest_s2_desc(vcpu, paddr, &desc, wi);
      if (ret < 0)
          return ret;

-     /*
-      * Handle reversed descriptors if endianness differs between the
-      * host and the guest hypervisor.
-      */
-     if (wi->be)
-         desc = be64_to_cpu((__force __be64)desc);
-     else
-         desc = le64_to_cpu((__force __le64)desc);
+     new_desc = desc;

      /* Check for valid descriptor at this point */
-     if (!(desc & 1) || ((desc & 3) == 1 && level == 3)) {
+     if (!(desc & KVM_PTE_VALID)) {
          out->esr = compute_fsc(level, ESR_ELx_FSC_FAULT);
          out->desc = desc;
          return 1;
      }

-     /* We're at the final level or block translation level */
-     if ((desc & 3) == 1 || level == 3)
+     if (FIELD_GET(KVM_PTE_TYPE, desc) == KVM_PTE_TYPE_BLOCK) {
+         if (level < 3)
+             break;
+
+         out->esr = compute_fsc(level, ESR_ELx_FSC_FAULT);
+         out->desc = desc;
+         return 1;
+     }
+
+     /* We're at the final level */
+     if (level == 3)
          break;

      if (check_output_size(wi, desc)) {
···
          return 1;
      }

-     if (!(desc & BIT(10))) {
+     if (wi->ha)
+         new_desc |= KVM_PTE_LEAF_ATTR_LO_S2_AF;
+
+     if (new_desc != desc) {
+         ret = swap_guest_s2_desc(vcpu, paddr, desc, new_desc, wi);
+         if (ret)
+             return ret;
+
+         desc = new_desc;
+     }
+
+     if (!(desc & KVM_PTE_LEAF_ATTR_LO_S2_AF)) {
          out->esr = compute_fsc(level, ESR_ELx_FSC_ACCESS);
          out->desc = desc;
          return 1;
···
          (ipa & GENMASK_ULL(addr_bottom - 1, 0));
      out->output = paddr;
      out->block_size = 1UL << ((3 - level) * stride + wi->pgshift);
-     out->readable = desc & (0b01 << 6);
-     out->writable = desc & (0b10 << 6);
+     out->readable = desc & KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;
+     out->writable = desc & KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;
      out->level = level;
      out->desc = desc;
      return 0;
- }
-
- static int read_guest_s2_desc(phys_addr_t pa, u64 *desc, void *data)
- {
-     struct kvm_vcpu *vcpu = data;
-
-     return kvm_read_guest(vcpu->kvm, pa, desc, sizeof(*desc));
  }

  static void vtcr_to_walk_info(u64 vtcr, struct s2_walk_info *wi)
···
      /* Global limit for now, should eventually be per-VM */
      wi->max_oa_bits = min(get_kvm_ipa_limit(),
                            ps_to_output_size(FIELD_GET(VTCR_EL2_PS_MASK, vtcr), false));
+
+     wi->ha = vtcr & VTCR_EL2_HA;
  }

  int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
···
      if (!vcpu_has_nv(vcpu))
          return 0;

-     wi.read_desc = read_guest_s2_desc;
-     wi.data = vcpu;
      wi.baddr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);

      vtcr_to_walk_info(vtcr, &wi);

      wi.be = vcpu_read_sys_reg(vcpu, SCTLR_EL2) & SCTLR_ELx_EE;

-     ret = walk_nested_s2_pgd(gipa, &wi, result);
+     ret = walk_nested_s2_pgd(vcpu, gipa, &wi, result);
      if (ret)
          result->esr |= (kvm_vcpu_get_esr(vcpu) & ~ESR_ELx_FSC);
···
          return 0;

      if (kvm_vcpu_trap_is_iabt(vcpu)) {
-         forward_fault = !kvm_s2_trans_executable(trans);
+         if (vcpu_mode_priv(vcpu))
+             forward_fault = !kvm_s2_trans_exec_el1(vcpu->kvm, trans);
+         else
+             forward_fault = !kvm_s2_trans_exec_el0(vcpu->kvm, trans);
      } else {
          bool write_fault = kvm_is_write_fault(vcpu);
···
      case SYS_ID_AA64MMFR1_EL1:
          val &= ~(ID_AA64MMFR1_EL1_CMOW |
                   ID_AA64MMFR1_EL1_nTLBPA |
-                  ID_AA64MMFR1_EL1_ETS |
-                  ID_AA64MMFR1_EL1_XNX |
-                  ID_AA64MMFR1_EL1_HAFDBS);
+                  ID_AA64MMFR1_EL1_ETS);
+
          /* FEAT_E2H0 implies no VHE */
          if (test_bit(KVM_ARM_VCPU_HAS_EL2_E2H0, kvm->arch.vcpu_features))
              val &= ~ID_AA64MMFR1_EL1_VH;
+
+         val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64MMFR1_EL1, HAFDBS, AF);
          break;

      case SYS_ID_AA64MMFR2_EL1:
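Note why swap_guest_s2_desc() (and its stage-1 twin in at.c) byte-swaps both values before calling __kvm_at_swap_desc(): the compare-and-swap operates on the raw in-memory descriptor, so for a big-endian guest hypervisor 'old' and 'new' must be converted first or the compare could never match. A host-side illustration (not kernel code; htobe64 is the glibc conversion helper):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t desc_cpu = 0x0000000080000403ull;    /* descriptor, CPU order */
    uint64_t desc_mem = htobe64(desc_cpu);        /* as a BE guest stores it */

    /* On a little-endian host these differ; the CAS must use desc_mem */
    printf("cpu order: %016llx\nmem order: %016llx\n",
           (unsigned long long)desc_cpu,
           (unsigned long long)desc_mem);
    return 0;
}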
+27 -8
arch/arm64/kvm/ptdump.c
···
          .val = PTE_VALID,
          .set = " ",
          .clear = "F",
-     }, {
+     },
+     {
          .mask = KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R,
          .val = KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R,
          .set = "R",
          .clear = " ",
-     }, {
+     },
+     {
          .mask = KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W,
          .val = KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W,
          .set = "W",
          .clear = " ",
-     }, {
+     },
+     {
          .mask = KVM_PTE_LEAF_ATTR_HI_S2_XN,
-         .val = KVM_PTE_LEAF_ATTR_HI_S2_XN,
-         .set = "NX",
-         .clear = "x ",
-     }, {
+         .val = 0b00UL << __bf_shf(KVM_PTE_LEAF_ATTR_HI_S2_XN),
+         .set = "px ux ",
+     },
+     {
+         .mask = KVM_PTE_LEAF_ATTR_HI_S2_XN,
+         .val = 0b01UL << __bf_shf(KVM_PTE_LEAF_ATTR_HI_S2_XN),
+         .set = "PXNux ",
+     },
+     {
+         .mask = KVM_PTE_LEAF_ATTR_HI_S2_XN,
+         .val = 0b10UL << __bf_shf(KVM_PTE_LEAF_ATTR_HI_S2_XN),
+         .set = "PXNUXN",
+     },
+     {
+         .mask = KVM_PTE_LEAF_ATTR_HI_S2_XN,
+         .val = 0b11UL << __bf_shf(KVM_PTE_LEAF_ATTR_HI_S2_XN),
+         .set = "px UXN",
+     },
+     {
          .mask = KVM_PTE_LEAF_ATTR_LO_S2_AF,
          .val = KVM_PTE_LEAF_ATTR_LO_S2_AF,
          .set = "AF",
          .clear = " ",
-     }, {
+     },
+     {
          .mask = PMD_TYPE_MASK,
          .val = PMD_TYPE_SECT,
          .set = "BLK",
+6 -3
arch/arm64/kvm/sys_regs.c
···
  {
      u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);

-     __kvm_at_s1e01(vcpu, op, p->regval);
+     if (__kvm_at_s1e01(vcpu, op, p->regval))
+         return false;

      return true;
  }
···
          return false;
      }

-     __kvm_at_s1e2(vcpu, op, p->regval);
+     if (__kvm_at_s1e2(vcpu, op, p->regval))
+         return false;

      return true;
  }
···
  {
      u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);

-     __kvm_at_s12(vcpu, op, p->regval);
+     if (__kvm_at_s12(vcpu, op, p->regval))
+         return false;

      return true;
  }
+1
arch/arm64/tools/cpucaps
···
  HAS_VA52
  HAS_VIRT_HOST_EXTN
  HAS_WFXT
+ HAS_XNX
  HAFT
  HW_DBM
  KVM_HVHE
+1
tools/testing/selftests/kvm/Makefile.kvm
···
  TEST_GEN_PROGS_arm64 = $(TEST_GEN_PROGS_COMMON)
  TEST_GEN_PROGS_arm64 += arm64/aarch32_id_regs
  TEST_GEN_PROGS_arm64 += arm64/arch_timer_edge_cases
+ TEST_GEN_PROGS_arm64 += arm64/at
  TEST_GEN_PROGS_arm64 += arm64/debug-exceptions
  TEST_GEN_PROGS_arm64 += arm64/hello_el2
  TEST_GEN_PROGS_arm64 += arm64/host_sve
+166
tools/testing/selftests/kvm/arm64/at.c
···
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+  * at - Test for KVM's AT emulation in the EL2&0 and EL1&0 translation regimes.
+  */
+ #include "kvm_util.h"
+ #include "processor.h"
+ #include "test_util.h"
+ #include "ucall.h"
+
+ #include <asm/sysreg.h>
+
+ #define TEST_ADDR 0x80000000
+
+ enum {
+     CLEAR_ACCESS_FLAG,
+     TEST_ACCESS_FLAG,
+ };
+
+ static u64 *ptep_hva;
+
+ #define copy_el2_to_el1(reg) \
+     write_sysreg_s(read_sysreg_s(SYS_##reg##_EL1), SYS_##reg##_EL12)
+
+ /* Yes, this is an ugly hack */
+ #define __at(op, addr) write_sysreg_s(addr, op)
+
+ #define test_at_insn(op, expect_fault) \
+ do { \
+     u64 par, fsc; \
+     bool fault; \
+ \
+     GUEST_SYNC(CLEAR_ACCESS_FLAG); \
+ \
+     __at(OP_AT_##op, TEST_ADDR); \
+     isb(); \
+     par = read_sysreg(par_el1); \
+ \
+     fault = par & SYS_PAR_EL1_F; \
+     fsc = FIELD_GET(SYS_PAR_EL1_FST, par); \
+ \
+     __GUEST_ASSERT((expect_fault) == fault, \
+                    "AT "#op": %sexpected fault (par: %lx)", \
+                    (expect_fault) ? "" : "un", par); \
+     if ((expect_fault)) { \
+         __GUEST_ASSERT(fsc == ESR_ELx_FSC_ACCESS_L(3), \
+                        "AT "#op": expected access flag fault (par: %lx)", \
+                        par); \
+     } else { \
+         GUEST_ASSERT_EQ(FIELD_GET(SYS_PAR_EL1_ATTR, par), MAIR_ATTR_NORMAL); \
+         GUEST_ASSERT_EQ(FIELD_GET(SYS_PAR_EL1_SH, par), PTE_SHARED >> 8); \
+         GUEST_ASSERT_EQ(par & SYS_PAR_EL1_PA, TEST_ADDR); \
+         GUEST_SYNC(TEST_ACCESS_FLAG); \
+     } \
+ } while (0)
+
+ static void test_at(bool expect_fault)
+ {
+     test_at_insn(S1E2R, expect_fault);
+     test_at_insn(S1E2W, expect_fault);
+
+     /* Reuse the stage-1 MMU context from EL2 at EL1 */
+     copy_el2_to_el1(SCTLR);
+     copy_el2_to_el1(MAIR);
+     copy_el2_to_el1(TCR);
+     copy_el2_to_el1(TTBR0);
+     copy_el2_to_el1(TTBR1);
+
+     /* Disable stage-2 translation and enter a non-host context */
+     write_sysreg(0, vtcr_el2);
+     write_sysreg(0, vttbr_el2);
+     sysreg_clear_set(hcr_el2, HCR_EL2_TGE | HCR_EL2_VM, 0);
+     isb();
+
+     test_at_insn(S1E1R, expect_fault);
+     test_at_insn(S1E1W, expect_fault);
+ }
+
+ static void guest_code(void)
+ {
+     sysreg_clear_set(tcr_el1, TCR_HA, 0);
+     isb();
+
+     test_at(true);
+
+     if (!SYS_FIELD_GET(ID_AA64MMFR1_EL1, HAFDBS, read_sysreg(id_aa64mmfr1_el1)))
+         GUEST_DONE();
+
+     /*
+      * KVM's software PTW makes the implementation choice that the AT
+      * instruction sets the access flag.
+      */
+     sysreg_clear_set(tcr_el1, 0, TCR_HA);
+     isb();
+     test_at(false);
+
+     GUEST_DONE();
+ }
+
+ static void handle_sync(struct kvm_vcpu *vcpu, struct ucall *uc)
+ {
+     switch (uc->args[1]) {
+     case CLEAR_ACCESS_FLAG:
+         /*
+          * Delete + reinstall the memslot to invalidate stage-2
+          * mappings of the stage-1 page tables, forcing KVM to
+          * use the 'slow' AT emulation path.
+          *
+          * This and clearing the access flag from host userspace
+          * ensures that the access flag cannot be set speculatively
+          * and is reliably cleared at the time of the AT instruction.
+          */
+         clear_bit(__ffs(PTE_AF), ptep_hva);
+         vm_mem_region_reload(vcpu->vm, vcpu->vm->memslots[MEM_REGION_PT]);
+         break;
+     case TEST_ACCESS_FLAG:
+         TEST_ASSERT(test_bit(__ffs(PTE_AF), ptep_hva),
+                     "Expected access flag to be set (desc: %lu)", *ptep_hva);
+         break;
+     default:
+         TEST_FAIL("Unexpected SYNC arg: %lu", uc->args[1]);
+     }
+ }
+
+ static void run_test(struct kvm_vcpu *vcpu)
+ {
+     struct ucall uc;
+
+     while (true) {
+         vcpu_run(vcpu);
+         switch (get_ucall(vcpu, &uc)) {
+         case UCALL_DONE:
+             return;
+         case UCALL_SYNC:
+             handle_sync(vcpu, &uc);
+             continue;
+         case UCALL_ABORT:
+             REPORT_GUEST_ASSERT(uc);
+             return;
+         default:
+             TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
+         }
+     }
+ }
+
+ int main(void)
+ {
+     struct kvm_vcpu_init init;
+     struct kvm_vcpu *vcpu;
+     struct kvm_vm *vm;
+
+     TEST_REQUIRE(kvm_check_cap(KVM_CAP_ARM_EL2));
+
+     vm = vm_create(1);
+
+     kvm_get_default_vcpu_target(vm, &init);
+     init.features[0] |= BIT(KVM_ARM_VCPU_HAS_EL2);
+     vcpu = aarch64_vcpu_add(vm, 0, &init, guest_code);
+     kvm_arch_vm_finalize_vcpus(vm);
+
+     virt_map(vm, TEST_ADDR, TEST_ADDR, 1);
+     ptep_hva = virt_get_pte_hva_at_level(vm, TEST_ADDR, 3);
+     run_test(vcpu);
+
+     kvm_vm_free(vm);
+     return 0;
+ }
+1
tools/testing/selftests/kvm/include/kvm_util.h
···
  #endif

  void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
+ void vm_mem_region_reload(struct kvm_vm *vm, uint32_t slot);
  void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
  void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
  struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
+10
tools/testing/selftests/kvm/lib/kvm_util.c
···
          ret, errno, slot, flags);
  }

+ void vm_mem_region_reload(struct kvm_vm *vm, uint32_t slot)
+ {
+     struct userspace_mem_region *region = memslot2region(vm, slot);
+     struct kvm_userspace_memory_region2 tmp = region->region;
+
+     tmp.memory_size = 0;
+     vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &tmp);
+     vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);
+ }
+
  /*
   * VM Memory Region Move
   *
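A sketch of how the new helper is intended to be used, mirroring handle_sync() in arm64/at.c above (assumption: code running inside an arm64 KVM selftest; force_slow_at_path is a hypothetical name). Setting memory_size to 0 in KVM_SET_USER_MEMORY_REGION2 deletes the slot, so the delete-plus-re-add forces KVM to drop its stage-2 mappings of the range and the next walk of the guest page tables takes the emulated path:

#include "kvm_util.h"

/* Minimal sketch: invalidate stage-2 mappings of the page-table memslot */
static void force_slow_at_path(struct kvm_vm *vm)
{
    /*
     * The reload deletes then re-adds the slot; stage-2 stays empty
     * until the next fault on the range.
     */
    vm_mem_region_reload(vm, vm->memslots[MEM_REGION_PT]);
}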