Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch kvm-arm64/pkvm-fixed-features-6.14 into kvmarm-master/next

* kvm-arm64/pkvm-fixed-features-6.14: (24 commits)
: .
: Complete rework of the pKVM handling of features, catching up
: with how the rest of the code deals with it these days.
: Patches courtesy of Fuad Tabba. From the cover letter:
:
: "This patch series uses the vm's feature id registers to track the
: supported features, a framework similar to nested virt to set the
: trap values, and removes the need to store cptr_el2 per vcpu in
: favor of setting its value when traps are activated, as VHE mode
: does."
:
: This branch drags the arm64/for-next/cpufeature branch to solve
: ugly conflicts in -next.
: .
KVM: arm64: Fix FEAT_MTE in pKVM
KVM: arm64: Use kvm_vcpu_has_feature() directly for struct kvm
KVM: arm64: Convert the SVE guest vcpu flag to a vm flag
KVM: arm64: Remove PtrAuth guest vcpu flag
KVM: arm64: Fix the value of the CPTR_EL2 RES1 bitmask for nVHE
KVM: arm64: Refactor kvm_reset_cptr_el2()
KVM: arm64: Calculate cptr_el2 traps on activating traps
KVM: arm64: Remove redundant setting of HCR_EL2 trap bit
KVM: arm64: Remove fixed_config.h header
KVM: arm64: Rework specifying restricted features for protected VMs
KVM: arm64: Set protected VM traps based on its view of feature registers
KVM: arm64: Fix RAS trapping in pKVM for protected VMs
KVM: arm64: Initialize feature id registers for protected VMs
KVM: arm64: Use KVM extension checks for allowed protected VM capabilities
KVM: arm64: Remove KVM_ARM_VCPU_POWER_OFF from protected VMs allowed features in pKVM
KVM: arm64: Move checking protected vcpu features to a separate function
KVM: arm64: Group setting traps for protected VMs by control register
KVM: arm64: Consolidate allowed and restricted VM feature checks
arm64/sysreg: Get rid of CPACR_ELx SysregFields
arm64/sysreg: Convert *_EL12 accessors to Mapping
...

Signed-off-by: Marc Zyngier <maz@kernel.org>

# Conflicts:
# arch/arm64/kvm/fpsimd.c
# arch/arm64/kvm/hyp/nvhe/pkvm.c

+570 -826
+2
arch/arm64/include/asm/cpucaps.h
··· 46 46 return IS_ENABLED(CONFIG_ARM64_POE); 47 47 case ARM64_HAS_GCS: 48 48 return IS_ENABLED(CONFIG_ARM64_GCS); 49 + case ARM64_HAFT: 50 + return IS_ENABLED(CONFIG_ARM64_HAFT); 49 51 case ARM64_UNMAP_KERNEL_AT_EL0: 50 52 return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0); 51 53 case ARM64_WORKAROUND_843419:
+1 -2
arch/arm64/include/asm/cpufeature.h
··· 852 852 853 853 static inline bool system_supports_haft(void) 854 854 { 855 - return IS_ENABLED(CONFIG_ARM64_HAFT) && 856 - cpus_have_final_cap(ARM64_HAFT); 855 + return cpus_have_final_cap(ARM64_HAFT); 857 856 } 858 857 859 858 static __always_inline bool system_supports_mpam(void)
+3 -3
arch/arm64/include/asm/el2_setup.h
··· 154 154 /* Coprocessor traps */ 155 155 .macro __init_el2_cptr 156 156 __check_hvhe .LnVHE_\@, x1 157 - mov x0, #CPACR_ELx_FPEN 157 + mov x0, #CPACR_EL1_FPEN 158 158 msr cpacr_el1, x0 159 159 b .Lskip_set_cptr_\@ 160 160 .LnVHE_\@: ··· 332 332 333 333 // (h)VHE case 334 334 mrs x0, cpacr_el1 // Disable SVE traps 335 - orr x0, x0, #CPACR_ELx_ZEN 335 + orr x0, x0, #CPACR_EL1_ZEN 336 336 msr cpacr_el1, x0 337 337 b .Lskip_set_cptr_\@ 338 338 ··· 353 353 354 354 // (h)VHE case 355 355 mrs x0, cpacr_el1 // Disable SME traps 356 - orr x0, x0, #CPACR_ELx_SMEN 356 + orr x0, x0, #CPACR_EL1_SMEN 357 357 msr cpacr_el1, x0 358 358 b .Lskip_set_cptr_sme_\@ 359 359
+1 -3
arch/arm64/include/asm/kvm_arm.h
··· 300 300 #define CPTR_EL2_TSM (1 << 12) 301 301 #define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT) 302 302 #define CPTR_EL2_TZ (1 << 8) 303 - #define CPTR_NVHE_EL2_RES1 0x000032ff /* known RES1 bits in CPTR_EL2 (nVHE) */ 303 + #define CPTR_NVHE_EL2_RES1 (BIT(13) | BIT(9) | GENMASK(7, 0)) 304 304 #define CPTR_NVHE_EL2_RES0 (GENMASK(63, 32) | \ 305 305 GENMASK(29, 21) | \ 306 306 GENMASK(19, 14) | \ ··· 390 390 ECN(BREAKPT_LOW), ECN(BREAKPT_CUR), ECN(SOFTSTP_LOW), \ 391 391 ECN(SOFTSTP_CUR), ECN(WATCHPT_LOW), ECN(WATCHPT_CUR), \ 392 392 ECN(BKPT32), ECN(VECTOR32), ECN(BRK64), ECN(ERET) 393 - 394 - #define CPACR_EL1_TTA (1 << 28) 395 393 396 394 #define kvm_mode_names \ 397 395 { PSR_MODE_EL0t, "EL0t" }, \
+32 -37
arch/arm64/include/asm/kvm_emulate.h
··· 556 556 ({ \ 557 557 u64 cptr = 0; \ 558 558 \ 559 - if ((set) & CPACR_ELx_FPEN) \ 559 + if ((set) & CPACR_EL1_FPEN) \ 560 560 cptr |= CPTR_EL2_TFP; \ 561 - if ((set) & CPACR_ELx_ZEN) \ 561 + if ((set) & CPACR_EL1_ZEN) \ 562 562 cptr |= CPTR_EL2_TZ; \ 563 - if ((set) & CPACR_ELx_SMEN) \ 563 + if ((set) & CPACR_EL1_SMEN) \ 564 564 cptr |= CPTR_EL2_TSM; \ 565 - if ((clr) & CPACR_ELx_TTA) \ 565 + if ((clr) & CPACR_EL1_TTA) \ 566 566 cptr |= CPTR_EL2_TTA; \ 567 567 if ((clr) & CPTR_EL2_TAM) \ 568 568 cptr |= CPTR_EL2_TAM; \ ··· 576 576 ({ \ 577 577 u64 cptr = 0; \ 578 578 \ 579 - if ((clr) & CPACR_ELx_FPEN) \ 579 + if ((clr) & CPACR_EL1_FPEN) \ 580 580 cptr |= CPTR_EL2_TFP; \ 581 - if ((clr) & CPACR_ELx_ZEN) \ 581 + if ((clr) & CPACR_EL1_ZEN) \ 582 582 cptr |= CPTR_EL2_TZ; \ 583 - if ((clr) & CPACR_ELx_SMEN) \ 583 + if ((clr) & CPACR_EL1_SMEN) \ 584 584 cptr |= CPTR_EL2_TSM; \ 585 - if ((set) & CPACR_ELx_TTA) \ 585 + if ((set) & CPACR_EL1_TTA) \ 586 586 cptr |= CPTR_EL2_TTA; \ 587 587 if ((set) & CPTR_EL2_TAM) \ 588 588 cptr |= CPTR_EL2_TAM; \ ··· 595 595 #define cpacr_clear_set(clr, set) \ 596 596 do { \ 597 597 BUILD_BUG_ON((set) & CPTR_VHE_EL2_RES0); \ 598 - BUILD_BUG_ON((clr) & CPACR_ELx_E0POE); \ 599 - __build_check_all_or_none((clr), CPACR_ELx_FPEN); \ 600 - __build_check_all_or_none((set), CPACR_ELx_FPEN); \ 601 - __build_check_all_or_none((clr), CPACR_ELx_ZEN); \ 602 - __build_check_all_or_none((set), CPACR_ELx_ZEN); \ 603 - __build_check_all_or_none((clr), CPACR_ELx_SMEN); \ 604 - __build_check_all_or_none((set), CPACR_ELx_SMEN); \ 598 + BUILD_BUG_ON((clr) & CPACR_EL1_E0POE); \ 599 + __build_check_all_or_none((clr), CPACR_EL1_FPEN); \ 600 + __build_check_all_or_none((set), CPACR_EL1_FPEN); \ 601 + __build_check_all_or_none((clr), CPACR_EL1_ZEN); \ 602 + __build_check_all_or_none((set), CPACR_EL1_ZEN); \ 603 + __build_check_all_or_none((clr), CPACR_EL1_SMEN); \ 604 + __build_check_all_or_none((set), CPACR_EL1_SMEN); \ 605 605 \ 606 606 if (has_vhe() || 
has_hvhe()) \ 607 607 sysreg_clear_set(cpacr_el1, clr, set); \ ··· 619 619 write_sysreg(val, cptr_el2); 620 620 } 621 621 622 - static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu) 622 + /* Resets the value of cptr_el2 when returning to the host. */ 623 + static __always_inline void __kvm_reset_cptr_el2(struct kvm *kvm) 623 624 { 624 625 u64 val; 625 626 626 627 if (has_vhe()) { 627 - val = (CPACR_ELx_FPEN | CPACR_EL1_ZEN_EL1EN); 628 + val = (CPACR_EL1_FPEN | CPACR_EL1_ZEN_EL1EN); 628 629 if (cpus_have_final_cap(ARM64_SME)) 629 630 val |= CPACR_EL1_SMEN_EL1EN; 630 631 } else if (has_hvhe()) { 631 - val = CPACR_ELx_FPEN; 632 + val = CPACR_EL1_FPEN; 632 633 633 - if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs()) 634 - val |= CPACR_ELx_ZEN; 634 + if (!kvm_has_sve(kvm) || !guest_owns_fp_regs()) 635 + val |= CPACR_EL1_ZEN; 635 636 if (cpus_have_final_cap(ARM64_SME)) 636 - val |= CPACR_ELx_SMEN; 637 + val |= CPACR_EL1_SMEN; 637 638 } else { 638 639 val = CPTR_NVHE_EL2_RES1; 639 640 640 - if (vcpu_has_sve(vcpu) && guest_owns_fp_regs()) 641 + if (kvm_has_sve(kvm) && guest_owns_fp_regs()) 641 642 val |= CPTR_EL2_TZ; 642 - if (cpus_have_final_cap(ARM64_SME)) 643 - val &= ~CPTR_EL2_TSM; 643 + if (!cpus_have_final_cap(ARM64_SME)) 644 + val |= CPTR_EL2_TSM; 644 645 } 645 - 646 - return val; 647 - } 648 - 649 - static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu) 650 - { 651 - u64 val = kvm_get_reset_cptr_el2(vcpu); 652 646 653 647 kvm_write_cptr_el2(val); 654 648 } 649 + 650 + #ifdef __KVM_NVHE_HYPERVISOR__ 651 + #define kvm_reset_cptr_el2(v) __kvm_reset_cptr_el2(kern_hyp_va((v)->kvm)) 652 + #else 653 + #define kvm_reset_cptr_el2(v) __kvm_reset_cptr_el2((v)->kvm) 654 + #endif 655 655 656 656 /* 657 657 * Returns a 'sanitised' view of CPTR_EL2, translating from nVHE to the VHE ··· 685 685 #define __guest_hyp_cptr_xen_trap_enabled(vcpu, xen) \ 686 686 (!vcpu_has_nv(vcpu) ? 
false : \ 687 687 ____cptr_xen_trap_enabled(vcpu, \ 688 - SYS_FIELD_GET(CPACR_ELx, xen, \ 688 + SYS_FIELD_GET(CPACR_EL1, xen, \ 689 689 vcpu_sanitised_cptr_el2(vcpu)))) 690 690 691 691 static inline bool guest_hyp_fpsimd_traps_enabled(const struct kvm_vcpu *vcpu) ··· 696 696 static inline bool guest_hyp_sve_traps_enabled(const struct kvm_vcpu *vcpu) 697 697 { 698 698 return __guest_hyp_cptr_xen_trap_enabled(vcpu, ZEN); 699 - } 700 - 701 - static inline void kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu) 702 - { 703 - vcpu_set_flag(vcpu, GUEST_HAS_PTRAUTH); 704 699 } 705 700 #endif /* __ARM64_KVM_EMULATE_H__ */
+15 -10
arch/arm64/include/asm/kvm_host.h
··· 332 332 #define KVM_ARCH_FLAG_ID_REGS_INITIALIZED 7 333 333 /* Fine-Grained UNDEF initialised */ 334 334 #define KVM_ARCH_FLAG_FGU_INITIALIZED 8 335 + /* SVE exposed to guest */ 336 + #define KVM_ARCH_FLAG_GUEST_HAS_SVE 9 335 337 unsigned long flags; 336 338 337 339 /* VM-wide vCPU feature set */ ··· 724 722 u64 hcr_el2; 725 723 u64 hcrx_el2; 726 724 u64 mdcr_el2; 727 - u64 cptr_el2; 728 725 729 726 /* Exception Information */ 730 727 struct kvm_vcpu_fault_info fault; ··· 872 871 #define vcpu_set_flag(v, ...) __vcpu_set_flag((v), __VA_ARGS__) 873 872 #define vcpu_clear_flag(v, ...) __vcpu_clear_flag((v), __VA_ARGS__) 874 873 875 - /* SVE exposed to guest */ 876 - #define GUEST_HAS_SVE __vcpu_single_flag(cflags, BIT(0)) 874 + /* KVM_ARM_VCPU_INIT completed */ 875 + #define VCPU_INITIALIZED __vcpu_single_flag(cflags, BIT(0)) 877 876 /* SVE config completed */ 878 877 #define VCPU_SVE_FINALIZED __vcpu_single_flag(cflags, BIT(1)) 879 - /* PTRAUTH exposed to guest */ 880 - #define GUEST_HAS_PTRAUTH __vcpu_single_flag(cflags, BIT(2)) 881 - /* KVM_ARM_VCPU_INIT completed */ 882 - #define VCPU_INITIALIZED __vcpu_single_flag(cflags, BIT(3)) 883 878 884 879 /* Exception pending */ 885 880 #define PENDING_EXCEPTION __vcpu_single_flag(iflags, BIT(0)) ··· 956 959 KVM_GUESTDBG_USE_HW | \ 957 960 KVM_GUESTDBG_SINGLESTEP) 958 961 959 - #define vcpu_has_sve(vcpu) (system_supports_sve() && \ 960 - vcpu_get_flag(vcpu, GUEST_HAS_SVE)) 962 + #define kvm_has_sve(kvm) (system_supports_sve() && \ 963 + test_bit(KVM_ARCH_FLAG_GUEST_HAS_SVE, &(kvm)->arch.flags)) 964 + 965 + #ifdef __KVM_NVHE_HYPERVISOR__ 966 + #define vcpu_has_sve(vcpu) kvm_has_sve(kern_hyp_va((vcpu)->kvm)) 967 + #else 968 + #define vcpu_has_sve(vcpu) kvm_has_sve((vcpu)->kvm) 969 + #endif 961 970 962 971 #ifdef CONFIG_ARM64_PTR_AUTH 963 972 #define vcpu_has_ptrauth(vcpu) \ 964 973 ((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) || \ 965 974 cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) && \ 966 - vcpu_get_flag(vcpu, 
GUEST_HAS_PTRAUTH)) 975 + (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_ADDRESS) || \ 976 + vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_GENERIC))) 967 977 #else 968 978 #define vcpu_has_ptrauth(vcpu) false 969 979 #endif ··· 1436 1432 return test_bit(feature, ka->vcpu_features); 1437 1433 } 1438 1434 1435 + #define kvm_vcpu_has_feature(k, f) __vcpu_has_feature(&(k)->arch, (f)) 1439 1436 #define vcpu_has_feature(v, f) __vcpu_has_feature(&(v)->kvm->arch, (f)) 1440 1437 1441 1438 #define kvm_vcpu_initialized(v) vcpu_get_flag(vcpu, VCPU_INITIALIZED)
+4 -4
arch/arm64/include/asm/kvm_nested.h
··· 33 33 34 34 static inline u64 translate_cptr_el2_to_cpacr_el1(u64 cptr_el2) 35 35 { 36 - u64 cpacr_el1 = CPACR_ELx_RES1; 36 + u64 cpacr_el1 = CPACR_EL1_RES1; 37 37 38 38 if (cptr_el2 & CPTR_EL2_TTA) 39 - cpacr_el1 |= CPACR_ELx_TTA; 39 + cpacr_el1 |= CPACR_EL1_TTA; 40 40 if (!(cptr_el2 & CPTR_EL2_TFP)) 41 - cpacr_el1 |= CPACR_ELx_FPEN; 41 + cpacr_el1 |= CPACR_EL1_FPEN; 42 42 if (!(cptr_el2 & CPTR_EL2_TZ)) 43 - cpacr_el1 |= CPACR_ELx_ZEN; 43 + cpacr_el1 |= CPACR_EL1_ZEN; 44 44 45 45 cpacr_el1 |= cptr_el2 & (CPTR_EL2_TCPAC | CPTR_EL2_TAM); 46 46
+25
arch/arm64/include/asm/kvm_pkvm.h
··· 20 20 int pkvm_create_hyp_vm(struct kvm *kvm); 21 21 void pkvm_destroy_hyp_vm(struct kvm *kvm); 22 22 23 + /* 24 + * This functions as an allow-list of protected VM capabilities. 25 + * Features not explicitly allowed by this function are denied. 26 + */ 27 + static inline bool kvm_pvm_ext_allowed(long ext) 28 + { 29 + switch (ext) { 30 + case KVM_CAP_IRQCHIP: 31 + case KVM_CAP_ARM_PSCI: 32 + case KVM_CAP_ARM_PSCI_0_2: 33 + case KVM_CAP_NR_VCPUS: 34 + case KVM_CAP_MAX_VCPUS: 35 + case KVM_CAP_MAX_VCPU_ID: 36 + case KVM_CAP_MSI_DEVID: 37 + case KVM_CAP_ARM_VM_IPA_SIZE: 38 + case KVM_CAP_ARM_PMU_V3: 39 + case KVM_CAP_ARM_SVE: 40 + case KVM_CAP_ARM_PTRAUTH_ADDRESS: 41 + case KVM_CAP_ARM_PTRAUTH_GENERIC: 42 + return true; 43 + default: 44 + return false; 45 + } 46 + } 47 + 23 48 extern struct memblock_region kvm_nvhe_sym(hyp_memory)[]; 24 49 extern unsigned int kvm_nvhe_sym(hyp_memblock_nr); 25 50
+8 -9
arch/arm64/kernel/cpufeature.c
··· 1004 1004 /* Override was valid */ 1005 1005 ftr_new = tmp; 1006 1006 str = "forced"; 1007 - } else if (ftr_ovr == tmp) { 1007 + } else { 1008 1008 /* Override was the safe value */ 1009 1009 str = "already set"; 1010 1010 } 1011 1011 1012 - if (str) 1013 - pr_warn("%s[%d:%d]: %s to %llx\n", 1014 - reg->name, 1015 - ftrp->shift + ftrp->width - 1, 1016 - ftrp->shift, str, 1017 - tmp & (BIT(ftrp->width) - 1)); 1012 + pr_warn("%s[%d:%d]: %s to %llx\n", 1013 + reg->name, 1014 + ftrp->shift + ftrp->width - 1, 1015 + ftrp->shift, str, 1016 + tmp & (BIT(ftrp->width) - 1)); 1018 1017 } else if ((ftr_mask & reg->override->val) == ftr_mask) { 1019 1018 reg->override->val &= ~ftr_mask; 1020 1019 pr_warn("%s[%d:%d]: impossible override, ignored\n", ··· 2375 2376 #ifdef CONFIG_ARM64_POE 2376 2377 static void cpu_enable_poe(const struct arm64_cpu_capabilities *__unused) 2377 2378 { 2378 - sysreg_clear_set(REG_TCR2_EL1, 0, TCR2_EL1x_E0POE); 2379 - sysreg_clear_set(CPACR_EL1, 0, CPACR_ELx_E0POE); 2379 + sysreg_clear_set(REG_TCR2_EL1, 0, TCR2_EL1_E0POE); 2380 + sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_E0POE); 2380 2381 } 2381 2382 #endif 2382 2383
+2 -28
arch/arm64/kvm/arm.c
··· 80 80 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; 81 81 } 82 82 83 - /* 84 - * This functions as an allow-list of protected VM capabilities. 85 - * Features not explicitly allowed by this function are denied. 86 - */ 87 - static bool pkvm_ext_allowed(struct kvm *kvm, long ext) 88 - { 89 - switch (ext) { 90 - case KVM_CAP_IRQCHIP: 91 - case KVM_CAP_ARM_PSCI: 92 - case KVM_CAP_ARM_PSCI_0_2: 93 - case KVM_CAP_NR_VCPUS: 94 - case KVM_CAP_MAX_VCPUS: 95 - case KVM_CAP_MAX_VCPU_ID: 96 - case KVM_CAP_MSI_DEVID: 97 - case KVM_CAP_ARM_VM_IPA_SIZE: 98 - case KVM_CAP_ARM_PMU_V3: 99 - case KVM_CAP_ARM_SVE: 100 - case KVM_CAP_ARM_PTRAUTH_ADDRESS: 101 - case KVM_CAP_ARM_PTRAUTH_GENERIC: 102 - return true; 103 - default: 104 - return false; 105 - } 106 - } 107 - 108 83 int kvm_vm_ioctl_enable_cap(struct kvm *kvm, 109 84 struct kvm_enable_cap *cap) 110 85 { ··· 88 113 if (cap->flags) 89 114 return -EINVAL; 90 115 91 - if (kvm_vm_is_protected(kvm) && !pkvm_ext_allowed(kvm, cap->cap)) 116 + if (kvm_vm_is_protected(kvm) && !kvm_pvm_ext_allowed(cap->cap)) 92 117 return -EINVAL; 93 118 94 119 switch (cap->cap) { ··· 286 311 { 287 312 int r; 288 313 289 - if (kvm && kvm_vm_is_protected(kvm) && !pkvm_ext_allowed(kvm, ext)) 314 + if (kvm && kvm_vm_is_protected(kvm) && !kvm_pvm_ext_allowed(ext)) 290 315 return 0; 291 316 292 317 switch (ext) { ··· 1559 1584 } 1560 1585 1561 1586 vcpu_reset_hcr(vcpu); 1562 - vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu); 1563 1587 1564 1588 /* 1565 1589 * Handle the "start in power-off" case.
+3 -3
arch/arm64/kvm/at.c
··· 111 111 return vcpu_read_sys_reg(vcpu, TCR2_EL2) & TCR2_EL2_PIE; 112 112 case TR_EL10: 113 113 return (__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_TCR2En) && 114 - (__vcpu_sys_reg(vcpu, TCR2_EL1) & TCR2_EL1x_PIE); 114 + (__vcpu_sys_reg(vcpu, TCR2_EL1) & TCR2_EL1_PIE); 115 115 default: 116 116 BUG(); 117 117 } ··· 140 140 } 141 141 142 142 val = __vcpu_sys_reg(vcpu, TCR2_EL1); 143 - wi->poe = val & TCR2_EL1x_POE; 144 - wi->e0poe = val & TCR2_EL1x_E0POE; 143 + wi->poe = val & TCR2_EL1_POE; 144 + wi->e0poe = val & TCR2_EL1_E0POE; 145 145 } 146 146 } 147 147
+1 -1
arch/arm64/kvm/emulate-nested.c
··· 494 494 if (!vcpu_el2_e2h_is_set(vcpu)) 495 495 val = translate_cptr_el2_to_cpacr_el1(val); 496 496 497 - if (val & CPACR_ELx_TTA) 497 + if (val & CPACR_EL1_TTA) 498 498 return BEHAVE_FORWARD_RW; 499 499 500 500 return BEHAVE_HANDLE_LOCALLY;
+1 -1
arch/arm64/kvm/fpsimd.c
··· 169 169 if (has_vhe() && system_supports_sme()) { 170 170 /* Also restore EL0 state seen on entry */ 171 171 if (host_data_test_flag(HOST_SME_ENABLED)) 172 - sysreg_clear_set(CPACR_EL1, 0, CPACR_ELx_SMEN); 172 + sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_SMEN); 173 173 else 174 174 sysreg_clear_set(CPACR_EL1, 175 175 CPACR_EL1_SMEN_EL0EN,
+2 -2
arch/arm64/kvm/hyp/include/hyp/switch.h
··· 419 419 420 420 /* First disable enough traps to allow us to update the registers */ 421 421 if (sve_guest || (is_protected_kvm_enabled() && system_supports_sve())) 422 - cpacr_clear_set(0, CPACR_ELx_FPEN | CPACR_ELx_ZEN); 422 + cpacr_clear_set(0, CPACR_EL1_FPEN | CPACR_EL1_ZEN); 423 423 else 424 - cpacr_clear_set(0, CPACR_ELx_FPEN); 424 + cpacr_clear_set(0, CPACR_EL1_FPEN); 425 425 isb(); 426 426 427 427 /* Write out the host state if it's in the registers */
-223
arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0-only */ 2 - /* 3 - * Copyright (C) 2021 Google LLC 4 - * Author: Fuad Tabba <tabba@google.com> 5 - */ 6 - 7 - #ifndef __ARM64_KVM_FIXED_CONFIG_H__ 8 - #define __ARM64_KVM_FIXED_CONFIG_H__ 9 - 10 - #include <asm/sysreg.h> 11 - 12 - /* 13 - * This file contains definitions for features to be allowed or restricted for 14 - * guest virtual machines, depending on the mode KVM is running in and on the 15 - * type of guest that is running. 16 - * 17 - * The ALLOW masks represent a bitmask of feature fields that are allowed 18 - * without any restrictions as long as they are supported by the system. 19 - * 20 - * The RESTRICT_UNSIGNED masks, if present, represent unsigned fields for 21 - * features that are restricted to support at most the specified feature. 22 - * 23 - * If a feature field is not present in either, than it is not supported. 24 - * 25 - * The approach taken for protected VMs is to allow features that are: 26 - * - Needed by common Linux distributions (e.g., floating point) 27 - * - Trivial to support, e.g., supporting the feature does not introduce or 28 - * require tracking of additional state in KVM 29 - * - Cannot be trapped or prevent the guest from using anyway 30 - */ 31 - 32 - /* 33 - * Allow for protected VMs: 34 - * - Floating-point and Advanced SIMD 35 - * - Data Independent Timing 36 - * - Spectre/Meltdown Mitigation 37 - */ 38 - #define PVM_ID_AA64PFR0_ALLOW (\ 39 - ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP) | \ 40 - ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD) | \ 41 - ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) | \ 42 - ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) | \ 43 - ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3) \ 44 - ) 45 - 46 - /* 47 - * Restrict to the following *unsigned* features for protected VMs: 48 - * - AArch64 guests only (no support for AArch32 guests): 49 - * AArch32 adds complexity in trap handling, emulation, condition codes, 50 - * etc... 
51 - * - RAS (v1) 52 - * Supported by KVM 53 - */ 54 - #define PVM_ID_AA64PFR0_RESTRICT_UNSIGNED (\ 55 - SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL0, IMP) | \ 56 - SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL1, IMP) | \ 57 - SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL2, IMP) | \ 58 - SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL3, IMP) | \ 59 - SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, RAS, IMP) \ 60 - ) 61 - 62 - /* 63 - * Allow for protected VMs: 64 - * - Branch Target Identification 65 - * - Speculative Store Bypassing 66 - */ 67 - #define PVM_ID_AA64PFR1_ALLOW (\ 68 - ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_BT) | \ 69 - ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SSBS) \ 70 - ) 71 - 72 - #define PVM_ID_AA64PFR2_ALLOW 0ULL 73 - 74 - /* 75 - * Allow for protected VMs: 76 - * - Mixed-endian 77 - * - Distinction between Secure and Non-secure Memory 78 - * - Mixed-endian at EL0 only 79 - * - Non-context synchronizing exception entry and exit 80 - */ 81 - #define PVM_ID_AA64MMFR0_ALLOW (\ 82 - ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_BIGEND) | \ 83 - ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_SNSMEM) | \ 84 - ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_BIGENDEL0) | \ 85 - ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_EXS) \ 86 - ) 87 - 88 - /* 89 - * Restrict to the following *unsigned* features for protected VMs: 90 - * - 40-bit IPA 91 - * - 16-bit ASID 92 - */ 93 - #define PVM_ID_AA64MMFR0_RESTRICT_UNSIGNED (\ 94 - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_PARANGE), ID_AA64MMFR0_EL1_PARANGE_40) | \ 95 - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_ASIDBITS), ID_AA64MMFR0_EL1_ASIDBITS_16) \ 96 - ) 97 - 98 - /* 99 - * Allow for protected VMs: 100 - * - Hardware translation table updates to Access flag and Dirty state 101 - * - Number of VMID bits from CPU 102 - * - Hierarchical Permission Disables 103 - * - Privileged Access Never 104 - * - SError interrupt exceptions from speculative reads 105 - * - Enhanced Translation Synchronization 106 - * - Control for cache maintenance permission 107 - */ 108 - #define 
PVM_ID_AA64MMFR1_ALLOW (\ 109 - ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_HAFDBS) | \ 110 - ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_VMIDBits) | \ 111 - ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_HPDS) | \ 112 - ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_PAN) | \ 113 - ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_SpecSEI) | \ 114 - ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_ETS) | \ 115 - ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_CMOW) \ 116 - ) 117 - 118 - /* 119 - * Allow for protected VMs: 120 - * - Common not Private translations 121 - * - User Access Override 122 - * - IESB bit in the SCTLR_ELx registers 123 - * - Unaligned single-copy atomicity and atomic functions 124 - * - ESR_ELx.EC value on an exception by read access to feature ID space 125 - * - TTL field in address operations. 126 - * - Break-before-make sequences when changing translation block size 127 - * - E0PDx mechanism 128 - */ 129 - #define PVM_ID_AA64MMFR2_ALLOW (\ 130 - ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_CnP) | \ 131 - ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_UAO) | \ 132 - ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_IESB) | \ 133 - ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_AT) | \ 134 - ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_IDS) | \ 135 - ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_TTL) | \ 136 - ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_BBM) | \ 137 - ARM64_FEATURE_MASK(ID_AA64MMFR2_EL1_E0PD) \ 138 - ) 139 - 140 - #define PVM_ID_AA64MMFR3_ALLOW (0ULL) 141 - 142 - /* 143 - * No support for Scalable Vectors for protected VMs: 144 - * Requires additional support from KVM, e.g., context-switching and 145 - * trapping at EL2 146 - */ 147 - #define PVM_ID_AA64ZFR0_ALLOW (0ULL) 148 - 149 - /* 150 - * No support for debug, including breakpoints, and watchpoints for protected 151 - * VMs: 152 - * The Arm architecture mandates support for at least the Armv8 debug 153 - * architecture, which would include at least 2 hardware breakpoints and 154 - * watchpoints. Providing that support to protected guests adds 155 - * considerable state and complexity. 
Therefore, the reserved value of 0 is 156 - * used for debug-related fields. 157 - */ 158 - #define PVM_ID_AA64DFR0_ALLOW (0ULL) 159 - #define PVM_ID_AA64DFR1_ALLOW (0ULL) 160 - 161 - /* 162 - * No support for implementation defined features. 163 - */ 164 - #define PVM_ID_AA64AFR0_ALLOW (0ULL) 165 - #define PVM_ID_AA64AFR1_ALLOW (0ULL) 166 - 167 - /* 168 - * No restrictions on instructions implemented in AArch64. 169 - */ 170 - #define PVM_ID_AA64ISAR0_ALLOW (\ 171 - ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_AES) | \ 172 - ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SHA1) | \ 173 - ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SHA2) | \ 174 - ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_CRC32) | \ 175 - ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_ATOMIC) | \ 176 - ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_RDM) | \ 177 - ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SHA3) | \ 178 - ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SM3) | \ 179 - ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SM4) | \ 180 - ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_DP) | \ 181 - ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_FHM) | \ 182 - ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_TS) | \ 183 - ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_TLB) | \ 184 - ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_RNDR) \ 185 - ) 186 - 187 - /* Restrict pointer authentication to the basic version. 
*/ 188 - #define PVM_ID_AA64ISAR1_RESTRICT_UNSIGNED (\ 189 - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA), ID_AA64ISAR1_EL1_APA_PAuth) | \ 190 - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API), ID_AA64ISAR1_EL1_API_PAuth) \ 191 - ) 192 - 193 - #define PVM_ID_AA64ISAR2_RESTRICT_UNSIGNED (\ 194 - FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3), ID_AA64ISAR2_EL1_APA3_PAuth) \ 195 - ) 196 - 197 - #define PVM_ID_AA64ISAR1_ALLOW (\ 198 - ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_DPB) | \ 199 - ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_JSCVT) | \ 200 - ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_FCMA) | \ 201 - ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_LRCPC) | \ 202 - ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) | \ 203 - ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI) | \ 204 - ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_FRINTTS) | \ 205 - ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_SB) | \ 206 - ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_SPECRES) | \ 207 - ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_BF16) | \ 208 - ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_DGH) | \ 209 - ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_I8MM) \ 210 - ) 211 - 212 - #define PVM_ID_AA64ISAR2_ALLOW (\ 213 - ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_ATS1A)| \ 214 - ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3) | \ 215 - ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_MOPS) \ 216 - ) 217 - 218 - u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id); 219 - bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code); 220 - bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code); 221 - int kvm_check_pvm_sysreg_table(void); 222 - 223 - #endif /* __ARM64_KVM_FIXED_CONFIG_H__ */
+7
arch/arm64/kvm/hyp/include/nvhe/pkvm.h
··· 53 53 struct pkvm_hyp_vcpu *vcpus[]; 54 54 }; 55 55 56 + extern hyp_spinlock_t vm_table_lock; 57 + 56 58 static inline struct pkvm_hyp_vm * 57 59 pkvm_hyp_vcpu_to_hyp_vm(struct pkvm_hyp_vcpu *hyp_vcpu) 58 60 { ··· 87 85 struct pkvm_hyp_vm *get_pkvm_hyp_vm(pkvm_handle_t handle); 88 86 struct pkvm_hyp_vm *get_np_pkvm_hyp_vm(pkvm_handle_t handle); 89 87 void put_pkvm_hyp_vm(struct pkvm_hyp_vm *hyp_vm); 88 + 89 + bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code); 90 + bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code); 91 + void kvm_init_pvm_id_regs(struct kvm_vcpu *vcpu); 92 + int kvm_check_pvm_sysreg_table(void); 90 93 91 94 #endif /* __ARM64_KVM_NVHE_PKVM_H__ */
+2 -2
arch/arm64/kvm/hyp/nvhe/hyp-main.c
··· 68 68 if (!guest_owns_fp_regs()) 69 69 return; 70 70 71 - cpacr_clear_set(0, CPACR_ELx_FPEN | CPACR_ELx_ZEN); 71 + cpacr_clear_set(0, CPACR_EL1_FPEN | CPACR_EL1_ZEN); 72 72 isb(); 73 73 74 74 if (vcpu_has_sve(vcpu)) ··· 652 652 handle_host_smc(host_ctxt); 653 653 break; 654 654 case ESR_ELx_EC_SVE: 655 - cpacr_clear_set(0, CPACR_ELx_ZEN); 655 + cpacr_clear_set(0, CPACR_EL1_ZEN); 656 656 isb(); 657 657 sve_cond_update_zcr_vq(sve_vq_from_vl(kvm_host_sve_max_vl) - 1, 658 658 SYS_ZCR_EL2);
+139 -234
arch/arm64/kvm/hyp/nvhe/pkvm.c
··· 9 9 10 10 #include <asm/kvm_emulate.h> 11 11 12 - #include <nvhe/fixed_config.h> 13 12 #include <nvhe/mem_protect.h> 14 13 #include <nvhe/memory.h> 15 14 #include <nvhe/pkvm.h> ··· 27 28 * protected KVM is enabled, but for both protected and non-protected VMs. 28 29 */ 29 30 static DEFINE_PER_CPU(struct pkvm_hyp_vcpu *, loaded_hyp_vcpu); 30 - 31 - /* 32 - * Set trap register values based on features in ID_AA64PFR0. 33 - */ 34 - static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu) 35 - { 36 - const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1); 37 - u64 hcr_set = HCR_RW; 38 - u64 hcr_clear = 0; 39 - u64 cptr_set = 0; 40 - u64 cptr_clear = 0; 41 - 42 - /* Protected KVM does not support AArch32 guests. */ 43 - BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), 44 - PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_EL0_IMP); 45 - BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1), 46 - PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_EL1_IMP); 47 - 48 - /* 49 - * Linux guests assume support for floating-point and Advanced SIMD. Do 50 - * not change the trapping behavior for these from the KVM default. 
51 - */ 52 - BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP), 53 - PVM_ID_AA64PFR0_ALLOW)); 54 - BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD), 55 - PVM_ID_AA64PFR0_ALLOW)); 56 - 57 - if (has_hvhe()) 58 - hcr_set |= HCR_E2H; 59 - 60 - /* Trap RAS unless all current versions are supported */ 61 - if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), feature_ids) < 62 - ID_AA64PFR0_EL1_RAS_V1P1) { 63 - hcr_set |= HCR_TERR | HCR_TEA; 64 - hcr_clear |= HCR_FIEN; 65 - } 66 - 67 - /* Trap AMU */ 68 - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) { 69 - hcr_clear |= HCR_AMVOFFEN; 70 - cptr_set |= CPTR_EL2_TAM; 71 - } 72 - 73 - /* Trap SVE */ 74 - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) { 75 - if (has_hvhe()) 76 - cptr_clear |= CPACR_ELx_ZEN; 77 - else 78 - cptr_set |= CPTR_EL2_TZ; 79 - } 80 - 81 - vcpu->arch.hcr_el2 |= hcr_set; 82 - vcpu->arch.hcr_el2 &= ~hcr_clear; 83 - vcpu->arch.cptr_el2 |= cptr_set; 84 - vcpu->arch.cptr_el2 &= ~cptr_clear; 85 - } 86 - 87 - /* 88 - * Set trap register values based on features in ID_AA64PFR1. 89 - */ 90 - static void pvm_init_traps_aa64pfr1(struct kvm_vcpu *vcpu) 91 - { 92 - const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR1_EL1); 93 - u64 hcr_set = 0; 94 - u64 hcr_clear = 0; 95 - 96 - /* Memory Tagging: Trap and Treat as Untagged if not supported. */ 97 - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), feature_ids)) { 98 - hcr_set |= HCR_TID5; 99 - hcr_clear |= HCR_DCT | HCR_ATA; 100 - } 101 - 102 - vcpu->arch.hcr_el2 |= hcr_set; 103 - vcpu->arch.hcr_el2 &= ~hcr_clear; 104 - } 105 - 106 - /* 107 - * Set trap register values based on features in ID_AA64DFR0. 
108 - */ 109 - static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu) 110 - { 111 - const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1); 112 - u64 mdcr_set = 0; 113 - u64 mdcr_clear = 0; 114 - u64 cptr_set = 0; 115 - 116 - /* Trap/constrain PMU */ 117 - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) { 118 - mdcr_set |= MDCR_EL2_TPM | MDCR_EL2_TPMCR; 119 - mdcr_clear |= MDCR_EL2_HPME | MDCR_EL2_MTPME | 120 - MDCR_EL2_HPMN_MASK; 121 - } 122 - 123 - /* Trap Debug */ 124 - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), feature_ids)) 125 - mdcr_set |= MDCR_EL2_TDRA | MDCR_EL2_TDA | MDCR_EL2_TDE; 126 - 127 - /* Trap OS Double Lock */ 128 - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DoubleLock), feature_ids)) 129 - mdcr_set |= MDCR_EL2_TDOSA; 130 - 131 - /* Trap SPE */ 132 - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer), feature_ids)) { 133 - mdcr_set |= MDCR_EL2_TPMS; 134 - mdcr_clear |= MDCR_EL2_E2PB_MASK; 135 - } 136 - 137 - /* Trap Trace Filter */ 138 - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids)) 139 - mdcr_set |= MDCR_EL2_TTRF; 140 - 141 - /* Trap Trace */ 142 - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids)) { 143 - if (has_hvhe()) 144 - cptr_set |= CPACR_EL1_TTA; 145 - else 146 - cptr_set |= CPTR_EL2_TTA; 147 - } 148 - 149 - /* Trap External Trace */ 150 - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_ExtTrcBuff), feature_ids)) 151 - mdcr_clear |= MDCR_EL2_E2TB_MASK; 152 - 153 - vcpu->arch.mdcr_el2 |= mdcr_set; 154 - vcpu->arch.mdcr_el2 &= ~mdcr_clear; 155 - vcpu->arch.cptr_el2 |= cptr_set; 156 - } 157 - 158 - /* 159 - * Set trap register values based on features in ID_AA64MMFR0. 
160 - */ 161 - static void pvm_init_traps_aa64mmfr0(struct kvm_vcpu *vcpu) 162 - { 163 - const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR0_EL1); 164 - u64 mdcr_set = 0; 165 - 166 - /* Trap Debug Communications Channel registers */ 167 - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_FGT), feature_ids)) 168 - mdcr_set |= MDCR_EL2_TDCC; 169 - 170 - vcpu->arch.mdcr_el2 |= mdcr_set; 171 - } 172 - 173 - /* 174 - * Set trap register values based on features in ID_AA64MMFR1. 175 - */ 176 - static void pvm_init_traps_aa64mmfr1(struct kvm_vcpu *vcpu) 177 - { 178 - const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR1_EL1); 179 - u64 hcr_set = 0; 180 - 181 - /* Trap LOR */ 182 - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_LO), feature_ids)) 183 - hcr_set |= HCR_TLOR; 184 - 185 - vcpu->arch.hcr_el2 |= hcr_set; 186 - } 187 - 188 - /* 189 - * Set baseline trap register values. 190 - */ 191 - static void pvm_init_trap_regs(struct kvm_vcpu *vcpu) 192 - { 193 - const u64 hcr_trap_feat_regs = HCR_TID3; 194 - const u64 hcr_trap_impdef = HCR_TACR | HCR_TIDCP | HCR_TID1; 195 - 196 - /* 197 - * Always trap: 198 - * - Feature id registers: to control features exposed to guests 199 - * - Implementation-defined features 200 - */ 201 - vcpu->arch.hcr_el2 |= hcr_trap_feat_regs | hcr_trap_impdef; 202 - 203 - /* Clear res0 and set res1 bits to trap potential new features. 
*/ 204 - vcpu->arch.hcr_el2 &= ~(HCR_RES0); 205 - vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0); 206 - if (!has_hvhe()) { 207 - vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1; 208 - vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0); 209 - } 210 - } 211 31 212 32 static void pkvm_vcpu_reset_hcr(struct kvm_vcpu *vcpu) 213 33 { ··· 53 235 54 236 if (vcpu_has_ptrauth(vcpu)) 55 237 vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK); 238 + 239 + if (kvm_has_mte(vcpu->kvm)) 240 + vcpu->arch.hcr_el2 |= HCR_ATA; 241 + } 242 + 243 + static void pvm_init_traps_hcr(struct kvm_vcpu *vcpu) 244 + { 245 + struct kvm *kvm = vcpu->kvm; 246 + u64 val = vcpu->arch.hcr_el2; 247 + 248 + /* No support for AArch32. */ 249 + val |= HCR_RW; 250 + 251 + /* 252 + * Always trap: 253 + * - Feature id registers: to control features exposed to guests 254 + * - Implementation-defined features 255 + */ 256 + val |= HCR_TACR | HCR_TIDCP | HCR_TID3 | HCR_TID1; 257 + 258 + if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP)) { 259 + val |= HCR_TERR | HCR_TEA; 260 + val &= ~(HCR_FIEN); 261 + } 262 + 263 + if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP)) 264 + val &= ~(HCR_AMVOFFEN); 265 + 266 + if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, MTE, IMP)) { 267 + val |= HCR_TID5; 268 + val &= ~(HCR_DCT | HCR_ATA); 269 + } 270 + 271 + if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP)) 272 + val |= HCR_TLOR; 273 + 274 + vcpu->arch.hcr_el2 = val; 275 + } 276 + 277 + static void pvm_init_traps_mdcr(struct kvm_vcpu *vcpu) 278 + { 279 + struct kvm *kvm = vcpu->kvm; 280 + u64 val = vcpu->arch.mdcr_el2; 281 + 282 + if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, IMP)) { 283 + val |= MDCR_EL2_TPM | MDCR_EL2_TPMCR; 284 + val &= ~(MDCR_EL2_HPME | MDCR_EL2_MTPME | MDCR_EL2_HPMN_MASK); 285 + } 286 + 287 + if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DebugVer, IMP)) 288 + val |= MDCR_EL2_TDRA | MDCR_EL2_TDA; 289 + 290 + if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DoubleLock, IMP)) 291 + val |= MDCR_EL2_TDOSA; 292 + 293 + if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, 
PMSVer, IMP)) { 294 + val |= MDCR_EL2_TPMS; 295 + val &= ~MDCR_EL2_E2PB_MASK; 296 + } 297 + 298 + if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceFilt, IMP)) 299 + val |= MDCR_EL2_TTRF; 300 + 301 + if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, ExtTrcBuff, IMP)) 302 + val |= MDCR_EL2_E2TB_MASK; 303 + 304 + /* Trap Debug Communications Channel registers */ 305 + if (!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, FGT, IMP)) 306 + val |= MDCR_EL2_TDCC; 307 + 308 + vcpu->arch.mdcr_el2 = val; 309 + } 310 + 311 + /* 312 + * Check that cpu features that are neither trapped nor supported are not 313 + * enabled for protected VMs. 314 + */ 315 + static int pkvm_check_pvm_cpu_features(struct kvm_vcpu *vcpu) 316 + { 317 + struct kvm *kvm = vcpu->kvm; 318 + 319 + /* Protected KVM does not support AArch32 guests. */ 320 + if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL0, AARCH32) || 321 + kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL1, AARCH32)) 322 + return -EINVAL; 323 + 324 + /* 325 + * Linux guests assume support for floating-point and Advanced SIMD. Do 326 + * not change the trapping behavior for these from the KVM default. 327 + */ 328 + if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, FP, IMP) || 329 + !kvm_has_feat(kvm, ID_AA64PFR0_EL1, AdvSIMD, IMP)) 330 + return -EINVAL; 331 + 332 + /* No SME support in KVM right now. Check to catch if it changes. */ 333 + if (kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP)) 334 + return -EINVAL; 335 + 336 + return 0; 56 337 } 57 338 58 339 /* 59 340 * Initialize trap register values in protected mode. 
60 341 */ 61 - static void pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu) 342 + static int pkvm_vcpu_init_traps(struct pkvm_hyp_vcpu *hyp_vcpu) 62 343 { 63 - vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu); 344 + struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu; 345 + int ret; 346 + 64 347 vcpu->arch.mdcr_el2 = 0; 65 348 66 349 pkvm_vcpu_reset_hcr(vcpu); 67 350 68 - if ((!vcpu_is_protected(vcpu))) 69 - return; 351 + if ((!pkvm_hyp_vcpu_is_protected(hyp_vcpu))) 352 + return 0; 70 353 71 - pvm_init_trap_regs(vcpu); 72 - pvm_init_traps_aa64pfr0(vcpu); 73 - pvm_init_traps_aa64pfr1(vcpu); 74 - pvm_init_traps_aa64dfr0(vcpu); 75 - pvm_init_traps_aa64mmfr0(vcpu); 76 - pvm_init_traps_aa64mmfr1(vcpu); 354 + ret = pkvm_check_pvm_cpu_features(vcpu); 355 + if (ret) 356 + return ret; 357 + 358 + pvm_init_traps_hcr(vcpu); 359 + pvm_init_traps_mdcr(vcpu); 360 + 361 + return 0; 77 362 } 78 363 79 364 /* ··· 197 276 198 277 /* 199 278 * Spinlock for protecting state related to the VM table. Protects writes 200 - * to 'vm_table' and 'nr_table_entries' as well as reads and writes to 201 - * 'last_hyp_vcpu_lookup'. 279 + * to 'vm_table', 'nr_table_entries', and other per-vm state on initialization. 280 + * Also protects reads and writes to 'last_hyp_vcpu_lookup'. 202 281 */ 203 - static DEFINE_HYP_SPINLOCK(vm_table_lock); 282 + DEFINE_HYP_SPINLOCK(vm_table_lock); 204 283 205 284 /* 206 285 * The table of VM entries for protected VMs in hyp. ··· 312 391 static void pkvm_init_features_from_host(struct pkvm_hyp_vm *hyp_vm, const struct kvm *host_kvm) 313 392 { 314 393 struct kvm *kvm = &hyp_vm->kvm; 394 + unsigned long host_arch_flags = READ_ONCE(host_kvm->arch.flags); 315 395 DECLARE_BITMAP(allowed_features, KVM_VCPU_MAX_FEATURES); 396 + 397 + if (test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &host_kvm->arch.flags)) 398 + set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags); 316 399 317 400 /* No restrictions for non-protected VMs. 
*/ 318 401 if (!kvm_vm_is_protected(kvm)) { 402 + hyp_vm->kvm.arch.flags = host_arch_flags; 403 + 319 404 bitmap_copy(kvm->arch.vcpu_features, 320 405 host_kvm->arch.vcpu_features, 321 406 KVM_VCPU_MAX_FEATURES); ··· 330 403 331 404 bitmap_zero(allowed_features, KVM_VCPU_MAX_FEATURES); 332 405 333 - /* 334 - * For protected VMs, always allow: 335 - * - CPU starting in poweroff state 336 - * - PSCI v0.2 337 - */ 338 - set_bit(KVM_ARM_VCPU_POWER_OFF, allowed_features); 339 406 set_bit(KVM_ARM_VCPU_PSCI_0_2, allowed_features); 340 407 341 - /* 342 - * Check if remaining features are allowed: 343 - * - Performance Monitoring 344 - * - Scalable Vectors 345 - * - Pointer Authentication 346 - */ 347 - if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), PVM_ID_AA64DFR0_ALLOW)) 408 + if (kvm_pvm_ext_allowed(KVM_CAP_ARM_PMU_V3)) 348 409 set_bit(KVM_ARM_VCPU_PMU_V3, allowed_features); 349 410 350 - if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), PVM_ID_AA64PFR0_ALLOW)) 351 - set_bit(KVM_ARM_VCPU_SVE, allowed_features); 352 - 353 - if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API), PVM_ID_AA64ISAR1_RESTRICT_UNSIGNED) && 354 - FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA), PVM_ID_AA64ISAR1_RESTRICT_UNSIGNED)) 411 + if (kvm_pvm_ext_allowed(KVM_CAP_ARM_PTRAUTH_ADDRESS)) 355 412 set_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, allowed_features); 356 413 357 - if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI), PVM_ID_AA64ISAR1_ALLOW) && 358 - FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA), PVM_ID_AA64ISAR1_ALLOW)) 414 + if (kvm_pvm_ext_allowed(KVM_CAP_ARM_PTRAUTH_GENERIC)) 359 415 set_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, allowed_features); 416 + 417 + if (kvm_pvm_ext_allowed(KVM_CAP_ARM_SVE)) { 418 + set_bit(KVM_ARM_VCPU_SVE, allowed_features); 419 + kvm->arch.flags |= host_arch_flags & BIT(KVM_ARCH_FLAG_GUEST_HAS_SVE); 420 + } 360 421 361 422 bitmap_and(kvm->arch.vcpu_features, host_kvm->arch.vcpu_features, 362 423 allowed_features, KVM_VCPU_MAX_FEATURES); 363 - 
} 364 - 365 - static void pkvm_vcpu_init_ptrauth(struct pkvm_hyp_vcpu *hyp_vcpu) 366 - { 367 - struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu; 368 - 369 - if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_ADDRESS) || 370 - vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_GENERIC)) { 371 - kvm_vcpu_enable_ptrauth(vcpu); 372 - } else { 373 - vcpu_clear_flag(&hyp_vcpu->vcpu, GUEST_HAS_PTRAUTH); 374 - } 375 424 } 376 425 377 426 static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu) ··· 372 469 hyp_vm->kvm.created_vcpus = nr_vcpus; 373 470 hyp_vm->kvm.arch.mmu.vtcr = host_mmu.arch.mmu.vtcr; 374 471 hyp_vm->kvm.arch.pkvm.enabled = READ_ONCE(host_kvm->arch.pkvm.enabled); 472 + hyp_vm->kvm.arch.flags = 0; 375 473 pkvm_init_features_from_host(hyp_vm, host_kvm); 376 474 } 377 475 ··· 380 476 { 381 477 struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu; 382 478 383 - if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE)) { 384 - vcpu_clear_flag(vcpu, GUEST_HAS_SVE); 479 + if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE)) 385 480 vcpu_clear_flag(vcpu, VCPU_SVE_FINALIZED); 386 - } 387 481 } 388 482 389 483 static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu, ··· 409 507 hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags); 410 508 hyp_vcpu->vcpu.arch.mp_state.mp_state = KVM_MP_STATE_STOPPED; 411 509 510 + if (pkvm_hyp_vcpu_is_protected(hyp_vcpu)) 511 + kvm_init_pvm_id_regs(&hyp_vcpu->vcpu); 512 + 513 + ret = pkvm_vcpu_init_traps(hyp_vcpu); 514 + if (ret) 515 + goto done; 516 + 412 517 pkvm_vcpu_init_sve(hyp_vcpu, host_vcpu); 413 - pkvm_vcpu_init_ptrauth(hyp_vcpu); 414 - pkvm_vcpu_init_traps(&hyp_vcpu->vcpu); 415 518 done: 416 519 if (ret) 417 520 unpin_host_vcpu(host_vcpu); ··· 660 753 unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu)); 661 754 return ret; 662 755 } 663 - 664 - hyp_vcpu->vcpu.arch.cptr_el2 = kvm_get_reset_cptr_el2(&hyp_vcpu->vcpu); 665 756 666 757 return 0; 667 758 }
-1
arch/arm64/kvm/hyp/nvhe/setup.c
··· 12 12 13 13 #include <nvhe/early_alloc.h> 14 14 #include <nvhe/ffa.h> 15 - #include <nvhe/fixed_config.h> 16 15 #include <nvhe/gfp.h> 17 16 #include <nvhe/memory.h> 18 17 #include <nvhe/mem_protect.h>
+33 -21
arch/arm64/kvm/hyp/nvhe/switch.c
··· 26 26 #include <asm/debug-monitors.h> 27 27 #include <asm/processor.h> 28 28 29 - #include <nvhe/fixed_config.h> 30 29 #include <nvhe/mem_protect.h> 31 30 32 31 /* Non-VHE specific context */ ··· 35 36 36 37 extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc); 37 38 38 - static void __activate_traps(struct kvm_vcpu *vcpu) 39 + static void __activate_cptr_traps(struct kvm_vcpu *vcpu) 39 40 { 40 - u64 val; 41 + u64 val = CPTR_EL2_TAM; /* Same bit irrespective of E2H */ 41 42 42 - ___activate_traps(vcpu, vcpu->arch.hcr_el2); 43 - __activate_traps_common(vcpu); 43 + if (has_hvhe()) { 44 + val |= CPACR_EL1_TTA; 44 45 45 - val = vcpu->arch.cptr_el2; 46 - val |= CPTR_EL2_TAM; /* Same bit irrespective of E2H */ 47 - val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA; 48 - if (cpus_have_final_cap(ARM64_SME)) { 49 - if (has_hvhe()) 50 - val &= ~CPACR_ELx_SMEN; 51 - else 52 - val |= CPTR_EL2_TSM; 46 + if (guest_owns_fp_regs()) { 47 + val |= CPACR_EL1_FPEN; 48 + if (vcpu_has_sve(vcpu)) 49 + val |= CPACR_EL1_ZEN; 50 + } 51 + } else { 52 + val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1; 53 + 54 + /* 55 + * Always trap SME since it's not supported in KVM. 56 + * TSM is RES1 if SME isn't implemented. 
57 + */ 58 + val |= CPTR_EL2_TSM; 59 + 60 + if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs()) 61 + val |= CPTR_EL2_TZ; 62 + 63 + if (!guest_owns_fp_regs()) 64 + val |= CPTR_EL2_TFP; 53 65 } 54 66 55 - if (!guest_owns_fp_regs()) { 56 - if (has_hvhe()) 57 - val &= ~(CPACR_ELx_FPEN | CPACR_ELx_ZEN); 58 - else 59 - val |= CPTR_EL2_TFP | CPTR_EL2_TZ; 60 - 67 + if (!guest_owns_fp_regs()) 61 68 __activate_traps_fpsimd32(vcpu); 62 - } 63 69 64 70 kvm_write_cptr_el2(val); 71 + } 72 + 73 + static void __activate_traps(struct kvm_vcpu *vcpu) 74 + { 75 + ___activate_traps(vcpu, vcpu->arch.hcr_el2); 76 + __activate_traps_common(vcpu); 77 + __activate_cptr_traps(vcpu); 78 + 65 79 write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2); 66 80 67 81 if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) { ··· 204 192 205 193 /* Re-enable SVE traps if not supported for the guest vcpu. */ 206 194 if (!vcpu_has_sve(vcpu)) 207 - cpacr_clear_set(CPACR_ELx_ZEN, 0); 195 + cpacr_clear_set(CPACR_EL1_ZEN, 0); 208 196 209 197 } else { 210 198 __fpsimd_save_state(*host_data_ptr(fpsimd_state));
+250 -200
arch/arm64/kvm/hyp/nvhe/sys_regs.c
··· 11 11 12 12 #include <hyp/adjust_pc.h> 13 13 14 - #include <nvhe/fixed_config.h> 14 + #include <nvhe/pkvm.h> 15 15 16 16 #include "../../sys_regs.h" 17 17 ··· 27 27 u64 id_aa64mmfr1_el1_sys_val; 28 28 u64 id_aa64mmfr2_el1_sys_val; 29 29 u64 id_aa64smfr0_el1_sys_val; 30 + 31 + struct pvm_ftr_bits { 32 + bool sign; 33 + u8 shift; 34 + u8 width; 35 + u8 max_val; 36 + bool (*vm_supported)(const struct kvm *kvm); 37 + }; 38 + 39 + #define __MAX_FEAT_FUNC(id, fld, max, func, sgn) \ 40 + { \ 41 + .sign = sgn, \ 42 + .shift = id##_##fld##_SHIFT, \ 43 + .width = id##_##fld##_WIDTH, \ 44 + .max_val = id##_##fld##_##max, \ 45 + .vm_supported = func, \ 46 + } 47 + 48 + #define MAX_FEAT_FUNC(id, fld, max, func) \ 49 + __MAX_FEAT_FUNC(id, fld, max, func, id##_##fld##_SIGNED) 50 + 51 + #define MAX_FEAT(id, fld, max) \ 52 + MAX_FEAT_FUNC(id, fld, max, NULL) 53 + 54 + #define MAX_FEAT_ENUM(id, fld, max) \ 55 + __MAX_FEAT_FUNC(id, fld, max, NULL, false) 56 + 57 + #define FEAT_END { .width = 0, } 58 + 59 + static bool vm_has_ptrauth(const struct kvm *kvm) 60 + { 61 + if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH)) 62 + return false; 63 + 64 + return (cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) || 65 + cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) && 66 + kvm_vcpu_has_feature(kvm, KVM_ARM_VCPU_PTRAUTH_GENERIC); 67 + } 68 + 69 + static bool vm_has_sve(const struct kvm *kvm) 70 + { 71 + return system_supports_sve() && kvm_vcpu_has_feature(kvm, KVM_ARM_VCPU_SVE); 72 + } 73 + 74 + /* 75 + * Definitions for features to be allowed or restricted for protected guests. 76 + * 77 + * Each field in the masks represents the highest supported value for the 78 + * feature. If a feature field is not present, it is not supported. Moreover, 79 + * these are used to generate the guest's view of the feature registers. 
80 + * 81 + * The approach for protected VMs is to at least support features that are: 82 + * - Needed by common Linux distributions (e.g., floating point) 83 + * - Trivial to support, e.g., supporting the feature does not introduce or 84 + * require tracking of additional state in KVM 85 + * - Cannot be trapped or prevent the guest from using anyway 86 + */ 87 + 88 + static const struct pvm_ftr_bits pvmid_aa64pfr0[] = { 89 + MAX_FEAT(ID_AA64PFR0_EL1, EL0, IMP), 90 + MAX_FEAT(ID_AA64PFR0_EL1, EL1, IMP), 91 + MAX_FEAT(ID_AA64PFR0_EL1, EL2, IMP), 92 + MAX_FEAT(ID_AA64PFR0_EL1, EL3, IMP), 93 + MAX_FEAT(ID_AA64PFR0_EL1, FP, FP16), 94 + MAX_FEAT(ID_AA64PFR0_EL1, AdvSIMD, FP16), 95 + MAX_FEAT(ID_AA64PFR0_EL1, GIC, IMP), 96 + MAX_FEAT_FUNC(ID_AA64PFR0_EL1, SVE, IMP, vm_has_sve), 97 + MAX_FEAT(ID_AA64PFR0_EL1, RAS, IMP), 98 + MAX_FEAT(ID_AA64PFR0_EL1, DIT, IMP), 99 + MAX_FEAT(ID_AA64PFR0_EL1, CSV2, IMP), 100 + MAX_FEAT(ID_AA64PFR0_EL1, CSV3, IMP), 101 + FEAT_END 102 + }; 103 + 104 + static const struct pvm_ftr_bits pvmid_aa64pfr1[] = { 105 + MAX_FEAT(ID_AA64PFR1_EL1, BT, IMP), 106 + MAX_FEAT(ID_AA64PFR1_EL1, SSBS, SSBS2), 107 + MAX_FEAT_ENUM(ID_AA64PFR1_EL1, MTE_frac, NI), 108 + FEAT_END 109 + }; 110 + 111 + static const struct pvm_ftr_bits pvmid_aa64mmfr0[] = { 112 + MAX_FEAT_ENUM(ID_AA64MMFR0_EL1, PARANGE, 40), 113 + MAX_FEAT_ENUM(ID_AA64MMFR0_EL1, ASIDBITS, 16), 114 + MAX_FEAT(ID_AA64MMFR0_EL1, BIGEND, IMP), 115 + MAX_FEAT(ID_AA64MMFR0_EL1, SNSMEM, IMP), 116 + MAX_FEAT(ID_AA64MMFR0_EL1, BIGENDEL0, IMP), 117 + MAX_FEAT(ID_AA64MMFR0_EL1, EXS, IMP), 118 + FEAT_END 119 + }; 120 + 121 + static const struct pvm_ftr_bits pvmid_aa64mmfr1[] = { 122 + MAX_FEAT(ID_AA64MMFR1_EL1, HAFDBS, DBM), 123 + MAX_FEAT_ENUM(ID_AA64MMFR1_EL1, VMIDBits, 16), 124 + MAX_FEAT(ID_AA64MMFR1_EL1, HPDS, HPDS2), 125 + MAX_FEAT(ID_AA64MMFR1_EL1, PAN, PAN3), 126 + MAX_FEAT(ID_AA64MMFR1_EL1, SpecSEI, IMP), 127 + MAX_FEAT(ID_AA64MMFR1_EL1, ETS, IMP), 128 + MAX_FEAT(ID_AA64MMFR1_EL1, CMOW, IMP), 129 + 
FEAT_END 130 + }; 131 + 132 + static const struct pvm_ftr_bits pvmid_aa64mmfr2[] = { 133 + MAX_FEAT(ID_AA64MMFR2_EL1, CnP, IMP), 134 + MAX_FEAT(ID_AA64MMFR2_EL1, UAO, IMP), 135 + MAX_FEAT(ID_AA64MMFR2_EL1, IESB, IMP), 136 + MAX_FEAT(ID_AA64MMFR2_EL1, AT, IMP), 137 + MAX_FEAT_ENUM(ID_AA64MMFR2_EL1, IDS, 0x18), 138 + MAX_FEAT(ID_AA64MMFR2_EL1, TTL, IMP), 139 + MAX_FEAT(ID_AA64MMFR2_EL1, BBM, 2), 140 + MAX_FEAT(ID_AA64MMFR2_EL1, E0PD, IMP), 141 + FEAT_END 142 + }; 143 + 144 + static const struct pvm_ftr_bits pvmid_aa64isar1[] = { 145 + MAX_FEAT(ID_AA64ISAR1_EL1, DPB, DPB2), 146 + MAX_FEAT_FUNC(ID_AA64ISAR1_EL1, APA, PAuth, vm_has_ptrauth), 147 + MAX_FEAT_FUNC(ID_AA64ISAR1_EL1, API, PAuth, vm_has_ptrauth), 148 + MAX_FEAT(ID_AA64ISAR1_EL1, JSCVT, IMP), 149 + MAX_FEAT(ID_AA64ISAR1_EL1, FCMA, IMP), 150 + MAX_FEAT(ID_AA64ISAR1_EL1, LRCPC, LRCPC3), 151 + MAX_FEAT(ID_AA64ISAR1_EL1, GPA, IMP), 152 + MAX_FEAT(ID_AA64ISAR1_EL1, GPI, IMP), 153 + MAX_FEAT(ID_AA64ISAR1_EL1, FRINTTS, IMP), 154 + MAX_FEAT(ID_AA64ISAR1_EL1, SB, IMP), 155 + MAX_FEAT(ID_AA64ISAR1_EL1, SPECRES, COSP_RCTX), 156 + MAX_FEAT(ID_AA64ISAR1_EL1, BF16, EBF16), 157 + MAX_FEAT(ID_AA64ISAR1_EL1, DGH, IMP), 158 + MAX_FEAT(ID_AA64ISAR1_EL1, I8MM, IMP), 159 + FEAT_END 160 + }; 161 + 162 + static const struct pvm_ftr_bits pvmid_aa64isar2[] = { 163 + MAX_FEAT_FUNC(ID_AA64ISAR2_EL1, GPA3, IMP, vm_has_ptrauth), 164 + MAX_FEAT_FUNC(ID_AA64ISAR2_EL1, APA3, PAuth, vm_has_ptrauth), 165 + MAX_FEAT(ID_AA64ISAR2_EL1, ATS1A, IMP), 166 + FEAT_END 167 + }; 168 + 169 + /* 170 + * None of the features in ID_AA64DFR0_EL1 nor ID_AA64MMFR4_EL1 are supported. 171 + * However, both have Not-Implemented values that are non-zero. Define them 172 + * so they can be used when getting the value of these registers. 
173 + */ 174 + #define ID_AA64DFR0_EL1_NONZERO_NI \ 175 + ( \ 176 + SYS_FIELD_PREP_ENUM(ID_AA64DFR0_EL1, DoubleLock, NI) | \ 177 + SYS_FIELD_PREP_ENUM(ID_AA64DFR0_EL1, MTPMU, NI) \ 178 + ) 179 + 180 + #define ID_AA64MMFR4_EL1_NONZERO_NI \ 181 + SYS_FIELD_PREP_ENUM(ID_AA64MMFR4_EL1, E2H0, NI) 182 + 183 + /* 184 + * Returns the value of the feature registers based on the system register 185 + * value, the vcpu support for the revelant features, and the additional 186 + * restrictions for protected VMs. 187 + */ 188 + static u64 get_restricted_features(const struct kvm_vcpu *vcpu, 189 + u64 sys_reg_val, 190 + const struct pvm_ftr_bits restrictions[]) 191 + { 192 + u64 val = 0UL; 193 + int i; 194 + 195 + for (i = 0; restrictions[i].width != 0; i++) { 196 + bool (*vm_supported)(const struct kvm *) = restrictions[i].vm_supported; 197 + bool sign = restrictions[i].sign; 198 + int shift = restrictions[i].shift; 199 + int width = restrictions[i].width; 200 + u64 min_signed = (1UL << width) - 1UL; 201 + u64 sign_bit = 1UL << (width - 1); 202 + u64 mask = GENMASK_ULL(width + shift - 1, shift); 203 + u64 sys_val = (sys_reg_val & mask) >> shift; 204 + u64 pvm_max = restrictions[i].max_val; 205 + 206 + if (vm_supported && !vm_supported(vcpu->kvm)) 207 + val |= (sign ? 
min_signed : 0) << shift; 208 + else if (sign && (sys_val >= sign_bit || pvm_max >= sign_bit)) 209 + val |= max(sys_val, pvm_max) << shift; 210 + else 211 + val |= min(sys_val, pvm_max) << shift; 212 + } 213 + 214 + return val; 215 + } 216 + 217 + static u64 pvm_calc_id_reg(const struct kvm_vcpu *vcpu, u32 id) 218 + { 219 + switch (id) { 220 + case SYS_ID_AA64PFR0_EL1: 221 + return get_restricted_features(vcpu, id_aa64pfr0_el1_sys_val, pvmid_aa64pfr0); 222 + case SYS_ID_AA64PFR1_EL1: 223 + return get_restricted_features(vcpu, id_aa64pfr1_el1_sys_val, pvmid_aa64pfr1); 224 + case SYS_ID_AA64ISAR0_EL1: 225 + return id_aa64isar0_el1_sys_val; 226 + case SYS_ID_AA64ISAR1_EL1: 227 + return get_restricted_features(vcpu, id_aa64isar1_el1_sys_val, pvmid_aa64isar1); 228 + case SYS_ID_AA64ISAR2_EL1: 229 + return get_restricted_features(vcpu, id_aa64isar2_el1_sys_val, pvmid_aa64isar2); 230 + case SYS_ID_AA64MMFR0_EL1: 231 + return get_restricted_features(vcpu, id_aa64mmfr0_el1_sys_val, pvmid_aa64mmfr0); 232 + case SYS_ID_AA64MMFR1_EL1: 233 + return get_restricted_features(vcpu, id_aa64mmfr1_el1_sys_val, pvmid_aa64mmfr1); 234 + case SYS_ID_AA64MMFR2_EL1: 235 + return get_restricted_features(vcpu, id_aa64mmfr2_el1_sys_val, pvmid_aa64mmfr2); 236 + case SYS_ID_AA64DFR0_EL1: 237 + return ID_AA64DFR0_EL1_NONZERO_NI; 238 + case SYS_ID_AA64MMFR4_EL1: 239 + return ID_AA64MMFR4_EL1_NONZERO_NI; 240 + default: 241 + /* Unhandled ID register, RAZ */ 242 + return 0; 243 + } 244 + } 30 245 31 246 /* 32 247 * Inject an unknown/undefined exception to an AArch64 guest while most of its ··· 264 49 write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR); 265 50 } 266 51 267 - /* 268 - * Returns the restricted features values of the feature register based on the 269 - * limitations in restrict_fields. 270 - * A feature id field value of 0b0000 does not impose any restrictions. 271 - * Note: Use only for unsigned feature field values. 
272 - */ 273 - static u64 get_restricted_features_unsigned(u64 sys_reg_val, 274 - u64 restrict_fields) 275 - { 276 - u64 value = 0UL; 277 - u64 mask = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0); 278 - 279 - /* 280 - * According to the Arm Architecture Reference Manual, feature fields 281 - * use increasing values to indicate increases in functionality. 282 - * Iterate over the restricted feature fields and calculate the minimum 283 - * unsigned value between the one supported by the system, and what the 284 - * value is being restricted to. 285 - */ 286 - while (sys_reg_val && restrict_fields) { 287 - value |= min(sys_reg_val & mask, restrict_fields & mask); 288 - sys_reg_val &= ~mask; 289 - restrict_fields &= ~mask; 290 - mask <<= ARM64_FEATURE_FIELD_BITS; 291 - } 292 - 293 - return value; 294 - } 295 - 296 - /* 297 - * Functions that return the value of feature id registers for protected VMs 298 - * based on allowed features, system features, and KVM support. 299 - */ 300 - 301 - static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu) 302 - { 303 - u64 set_mask = 0; 304 - u64 allow_mask = PVM_ID_AA64PFR0_ALLOW; 305 - 306 - set_mask |= get_restricted_features_unsigned(id_aa64pfr0_el1_sys_val, 307 - PVM_ID_AA64PFR0_RESTRICT_UNSIGNED); 308 - 309 - return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask; 310 - } 311 - 312 - static u64 get_pvm_id_aa64pfr1(const struct kvm_vcpu *vcpu) 313 - { 314 - const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm); 315 - u64 allow_mask = PVM_ID_AA64PFR1_ALLOW; 316 - 317 - if (!kvm_has_mte(kvm)) 318 - allow_mask &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE); 319 - 320 - return id_aa64pfr1_el1_sys_val & allow_mask; 321 - } 322 - 323 - static u64 get_pvm_id_aa64zfr0(const struct kvm_vcpu *vcpu) 324 - { 325 - /* 326 - * No support for Scalable Vectors, therefore, hyp has no sanitized 327 - * copy of the feature id register. 
328 - */ 329 - BUILD_BUG_ON(PVM_ID_AA64ZFR0_ALLOW != 0ULL); 330 - return 0; 331 - } 332 - 333 - static u64 get_pvm_id_aa64dfr0(const struct kvm_vcpu *vcpu) 334 - { 335 - /* 336 - * No support for debug, including breakpoints, and watchpoints, 337 - * therefore, pKVM has no sanitized copy of the feature id register. 338 - */ 339 - BUILD_BUG_ON(PVM_ID_AA64DFR0_ALLOW != 0ULL); 340 - return 0; 341 - } 342 - 343 - static u64 get_pvm_id_aa64dfr1(const struct kvm_vcpu *vcpu) 344 - { 345 - /* 346 - * No support for debug, therefore, hyp has no sanitized copy of the 347 - * feature id register. 348 - */ 349 - BUILD_BUG_ON(PVM_ID_AA64DFR1_ALLOW != 0ULL); 350 - return 0; 351 - } 352 - 353 - static u64 get_pvm_id_aa64afr0(const struct kvm_vcpu *vcpu) 354 - { 355 - /* 356 - * No support for implementation defined features, therefore, hyp has no 357 - * sanitized copy of the feature id register. 358 - */ 359 - BUILD_BUG_ON(PVM_ID_AA64AFR0_ALLOW != 0ULL); 360 - return 0; 361 - } 362 - 363 - static u64 get_pvm_id_aa64afr1(const struct kvm_vcpu *vcpu) 364 - { 365 - /* 366 - * No support for implementation defined features, therefore, hyp has no 367 - * sanitized copy of the feature id register. 
368 - */ 369 - BUILD_BUG_ON(PVM_ID_AA64AFR1_ALLOW != 0ULL); 370 - return 0; 371 - } 372 - 373 - static u64 get_pvm_id_aa64isar0(const struct kvm_vcpu *vcpu) 374 - { 375 - return id_aa64isar0_el1_sys_val & PVM_ID_AA64ISAR0_ALLOW; 376 - } 377 - 378 - static u64 get_pvm_id_aa64isar1(const struct kvm_vcpu *vcpu) 379 - { 380 - u64 allow_mask = PVM_ID_AA64ISAR1_ALLOW; 381 - 382 - if (!vcpu_has_ptrauth(vcpu)) 383 - allow_mask &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) | 384 - ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) | 385 - ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) | 386 - ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI)); 387 - 388 - return id_aa64isar1_el1_sys_val & allow_mask; 389 - } 390 - 391 - static u64 get_pvm_id_aa64isar2(const struct kvm_vcpu *vcpu) 392 - { 393 - u64 allow_mask = PVM_ID_AA64ISAR2_ALLOW; 394 - 395 - if (!vcpu_has_ptrauth(vcpu)) 396 - allow_mask &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) | 397 - ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3)); 398 - 399 - return id_aa64isar2_el1_sys_val & allow_mask; 400 - } 401 - 402 - static u64 get_pvm_id_aa64mmfr0(const struct kvm_vcpu *vcpu) 403 - { 404 - u64 set_mask; 405 - 406 - set_mask = get_restricted_features_unsigned(id_aa64mmfr0_el1_sys_val, 407 - PVM_ID_AA64MMFR0_RESTRICT_UNSIGNED); 408 - 409 - return (id_aa64mmfr0_el1_sys_val & PVM_ID_AA64MMFR0_ALLOW) | set_mask; 410 - } 411 - 412 - static u64 get_pvm_id_aa64mmfr1(const struct kvm_vcpu *vcpu) 413 - { 414 - return id_aa64mmfr1_el1_sys_val & PVM_ID_AA64MMFR1_ALLOW; 415 - } 416 - 417 - static u64 get_pvm_id_aa64mmfr2(const struct kvm_vcpu *vcpu) 418 - { 419 - return id_aa64mmfr2_el1_sys_val & PVM_ID_AA64MMFR2_ALLOW; 420 - } 421 - 422 - /* Read a sanitized cpufeature ID register by its encoding */ 423 - u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id) 424 - { 425 - switch (id) { 426 - case SYS_ID_AA64PFR0_EL1: 427 - return get_pvm_id_aa64pfr0(vcpu); 428 - case SYS_ID_AA64PFR1_EL1: 429 - return get_pvm_id_aa64pfr1(vcpu); 430 - case SYS_ID_AA64ZFR0_EL1: 
431 - return get_pvm_id_aa64zfr0(vcpu); 432 - case SYS_ID_AA64DFR0_EL1: 433 - return get_pvm_id_aa64dfr0(vcpu); 434 - case SYS_ID_AA64DFR1_EL1: 435 - return get_pvm_id_aa64dfr1(vcpu); 436 - case SYS_ID_AA64AFR0_EL1: 437 - return get_pvm_id_aa64afr0(vcpu); 438 - case SYS_ID_AA64AFR1_EL1: 439 - return get_pvm_id_aa64afr1(vcpu); 440 - case SYS_ID_AA64ISAR0_EL1: 441 - return get_pvm_id_aa64isar0(vcpu); 442 - case SYS_ID_AA64ISAR1_EL1: 443 - return get_pvm_id_aa64isar1(vcpu); 444 - case SYS_ID_AA64ISAR2_EL1: 445 - return get_pvm_id_aa64isar2(vcpu); 446 - case SYS_ID_AA64MMFR0_EL1: 447 - return get_pvm_id_aa64mmfr0(vcpu); 448 - case SYS_ID_AA64MMFR1_EL1: 449 - return get_pvm_id_aa64mmfr1(vcpu); 450 - case SYS_ID_AA64MMFR2_EL1: 451 - return get_pvm_id_aa64mmfr2(vcpu); 452 - default: 453 - /* Unhandled ID register, RAZ */ 454 - return 0; 455 - } 456 - } 457 - 458 52 static u64 read_id_reg(const struct kvm_vcpu *vcpu, 459 53 struct sys_reg_desc const *r) 460 54 { 461 - return pvm_read_id_reg(vcpu, reg_to_encoding(r)); 55 + struct kvm *kvm = vcpu->kvm; 56 + u32 reg = reg_to_encoding(r); 57 + 58 + if (WARN_ON_ONCE(!test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))) 59 + return 0; 60 + 61 + if (reg >= sys_reg(3, 0, 0, 1, 0) && reg <= sys_reg(3, 0, 0, 7, 7)) 62 + return kvm->arch.id_regs[IDREG_IDX(reg)]; 63 + 64 + return 0; 462 65 } 463 66 464 67 /* Handler to RAZ/WI sysregs */ ··· 303 270 inject_undef64(vcpu); 304 271 return false; 305 272 } 306 - 307 - /* 308 - * No support for AArch32 guests, therefore, pKVM has no sanitized copy 309 - * of AArch32 feature id registers. 310 - */ 311 - BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1), 312 - PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_EL1_EL1_IMP); 313 273 314 274 return pvm_access_raz_wi(vcpu, p, r); 315 275 } ··· 473 447 474 448 /* Performance Monitoring Registers are restricted. */ 475 449 }; 450 + 451 + /* 452 + * Initializes feature registers for protected vms. 
453 + */ 454 + void kvm_init_pvm_id_regs(struct kvm_vcpu *vcpu) 455 + { 456 + struct kvm *kvm = vcpu->kvm; 457 + struct kvm_arch *ka = &kvm->arch; 458 + u32 r; 459 + 460 + hyp_assert_lock_held(&vm_table_lock); 461 + 462 + if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags)) 463 + return; 464 + 465 + /* 466 + * Initialize only AArch64 id registers since AArch32 isn't supported 467 + * for protected VMs. 468 + */ 469 + for (r = sys_reg(3, 0, 0, 4, 0); r <= sys_reg(3, 0, 0, 7, 7); r += sys_reg(0, 0, 0, 0, 1)) 470 + ka->id_regs[IDREG_IDX(r)] = pvm_calc_id_reg(vcpu, r); 471 + 472 + set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags); 473 + } 476 474 477 475 /* 478 476 * Checks that the sysreg table is unique and in-order.
+8 -8
arch/arm64/kvm/hyp/vhe/switch.c
··· 77 77 * VHE (HCR.E2H == 1) which allows us to use here the CPTR_EL2.TAM 78 78 * shift value for trapping the AMU accesses. 79 79 */ 80 - u64 val = CPACR_ELx_TTA | CPTR_EL2_TAM; 80 + u64 val = CPACR_EL1_TTA | CPTR_EL2_TAM; 81 81 82 82 if (guest_owns_fp_regs()) { 83 - val |= CPACR_ELx_FPEN; 83 + val |= CPACR_EL1_FPEN; 84 84 if (vcpu_has_sve(vcpu)) 85 - val |= CPACR_ELx_ZEN; 85 + val |= CPACR_EL1_ZEN; 86 86 } else { 87 87 __activate_traps_fpsimd32(vcpu); 88 88 } ··· 122 122 * hypervisor has traps enabled to dispel any illusion of something more 123 123 * complicated taking place. 124 124 */ 125 - if (!(SYS_FIELD_GET(CPACR_ELx, FPEN, cptr) & BIT(0))) 126 - val &= ~CPACR_ELx_FPEN; 127 - if (!(SYS_FIELD_GET(CPACR_ELx, ZEN, cptr) & BIT(0))) 128 - val &= ~CPACR_ELx_ZEN; 125 + if (!(SYS_FIELD_GET(CPACR_EL1, FPEN, cptr) & BIT(0))) 126 + val &= ~CPACR_EL1_FPEN; 127 + if (!(SYS_FIELD_GET(CPACR_EL1, ZEN, cptr) & BIT(0))) 128 + val &= ~CPACR_EL1_ZEN; 129 129 130 130 if (kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, S2POE, IMP)) 131 - val |= cptr & CPACR_ELx_E0POE; 131 + val |= cptr & CPACR_EL1_E0POE; 132 132 133 133 val |= cptr & CPTR_EL2_TCPAC; 134 134
+4 -4
arch/arm64/kvm/nested.c
··· 1021 1021 res0 |= HCR_NV2; 1022 1022 if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, NV, IMP)) 1023 1023 res0 |= (HCR_AT | HCR_NV1 | HCR_NV); 1024 - if (!(__vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_ADDRESS) && 1025 - __vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_GENERIC))) 1024 + if (!(kvm_vcpu_has_feature(kvm, KVM_ARM_VCPU_PTRAUTH_ADDRESS) && 1025 + kvm_vcpu_has_feature(kvm, KVM_ARM_VCPU_PTRAUTH_GENERIC))) 1026 1026 res0 |= (HCR_API | HCR_APK); 1027 1027 if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TME, IMP)) 1028 1028 res0 |= BIT(39); ··· 1078 1078 1079 1079 /* HFG[RW]TR_EL2 */ 1080 1080 res0 = res1 = 0; 1081 - if (!(__vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_ADDRESS) && 1082 - __vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_GENERIC))) 1081 + if (!(kvm_vcpu_has_feature(kvm, KVM_ARM_VCPU_PTRAUTH_ADDRESS) && 1082 + kvm_vcpu_has_feature(kvm, KVM_ARM_VCPU_PTRAUTH_GENERIC))) 1083 1083 res0 |= (HFGxTR_EL2_APDAKey | HFGxTR_EL2_APDBKey | 1084 1084 HFGxTR_EL2_APGAKey | HFGxTR_EL2_APIAKey | 1085 1085 HFGxTR_EL2_APIBKey);
+1 -5
arch/arm64/kvm/reset.c
··· 85 85 * KVM_REG_ARM64_SVE_VLS. Allocation is deferred until 86 86 * kvm_arm_vcpu_finalize(), which freezes the configuration. 87 87 */ 88 - vcpu_set_flag(vcpu, GUEST_HAS_SVE); 88 + set_bit(KVM_ARCH_FLAG_GUEST_HAS_SVE, &vcpu->kvm->arch.flags); 89 89 } 90 90 91 91 /* ··· 210 210 } else { 211 211 kvm_vcpu_reset_sve(vcpu); 212 212 } 213 - 214 - if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_ADDRESS) || 215 - vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_GENERIC)) 216 - kvm_vcpu_enable_ptrauth(vcpu); 217 213 218 214 if (vcpu_el1_is_32bit(vcpu)) 219 215 pstate = VCPU_RESET_PSTATE_SVC;
+2 -2
arch/arm64/mm/proc.S
··· 501 501 #ifdef CONFIG_ARM64_HAFT 502 502 cmp x9, ID_AA64MMFR1_EL1_HAFDBS_HAFT 503 503 b.lt 1f 504 - orr tcr2, tcr2, TCR2_EL1x_HAFT 504 + orr tcr2, tcr2, TCR2_EL1_HAFT 505 505 #endif /* CONFIG_ARM64_HAFT */ 506 506 1: 507 507 #endif /* CONFIG_ARM64_HW_AFDBM */ ··· 532 532 #undef PTE_MAYBE_NG 533 533 #undef PTE_MAYBE_SHARED 534 534 535 - orr tcr2, tcr2, TCR2_EL1x_PIE 535 + orr tcr2, tcr2, TCR2_EL1_PIE 536 536 537 537 .Lskip_indirection: 538 538
+1 -1
arch/arm64/tools/gen-sysreg.awk
··· 206 206 207 207 # Currently this is effectivey a comment, in future we may want to emit 208 208 # defines for the fields. 209 - /^Fields/ && block_current() == "Sysreg" { 209 + (/^Fields/ || /^Mapping/) && block_current() == "Sysreg" { 210 210 expect_fields(2) 211 211 212 212 if (next_bit != 63)
+22 -22
arch/arm64/tools/sysreg
··· 24 24 # ... 25 25 # EndEnum 26 26 27 - # Alternatively if multiple registers share the same layout then 28 - # a SysregFields block can be used to describe the shared layout 27 + # For VHE aliases (*_EL12, *_EL02) of system registers, a Mapping 28 + # entry describes the register the alias actually accesses: 29 + 30 + # Sysreg <name_EL12> <op0> <op1> <crn> <crm> <op2> 31 + # Mapping <name_EL1> 32 + # EndSysreg 33 + 34 + # Where multiple system regsiters are not VHE aliases but share a 35 + # common layout, a SysregFields block can be used to describe the 36 + # shared layout: 29 37 30 38 # SysregFields <fieldsname> 31 39 # <field> ··· 1986 1978 Field 0 M 1987 1979 EndSysreg 1988 1980 1989 - SysregFields CPACR_ELx 1981 + Sysreg CPACR_EL1 3 0 1 0 2 1990 1982 Res0 63:30 1991 1983 Field 29 E0POE 1992 1984 Field 28 TTA ··· 1997 1989 Res0 19:18 1998 1990 Field 17:16 ZEN 1999 1991 Res0 15:0 2000 - EndSysregFields 2001 - 2002 - Sysreg CPACR_EL1 3 0 1 0 2 2003 - Fields CPACR_ELx 2004 1992 EndSysreg 2005 1993 2006 1994 Sysreg SMPRI_EL1 3 0 1 2 4 ··· 2951 2947 EndSysreg 2952 2948 2953 2949 Sysreg CPACR_EL12 3 5 1 0 2 2954 - Fields CPACR_ELx 2950 + Mapping CPACR_EL1 2955 2951 EndSysreg 2956 2952 2957 2953 Sysreg ZCR_EL12 3 5 1 2 0 2958 - Fields ZCR_ELx 2954 + Mapping ZCR_EL1 2959 2955 EndSysreg 2960 2956 2961 2957 Sysreg SMCR_EL12 3 5 1 2 6 2962 - Fields SMCR_ELx 2958 + Mapping SMCR_EL1 2963 2959 EndSysreg 2964 2960 2965 2961 Sysreg GCSCR_EL12 3 5 2 5 0 2966 - Fields GCSCR_ELx 2962 + Mapping GCSCR_EL1 2967 2963 EndSysreg 2968 2964 2969 2965 Sysreg GCSPR_EL12 3 5 2 5 1 2970 - Fields GCSPR_ELx 2966 + Mapping GCSPR_EL1 2971 2967 EndSysreg 2972 2968 2973 2969 Sysreg FAR_EL12 3 5 6 0 0 ··· 2979 2975 EndSysreg 2980 2976 2981 2977 Sysreg CONTEXTIDR_EL12 3 5 13 0 1 2982 - Fields CONTEXTIDR_ELx 2978 + Mapping CONTEXTIDR_EL1 2983 2979 EndSysreg 2984 2980 2985 2981 SysregFields TTBRx_EL1 ··· 2996 2992 Fields TTBRx_EL1 2997 2993 EndSysreg 2998 2994 2999 - SysregFields TCR2_EL1x 2995 + 
Sysreg TCR2_EL1 3 0 2 0 3 3000 2996 Res0 63:16 3001 2997 Field 15 DisCH1 3002 2998 Field 14 DisCH0 ··· 3010 3006 Field 2 E0POE 3011 3007 Field 1 PIE 3012 3008 Field 0 PnCH 3013 - EndSysregFields 3014 - 3015 - Sysreg TCR2_EL1 3 0 2 0 3 3016 - Fields TCR2_EL1x 3017 3009 EndSysreg 3018 3010 3019 3011 Sysreg TCR2_EL12 3 5 2 0 3 3020 - Fields TCR2_EL1x 3012 + Mapping TCR2_EL1 3021 3013 EndSysreg 3022 3014 3023 3015 Sysreg TCR2_EL2 3 4 2 0 3 ··· 3084 3084 EndSysreg 3085 3085 3086 3086 Sysreg PIRE0_EL12 3 5 10 2 2 3087 - Fields PIRx_ELx 3087 + Mapping PIRE0_EL1 3088 3088 EndSysreg 3089 3089 3090 3090 Sysreg PIRE0_EL2 3 4 10 2 2 ··· 3096 3096 EndSysreg 3097 3097 3098 3098 Sysreg PIR_EL12 3 5 10 2 3 3099 - Fields PIRx_ELx 3099 + Mapping PIR_EL1 3100 3100 EndSysreg 3101 3101 3102 3102 Sysreg PIR_EL2 3 4 10 2 3 ··· 3116 3116 EndSysreg 3117 3117 3118 3118 Sysreg POR_EL12 3 5 10 2 4 3119 - Fields PIRx_ELx 3119 + Mapping POR_EL1 3120 3120 EndSysreg 3121 3121 3122 3122 Sysreg S2POR_EL1 3 0 10 2 5