
Merge branch kvm-arm64/pkvm-fixed-features-prologue into kvmarm-master/next

* kvm-arm64/pkvm-fixed-features-prologue:
: Rework a bunch of common infrastructure as a prologue
: to Fuad Tabba's protected VM fixed feature series.
KVM: arm64: Upgrade trace_kvm_arm_set_dreg32() to 64bit
KVM: arm64: Add config register bit definitions
KVM: arm64: Add feature register flag definitions
KVM: arm64: Track value of cptr_el2 in struct kvm_vcpu_arch
KVM: arm64: Keep mdcr_el2's value as set by __init_el2_debug
KVM: arm64: Restore mdcr_el2 from vcpu
KVM: arm64: Refactor sys_regs.h,c for nVHE reuse
KVM: arm64: Fix names of config register fields
KVM: arm64: MDCR_EL2 is a 64-bit register
KVM: arm64: Remove trailing whitespace in comment
KVM: arm64: placeholder to check if VM is protected

Signed-off-by: Marc Zyngier <maz@kernel.org>

18 files changed, +140 -104
+2 -2
arch/arm64/include/asm/cpufeature.h
···
 {
         u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SHIFT);
 
-        return val == ID_AA64PFR0_EL1_32BIT_64BIT;
+        return val == ID_AA64PFR0_ELx_32BIT_64BIT;
 }
 
 static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
 {
         u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT);
 
-        return val == ID_AA64PFR0_EL0_32BIT_64BIT;
+        return val == ID_AA64PFR0_ELx_32BIT_64BIT;
 }
 
 static inline bool id_aa64pfr0_sve(u64 pfr0)
+38 -16
arch/arm64/include/asm/kvm_arm.h
···
 #include <asm/types.h>
 
 /* Hyp Configuration Register (HCR) bits */
+
+#define HCR_TID5        (UL(1) << 58)
+#define HCR_DCT         (UL(1) << 57)
 #define HCR_ATA_SHIFT   56
 #define HCR_ATA         (UL(1) << HCR_ATA_SHIFT)
+#define HCR_AMVOFFEN    (UL(1) << 51)
+#define HCR_FIEN        (UL(1) << 47)
 #define HCR_FWB         (UL(1) << 46)
 #define HCR_API         (UL(1) << 41)
 #define HCR_APK         (UL(1) << 40)
···
 #define HCR_TVM         (UL(1) << 26)
 #define HCR_TTLB        (UL(1) << 25)
 #define HCR_TPU         (UL(1) << 24)
-#define HCR_TPC         (UL(1) << 23)
+#define HCR_TPC         (UL(1) << 23) /* HCR_TPCP if FEAT_DPB */
 #define HCR_TSW         (UL(1) << 22)
-#define HCR_TAC         (UL(1) << 21)
+#define HCR_TACR        (UL(1) << 21)
 #define HCR_TIDCP       (UL(1) << 20)
 #define HCR_TSC         (UL(1) << 19)
 #define HCR_TID3        (UL(1) << 18)
···
 #define HCR_PTW         (UL(1) << 2)
 #define HCR_SWIO        (UL(1) << 1)
 #define HCR_VM          (UL(1) << 0)
+#define HCR_RES0        ((UL(1) << 48) | (UL(1) << 39))
 
 /*
  * The bits we set in HCR:
  * TLOR:        Trap LORegion register accesses
  * RW:          64bit by default, can be overridden for 32bit VMs
- * TAC:         Trap ACTLR
+ * TACR:        Trap ACTLR
  * TSC:         Trap SMC
  * TSW:         Trap cache operations by set/way
  * TWE:         Trap WFE
···
  * PTW:         Take a stage2 fault if a stage1 walk steps in device memory
  */
 #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
-                         HCR_BSU_IS | HCR_FB | HCR_TAC | \
+                         HCR_BSU_IS | HCR_FB | HCR_TACR | \
                          HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
                          HCR_FMO | HCR_IMO | HCR_PTW )
 #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
···
 #define CPTR_EL2_TTA    (1 << 20)
 #define CPTR_EL2_TFP    (1 << CPTR_EL2_TFP_SHIFT)
 #define CPTR_EL2_TZ     (1 << 8)
-#define CPTR_EL2_RES1   0x000032ff /* known RES1 bits in CPTR_EL2 */
-#define CPTR_EL2_DEFAULT        CPTR_EL2_RES1
+#define CPTR_NVHE_EL2_RES1      0x000032ff /* known RES1 bits in CPTR_EL2 (nVHE) */
+#define CPTR_EL2_DEFAULT        CPTR_NVHE_EL2_RES1
+#define CPTR_NVHE_EL2_RES0      (GENMASK(63, 32) |     \
+                                 GENMASK(29, 21) |     \
+                                 GENMASK(19, 14) |     \
+                                 BIT(11))
 
 /* Hyp Debug Configuration Register bits */
 #define MDCR_EL2_E2TB_MASK      (UL(0x3))
 #define MDCR_EL2_E2TB_SHIFT     (UL(24))
-#define MDCR_EL2_TTRF           (1 << 19)
-#define MDCR_EL2_TPMS           (1 << 14)
+#define MDCR_EL2_HPMFZS         (UL(1) << 36)
+#define MDCR_EL2_HPMFZO         (UL(1) << 29)
+#define MDCR_EL2_MTPME          (UL(1) << 28)
+#define MDCR_EL2_TDCC           (UL(1) << 27)
+#define MDCR_EL2_HCCD           (UL(1) << 23)
+#define MDCR_EL2_TTRF           (UL(1) << 19)
+#define MDCR_EL2_HPMD           (UL(1) << 17)
+#define MDCR_EL2_TPMS           (UL(1) << 14)
 #define MDCR_EL2_E2PB_MASK      (UL(0x3))
 #define MDCR_EL2_E2PB_SHIFT     (UL(12))
-#define MDCR_EL2_TDRA           (1 << 11)
-#define MDCR_EL2_TDOSA          (1 << 10)
-#define MDCR_EL2_TDA            (1 << 9)
-#define MDCR_EL2_TDE            (1 << 8)
-#define MDCR_EL2_HPME           (1 << 7)
-#define MDCR_EL2_TPM            (1 << 6)
-#define MDCR_EL2_TPMCR          (1 << 5)
-#define MDCR_EL2_HPMN_MASK      (0x1F)
+#define MDCR_EL2_TDRA           (UL(1) << 11)
+#define MDCR_EL2_TDOSA          (UL(1) << 10)
+#define MDCR_EL2_TDA            (UL(1) << 9)
+#define MDCR_EL2_TDE            (UL(1) << 8)
+#define MDCR_EL2_HPME           (UL(1) << 7)
+#define MDCR_EL2_TPM            (UL(1) << 6)
+#define MDCR_EL2_TPMCR          (UL(1) << 5)
+#define MDCR_EL2_HPMN_MASK      (UL(0x1F))
+#define MDCR_EL2_RES0           (GENMASK(63, 37) |     \
+                                 GENMASK(35, 30) |     \
+                                 GENMASK(25, 24) |     \
+                                 GENMASK(22, 20) |     \
+                                 BIT(18) |             \
+                                 GENMASK(16, 15))
 
 /* For compatibility with fault code shared with 32-bit */
 #define FSC_FAULT       ESR_ELx_FSC_FAULT
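As a side note on the new RES0 masks: a minimal standalone sketch (hosted C, not kernel code; GENMASK/BIT/UL are re-derived here and assume a 64-bit unsigned long, and the sample value is made up) of how a definition like MDCR_EL2_RES0 can be used to sanity-check a value before it reaches the register:

/*
 * Standalone sketch, not kernel code: helpers re-derived, value invented.
 */
#include <stdio.h>

#define UL(x)           ((unsigned long)(x))
#define BIT(n)          (UL(1) << (n))
#define GENMASK(h, l)   (((~UL(0)) << (l)) & (~UL(0) >> (63 - (h))))

#define MDCR_EL2_RES0   (GENMASK(63, 37) | GENMASK(35, 30) | \
                         GENMASK(25, 24) | GENMASK(22, 20) | \
                         BIT(18) | GENMASK(16, 15))

int main(void)
{
        unsigned long mdcr = BIT(27) | BIT(18); /* TDCC plus one RES0 bit */

        if (mdcr & MDCR_EL2_RES0)
                printf("RES0 bits set: 0x%lx\n", mdcr & MDCR_EL2_RES0);

        mdcr &= ~MDCR_EL2_RES0; /* clamp before writing the register */
        printf("sanitised: 0x%lx\n", mdcr);
        return 0;
}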
+1 -1
arch/arm64/include/asm/kvm_asm.h
···
 extern void __vgic_v3_write_vmcr(u32 vmcr);
 extern void __vgic_v3_init_lrs(void);
 
-extern u32 __kvm_get_mdcr_el2(void);
+extern u64 __kvm_get_mdcr_el2(void);
 
 #define __KVM_EXTABLE(from, to) \
         " .pushsection __kvm_ex_table, \"a\"\n" \
+11 -2
arch/arm64/include/asm/kvm_host.h
···
         /* Stage 2 paging state used by the hardware on next switch */
         struct kvm_s2_mmu *hw_mmu;
 
-        /* HYP configuration */
+        /* Values of trap registers for the guest. */
         u64 hcr_el2;
-        u32 mdcr_el2;
+        u64 mdcr_el2;
+        u64 cptr_el2;
+
+        /* Values of trap registers for the host before guest entry. */
+        u64 mdcr_el2_host;
 
         /* Exception Information */
         struct kvm_vcpu_fault_info fault;
···
 void kvm_arch_free_vm(struct kvm *kvm);
 
 int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);
+
+static inline bool kvm_vm_is_protected(struct kvm *kvm)
+{
+        return false;
+}
 
 int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
 bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
+1 -1
arch/arm64/include/asm/kvm_hyp.h
···
 
 #ifndef __KVM_NVHE_HYPERVISOR__
 void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
-void deactivate_traps_vhe_put(void);
+void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu);
 #endif
 
 u64 __guest_enter(struct kvm_vcpu *vcpu);
+12 -4
arch/arm64/include/asm/sysreg.h
···
 #define ID_AA64PFR0_AMU                 0x1
 #define ID_AA64PFR0_SVE                 0x1
 #define ID_AA64PFR0_RAS_V1              0x1
+#define ID_AA64PFR0_RAS_V1P1            0x2
 #define ID_AA64PFR0_FP_NI               0xf
 #define ID_AA64PFR0_FP_SUPPORTED        0x0
 #define ID_AA64PFR0_ASIMD_NI            0xf
 #define ID_AA64PFR0_ASIMD_SUPPORTED     0x0
-#define ID_AA64PFR0_EL1_64BIT_ONLY      0x1
-#define ID_AA64PFR0_EL1_32BIT_64BIT     0x2
-#define ID_AA64PFR0_EL0_64BIT_ONLY      0x1
-#define ID_AA64PFR0_EL0_32BIT_64BIT     0x2
+#define ID_AA64PFR0_ELx_64BIT_ONLY      0x1
+#define ID_AA64PFR0_ELx_32BIT_64BIT     0x2
 
 /* id_aa64pfr1 */
 #define ID_AA64PFR1_MPAMFRAC_SHIFT      16
···
 #define ID_AA64MMFR0_BIGENDEL_SHIFT     8
 #define ID_AA64MMFR0_ASID_SHIFT         4
 #define ID_AA64MMFR0_PARANGE_SHIFT      0
+
+#define ID_AA64MMFR0_ASID_8             0x0
+#define ID_AA64MMFR0_ASID_16            0x2
 
 #define ID_AA64MMFR0_TGRAN4_NI                  0xf
 #define ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN       0x0
···
 #define ID_AA64MMFR2_CNP_SHIFT          0
 
 /* id_aa64dfr0 */
+#define ID_AA64DFR0_MTPMU_SHIFT         48
 #define ID_AA64DFR0_TRBE_SHIFT          44
 #define ID_AA64DFR0_TRACE_FILT_SHIFT    40
 #define ID_AA64DFR0_DOUBLELOCK_SHIFT    36
···
 #define ICH_VTR_SEIS_MASK       (1 << ICH_VTR_SEIS_SHIFT)
 #define ICH_VTR_A3V_SHIFT       21
 #define ICH_VTR_A3V_MASK        (1 << ICH_VTR_A3V_SHIFT)
+
+#define ARM64_FEATURE_FIELD_BITS        4
+
+/* Create a mask for the feature bits of the specified feature. */
+#define ARM64_FEATURE_MASK(x)   (GENMASK_ULL(x##_SHIFT + ARM64_FEATURE_FIELD_BITS - 1, x##_SHIFT))
 
 #ifdef __ASSEMBLY__
 
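ARM64_FEATURE_MASK() pastes `_SHIFT` onto the feature name and builds the 4-bit field mask via GENMASK_ULL(). A minimal standalone sketch of the expansion and of the clear/insert pattern read_id_reg() uses below (hosted C, not kernel code; GENMASK_ULL and the FIELD_PREP stand-in mimic the kernel helpers, and the ID register value is invented):

/*
 * Standalone sketch, not kernel code: helpers mimicked, value invented.
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l)       (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

#define ARM64_FEATURE_FIELD_BITS 4
#define ARM64_FEATURE_MASK(x) \
        (GENMASK_ULL(x##_SHIFT + ARM64_FEATURE_FIELD_BITS - 1, x##_SHIFT))

#define ID_AA64PFR0_SVE_SHIFT   32      /* field position, as in sysreg.h */

/* Simplified FIELD_PREP: shift a field value into its masked position. */
#define FIELD_PREP_SIMPLE(mask, shift, v) (((uint64_t)(v) << (shift)) & (mask))

int main(void)
{
        uint64_t val = 0x0000000100001011ULL;   /* invented ID_AA64PFR0 */

        /* Hide SVE, as read_id_reg() does for SVE-less vcpus: */
        val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_SVE);

        /* ...or advertise an explicit field value instead: */
        val |= FIELD_PREP_SIMPLE(ARM64_FEATURE_MASK(ID_AA64PFR0_SVE),
                                 ID_AA64PFR0_SVE_SHIFT, 0x1);

        printf("pfr0 = 0x%016llx\n", (unsigned long long)val);
        return 0;
}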
+4 -4
arch/arm64/kernel/cpufeature.c
···
         S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
-        ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
-        ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
+        ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_ELx_64BIT_ONLY),
+        ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_ELx_64BIT_ONLY),
         ARM64_FTR_END,
 };
···
                 .sys_reg = SYS_ID_AA64PFR0_EL1,
                 .sign = FTR_UNSIGNED,
                 .field_pos = ID_AA64PFR0_EL0_SHIFT,
-                .min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
+                .min_field_value = ID_AA64PFR0_ELx_32BIT_64BIT,
         },
 #ifdef CONFIG_KVM
         {
···
                 .sys_reg = SYS_ID_AA64PFR0_EL1,
                 .sign = FTR_UNSIGNED,
                 .field_pos = ID_AA64PFR0_EL1_SHIFT,
-                .min_field_value = ID_AA64PFR0_EL1_32BIT_64BIT,
+                .min_field_value = ID_AA64PFR0_ELx_32BIT_64BIT,
         },
         {
                 .desc = "Protected KVM",
+1
arch/arm64/kvm/arm.c
···
         }
 
         vcpu_reset_hcr(vcpu);
+        vcpu->arch.cptr_el2 = CPTR_EL2_DEFAULT;
 
         /*
          * Handle the "start in power-off" case.
+1 -1
arch/arm64/kvm/debug.c
···
                             DBG_MDSCR_KDE | \
                             DBG_MDSCR_MDE)
 
-static DEFINE_PER_CPU(u32, mdcr_el2);
+static DEFINE_PER_CPU(u64, mdcr_el2);
 
 /**
  * save/restore_guest_debug_regs
+5 -1
arch/arm64/kvm/hyp/include/hyp/switch.h
···
                 write_sysreg(0, pmselr_el0);
                 write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
         }
+
+        vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2);
         write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
 }
 
-static inline void __deactivate_traps_common(void)
+static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
 {
+        write_sysreg(vcpu->arch.mdcr_el2_host, mdcr_el2);
+
         write_sysreg(0, hstr_el2);
         if (kvm_arm_support_pmu_v3())
                 write_sysreg(0, pmuserenr_el0);
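The two hunks above are deliberately symmetric: __activate_traps_common() stashes the host's live MDCR_EL2 in the vcpu, and __deactivate_traps_common() writes that exact value back, which is what lets the nVHE/VHE deactivate paths below drop their HPMN/E2PB/E2TB reconstruction. A minimal sketch of the pattern (hosted C, not kernel code; the EL2 sysreg is modelled as a plain variable and the values are arbitrary):

/*
 * Standalone sketch, not kernel code.
 */
#include <stdint.h>
#include <assert.h>

static uint64_t hw_mdcr_el2;            /* stands in for MDCR_EL2 */

struct vcpu_arch {
        uint64_t mdcr_el2;              /* guest trap configuration */
        uint64_t mdcr_el2_host;         /* host value saved at entry */
};

static void activate_traps_common(struct vcpu_arch *arch)
{
        arch->mdcr_el2_host = hw_mdcr_el2;      /* save the exact host value */
        hw_mdcr_el2 = arch->mdcr_el2;           /* install the guest value */
}

static void deactivate_traps_common(struct vcpu_arch *arch)
{
        hw_mdcr_el2 = arch->mdcr_el2_host;      /* restore, no reconstruction */
}

int main(void)
{
        struct vcpu_arch arch = { .mdcr_el2 = 0x1f };

        hw_mdcr_el2 = 0xabc;
        activate_traps_common(&arch);
        deactivate_traps_common(&arch);
        assert(hw_mdcr_el2 == 0xabc);   /* host value survives the round trip */
        return 0;
}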
+1 -1
arch/arm64/kvm/hyp/nvhe/debug-sr.c
···
         __debug_switch_to_host_common(vcpu);
 }
 
-u32 __kvm_get_mdcr_el2(void)
+u64 __kvm_get_mdcr_el2(void)
 {
         return read_sysreg(mdcr_el2);
 }
+3 -10
arch/arm64/kvm/hyp/nvhe/switch.c
···
         ___activate_traps(vcpu);
         __activate_traps_common(vcpu);
 
-        val = CPTR_EL2_DEFAULT;
+        val = vcpu->arch.cptr_el2;
         val |= CPTR_EL2_TTA | CPTR_EL2_TAM;
         if (!update_fp_enabled(vcpu)) {
                 val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
···
 static void __deactivate_traps(struct kvm_vcpu *vcpu)
 {
         extern char __kvm_hyp_host_vector[];
-        u64 mdcr_el2, cptr;
+        u64 cptr;
 
         ___deactivate_traps(vcpu);
-
-        mdcr_el2 = read_sysreg(mdcr_el2);
 
         if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
                 u64 val;
···
                 isb();
         }
 
-        __deactivate_traps_common();
+        __deactivate_traps_common(vcpu);
 
-        mdcr_el2 &= MDCR_EL2_HPMN_MASK;
-        mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
-        mdcr_el2 |= MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT;
-
-        write_sysreg(mdcr_el2, mdcr_el2);
         write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
 
         cptr = CPTR_EL2_DEFAULT;
+1 -1
arch/arm64/kvm/hyp/vhe/debug-sr.c
···
         __debug_switch_to_host_common(vcpu);
 }
 
-u32 __kvm_get_mdcr_el2(void)
+u64 __kvm_get_mdcr_el2(void)
 {
         return read_sysreg(mdcr_el2);
 }
+2 -10
arch/arm64/kvm/hyp/vhe/switch.c
···
         __activate_traps_common(vcpu);
 }
 
-void deactivate_traps_vhe_put(void)
+void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu)
 {
-        u64 mdcr_el2 = read_sysreg(mdcr_el2);
-
-        mdcr_el2 &= MDCR_EL2_HPMN_MASK |
-                    MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT |
-                    MDCR_EL2_TPMS;
-
-        write_sysreg(mdcr_el2, mdcr_el2);
-
-        __deactivate_traps_common();
+        __deactivate_traps_common(vcpu);
 }
 
 /* Switch to the guest for VHE systems running in EL2 */
+1 -1
arch/arm64/kvm/hyp/vhe/sysreg-sr.c
···
         struct kvm_cpu_context *host_ctxt;
 
         host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
-        deactivate_traps_vhe_put();
+        deactivate_traps_vhe_put(vcpu);
 
         __sysreg_save_el1_state(guest_ctxt);
         __sysreg_save_user_state(guest_ctxt);
+18 -46
arch/arm64/kvm/sys_regs.c
···
  * 64bit interface.
  */
 
-#define reg_to_encoding(x)                                              \
-        sys_reg((u32)(x)->Op0, (u32)(x)->Op1,                           \
-                (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
-
 static bool read_from_write_only(struct kvm_vcpu *vcpu,
                                  struct sys_reg_params *params,
                                  const struct sys_reg_desc *r)
···
 /*
  * We want to avoid world-switching all the DBG registers all the
  * time:
- * 
+ *
  * - If we've touched any debug register, it is likely that we're
  *   going to touch more of them. It then makes sense to disable the
  *   traps and start doing the save/restore dance
  * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
  *   then mandatory to save/restore the registers, as the guest
  *   depends on them.
- * 
+ *
  * For this, we use a DIRTY bit, indicating the guest has modified the
  * debug registers, used as follow:
  *
···
         return true;
 }
 
-#define FEATURE(x)      (GENMASK_ULL(x##_SHIFT + 3, x##_SHIFT))
-
 /* Read a sanitised cpufeature ID register by sys_reg_desc */
 static u64 read_id_reg(const struct kvm_vcpu *vcpu,
                        struct sys_reg_desc const *r, bool raz)
···
         switch (id) {
         case SYS_ID_AA64PFR0_EL1:
                 if (!vcpu_has_sve(vcpu))
-                        val &= ~FEATURE(ID_AA64PFR0_SVE);
-                val &= ~FEATURE(ID_AA64PFR0_AMU);
-                val &= ~FEATURE(ID_AA64PFR0_CSV2);
-                val |= FIELD_PREP(FEATURE(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
-                val &= ~FEATURE(ID_AA64PFR0_CSV3);
-                val |= FIELD_PREP(FEATURE(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
+                        val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_SVE);
+                val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_AMU);
+                val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2);
+                val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
+                val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3);
+                val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
                 break;
         case SYS_ID_AA64PFR1_EL1:
-                val &= ~FEATURE(ID_AA64PFR1_MTE);
+                val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);
                 if (kvm_has_mte(vcpu->kvm)) {
                         u64 pfr, mte;
 
                         pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
                         mte = cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR1_MTE_SHIFT);
-                        val |= FIELD_PREP(FEATURE(ID_AA64PFR1_MTE), mte);
+                        val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR1_MTE), mte);
                 }
                 break;
         case SYS_ID_AA64ISAR1_EL1:
                 if (!vcpu_has_ptrauth(vcpu))
-                        val &= ~(FEATURE(ID_AA64ISAR1_APA) |
-                                 FEATURE(ID_AA64ISAR1_API) |
-                                 FEATURE(ID_AA64ISAR1_GPA) |
-                                 FEATURE(ID_AA64ISAR1_GPI));
+                        val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_APA) |
+                                 ARM64_FEATURE_MASK(ID_AA64ISAR1_API) |
+                                 ARM64_FEATURE_MASK(ID_AA64ISAR1_GPA) |
+                                 ARM64_FEATURE_MASK(ID_AA64ISAR1_GPI));
                 break;
         case SYS_ID_AA64DFR0_EL1:
                 /* Limit debug to ARMv8.0 */
-                val &= ~FEATURE(ID_AA64DFR0_DEBUGVER);
-                val |= FIELD_PREP(FEATURE(ID_AA64DFR0_DEBUGVER), 6);
+                val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER);
+                val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), 6);
                 /* Limit guests to PMUv3 for ARMv8.4 */
                 val = cpuid_feature_cap_perfmon_field(val,
                                                       ID_AA64DFR0_PMUVER_SHIFT,
                                                       kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVER_8_4 : 0);
                 /* Hide SPE from guests */
-                val &= ~FEATURE(ID_AA64DFR0_PMSVER);
+                val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVER);
                 break;
         case SYS_ID_DFR0_EL1:
                 /* Limit guests to PMUv3 for ARMv8.4 */
···
         return 0;
 }
 
-static int match_sys_reg(const void *key, const void *elt)
-{
-        const unsigned long pval = (unsigned long)key;
-        const struct sys_reg_desc *r = elt;
-
-        return pval - reg_to_encoding(r);
-}
-
-static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
-                                           const struct sys_reg_desc table[],
-                                           unsigned int num)
-{
-        unsigned long pval = reg_to_encoding(params);
-
-        return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
-}
-
 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
 {
         kvm_inject_undefined(vcpu);
···
 
         trace_kvm_handle_sys_reg(esr);
 
-        params.Op0 = (esr >> 20) & 3;
-        params.Op1 = (esr >> 14) & 0x7;
-        params.CRn = (esr >> 10) & 0xf;
-        params.CRm = (esr >> 1) & 0xf;
-        params.Op2 = (esr >> 17) & 0x7;
+        params = esr_sys64_to_params(esr);
         params.regval = vcpu_get_reg(vcpu, Rt);
-        params.is_write = !(esr & 1);
 
         ret = emulate_sys_reg(vcpu, &params);
+31
arch/arm64/kvm/sys_regs.h
···
 #ifndef __ARM64_KVM_SYS_REGS_LOCAL_H__
 #define __ARM64_KVM_SYS_REGS_LOCAL_H__
 
+#include <linux/bsearch.h>
+
+#define reg_to_encoding(x)                                              \
+        sys_reg((u32)(x)->Op0, (u32)(x)->Op1,                           \
+                (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
+
 struct sys_reg_params {
         u8      Op0;
         u8      Op1;
···
         u64     regval;
         bool    is_write;
 };
+
+#define esr_sys64_to_params(esr)                                        \
+        ((struct sys_reg_params){ .Op0 = ((esr) >> 20) & 3,             \
+                                  .Op1 = ((esr) >> 14) & 0x7,           \
+                                  .CRn = ((esr) >> 10) & 0xf,           \
+                                  .CRm = ((esr) >> 1) & 0xf,            \
+                                  .Op2 = ((esr) >> 17) & 0x7,           \
+                                  .is_write = !((esr) & 1) })
 
 struct sys_reg_desc {
         /* Sysreg string for debug */
···
         if (i1->CRm != i2->CRm)
                 return i1->CRm - i2->CRm;
         return i1->Op2 - i2->Op2;
 }
+
+static inline int match_sys_reg(const void *key, const void *elt)
+{
+        const unsigned long pval = (unsigned long)key;
+        const struct sys_reg_desc *r = elt;
+
+        return pval - reg_to_encoding(r);
+}
+
+static inline const struct sys_reg_desc *
+find_reg(const struct sys_reg_params *params, const struct sys_reg_desc table[],
+         unsigned int num)
+{
+        unsigned long pval = reg_to_encoding(params);
+
+        return __inline_bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
+}
 
 const struct sys_reg_desc *find_reg_by_id(u64 id,
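esr_sys64_to_params() centralises the ESR_EL2 ISS decode that kvm_handle_sys_reg() previously open-coded; bit 0 of the ISS is the direction bit (1 = read), hence the negation for is_write. A standalone sketch using the same shifts (hosted C, not kernel code; the example ESR value is made up):

/*
 * Standalone sketch, not kernel code.
 */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

struct sys_reg_params {
        uint8_t Op0, Op1, CRn, CRm, Op2;
        uint64_t regval;
        bool is_write;
};

#define esr_sys64_to_params(esr)                                \
        ((struct sys_reg_params){ .Op0 = ((esr) >> 20) & 3,     \
                                  .Op1 = ((esr) >> 14) & 0x7,   \
                                  .CRn = ((esr) >> 10) & 0xf,   \
                                  .CRm = ((esr) >> 1) & 0xf,    \
                                  .Op2 = ((esr) >> 17) & 0x7,   \
                                  .is_write = !((esr) & 1) })

int main(void)
{
        /* Made-up ISS: Op0=3, Op1=3, CRn=14, CRm=2, Op2=0, direction=read */
        uint32_t esr = (3u << 20) | (3u << 14) | (14u << 10) | (2u << 1) | 1u;
        struct sys_reg_params p = esr_sys64_to_params(esr);

        printf("Op0=%d Op1=%d CRn=%d CRm=%d Op2=%d %s\n",
               p.Op0, p.Op1, p.CRn, p.CRm, p.Op2,
               p.is_write ? "write" : "read");
        return 0;
}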
+7 -3
arch/arm64/kvm/trace_handle_exit.h
···
         TP_printk("flags: 0x%08x", __entry->guest_debug)
 );
 
+/*
+ * The dreg32 name is a leftover from a distant past. This will really
+ * output a 64bit value...
+ */
 TRACE_EVENT(kvm_arm_set_dreg32,
-        TP_PROTO(const char *name, __u32 value),
+        TP_PROTO(const char *name, __u64 value),
         TP_ARGS(name, value),
 
         TP_STRUCT__entry(
                 __field(const char *, name)
-                __field(__u32, value)
+                __field(__u64, value)
         ),
 
         TP_fast_assign(
···
                 __entry->value = value;
         ),
 
-        TP_printk("%s: 0x%08x", __entry->name, __entry->value)
+        TP_printk("%s: 0x%llx", __entry->name, __entry->value)
 );
 
 TRACE_DEFINE_SIZEOF(__u64);