Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch kvm-arm64/el2-feature-control into kvmarm-master/next

* kvm-arm64/el2-feature-control: (23 commits)
: .
: General rework of EL2 features that can be disabled to satisfy
: the requirement of migration between heterogeneous hosts:
:
: - Handle effective RES0 behaviour of undefined registers, making sure
: that disabling a feature affects full registers, and not just
: individual control bits. (20250918151402.1665315-1-maz@kernel.org)
:
: - Allow ID_AA64MMFR1_EL1.{TWED,HCX} to be disabled from userspace.
: (20250911114621.3724469-1-yangjinqian1@huawei.com)
:
: - Turn the NV feature management into a deny-list, and expose
: missing features to EL2 guests.
: (20250912212258.407350-1-oliver.upton@linux.dev)
: .
KVM: arm64: nv: Expose up to FEAT_Debugv8p8 to NV-enabled VMs
KVM: arm64: nv: Advertise FEAT_TIDCP1 to NV-enabled VMs
KVM: arm64: nv: Advertise FEAT_SpecSEI to NV-enabled VMs
KVM: arm64: nv: Expose FEAT_TWED to NV-enabled VMs
KVM: arm64: nv: Exclude guest's TWED configuration when TWE isn't set
KVM: arm64: nv: Expose FEAT_AFP to NV-enabled VMs
KVM: arm64: nv: Expose FEAT_ECBHB to NV-enabled VMs
KVM: arm64: nv: Expose FEAT_RASv1p1 via RAS_frac
KVM: arm64: nv: Expose FEAT_DF2 to NV-enabled VMs
KVM: arm64: nv: Don't erroneously claim FEAT_DoubleLock for NV VMs
KVM: arm64: nv: Convert masks to denylists in limit_nv_id_reg()
KVM: arm64: selftests: Test writes to ID_AA64MMFR1_EL1.{HCX, TWED}
KVM: arm64: Make ID_AA64MMFR1_EL1.{HCX, TWED} writable from userspace
KVM: arm64: Convert MDCR_EL2 RES0 handling to compute_reg_res0_bits()
KVM: arm64: Convert SCTLR_EL1 RES0 handling to compute_reg_res0_bits()
KVM: arm64: Enforce absence of FEAT_TCR2 on TCR2_EL2
KVM: arm64: Enforce absence of FEAT_SCTLR2 on SCTLR2_EL{1,2}
KVM: arm64: Convert HCR_EL2 RES0 handling to compute_reg_res0_bits()
KVM: arm64: Enforce absence of FEAT_HCX on HCRX_EL2
KVM: arm64: Enforce absence of FEAT_FGT2 on FGT2 registers
...

Signed-off-by: Marc Zyngier <maz@kernel.org>

+291 -149
+227 -131
arch/arm64/kvm/config.c
··· 7 7 #include <linux/kvm_host.h> 8 8 #include <asm/sysreg.h> 9 9 10 + /* 11 + * Describes the dependencies between a set of bits (or the negation 12 + * of a set of RES0 bits) and a feature. The flags indicate how the 13 + * data is interpreted. 14 + */ 10 15 struct reg_bits_to_feat_map { 11 - u64 bits; 16 + union { 17 + u64 bits; 18 + u64 *res0p; 19 + }; 12 20 13 21 #define NEVER_FGU BIT(0) /* Can trap, but never UNDEF */ 14 22 #define CALL_FUNC BIT(1) /* Needs to evaluate tons of crap */ 15 23 #define FIXED_VALUE BIT(2) /* RAZ/WI or RAO/WI in KVM */ 24 + #define RES0_POINTER BIT(3) /* Pointer to RES0 value instead of bits */ 25 + 16 26 unsigned long flags; 17 27 18 28 union { ··· 38 28 }; 39 29 }; 40 30 41 - #define __NEEDS_FEAT_3(m, f, id, fld, lim) \ 31 + /* 32 + * Describes the dependencies for a given register: 33 + * 34 + * @feat_map describes the dependency for the whole register. If the 35 + * features the register depends on are not present, the whole 36 + * register is effectively RES0. 37 + * 38 + * @bit_feat_map describes the dependencies for a set of bits in that 39 + * register. If the features these bits depend on are not present, the 40 + * bits are effectively RES0. 
41 + */ 42 + struct reg_feat_map_desc { 43 + const char *name; 44 + const struct reg_bits_to_feat_map feat_map; 45 + const struct reg_bits_to_feat_map *bit_feat_map; 46 + const unsigned int bit_feat_map_sz; 47 + }; 48 + 49 + #define __NEEDS_FEAT_3(m, f, w, id, fld, lim) \ 42 50 { \ 43 - .bits = (m), \ 51 + .w = (m), \ 44 52 .flags = (f), \ 45 53 .regidx = IDREG_IDX(SYS_ ## id), \ 46 54 .shift = id ##_## fld ## _SHIFT, \ ··· 67 39 .lo_lim = id ##_## fld ##_## lim \ 68 40 } 69 41 70 - #define __NEEDS_FEAT_2(m, f, fun, dummy) \ 42 + #define __NEEDS_FEAT_2(m, f, w, fun, dummy) \ 71 43 { \ 72 - .bits = (m), \ 44 + .w = (m), \ 73 45 .flags = (f) | CALL_FUNC, \ 74 46 .fval = (fun), \ 75 47 } 76 48 77 - #define __NEEDS_FEAT_1(m, f, fun) \ 49 + #define __NEEDS_FEAT_1(m, f, w, fun) \ 78 50 { \ 79 - .bits = (m), \ 51 + .w = (m), \ 80 52 .flags = (f) | CALL_FUNC, \ 81 53 .match = (fun), \ 82 54 } 83 55 56 + #define __NEEDS_FEAT_FLAG(m, f, w, ...) \ 57 + CONCATENATE(__NEEDS_FEAT_, COUNT_ARGS(__VA_ARGS__))(m, f, w, __VA_ARGS__) 58 + 84 59 #define NEEDS_FEAT_FLAG(m, f, ...) \ 85 - CONCATENATE(__NEEDS_FEAT_, COUNT_ARGS(__VA_ARGS__))(m, f, __VA_ARGS__) 60 + __NEEDS_FEAT_FLAG(m, f, bits, __VA_ARGS__) 86 61 87 62 #define NEEDS_FEAT_FIXED(m, ...) \ 88 - NEEDS_FEAT_FLAG(m, FIXED_VALUE, __VA_ARGS__, 0) 63 + __NEEDS_FEAT_FLAG(m, FIXED_VALUE, bits, __VA_ARGS__, 0) 89 64 65 + #define NEEDS_FEAT_RES0(p, ...) \ 66 + __NEEDS_FEAT_FLAG(p, RES0_POINTER, res0p, __VA_ARGS__) 67 + 68 + /* 69 + * Declare the dependency between a set of bits and a set of features, 70 + * generating a struct reg_bit_to_feat_map. 71 + */ 90 72 #define NEEDS_FEAT(m, ...) NEEDS_FEAT_FLAG(m, 0, __VA_ARGS__) 73 + 74 + /* 75 + * Declare the dependency between a non-FGT register, a set of 76 + * feature, and the set of individual bits it contains. This generates 77 + * a struct reg_feat_map_desc. 
78 + */ 79 + #define DECLARE_FEAT_MAP(n, r, m, f) \ 80 + struct reg_feat_map_desc n = { \ 81 + .name = #r, \ 82 + .feat_map = NEEDS_FEAT(~r##_RES0, f), \ 83 + .bit_feat_map = m, \ 84 + .bit_feat_map_sz = ARRAY_SIZE(m), \ 85 + } 86 + 87 + /* 88 + * Specialised version of the above for FGT registers that have their 89 + * RES0 masks described as struct fgt_masks. 90 + */ 91 + #define DECLARE_FEAT_MAP_FGT(n, msk, m, f) \ 92 + struct reg_feat_map_desc n = { \ 93 + .name = #msk, \ 94 + .feat_map = NEEDS_FEAT_RES0(&msk.res0, f),\ 95 + .bit_feat_map = m, \ 96 + .bit_feat_map_sz = ARRAY_SIZE(m), \ 97 + } 91 98 92 99 #define FEAT_SPE ID_AA64DFR0_EL1, PMSVer, IMP 93 100 #define FEAT_SPE_FnE ID_AA64DFR0_EL1, PMSVer, V1P2 ··· 136 73 #define FEAT_AA32EL0 ID_AA64PFR0_EL1, EL0, AARCH32 137 74 #define FEAT_AA32EL1 ID_AA64PFR0_EL1, EL1, AARCH32 138 75 #define FEAT_AA64EL1 ID_AA64PFR0_EL1, EL1, IMP 76 + #define FEAT_AA64EL2 ID_AA64PFR0_EL1, EL2, IMP 139 77 #define FEAT_AA64EL3 ID_AA64PFR0_EL1, EL3, IMP 140 78 #define FEAT_AIE ID_AA64MMFR3_EL1, AIE, IMP 141 79 #define FEAT_S2POE ID_AA64MMFR3_EL1, S2POE, IMP ··· 195 131 #define FEAT_SPMU ID_AA64DFR1_EL1, SPMU, IMP 196 132 #define FEAT_SPE_nVM ID_AA64DFR2_EL1, SPE_nVM, IMP 197 133 #define FEAT_STEP2 ID_AA64DFR2_EL1, STEP, IMP 198 - #define FEAT_SYSREG128 ID_AA64ISAR2_EL1, SYSREG_128, IMP 199 134 #define FEAT_CPA2 ID_AA64ISAR3_EL1, CPA, CPA2 200 135 #define FEAT_ASID2 ID_AA64MMFR4_EL1, ASID2, IMP 201 136 #define FEAT_MEC ID_AA64MMFR3_EL1, MEC, IMP ··· 206 143 #define FEAT_LSMAOC ID_AA64MMFR2_EL1, LSM, IMP 207 144 #define FEAT_MixedEnd ID_AA64MMFR0_EL1, BIGEND, IMP 208 145 #define FEAT_MixedEndEL0 ID_AA64MMFR0_EL1, BIGENDEL0, IMP 209 - #define FEAT_MTE2 ID_AA64PFR1_EL1, MTE, MTE2 210 146 #define FEAT_MTE_ASYNC ID_AA64PFR1_EL1, MTE_frac, ASYNC 211 147 #define FEAT_MTE_STORE_ONLY ID_AA64PFR2_EL1, MTESTOREONLY, IMP 212 148 #define FEAT_PAN ID_AA64MMFR1_EL1, PAN, IMP ··· 213 151 #define FEAT_SSBS ID_AA64PFR1_EL1, SSBS, IMP 214 152 #define 
FEAT_TIDCP1 ID_AA64MMFR1_EL1, TIDCP1, IMP 215 153 #define FEAT_FGT ID_AA64MMFR0_EL1, FGT, IMP 154 + #define FEAT_FGT2 ID_AA64MMFR0_EL1, FGT, FGT2 216 155 #define FEAT_MTPMU ID_AA64DFR0_EL1, MTPMU, IMP 156 + #define FEAT_HCX ID_AA64MMFR1_EL1, HCX, IMP 217 157 218 158 static bool not_feat_aa64el3(struct kvm *kvm) 219 159 { ··· 461 397 NEVER_FGU, FEAT_AA64EL1), 462 398 }; 463 399 400 + 401 + static const DECLARE_FEAT_MAP_FGT(hfgrtr_desc, hfgrtr_masks, 402 + hfgrtr_feat_map, FEAT_FGT); 403 + 464 404 static const struct reg_bits_to_feat_map hfgwtr_feat_map[] = { 465 405 NEEDS_FEAT(HFGWTR_EL2_nAMAIR2_EL1 | 466 406 HFGWTR_EL2_nMAIR2_EL1, ··· 528 460 HFGWTR_EL2_AFSR0_EL1, 529 461 NEVER_FGU, FEAT_AA64EL1), 530 462 }; 463 + 464 + static const DECLARE_FEAT_MAP_FGT(hfgwtr_desc, hfgwtr_masks, 465 + hfgwtr_feat_map, FEAT_FGT); 531 466 532 467 static const struct reg_bits_to_feat_map hdfgrtr_feat_map[] = { 533 468 NEEDS_FEAT(HDFGRTR_EL2_PMBIDR_EL1 | ··· 599 528 NEVER_FGU, FEAT_AA64EL1) 600 529 }; 601 530 531 + static const DECLARE_FEAT_MAP_FGT(hdfgrtr_desc, hdfgrtr_masks, 532 + hdfgrtr_feat_map, FEAT_FGT); 533 + 602 534 static const struct reg_bits_to_feat_map hdfgwtr_feat_map[] = { 603 535 NEEDS_FEAT(HDFGWTR_EL2_PMSLATFR_EL1 | 604 536 HDFGWTR_EL2_PMSIRR_EL1 | ··· 662 588 NEEDS_FEAT(HDFGWTR_EL2_TRFCR_EL1, FEAT_TRF), 663 589 }; 664 590 591 + static const DECLARE_FEAT_MAP_FGT(hdfgwtr_desc, hdfgwtr_masks, 592 + hdfgwtr_feat_map, FEAT_FGT); 665 593 666 594 static const struct reg_bits_to_feat_map hfgitr_feat_map[] = { 667 595 NEEDS_FEAT(HFGITR_EL2_PSBCSYNC, FEAT_SPEv1p5), ··· 738 662 NEVER_FGU, FEAT_AA64EL1), 739 663 }; 740 664 665 + static const DECLARE_FEAT_MAP_FGT(hfgitr_desc, hfgitr_masks, 666 + hfgitr_feat_map, FEAT_FGT); 667 + 741 668 static const struct reg_bits_to_feat_map hafgrtr_feat_map[] = { 742 669 NEEDS_FEAT(HAFGRTR_EL2_AMEVTYPER115_EL0 | 743 670 HAFGRTR_EL2_AMEVTYPER114_EL0 | ··· 783 704 FEAT_AMUv1), 784 705 }; 785 706 707 + static const 
DECLARE_FEAT_MAP_FGT(hafgrtr_desc, hafgrtr_masks, 708 + hafgrtr_feat_map, FEAT_FGT); 709 + 786 710 static const struct reg_bits_to_feat_map hfgitr2_feat_map[] = { 787 711 NEEDS_FEAT(HFGITR2_EL2_nDCCIVAPS, FEAT_PoPS), 788 712 NEEDS_FEAT(HFGITR2_EL2_TSBCSYNC, FEAT_TRBEv1p1) 789 713 }; 714 + 715 + static const DECLARE_FEAT_MAP_FGT(hfgitr2_desc, hfgitr2_masks, 716 + hfgitr2_feat_map, FEAT_FGT2); 790 717 791 718 static const struct reg_bits_to_feat_map hfgrtr2_feat_map[] = { 792 719 NEEDS_FEAT(HFGRTR2_EL2_nPFAR_EL1, FEAT_PFAR), ··· 813 728 NEEDS_FEAT(HFGRTR2_EL2_nRCWSMASK_EL1, FEAT_THE), 814 729 }; 815 730 731 + static const DECLARE_FEAT_MAP_FGT(hfgrtr2_desc, hfgrtr2_masks, 732 + hfgrtr2_feat_map, FEAT_FGT2); 733 + 816 734 static const struct reg_bits_to_feat_map hfgwtr2_feat_map[] = { 817 735 NEEDS_FEAT(HFGWTR2_EL2_nPFAR_EL1, FEAT_PFAR), 818 736 NEEDS_FEAT(HFGWTR2_EL2_nACTLRALIAS_EL1 | ··· 833 745 FEAT_SRMASK), 834 746 NEEDS_FEAT(HFGWTR2_EL2_nRCWSMASK_EL1, FEAT_THE), 835 747 }; 748 + 749 + static const DECLARE_FEAT_MAP_FGT(hfgwtr2_desc, hfgwtr2_masks, 750 + hfgwtr2_feat_map, FEAT_FGT2); 836 751 837 752 static const struct reg_bits_to_feat_map hdfgrtr2_feat_map[] = { 838 753 NEEDS_FEAT(HDFGRTR2_EL2_nMDSELR_EL1, FEAT_Debugv8p9), ··· 867 776 NEEDS_FEAT(HDFGRTR2_EL2_nTRBMPAM_EL1, feat_trbe_mpam), 868 777 }; 869 778 779 + static const DECLARE_FEAT_MAP_FGT(hdfgrtr2_desc, hdfgrtr2_masks, 780 + hdfgrtr2_feat_map, FEAT_FGT2); 781 + 870 782 static const struct reg_bits_to_feat_map hdfgwtr2_feat_map[] = { 871 783 NEEDS_FEAT(HDFGWTR2_EL2_nMDSELR_EL1, FEAT_Debugv8p9), 872 784 NEEDS_FEAT(HDFGWTR2_EL2_nPMECR_EL1, feat_ebep_pmuv3_ss), ··· 897 803 NEEDS_FEAT(HDFGWTR2_EL2_nMDSTEPOP_EL1, FEAT_STEP2), 898 804 NEEDS_FEAT(HDFGWTR2_EL2_nTRBMPAM_EL1, feat_trbe_mpam), 899 805 }; 806 + 807 + static const DECLARE_FEAT_MAP_FGT(hdfgwtr2_desc, hdfgwtr2_masks, 808 + hdfgwtr2_feat_map, FEAT_FGT2); 809 + 900 810 901 811 static const struct reg_bits_to_feat_map hcrx_feat_map[] = { 902 812 
NEEDS_FEAT(HCRX_EL2_PACMEn, feat_pauth_lr), ··· 930 832 NEEDS_FEAT(HCRX_EL2_EnALS, FEAT_LS64), 931 833 NEEDS_FEAT(HCRX_EL2_EnAS0, FEAT_LS64_ACCDATA), 932 834 }; 835 + 836 + 837 + static const DECLARE_FEAT_MAP(hcrx_desc, __HCRX_EL2, 838 + hcrx_feat_map, FEAT_HCX); 933 839 934 840 static const struct reg_bits_to_feat_map hcr_feat_map[] = { 935 841 NEEDS_FEAT(HCR_EL2_TID0, FEAT_AA32EL0), ··· 1006 904 NEEDS_FEAT_FIXED(HCR_EL2_E2H, compute_hcr_e2h), 1007 905 }; 1008 906 907 + static const DECLARE_FEAT_MAP(hcr_desc, HCR_EL2, 908 + hcr_feat_map, FEAT_AA64EL2); 909 + 1009 910 static const struct reg_bits_to_feat_map sctlr2_feat_map[] = { 1010 911 NEEDS_FEAT(SCTLR2_EL1_NMEA | 1011 912 SCTLR2_EL1_EASE, ··· 1025 920 SCTLR2_EL1_CPTM0, 1026 921 FEAT_CPA2), 1027 922 }; 923 + 924 + static const DECLARE_FEAT_MAP(sctlr2_desc, SCTLR2_EL1, 925 + sctlr2_feat_map, FEAT_SCTLR2); 1028 926 1029 927 static const struct reg_bits_to_feat_map tcr2_el2_feat_map[] = { 1030 928 NEEDS_FEAT(TCR2_EL2_FNG1 | ··· 1050 942 FEAT_S1POE), 1051 943 NEEDS_FEAT(TCR2_EL2_PIE, FEAT_S1PIE), 1052 944 }; 945 + 946 + static const DECLARE_FEAT_MAP(tcr2_el2_desc, TCR2_EL2, 947 + tcr2_el2_feat_map, FEAT_TCR2); 1053 948 1054 949 static const struct reg_bits_to_feat_map sctlr_el1_feat_map[] = { 1055 950 NEEDS_FEAT(SCTLR_EL1_CP15BEN | ··· 1128 1017 FEAT_AA64EL1), 1129 1018 }; 1130 1019 1020 + static const DECLARE_FEAT_MAP(sctlr_el1_desc, SCTLR_EL1, 1021 + sctlr_el1_feat_map, FEAT_AA64EL1); 1022 + 1131 1023 static const struct reg_bits_to_feat_map mdcr_el2_feat_map[] = { 1132 1024 NEEDS_FEAT(MDCR_EL2_EBWE, FEAT_Debugv8p9), 1133 1025 NEEDS_FEAT(MDCR_EL2_TDOSA, FEAT_DoubleLock), ··· 1162 1048 FEAT_AA64EL1), 1163 1049 }; 1164 1050 1051 + static const DECLARE_FEAT_MAP(mdcr_el2_desc, MDCR_EL2, 1052 + mdcr_el2_feat_map, FEAT_AA64EL2); 1053 + 1165 1054 static void __init check_feat_map(const struct reg_bits_to_feat_map *map, 1166 1055 int map_size, u64 res0, const char *str) 1167 1056 { ··· 1178 1061 str, mask ^ ~res0); 1179 
1062 } 1180 1063 1064 + static u64 reg_feat_map_bits(const struct reg_bits_to_feat_map *map) 1065 + { 1066 + return map->flags & RES0_POINTER ? ~(*map->res0p) : map->bits; 1067 + } 1068 + 1069 + static void __init check_reg_desc(const struct reg_feat_map_desc *r) 1070 + { 1071 + check_feat_map(r->bit_feat_map, r->bit_feat_map_sz, 1072 + ~reg_feat_map_bits(&r->feat_map), r->name); 1073 + } 1074 + 1181 1075 void __init check_feature_map(void) 1182 1076 { 1183 - check_feat_map(hfgrtr_feat_map, ARRAY_SIZE(hfgrtr_feat_map), 1184 - hfgrtr_masks.res0, hfgrtr_masks.str); 1185 - check_feat_map(hfgwtr_feat_map, ARRAY_SIZE(hfgwtr_feat_map), 1186 - hfgwtr_masks.res0, hfgwtr_masks.str); 1187 - check_feat_map(hfgitr_feat_map, ARRAY_SIZE(hfgitr_feat_map), 1188 - hfgitr_masks.res0, hfgitr_masks.str); 1189 - check_feat_map(hdfgrtr_feat_map, ARRAY_SIZE(hdfgrtr_feat_map), 1190 - hdfgrtr_masks.res0, hdfgrtr_masks.str); 1191 - check_feat_map(hdfgwtr_feat_map, ARRAY_SIZE(hdfgwtr_feat_map), 1192 - hdfgwtr_masks.res0, hdfgwtr_masks.str); 1193 - check_feat_map(hafgrtr_feat_map, ARRAY_SIZE(hafgrtr_feat_map), 1194 - hafgrtr_masks.res0, hafgrtr_masks.str); 1195 - check_feat_map(hcrx_feat_map, ARRAY_SIZE(hcrx_feat_map), 1196 - __HCRX_EL2_RES0, "HCRX_EL2"); 1197 - check_feat_map(hcr_feat_map, ARRAY_SIZE(hcr_feat_map), 1198 - HCR_EL2_RES0, "HCR_EL2"); 1199 - check_feat_map(sctlr2_feat_map, ARRAY_SIZE(sctlr2_feat_map), 1200 - SCTLR2_EL1_RES0, "SCTLR2_EL1"); 1201 - check_feat_map(tcr2_el2_feat_map, ARRAY_SIZE(tcr2_el2_feat_map), 1202 - TCR2_EL2_RES0, "TCR2_EL2"); 1203 - check_feat_map(sctlr_el1_feat_map, ARRAY_SIZE(sctlr_el1_feat_map), 1204 - SCTLR_EL1_RES0, "SCTLR_EL1"); 1205 - check_feat_map(mdcr_el2_feat_map, ARRAY_SIZE(mdcr_el2_feat_map), 1206 - MDCR_EL2_RES0, "MDCR_EL2"); 1077 + check_reg_desc(&hfgrtr_desc); 1078 + check_reg_desc(&hfgwtr_desc); 1079 + check_reg_desc(&hfgitr_desc); 1080 + check_reg_desc(&hdfgrtr_desc); 1081 + check_reg_desc(&hdfgwtr_desc); 1082 + check_reg_desc(&hafgrtr_desc); 
1083 + check_reg_desc(&hfgrtr2_desc); 1084 + check_reg_desc(&hfgwtr2_desc); 1085 + check_reg_desc(&hfgitr2_desc); 1086 + check_reg_desc(&hdfgrtr2_desc); 1087 + check_reg_desc(&hdfgwtr2_desc); 1088 + check_reg_desc(&hcrx_desc); 1089 + check_reg_desc(&hcr_desc); 1090 + check_reg_desc(&sctlr2_desc); 1091 + check_reg_desc(&tcr2_el2_desc); 1092 + check_reg_desc(&sctlr_el1_desc); 1093 + check_reg_desc(&mdcr_el2_desc); 1207 1094 } 1208 1095 1209 1096 static bool idreg_feat_match(struct kvm *kvm, const struct reg_bits_to_feat_map *map) ··· 1250 1129 match = idreg_feat_match(kvm, &map[i]); 1251 1130 1252 1131 if (!match || (map[i].flags & FIXED_VALUE)) 1253 - val |= map[i].bits; 1132 + val |= reg_feat_map_bits(&map[i]); 1254 1133 } 1255 1134 1256 1135 return val; ··· 1266 1145 require, exclude | FIXED_VALUE); 1267 1146 } 1268 1147 1269 - static u64 compute_fixed_bits(struct kvm *kvm, 1270 - const struct reg_bits_to_feat_map *map, 1271 - int map_size, 1272 - u64 *fixed_bits, 1273 - unsigned long require, 1274 - unsigned long exclude) 1148 + static u64 compute_reg_res0_bits(struct kvm *kvm, 1149 + const struct reg_feat_map_desc *r, 1150 + unsigned long require, unsigned long exclude) 1151 + 1275 1152 { 1276 - return __compute_fixed_bits(kvm, map, map_size, fixed_bits, 1277 - require | FIXED_VALUE, exclude); 1153 + u64 res0; 1154 + 1155 + res0 = compute_res0_bits(kvm, r->bit_feat_map, r->bit_feat_map_sz, 1156 + require, exclude); 1157 + 1158 + /* 1159 + * If computing FGUs, don't take RES0 or register existence 1160 + * into account -- we're not computing bits for the register 1161 + * itself. 
1162 + */ 1163 + if (!(exclude & NEVER_FGU)) { 1164 + res0 |= compute_res0_bits(kvm, &r->feat_map, 1, require, exclude); 1165 + res0 |= ~reg_feat_map_bits(&r->feat_map); 1166 + } 1167 + 1168 + return res0; 1169 + } 1170 + 1171 + static u64 compute_reg_fixed_bits(struct kvm *kvm, 1172 + const struct reg_feat_map_desc *r, 1173 + u64 *fixed_bits, unsigned long require, 1174 + unsigned long exclude) 1175 + { 1176 + return __compute_fixed_bits(kvm, r->bit_feat_map, r->bit_feat_map_sz, 1177 + fixed_bits, require | FIXED_VALUE, exclude); 1278 1178 } 1279 1179 1280 1180 void compute_fgu(struct kvm *kvm, enum fgt_group_id fgt) ··· 1304 1162 1305 1163 switch (fgt) { 1306 1164 case HFGRTR_GROUP: 1307 - val |= compute_res0_bits(kvm, hfgrtr_feat_map, 1308 - ARRAY_SIZE(hfgrtr_feat_map), 1309 - 0, NEVER_FGU); 1310 - val |= compute_res0_bits(kvm, hfgwtr_feat_map, 1311 - ARRAY_SIZE(hfgwtr_feat_map), 1312 - 0, NEVER_FGU); 1165 + val |= compute_reg_res0_bits(kvm, &hfgrtr_desc, 1166 + 0, NEVER_FGU); 1167 + val |= compute_reg_res0_bits(kvm, &hfgwtr_desc, 1168 + 0, NEVER_FGU); 1313 1169 break; 1314 1170 case HFGITR_GROUP: 1315 - val |= compute_res0_bits(kvm, hfgitr_feat_map, 1316 - ARRAY_SIZE(hfgitr_feat_map), 1317 - 0, NEVER_FGU); 1171 + val |= compute_reg_res0_bits(kvm, &hfgitr_desc, 1172 + 0, NEVER_FGU); 1318 1173 break; 1319 1174 case HDFGRTR_GROUP: 1320 - val |= compute_res0_bits(kvm, hdfgrtr_feat_map, 1321 - ARRAY_SIZE(hdfgrtr_feat_map), 1322 - 0, NEVER_FGU); 1323 - val |= compute_res0_bits(kvm, hdfgwtr_feat_map, 1324 - ARRAY_SIZE(hdfgwtr_feat_map), 1325 - 0, NEVER_FGU); 1175 + val |= compute_reg_res0_bits(kvm, &hdfgrtr_desc, 1176 + 0, NEVER_FGU); 1177 + val |= compute_reg_res0_bits(kvm, &hdfgwtr_desc, 1178 + 0, NEVER_FGU); 1326 1179 break; 1327 1180 case HAFGRTR_GROUP: 1328 - val |= compute_res0_bits(kvm, hafgrtr_feat_map, 1329 - ARRAY_SIZE(hafgrtr_feat_map), 1330 - 0, NEVER_FGU); 1181 + val |= compute_reg_res0_bits(kvm, &hafgrtr_desc, 1182 + 0, NEVER_FGU); 1331 1183 break; 1332 
1184 case HFGRTR2_GROUP: 1333 - val |= compute_res0_bits(kvm, hfgrtr2_feat_map, 1334 - ARRAY_SIZE(hfgrtr2_feat_map), 1335 - 0, NEVER_FGU); 1336 - val |= compute_res0_bits(kvm, hfgwtr2_feat_map, 1337 - ARRAY_SIZE(hfgwtr2_feat_map), 1338 - 0, NEVER_FGU); 1185 + val |= compute_reg_res0_bits(kvm, &hfgrtr2_desc, 1186 + 0, NEVER_FGU); 1187 + val |= compute_reg_res0_bits(kvm, &hfgwtr2_desc, 1188 + 0, NEVER_FGU); 1339 1189 break; 1340 1190 case HFGITR2_GROUP: 1341 - val |= compute_res0_bits(kvm, hfgitr2_feat_map, 1342 - ARRAY_SIZE(hfgitr2_feat_map), 1343 - 0, NEVER_FGU); 1191 + val |= compute_reg_res0_bits(kvm, &hfgitr2_desc, 1192 + 0, NEVER_FGU); 1344 1193 break; 1345 1194 case HDFGRTR2_GROUP: 1346 - val |= compute_res0_bits(kvm, hdfgrtr2_feat_map, 1347 - ARRAY_SIZE(hdfgrtr2_feat_map), 1348 - 0, NEVER_FGU); 1349 - val |= compute_res0_bits(kvm, hdfgwtr2_feat_map, 1350 - ARRAY_SIZE(hdfgwtr2_feat_map), 1351 - 0, NEVER_FGU); 1195 + val |= compute_reg_res0_bits(kvm, &hdfgrtr2_desc, 1196 + 0, NEVER_FGU); 1197 + val |= compute_reg_res0_bits(kvm, &hdfgwtr2_desc, 1198 + 0, NEVER_FGU); 1352 1199 break; 1353 1200 default: 1354 1201 BUG(); ··· 1352 1221 1353 1222 switch (reg) { 1354 1223 case HFGRTR_EL2: 1355 - *res0 = compute_res0_bits(kvm, hfgrtr_feat_map, 1356 - ARRAY_SIZE(hfgrtr_feat_map), 0, 0); 1357 - *res0 |= hfgrtr_masks.res0; 1224 + *res0 = compute_reg_res0_bits(kvm, &hfgrtr_desc, 0, 0); 1358 1225 *res1 = HFGRTR_EL2_RES1; 1359 1226 break; 1360 1227 case HFGWTR_EL2: 1361 - *res0 = compute_res0_bits(kvm, hfgwtr_feat_map, 1362 - ARRAY_SIZE(hfgwtr_feat_map), 0, 0); 1363 - *res0 |= hfgwtr_masks.res0; 1228 + *res0 = compute_reg_res0_bits(kvm, &hfgwtr_desc, 0, 0); 1364 1229 *res1 = HFGWTR_EL2_RES1; 1365 1230 break; 1366 1231 case HFGITR_EL2: 1367 - *res0 = compute_res0_bits(kvm, hfgitr_feat_map, 1368 - ARRAY_SIZE(hfgitr_feat_map), 0, 0); 1369 - *res0 |= hfgitr_masks.res0; 1232 + *res0 = compute_reg_res0_bits(kvm, &hfgitr_desc, 0, 0); 1370 1233 *res1 = HFGITR_EL2_RES1; 1371 1234 
break; 1372 1235 case HDFGRTR_EL2: 1373 - *res0 = compute_res0_bits(kvm, hdfgrtr_feat_map, 1374 - ARRAY_SIZE(hdfgrtr_feat_map), 0, 0); 1375 - *res0 |= hdfgrtr_masks.res0; 1236 + *res0 = compute_reg_res0_bits(kvm, &hdfgrtr_desc, 0, 0); 1376 1237 *res1 = HDFGRTR_EL2_RES1; 1377 1238 break; 1378 1239 case HDFGWTR_EL2: 1379 - *res0 = compute_res0_bits(kvm, hdfgwtr_feat_map, 1380 - ARRAY_SIZE(hdfgwtr_feat_map), 0, 0); 1381 - *res0 |= hdfgwtr_masks.res0; 1240 + *res0 = compute_reg_res0_bits(kvm, &hdfgwtr_desc, 0, 0); 1382 1241 *res1 = HDFGWTR_EL2_RES1; 1383 1242 break; 1384 1243 case HAFGRTR_EL2: 1385 - *res0 = compute_res0_bits(kvm, hafgrtr_feat_map, 1386 - ARRAY_SIZE(hafgrtr_feat_map), 0, 0); 1387 - *res0 |= hafgrtr_masks.res0; 1244 + *res0 = compute_reg_res0_bits(kvm, &hafgrtr_desc, 0, 0); 1388 1245 *res1 = HAFGRTR_EL2_RES1; 1389 1246 break; 1390 1247 case HFGRTR2_EL2: 1391 - *res0 = compute_res0_bits(kvm, hfgrtr2_feat_map, 1392 - ARRAY_SIZE(hfgrtr2_feat_map), 0, 0); 1393 - *res0 |= hfgrtr2_masks.res0; 1248 + *res0 = compute_reg_res0_bits(kvm, &hfgrtr2_desc, 0, 0); 1394 1249 *res1 = HFGRTR2_EL2_RES1; 1395 1250 break; 1396 1251 case HFGWTR2_EL2: 1397 - *res0 = compute_res0_bits(kvm, hfgwtr2_feat_map, 1398 - ARRAY_SIZE(hfgwtr2_feat_map), 0, 0); 1399 - *res0 |= hfgwtr2_masks.res0; 1252 + *res0 = compute_reg_res0_bits(kvm, &hfgwtr2_desc, 0, 0); 1400 1253 *res1 = HFGWTR2_EL2_RES1; 1401 1254 break; 1402 1255 case HFGITR2_EL2: 1403 - *res0 = compute_res0_bits(kvm, hfgitr2_feat_map, 1404 - ARRAY_SIZE(hfgitr2_feat_map), 0, 0); 1405 - *res0 |= hfgitr2_masks.res0; 1256 + *res0 = compute_reg_res0_bits(kvm, &hfgitr2_desc, 0, 0); 1406 1257 *res1 = HFGITR2_EL2_RES1; 1407 1258 break; 1408 1259 case HDFGRTR2_EL2: 1409 - *res0 = compute_res0_bits(kvm, hdfgrtr2_feat_map, 1410 - ARRAY_SIZE(hdfgrtr2_feat_map), 0, 0); 1411 - *res0 |= hdfgrtr2_masks.res0; 1260 + *res0 = compute_reg_res0_bits(kvm, &hdfgrtr2_desc, 0, 0); 1412 1261 *res1 = HDFGRTR2_EL2_RES1; 1413 1262 break; 1414 1263 case 
HDFGWTR2_EL2: 1415 - *res0 = compute_res0_bits(kvm, hdfgwtr2_feat_map, 1416 - ARRAY_SIZE(hdfgwtr2_feat_map), 0, 0); 1417 - *res0 |= hdfgwtr2_masks.res0; 1264 + *res0 = compute_reg_res0_bits(kvm, &hdfgwtr2_desc, 0, 0); 1418 1265 *res1 = HDFGWTR2_EL2_RES1; 1419 1266 break; 1420 1267 case HCRX_EL2: 1421 - *res0 = compute_res0_bits(kvm, hcrx_feat_map, 1422 - ARRAY_SIZE(hcrx_feat_map), 0, 0); 1423 - *res0 |= __HCRX_EL2_RES0; 1268 + *res0 = compute_reg_res0_bits(kvm, &hcrx_desc, 0, 0); 1424 1269 *res1 = __HCRX_EL2_RES1; 1425 1270 break; 1426 1271 case HCR_EL2: 1427 - mask = compute_fixed_bits(kvm, hcr_feat_map, 1428 - ARRAY_SIZE(hcr_feat_map), &fixed, 1429 - 0, 0); 1430 - *res0 = compute_res0_bits(kvm, hcr_feat_map, 1431 - ARRAY_SIZE(hcr_feat_map), 0, 0); 1432 - *res0 |= HCR_EL2_RES0 | (mask & ~fixed); 1272 + mask = compute_reg_fixed_bits(kvm, &hcr_desc, &fixed, 0, 0); 1273 + *res0 = compute_reg_res0_bits(kvm, &hcr_desc, 0, 0); 1274 + *res0 |= (mask & ~fixed); 1433 1275 *res1 = HCR_EL2_RES1 | (mask & fixed); 1434 1276 break; 1435 1277 case SCTLR2_EL1: 1436 1278 case SCTLR2_EL2: 1437 - *res0 = compute_res0_bits(kvm, sctlr2_feat_map, 1438 - ARRAY_SIZE(sctlr2_feat_map), 0, 0); 1439 - *res0 |= SCTLR2_EL1_RES0; 1279 + *res0 = compute_reg_res0_bits(kvm, &sctlr2_desc, 0, 0); 1440 1280 *res1 = SCTLR2_EL1_RES1; 1441 1281 break; 1442 1282 case TCR2_EL2: 1443 - *res0 = compute_res0_bits(kvm, tcr2_el2_feat_map, 1444 - ARRAY_SIZE(tcr2_el2_feat_map), 0, 0); 1445 - *res0 |= TCR2_EL2_RES0; 1283 + *res0 = compute_reg_res0_bits(kvm, &tcr2_el2_desc, 0, 0); 1446 1284 *res1 = TCR2_EL2_RES1; 1447 1285 break; 1448 1286 case SCTLR_EL1: 1449 - *res0 = compute_res0_bits(kvm, sctlr_el1_feat_map, 1450 - ARRAY_SIZE(sctlr_el1_feat_map), 0, 0); 1451 - *res0 |= SCTLR_EL1_RES0; 1287 + *res0 = compute_reg_res0_bits(kvm, &sctlr_el1_desc, 0, 0); 1452 1288 *res1 = SCTLR_EL1_RES1; 1453 1289 break; 1454 1290 case MDCR_EL2: 1455 - *res0 = compute_res0_bits(kvm, mdcr_el2_feat_map, 1456 - 
ARRAY_SIZE(mdcr_el2_feat_map), 0, 0); 1457 - *res0 |= MDCR_EL2_RES0; 1291 + *res0 = compute_reg_res0_bits(kvm, &mdcr_el2_desc, 0, 0); 1458 1292 *res1 = MDCR_EL2_RES1; 1459 1293 break; 1460 1294 default:
+7
arch/arm64/kvm/hyp/vhe/switch.c
··· 95 95 /* Force NV2 in case the guest is forgetful... */ 96 96 guest_hcr |= HCR_NV2; 97 97 } 98 + 99 + /* 100 + * Exclude the guest's TWED configuration if it hasn't set TWE 101 + * to avoid potentially delaying traps for the host. 102 + */ 103 + if (!(guest_hcr & HCR_TWE)) 104 + guest_hcr &= ~(HCR_EL2_TWEDEn | HCR_EL2_TWEDEL); 98 105 } 99 106 100 107 BUG_ON(host_data_test_flag(VCPU_IN_HYP_CONTEXT) &&
+30 -16
arch/arm64/kvm/nested.c
··· 1462 1462 1463 1463 case SYS_ID_AA64PFR1_EL1: 1464 1464 /* Only support BTI, SSBS, CSV2_frac */ 1465 - val &= (ID_AA64PFR1_EL1_BT | 1466 - ID_AA64PFR1_EL1_SSBS | 1467 - ID_AA64PFR1_EL1_CSV2_frac); 1465 + val &= ~(ID_AA64PFR1_EL1_PFAR | 1466 + ID_AA64PFR1_EL1_MTEX | 1467 + ID_AA64PFR1_EL1_THE | 1468 + ID_AA64PFR1_EL1_GCS | 1469 + ID_AA64PFR1_EL1_MTE_frac | 1470 + ID_AA64PFR1_EL1_NMI | 1471 + ID_AA64PFR1_EL1_SME | 1472 + ID_AA64PFR1_EL1_RES0 | 1473 + ID_AA64PFR1_EL1_MPAM_frac | 1474 + ID_AA64PFR1_EL1_MTE); 1468 1475 break; 1469 1476 1470 1477 case SYS_ID_AA64MMFR0_EL1: ··· 1524 1517 break; 1525 1518 1526 1519 case SYS_ID_AA64MMFR1_EL1: 1527 - val &= (ID_AA64MMFR1_EL1_HCX | 1528 - ID_AA64MMFR1_EL1_PAN | 1529 - ID_AA64MMFR1_EL1_LO | 1530 - ID_AA64MMFR1_EL1_HPDS | 1531 - ID_AA64MMFR1_EL1_VH | 1532 - ID_AA64MMFR1_EL1_VMIDBits); 1520 + val &= ~(ID_AA64MMFR1_EL1_CMOW | 1521 + ID_AA64MMFR1_EL1_nTLBPA | 1522 + ID_AA64MMFR1_EL1_ETS | 1523 + ID_AA64MMFR1_EL1_XNX | 1524 + ID_AA64MMFR1_EL1_HAFDBS); 1533 1525 /* FEAT_E2H0 implies no VHE */ 1534 1526 if (test_bit(KVM_ARM_VCPU_HAS_EL2_E2H0, kvm->arch.vcpu_features)) 1535 1527 val &= ~ID_AA64MMFR1_EL1_VH; ··· 1570 1564 1571 1565 case SYS_ID_AA64DFR0_EL1: 1572 1566 /* Only limited support for PMU, Debug, BPs, WPs, and HPMN0 */ 1573 - val &= (ID_AA64DFR0_EL1_PMUVer | 1574 - ID_AA64DFR0_EL1_WRPs | 1575 - ID_AA64DFR0_EL1_BRPs | 1576 - ID_AA64DFR0_EL1_DebugVer| 1577 - ID_AA64DFR0_EL1_HPMN0); 1567 + val &= ~(ID_AA64DFR0_EL1_ExtTrcBuff | 1568 + ID_AA64DFR0_EL1_BRBE | 1569 + ID_AA64DFR0_EL1_MTPMU | 1570 + ID_AA64DFR0_EL1_TraceBuffer | 1571 + ID_AA64DFR0_EL1_TraceFilt | 1572 + ID_AA64DFR0_EL1_PMSVer | 1573 + ID_AA64DFR0_EL1_CTX_CMPs | 1574 + ID_AA64DFR0_EL1_SEBEP | 1575 + ID_AA64DFR0_EL1_PMSS | 1576 + ID_AA64DFR0_EL1_TraceVer); 1578 1577 1579 - /* Cap Debug to ARMv8.1 */ 1580 - val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, VHE); 1578 + /* 1579 + * FEAT_Debugv8p9 requires support for extended breakpoints / 1580 + * 
watchpoints. 1581 + */ 1582 + val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, V8P8); 1581 1583 break; 1582 1584 } 1583 1585
+25 -2
arch/arm64/kvm/sys_regs.c
··· 1997 1997 return val; 1998 1998 } 1999 1999 2000 + /* 2001 + * Older versions of KVM erroneously claim support for FEAT_DoubleLock with 2002 + * NV-enabled VMs on unsupporting hardware. Silently ignore the incorrect 2003 + * value if it is consistent with the bug. 2004 + */ 2005 + static bool ignore_feat_doublelock(struct kvm_vcpu *vcpu, u64 val) 2006 + { 2007 + u8 host, user; 2008 + 2009 + if (!vcpu_has_nv(vcpu)) 2010 + return false; 2011 + 2012 + host = SYS_FIELD_GET(ID_AA64DFR0_EL1, DoubleLock, 2013 + read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1)); 2014 + user = SYS_FIELD_GET(ID_AA64DFR0_EL1, DoubleLock, val); 2015 + 2016 + return host == ID_AA64DFR0_EL1_DoubleLock_NI && 2017 + user == ID_AA64DFR0_EL1_DoubleLock_IMP; 2018 + } 2019 + 2000 2020 static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu, 2001 2021 const struct sys_reg_desc *rd, 2002 2022 u64 val) ··· 2047 2027 */ 2048 2028 if (debugver < ID_AA64DFR0_EL1_DebugVer_IMP) 2049 2029 return -EINVAL; 2030 + 2031 + if (ignore_feat_doublelock(vcpu, val)) { 2032 + val &= ~ID_AA64DFR0_EL1_DoubleLock; 2033 + val |= SYS_FIELD_PREP_ENUM(ID_AA64DFR0_EL1, DoubleLock, NI); 2034 + } 2050 2035 2051 2036 return set_id_reg(vcpu, rd, val); 2052 2037 } ··· 3177 3152 ~(ID_AA64MMFR0_EL1_RES0 | 3178 3153 ID_AA64MMFR0_EL1_ASIDBITS)), 3179 3154 ID_WRITABLE(ID_AA64MMFR1_EL1, ~(ID_AA64MMFR1_EL1_RES0 | 3180 - ID_AA64MMFR1_EL1_HCX | 3181 - ID_AA64MMFR1_EL1_TWED | 3182 3155 ID_AA64MMFR1_EL1_XNX | 3183 3156 ID_AA64MMFR1_EL1_VH | 3184 3157 ID_AA64MMFR1_EL1_VMIDBits)),
+2
tools/testing/selftests/kvm/arm64/set_id_regs.c
··· 165 165 static const struct reg_ftr_bits ftr_id_aa64mmfr1_el1[] = { 166 166 REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, TIDCP1, 0), 167 167 REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, AFP, 0), 168 + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, HCX, 0), 168 169 REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, ETS, 0), 170 + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, TWED, 0), 169 171 REG_FTR_BITS(FTR_HIGHER_SAFE, ID_AA64MMFR1_EL1, SpecSEI, 0), 170 172 REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, PAN, 0), 171 173 REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, LO, 0),