Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64/sysreg: Add _EL1 into ID_AA64ISAR2_EL1 definition names

Normally we include the full register name in the defines for fields within
registers, but this has not been followed for ID registers. In preparation
for automatic generation of defines, add the _EL1s into the defines for
ID_AA64ISAR2_EL1 to follow the convention. No functional changes.

Signed-off-by: Mark Brown <broonie@kernel.org>
Link: https://lore.kernel.org/r/20220704170302.2609529-17-broonie@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>

authored by

Mark Brown and committed by
Will Deacon
b2d71f27 aa50479b

+43 -43
+1 -1
arch/arm64/include/asm/asm_pointer_auth.h
··· 61 61 mrs \tmp1, id_aa64isar1_el1 62 62 ubfx \tmp1, \tmp1, #ID_AA64ISAR1_EL1_APA_SHIFT, #8 63 63 mrs_s \tmp2, SYS_ID_AA64ISAR2_EL1 64 - ubfx \tmp2, \tmp2, #ID_AA64ISAR2_APA3_SHIFT, #4 64 + ubfx \tmp2, \tmp2, #ID_AA64ISAR2_EL1_APA3_SHIFT, #4 65 65 orr \tmp1, \tmp1, \tmp2 66 66 cbz \tmp1, .Lno_addr_auth\@ 67 67 mov_q \tmp1, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
+1 -1
arch/arm64/include/asm/cpufeature.h
··· 673 673 isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1); 674 674 675 675 return cpuid_feature_extract_unsigned_field(isar2, 676 - ID_AA64ISAR2_BC_SHIFT); 676 + ID_AA64ISAR2_EL1_BC_SHIFT); 677 677 } 678 678 679 679 const struct cpumask *system_32bit_el0_cpumask(void);
+15 -15
arch/arm64/include/asm/sysreg.h
··· 738 738 #define ID_AA64ISAR1_EL1_GPI_IMP 0x1 739 739 740 740 /* id_aa64isar2 */ 741 - #define ID_AA64ISAR2_BC_SHIFT 28 742 - #define ID_AA64ISAR2_APA3_SHIFT 12 743 - #define ID_AA64ISAR2_GPA3_SHIFT 8 744 - #define ID_AA64ISAR2_RPRES_SHIFT 4 745 - #define ID_AA64ISAR2_WFxT_SHIFT 0 741 + #define ID_AA64ISAR2_EL1_BC_SHIFT 28 742 + #define ID_AA64ISAR2_EL1_APA3_SHIFT 12 743 + #define ID_AA64ISAR2_EL1_GPA3_SHIFT 8 744 + #define ID_AA64ISAR2_EL1_RPRES_SHIFT 4 745 + #define ID_AA64ISAR2_EL1_WFxT_SHIFT 0 746 746 747 747 /* 748 748 * Value 0x1 has been removed from the architecture, and is 749 749 * reserved, but has not yet been removed from the ARM ARM 750 750 * as of ARM DDI 0487G.b. 751 751 */ 752 - #define ID_AA64ISAR2_WFxT_NI 0x0 753 - #define ID_AA64ISAR2_WFxT_IMP 0x2 752 + #define ID_AA64ISAR2_EL1_WFxT_NI 0x0 753 + #define ID_AA64ISAR2_EL1_WFxT_IMP 0x2 754 754 755 - #define ID_AA64ISAR2_APA3_NI 0x0 756 - #define ID_AA64ISAR2_APA3_PAuth 0x1 757 - #define ID_AA64ISAR2_APA3_EPAC 0x2 758 - #define ID_AA64ISAR2_APA3_PAuth2 0x3 759 - #define ID_AA64ISAR2_APA3_FPAC 0x4 760 - #define ID_AA64ISAR2_APA3_FPACCOMBINE 0x5 755 + #define ID_AA64ISAR2_EL1_APA3_NI 0x0 756 + #define ID_AA64ISAR2_EL1_APA3_PAuth 0x1 757 + #define ID_AA64ISAR2_EL1_APA3_EPAC 0x2 758 + #define ID_AA64ISAR2_EL1_APA3_PAuth2 0x3 759 + #define ID_AA64ISAR2_EL1_APA3_FPAC 0x4 760 + #define ID_AA64ISAR2_EL1_APA3_FPACCOMBINE 0x5 761 761 762 - #define ID_AA64ISAR2_GPA3_NI 0x0 763 - #define ID_AA64ISAR2_GPA3_IMP 0x1 762 + #define ID_AA64ISAR2_EL1_GPA3_NI 0x0 763 + #define ID_AA64ISAR2_EL1_GPA3_IMP 0x1 764 764 765 765 /* id_aa64pfr0 */ 766 766 #define ID_AA64PFR0_CSV3_SHIFT 60
+17 -17
arch/arm64/kernel/cpufeature.c
··· 231 231 }; 232 232 233 233 static const struct arm64_ftr_bits ftr_id_aa64isar2[] = { 234 - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_BC_SHIFT, 4, 0), 234 + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_EL1_BC_SHIFT, 4, 0), 235 235 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH), 236 - FTR_STRICT, FTR_EXACT, ID_AA64ISAR2_APA3_SHIFT, 4, 0), 236 + FTR_STRICT, FTR_EXACT, ID_AA64ISAR2_EL1_APA3_SHIFT, 4, 0), 237 237 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH), 238 - FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_GPA3_SHIFT, 4, 0), 239 - ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_RPRES_SHIFT, 4, 0), 240 - ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_WFxT_SHIFT, 4, 0), 238 + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_GPA3_SHIFT, 4, 0), 239 + ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_RPRES_SHIFT, 4, 0), 240 + ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_WFxT_SHIFT, 4, 0), 241 241 ARM64_FTR_END, 242 242 }; 243 243 ··· 2326 2326 .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, 2327 2327 .sys_reg = SYS_ID_AA64ISAR2_EL1, 2328 2328 .sign = FTR_UNSIGNED, 2329 - .field_pos = ID_AA64ISAR2_APA3_SHIFT, 2329 + .field_pos = ID_AA64ISAR2_EL1_APA3_SHIFT, 2330 2330 .field_width = 4, 2331 - .min_field_value = ID_AA64ISAR2_APA3_PAuth, 2331 + .min_field_value = ID_AA64ISAR2_EL1_APA3_PAuth, 2332 2332 .matches = has_address_auth_cpucap, 2333 2333 }, 2334 2334 { ··· 2364 2364 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2365 2365 .sys_reg = SYS_ID_AA64ISAR2_EL1, 2366 2366 .sign = FTR_UNSIGNED, 2367 - .field_pos = ID_AA64ISAR2_GPA3_SHIFT, 2367 + .field_pos = ID_AA64ISAR2_EL1_GPA3_SHIFT, 2368 2368 .field_width = 4, 2369 - .min_field_value = ID_AA64ISAR2_GPA3_IMP, 2369 + .min_field_value = ID_AA64ISAR2_EL1_GPA3_IMP, 2370 2370 .matches = has_cpuid_feature, 2371 2371 }, 2372 2372 { ··· 2516 2516 .type = 
ARM64_CPUCAP_SYSTEM_FEATURE, 2517 2517 .sys_reg = SYS_ID_AA64ISAR2_EL1, 2518 2518 .sign = FTR_UNSIGNED, 2519 - .field_pos = ID_AA64ISAR2_WFxT_SHIFT, 2519 + .field_pos = ID_AA64ISAR2_EL1_WFxT_SHIFT, 2520 2520 .field_width = 4, 2521 2521 .matches = has_cpuid_feature, 2522 - .min_field_value = ID_AA64ISAR2_WFxT_IMP, 2522 + .min_field_value = ID_AA64ISAR2_EL1_WFxT_IMP, 2523 2523 }, 2524 2524 {}, 2525 2525 }; ··· 2565 2565 ID_AA64ISAR1_EL1_APA_PAuth) 2566 2566 }, 2567 2567 { 2568 - HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_APA3_SHIFT, 2569 - 4, FTR_UNSIGNED, ID_AA64ISAR2_APA3_PAuth) 2568 + HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_APA3_SHIFT, 2569 + 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_APA3_PAuth) 2570 2570 }, 2571 2571 { 2572 2572 HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_API_SHIFT, ··· 2581 2581 4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_GPA_IMP) 2582 2582 }, 2583 2583 { 2584 - HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_GPA3_SHIFT, 2585 - 4, FTR_UNSIGNED, ID_AA64ISAR2_GPA3_IMP) 2584 + HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_GPA3_SHIFT, 2585 + 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_GPA3_IMP) 2586 2586 }, 2587 2587 { 2588 2588 HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_GPI_SHIFT, ··· 2653 2653 #endif /* CONFIG_ARM64_MTE */ 2654 2654 HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_ECV_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV), 2655 2655 HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_AFP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP), 2656 - HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_RPRES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES), 2657 - HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_WFxT_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR2_WFxT_IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT), 2656 + HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_RPRES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES), 2657 + HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_WFxT_SHIFT, 4, FTR_UNSIGNED, 
ID_AA64ISAR2_EL1_WFxT_IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT), 2658 2658 #ifdef CONFIG_ARM64_SME 2659 2659 HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SME_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_SME, CAP_HWCAP, KERNEL_HWCAP_SME), 2660 2660 HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_FA64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
+2 -2
arch/arm64/kernel/idreg-override.c
··· 75 75 .name = "id_aa64isar2", 76 76 .override = &id_aa64isar2_override, 77 77 .fields = { 78 - { "gpa3", ID_AA64ISAR2_GPA3_SHIFT }, 79 - { "apa3", ID_AA64ISAR2_APA3_SHIFT }, 78 + { "gpa3", ID_AA64ISAR2_EL1_GPA3_SHIFT }, 79 + { "apa3", ID_AA64ISAR2_EL1_APA3_SHIFT }, 80 80 {} 81 81 }, 82 82 };
+2 -2
arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
··· 193 193 ) 194 194 195 195 #define PVM_ID_AA64ISAR2_ALLOW (\ 196 - ARM64_FEATURE_MASK(ID_AA64ISAR2_GPA3) | \ 197 - ARM64_FEATURE_MASK(ID_AA64ISAR2_APA3) \ 196 + ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3) | \ 197 + ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) \ 198 198 ) 199 199 200 200 u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id);
+2 -2
arch/arm64/kvm/hyp/nvhe/sys_regs.c
··· 186 186 u64 allow_mask = PVM_ID_AA64ISAR2_ALLOW; 187 187 188 188 if (!vcpu_has_ptrauth(vcpu)) 189 - allow_mask &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_APA3) | 190 - ARM64_FEATURE_MASK(ID_AA64ISAR2_GPA3)); 189 + allow_mask &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) | 190 + ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3)); 191 191 192 192 return id_aa64isar2_el1_sys_val & allow_mask; 193 193 }
+3 -3
arch/arm64/kvm/sys_regs.c
··· 1143 1143 break; 1144 1144 case SYS_ID_AA64ISAR2_EL1: 1145 1145 if (!vcpu_has_ptrauth(vcpu)) 1146 - val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_APA3) | 1147 - ARM64_FEATURE_MASK(ID_AA64ISAR2_GPA3)); 1146 + val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) | 1147 + ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3)); 1148 1148 if (!cpus_have_final_cap(ARM64_HAS_WFXT)) 1149 - val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_WFxT); 1149 + val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT); 1150 1150 break; 1151 1151 case SYS_ID_AA64DFR0_EL1: 1152 1152 /* Limit debug to ARMv8.0 */