Linux kernel mirror (for testing): https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

arm64/sysreg: Add _EL1 into ID_AA64DFR0_EL1 definition names

Normally we include the full register name in the defines for fields within
registers, but this has not been followed for the ID registers. In preparation
for automatic generation of the defines, add the _EL1s into the defines for
ID_AA64DFR0_EL1 to follow the convention. No functional changes.

Signed-off-by: Mark Brown <broonie@kernel.org>
Link: https://lore.kernel.org/r/20220910163354.860255-3-broonie@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
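
[Editor's note] For illustration, the rename only lengthens the identifiers; the usage pattern is unchanged. Below is a minimal sketch built from helpers that appear in the diffs that follow; the pmu_is_implemented() wrapper itself is hypothetical and not part of this patch.

	/* Hypothetical wrapper: detect an architected (non-IMP_DEF) PMU. */
	static bool pmu_is_implemented(void)
	{
		u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
		unsigned int pmuver;

		/* Was ID_AA64DFR0_PMUVer_SHIFT; the define now spells out the full register name. */
		pmuver = cpuid_feature_extract_unsigned_field(dfr0,
							      ID_AA64DFR0_EL1_PMUVer_SHIFT);

		/* 0 means no PMU; 0xf (IMP_DEF) is treated as unimplemented too. */
		return pmuver && pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF;
	}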

Authored by Mark Brown, committed by Catalin Marinas
fcf37b38 c0357a73

12 files changed, 63 insertions(+), 63 deletions(-)
arch/arm64/include/asm/assembler.h (+1 -1)

@@ -512,7 +512,7 @@
 	 */
 	.macro	reset_pmuserenr_el0, tmpreg
 	mrs	\tmpreg, id_aa64dfr0_el1
-	sbfx	\tmpreg, \tmpreg, #ID_AA64DFR0_PMUVer_SHIFT, #4
+	sbfx	\tmpreg, \tmpreg, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
 	cmp	\tmpreg, #1			// Skip if no PMU present
 	b.lt	9000f
 	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
arch/arm64/include/asm/cpufeature.h (+1 -1)

@@ -553,7 +553,7 @@
 	u64 mask = GENMASK_ULL(field + 3, field);

 	/* Treat IMPLEMENTATION DEFINED functionality as unimplemented */
-	if (val == ID_AA64DFR0_PMUVer_IMP_DEF)
+	if (val == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
 		val = 0;

 	if (val > cap) {
arch/arm64/include/asm/el2_setup.h (+4 -4)

@@ -40,7 +40,7 @@

 .macro __init_el2_debug
 	mrs	x1, id_aa64dfr0_el1
-	sbfx	x0, x1, #ID_AA64DFR0_PMUVer_SHIFT, #4
+	sbfx	x0, x1, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
 	cmp	x0, #1
 	b.lt	.Lskip_pmu_\@			// Skip if no PMU present
 	mrs	x0, pmcr_el0			// Disable debug access traps
@@ -49,7 +49,7 @@
 	csel	x2, xzr, x0, lt			// all PMU counters from EL1

 	/* Statistical profiling */
-	ubfx	x0, x1, #ID_AA64DFR0_PMSVer_SHIFT, #4
+	ubfx	x0, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
 	cbz	x0, .Lskip_spe_\@		// Skip if SPE not present

 	mrs_s	x0, SYS_PMBIDR_EL1		// If SPE available at EL2,
@@ -65,7 +65,7 @@

 .Lskip_spe_\@:
 	/* Trace buffer */
-	ubfx	x0, x1, #ID_AA64DFR0_TraceBuffer_SHIFT, #4
+	ubfx	x0, x1, #ID_AA64DFR0_EL1_TraceBuffer_SHIFT, #4
 	cbz	x0, .Lskip_trace_\@		// Skip if TraceBuffer is not present

 	mrs_s	x0, SYS_TRBIDR_EL1
@@ -137,7 +137,7 @@

 	mov	x0, xzr
 	mrs	x1, id_aa64dfr0_el1
-	ubfx	x1, x1, #ID_AA64DFR0_PMSVer_SHIFT, #4
+	ubfx	x1, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
 	cmp	x1, #3
 	b.lt	.Lset_debug_fgt_\@
 	/* Disable PMSNEVFR_EL1 read and write traps */
arch/arm64/include/asm/hw_breakpoint.h (+2 -2)

@@ -142,7 +142,7 @@
 	u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
 	return 1 +
 		cpuid_feature_extract_unsigned_field(dfr0,
-						ID_AA64DFR0_BRPs_SHIFT);
+						ID_AA64DFR0_EL1_BRPs_SHIFT);
 }

 /* Determine number of WRP registers available. */
@@ -151,7 +151,7 @@
 	u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
 	return 1 +
 		cpuid_feature_extract_unsigned_field(dfr0,
-						ID_AA64DFR0_WRPs_SHIFT);
+						ID_AA64DFR0_EL1_WRPs_SHIFT);
 }

 #endif	/* __ASM_BREAKPOINT_H */
arch/arm64/include/asm/sysreg.h (+19 -19)

@@ -699,27 +699,27 @@
 #endif

 /* id_aa64dfr0 */
-#define ID_AA64DFR0_MTPMU_SHIFT		48
-#define ID_AA64DFR0_TraceBuffer_SHIFT	44
-#define ID_AA64DFR0_TraceFilt_SHIFT	40
-#define ID_AA64DFR0_DoubleLock_SHIFT	36
-#define ID_AA64DFR0_PMSVer_SHIFT	32
-#define ID_AA64DFR0_CTX_CMPs_SHIFT	28
-#define ID_AA64DFR0_WRPs_SHIFT		20
-#define ID_AA64DFR0_BRPs_SHIFT		12
-#define ID_AA64DFR0_PMUVer_SHIFT	8
-#define ID_AA64DFR0_TraceVer_SHIFT	4
-#define ID_AA64DFR0_DebugVer_SHIFT	0
+#define ID_AA64DFR0_EL1_MTPMU_SHIFT		48
+#define ID_AA64DFR0_EL1_TraceBuffer_SHIFT	44
+#define ID_AA64DFR0_EL1_TraceFilt_SHIFT		40
+#define ID_AA64DFR0_EL1_DoubleLock_SHIFT	36
+#define ID_AA64DFR0_EL1_PMSVer_SHIFT		32
+#define ID_AA64DFR0_EL1_CTX_CMPs_SHIFT		28
+#define ID_AA64DFR0_EL1_WRPs_SHIFT		20
+#define ID_AA64DFR0_EL1_BRPs_SHIFT		12
+#define ID_AA64DFR0_EL1_PMUVer_SHIFT		8
+#define ID_AA64DFR0_EL1_TraceVer_SHIFT		4
+#define ID_AA64DFR0_EL1_DebugVer_SHIFT		0

-#define ID_AA64DFR0_PMUVer_8_0		0x1
-#define ID_AA64DFR0_PMUVer_8_1		0x4
-#define ID_AA64DFR0_PMUVer_8_4		0x5
-#define ID_AA64DFR0_PMUVer_8_5		0x6
-#define ID_AA64DFR0_PMUVer_8_7		0x7
-#define ID_AA64DFR0_PMUVer_IMP_DEF	0xf
+#define ID_AA64DFR0_EL1_PMUVer_8_0	0x1
+#define ID_AA64DFR0_EL1_PMUVer_8_1	0x4
+#define ID_AA64DFR0_EL1_PMUVer_8_4	0x5
+#define ID_AA64DFR0_EL1_PMUVer_8_5	0x6
+#define ID_AA64DFR0_EL1_PMUVer_8_7	0x7
+#define ID_AA64DFR0_EL1_PMUVer_IMP_DEF	0xf

-#define ID_AA64DFR0_PMSVer_8_2		0x1
-#define ID_AA64DFR0_PMSVer_8_3		0x2
+#define ID_AA64DFR0_EL1_PMSVer_8_2	0x1
+#define ID_AA64DFR0_EL1_PMSVer_8_3	0x2

 #define ID_DFR0_PERFMON_SHIFT		24

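[Editor's note] Why only the _SHIFT and value defines need renaming: each of these ID register fields is four bits wide, and the KVM hunks below derive field masks from the _SHIFT names by token pasting. The ARM64_FEATURE_MASK() body sketched here is an assumption about its shape, inferred from the GENMASK_ULL() pattern visible in the cpufeature.h hunk above, not a quote of the kernel header.

	/* Assumed shape: a 4-bit field mask built from the _SHIFT define. */
	#define ARM64_FEATURE_MASK(x)	(GENMASK_ULL(x##_SHIFT + 3, x##_SHIFT))

	/*
	 * ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer) pastes "_SHIFT" onto the
	 * renamed define, yielding GENMASK_ULL(11, 8): bits [11:8] of the register.
	 */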
arch/arm64/kernel/cpufeature.c (+7 -7)

@@ -434,17 +434,17 @@
 };

 static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
-	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_DoubleLock_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVer_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPs_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPs_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPs_SHIFT, 4, 0),
+	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_DoubleLock_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_PMSVer_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_CTX_CMPs_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_WRPs_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_BRPs_SHIFT, 4, 0),
 	/*
 	 * We can instantiate multiple PMU instances with different levels
 	 * of support.
 	 */
-	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVer_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DebugVer_SHIFT, 4, 0x6),
+	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_EL1_PMUVer_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_EL1_DebugVer_SHIFT, 4, 0x6),
 	ARM64_FTR_END,
 };

arch/arm64/kernel/debug-monitors.c (+1 -1)

@@ -28,7 +28,7 @@
 u8 debug_monitors_arch(void)
 {
 	return cpuid_feature_extract_unsigned_field(read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1),
-						ID_AA64DFR0_DebugVer_SHIFT);
+						ID_AA64DFR0_EL1_DebugVer_SHIFT);
 }

 /*
arch/arm64/kernel/perf_event.c (+4 -4)

@@ -390,7 +390,7 @@
  */
 static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu)
 {
-	return (cpu_pmu->pmuver >= ID_AA64DFR0_PMUVer_8_5);
+	return (cpu_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_8_5);
 }

 static inline bool armv8pmu_event_has_user_read(struct perf_event *event)
@@ -1145,8 +1145,8 @@

 	dfr0 = read_sysreg(id_aa64dfr0_el1);
 	pmuver = cpuid_feature_extract_unsigned_field(dfr0,
-						      ID_AA64DFR0_PMUVer_SHIFT);
-	if (pmuver == ID_AA64DFR0_PMUVer_IMP_DEF || pmuver == 0)
+						      ID_AA64DFR0_EL1_PMUVer_SHIFT);
+	if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF || pmuver == 0)
 		return;

 	cpu_pmu->pmuver = pmuver;
@@ -1172,7 +1172,7 @@
 			     pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);

 	/* store PMMIR_EL1 register for sysfs */
-	if (pmuver >= ID_AA64DFR0_PMUVer_8_4 && (pmceid_raw[1] & BIT(31)))
+	if (pmuver >= ID_AA64DFR0_EL1_PMUVer_8_4 && (pmceid_raw[1] & BIT(31)))
 		cpu_pmu->reg_pmmir = read_cpuid(PMMIR_EL1);
 	else
 		cpu_pmu->reg_pmmir = 0;
arch/arm64/kvm/debug.c (+2 -2)

@@ -295,12 +295,12 @@
 	 * If SPE is present on this CPU and is available at current EL,
 	 * we may need to check if the host state needs to be saved.
 	 */
-	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_PMSVer_SHIFT) &&
+	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) &&
 	    !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(SYS_PMBIDR_EL1_P_SHIFT)))
 		vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_SPE);

 	/* Check if we have TRBE implemented and available at the host */
-	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TraceBuffer_SHIFT) &&
+	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceBuffer_SHIFT) &&
 	    !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_PROG))
 		vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_TRBE);
 }
arch/arm64/kvm/hyp/nvhe/pkvm.c (+6 -6)

@@ -86,32 +86,32 @@
 	u64 cptr_set = 0;

 	/* Trap/constrain PMU */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVer), feature_ids)) {
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) {
 		mdcr_set |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
 		mdcr_clear |= MDCR_EL2_HPME | MDCR_EL2_MTPME |
 			      MDCR_EL2_HPMN_MASK;
 	}

 	/* Trap Debug */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DebugVer), feature_ids))
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), feature_ids))
 		mdcr_set |= MDCR_EL2_TDRA | MDCR_EL2_TDA | MDCR_EL2_TDE;

 	/* Trap OS Double Lock */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DoubleLock), feature_ids))
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DoubleLock), feature_ids))
 		mdcr_set |= MDCR_EL2_TDOSA;

 	/* Trap SPE */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVer), feature_ids)) {
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer), feature_ids)) {
 		mdcr_set |= MDCR_EL2_TPMS;
 		mdcr_clear |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
 	}

 	/* Trap Trace Filter */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TraceFilt), feature_ids))
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids))
 		mdcr_set |= MDCR_EL2_TTRF;

 	/* Trap Trace */
-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TraceVer), feature_ids))
+	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids))
 		cptr_set |= CPTR_EL2_TTA;

 	vcpu->arch.mdcr_el2 |= mdcr_set;
arch/arm64/kvm/pmu-emul.c (+8 -8)

@@ -33,12 +33,12 @@
 	pmuver = kvm->arch.arm_pmu->pmuver;

 	switch (pmuver) {
-	case ID_AA64DFR0_PMUVer_8_0:
+	case ID_AA64DFR0_EL1_PMUVer_8_0:
 		return GENMASK(9, 0);
-	case ID_AA64DFR0_PMUVer_8_1:
-	case ID_AA64DFR0_PMUVer_8_4:
-	case ID_AA64DFR0_PMUVer_8_5:
-	case ID_AA64DFR0_PMUVer_8_7:
+	case ID_AA64DFR0_EL1_PMUVer_8_1:
+	case ID_AA64DFR0_EL1_PMUVer_8_4:
+	case ID_AA64DFR0_EL1_PMUVer_8_5:
+	case ID_AA64DFR0_EL1_PMUVer_8_7:
 		return GENMASK(15, 0);
 	default:		/* Shouldn't be here, just for sanity */
 		WARN_ONCE(1, "Unknown PMU version %d\n", pmuver);
@@ -774,7 +774,7 @@
 {
 	struct arm_pmu_entry *entry;

-	if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_PMUVer_IMP_DEF)
+	if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
 		return;

 	mutex_lock(&arm_pmus_lock);
@@ -828,7 +828,7 @@
 	if (event->pmu) {
 		pmu = to_arm_pmu(event->pmu);
 		if (pmu->pmuver == 0 ||
-		    pmu->pmuver == ID_AA64DFR0_PMUVer_IMP_DEF)
+		    pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
 			pmu = NULL;
 	}

@@ -856,7 +856,7 @@
 		 * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
 		 * as RAZ
 		 */
-		if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_PMUVer_8_4)
+		if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_8_4)
 			val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
 		base = 32;
 	}
arch/arm64/kvm/sys_regs.c (+8 -8)

@@ -1110,14 +1110,14 @@
 		break;
 	case SYS_ID_AA64DFR0_EL1:
 		/* Limit debug to ARMv8.0 */
-		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_DebugVer);
-		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_DebugVer), 6);
+		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer);
+		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), 6);
 		/* Limit guests to PMUv3 for ARMv8.4 */
 		val = cpuid_feature_cap_perfmon_field(val,
-						      ID_AA64DFR0_PMUVer_SHIFT,
-						      kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVer_8_4 : 0);
+						      ID_AA64DFR0_EL1_PMUVer_SHIFT,
+						      kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_EL1_PMUVer_8_4 : 0);
 		/* Hide SPE from guests */
-		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVer);
+		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer);
 		break;
 	case SYS_ID_DFR0_EL1:
 		/* Limit guests to PMUv3 for ARMv8.4 */
@@ -1827,9 +1827,9 @@
 	u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
 	u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL1_EL3_SHIFT);

-	p->regval = ((((dfr >> ID_AA64DFR0_WRPs_SHIFT) & 0xf) << 28) |
-		     (((dfr >> ID_AA64DFR0_BRPs_SHIFT) & 0xf) << 24) |
-		     (((dfr >> ID_AA64DFR0_CTX_CMPs_SHIFT) & 0xf) << 20)
+	p->regval = ((((dfr >> ID_AA64DFR0_EL1_WRPs_SHIFT) & 0xf) << 28) |
+		     (((dfr >> ID_AA64DFR0_EL1_BRPs_SHIFT) & 0xf) << 24) |
+		     (((dfr >> ID_AA64DFR0_EL1_CTX_CMPs_SHIFT) & 0xf) << 20)
 		     | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12));
 	return true;
 }