Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: cpuinfo: Split AArch32 registers out into a separate struct

In preparation for late initialisation of the "sanitised" AArch32 register
state, move the AArch32 registers out of 'struct cpuinfo' and into their
own struct definition.

Acked-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20210608180313.11502-2-will@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>

+92 -81
+25 -21
arch/arm64/include/asm/cpu.h
··· 12 12 /* 13 13 * Records attributes of an individual CPU. 14 14 */ 15 - struct cpuinfo_arm64 { 16 - struct cpu cpu; 17 - struct kobject kobj; 18 - u64 reg_ctr; 19 - u64 reg_cntfrq; 20 - u64 reg_dczid; 21 - u64 reg_midr; 22 - u64 reg_revidr; 23 - u64 reg_gmid; 24 - 25 - u64 reg_id_aa64dfr0; 26 - u64 reg_id_aa64dfr1; 27 - u64 reg_id_aa64isar0; 28 - u64 reg_id_aa64isar1; 29 - u64 reg_id_aa64mmfr0; 30 - u64 reg_id_aa64mmfr1; 31 - u64 reg_id_aa64mmfr2; 32 - u64 reg_id_aa64pfr0; 33 - u64 reg_id_aa64pfr1; 34 - u64 reg_id_aa64zfr0; 35 - 15 + struct cpuinfo_32bit { 36 16 u32 reg_id_dfr0; 37 17 u32 reg_id_dfr1; 38 18 u32 reg_id_isar0; ··· 35 55 u32 reg_mvfr0; 36 56 u32 reg_mvfr1; 37 57 u32 reg_mvfr2; 58 + }; 59 + 60 + struct cpuinfo_arm64 { 61 + struct cpu cpu; 62 + struct kobject kobj; 63 + u64 reg_ctr; 64 + u64 reg_cntfrq; 65 + u64 reg_dczid; 66 + u64 reg_midr; 67 + u64 reg_revidr; 68 + u64 reg_gmid; 69 + 70 + u64 reg_id_aa64dfr0; 71 + u64 reg_id_aa64dfr1; 72 + u64 reg_id_aa64isar0; 73 + u64 reg_id_aa64isar1; 74 + u64 reg_id_aa64mmfr0; 75 + u64 reg_id_aa64mmfr1; 76 + u64 reg_id_aa64mmfr2; 77 + u64 reg_id_aa64pfr0; 78 + u64 reg_id_aa64pfr1; 79 + u64 reg_id_aa64zfr0; 80 + 81 + struct cpuinfo_32bit aarch32; 38 82 39 83 /* pseudo-ZCR for recording maximum ZCR_EL1 LEN value: */ 40 84 u64 reg_zcr;
+39 -35
arch/arm64/kernel/cpufeature.c
··· 871 871 872 872 static void __init setup_boot_cpu_capabilities(void); 873 873 874 + static void __init init_32bit_cpu_features(struct cpuinfo_32bit *info) 875 + { 876 + init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0); 877 + init_cpu_ftr_reg(SYS_ID_DFR1_EL1, info->reg_id_dfr1); 878 + init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0); 879 + init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1); 880 + init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2); 881 + init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3); 882 + init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4); 883 + init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5); 884 + init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6); 885 + init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0); 886 + init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1); 887 + init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2); 888 + init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3); 889 + init_cpu_ftr_reg(SYS_ID_MMFR4_EL1, info->reg_id_mmfr4); 890 + init_cpu_ftr_reg(SYS_ID_MMFR5_EL1, info->reg_id_mmfr5); 891 + init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0); 892 + init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1); 893 + init_cpu_ftr_reg(SYS_ID_PFR2_EL1, info->reg_id_pfr2); 894 + init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0); 895 + init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1); 896 + init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2); 897 + } 898 + 874 899 void __init init_cpu_features(struct cpuinfo_arm64 *info) 875 900 { 876 901 /* Before we start using the tables, make sure it is sorted */ ··· 915 890 init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1); 916 891 init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0); 917 892 918 - if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) { 919 - init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0); 920 - init_cpu_ftr_reg(SYS_ID_DFR1_EL1, info->reg_id_dfr1); 921 - init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0); 922 - init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1); 923 - init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2); 924 - init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3); 925 - init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4); 926 - init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5); 927 - init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6); 928 - init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0); 929 - init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1); 930 - init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2); 931 - init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3); 932 - init_cpu_ftr_reg(SYS_ID_MMFR4_EL1, info->reg_id_mmfr4); 933 - init_cpu_ftr_reg(SYS_ID_MMFR5_EL1, info->reg_id_mmfr5); 934 - init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0); 935 - init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1); 936 - init_cpu_ftr_reg(SYS_ID_PFR2_EL1, info->reg_id_pfr2); 937 - init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0); 938 - init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1); 939 - init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2); 940 - } 893 + if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) 894 + init_32bit_cpu_features(&info->aarch32); 941 895 942 896 if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) { 943 897 init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr); ··· 990 986 WARN_ON(!ftrp->width); 991 987 } 992 988 993 - static int update_32bit_cpu_features(int cpu, struct cpuinfo_arm64 *info, 994 - struct cpuinfo_arm64 *boot) 989 + static int update_32bit_cpu_features(int cpu, struct cpuinfo_32bit *info, 990 + struct cpuinfo_32bit *boot) 995 991 { 996 992 int taint = 0; 997 993 u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); 998 - 999 - /* 1000 - * If we don't have AArch32 at all then skip the checks entirely 1001 - * as the register values may be UNKNOWN and we're not going to be 1002 - * using them for anything. 1003 - */ 1004 - if (!id_aa64pfr0_32bit_el0(pfr0)) 1005 - return taint; 1006 994 1007 995 /* 1008 996 * If we don't have AArch32 at EL1, then relax the strictness of ··· 1147 1151 * value is the same on all CPUs. 1148 1152 */ 1149 1153 if (IS_ENABLED(CONFIG_ARM64_MTE) && 1150 - id_aa64pfr1_mte(info->reg_id_aa64pfr1)) 1154 + id_aa64pfr1_mte(info->reg_id_aa64pfr1)) { 1151 1155 taint |= check_update_ftr_reg(SYS_GMID_EL1, cpu, 1152 1156 info->reg_gmid, boot->reg_gmid); 1157 + } 1153 1158 1154 1159 /* 1160 + * If we don't have AArch32 at all then skip the checks entirely 1161 + * as the register values may be UNKNOWN and we're not going to be 1162 + * using them for anything. 1163 + * 1155 1164 * This relies on a sanitised view of the AArch64 ID registers 1156 1165 * (e.g. SYS_ID_AA64PFR0_EL1), so we call it last. 1157 1166 */ 1158 - taint |= update_32bit_cpu_features(cpu, info, boot); 1167 + if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) { 1168 + taint |= update_32bit_cpu_features(cpu, &info->aarch32, 1169 + &boot->aarch32); 1170 + } 1159 1171 1160 1172 /* 1161 1173 * Mismatched CPU features are a recipe for disaster. Don't even
+28 -25
arch/arm64/kernel/cpuinfo.c
··· 344 344 pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu); 345 345 } 346 346 347 + static void __cpuinfo_store_cpu_32bit(struct cpuinfo_32bit *info) 348 + { 349 + info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1); 350 + info->reg_id_dfr1 = read_cpuid(ID_DFR1_EL1); 351 + info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1); 352 + info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1); 353 + info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1); 354 + info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1); 355 + info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1); 356 + info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1); 357 + info->reg_id_isar6 = read_cpuid(ID_ISAR6_EL1); 358 + info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1); 359 + info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1); 360 + info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1); 361 + info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1); 362 + info->reg_id_mmfr4 = read_cpuid(ID_MMFR4_EL1); 363 + info->reg_id_mmfr5 = read_cpuid(ID_MMFR5_EL1); 364 + info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1); 365 + info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1); 366 + info->reg_id_pfr2 = read_cpuid(ID_PFR2_EL1); 367 + 368 + info->reg_mvfr0 = read_cpuid(MVFR0_EL1); 369 + info->reg_mvfr1 = read_cpuid(MVFR1_EL1); 370 + info->reg_mvfr2 = read_cpuid(MVFR2_EL1); 371 + } 372 + 347 373 static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) 348 374 { 349 375 info->reg_cntfrq = arch_timer_get_cntfrq(); ··· 400 374 if (id_aa64pfr1_mte(info->reg_id_aa64pfr1)) 401 375 info->reg_gmid = read_cpuid(GMID_EL1); 402 376 403 - /* Update the 32bit ID registers only if AArch32 is implemented */ 404 - if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) { 405 - info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1); 406 - info->reg_id_dfr1 = read_cpuid(ID_DFR1_EL1); 407 - info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1); 408 - info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1); 409 - info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1); 410 - info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1); 411 - info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1); 412 - info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1); 413 - info->reg_id_isar6 = read_cpuid(ID_ISAR6_EL1); 414 - info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1); 415 - info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1); 416 - info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1); 417 - info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1); 418 - info->reg_id_mmfr4 = read_cpuid(ID_MMFR4_EL1); 419 - info->reg_id_mmfr5 = read_cpuid(ID_MMFR5_EL1); 420 - info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1); 421 - info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1); 422 - info->reg_id_pfr2 = read_cpuid(ID_PFR2_EL1); 423 - 424 - info->reg_mvfr0 = read_cpuid(MVFR0_EL1); 425 - info->reg_mvfr1 = read_cpuid(MVFR1_EL1); 426 - info->reg_mvfr2 = read_cpuid(MVFR2_EL1); 427 - } 377 + if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) 378 + __cpuinfo_store_cpu_32bit(&info->aarch32); 428 379 429 380 if (IS_ENABLED(CONFIG_ARM64_SVE) && 430 381 id_aa64pfr0_sve(info->reg_id_aa64pfr0))