Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ARM: make it easier to check the CPU part number correctly

Ensure that platform maintainers check the CPU part number in the right
manner: the CPU part number is meaningless without also checking the
CPU implement(e|o)r (choose your preferred spelling!). Provide an
interface which returns both the implementer and part number together,
and update the definitions to include the implementer.

Mark the old function as being deprecated... indeed, using the old
function with the definitions will now always evaluate as false, so
people must update their un-merged code to the new function. While
this could be avoided by adding new definitions, we'd also have to
create new names for them which would be awkward.

Acked-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

+66 -63
+24 -13
arch/arm/include/asm/cputype.h
··· 62 62 #define ARM_CPU_IMP_ARM 0x41 63 63 #define ARM_CPU_IMP_INTEL 0x69 64 64 65 - #define ARM_CPU_PART_ARM1136 0xB360 66 - #define ARM_CPU_PART_ARM1156 0xB560 67 - #define ARM_CPU_PART_ARM1176 0xB760 68 - #define ARM_CPU_PART_ARM11MPCORE 0xB020 69 - #define ARM_CPU_PART_CORTEX_A8 0xC080 70 - #define ARM_CPU_PART_CORTEX_A9 0xC090 71 - #define ARM_CPU_PART_CORTEX_A5 0xC050 72 - #define ARM_CPU_PART_CORTEX_A15 0xC0F0 73 - #define ARM_CPU_PART_CORTEX_A7 0xC070 74 - #define ARM_CPU_PART_CORTEX_A12 0xC0D0 75 - #define ARM_CPU_PART_CORTEX_A17 0xC0E0 65 + /* ARM implemented processors */ 66 + #define ARM_CPU_PART_ARM1136 0x4100b360 67 + #define ARM_CPU_PART_ARM1156 0x4100b560 68 + #define ARM_CPU_PART_ARM1176 0x4100b760 69 + #define ARM_CPU_PART_ARM11MPCORE 0x4100b020 70 + #define ARM_CPU_PART_CORTEX_A8 0x4100c080 71 + #define ARM_CPU_PART_CORTEX_A9 0x4100c090 72 + #define ARM_CPU_PART_CORTEX_A5 0x4100c050 73 + #define ARM_CPU_PART_CORTEX_A7 0x4100c070 74 + #define ARM_CPU_PART_CORTEX_A12 0x4100c0d0 75 + #define ARM_CPU_PART_CORTEX_A17 0x4100c0e0 76 + #define ARM_CPU_PART_CORTEX_A15 0x4100c0f0 76 77 77 78 #define ARM_CPU_XSCALE_ARCH_MASK 0xe000 78 79 #define ARM_CPU_XSCALE_ARCH_V1 0x2000 ··· 172 171 return (read_cpuid_id() & 0xFF000000) >> 24; 173 172 } 174 173 175 - static inline unsigned int __attribute_const__ read_cpuid_part_number(void) 174 + /* 175 + * The CPU part number is meaningless without referring to the CPU 176 + * implementer: implementers are free to define their own part numbers 177 + * which are permitted to clash with other implementer part numbers. 
178 + */ 179 + static inline unsigned int __attribute_const__ read_cpuid_part(void) 180 + { 181 + return read_cpuid_id() & 0xff00fff0; 182 + } 183 + 184 + static inline unsigned int __attribute_const__ __deprecated read_cpuid_part_number(void) 176 185 { 177 186 return read_cpuid_id() & 0xFFF0; 178 187 } 179 188 180 189 static inline unsigned int __attribute_const__ xscale_cpu_arch_version(void) 181 190 { 182 - return read_cpuid_part_number() & ARM_CPU_XSCALE_ARCH_MASK; 191 + return read_cpuid_id() & ARM_CPU_XSCALE_ARCH_MASK; 183 192 } 184 193 185 194 static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
+1 -1
arch/arm/include/asm/smp_scu.h
··· 11 11 12 12 static inline bool scu_a9_has_base(void) 13 13 { 14 - return read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9; 14 + return read_cpuid_part() == ARM_CPU_PART_CORTEX_A9; 15 15 } 16 16 17 17 static inline unsigned long scu_a9_get_base(void)
+27 -28
arch/arm/kernel/perf_event_cpu.c
··· 250 250 static int probe_current_pmu(struct arm_pmu *pmu) 251 251 { 252 252 int cpu = get_cpu(); 253 - unsigned long implementor = read_cpuid_implementor(); 254 - unsigned long part_number = read_cpuid_part_number(); 255 253 int ret = -ENODEV; 256 254 257 255 pr_info("probing PMU on CPU %d\n", cpu); 258 256 257 + switch (read_cpuid_part()) { 259 258 /* ARM Ltd CPUs. */ 260 - if (implementor == ARM_CPU_IMP_ARM) { 261 - switch (part_number) { 262 - case ARM_CPU_PART_ARM1136: 263 - case ARM_CPU_PART_ARM1156: 264 - case ARM_CPU_PART_ARM1176: 265 - ret = armv6pmu_init(pmu); 266 - break; 267 - case ARM_CPU_PART_ARM11MPCORE: 268 - ret = armv6mpcore_pmu_init(pmu); 269 - break; 270 - case ARM_CPU_PART_CORTEX_A8: 271 - ret = armv7_a8_pmu_init(pmu); 272 - break; 273 - case ARM_CPU_PART_CORTEX_A9: 274 - ret = armv7_a9_pmu_init(pmu); 275 - break; 259 + case ARM_CPU_PART_ARM1136: 260 + case ARM_CPU_PART_ARM1156: 261 + case ARM_CPU_PART_ARM1176: 262 + ret = armv6pmu_init(pmu); 263 + break; 264 + case ARM_CPU_PART_ARM11MPCORE: 265 + ret = armv6mpcore_pmu_init(pmu); 266 + break; 267 + case ARM_CPU_PART_CORTEX_A8: 268 + ret = armv7_a8_pmu_init(pmu); 269 + break; 270 + case ARM_CPU_PART_CORTEX_A9: 271 + ret = armv7_a9_pmu_init(pmu); 272 + break; 273 + 274 + default: 275 + if (read_cpuid_implementor() == ARM_CPU_IMP_INTEL) { 276 + switch (xscale_cpu_arch_version()) { 277 + case ARM_CPU_XSCALE_ARCH_V1: 278 + ret = xscale1pmu_init(pmu); 279 + break; 280 + case ARM_CPU_XSCALE_ARCH_V2: 281 + ret = xscale2pmu_init(pmu); 282 + break; 283 + } 276 284 } 277 - /* Intel CPUs [xscale]. */ 278 - } else if (implementor == ARM_CPU_IMP_INTEL) { 279 - switch (xscale_cpu_arch_version()) { 280 - case ARM_CPU_XSCALE_ARCH_V1: 281 - ret = xscale1pmu_init(pmu); 282 - break; 283 - case ARM_CPU_XSCALE_ARCH_V2: 284 - ret = xscale2pmu_init(pmu); 285 - break; 286 - } 285 + break; 287 286 } 288 287 289 288 put_cpu();
+1 -7
arch/arm/kvm/guest.c
··· 274 274 275 275 int __attribute_const__ kvm_target_cpu(void) 276 276 { 277 - unsigned long implementor = read_cpuid_implementor(); 278 - unsigned long part_number = read_cpuid_part_number(); 279 - 280 - if (implementor != ARM_CPU_IMP_ARM) 281 - return -EINVAL; 282 - 283 - switch (part_number) { 277 + switch (read_cpuid_part()) { 284 278 case ARM_CPU_PART_CORTEX_A7: 285 279 return KVM_ARM_TARGET_CORTEX_A7; 286 280 case ARM_CPU_PART_CORTEX_A15:
+2 -2
arch/arm/mach-exynos/mcpm-exynos.c
··· 196 196 if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) { 197 197 arch_spin_unlock(&exynos_mcpm_lock); 198 198 199 - if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) { 199 + if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) { 200 200 /* 201 201 * On the Cortex-A15 we need to disable 202 202 * L2 prefetching before flushing the cache. ··· 291 291 292 292 static void __init exynos_cache_off(void) 293 293 { 294 - if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) { 294 + if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) { 295 295 /* disable L2 prefetching on the Cortex-A15 */ 296 296 asm volatile( 297 297 "mcr p15, 1, %0, c15, c0, 3\n\t"
+2 -2
arch/arm/mach-exynos/platsmp.c
··· 188 188 void __iomem *scu_base = scu_base_addr(); 189 189 unsigned int i, ncores; 190 190 191 - if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9) 191 + if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) 192 192 ncores = scu_base ? scu_get_core_count(scu_base) : 1; 193 193 else 194 194 /* ··· 214 214 215 215 exynos_sysram_init(); 216 216 217 - if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9) 217 + if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) 218 218 scu_enable(scu_base_addr()); 219 219 220 220 /*
+5 -6
arch/arm/mach-exynos/pm.c
··· 300 300 tmp = (S5P_USE_STANDBY_WFI0 | S5P_USE_STANDBY_WFE0); 301 301 __raw_writel(tmp, S5P_CENTRAL_SEQ_OPTION); 302 302 303 - if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9) 303 + if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) 304 304 exynos_cpu_save_register(); 305 305 306 306 return 0; ··· 334 334 if (exynos_pm_central_resume()) 335 335 goto early_wakeup; 336 336 337 - if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9) 337 + if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) 338 338 exynos_cpu_restore_register(); 339 339 340 340 /* For release retention */ ··· 353 353 354 354 s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save)); 355 355 356 - if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9) 356 + if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) 357 357 scu_enable(S5P_VA_SCU); 358 358 359 359 early_wakeup: ··· 440 440 case CPU_PM_ENTER: 441 441 if (cpu == 0) { 442 442 exynos_pm_central_suspend(); 443 - if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9) 443 + if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) 444 444 exynos_cpu_save_register(); 445 445 } 446 446 break; 447 447 448 448 case CPU_PM_EXIT: 449 449 if (cpu == 0) { 450 - if (read_cpuid_part_number() == 451 - ARM_CPU_PART_CORTEX_A9) { 450 + if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) { 452 451 scu_enable(S5P_VA_SCU); 453 452 exynos_cpu_restore_register(); 454 453 }
+2 -2
arch/arm/mach-vexpress/tc2_pm.c
··· 152 152 if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) { 153 153 arch_spin_unlock(&tc2_pm_lock); 154 154 155 - if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) { 155 + if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) { 156 156 /* 157 157 * On the Cortex-A15 we need to disable 158 158 * L2 prefetching before flushing the cache. ··· 326 326 static void __init tc2_cache_off(void) 327 327 { 328 328 pr_info("TC2: disabling cache during MCPM loopback test\n"); 329 - if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) { 329 + if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) { 330 330 /* disable L2 prefetching on the Cortex-A15 */ 331 331 asm volatile( 332 332 "mcr p15, 1, %0, c15, c0, 3 \n\t"
+1 -1
arch/arm/mm/cache-l2x0.c
··· 665 665 static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock) 666 666 { 667 667 unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_PART_MASK; 668 - bool cortex_a9 = read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9; 668 + bool cortex_a9 = read_cpuid_part() == ARM_CPU_PART_CORTEX_A9; 669 669 670 670 if (rev >= L310_CACHE_ID_RTL_R2P0) { 671 671 if (cortex_a9) {
+1 -1
drivers/clocksource/arm_global_timer.c
··· 250 250 * fire when the timer value is greater than or equal to. In previous 251 251 * revisions the comparators fired when the timer value was equal to. 252 252 */ 253 - if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9 253 + if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9 254 254 && (read_cpuid_id() & 0xf0000f) < 0x200000) { 255 255 pr_warn("global-timer: non support for this cpu version.\n"); 256 256 return;