Merge tag 'x86-urgent-2024-06-02' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
"Miscellaneous topology parsing fixes:

- Fix topology parsing regression on older CPUs in the new AMD/Hygon
parser

- Fix boot crash on odd Intel Quark and similar CPUs that do not fill
out cpuinfo_x86::x86_clflush_size and zero out
cpuinfo_x86::x86_cache_alignment as a result.

Provide 32 bytes as a general fallback value.

- Fix topology enumeration on certain rare CPUs where the BIOS locks
certain CPUID leaves and the kernel unlocked them late, which broke
with the new topology parsing code. Factor out this unlocking logic
and move it earlier in the parsing sequence"

* tag 'x86-urgent-2024-06-02' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/topology/intel: Unlock CPUID before evaluating anything
x86/cpu: Provide default cache line size if not enumerated
x86/topology/amd: Evaluate SMT in CPUID leaf 0x8000001e only on family 0x17 and greater

Changed files
+26 -12
arch
+6 -1
arch/x86/kernel/cpu/common.c
···
1075 1075
1076 1076 	c->x86_virt_bits = (eax >> 8) & 0xff;
1077 1077 	c->x86_phys_bits = eax & 0xff;
     1078 +
     1079 +	/* Provide a sane default if not enumerated: */
     1080 +	if (!c->x86_clflush_size)
     1081 +		c->x86_clflush_size = 32;
1078 1082 }
1079 1083
1080 1084 	c->x86_cache_bits = c->x86_phys_bits;
···
1589 1585 	if (have_cpuid_p()) {
1590 1586 		cpu_detect(c);
1591 1587 		get_cpu_vendor(c);
     1588 +		intel_unlock_cpuid_leafs(c);
1592 1589 		get_cpu_cap(c);
1593 1590 		setup_force_cpu_cap(X86_FEATURE_CPUID);
1594 1591 		get_cpu_address_sizes(c);
···
1749 1744 	cpu_detect(c);
1750 1745
1751 1746 	get_cpu_vendor(c);
1752      -
     1747 +	intel_unlock_cpuid_leafs(c);
1753 1748 	get_cpu_cap(c);
1754 1749
1755 1750 	get_cpu_address_sizes(c);
+2
arch/x86/kernel/cpu/cpu.h
···
61 61
62 62 extern void __init tsx_init(void);
63 63 void tsx_ap_init(void);
   64 + void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c);
64 65 #else
65 66 static inline void tsx_init(void) { }
66 67 static inline void tsx_ap_init(void) { }
   68 + static inline void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c) { }
67 69 #endif /* CONFIG_CPU_SUP_INTEL */
68 70
69 71 extern void init_spectral_chicken(struct cpuinfo_x86 *c);
+16 -9
arch/x86/kernel/cpu/intel.c
···
269 269 		c->x86_phys_bits -= keyid_bits;
270 270 }
271 271
    272 + void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c)
    273 + {
    274 +	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
    275 +		return;
    276 +
    277 +	if (c->x86 < 6 || (c->x86 == 6 && c->x86_model < 0xd))
    278 +		return;
    279 +
    280 +	/*
    281 +	 * The BIOS can have limited CPUID to leaf 2, which breaks feature
    282 +	 * enumeration. Unlock it and update the maximum leaf info.
    283 +	 */
    284 +	if (msr_clear_bit(MSR_IA32_MISC_ENABLE, MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0)
    285 +		c->cpuid_level = cpuid_eax(0);
    286 + }
    287 +
272 288 static void early_init_intel(struct cpuinfo_x86 *c)
273 289 {
274 290 	u64 misc_enable;
275     -
276     -	/* Unmask CPUID levels if masked: */
277     -	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
278     -		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
279     -			MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
280     -			c->cpuid_level = cpuid_eax(0);
281     -			get_cpu_cap(c);
282     -		}
283     -	}
284 291
285 292 	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
286 293 		(c->x86 == 0x6 && c->x86_model >= 0x0e))
+2 -2
arch/x86/kernel/cpu/topology_amd.c
···
84 84
85 85 	/*
86 86 	 * If leaf 0xb is available, then the domain shifts are set
87    -	 * already and nothing to do here.
   87 +	 * already and nothing to do here. Only valid for family >= 0x17.
88 88 	 */
89    -	if (!has_topoext) {
   89 +	if (!has_topoext && tscan->c->x86 >= 0x17) {
90 90 		/*
91 91 		 * Leaf 0x80000008 set the CORE domain shift already.
92 92 		 * Update the SMT domain, but do not propagate it.