Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 CPU updates from Ingo Molnar:
"The changes in this development cycle were:

- AMD CPU topology enhancements that are cleanups on current CPUs but
which enable future Fam17 hardware. (Yazen Ghannam)

- unify bugs.c and bugs_64.c (Borislav Petkov)

- remove the show_msr= boot option (Borislav Petkov)

- simplify a boot message (Borislav Petkov)"

* 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/cpu/AMD: Clean up cpu_llc_id assignment per topology feature
x86/cpu: Get rid of the show_msr= boot option
x86/cpu: Merge bugs.c and bugs_64.c
x86/cpu: Remove the printk format specifier in "CPU0: "

+43 -112
-6
Documentation/kernel-parameters.txt
···
 	shapers=	[NET]
 			Maximal number of shapers.
 
-	show_msr=	[x86] show boot-time MSR settings
-			Format: { <integer> }
-			Show boot-time (BIOS-initialized) MSR settings.
-			The parameter means the number of CPUs to show,
-			for example 1 means boot CPU only.
-
 	simeth=		[IA-64]
 	simscsi=
···
+1 -3
arch/x86/kernel/cpu/Makefile
···
 obj-y			+= common.o
 obj-y			+= rdrand.o
 obj-y			+= match.o
+obj-y			+= bugs.o
 
 obj-$(CONFIG_PROC_FS)	+= proc.o
 obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o
-
-obj-$(CONFIG_X86_32)	+= bugs.o
-obj-$(CONFIG_X86_64)	+= bugs_64.o
 
 obj-$(CONFIG_CPU_SUP_INTEL)	+= intel.o
 obj-$(CONFIG_CPU_SUP_AMD)	+= amd.o
···
+19 -12
arch/x86/kernel/cpu/amd.c
···
 		smp_num_siblings = ((ebx >> 8) & 3) + 1;
 		c->x86_max_cores /= smp_num_siblings;
 		c->cpu_core_id = ebx & 0xff;
+
+		/*
+		 * We may have multiple LLCs if L3 caches exist, so check if we
+		 * have an L3 cache by looking at the L3 cache CPUID leaf.
+		 */
+		if (cpuid_edx(0x80000006)) {
+			if (c->x86 == 0x17) {
+				/*
+				 * LLC is at the core complex level.
+				 * Core complex id is ApicId[3].
+				 */
+				per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
+			} else {
+				/* LLC is at the node level. */
+				per_cpu(cpu_llc_id, cpu) = node_id;
+			}
+		}
 	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
 		u64 value;
 
 		rdmsrl(MSR_FAM10H_NODE_ID, value);
 		node_id = value & 7;
+
+		per_cpu(cpu_llc_id, cpu) = node_id;
 	} else
 		return;
···
 
 		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
 		cus_per_node = c->x86_max_cores / nodes_per_socket;
-
-		/* store NodeID, use llc_shared_map to store sibling info */
-		per_cpu(cpu_llc_id, cpu) = node_id;
 
 		/* core id has to be in the [0 .. cores_per_node - 1] range */
 		c->cpu_core_id %= cus_per_node;
···
 	/* use socket ID also for last level cache */
 	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
 	amd_get_topology(c);
-
-	/*
-	 * Fix percpu cpu_llc_id here as LLC topology is different
-	 * for Fam17h systems.
-	 */
-	if (c->x86 != 0x17 || !cpuid_edx(0x80000006))
-		return;
-
-	per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
 #endif
 }
···
+22 -4
arch/x86/kernel/cpu/bugs.c
···
 #include <asm/msr.h>
 #include <asm/paravirt.h>
 #include <asm/alternative.h>
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
 
 void __init check_bugs(void)
 {
 	identify_boot_cpu();
-#ifndef CONFIG_SMP
-	pr_info("CPU: ");
-	print_cpu_info(&boot_cpu_data);
-#endif
 
+	if (!IS_ENABLED(CONFIG_SMP)) {
+		pr_info("CPU: ");
+		print_cpu_info(&boot_cpu_data);
+	}
+
+#ifdef CONFIG_X86_32
 	/*
 	 * Check whether we are able to run this kernel safely on SMP.
 	 *
···
 	alternative_instructions();
 
 	fpu__init_check_bugs();
+#else /* CONFIG_X86_64 */
+	alternative_instructions();
+
+	/*
+	 * Make sure the first 2MB area is not mapped by huge pages
+	 * There are typically fixed size MTRRs in there and overlapping
+	 * MTRRs into large pages causes slow downs.
+	 *
+	 * Right now we don't do that with gbpages because there seems
+	 * very little benefit for that case.
+	 */
+	if (!direct_gbpages)
+		set_memory_4k((unsigned long)__va(0), 1);
+#endif
 }
···
-33
arch/x86/kernel/cpu/bugs_64.c
···
-/*
- * Copyright (C) 1994 Linus Torvalds
- * Copyright (C) 2000 SuSE
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <asm/alternative.h>
-#include <asm/bugs.h>
-#include <asm/processor.h>
-#include <asm/mtrr.h>
-#include <asm/cacheflush.h>
-
-void __init check_bugs(void)
-{
-	identify_boot_cpu();
-#if !defined(CONFIG_SMP)
-	pr_info("CPU: ");
-	print_cpu_info(&boot_cpu_data);
-#endif
-	alternative_instructions();
-
-	/*
-	 * Make sure the first 2MB area is not mapped by huge pages
-	 * There are typically fixed size MTRRs in there and overlapping
-	 * MTRRs into large pages causes slow downs.
-	 *
-	 * Right now we don't do that with gbpages because there seems
-	 * very little benefit for that case.
-	 */
-	if (!direct_gbpages)
-		set_memory_4k((unsigned long)__va(0), 1);
-}
···
-53
arch/x86/kernel/cpu/common.c
···
 	mtrr_ap_init();
 }
 
-struct msr_range {
-	unsigned	min;
-	unsigned	max;
-};
-
-static const struct msr_range msr_range_array[] = {
-	{ 0x00000000, 0x00000418},
-	{ 0xc0000000, 0xc000040b},
-	{ 0xc0010000, 0xc0010142},
-	{ 0xc0011000, 0xc001103b},
-};
-
-static void __print_cpu_msr(void)
-{
-	unsigned index_min, index_max;
-	unsigned index;
-	u64 val;
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
-		index_min = msr_range_array[i].min;
-		index_max = msr_range_array[i].max;
-
-		for (index = index_min; index < index_max; index++) {
-			if (rdmsrl_safe(index, &val))
-				continue;
-			pr_info(" MSR%08x: %016llx\n", index, val);
-		}
-	}
-}
-
-static int show_msr;
-
-static __init int setup_show_msr(char *arg)
-{
-	int num;
-
-	get_option(&arg, &num);
-
-	if (num > 0)
-		show_msr = num;
-	return 1;
-}
-__setup("show_msr=", setup_show_msr);
-
 static __init int setup_noclflush(char *arg)
 {
 	setup_clear_cpu_cap(X86_FEATURE_CLFLUSH);
···
 		pr_cont(", stepping: 0x%x)\n", c->x86_mask);
 	else
 		pr_cont(")\n");
-
-	print_cpu_msr(c);
-}
-
-void print_cpu_msr(struct cpuinfo_x86 *c)
-{
-	if (c->cpu_index < show_msr)
-		__print_cpu_msr();
 }
 
 static __init int setup_disablecpuid(char *arg)
···
+1 -1
arch/x86/kernel/smpboot.c
···
 	default_setup_apic_routing();
 	cpu0_logical_apicid = apic_bsp_setup(false);
 
-	pr_info("CPU%d: ", 0);
+	pr_info("CPU0: ");
 	print_cpu_info(&cpu_data(0));
 
 	if (is_uv_system())
···