x86/microcode: Rework early revisions reporting

The AMD side of the loader prints the microcode revision for each
logical thread on the system, which can become really noisy on huge
machines. And doing that doesn't make a whole lot of sense - the
microcode revision is already in /proc/cpuinfo.

So in case one is interested in the theoretical support of mixed silicon
steppings on AMD, one can check there.

What is also missing on the AMD side - something which people have
requested before - is showing the microcode revision the CPU had
*before* the early update.

So abstract that up in the main code and have the BSP on each vendor
provide those revision numbers.

Then, dump them only once on driver init.

On Intel, do not dump the patch date - it is not needed.

Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/CAHk-=wg=%2B8rceshMkB4VnKxmRccVLtBLPBawnewZuuqyx5U=3A@mail.gmail.com

Changed files
+37 -44
arch
x86
kernel
cpu
+11 -28
arch/x86/kernel/cpu/microcode/amd.c
··· 104 104 size_t size; 105 105 }; 106 106 107 - static u32 ucode_new_rev; 108 - 109 107 /* 110 108 * Microcode patch container file is prepended to the initrd in cpio 111 109 * format. See Documentation/arch/x86/microcode.rst ··· 440 442 * 441 443 * Returns true if container found (sets @desc), false otherwise. 442 444 */ 443 - static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size) 445 + static bool early_apply_microcode(u32 cpuid_1_eax, u32 old_rev, void *ucode, size_t size) 444 446 { 445 447 struct cont_desc desc = { 0 }; 446 448 struct microcode_amd *mc; 447 449 bool ret = false; 448 - u32 rev, dummy; 449 450 450 451 desc.cpuid_1_eax = cpuid_1_eax; 451 452 ··· 454 457 if (!mc) 455 458 return ret; 456 459 457 - native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); 458 - 459 460 /* 460 461 * Allow application of the same revision to pick up SMT-specific 461 462 * changes even if the revision of the other SMT thread is already 462 463 * up-to-date. 463 464 */ 464 - if (rev > mc->hdr.patch_id) 465 + if (old_rev > mc->hdr.patch_id) 465 466 return ret; 466 467 467 - if (!__apply_microcode_amd(mc)) { 468 - ucode_new_rev = mc->hdr.patch_id; 469 - ret = true; 470 - } 471 - 472 - return ret; 468 + return !__apply_microcode_amd(mc); 473 469 } 474 470 475 471 static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family) ··· 496 506 *ret = cp; 497 507 } 498 508 499 - void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax) 509 + void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_eax) 500 510 { 501 511 struct cpio_data cp = { }; 512 + u32 dummy; 513 + 514 + native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->old_rev, dummy); 502 515 503 516 /* Needed in load_microcode_amd() */ 504 517 ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax; ··· 510 517 if (!(cp.data && cp.size)) 511 518 return; 512 519 513 - early_apply_microcode(cpuid_1_eax, cp.data, cp.size); 520 + if (early_apply_microcode(cpuid_1_eax, ed->old_rev, cp.data, cp.size)) 
521 + native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->new_rev, dummy); 514 522 } 515 523 516 524 static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size); ··· 619 625 rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); 620 626 621 627 if (rev < mc->hdr.patch_id) { 622 - if (!__apply_microcode_amd(mc)) { 623 - ucode_new_rev = mc->hdr.patch_id; 624 - pr_info("reload patch_level=0x%08x\n", ucode_new_rev); 625 - } 628 + if (!__apply_microcode_amd(mc)) 629 + pr_info_once("reload revision: 0x%08x\n", mc->hdr.patch_id); 626 630 } 627 631 } 628 632 ··· 640 648 p = find_patch(cpu); 641 649 if (p && (p->patch_id == csig->rev)) 642 650 uci->mc = p->data; 643 - 644 - pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev); 645 651 646 652 return 0; 647 653 } ··· 680 690 681 691 rev = mc_amd->hdr.patch_id; 682 692 ret = UCODE_UPDATED; 683 - 684 - pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev); 685 693 686 694 out: 687 695 uci->cpu_sig.rev = rev; ··· 923 935 pr_warn("AMD CPU family 0x%x not supported\n", c->x86); 924 936 return NULL; 925 937 } 926 - 927 - if (ucode_new_rev) 928 - pr_info_once("microcode updated early to new patch_level=0x%08x\n", 929 - ucode_new_rev); 930 - 931 938 return &microcode_amd_ops; 932 939 } 933 940
+9 -2
arch/x86/kernel/cpu/microcode/core.c
··· 75 75 0, /* T-101 terminator */ 76 76 }; 77 77 78 + struct early_load_data early_data; 79 + 78 80 /* 79 81 * Check the current patch level on this CPU. 80 82 * ··· 155 153 return; 156 154 157 155 if (intel) 158 - load_ucode_intel_bsp(); 156 + load_ucode_intel_bsp(&early_data); 159 157 else 160 - load_ucode_amd_bsp(cpuid_1_eax); 158 + load_ucode_amd_bsp(&early_data, cpuid_1_eax); 161 159 } 162 160 163 161 void load_ucode_ap(void) ··· 827 825 828 826 if (!microcode_ops) 829 827 return -ENODEV; 828 + 829 + pr_info_once("Current revision: 0x%08x\n", (early_data.new_rev ?: early_data.old_rev)); 830 + 831 + if (early_data.new_rev) 832 + pr_info_once("Updated early from: 0x%08x\n", early_data.old_rev); 830 833 831 834 microcode_pdev = platform_device_register_simple("microcode", -1, NULL, 0); 832 835 if (IS_ERR(microcode_pdev))
+7 -10
arch/x86/kernel/cpu/microcode/intel.c
··· 339 339 static enum ucode_state apply_microcode_early(struct ucode_cpu_info *uci) 340 340 { 341 341 struct microcode_intel *mc = uci->mc; 342 - enum ucode_state ret; 343 - u32 cur_rev, date; 342 + u32 cur_rev; 344 343 345 - ret = __apply_microcode(uci, mc, &cur_rev); 346 - if (ret == UCODE_UPDATED) { 347 - date = mc->hdr.date; 348 - pr_info_once("updated early: 0x%x -> 0x%x, date = %04x-%02x-%02x\n", 349 - cur_rev, mc->hdr.rev, date & 0xffff, date >> 24, (date >> 16) & 0xff); 350 - } 351 - return ret; 344 + return __apply_microcode(uci, mc, &cur_rev); 352 345 } 353 346 354 347 static __init bool load_builtin_intel_microcode(struct cpio_data *cp) ··· 406 413 early_initcall(save_builtin_microcode); 407 414 408 415 /* Load microcode on BSP from initrd or builtin blobs */ 409 - void __init load_ucode_intel_bsp(void) 416 + void __init load_ucode_intel_bsp(struct early_load_data *ed) 410 417 { 411 418 struct ucode_cpu_info uci; 419 + 420 + ed->old_rev = intel_get_microcode_revision(); 412 421 413 422 uci.mc = get_microcode_blob(&uci, false); 414 423 if (uci.mc && apply_microcode_early(&uci) == UCODE_UPDATED) 415 424 ucode_patch_va = UCODE_BSP_LOADED; 425 + 426 + ed->new_rev = uci.cpu_sig.rev; 416 427 } 417 428 418 429 void load_ucode_intel_ap(void)
+10 -4
arch/x86/kernel/cpu/microcode/internal.h
··· 37 37 use_nmi : 1; 38 38 }; 39 39 40 + struct early_load_data { 41 + u32 old_rev; 42 + u32 new_rev; 43 + }; 44 + 45 + extern struct early_load_data early_data; 40 46 extern struct ucode_cpu_info ucode_cpu_info[]; 41 47 struct cpio_data find_microcode_in_initrd(const char *path); 42 48 ··· 98 92 extern bool force_minrev; 99 93 100 94 #ifdef CONFIG_CPU_SUP_AMD 101 - void load_ucode_amd_bsp(unsigned int family); 95 + void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family); 102 96 void load_ucode_amd_ap(unsigned int family); 103 97 int save_microcode_in_initrd_amd(unsigned int family); 104 98 void reload_ucode_amd(unsigned int cpu); 105 99 struct microcode_ops *init_amd_microcode(void); 106 100 void exit_amd_microcode(void); 107 101 #else /* CONFIG_CPU_SUP_AMD */ 108 - static inline void load_ucode_amd_bsp(unsigned int family) { } 102 + static inline void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family) { } 109 103 static inline void load_ucode_amd_ap(unsigned int family) { } 110 104 static inline int save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; } 111 105 static inline void reload_ucode_amd(unsigned int cpu) { } ··· 114 108 #endif /* !CONFIG_CPU_SUP_AMD */ 115 109 116 110 #ifdef CONFIG_CPU_SUP_INTEL 117 - void load_ucode_intel_bsp(void); 111 + void load_ucode_intel_bsp(struct early_load_data *ed); 118 112 void load_ucode_intel_ap(void); 119 113 void reload_ucode_intel(void); 120 114 struct microcode_ops *init_intel_microcode(void); 121 115 #else /* CONFIG_CPU_SUP_INTEL */ 122 - static inline void load_ucode_intel_bsp(void) { } 116 + static inline void load_ucode_intel_bsp(struct early_load_data *ed) { } 123 117 static inline void load_ucode_intel_ap(void) { } 124 118 static inline void reload_ucode_intel(void) { } 125 119 static inline struct microcode_ops *init_intel_microcode(void) { return NULL; }