x86/microcode: Rework early revisions reporting

The AMD side of the loader issues the microcode revision for each
logical thread on the system, which can become really noisy on huge
machines. And doing that doesn't make a whole lot of sense - the
microcode revision is already in /proc/cpuinfo.

So in case one is interested in the theoretical support of mixed silicon
steppings on AMD, one can check there.
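
For instance, a minimal userspace sketch (not part of this patch, just an illustration of "check there"; field names follow /proc/cpuinfo's output format) that collects the per-thread revisions:

  /*
   * Illustrative only: print the "microcode" revision field of every
   * logical CPU from /proc/cpuinfo, e.g. to spot mixed steppings.
   */
  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
  	FILE *f = fopen("/proc/cpuinfo", "r");
  	char line[256];
  	int cpu = -1;

  	if (!f)
  		return 1;

  	while (fgets(line, sizeof(line), f)) {
  		/* Remember which logical CPU the current stanza describes. */
  		if (!strncmp(line, "processor", 9))
  			sscanf(line, "processor : %d", &cpu);
  		/* The value after "microcode :" is the loaded revision. */
  		else if (!strncmp(line, "microcode", 9))
  			printf("CPU%d: %s", cpu, strchr(line, ':') + 2);
  	}

  	fclose(f);
  	return 0;
  }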

What is also missing on the AMD side - something which people have
requested before - is showing the microcode revision the CPU had
*before* the early update.

So abstract that up in the main code and have the BSP on each vendor
provide those revision numbers.
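
Concretely, the abstraction is a small per-boot record which each vendor's BSP loader fills in (taken verbatim from the internal.h hunk below):

  struct early_load_data {
  	u32 old_rev;
  	u32 new_rev;
  };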

Then, dump them only once on driver init.

On Intel, do not dump the patch date - it is not needed.
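
With the pr_info_once() calls added to core.c below, the boot log then carries at most two lines, something like this (revision values here are made up):

  microcode: Current revision: 0x0a201210
  microcode: Updated early from: 0x0a201205

The second line appears only if the early loader actually updated the BSP.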

Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/CAHk-=wg=%2B8rceshMkB4VnKxmRccVLtBLPBawnewZuuqyx5U=3A@mail.gmail.com

Changed files: +37 -44
arch/x86/kernel/cpu/microcode/amd.c (+11 -28):

@@ ... @@
 	size_t size;
 };
 
-static u32 ucode_new_rev;
-
 /*
  * Microcode patch container file is prepended to the initrd in cpio
  * format. See Documentation/arch/x86/microcode.rst
@@ ... @@
  *
  * Returns true if container found (sets @desc), false otherwise.
  */
-static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size)
+static bool early_apply_microcode(u32 cpuid_1_eax, u32 old_rev, void *ucode, size_t size)
 {
 	struct cont_desc desc = { 0 };
 	struct microcode_amd *mc;
 	bool ret = false;
-	u32 rev, dummy;
 
 	desc.cpuid_1_eax = cpuid_1_eax;
 
@@ ... @@
 	if (!mc)
 		return ret;
 
-	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
-
 	/*
 	 * Allow application of the same revision to pick up SMT-specific
 	 * changes even if the revision of the other SMT thread is already
 	 * up-to-date.
 	 */
-	if (rev > mc->hdr.patch_id)
+	if (old_rev > mc->hdr.patch_id)
 		return ret;
 
-	if (!__apply_microcode_amd(mc)) {
-		ucode_new_rev = mc->hdr.patch_id;
-		ret = true;
-	}
-
-	return ret;
+	return !__apply_microcode_amd(mc);
 }
 
 static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
@@ ... @@
 	*ret = cp;
 }
 
-void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
+void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_eax)
 {
 	struct cpio_data cp = { };
+	u32 dummy;
+
+	native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->old_rev, dummy);
 
 	/* Needed in load_microcode_amd() */
 	ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax;
@@ ... @@
 	if (!(cp.data && cp.size))
 		return;
 
-	early_apply_microcode(cpuid_1_eax, cp.data, cp.size);
+	if (early_apply_microcode(cpuid_1_eax, ed->old_rev, cp.data, cp.size))
+		native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->new_rev, dummy);
 }
 
 static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
@@ ... @@
 	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
 
 	if (rev < mc->hdr.patch_id) {
-		if (!__apply_microcode_amd(mc)) {
-			ucode_new_rev = mc->hdr.patch_id;
-			pr_info("reload patch_level=0x%08x\n", ucode_new_rev);
-		}
+		if (!__apply_microcode_amd(mc))
+			pr_info_once("reload revision: 0x%08x\n", mc->hdr.patch_id);
 	}
 }
@@ ... @@
 	p = find_patch(cpu);
 	if (p && (p->patch_id == csig->rev))
 		uci->mc = p->data;
-
-	pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);
 
 	return 0;
 }
@@ ... @@
 
 	rev = mc_amd->hdr.patch_id;
 	ret = UCODE_UPDATED;
-
-	pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);
 
 out:
 	uci->cpu_sig.rev = rev;
@@ ... @@
 		pr_warn("AMD CPU family 0x%x not supported\n", c->x86);
 		return NULL;
 	}
-
-	if (ucode_new_rev)
-		pr_info_once("microcode updated early to new patch_level=0x%08x\n",
-			     ucode_new_rev);
-
 	return &microcode_amd_ops;
 }
arch/x86/kernel/cpu/microcode/core.c (+9 -2):

@@ ... @@
 	0, /* T-101 terminator */
 };
 
+struct early_load_data early_data;
+
 /*
  * Check the current patch level on this CPU.
  *
@@ ... @@
 		return;
 
 	if (intel)
-		load_ucode_intel_bsp();
+		load_ucode_intel_bsp(&early_data);
 	else
-		load_ucode_amd_bsp(cpuid_1_eax);
+		load_ucode_amd_bsp(&early_data, cpuid_1_eax);
 }
 
 void load_ucode_ap(void)
@@ ... @@
 
 	if (!microcode_ops)
 		return -ENODEV;
+
+	pr_info_once("Current revision: 0x%08x\n", (early_data.new_rev ?: early_data.old_rev));
+
+	if (early_data.new_rev)
+		pr_info_once("Updated early from: 0x%08x\n", early_data.old_rev);
 
 	microcode_pdev = platform_device_register_simple("microcode", -1, NULL, 0);
 	if (IS_ERR(microcode_pdev))
arch/x86/kernel/cpu/microcode/intel.c (+7 -10):

@@ ... @@
 static enum ucode_state apply_microcode_early(struct ucode_cpu_info *uci)
 {
 	struct microcode_intel *mc = uci->mc;
-	enum ucode_state ret;
-	u32 cur_rev, date;
+	u32 cur_rev;
 
-	ret = __apply_microcode(uci, mc, &cur_rev);
-	if (ret == UCODE_UPDATED) {
-		date = mc->hdr.date;
-		pr_info_once("updated early: 0x%x -> 0x%x, date = %04x-%02x-%02x\n",
-			     cur_rev, mc->hdr.rev, date & 0xffff, date >> 24, (date >> 16) & 0xff);
-	}
-	return ret;
+	return __apply_microcode(uci, mc, &cur_rev);
 }
 
 static __init bool load_builtin_intel_microcode(struct cpio_data *cp)
@@ ... @@
 early_initcall(save_builtin_microcode);
 
 /* Load microcode on BSP from initrd or builtin blobs */
-void __init load_ucode_intel_bsp(void)
+void __init load_ucode_intel_bsp(struct early_load_data *ed)
 {
 	struct ucode_cpu_info uci;
+
+	ed->old_rev = intel_get_microcode_revision();
 
 	uci.mc = get_microcode_blob(&uci, false);
 	if (uci.mc && apply_microcode_early(&uci) == UCODE_UPDATED)
 		ucode_patch_va = UCODE_BSP_LOADED;
+
+	ed->new_rev = uci.cpu_sig.rev;
 }
 
 void load_ucode_intel_ap(void)
arch/x86/kernel/cpu/microcode/internal.h (+10 -4):

@@ ... @@
 		     use_nmi	: 1;
 };
 
+struct early_load_data {
+	u32 old_rev;
+	u32 new_rev;
+};
+
+extern struct early_load_data early_data;
 extern struct ucode_cpu_info ucode_cpu_info[];
 struct cpio_data find_microcode_in_initrd(const char *path);
 
@@ ... @@
 extern bool force_minrev;
 
 #ifdef CONFIG_CPU_SUP_AMD
-void load_ucode_amd_bsp(unsigned int family);
+void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family);
 void load_ucode_amd_ap(unsigned int family);
 int save_microcode_in_initrd_amd(unsigned int family);
 void reload_ucode_amd(unsigned int cpu);
 struct microcode_ops *init_amd_microcode(void);
 void exit_amd_microcode(void);
 #else /* CONFIG_CPU_SUP_AMD */
-static inline void load_ucode_amd_bsp(unsigned int family) { }
+static inline void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family) { }
 static inline void load_ucode_amd_ap(unsigned int family) { }
 static inline int save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
 static inline void reload_ucode_amd(unsigned int cpu) { }
@@ ... @@
 #endif /* !CONFIG_CPU_SUP_AMD */
 
 #ifdef CONFIG_CPU_SUP_INTEL
-void load_ucode_intel_bsp(void);
+void load_ucode_intel_bsp(struct early_load_data *ed);
 void load_ucode_intel_ap(void);
 void reload_ucode_intel(void);
 struct microcode_ops *init_intel_microcode(void);
 #else /* CONFIG_CPU_SUP_INTEL */
-static inline void load_ucode_intel_bsp(void) { }
+static inline void load_ucode_intel_bsp(struct early_load_data *ed) { }
 static inline void load_ucode_intel_ap(void) { }
 static inline void reload_ucode_intel(void) { }
 static inline struct microcode_ops *init_intel_microcode(void) { return NULL; }