Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'x86_urgent_for_v6.13_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:

- Add a terminating zero end-element to the array describing AMD CPUs
affected by erratum 1386 so that the matching loop actually
terminates instead of going off into the weeds

- Update the boot protocol documentation to mention that the
preferred address to load the kernel to is considered in the
relocatable kernel case too

- Flush the memory buffer containing the microcode patch after applying
microcode on AMD Zen1 and Zen2, to avoid unnecessary slowdowns

- Make sure the PPIN CPU feature flag is cleared on all CPUs if PPIN
has been disabled

* tag 'x86_urgent_for_v6.13_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/CPU/AMD: Terminate the erratum_1386_microcode array
x86/Documentation: Update algo in init_size description of boot protocol
x86/microcode/AMD: Flush patch buffer mapping after application
x86/mm: Carve out INVLPG inline asm for use by others
x86/cpu: Fix PPIN initialization

+41 -11
+13 -4
Documentation/arch/x86/boot.rst
··· 896 896 897 897 The kernel runtime start address is determined by the following algorithm:: 898 898 899 - if (relocatable_kernel) 900 - runtime_start = align_up(load_address, kernel_alignment) 901 - else 902 - runtime_start = pref_address 899 + if (relocatable_kernel) { 900 + if (load_address < pref_address) 901 + load_address = pref_address; 902 + runtime_start = align_up(load_address, kernel_alignment); 903 + } else { 904 + runtime_start = pref_address; 905 + } 906 + 907 + Hence the necessary memory window location and size can be estimated by 908 + a boot loader as:: 909 + 910 + memory_window_start = runtime_start; 911 + memory_window_size = init_size; 903 912 904 913 ============ =============== 905 914 Field name: handover_offset
+4
arch/x86/include/asm/tlb.h
··· 34 34 free_page_and_swap_cache(table); 35 35 } 36 36 37 + static inline void invlpg(unsigned long addr) 38 + { 39 + asm volatile("invlpg (%0)" ::"r" (addr) : "memory"); 40 + } 37 41 #endif /* _ASM_X86_TLB_H */
+1
arch/x86/kernel/cpu/amd.c
··· 798 798 static const struct x86_cpu_desc erratum_1386_microcode[] = { 799 799 AMD_CPU_DESC(0x17, 0x1, 0x2, 0x0800126e), 800 800 AMD_CPU_DESC(0x17, 0x31, 0x0, 0x08301052), 801 + {}, 801 802 }; 802 803 803 804 static void fix_erratum_1386(struct cpuinfo_x86 *c)
+1 -1
arch/x86/kernel/cpu/common.c
··· 169 169 } 170 170 171 171 clear_ppin: 172 - clear_cpu_cap(c, info->feature); 172 + setup_clear_cpu_cap(info->feature); 173 173 } 174 174 175 175 static void default_init(struct cpuinfo_x86 *c)
+20 -5
arch/x86/kernel/cpu/microcode/amd.c
··· 34 34 #include <asm/setup.h> 35 35 #include <asm/cpu.h> 36 36 #include <asm/msr.h> 37 + #include <asm/tlb.h> 37 38 38 39 #include "internal.h" 39 40 ··· 484 483 } 485 484 } 486 485 487 - static int __apply_microcode_amd(struct microcode_amd *mc) 486 + static int __apply_microcode_amd(struct microcode_amd *mc, unsigned int psize) 488 487 { 488 + unsigned long p_addr = (unsigned long)&mc->hdr.data_code; 489 489 u32 rev, dummy; 490 490 491 - native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc->hdr.data_code); 491 + native_wrmsrl(MSR_AMD64_PATCH_LOADER, p_addr); 492 + 493 + if (x86_family(bsp_cpuid_1_eax) == 0x17) { 494 + unsigned long p_addr_end = p_addr + psize - 1; 495 + 496 + invlpg(p_addr); 497 + 498 + /* 499 + * Flush next page too if patch image is crossing a page 500 + * boundary. 501 + */ 502 + if (p_addr >> PAGE_SHIFT != p_addr_end >> PAGE_SHIFT) 503 + invlpg(p_addr_end); 504 + } 492 505 493 506 /* verify patch application was successful */ 494 507 native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); ··· 544 529 if (old_rev > mc->hdr.patch_id) 545 530 return ret; 546 531 547 - return !__apply_microcode_amd(mc); 532 + return !__apply_microcode_amd(mc, desc.psize); 548 533 } 549 534 550 535 static bool get_builtin_microcode(struct cpio_data *cp) ··· 760 745 rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); 761 746 762 747 if (rev < mc->hdr.patch_id) { 763 - if (!__apply_microcode_amd(mc)) 748 + if (!__apply_microcode_amd(mc, p->size)) 764 749 pr_info_once("reload revision: 0x%08x\n", mc->hdr.patch_id); 765 750 } 766 751 } ··· 813 798 goto out; 814 799 } 815 800 816 - if (__apply_microcode_amd(mc_amd)) { 801 + if (__apply_microcode_amd(mc_amd, p->size)) { 817 802 pr_err("CPU%d: update failed for patch_level=0x%08x\n", 818 803 cpu, mc_amd->hdr.patch_id); 819 804 return UCODE_ERROR;
+2 -1
arch/x86/mm/tlb.c
··· 20 20 #include <asm/cacheflush.h> 21 21 #include <asm/apic.h> 22 22 #include <asm/perf_event.h> 23 + #include <asm/tlb.h> 23 24 24 25 #include "mm_internal.h" 25 26 ··· 1141 1140 bool cpu_pcide; 1142 1141 1143 1142 /* Flush 'addr' from the kernel PCID: */ 1144 - asm volatile("invlpg (%0)" ::"r" (addr) : "memory"); 1143 + invlpg(addr); 1145 1144 1146 1145 /* If PTI is off there is no user PCID and nothing to flush. */ 1147 1146 if (!static_cpu_has(X86_FEATURE_PTI))