Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
"There's a number of fixes:

- a round of fixes for CPUID-less legacy CPUs
- a number of microcode loader fixes
 - i8042 detection robustness fixes
- stack dump/unwinder fixes
- x86 SoC platform driver fixes
- a GCC 7 warning fix
- virtualization related fixes"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
Revert "x86/unwind: Detect bad stack return address"
x86/paravirt: Mark unused patch_default label
x86/microcode/AMD: Reload proper initrd start address
x86/platform/intel/quark: Add printf attribute to imr_self_test_result()
x86/platform/intel-mid: Switch MPU3050 driver to IIO
x86/alternatives: Do not use sync_core() to serialize I$
x86/topology: Document cpu_llc_id
x86/hyperv: Handle unknown NMIs on one CPU when unknown_nmi_panic
x86/asm: Rewrite sync_core() to use IRET-to-self
x86/microcode/intel: Replace sync_core() with native_cpuid()
Revert "x86/boot: Fail the boot if !M486 and CPUID is missing"
x86/asm/32: Make sync_core() handle missing CPUID on all 32-bit kernels
x86/cpu: Probe CPUID leaf 6 even when cpuid_level == 6
x86/tools: Fix gcc-7 warning in relocs.c
x86/unwind: Dump stack data on warnings
x86/unwind: Adjust last frame check for aligned function stacks
x86/init: Fix a couple of comment typos
x86/init: Remove i8042_detect() from platform ops
Input: i8042 - Trust firmware a bit more when probing on X86
x86/init: Add i8042 state to the platform data
...

+280 -118
+9
Documentation/x86/topology.txt
···
   The maximum possible number of packages in the system. Helpful for per
   package facilities to preallocate per package information.

+  - cpu_llc_id:
+
+    A per-CPU variable containing:
+    - On Intel, the first APIC ID of the list of CPUs sharing the Last Level
+      Cache
+
+    - On AMD, the Node ID or Core Complex ID containing the Last Level
+      Cache. In general, it is a number identifying an LLC uniquely on the
+      system.

 * Cores:
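For illustration only, a kernel-style sketch of how a per-package facility might consume this variable; cpus_sharing_llc() is a hypothetical helper, not part of this series:

	#include <linux/cpumask.h>
	#include <asm/smp.h>	/* DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) */

	/* Hypothetical helper: count online CPUs sharing an LLC with @cpu. */
	static int cpus_sharing_llc(int cpu)
	{
		u16 llc = per_cpu(cpu_llc_id, cpu);
		int other, count = 0;

		for_each_online_cpu(other)
			if (per_cpu(cpu_llc_id, other) == llc)
				count++;

		return count;
	}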
-6
arch/x86/boot/cpu.c
···
 		return -1;
 	}

-	if (CONFIG_X86_MINIMUM_CPU_FAMILY <= 4 && !IS_ENABLED(CONFIG_M486) &&
-	    !has_eflag(X86_EFLAGS_ID)) {
-		printf("This kernel requires a CPU with the CPUID instruction. Build with CONFIG_M486=y to run on this CPU.\n");
-		return -1;
-	}
-
 	if (err_flags) {
 		puts("This kernel requires the following features "
 		     "not present on the CPU:\n");
+59 -23
arch/x86/include/asm/processor.h
···
 	rep_nop();
 }

-/* Stop speculative execution and prefetching of modified code. */
+/*
+ * This function forces the icache and prefetched instruction stream to
+ * catch up with reality in two very specific cases:
+ *
+ * a) Text was modified using one virtual address and is about to be executed
+ *    from the same physical page at a different virtual address.
+ *
+ * b) Text was modified on a different CPU, may subsequently be
+ *    executed on this CPU, and you want to make sure the new version
+ *    gets executed. This generally means you're calling this in an IPI.
+ *
+ * If you're calling this for a different reason, you're probably doing
+ * it wrong.
+ */
 static inline void sync_core(void)
 {
-	int tmp;
-
-#ifdef CONFIG_M486
-	/*
-	 * Do a CPUID if available, otherwise do a jump. The jump
-	 * can conveniently enough be the jump around CPUID.
-	 */
-	asm volatile("cmpl %2,%1\n\t"
-		     "jl 1f\n\t"
-		     "cpuid\n"
-		     "1:"
-		     : "=a" (tmp)
-		     : "rm" (boot_cpu_data.cpuid_level), "ri" (0), "0" (1)
-		     : "ebx", "ecx", "edx", "memory");
+	/*
+	 * There are quite a few ways to do this. IRET-to-self is nice
+	 * because it works on every CPU, at any CPL (so it's compatible
+	 * with paravirtualization), and it never exits to a hypervisor.
+	 * The only down sides are that it's a bit slow (it seems to be
+	 * a bit more than 2x slower than the fastest options) and that
+	 * it unmasks NMIs. The "push %cs" is needed because, in
+	 * paravirtual environments, __KERNEL_CS may not be a valid CS
+	 * value when we do IRET directly.
+	 *
+	 * In case NMI unmasking or performance ever becomes a problem,
+	 * the next best option appears to be MOV-to-CR2 and an
+	 * unconditional jump. That sequence also works on all CPUs,
+	 * but it will fault at CPL3 (i.e. Xen PV and lguest).
+	 *
+	 * CPUID is the conventional way, but it's nasty: it doesn't
+	 * exist on some 486-like CPUs, and it usually exits to a
+	 * hypervisor.
+	 *
+	 * Like all of Linux's memory ordering operations, this is a
+	 * compiler barrier as well.
+	 */
+	register void *__sp asm(_ASM_SP);
+
+#ifdef CONFIG_X86_32
+	asm volatile (
+		"pushfl\n\t"
+		"pushl %%cs\n\t"
+		"pushl $1f\n\t"
+		"iret\n\t"
+		"1:"
+		: "+r" (__sp) : : "memory");
 #else
-	/*
-	 * CPUID is a barrier to speculative execution.
-	 * Prefetched instructions are automatically
-	 * invalidated when modified.
-	 */
-	asm volatile("cpuid"
-		     : "=a" (tmp)
-		     : "0" (1)
-		     : "ebx", "ecx", "edx", "memory");
+	unsigned int tmp;
+
+	asm volatile (
+		"mov %%ss, %0\n\t"
+		"pushq %q0\n\t"
+		"pushq %%rsp\n\t"
+		"addq $8, (%%rsp)\n\t"
+		"pushfq\n\t"
+		"mov %%cs, %0\n\t"
+		"pushq %q0\n\t"
+		"pushq $1f\n\t"
+		"iretq\n\t"
+		"1:"
+		: "=&r" (tmp), "+r" (__sp) : : "cc", "memory");
 #endif
 }
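The new comment calls CPUID "the conventional way". For reference, that conventional serializing sequence in freestanding C with GCC inline asm — a sketch only, with exactly the drawbacks the comment lists (missing on some 486-class CPUs, and usually a hypervisor exit):

	/* Serialize execution via CPUID(1); superseded above by IRET-to-self. */
	static inline void cpuid_serialize(void)
	{
		unsigned int eax = 1, ebx, ecx, edx;

		asm volatile("cpuid"
			     : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
			     : "0" (eax)
			     : "memory");	/* compiler barrier, as in the kernel */
	}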
+1 -1
arch/x86/include/asm/unwind.h
···
 	struct task_struct *task;
 	int graph_idx;
 #ifdef CONFIG_FRAME_POINTER
-	unsigned long *bp;
+	unsigned long *bp, *orig_sp;
 	struct pt_regs *regs;
 #else
 	unsigned long *sp;
+21 -5
arch/x86/include/asm/x86_init.h
···
 /**
  * struct x86_init_oem - oem platform specific customizing functions
- * @arch_setup:			platform specific architecure setup
+ * @arch_setup:			platform specific architecture setup
  * @banner:			print a platform specific banner
  */
 struct x86_init_oem {
···
 };

 /**
+ * enum x86_legacy_i8042_state - i8042 keyboard controller state
+ * @X86_LEGACY_I8042_PLATFORM_ABSENT: the controller is always absent on
+ *	the given platform/subarch.
+ * @X86_LEGACY_I8042_FIRMWARE_ABSENT: firmware reports that the controller
+ *	is absent.
+ * @X86_LEGACY_I8042_EXPECTED_PRESENT: the controller is likely to be
+ *	present, the i8042 driver should probe for controller existence.
+ */
+enum x86_legacy_i8042_state {
+	X86_LEGACY_I8042_PLATFORM_ABSENT,
+	X86_LEGACY_I8042_FIRMWARE_ABSENT,
+	X86_LEGACY_I8042_EXPECTED_PRESENT,
+};
+
+/**
  * struct x86_legacy_features - legacy x86 features
  *
+ * @i8042: indicates whether we expect the machine to have an i8042
+ *	controller present.
  * @rtc: this device has a CMOS real-time clock present
  * @reserve_bios_regions: boot code will search for the EBDA address and the
  *	start of the 640k - 1M BIOS region. If false, the platform must
···
  * documentation for further details.
  */
 struct x86_legacy_features {
+	enum x86_legacy_i8042_state i8042;
 	int rtc;
 	int reserve_bios_regions;
 	struct x86_legacy_devices devices;
···
  * @set_wallclock:		set time back to HW clock
  * @is_untracked_pat_range	exclude from PAT logic
  * @nmi_init			enable NMI on cpus
- * @i8042_detect		pre-detect if i8042 controller exists
  * @save_sched_clock_state:	save state for sched_clock() on suspend
  * @restore_sched_clock_state:	restore state for sched_clock() on resume
- * @apic_post_init:		adjust apic if neeeded
+ * @apic_post_init:		adjust apic if needed
  * @legacy:			legacy features
  * @set_legacy_features:	override legacy features. Use of this callback
  *				is highly discouraged. You should only need
  *				this if your hardware platform requires further
- *				custom fine tuning far beyong what may be
+ *				custom fine tuning far beyond what may be
  *				possible in x86_early_init_platform_quirks() by
  *				only using the current x86_hardware_subarch
  *				semantics.
···
 	bool (*is_untracked_pat_range)(u64 start, u64 end);
 	void (*nmi_init)(void);
 	unsigned char (*get_nmi_reason)(void);
-	int (*i8042_detect)(void);
 	void (*save_sched_clock_state)(void);
 	void (*restore_sched_clock_state)(void);
 	void (*apic_post_init)(void);
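A consumer-side sketch of the intended three-state logic (hypothetical driver code; the real in-tree user is the i8042 probe change at the end of this series):

	#include <linux/errno.h>
	#include <asm/x86_init.h>

	static int example_i8042_probe(void)
	{
		switch (x86_platform.legacy.i8042) {
		case X86_LEGACY_I8042_PLATFORM_ABSENT:
			return -ENODEV;	/* never present on this subarch */
		case X86_LEGACY_I8042_FIRMWARE_ABSENT:
			return -ENODEV;	/* FADT says it is not there */
		case X86_LEGACY_I8042_EXPECTED_PRESENT:
		default:
			return 0;	/* worth probing the ports */
		}
	}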
+7
arch/x86/kernel/acpi/boot.c
···
 		x86_platform.legacy.devices.pnpbios = 0;
 	}

+	if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
+	    !(acpi_gbl_FADT.boot_flags & ACPI_FADT_8042) &&
+	    x86_platform.legacy.i8042 != X86_LEGACY_I8042_PLATFORM_ABSENT) {
+		pr_debug("ACPI: i8042 controller is absent\n");
+		x86_platform.legacy.i8042 = X86_LEGACY_I8042_FIRMWARE_ABSENT;
+	}
+
 	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_CMOS_RTC) {
 		pr_debug("ACPI: not registering RTC platform device\n");
 		x86_platform.legacy.rtc = 0;
+10 -5
arch/x86/kernel/alternative.c
···
 		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
 }

-static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr)
+/*
+ * "noinline" to cause control flow change and thus invalidate I$ and
+ * cause refetch after modification.
+ */
+static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
 {
 	unsigned long flags;
···
 	local_irq_save(flags);
 	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
-	sync_core();
 	local_irq_restore(flags);

 	DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ",
···
  * This implies that asymmetric systems where APs have less capabilities than
  * the boot processor are not handled. Tough. Make sure you disable such
  * features by hand.
+ *
+ * Marked "noinline" to cause control flow change and thus insn cache
+ * to refetch changed I$ lines.
  */
-void __init_or_module apply_alternatives(struct alt_instr *start,
-					 struct alt_instr *end)
+void __init_or_module noinline apply_alternatives(struct alt_instr *start,
+						  struct alt_instr *end)
 {
 	struct alt_instr *a;
 	u8 *instr, *replacement;
···
 	unsigned long flags;
 	local_irq_save(flags);
 	memcpy(addr, opcode, len);
-	sync_core();
 	local_irq_restore(flags);
 	/* Could also do a CLFLUSH here to speed up CPU recovery; but
 	   that causes hangs on some VIA CPUs. */
+4 -3
arch/x86/kernel/cpu/common.c
···
 		c->x86_capability[CPUID_1_EDX] = edx;
 	}

+	/* Thermal and Power Management Leaf: level 0x00000006 (eax) */
+	if (c->cpuid_level >= 0x00000006)
+		c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);
+
 	/* Additional Intel-defined flags: level 0x00000007 */
 	if (c->cpuid_level >= 0x00000007) {
 		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
-
 		c->x86_capability[CPUID_7_0_EBX] = ebx;
-
-		c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);
 		c->x86_capability[CPUID_7_ECX] = ecx;
 	}
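The bug fixed here: leaf 6 was only read inside the cpuid_level >= 7 branch, so a CPU whose maximum level is exactly 6 never had its thermal/power flags populated. A user-space sketch of the corrected probing order, using GCC's cpuid.h (illustrative only):

	#include <cpuid.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;
		unsigned int max = __get_cpuid_max(0, NULL);	/* CPUID(0).EAX */

		/* Gate each leaf on the reported maximum level, as the fix does. */
		if (max >= 6 && __get_cpuid(6, &eax, &ebx, &ecx, &edx))
			printf("leaf 6 eax: %#x\n", eax);
		if (max >= 7) {
			__cpuid_count(7, 0, eax, ebx, ecx, edx);
			printf("leaf 7 ebx: %#x ecx: %#x\n", ebx, ecx);
		}
		return 0;
	}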
+33 -23
arch/x86/kernel/cpu/microcode/amd.c
···
 /*
  * This scans the ucode blob for the proper container as we can have multiple
- * containers glued together.
+ * containers glued together. Returns the equivalence ID from the equivalence
+ * table or 0 if none found.
  */
-static struct container
-find_proper_container(u8 *ucode, size_t size, u16 *ret_id)
+static u16
+find_proper_container(u8 *ucode, size_t size, struct container *ret_cont)
 {
 	struct container ret = { NULL, 0 };
 	u32 eax, ebx, ecx, edx;
···
 	if (header[0] != UCODE_MAGIC ||
 	    header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
 	    header[2] == 0)                            /* size */
-		return ret;
+		return eq_id;

 	eax = 0x00000001;
 	ecx = 0;
···
 		 * ucode update loop below
 		 */
 		left = ret.size - offset;
-		*ret_id = eq_id;
-		return ret;
+
+		*ret_cont = ret;
+		return eq_id;
 	}

 	/*
···
 		ucode = data;
 	}

-	return ret;
+	return eq_id;
 }

 static int __apply_microcode_amd(struct microcode_amd *mc_amd)
···
  * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
  * load_microcode_amd() to save equivalent cpu table and microcode patches in
  * kernel heap memory.
+ *
+ * Returns true if container found (sets @ret_cont), false otherwise.
  */
-static struct container
-apply_microcode_early_amd(void *ucode, size_t size, bool save_patch)
+static bool apply_microcode_early_amd(void *ucode, size_t size, bool save_patch,
+				      struct container *ret_cont)
 {
-	struct container ret = { NULL, 0 };
 	u8 (*patch)[PATCH_MAX_SIZE];
+	u32 rev, *header, *new_rev;
+	struct container ret;
 	int offset, left;
-	u32 rev, *header;
-	u8 *data;
 	u16 eq_id = 0;
-	u32 *new_rev;
+	u8 *data;

 #ifdef CONFIG_X86_32
 	new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
···
 #endif

 	if (check_current_patch_level(&rev, true))
-		return (struct container){ NULL, 0 };
+		return false;

-	ret = find_proper_container(ucode, size, &eq_id);
+	eq_id = find_proper_container(ucode, size, &ret);
 	if (!eq_id)
-		return (struct container){ NULL, 0 };
+		return false;

 	this_equiv_id = eq_id;
 	header = (u32 *)ret.data;
···
 		data   += offset;
 		left   -= offset;
 	}
-	return ret;
+
+	if (ret_cont)
+		*ret_cont = ret;
+
+	return true;
 }

 static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
···
 void __init load_ucode_amd_bsp(unsigned int family)
 {
 	struct ucode_cpu_info *uci;
+	u32 eax, ebx, ecx, edx;
 	struct cpio_data cp;
 	const char *path;
 	bool use_pa;
···
 		return;

 	/* Get BSP's CPUID.EAX(1), needed in load_microcode_amd() */
-	uci->cpu_sig.sig = cpuid_eax(1);
+	eax = 1;
+	ecx = 0;
+	native_cpuid(&eax, &ebx, &ecx, &edx);
+	uci->cpu_sig.sig = eax;

-	apply_microcode_early_amd(cp.data, cp.size, true);
+	apply_microcode_early_amd(cp.data, cp.size, true, NULL);
 }

 #ifdef CONFIG_X86_32
···
 	 * This would set amd_ucode_patch above so that the following APs can
 	 * use it directly instead of going down this path again.
 	 */
-	apply_microcode_early_amd(cp.data, cp.size, true);
+	apply_microcode_early_amd(cp.data, cp.size, true, NULL);
 }
 #else
 void load_ucode_amd_ap(unsigned int family)
···
 		}
 	}

-	cont = apply_microcode_early_amd(cp.data, cp.size, false);
-	if (!(cont.data && cont.size)) {
+	if (!apply_microcode_early_amd(cp.data, cp.size, false, &cont)) {
 		cont.size = -1;
 		return;
 	}
···
 		return -EINVAL;
 	}

-	cont = find_proper_container(cp.data, cp.size, &eq_id);
+	eq_id = find_proper_container(cp.data, cp.size, &cont);
 	if (!eq_id) {
 		cont.size = -1;
 		return -EINVAL;
+24 -16
arch/x86/kernel/cpu/microcode/core.c
···
 #define DRIVER_VERSION	"2.2"

 static struct microcode_ops	*microcode_ops;
-static bool dis_ucode_ldr;
+static bool dis_ucode_ldr = true;

 LIST_HEAD(microcode_cache);
···
 static bool __init check_loader_disabled_bsp(void)
 {
 	static const char *__dis_opt_str = "dis_ucode_ldr";
+	u32 a, b, c, d;

 #ifdef CONFIG_X86_32
 	const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
···
 	bool *res = &dis_ucode_ldr;
 #endif

-	if (cmdline_find_option_bool(cmdline, option))
-		*res = true;
+	if (!have_cpuid_p())
+		return *res;
+
+	a = 1;
+	c = 0;
+	native_cpuid(&a, &b, &c, &d);
+
+	/*
+	 * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
+	 * completely accurate as xen pv guests don't see that CPUID bit set but
+	 * that's good enough as they don't land on the BSP path anyway.
+	 */
+	if (c & BIT(31))
+		return *res;
+
+	if (cmdline_find_option_bool(cmdline, option) <= 0)
+		*res = false;

 	return *res;
 }
···
 	unsigned int family;

 	if (check_loader_disabled_bsp())
-		return;
-
-	if (!have_cpuid_p())
 		return;

 	vendor = x86_cpuid_vendor();
···
 	int vendor, family;

 	if (check_loader_disabled_ap())
-		return;
-
-	if (!have_cpuid_p())
 		return;

 	vendor = x86_cpuid_vendor();
···
 # endif

 	/*
-	 * Did we relocate the ramdisk?
-	 *
-	 * So we possibly relocate the ramdisk *after* applying microcode on the
-	 * BSP so we rely on use_pa (use physical addresses) - even if it is not
-	 * absolutely correct - to determine whether we've done the ramdisk
-	 * relocation already.
+	 * Fixup the start address: after reserve_initrd() runs, initrd_start
+	 * has the virtual address of the beginning of the initrd. It also
+	 * possibly relocates the ramdisk. In either case, initrd_start contains
+	 * the updated address so use that instead.
 	 */
-	if (!use_pa && relocated_ramdisk)
+	if (!use_pa && initrd_start)
 		start = initrd_start;

 	return find_cpio_data(path, (void *)start, size, NULL);
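The new bail-out keys off CPUID(1).ECX bit 31, which is reserved-zero on bare metal and set by hypervisors. The same test in user space (a sketch, using GCC's cpuid.h):

	#include <cpuid.h>
	#include <stdbool.h>
	#include <stdio.h>

	static bool running_on_hypervisor(void)
	{
		unsigned int eax, ebx, ecx, edx;

		if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
			return false;

		return ecx & (1u << 31);	/* CPUID(1).ECX[31]: hypervisor present */
	}

	int main(void)
	{
		printf("hypervisor: %s\n", running_on_hypervisor() ? "yes" : "no");
		return 0;
	}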
+23 -3
arch/x86/kernel/cpu/microcode/intel.c
···
 	return patch;
 }

+static void cpuid_1(void)
+{
+	/*
+	 * According to the Intel SDM, Volume 3, 9.11.7:
+	 *
+	 *   CPUID returns a value in a model specific register in
+	 *   addition to its usual register return values. The
+	 *   semantics of CPUID cause it to deposit an update ID value
+	 *   in the 64-bit model-specific register at address 08BH
+	 *   (IA32_BIOS_SIGN_ID). If no update is present in the
+	 *   processor, the value in the MSR remains unmodified.
+	 *
+	 * Use native_cpuid -- this code runs very early and we don't
+	 * want to mess with paravirt.
+	 */
+	unsigned int eax = 1, ebx, ecx = 0, edx;
+
+	native_cpuid(&eax, &ebx, &ecx, &edx);
+}
+
 static int collect_cpu_info_early(struct ucode_cpu_info *uci)
 {
 	unsigned int val[2];
···
 	native_wrmsrl(MSR_IA32_UCODE_REV, 0);

 	/* As documented in the SDM: Do a CPUID 1 here */
-	sync_core();
+	cpuid_1();

 	/* get the current revision from MSR 0x8B */
 	native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
···
 	native_wrmsrl(MSR_IA32_UCODE_REV, 0);

 	/* As documented in the SDM: Do a CPUID 1 here */
-	sync_core();
+	cpuid_1();

 	/* get the current revision from MSR 0x8B */
 	native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
···
 	wrmsrl(MSR_IA32_UCODE_REV, 0);

 	/* As documented in the SDM: Do a CPUID 1 here */
-	sync_core();
+	cpuid_1();

 	/* get the current revision from MSR 0x8B */
 	rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
+24
arch/x86/kernel/cpu/mshyperv.c
···
 #include <asm/apic.h>
 #include <asm/timer.h>
 #include <asm/reboot.h>
+#include <asm/nmi.h>

 struct ms_hyperv_info ms_hyperv;
 EXPORT_SYMBOL_GPL(ms_hyperv);
···
 	return 0;
 }

+#ifdef CONFIG_X86_LOCAL_APIC
+/*
+ * Prior to WS2016 Debug-VM sends NMIs to all CPUs which makes
+ * it difficult to process CHANNELMSG_UNLOAD in case of crash. Handle
+ * unknown NMI on the first CPU which gets it.
+ */
+static int hv_nmi_unknown(unsigned int val, struct pt_regs *regs)
+{
+	static atomic_t nmi_cpu = ATOMIC_INIT(-1);
+
+	if (!unknown_nmi_panic)
+		return NMI_DONE;
+
+	if (atomic_cmpxchg(&nmi_cpu, -1, raw_smp_processor_id()) != -1)
+		return NMI_HANDLED;
+
+	return NMI_DONE;
+}
+#endif
+
 static void __init ms_hyperv_init_platform(void)
 {
 	/*
···
 		pr_info("HyperV: LAPIC Timer Frequency: %#x\n",
 			lapic_timer_frequency);
 	}
+
+	register_nmi_handler(NMI_UNKNOWN, hv_nmi_unknown, NMI_FLAG_FIRST,
+			     "hv_nmi_unknown");
 #endif

 	if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
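The handler relies on a compare-and-swap so that exactly one CPU claims the unknown NMI and every other CPU swallows its copy. The same first-claimer pattern in portable C11 (a sketch, not the kernel code):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_int claimer = ATOMIC_VAR_INIT(-1);

	/*
	 * Returns true for every caller except the first; mirrors the
	 * atomic_cmpxchg(&nmi_cpu, -1, raw_smp_processor_id()) test above.
	 */
	static bool already_claimed(int cpu)
	{
		int expected = -1;

		return !atomic_compare_exchange_strong(&claimer, &expected, cpu);
	}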
+1 -1
arch/x86/kernel/paravirt_patch_32.c
···
 #endif

 	default:
-patch_default:
+patch_default: __maybe_unused
 		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
 		break;
+1 -1
arch/x86/kernel/paravirt_patch_64.c
···
 #endif

 	default:
-patch_default:
+patch_default: __maybe_unused
 		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
 		break;
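GCC 7's -Wunused-label fires on both files because the patch_default: label is only reachable via a goto inside an #ifdef'ed case; __maybe_unused (the kernel's wrapper for __attribute__((unused))) silences it without deleting the label. A minimal standalone reproduction of the pattern (hypothetical names):

	/* Build with gcc-7 -Wunused-label; without the attribute the label
	 * warns whenever HAVE_FAST_PATH is not defined. */
	#ifdef HAVE_FAST_PATH
	extern int fast_patch_available(void);
	#endif

	int patch(int type)
	{
		int ret;

		switch (type) {
	#ifdef HAVE_FAST_PATH
		case 1:
			if (!fast_patch_available())
				goto patch_default;
			ret = 1;
			break;
	#endif
		default:
	patch_default: __attribute__((unused))
			ret = 0;
			break;
		}
		return ret;
	}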
+5
arch/x86/kernel/platform-quirks.c
···
 void __init x86_early_init_platform_quirks(void)
 {
+	x86_platform.legacy.i8042 = X86_LEGACY_I8042_EXPECTED_PRESENT;
 	x86_platform.legacy.rtc = 1;
 	x86_platform.legacy.reserve_bios_regions = 0;
 	x86_platform.legacy.devices.pnpbios = 1;
···
 		break;
 	case X86_SUBARCH_XEN:
 	case X86_SUBARCH_LGUEST:
+		x86_platform.legacy.devices.pnpbios = 0;
+		x86_platform.legacy.rtc = 0;
+		break;
 	case X86_SUBARCH_INTEL_MID:
 	case X86_SUBARCH_CE4100:
 		x86_platform.legacy.devices.pnpbios = 0;
 		x86_platform.legacy.rtc = 0;
+		x86_platform.legacy.i8042 = X86_LEGACY_I8042_PLATFORM_ABSENT;
 		break;
 	}
+46 -10
arch/x86/kernel/unwind_frame.c
···
 #define FRAME_HEADER_SIZE (sizeof(long) * 2)

+static void unwind_dump(struct unwind_state *state, unsigned long *sp)
+{
+	static bool dumped_before = false;
+	bool prev_zero, zero = false;
+	unsigned long word;
+
+	if (dumped_before)
+		return;
+
+	dumped_before = true;
+
+	printk_deferred("unwind stack type:%d next_sp:%p mask:%lx graph_idx:%d\n",
+			state->stack_info.type, state->stack_info.next_sp,
+			state->stack_mask, state->graph_idx);
+
+	for (sp = state->orig_sp; sp < state->stack_info.end; sp++) {
+		word = READ_ONCE_NOCHECK(*sp);
+
+		prev_zero = zero;
+		zero = word == 0;
+
+		if (zero) {
+			if (!prev_zero)
+				printk_deferred("%p: %016x ...\n", sp, 0);
+			continue;
+		}
+
+		printk_deferred("%p: %016lx (%pB)\n", sp, word, (void *)word);
+	}
+}
+
 unsigned long unwind_get_return_address(struct unwind_state *state)
 {
 	unsigned long addr;
···
 	addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, *addr_p,
 				     addr_p);

-	if (!__kernel_text_address(addr)) {
-		printk_deferred_once(KERN_WARNING
-			"WARNING: unrecognized kernel stack return address %p at %p in %s:%d\n",
-			(void *)addr, addr_p, state->task->comm,
-			state->task->pid);
-		return 0;
-	}
-
-	return addr;
+	return __kernel_text_address(addr) ? addr : 0;
 }
 EXPORT_SYMBOL_GPL(unwind_get_return_address);
···
 	unsigned long bp = (unsigned long)state->bp;
 	unsigned long regs = (unsigned long)task_pt_regs(state->task);

-	return bp == regs - FRAME_HEADER_SIZE;
+	/*
+	 * We have to check for the last task frame at two different locations
+	 * because gcc can occasionally decide to realign the stack pointer and
+	 * change the offset of the stack frame by a word in the prologue of a
+	 * function called by head/entry code.
+	 */
+	return bp == regs - FRAME_HEADER_SIZE ||
+	       bp == regs - FRAME_HEADER_SIZE - sizeof(long);
 }

 /*
···
 			  size_t len)
 {
 	struct stack_info *info = &state->stack_info;
+	enum stack_type orig_type = info->type;

 	/*
 	 * If addr isn't on the current stack, switch to the next one.
···
 	if (get_stack_info(info->next_sp, state->task, info,
 			   &state->stack_mask))
 		return false;
+
+	if (!state->orig_sp || info->type != orig_type)
+		state->orig_sp = addr;

 	return true;
 }
···
 			"WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n",
 			state->regs, state->task->comm,
 			state->task->pid, next_frame);
+		unwind_dump(state, (unsigned long *)state->regs);
 	} else {
 		printk_deferred_once(KERN_WARNING
 			"WARNING: kernel stack frame pointer at %p in %s:%d has bad value %p\n",
 			state->bp, state->task->comm,
 			state->task->pid, next_frame);
+		unwind_dump(state, state->bp);
 	}
 the_end:
 	state->stack_info.type = STACK_TYPE_UNKNOWN;
-2
arch/x86/kernel/x86_init.c
···
 };

 static void default_nmi_init(void) { };
-static int default_i8042_detect(void) { return 1; };

 struct x86_platform_ops x86_platform __ro_after_init = {
 	.calibrate_cpu			= native_calibrate_cpu,
···
 	.is_untracked_pat_range		= is_ISA_range,
 	.nmi_init			= default_nmi_init,
 	.get_nmi_reason			= default_get_nmi_reason,
-	.i8042_detect			= default_i8042_detect,
 	.save_sched_clock_state		= tsc_save_sched_clock_state,
 	.restore_sched_clock_state	= tsc_restore_sched_clock_state,
 };
-6
arch/x86/platform/ce4100/ce4100.c
···
 #include <asm/io_apic.h>
 #include <asm/emergency-restart.h>

-static int ce4100_i8042_detect(void)
-{
-	return 0;
-}
-
 /*
  * The CE4100 platform has an internal 8051 Microcontroller which is
  * responsible for signaling to the external Power Management Unit the
···
 void __init x86_ce4100_early_setup(void)
 {
 	x86_init.oem.arch_setup = sdv_arch_setup;
-	x86_platform.i8042_detect = ce4100_i8042_detect;
 	x86_init.resources.probe_roms = x86_init_noop;
 	x86_init.mpparse.get_smp_config = x86_init_uint_noop;
 	x86_init.mpparse.find_smp_config = x86_init_noop;
+1 -1
arch/x86/platform/intel-mid/device_libs/Makefile
···
 # I2C Devices
 obj-$(subst m,y,$(CONFIG_SENSORS_EMC1403)) += platform_emc1403.o
 obj-$(subst m,y,$(CONFIG_SENSORS_LIS3LV02D)) += platform_lis331.o
-obj-$(subst m,y,$(CONFIG_INPUT_MPU3050)) += platform_mpu3050.o
+obj-$(subst m,y,$(CONFIG_MPU3050_I2C)) += platform_mpu3050.o
 obj-$(subst m,y,$(CONFIG_INPUT_BMA150)) += platform_bma023.o
 obj-$(subst m,y,$(CONFIG_DRM_MEDFIELD)) += platform_tc35876x.o
 # I2C GPIO Expanders
-7
arch/x86/platform/intel-mid/intel-mid.c
···
 	regulator_has_full_constraints();
 }

-/* MID systems don't have i8042 controller */
-static int intel_mid_i8042_detect(void)
-{
-	return 0;
-}
-
 /*
  * Moorestown does not have external NMI source nor port 0x61 to report
  * NMI status. The possible NMI sources are from pmu as a result of NMI
···
 	x86_cpuinit.setup_percpu_clockev = apbt_setup_secondary_clock;

 	x86_platform.calibrate_tsc = intel_mid_calibrate_tsc;
-	x86_platform.i8042_detect = intel_mid_i8042_detect;
 	x86_init.timers.wallclock_init = intel_mid_rtc_init;
 	x86_platform.get_nmi_reason = intel_mid_get_nmi_reason;
+2 -1
arch/x86/platform/intel-quark/imr_selftest.c
···
  * @fmt:	format string.
  * ...		variadic argument list.
  */
-static void __init imr_self_test_result(int res, const char *fmt, ...)
+static __printf(2, 3)
+void __init imr_self_test_result(int res, const char *fmt, ...)
 {
 	va_list vlist;
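__printf(2, 3) is the kernel's shorthand for __attribute__((format(printf, 2, 3))): argument 2 is the format string and the variadic arguments start at position 3, so a mismatched self-test message now warns at compile time. The equivalent plain-GCC annotation in user space (a sketch):

	#include <stdarg.h>
	#include <stdio.h>

	/* Same contract as imr_self_test_result(): arg 2 is the format string. */
	__attribute__((format(printf, 2, 3)))
	static void self_test_result(int res, const char *fmt, ...)
	{
		va_list vlist;

		fputs(res ? "ok: " : "FAIL: ", stdout);
		va_start(vlist, fmt);
		vprintf(fmt, vlist);
		va_end(vlist);
	}

	int main(void)
	{
		self_test_result(1, "imr slot %d\n", 3);
		/* self_test_result(1, "imr slot %d\n", "x"); would now warn */
		return 0;
	}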
+2 -1
arch/x86/tools/relocs.c
···
 		die("Segment relocations found but --realmode not specified\n");

 	/* Order the relocations for more efficient processing */
-	sort_relocs(&relocs16);
 	sort_relocs(&relocs32);
 #if ELF_BITS == 64
 	sort_relocs(&relocs32neg);
 	sort_relocs(&relocs64);
+#else
+	sort_relocs(&relocs16);
 #endif

 	/* Print the relocations */
+7 -3
drivers/input/serio/i8042-x86ia64io.h
···
 #if defined(__ia64__)
 	return -ENODEV;
 #else
-	pr_info("PNP: No PS/2 controller found. Probing ports directly.\n");
+	pr_info("PNP: No PS/2 controller found.\n");
+	if (x86_platform.legacy.i8042 !=
+			X86_LEGACY_I8042_EXPECTED_PRESENT)
+		return -ENODEV;
+	pr_info("Probing ports directly.\n");
 	return 0;
 #endif
 }
···
 #ifdef CONFIG_X86
 	u8 a20_on = 0xdf;
-	/* Just return if pre-detection shows no i8042 controller exist */
-	if (!x86_platform.i8042_detect())
+	/* Just return if platform does not have i8042 controller */
+	if (x86_platform.legacy.i8042 == X86_LEGACY_I8042_PLATFORM_ABSENT)
 		return -ENODEV;
 #endif