Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
"There's a number of fixes:

- a round of fixes for CPUID-less legacy CPUs
- a number of microcode loader fixes
- i8042 detection robustness fixes
- stack dump/unwinder fixes
- x86 SoC platform driver fixes
- a GCC 7 warning fix
- virtualization-related fixes"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
Revert "x86/unwind: Detect bad stack return address"
x86/paravirt: Mark unused patch_default label
x86/microcode/AMD: Reload proper initrd start address
x86/platform/intel/quark: Add printf attribute to imr_self_test_result()
x86/platform/intel-mid: Switch MPU3050 driver to IIO
x86/alternatives: Do not use sync_core() to serialize I$
x86/topology: Document cpu_llc_id
x86/hyperv: Handle unknown NMIs on one CPU when unknown_nmi_panic
x86/asm: Rewrite sync_core() to use IRET-to-self
x86/microcode/intel: Replace sync_core() with native_cpuid()
Revert "x86/boot: Fail the boot if !M486 and CPUID is missing"
x86/asm/32: Make sync_core() handle missing CPUID on all 32-bit kernels
x86/cpu: Probe CPUID leaf 6 even when cpuid_level == 6
x86/tools: Fix gcc-7 warning in relocs.c
x86/unwind: Dump stack data on warnings
x86/unwind: Adjust last frame check for aligned function stacks
x86/init: Fix a couple of comment typos
x86/init: Remove i8042_detect() from platform ops
Input: i8042 - Trust firmware a bit more when probing on X86
x86/init: Add i8042 state to the platform data
...

+280 -118
+9
Documentation/x86/topology.txt
··· 63 63 The maximum possible number of packages in the system. Helpful for per 64 64 package facilities to preallocate per package information. 65 65 66 + - cpu_llc_id: 67 + 68 + A per-CPU variable containing: 69 + - On Intel, the first APIC ID of the list of CPUs sharing the Last Level 70 + Cache 71 + 72 + - On AMD, the Node ID or Core Complex ID containing the Last Level 73 + Cache. In general, it is a number identifying an LLC uniquely on the 74 + system. 66 75 67 76 * Cores: 68 77
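A minimal sketch of the intended use, assuming kernel context and the per-CPU declaration from <asm/smp.h> (the helper name is hypothetical; the comparison is essentially how arch/x86/kernel/smpboot.c matches sibling CPUs):

    /*
     * Two CPUs share a last-level cache precisely when their cpu_llc_id
     * values match; the absolute value only has to be unique per LLC.
     */
    static bool cpus_share_llc(int cpu_a, int cpu_b)
    {
            return per_cpu(cpu_llc_id, cpu_a) == per_cpu(cpu_llc_id, cpu_b);
    }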
-6
arch/x86/boot/cpu.c
··· 87 87 return -1; 88 88 } 89 89 90 - if (CONFIG_X86_MINIMUM_CPU_FAMILY <= 4 && !IS_ENABLED(CONFIG_M486) && 91 - !has_eflag(X86_EFLAGS_ID)) { 92 - printf("This kernel requires a CPU with the CPUID instruction. Build with CONFIG_M486=y to run on this CPU.\n"); 93 - return -1; 94 - } 95 - 96 90 if (err_flags) { 97 91 puts("This kernel requires the following features " 98 92 "not present on the CPU:\n");
+59 -23
arch/x86/include/asm/processor.h
··· 602 602 rep_nop(); 603 603 } 604 604 605 - /* Stop speculative execution and prefetching of modified code. */ 605 + /* 606 + * This function forces the icache and prefetched instruction stream to 607 + * catch up with reality in two very specific cases: 608 + * 609 + * a) Text was modified using one virtual address and is about to be executed 610 + * from the same physical page at a different virtual address. 611 + * 612 + * b) Text was modified on a different CPU, may subsequently be 613 + * executed on this CPU, and you want to make sure the new version 614 + * gets executed. This generally means you're calling this in an IPI. 615 + * 616 + * If you're calling this for a different reason, you're probably doing 617 + * it wrong. 618 + */ 606 619 static inline void sync_core(void) 607 620 { 608 - int tmp; 621 + /* 622 + * There are quite a few ways to do this. IRET-to-self is nice 623 + * because it works on every CPU, at any CPL (so it's compatible 624 + * with paravirtualization), and it never exits to a hypervisor. 625 + * The only down sides are that it's a bit slow (it seems to be 626 + * a bit more than 2x slower than the fastest options) and that 627 + * it unmasks NMIs. The "push %cs" is needed because, in 628 + * paravirtual environments, __KERNEL_CS may not be a valid CS 629 + * value when we do IRET directly. 630 + * 631 + * In case NMI unmasking or performance ever becomes a problem, 632 + * the next best option appears to be MOV-to-CR2 and an 633 + * unconditional jump. That sequence also works on all CPUs, 634 + * but it will fault at CPL3 (i.e. Xen PV and lguest). 635 + * 636 + * CPUID is the conventional way, but it's nasty: it doesn't 637 + * exist on some 486-like CPUs, and it usually exits to a 638 + * hypervisor. 639 + * 640 + * Like all of Linux's memory ordering operations, this is a 641 + * compiler barrier as well. 642 + */ 643 + register void *__sp asm(_ASM_SP); 609 644 610 - #ifdef CONFIG_M486 611 - /* 612 - * Do a CPUID if available, otherwise do a jump. The jump 613 - * can conveniently enough be the jump around CPUID. 614 - */ 615 - asm volatile("cmpl %2,%1\n\t" 616 - "jl 1f\n\t" 617 - "cpuid\n" 618 - "1:" 619 - : "=a" (tmp) 620 - : "rm" (boot_cpu_data.cpuid_level), "ri" (0), "0" (1) 621 - : "ebx", "ecx", "edx", "memory"); 645 + #ifdef CONFIG_X86_32 646 + asm volatile ( 647 + "pushfl\n\t" 648 + "pushl %%cs\n\t" 649 + "pushl $1f\n\t" 650 + "iret\n\t" 651 + "1:" 652 + : "+r" (__sp) : : "memory"); 622 653 #else 623 - /* 624 - * CPUID is a barrier to speculative execution. 625 - * Prefetched instructions are automatically 626 - * invalidated when modified. 627 - */ 628 - asm volatile("cpuid" 629 - : "=a" (tmp) 630 - : "0" (1) 631 - : "ebx", "ecx", "edx", "memory"); 654 + unsigned int tmp; 655 + 656 + asm volatile ( 657 + "mov %%ss, %0\n\t" 658 + "pushq %q0\n\t" 659 + "pushq %%rsp\n\t" 660 + "addq $8, (%%rsp)\n\t" 661 + "pushfq\n\t" 662 + "mov %%cs, %0\n\t" 663 + "pushq %q0\n\t" 664 + "pushq $1f\n\t" 665 + "iretq\n\t" 666 + "1:" 667 + : "=&r" (tmp), "+r" (__sp) : : "cc", "memory"); 632 668 #endif 633 669 } 634 670
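Case (b) above is the cross-CPU patching scenario. A minimal sketch of such a caller, assuming kernel context; on_each_cpu() is the real SMP interface, and a do_sync_core() wrapper of this shape is what arch/x86/kernel/alternative.c uses when patching live kernel text (the outer function name is mine):

    /* on_each_cpu() requires a void (*)(void *) callback. */
    static void do_sync_core(void *info)
    {
            sync_core();
    }

    static void serialize_all_cpus(void)
    {
            /*
             * Run the callback on every online CPU; wait=1 blocks until
             * all of them have executed it, so no CPU can still be
             * running stale prefetched bytes when this returns.
             */
            on_each_cpu(do_sync_core, NULL, 1);
    }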
+1 -1
arch/x86/include/asm/unwind.h
··· 12 12 struct task_struct *task; 13 13 int graph_idx; 14 14 #ifdef CONFIG_FRAME_POINTER 15 - unsigned long *bp; 15 + unsigned long *bp, *orig_sp; 16 16 struct pt_regs *regs; 17 17 #else 18 18 unsigned long *sp;
+21 -5
arch/x86/include/asm/x86_init.h
··· 59 59 60 60 /** 61 61 * struct x86_init_oem - oem platform specific customizing functions 62 - * @arch_setup: platform specific architecure setup 62 + * @arch_setup: platform specific architecture setup 63 63 * @banner: print a platform specific banner 64 64 */ 65 65 struct x86_init_oem { ··· 165 165 }; 166 166 167 167 /** 168 + * enum x86_legacy_i8042_state - i8042 keyboard controller state 169 + * @X86_LEGACY_I8042_PLATFORM_ABSENT: the controller is always absent on 170 + * given platform/subarch. 171 + * @X86_LEGACY_I8042_FIRMWARE_ABSENT: firmware reports that the controller 172 + * is absent. 173 + * @X86_LEGACY_I8042_EXPECTED_PRESENT: the controller is likely to be 174 + * present, the i8042 driver should probe for controller existence. 175 + */ 176 + enum x86_legacy_i8042_state { 177 + X86_LEGACY_I8042_PLATFORM_ABSENT, 178 + X86_LEGACY_I8042_FIRMWARE_ABSENT, 179 + X86_LEGACY_I8042_EXPECTED_PRESENT, 180 + }; 181 + 182 + /** 168 183 * struct x86_legacy_features - legacy x86 features 169 184 * 185 + * @i8042: indicates whether we expect the device to have an i8042 controller 186 + * present. 170 187 * @rtc: this device has a CMOS real-time clock present 171 188 * @reserve_bios_regions: boot code will search for the EBDA address and the 172 189 * start of the 640k - 1M BIOS region. If false, the platform must ··· 192 175 * documentation for further details. 193 176 */ 194 177 struct x86_legacy_features { 178 + enum x86_legacy_i8042_state i8042; 195 179 int rtc; 196 180 int reserve_bios_regions; 197 181 struct x86_legacy_devices devices; ··· 206 188 * @set_wallclock: set time back to HW clock 207 189 * @is_untracked_pat_range exclude from PAT logic 208 190 * @nmi_init enable NMI on cpus 209 - * @i8042_detect pre-detect if i8042 controller exists 210 191 * @save_sched_clock_state: save state for sched_clock() on suspend 211 192 * @restore_sched_clock_state: restore state for sched_clock() on resume 212 - * @apic_post_init: adjust apic if neeeded 193 + * @apic_post_init: adjust apic if needed 213 194 * @legacy: legacy features 214 195 * @set_legacy_features: override legacy features. Use of this callback 215 196 * is highly discouraged. You should only need 216 197 * this if your hardware platform requires further 217 - * custom fine tuning far beyong what may be 198 + * custom fine tuning far beyond what may be 218 199 * possible in x86_early_init_platform_quirks() by 219 200 * only using the current x86_hardware_subarch 220 201 * semantics. ··· 227 210 bool (*is_untracked_pat_range)(u64 start, u64 end); 228 211 void (*nmi_init)(void); 229 212 unsigned char (*get_nmi_reason)(void); 230 - int (*i8042_detect)(void); 231 213 void (*save_sched_clock_state)(void); 232 214 void (*restore_sched_clock_state)(void); 233 215 void (*apic_post_init)(void);
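A hypothetical consumer sketch of the new tri-state (the helper name is mine; the real user is the i8042 driver change at the end of this series, which is subtler: FIRMWARE_ABSENT only suppresses the blind port probe, while PLATFORM_ABSENT stops the driver outright):

    static bool i8042_worth_probing(void)
    {
            switch (x86_platform.legacy.i8042) {
            case X86_LEGACY_I8042_PLATFORM_ABSENT:  /* subarch never has one */
                    return false;
            case X86_LEGACY_I8042_FIRMWARE_ABSENT:  /* FADT says it is gone */
                    return false;
            case X86_LEGACY_I8042_EXPECTED_PRESENT:
            default:
                    return true;
            }
    }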
+7
arch/x86/kernel/acpi/boot.c
··· 930 930 x86_platform.legacy.devices.pnpbios = 0; 931 931 } 932 932 933 + if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID && 934 + !(acpi_gbl_FADT.boot_flags & ACPI_FADT_8042) && 935 + x86_platform.legacy.i8042 != X86_LEGACY_I8042_PLATFORM_ABSENT) { 936 + pr_debug("ACPI: i8042 controller is absent\n"); 937 + x86_platform.legacy.i8042 = X86_LEGACY_I8042_FIRMWARE_ABSENT; 938 + } 939 + 933 940 if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_CMOS_RTC) { 934 941 pr_debug("ACPI: not registering RTC platform device\n"); 935 942 x86_platform.legacy.rtc = 0;
+10 -5
arch/x86/kernel/alternative.c
··· 337 337 n_dspl, (unsigned long)orig_insn + n_dspl + repl_len); 338 338 } 339 339 340 - static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr) 340 + /* 341 + * "noinline" to cause control flow change and thus invalidate I$ and 342 + * cause refetch after modification. 343 + */ 344 + static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr) 341 345 { 342 346 unsigned long flags; 343 347 ··· 350 346 351 347 local_irq_save(flags); 352 348 add_nops(instr + (a->instrlen - a->padlen), a->padlen); 353 - sync_core(); 354 349 local_irq_restore(flags); 355 350 356 351 DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ", ··· 362 359 * This implies that asymmetric systems where APs have less capabilities than 363 360 * the boot processor are not handled. Tough. Make sure you disable such 364 361 * features by hand. 362 + * 363 + * Marked "noinline" to cause control flow change and thus insn cache 364 + * to refetch changed I$ lines. 365 365 */ 366 - void __init_or_module apply_alternatives(struct alt_instr *start, 367 - struct alt_instr *end) 366 + void __init_or_module noinline apply_alternatives(struct alt_instr *start, 367 + struct alt_instr *end) 368 368 { 369 369 struct alt_instr *a; 370 370 u8 *instr, *replacement; ··· 673 667 unsigned long flags; 674 668 local_irq_save(flags); 675 669 memcpy(addr, opcode, len); 676 - sync_core(); 677 670 local_irq_restore(flags); 678 671 /* Could also do a CLFLUSH here to speed up CPU recovery; but 679 672 that causes hangs on some VIA CPUs. */
+4 -3
arch/x86/kernel/cpu/common.c
··· 667 667 c->x86_capability[CPUID_1_EDX] = edx; 668 668 } 669 669 670 + /* Thermal and Power Management Leaf: level 0x00000006 (eax) */ 671 + if (c->cpuid_level >= 0x00000006) 672 + c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006); 673 + 670 674 /* Additional Intel-defined flags: level 0x00000007 */ 671 675 if (c->cpuid_level >= 0x00000007) { 672 676 cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx); 673 - 674 677 c->x86_capability[CPUID_7_0_EBX] = ebx; 675 - 676 - c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006); 677 678 c->x86_capability[CPUID_7_ECX] = ecx; 678 679 } 679 680
+33 -23
arch/x86/kernel/cpu/microcode/amd.c
··· 116 116 117 117 /* 118 118 * This scans the ucode blob for the proper container as we can have multiple 119 - * containers glued together. 119 + * containers glued together. Returns the equivalence ID from the equivalence 120 + * table or 0 if none found. 120 121 */ 121 - static struct container 122 - find_proper_container(u8 *ucode, size_t size, u16 *ret_id) 122 + static u16 123 + find_proper_container(u8 *ucode, size_t size, struct container *ret_cont) 123 124 { 124 125 struct container ret = { NULL, 0 }; 125 126 u32 eax, ebx, ecx, edx; ··· 139 138 if (header[0] != UCODE_MAGIC || 140 139 header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */ 141 140 header[2] == 0) /* size */ 142 - return ret; 141 + return eq_id; 143 142 144 143 eax = 0x00000001; 145 144 ecx = 0; ··· 164 163 * ucode update loop below 165 164 */ 166 165 left = ret.size - offset; 167 - *ret_id = eq_id; 168 - return ret; 166 + 167 + *ret_cont = ret; 168 + return eq_id; 169 169 } 170 170 171 171 /* ··· 191 189 ucode = data; 192 190 } 193 191 194 - return ret; 192 + return eq_id; 195 193 } 196 194 197 195 static int __apply_microcode_amd(struct microcode_amd *mc_amd) ··· 216 214 * and on 32-bit during save_microcode_in_initrd_amd() -- we can call 217 215 * load_microcode_amd() to save equivalent cpu table and microcode patches in 218 216 * kernel heap memory. 217 + * 218 + * Returns true if container found (sets @ret_cont), false otherwise. 219 219 */ 220 - static struct container 221 - apply_microcode_early_amd(void *ucode, size_t size, bool save_patch) 220 + static bool apply_microcode_early_amd(void *ucode, size_t size, bool save_patch, 221 + struct container *ret_cont) 222 222 { 223 - struct container ret = { NULL, 0 }; 224 223 u8 (*patch)[PATCH_MAX_SIZE]; 224 + u32 rev, *header, *new_rev; 225 + struct container ret; 225 226 int offset, left; 226 - u32 rev, *header; 227 - u8 *data; 228 227 u16 eq_id = 0; 229 - u32 *new_rev; 228 + u8 *data; 230 229 231 230 #ifdef CONFIG_X86_32 232 231 new_rev = (u32 *)__pa_nodebug(&ucode_new_rev); ··· 238 235 #endif 239 236 240 237 if (check_current_patch_level(&rev, true)) 241 - return (struct container){ NULL, 0 }; 238 + return false; 242 239 243 - ret = find_proper_container(ucode, size, &eq_id); 240 + eq_id = find_proper_container(ucode, size, &ret); 244 241 if (!eq_id) 245 - return (struct container){ NULL, 0 }; 242 + return false; 246 243 247 244 this_equiv_id = eq_id; 248 245 header = (u32 *)ret.data; ··· 276 273 data += offset; 277 274 left -= offset; 278 275 } 279 - return ret; 276 + 277 + if (ret_cont) 278 + *ret_cont = ret; 279 + 280 + return true; 280 281 } 281 282 282 283 static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family) ··· 301 294 void __init load_ucode_amd_bsp(unsigned int family) 302 295 { 303 296 struct ucode_cpu_info *uci; 297 + u32 eax, ebx, ecx, edx; 304 298 struct cpio_data cp; 305 299 const char *path; 306 300 bool use_pa; ··· 323 315 return; 324 316 325 317 /* Get BSP's CPUID.EAX(1), needed in load_microcode_amd() */ 326 - uci->cpu_sig.sig = cpuid_eax(1); 318 + eax = 1; 319 + ecx = 0; 320 + native_cpuid(&eax, &ebx, &ecx, &edx); 321 + uci->cpu_sig.sig = eax; 327 322 328 - apply_microcode_early_amd(cp.data, cp.size, true); 323 + apply_microcode_early_amd(cp.data, cp.size, true, NULL); 329 324 } 330 325 331 326 #ifdef CONFIG_X86_32 ··· 360 349 * This would set amd_ucode_patch above so that the following APs can 361 350 * use it directly instead of going down this path again. 362 351 */ 363 - apply_microcode_early_amd(cp.data, cp.size, true); 352 + apply_microcode_early_amd(cp.data, cp.size, true, NULL); 364 353 } 365 354 #else 366 355 void load_ucode_amd_ap(unsigned int family) ··· 398 387 } 399 388 } 400 389 401 - cont = apply_microcode_early_amd(cp.data, cp.size, false); 402 - if (!(cont.data && cont.size)) { 390 + if (!apply_microcode_early_amd(cp.data, cp.size, false, &cont)) { 403 391 cont.size = -1; 404 392 return; 405 393 } ··· 453 443 return -EINVAL; 454 444 } 455 445 456 - cont = find_proper_container(cp.data, cp.size, &eq_id); 446 + eq_id = find_proper_container(cp.data, cp.size, &cont); 457 447 if (!eq_id) { 458 448 cont.size = -1; 459 449 return -EINVAL;
+24 -16
arch/x86/kernel/cpu/microcode/core.c
··· 44 44 #define DRIVER_VERSION "2.2" 45 45 46 46 static struct microcode_ops *microcode_ops; 47 - static bool dis_ucode_ldr; 47 + static bool dis_ucode_ldr = true; 48 48 49 49 LIST_HEAD(microcode_cache); 50 50 ··· 76 76 static bool __init check_loader_disabled_bsp(void) 77 77 { 78 78 static const char *__dis_opt_str = "dis_ucode_ldr"; 79 + u32 a, b, c, d; 79 80 80 81 #ifdef CONFIG_X86_32 81 82 const char *cmdline = (const char *)__pa_nodebug(boot_command_line); ··· 89 88 bool *res = &dis_ucode_ldr; 90 89 #endif 91 90 92 - if (cmdline_find_option_bool(cmdline, option)) 93 - *res = true; 91 + if (!have_cpuid_p()) 92 + return *res; 93 + 94 + a = 1; 95 + c = 0; 96 + native_cpuid(&a, &b, &c, &d); 97 + 98 + /* 99 + * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not 100 + * completely accurate as xen pv guests don't see that CPUID bit set but 101 + * that's good enough as they don't land on the BSP path anyway. 102 + */ 103 + if (c & BIT(31)) 104 + return *res; 105 + 106 + if (cmdline_find_option_bool(cmdline, option) <= 0) 107 + *res = false; 94 108 95 109 return *res; 96 110 } ··· 135 119 unsigned int family; 136 120 137 121 if (check_loader_disabled_bsp()) 138 - return; 139 - 140 - if (!have_cpuid_p()) 141 122 return; 142 123 143 124 vendor = x86_cpuid_vendor(); ··· 168 155 int vendor, family; 169 156 170 157 if (check_loader_disabled_ap()) 171 - return; 172 - 173 - if (!have_cpuid_p()) 174 158 return; 175 159 176 160 vendor = x86_cpuid_vendor(); ··· 243 233 # endif 244 234 245 235 /* 246 - * Did we relocate the ramdisk? 247 - * 248 - * So we possibly relocate the ramdisk *after* applying microcode on the 249 - * BSP so we rely on use_pa (use physical addresses) - even if it is not 250 - * absolutely correct - to determine whether we've done the ramdisk 251 - * relocation already. 236 + * Fixup the start address: after reserve_initrd() runs, initrd_start 237 + * has the virtual address of the beginning of the initrd. It also 238 + * possibly relocates the ramdisk. In either case, initrd_start contains 239 + * the updated address so use that instead. 252 240 */ 253 - if (!use_pa && relocated_ramdisk) 241 + if (!use_pa && initrd_start) 254 242 start = initrd_start; 255 243 256 244 return find_cpio_data(path, (void *)start, size, NULL);
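The new gate keys off CPUID(1).ECX[31], the hypervisor-present bit. A standalone user-space analogue of the same test, using GCC's <cpuid.h> instead of the kernel's native_cpuid() (the function name is mine):

    #include <cpuid.h>
    #include <stdbool.h>

    static bool hypervisor_present(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                    return false;           /* CPUID leaf 1 unavailable */

            /*
             * ECX bit 31 is reserved for hypervisor use; most hypervisors
             * set it (Xen PV guests are the noted exception, as the
             * comment in the diff above observes).
             */
            return ecx & (1u << 31);
    }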
+23 -3
arch/x86/kernel/cpu/microcode/intel.c
··· 368 368 return patch; 369 369 } 370 370 371 + static void cpuid_1(void) 372 + { 373 + /* 374 + * According to the Intel SDM, Volume 3, 9.11.7: 375 + * 376 + * CPUID returns a value in a model specific register in 377 + * addition to its usual register return values. The 378 + * semantics of CPUID cause it to deposit an update ID value 379 + * in the 64-bit model-specific register at address 08BH 380 + * (IA32_BIOS_SIGN_ID). If no update is present in the 381 + * processor, the value in the MSR remains unmodified. 382 + * 383 + * Use native_cpuid -- this code runs very early and we don't 384 + * want to mess with paravirt. 385 + */ 386 + unsigned int eax = 1, ebx, ecx = 0, edx; 387 + 388 + native_cpuid(&eax, &ebx, &ecx, &edx); 389 + } 390 + 371 391 static int collect_cpu_info_early(struct ucode_cpu_info *uci) 372 392 { 373 393 unsigned int val[2]; ··· 413 393 native_wrmsrl(MSR_IA32_UCODE_REV, 0); 414 394 415 395 /* As documented in the SDM: Do a CPUID 1 here */ 416 - sync_core(); 396 + cpuid_1(); 417 397 418 398 /* get the current revision from MSR 0x8B */ 419 399 native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]); ··· 613 593 native_wrmsrl(MSR_IA32_UCODE_REV, 0); 614 594 615 595 /* As documented in the SDM: Do a CPUID 1 here */ 616 - sync_core(); 596 + cpuid_1(); 617 597 618 598 /* get the current revision from MSR 0x8B */ 619 599 native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]); ··· 825 805 wrmsrl(MSR_IA32_UCODE_REV, 0); 826 806 827 807 /* As documented in the SDM: Do a CPUID 1 here */ 828 - sync_core(); 808 + cpuid_1(); 829 809 830 810 /* get the current revision from MSR 0x8B */ 831 811 rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
+24
arch/x86/kernel/cpu/mshyperv.c
··· 30 30 #include <asm/apic.h> 31 31 #include <asm/timer.h> 32 32 #include <asm/reboot.h> 33 + #include <asm/nmi.h> 33 34 34 35 struct ms_hyperv_info ms_hyperv; 35 36 EXPORT_SYMBOL_GPL(ms_hyperv); ··· 158 157 return 0; 159 158 } 160 159 160 + #ifdef CONFIG_X86_LOCAL_APIC 161 + /* 162 + * Prior to WS2016 Debug-VM sends NMIs to all CPUs which makes 163 + * it difficult to process CHANNELMSG_UNLOAD in case of crash. Handle 164 + * unknown NMI on the first CPU which gets it. 165 + */ 166 + static int hv_nmi_unknown(unsigned int val, struct pt_regs *regs) 167 + { 168 + static atomic_t nmi_cpu = ATOMIC_INIT(-1); 169 + 170 + if (!unknown_nmi_panic) 171 + return NMI_DONE; 172 + 173 + if (atomic_cmpxchg(&nmi_cpu, -1, raw_smp_processor_id()) != -1) 174 + return NMI_HANDLED; 175 + 176 + return NMI_DONE; 177 + } 178 + #endif 179 + 161 180 static void __init ms_hyperv_init_platform(void) 162 181 { 163 182 /* ··· 203 182 pr_info("HyperV: LAPIC Timer Frequency: %#x\n", 204 183 lapic_timer_frequency); 205 184 } 185 + 186 + register_nmi_handler(NMI_UNKNOWN, hv_nmi_unknown, NMI_FLAG_FIRST, 187 + "hv_nmi_unknown"); 206 188 #endif 207 189 208 190 if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
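The atomic_cmpxchg() line is a first-claimant handoff: exactly one CPU swaps the -1 sentinel for its own id and lets the unknown NMI continue to the panic path, while every later CPU finds the slot already taken and returns NMI_HANDLED. A self-contained user-space analogue of the pattern in C11 atomics (names are mine):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int claimant = ATOMIC_VAR_INIT(-1);

    /* Returns true for exactly one caller, no matter how many race. */
    static bool claim_event(int my_id)
    {
            int expected = -1;

            return atomic_compare_exchange_strong(&claimant, &expected, my_id);
    }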
+1 -1
arch/x86/kernel/paravirt_patch_32.c
··· 68 68 #endif 69 69 70 70 default: 71 - patch_default: 71 + patch_default: __maybe_unused 72 72 ret = paravirt_patch_default(type, clobbers, ibuf, addr, len); 73 73 break; 74 74
+1 -1
arch/x86/kernel/paravirt_patch_64.c
··· 80 80 #endif 81 81 82 82 default: 83 - patch_default: 83 + patch_default: __maybe_unused 84 84 ret = paravirt_patch_default(type, clobbers, ibuf, addr, len); 85 85 break; 86 86
+5
arch/x86/kernel/platform-quirks.c
··· 6 6 7 7 void __init x86_early_init_platform_quirks(void) 8 8 { 9 + x86_platform.legacy.i8042 = X86_LEGACY_I8042_EXPECTED_PRESENT; 9 10 x86_platform.legacy.rtc = 1; 10 11 x86_platform.legacy.reserve_bios_regions = 0; 11 12 x86_platform.legacy.devices.pnpbios = 1; ··· 17 16 break; 18 17 case X86_SUBARCH_XEN: 19 18 case X86_SUBARCH_LGUEST: 19 + x86_platform.legacy.devices.pnpbios = 0; 20 + x86_platform.legacy.rtc = 0; 21 + break; 20 22 case X86_SUBARCH_INTEL_MID: 21 23 case X86_SUBARCH_CE4100: 22 24 x86_platform.legacy.devices.pnpbios = 0; 23 25 x86_platform.legacy.rtc = 0; 26 + x86_platform.legacy.i8042 = X86_LEGACY_I8042_PLATFORM_ABSENT; 24 27 break; 25 28 } 26 29
+46 -10
arch/x86/kernel/unwind_frame.c
··· 6 6 7 7 #define FRAME_HEADER_SIZE (sizeof(long) * 2) 8 8 9 + static void unwind_dump(struct unwind_state *state, unsigned long *sp) 10 + { 11 + static bool dumped_before = false; 12 + bool prev_zero, zero = false; 13 + unsigned long word; 14 + 15 + if (dumped_before) 16 + return; 17 + 18 + dumped_before = true; 19 + 20 + printk_deferred("unwind stack type:%d next_sp:%p mask:%lx graph_idx:%d\n", 21 + state->stack_info.type, state->stack_info.next_sp, 22 + state->stack_mask, state->graph_idx); 23 + 24 + for (sp = state->orig_sp; sp < state->stack_info.end; sp++) { 25 + word = READ_ONCE_NOCHECK(*sp); 26 + 27 + prev_zero = zero; 28 + zero = word == 0; 29 + 30 + if (zero) { 31 + if (!prev_zero) 32 + printk_deferred("%p: %016x ...\n", sp, 0); 33 + continue; 34 + } 35 + 36 + printk_deferred("%p: %016lx (%pB)\n", sp, word, (void *)word); 37 + } 38 + } 39 + 9 40 unsigned long unwind_get_return_address(struct unwind_state *state) 10 41 { 11 42 unsigned long addr; ··· 51 20 addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, *addr_p, 52 21 addr_p); 53 22 54 - if (!__kernel_text_address(addr)) { 55 - printk_deferred_once(KERN_WARNING 56 - "WARNING: unrecognized kernel stack return address %p at %p in %s:%d\n", 57 - (void *)addr, addr_p, state->task->comm, 58 - state->task->pid); 59 - return 0; 60 - } 61 - 62 - return addr; 23 + return __kernel_text_address(addr) ? addr : 0; 63 24 } 64 25 EXPORT_SYMBOL_GPL(unwind_get_return_address); 65 26 ··· 69 46 unsigned long bp = (unsigned long)state->bp; 70 47 unsigned long regs = (unsigned long)task_pt_regs(state->task); 71 48 72 - return bp == regs - FRAME_HEADER_SIZE; 49 + /* 50 + * We have to check for the last task frame at two different locations 51 + * because gcc can occasionally decide to realign the stack pointer and 52 + * change the offset of the stack frame by a word in the prologue of a 53 + * function called by head/entry code. 54 + */ 55 + return bp == regs - FRAME_HEADER_SIZE || 56 + bp == regs - FRAME_HEADER_SIZE - sizeof(long); 73 57 } 74 58 75 59 /* ··· 97 67 size_t len) 98 68 { 99 69 struct stack_info *info = &state->stack_info; 70 + enum stack_type orig_type = info->type; 100 71 101 72 /* 102 73 * If addr isn't on the current stack, switch to the next one. ··· 110 79 if (get_stack_info(info->next_sp, state->task, info, 111 80 &state->stack_mask)) 112 81 return false; 82 + 83 + if (!state->orig_sp || info->type != orig_type) 84 + state->orig_sp = addr; 113 85 114 86 return true; 115 87 } ··· 212 178 "WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n", 213 179 state->regs, state->task->comm, 214 180 state->task->pid, next_frame); 181 + unwind_dump(state, (unsigned long *)state->regs); 215 182 } else { 216 183 printk_deferred_once(KERN_WARNING 217 184 "WARNING: kernel stack frame pointer at %p in %s:%d has bad value %p\n", 218 185 state->bp, state->task->comm, 219 186 state->task->pid, next_frame); 187 + unwind_dump(state, state->bp); 220 188 } 221 189 the_end: 222 190 state->stack_info.type = STACK_TYPE_UNKNOWN;
-2
arch/x86/kernel/x86_init.c
··· 89 89 }; 90 90 91 91 static void default_nmi_init(void) { }; 92 - static int default_i8042_detect(void) { return 1; }; 93 92 94 93 struct x86_platform_ops x86_platform __ro_after_init = { 95 94 .calibrate_cpu = native_calibrate_cpu, ··· 99 100 .is_untracked_pat_range = is_ISA_range, 100 101 .nmi_init = default_nmi_init, 101 102 .get_nmi_reason = default_get_nmi_reason, 102 - .i8042_detect = default_i8042_detect, 103 103 .save_sched_clock_state = tsc_save_sched_clock_state, 104 104 .restore_sched_clock_state = tsc_restore_sched_clock_state, 105 105 };
-6
arch/x86/platform/ce4100/ce4100.c
··· 23 23 #include <asm/io_apic.h> 24 24 #include <asm/emergency-restart.h> 25 25 26 - static int ce4100_i8042_detect(void) 27 - { 28 - return 0; 29 - } 30 - 31 26 /* 32 27 * The CE4100 platform has an internal 8051 Microcontroller which is 33 28 * responsible for signaling to the external Power Management Unit the ··· 140 145 void __init x86_ce4100_early_setup(void) 141 146 { 142 147 x86_init.oem.arch_setup = sdv_arch_setup; 143 - x86_platform.i8042_detect = ce4100_i8042_detect; 144 148 x86_init.resources.probe_roms = x86_init_noop; 145 149 x86_init.mpparse.get_smp_config = x86_init_uint_noop; 146 150 x86_init.mpparse.find_smp_config = x86_init_noop;
+1 -1
arch/x86/platform/intel-mid/device_libs/Makefile
··· 19 19 # I2C Devices 20 20 obj-$(subst m,y,$(CONFIG_SENSORS_EMC1403)) += platform_emc1403.o 21 21 obj-$(subst m,y,$(CONFIG_SENSORS_LIS3LV02D)) += platform_lis331.o 22 - obj-$(subst m,y,$(CONFIG_INPUT_MPU3050)) += platform_mpu3050.o 22 + obj-$(subst m,y,$(CONFIG_MPU3050_I2C)) += platform_mpu3050.o 23 23 obj-$(subst m,y,$(CONFIG_INPUT_BMA150)) += platform_bma023.o 24 24 obj-$(subst m,y,$(CONFIG_DRM_MEDFIELD)) += platform_tc35876x.o 25 25 # I2C GPIO Expanders
-7
arch/x86/platform/intel-mid/intel-mid.c
··· 161 161 regulator_has_full_constraints(); 162 162 } 163 163 164 - /* MID systems don't have i8042 controller */ 165 - static int intel_mid_i8042_detect(void) 166 - { 167 - return 0; 168 - } 169 - 170 164 /* 171 165 * Moorestown does not have external NMI source nor port 0x61 to report 172 166 * NMI status. The possible NMI sources are from pmu as a result of NMI ··· 191 197 x86_cpuinit.setup_percpu_clockev = apbt_setup_secondary_clock; 192 198 193 199 x86_platform.calibrate_tsc = intel_mid_calibrate_tsc; 194 - x86_platform.i8042_detect = intel_mid_i8042_detect; 195 200 x86_init.timers.wallclock_init = intel_mid_rtc_init; 196 201 x86_platform.get_nmi_reason = intel_mid_get_nmi_reason; 197 202
+2 -1
arch/x86/platform/intel-quark/imr_selftest.c
··· 25 25 * @fmt: format string. 26 26 * ... variadic argument list. 27 27 */ 28 - static void __init imr_self_test_result(int res, const char *fmt, ...) 28 + static __printf(2, 3) 29 + void __init imr_self_test_result(int res, const char *fmt, ...) 29 30 { 30 31 va_list vlist; 31 32
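__printf(a, b) is the kernel's shorthand for GCC's format attribute; with it in place, -Wformat checks every call site's arguments against the format string. A standalone illustration of what the annotation buys (function body and names are mine, not the IMR code):

    #include <stdarg.h>
    #include <stdio.h>

    __attribute__((format(printf, 2, 3)))   /* what __printf(2, 3) expands to */
    static void self_test_result(int res, const char *fmt, ...)
    {
            va_list ap;

            printf("%s: ", res ? "passed" : "failed");
            va_start(ap, fmt);
            vprintf(fmt, ap);
            va_end(ap);
    }

    int main(void)
    {
            self_test_result(1, "checked %d regions\n", 8);  /* OK */
            /* self_test_result(1, "%s\n", 8); would now warn at build time */
            return 0;
    }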
+2 -1
arch/x86/tools/relocs.c
··· 992 992 die("Segment relocations found but --realmode not specified\n"); 993 993 994 994 /* Order the relocations for more efficient processing */ 995 - sort_relocs(&relocs16); 996 995 sort_relocs(&relocs32); 997 996 #if ELF_BITS == 64 998 997 sort_relocs(&relocs32neg); 999 998 sort_relocs(&relocs64); 999 + #else 1000 + sort_relocs(&relocs16); 1000 1001 #endif 1001 1002 1002 1003 /* Print the relocations */
+7 -3
drivers/input/serio/i8042-x86ia64io.h
··· 983 983 #if defined(__ia64__) 984 984 return -ENODEV; 985 985 #else 986 - pr_info("PNP: No PS/2 controller found. Probing ports directly.\n"); 986 + pr_info("PNP: No PS/2 controller found.\n"); 987 + if (x86_platform.legacy.i8042 != 988 + X86_LEGACY_I8042_EXPECTED_PRESENT) 989 + return -ENODEV; 990 + pr_info("Probing ports directly.\n"); 987 991 return 0; 988 992 #endif 989 993 } ··· 1074 1070 1075 1071 #ifdef CONFIG_X86 1076 1072 u8 a20_on = 0xdf; 1077 - /* Just return if pre-detection shows no i8042 controller exist */ 1078 - if (!x86_platform.i8042_detect()) 1073 + /* Just return if platform does not have i8042 controller */ 1074 + if (x86_platform.legacy.i8042 == X86_LEGACY_I8042_PLATFORM_ABSENT) 1079 1075 return -ENODEV; 1080 1076 #endif 1081 1077