Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Peter Anvin:
"A couple of crash fixes, plus a fix that on 32 bits would cause a
missing -ENOSYS for nonexistent system calls"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86, cpu: Fix cache topology for early P4-SMT
x86_32, entry: Store badsys error code in %eax
x86, MCE: Robustify mcheck_init_device

+34 -19
+11 -11
arch/x86/kernel/cpu/intel.c
@@ context around line 370 @@
 	 */
 	detect_extended_topology(c);
 
+	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
+		/*
+		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
+		 * detection.
+		 */
+		c->x86_max_cores = intel_num_cpu_cores(c);
+#ifdef CONFIG_X86_32
+		detect_ht(c);
+#endif
+	}
+
 	l2 = init_intel_cacheinfo(c);
 	if (c->cpuid_level > 9) {
 		unsigned eax = cpuid_eax(10);
@@ context around line 448 @@
 	if (c->x86 == 6)
 		set_cpu_cap(c, X86_FEATURE_P3);
 #endif
-
-	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
-		/*
-		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
-		 * detection.
-		 */
-		c->x86_max_cores = intel_num_cpu_cores(c);
-#ifdef CONFIG_X86_32
-		detect_ht(c);
-#endif
-	}
 
 	/* Work around errata */
 	srat_detect_node(c);
+12
arch/x86/kernel/cpu/intel_cacheinfo.c
@@ context around line 730 @@
 #endif
 	}
 
+#ifdef CONFIG_X86_HT
+	/*
+	 * If cpu_llc_id is not yet set, this means cpuid_level < 4 which in
+	 * turns means that the only possibility is SMT (as indicated in
+	 * cpuid1). Since cpuid2 doesn't specify shared caches, and we know
+	 * that SMT shares all caches, we can unconditionally set cpu_llc_id to
+	 * c->phys_proc_id.
+	 */
+	if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
+		per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
+#endif
+
 	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
 
 	return l2;
+6 -4
arch/x86/kernel/cpu/mcheck/mce.c
@@ context around line 2451 @@
 	for_each_online_cpu(i) {
 		err = mce_device_create(i);
 		if (err) {
+			/*
+			 * Register notifier anyway (and do not unreg it) so
+			 * that we don't leave undeleted timers, see notifier
+			 * callback above.
+			 */
+			__register_hotcpu_notifier(&mce_cpu_notifier);
 			cpu_notifier_register_done();
 			goto err_device_create;
 		}
@@ context around line 2476 @@
 
 err_register:
 	unregister_syscore_ops(&mce_syscore_ops);
-
-	cpu_notifier_register_begin();
-	__unregister_hotcpu_notifier(&mce_cpu_notifier);
-	cpu_notifier_register_done();
 
 err_device_create:
 	/*
+5 -4
arch/x86/kernel/entry_32.S
@@ context around line 425 @@
 	cmpl $(NR_syscalls), %eax
 	jae sysenter_badsys
 	call *sys_call_table(,%eax,4)
-	movl %eax,PT_EAX(%esp)
 sysenter_after_call:
+	movl %eax,PT_EAX(%esp)
 	LOCKDEP_SYS_EXIT
 	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
@@ context around line 502 @@
 	jae syscall_badsys
 syscall_call:
 	call *sys_call_table(,%eax,4)
+syscall_after_call:
 	movl %eax,PT_EAX(%esp)		# store the return value
 syscall_exit:
 	LOCKDEP_SYS_EXIT
@@ context around line 676 @@
 END(syscall_fault)
 
 syscall_badsys:
-	movl $-ENOSYS,PT_EAX(%esp)
-	jmp syscall_exit
+	movl $-ENOSYS,%eax
+	jmp syscall_after_call
 END(syscall_badsys)
 
 sysenter_badsys:
-	movl $-ENOSYS,PT_EAX(%esp)
+	movl $-ENOSYS,%eax
 	jmp sysenter_after_call
 END(syscall_badsys)
 	CFI_ENDPROC