diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -370,6 +370,17 @@
 	 */
 	detect_extended_topology(c);
 
+	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
+		/*
+		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
+		 * detection.
+		 */
+		c->x86_max_cores = intel_num_cpu_cores(c);
+#ifdef CONFIG_X86_32
+		detect_ht(c);
+#endif
+	}
+
 	l2 = init_intel_cacheinfo(c);
 	if (c->cpuid_level > 9) {
 		unsigned eax = cpuid_eax(10);
@@ -448,17 +437,6 @@
 	if (c->x86 == 6)
 		set_cpu_cap(c, X86_FEATURE_P3);
 #endif
-
-	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
-		/*
-		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
-		 * detection.
-		 */
-		c->x86_max_cores = intel_num_cpu_cores(c);
-#ifdef CONFIG_X86_32
-		detect_ht(c);
-#endif
-	}
 
 	/* Work around errata */
 	srat_detect_node(c);
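Net effect of the two intel.c hunks: the legacy-topology block is moved verbatim, so CPUID 0x1/0x4 detection now runs before init_intel_cacheinfo() rather than after it. A minimal sketch of the resulting order inside init_intel() (not standalone kernel code; the note that detect_ht() fills in c->phys_proc_id is my reading of the surrounding sources):

	/* sketch: surrounding init_intel() code elided */
	detect_extended_topology(c);		/* CPUID leaf 0xb, if present */

	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
		/* fallback: legacy CPUID leaves 0x1 and 0x4 */
		c->x86_max_cores = intel_num_cpu_cores(c);
#ifdef CONFIG_X86_32
		detect_ht(c);			/* sets c->phys_proc_id */
#endif
	}

	/* cache setup now runs with a valid c->phys_proc_id */
	l2 = init_intel_cacheinfo(c);

This ordering is what the intel_cacheinfo.c hunk below relies on.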
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -730,6 +730,18 @@
 #endif
 	}
 
+#ifdef CONFIG_X86_HT
+	/*
+	 * If cpu_llc_id is not yet set, this means cpuid_level < 4 which in
+	 * turns means that the only possibility is SMT (as indicated in
+	 * cpuid1). Since cpuid2 doesn't specify shared caches, and we know
+	 * that SMT shares all caches, we can unconditionally set cpu_llc_id to
+	 * c->phys_proc_id.
+	 */
+	if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
+		per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
+#endif
+
 	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
 
 	return l2;
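The added block is the consumer of that reordering: when cpuid_level < 4 the leaf-4 loop that normally derives cpu_llc_id never runs, so the id is still BAD_APICID; the only sharing possible on such a part is SMT, and SMT siblings share every cache level, so the package id is the correct LLC id. A standalone toy version of just this fallback (hypothetical struct fields stand in for the kernel's cpuinfo_x86 and per_cpu(cpu_llc_id, cpu); the sentinel value is illustrative):

#include <stdio.h>

#define BAD_APICID 0xffffu	/* "not set" sentinel; value illustrative */

/* hypothetical stand-ins for cpuinfo_x86 and per_cpu(cpu_llc_id, cpu) */
struct cpu {
	unsigned int phys_proc_id;	/* physical package id */
	unsigned int llc_id;		/* last-level-cache id */
};

static void llc_id_fallback(struct cpu *c)
{
	/*
	 * llc_id untouched means CPUID leaf 4 never ran; only SMT sharing
	 * is possible then, and SMT siblings share all caches, so the
	 * package id names the LLC domain.
	 */
	if (c->llc_id == BAD_APICID)
		c->llc_id = c->phys_proc_id;
}

int main(void)
{
	struct cpu c = { .phys_proc_id = 3, .llc_id = BAD_APICID };

	llc_id_fallback(&c);
	printf("llc_id = %u\n", c.llc_id);	/* prints: llc_id = 3 */
	return 0;
}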
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -2451,6 +2451,12 @@
 	for_each_online_cpu(i) {
 		err = mce_device_create(i);
 		if (err) {
+			/*
+			 * Register notifier anyway (and do not unreg it) so
+			 * that we don't leave undeleted timers, see notifier
+			 * callback above.
+			 */
+			__register_hotcpu_notifier(&mce_cpu_notifier);
 			cpu_notifier_register_done();
 			goto err_device_create;
 		}
@@ -2476,10 +2470,6 @@
 
 err_register:
 	unregister_syscore_ops(&mce_syscore_ops);
-
-	cpu_notifier_register_begin();
-	__unregister_hotcpu_notifier(&mce_cpu_notifier);
-	cpu_notifier_register_done();
 
 err_device_create:
 	/*
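Both mce.c hunks form one logical fix to the device-init error path: when mce_device_create() fails partway through the loop, earlier CPUs may already have per-CPU timers running that only the hotplug-notifier callback deletes, so the notifier is now registered even on failure and err_register no longer unregisters it. A control-flow sketch reconstructed from the hunks alone (not standalone; the begin() call and the success-path registration after the loop are inferred, not shown above):

	cpu_notifier_register_begin();
	for_each_online_cpu(i) {
		err = mce_device_create(i);
		if (err) {
			/*
			 * Keep the notifier so its callback can still delete
			 * the timers armed by earlier iterations.
			 */
			__register_hotcpu_notifier(&mce_cpu_notifier);
			cpu_notifier_register_done();
			goto err_device_create;
		}
	}
	/* success path: same registration, once, after the loop (inferred) */
	__register_hotcpu_notifier(&mce_cpu_notifier);
	cpu_notifier_register_done();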
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -425,8 +425,8 @@
 	cmpl $(NR_syscalls), %eax
 	jae sysenter_badsys
 	call *sys_call_table(,%eax,4)
-	movl %eax,PT_EAX(%esp)
 sysenter_after_call:
+	movl %eax,PT_EAX(%esp)
 	LOCKDEP_SYS_EXIT
 	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
@@ -502,6 +502,7 @@
 	jae syscall_badsys
 syscall_call:
 	call *sys_call_table(,%eax,4)
+syscall_after_call:
 	movl %eax,PT_EAX(%esp)		# store the return value
 syscall_exit:
 	LOCKDEP_SYS_EXIT
@@ -676,12 +675,12 @@
 END(syscall_fault)
 
 syscall_badsys:
-	movl $-ENOSYS,PT_EAX(%esp)
-	jmp syscall_exit
+	movl $-ENOSYS,%eax
+	jmp syscall_after_call
 END(syscall_badsys)
 
 sysenter_badsys:
-	movl $-ENOSYS,PT_EAX(%esp)
+	movl $-ENOSYS,%eax
 	jmp sysenter_after_call
 END(syscall_badsys)
 CFI_ENDPROC
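The entry_32.S hunks fix the return value of rejected syscalls. On the sysenter path, the SYSEXIT fast path returns to userspace with whatever is live in %eax and does not reload it from the saved pt_regs, so writing -ENOSYS only into PT_EAX(%esp), as the old badsys handlers did, could let the caller see the stale syscall number instead of an error. The handlers now load -ENOSYS into %eax and jump to the common movl %eax,PT_EAX(%esp) store, moved behind the sysenter_after_call label and mirrored by the new syscall_after_call label, so the live register and the ptrace-visible copy always agree. A userspace check along these lines (666 assumed to be above NR_syscalls):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	long ret = syscall(666);	/* deliberately undefined syscall */

	/* fixed kernel: ret == -1 and errno == ENOSYS on both entry paths */
	printf("ret=%ld errno=%d (%s)\n", ret, errno, strerror(errno));
	return 0;
}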