Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86, mce: Replace MCE_SELF_VECTOR by irq_work

The MCE handler uses a special vector for self IPI to invoke
post-emergency processing in an interrupt context, e.g. call an
NMI-unsafe function, wakeup loggers, schedule time-consuming work for
recovery, etc.

This mechanism is now generalized by the following commit:

> e360adbe29241a0194e10e20595360dd7b98a2b3
> Author: Peter Zijlstra <a.p.zijlstra@chello.nl>
> Date: Thu Oct 14 14:01:34 2010 +0800
>
> irq_work: Add generic hardirq context callbacks
>
> Provide a mechanism that allows running code in IRQ context. It is
> most useful for NMI code that needs to interact with the rest of the
> system -- like wakeup a task to drain buffers.
:

So change the MCE handler to use the generic irq_work mechanism provided by that commit.

Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Acked-by: Tony Luck <tony.luck@intel.com>
Link: http://lkml.kernel.org/r/4DEED6B2.6080005@jp.fujitsu.com
Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>

Authored by Hidetoshi Seto and committed by Borislav Petkov.
b77e70bf 7639bfc7

+6 -59
-4
arch/x86/include/asm/entry_arch.h
··· 53 53 BUILD_INTERRUPT(threshold_interrupt,THRESHOLD_APIC_VECTOR) 54 54 #endif 55 55 56 - #ifdef CONFIG_X86_MCE 57 - BUILD_INTERRUPT(mce_self_interrupt,MCE_SELF_VECTOR) 58 - #endif 59 - 60 56 #endif
-1
arch/x86/include/asm/hw_irq.h
··· 34 34 extern void spurious_interrupt(void); 35 35 extern void thermal_interrupt(void); 36 36 extern void reschedule_interrupt(void); 37 - extern void mce_self_interrupt(void); 38 37 39 38 extern void invalidate_interrupt(void); 40 39 extern void invalidate_interrupt0(void);
-5
arch/x86/include/asm/irq_vectors.h
··· 109 109 110 110 #define UV_BAU_MESSAGE 0xf5 111 111 112 - /* 113 - * Self IPI vector for machine checks 114 - */ 115 - #define MCE_SELF_VECTOR 0xf4 116 - 117 112 /* Xen vector callback to receive events in a HVM domain */ 118 113 #define XEN_HVM_EVTCHN_CALLBACK 0xf3 119 114
+6 -41
arch/x86/kernel/cpu/mcheck/mce.c
··· 10 10 #include <linux/thread_info.h> 11 11 #include <linux/capability.h> 12 12 #include <linux/miscdevice.h> 13 - #include <linux/interrupt.h> 14 13 #include <linux/ratelimit.h> 15 14 #include <linux/kallsyms.h> 16 15 #include <linux/rcupdate.h> ··· 37 38 #include <linux/mm.h> 38 39 #include <linux/debugfs.h> 39 40 #include <linux/edac_mce.h> 41 + #include <linux/irq_work.h> 40 42 41 43 #include <asm/processor.h> 42 - #include <asm/hw_irq.h> 43 - #include <asm/apic.h> 44 - #include <asm/idle.h> 45 - #include <asm/ipi.h> 46 44 #include <asm/mce.h> 47 45 #include <asm/msr.h> 48 46 ··· 457 461 m->ip = mce_rdmsrl(rip_msr); 458 462 } 459 463 460 - #ifdef CONFIG_X86_LOCAL_APIC 461 - /* 462 - * Called after interrupts have been reenabled again 463 - * when a MCE happened during an interrupts off region 464 - * in the kernel. 465 - */ 466 - asmlinkage void smp_mce_self_interrupt(struct pt_regs *regs) 464 + DEFINE_PER_CPU(struct irq_work, mce_irq_work); 465 + 466 + static void mce_irq_work_cb(struct irq_work *entry) 467 467 { 468 - ack_APIC_irq(); 469 - exit_idle(); 470 - irq_enter(); 471 468 mce_notify_irq(); 472 469 mce_schedule_work(); 473 - irq_exit(); 474 470 } 475 - #endif 476 471 477 472 static void mce_report_event(struct pt_regs *regs) 478 473 { ··· 479 492 return; 480 493 } 481 494 482 - #ifdef CONFIG_X86_LOCAL_APIC 483 - /* 484 - * Without APIC do not notify. The event will be picked 485 - * up eventually. 486 - */ 487 - if (!cpu_has_apic) 488 - return; 489 - 490 - /* 491 - * When interrupts are disabled we cannot use 492 - * kernel services safely. Trigger an self interrupt 493 - * through the APIC to instead do the notification 494 - * after interrupts are reenabled again. 495 - */ 496 - apic->send_IPI_self(MCE_SELF_VECTOR); 497 - 498 - /* 499 - * Wait for idle afterwards again so that we don't leave the 500 - * APIC in a non idle state because the normal APIC writes 501 - * cannot exclude us. 
502 - */ 503 - apic_wait_icr_idle(); 504 - #endif 495 + irq_work_queue(&__get_cpu_var(mce_irq_work)); 505 496 } 506 497 507 498 DEFINE_PER_CPU(unsigned, mce_poll_count); ··· 1409 1444 __mcheck_cpu_init_vendor(c); 1410 1445 __mcheck_cpu_init_timer(); 1411 1446 INIT_WORK(&__get_cpu_var(mce_work), mce_process_work); 1412 - 1447 + init_irq_work(&__get_cpu_var(mce_irq_work), &mce_irq_work_cb); 1413 1448 } 1414 1449 1415 1450 /*
-5
arch/x86/kernel/entry_64.S
··· 991 991 apicinterrupt THERMAL_APIC_VECTOR \ 992 992 thermal_interrupt smp_thermal_interrupt 993 993 994 - #ifdef CONFIG_X86_MCE 995 - apicinterrupt MCE_SELF_VECTOR \ 996 - mce_self_interrupt smp_mce_self_interrupt 997 - #endif 998 - 999 994 #ifdef CONFIG_SMP 1000 995 apicinterrupt CALL_FUNCTION_SINGLE_VECTOR \ 1001 996 call_function_single_interrupt smp_call_function_single_interrupt
-3
arch/x86/kernel/irqinit.c
··· 272 272 #ifdef CONFIG_X86_MCE_THRESHOLD 273 273 alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt); 274 274 #endif 275 - #if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_LOCAL_APIC) 276 - alloc_intr_gate(MCE_SELF_VECTOR, mce_self_interrupt); 277 - #endif 278 275 279 276 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC) 280 277 /* self generated IPI for local APIC timer */