/* Snapshot of include/linux/nmi.h as of Linux v4.3 (source-viewer header removed) */
1/* 2 * linux/include/linux/nmi.h 3 */ 4#ifndef LINUX_NMI_H 5#define LINUX_NMI_H 6 7#include <linux/sched.h> 8#include <asm/irq.h> 9 10/** 11 * touch_nmi_watchdog - restart NMI watchdog timeout. 12 * 13 * If the architecture supports the NMI watchdog, touch_nmi_watchdog() 14 * may be used to reset the timeout - for code which intentionally 15 * disables interrupts for a long time. This call is stateless. 16 */ 17#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR) 18#include <asm/nmi.h> 19extern void touch_nmi_watchdog(void); 20#else 21static inline void touch_nmi_watchdog(void) 22{ 23 touch_softlockup_watchdog(); 24} 25#endif 26 27#if defined(CONFIG_HARDLOCKUP_DETECTOR) 28extern void hardlockup_detector_disable(void); 29#else 30static inline void hardlockup_detector_disable(void) {} 31#endif 32 33/* 34 * Create trigger_all_cpu_backtrace() out of the arch-provided 35 * base function. Return whether such support was available, 36 * to allow calling code to fall back to some other mechanism: 37 */ 38#ifdef arch_trigger_all_cpu_backtrace 39static inline bool trigger_all_cpu_backtrace(void) 40{ 41 arch_trigger_all_cpu_backtrace(true); 42 43 return true; 44} 45static inline bool trigger_allbutself_cpu_backtrace(void) 46{ 47 arch_trigger_all_cpu_backtrace(false); 48 return true; 49} 50 51/* generic implementation */ 52void nmi_trigger_all_cpu_backtrace(bool include_self, 53 void (*raise)(cpumask_t *mask)); 54bool nmi_cpu_backtrace(struct pt_regs *regs); 55 56#else 57static inline bool trigger_all_cpu_backtrace(void) 58{ 59 return false; 60} 61static inline bool trigger_allbutself_cpu_backtrace(void) 62{ 63 return false; 64} 65#endif 66 67#ifdef CONFIG_LOCKUP_DETECTOR 68int hw_nmi_is_cpu_stuck(struct pt_regs *); 69u64 hw_nmi_get_sample_period(int watchdog_thresh); 70extern int nmi_watchdog_enabled; 71extern int soft_watchdog_enabled; 72extern int watchdog_user_enabled; 73extern int watchdog_thresh; 74extern unsigned long *watchdog_cpumask_bits; 
75extern int sysctl_softlockup_all_cpu_backtrace; 76struct ctl_table; 77extern int proc_watchdog(struct ctl_table *, int , 78 void __user *, size_t *, loff_t *); 79extern int proc_nmi_watchdog(struct ctl_table *, int , 80 void __user *, size_t *, loff_t *); 81extern int proc_soft_watchdog(struct ctl_table *, int , 82 void __user *, size_t *, loff_t *); 83extern int proc_watchdog_thresh(struct ctl_table *, int , 84 void __user *, size_t *, loff_t *); 85extern int proc_watchdog_cpumask(struct ctl_table *, int, 86 void __user *, size_t *, loff_t *); 87extern int lockup_detector_suspend(void); 88extern void lockup_detector_resume(void); 89#else 90static inline int lockup_detector_suspend(void) 91{ 92 return 0; 93} 94 95static inline void lockup_detector_resume(void) 96{ 97} 98#endif 99 100#ifdef CONFIG_HAVE_ACPI_APEI_NMI 101#include <asm/nmi.h> 102#endif 103 104#endif