/*
 * linux/include/linux/nmi.h
 */
#ifndef LINUX_NMI_H
#define LINUX_NMI_H

#include <linux/sched.h>
#include <asm/irq.h>
#if defined(CONFIG_HAVE_NMI_WATCHDOG)
#include <asm/nmi.h>
#endif

#ifdef CONFIG_LOCKUP_DETECTOR
void lockup_detector_init(void);
#else
static inline void lockup_detector_init(void)
{
}
#endif

#ifdef CONFIG_SOFTLOCKUP_DETECTOR
extern void touch_softlockup_watchdog_sched(void);
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern unsigned int softlockup_panic;
extern int soft_watchdog_enabled;
extern atomic_t watchdog_park_in_progress;
#else
static inline void touch_softlockup_watchdog_sched(void)
{
}
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_softlockup_watchdog_sync(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
#endif

#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
#else
static inline void reset_hung_task_detector(void)
{
}
#endif

/*
 * The run state of the lockup detectors is controlled by the content of the
 * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
 * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
 *
 * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
 * are variables that are only used as an 'interface' between the parameters
 * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
 * 'watchdog_thresh' variable is handled differently because its value is not
 * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
 * is equal to zero.
 */
#define NMI_WATCHDOG_ENABLED_BIT	0
#define SOFT_WATCHDOG_ENABLED_BIT	1
#define NMI_WATCHDOG_ENABLED		(1 << NMI_WATCHDOG_ENABLED_BIT)
#define SOFT_WATCHDOG_ENABLED		(1 << SOFT_WATCHDOG_ENABLED_BIT)

#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void hardlockup_detector_disable(void);
extern unsigned int hardlockup_panic;
#else
static inline void hardlockup_detector_disable(void) {}
#endif

#if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
extern void arch_touch_nmi_watchdog(void);
#else
#if !defined(CONFIG_HAVE_NMI_WATCHDOG)
static inline void arch_touch_nmi_watchdog(void) {}
#endif
#endif

/**
 * touch_nmi_watchdog - restart NMI watchdog timeout.
 *
 * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
 * may be used to reset the timeout - for code which intentionally
 * disables interrupts for a long time. This call is stateless.
 */
static inline void touch_nmi_watchdog(void)
{
	arch_touch_nmi_watchdog();
	touch_softlockup_watchdog();
}

/*
 * Create trigger_all_cpu_backtrace() out of the arch-provided
 * base function. Return whether such support was available,
 * to allow calling code to fall back to some other mechanism:
 */
#ifdef arch_trigger_cpumask_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
	arch_trigger_cpumask_backtrace(cpu_online_mask, false);
	return true;
}

static inline bool trigger_allbutself_cpu_backtrace(void)
{
	arch_trigger_cpumask_backtrace(cpu_online_mask, true);
	return true;
}

static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	arch_trigger_cpumask_backtrace(mask, false);
	return true;
}

static inline bool trigger_single_cpu_backtrace(int cpu)
{
	arch_trigger_cpumask_backtrace(cpumask_of(cpu), false);
	return true;
}

/* generic implementation */
void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
				   bool exclude_self,
				   void (*raise)(cpumask_t *mask));
bool nmi_cpu_backtrace(struct pt_regs *regs);

#else
static inline bool trigger_all_cpu_backtrace(void)
{
	return false;
}
static inline bool trigger_allbutself_cpu_backtrace(void)
{
	return false;
}
static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	return false;
}
static inline bool trigger_single_cpu_backtrace(int cpu)
{
	return false;
}
#endif

#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
u64 hw_nmi_get_sample_period(int watchdog_thresh);
#endif

#ifdef CONFIG_LOCKUP_DETECTOR
extern int nmi_watchdog_enabled;
extern int watchdog_user_enabled;
extern int watchdog_thresh;
extern unsigned long watchdog_enabled;
extern struct cpumask watchdog_cpumask;
extern unsigned long *watchdog_cpumask_bits;
extern int __read_mostly watchdog_suspended;
#ifdef CONFIG_SMP
extern int sysctl_softlockup_all_cpu_backtrace;
extern int sysctl_hardlockup_all_cpu_backtrace;
#else
#define sysctl_softlockup_all_cpu_backtrace 0
#define sysctl_hardlockup_all_cpu_backtrace 0
#endif

#if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
    defined(CONFIG_HARDLOCKUP_DETECTOR)
void watchdog_update_hrtimer_threshold(u64 period);
#else
static inline void watchdog_update_hrtimer_threshold(u64 period) { }
#endif

extern bool is_hardlockup(void);
struct ctl_table;
extern int proc_watchdog(struct ctl_table *, int ,
			 void __user *, size_t *, loff_t *);
extern int proc_nmi_watchdog(struct ctl_table *, int ,
			     void __user *, size_t *, loff_t *);
extern int proc_soft_watchdog(struct ctl_table *, int ,
			      void __user *, size_t *, loff_t *);
extern int proc_watchdog_thresh(struct ctl_table *, int ,
				void __user *, size_t *, loff_t *);
extern int proc_watchdog_cpumask(struct ctl_table *, int,
				 void __user *, size_t *, loff_t *);
extern int lockup_detector_suspend(void);
extern void lockup_detector_resume(void);
#else
static inline int lockup_detector_suspend(void)
{
	return 0;
}

static inline void lockup_detector_resume(void)
{
}
#endif

#ifdef CONFIG_HAVE_ACPI_APEI_NMI
#include <asm/nmi.h>
#endif

#endif
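The comment above the NMI_WATCHDOG_ENABLED_BIT defines describes how 'watchdog_enabled' packs one run-state bit per detector. A minimal sketch of how those bits decompose, assuming CONFIG_LOCKUP_DETECTOR (which is what exposes 'watchdog_enabled' in this header); the my_*_running() helpers are hypothetical, and in-tree this state is owned by kernel/watchdog.c and driven through the proc_*() handlers declared above:

#include <linux/nmi.h>

#ifdef CONFIG_LOCKUP_DETECTOR
static bool my_hard_watchdog_running(void)	/* hypothetical helper */
{
	return watchdog_enabled & NMI_WATCHDOG_ENABLED;		/* bit 0 */
}

static bool my_soft_watchdog_running(void)	/* hypothetical helper */
{
	return watchdog_enabled & SOFT_WATCHDOG_ENABLED;	/* bit 1 */
}
#endif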
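The touch_nmi_watchdog() kerneldoc says it exists for code that intentionally keeps interrupts off longer than the watchdog thresholds allow. A hypothetical sketch of such a caller (MY_NR_QUEUES, my_flush_one_queue() and my_device_flush_all() are invented for illustration); note the inline above resets both the hard (NMI) and soft lockup timeouts in one call:

#include <linux/nmi.h>

#define MY_NR_QUEUES 8				/* hypothetical device detail */

static void my_flush_one_queue(int q)		/* hypothetical: may run for ms */
{
	/* ... hardware drain for queue 'q' elided ... */
}

static void my_device_flush_all(void)
{
	int i;

	/*
	 * Runs with interrupts disabled far longer than the watchdog
	 * thresholds permit, so pet the detectors on every iteration.
	 */
	for (i = 0; i < MY_NR_QUEUES; i++) {
		my_flush_one_queue(i);
		touch_nmi_watchdog();	/* reset hard + soft lockup timeouts */
	}
}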
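The comment above the trigger_*() wrappers notes that they report whether arch support was available so that callers can fall back to some other mechanism. A hedged sketch of that contract, with a hypothetical my_report_stall() as the caller:

#include <linux/nmi.h>
#include <linux/printk.h>

static void my_report_stall(void)		/* hypothetical diagnostic hook */
{
	/*
	 * trigger_all_cpu_backtrace() returns false when the architecture
	 * does not provide arch_trigger_cpumask_backtrace(); degrade to a
	 * backtrace of the current CPU only.
	 */
	if (!trigger_all_cpu_backtrace())
		dump_stack();
}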