#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/preempt_mask.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <linux/vtime.h>
#include <asm/hardirq.h>


extern void synchronize_irq(unsigned int irq);

#if defined(CONFIG_TINY_RCU)

static inline void rcu_nmi_enter(void)
{
}

static inline void rcu_nmi_exit(void)
{
}

#else
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif

/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
#define __irq_enter()					\
	do {						\
		account_irq_enter_time(current);	\
		preempt_count_add(HARDIRQ_OFFSET);	\
		trace_hardirq_enter();			\
	} while (0)

/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
extern void irq_enter(void);

/*
 * Exit irq context without processing softirqs:
 */
#define __irq_exit()					\
	do {						\
		trace_hardirq_exit();			\
		account_irq_exit_time(current);		\
		preempt_count_sub(HARDIRQ_OFFSET);	\
	} while (0)

/*
 * Exit irq context and process softirqs if needed:
 */
extern void irq_exit(void);

#define nmi_enter()						\
	do {							\
		lockdep_off();					\
		ftrace_nmi_enter();				\
		BUG_ON(in_nmi());				\
		preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET);	\
		rcu_nmi_enter();				\
		trace_hardirq_enter();				\
	} while (0)

#define nmi_exit()						\
	do {							\
		trace_hardirq_exit();				\
		rcu_nmi_exit();					\
		BUG_ON(!in_nmi());				\
		preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET);	\
		ftrace_nmi_exit();				\
		lockdep_on();					\
	} while (0)

#endif /* LINUX_HARDIRQ_H */
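
/*
 * Illustrative sketch (not part of the original header): how an
 * architecture's NMI entry path is expected to bracket its handler
 * with nmi_enter()/nmi_exit() so that in_nmi() reports true, RCU and
 * ftrace are told about the NMI, and the preempt count is balanced on
 * exit.  The function name and body below are hypothetical; the real
 * callers live in arch code (for example the x86 NMI entry path).
 * Guarded with #if 0 so it never affects compilation.
 */
#if 0	/* example only */
static void example_arch_nmi_handler(struct pt_regs *regs)
{
	nmi_enter();	/* adds NMI_OFFSET + HARDIRQ_OFFSET, enters RCU NMI mode */

	/* service the NMI source here; in_nmi() is now true */

	nmi_exit();	/* unwinds the steps of nmi_enter() in reverse order */
}
#endif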