include/linux/hardirq.h at v3.13
#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/preempt_mask.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <linux/vtime.h>


extern void synchronize_irq(unsigned int irq);

#if defined(CONFIG_TINY_RCU)

static inline void rcu_nmi_enter(void)
{
}

static inline void rcu_nmi_exit(void)
{
}

#else
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif

/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
#define __irq_enter()					\
	do {						\
		account_irq_enter_time(current);	\
		preempt_count_add(HARDIRQ_OFFSET);	\
		trace_hardirq_enter();			\
	} while (0)

/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
extern void irq_enter(void);

/*
 * Exit irq context without processing softirqs:
 */
#define __irq_exit()					\
	do {						\
		trace_hardirq_exit();			\
		account_irq_exit_time(current);		\
		preempt_count_sub(HARDIRQ_OFFSET);	\
	} while (0)

/*
 * Exit irq context and process softirqs if needed:
 */
extern void irq_exit(void);

#define nmi_enter()						\
	do {							\
		lockdep_off();					\
		ftrace_nmi_enter();				\
		BUG_ON(in_nmi());				\
		preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET);	\
		rcu_nmi_enter();				\
		trace_hardirq_enter();				\
	} while (0)

#define nmi_exit()						\
	do {							\
		trace_hardirq_exit();				\
		rcu_nmi_exit();					\
		BUG_ON(!in_nmi());				\
		preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET);	\
		ftrace_nmi_exit();				\
		lockdep_on();					\
	} while (0)

#endif /* LINUX_HARDIRQ_H */
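
For context, a minimal sketch of how these helpers are typically used: an architecture's interrupt entry path brackets handler dispatch with irq_enter()/irq_exit(), and an NMI handler brackets its body with nmi_enter()/nmi_exit(). This sketch is not part of the header above; the function names arch_do_IRQ() and arch_do_nmi() are hypothetical placeholders, while irq_enter(), irq_exit(), generic_handle_irq(), nmi_enter() and nmi_exit() are real kernel interfaces.

#include <linux/hardirq.h>
#include <linux/irq.h>

/* Hypothetical arch-level interrupt entry; arch_do_IRQ() is a
 * placeholder name, not taken from this header. */
void arch_do_IRQ(unsigned int irq)
{
	irq_enter();			/* enter hardirq context; on NO_HZ, jiffies are updated */
	generic_handle_irq(irq);	/* dispatch to the registered handler */
	irq_exit();			/* leave hardirq context and run pending softirqs */
}

/* Hypothetical NMI handler body; nmi_enter()/nmi_exit() adjust the
 * preempt count and the lockdep, ftrace and RCU state around it. */
void arch_do_nmi(void)
{
	nmi_enter();
	/* ... handle the NMI ... */
	nmi_exit();
}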