#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/preempt.h>
#include <linux/smp_lock.h>
#include <linux/lockdep.h>
#include <asm/hardirq.h>
#include <asm/system.h>

/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count can be overridden per architecture, the default is:
 *
 * - bits 16-27 are the hardirq count (max # of hardirqs: 4096)
 * - ( bit 28 is the PREEMPT_ACTIVE flag. )
 *
 * PREEMPT_MASK: 0x000000ff
 * SOFTIRQ_MASK: 0x0000ff00
 * HARDIRQ_MASK: 0x0fff0000
 */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8

#ifndef HARDIRQ_BITS
#define HARDIRQ_BITS	12

#ifndef MAX_HARDIRQS_PER_CPU
#define MAX_HARDIRQS_PER_CPU NR_IRQS
#endif

/*
 * The hardirq mask has to be large enough to have space for potentially
 * all IRQ sources in the system nesting on a single CPU.
 */
#if (1 << HARDIRQ_BITS) < MAX_HARDIRQS_PER_CPU
# error HARDIRQ_BITS is too low!
#endif
#endif

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x))-1)

#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)

#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)

#if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS))
#error PREEMPT_ACTIVE is too low!
#endif

#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))

/*
 * Are we doing bottom half or hardware interrupt processing?
 * Are we in a softirq context? Interrupt context?
 */
#define in_irq()		(hardirq_count())
#define in_softirq()		(softirq_count())
#define in_interrupt()		(irq_count())

#define in_atomic()		((preempt_count() & ~PREEMPT_ACTIVE) != 0)
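/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * how a hypothetical helper could use the context predicates above. The
 * function name is made up, and <linux/kernel.h> is assumed to be included
 * for printk(); the block is kept under #if 0 so it has no effect here.
 */
#if 0
static void example_report_context(void)
{
	if (in_irq())
		printk(KERN_DEBUG "hardirq (hardware interrupt) context\n");
	else if (in_softirq())
		printk(KERN_DEBUG "softirq / bottom-half context\n");
	else if (in_atomic())
		printk(KERN_DEBUG "atomic, but not interrupt, context\n");
	else
		printk(KERN_DEBUG "ordinary process context\n");
}
#endif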
#ifdef CONFIG_PREEMPT
# define PREEMPT_CHECK_OFFSET 1
#else
# define PREEMPT_CHECK_OFFSET 0
#endif

/*
 * Check whether we were atomic before we did preempt_disable():
 * (used by the scheduler)
 */
#define in_atomic_preempt_off() \
		((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)

#ifdef CONFIG_PREEMPT
# define preemptible()	(preempt_count() == 0 && !irqs_disabled())
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
#else
# define preemptible()	0
# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
#endif

#ifdef CONFIG_SMP
extern void synchronize_irq(unsigned int irq);
#else
# define synchronize_irq(irq)	barrier()
#endif

struct task_struct;

#ifndef CONFIG_VIRT_CPU_ACCOUNTING
static inline void account_system_vtime(struct task_struct *tsk)
{
}
#endif

/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
#define __irq_enter()					\
	do {						\
		account_system_vtime(current);		\
		add_preempt_count(HARDIRQ_OFFSET);	\
		trace_hardirq_enter();			\
	} while (0)

/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
extern void irq_enter(void);

/*
 * Exit irq context without processing softirqs:
 */
#define __irq_exit()					\
	do {						\
		trace_hardirq_exit();			\
		account_system_vtime(current);		\
		sub_preempt_count(HARDIRQ_OFFSET);	\
	} while (0)

/*
 * Exit irq context and process softirqs if needed:
 */
extern void irq_exit(void);

#define nmi_enter()		do { lockdep_off(); __irq_enter(); } while (0)
#define nmi_exit()		do { __irq_exit(); lockdep_on(); } while (0)

#endif /* LINUX_HARDIRQ_H */
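/*
 * Illustrative note (editorial addition, not part of the original header):
 * an architecture's low-level interrupt path is expected to bracket handler
 * dispatch with irq_enter()/irq_exit(), roughly:
 *
 *	irq_enter();
 *	... dispatch the handler for this irq ...
 *	irq_exit();
 *
 * Each __irq_enter() adds HARDIRQ_OFFSET (0x10000 with the default layout
 * above) to the preemption counter, so in_irq() and in_interrupt() stay
 * non-zero until the matching exit. NMI handlers use nmi_enter()/nmi_exit()
 * instead, which also turn lockdep off and back on around the NMI window.
 */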