linux/preempt_mask.h at v3.12
#ifndef LINUX_PREEMPT_MASK_H
#define LINUX_PREEMPT_MASK_H

#include <linux/preempt.h>
#include <asm/hardirq.h>

/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count can in theory reach the same as NR_IRQS.
 * In reality, the number of nested IRQS is limited to the stack
 * size as well. For archs with over 1000 IRQS it is not practical
 * to expect that they will all nest. We give a max of 10 bits for
 * hardirq nesting. An arch may choose to give less than 10 bits.
 * m68k expects it to be 8.
 *
 * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
 * - bit 26 is the NMI_MASK
 * - bit 27 is the PREEMPT_ACTIVE flag
 *
 * PREEMPT_MASK: 0x000000ff
 * SOFTIRQ_MASK: 0x0000ff00
 * HARDIRQ_MASK: 0x03ff0000
 *     NMI_MASK: 0x04000000
 */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define NMI_BITS	1

#define MAX_HARDIRQ_BITS 10

#ifndef HARDIRQ_BITS
# define HARDIRQ_BITS	MAX_HARDIRQ_BITS
#endif

#if HARDIRQ_BITS > MAX_HARDIRQ_BITS
#error HARDIRQ_BITS too high!
#endif

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x))-1)

#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK	(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)

#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET	(1UL << NMI_SHIFT)

#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)

#ifndef PREEMPT_ACTIVE
#define PREEMPT_ACTIVE_BITS	1
#define PREEMPT_ACTIVE_SHIFT	(NMI_SHIFT + NMI_BITS)
#define PREEMPT_ACTIVE	(__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
#endif

#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
#error PREEMPT_ACTIVE is too low!
#endif

#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
				 | NMI_MASK))

/*
 * Are we doing bottom half or hardware interrupt processing?
 * Are we in a softirq context? Interrupt context?
 * in_softirq - Are we currently processing softirq or have bh disabled?
 * in_serving_softirq - Are we currently processing softirq?
 */
#define in_irq()		(hardirq_count())
#define in_softirq()		(softirq_count())
#define in_interrupt()		(irq_count())
#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)

/*
 * Are we in NMI context?
 */
#define in_nmi()	(preempt_count() & NMI_MASK)

#if defined(CONFIG_PREEMPT_COUNT)
# define PREEMPT_CHECK_OFFSET 1
#else
# define PREEMPT_CHECK_OFFSET 0
#endif
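/*
 * Worked example (illustrative note, not part of the original file):
 * with the default layout above, irq_enter() ends up adding
 * HARDIRQ_OFFSET (0x00010000) to preempt_count(), __do_softirq()
 * accounts with SOFTIRQ_OFFSET (0x00000100), and local_bh_disable()
 * adds SOFTIRQ_DISABLE_OFFSET (0x00000200). That is why in_softirq()
 * is also true when bottom halves are merely disabled, while
 * in_serving_softirq() tests SOFTIRQ_OFFSET specifically. A
 * hypothetical preempt_count() of 0x00010102 therefore decodes as:
 * one hardirq in progress, a softirq being served, and a preemption
 * depth of 2; in_irq(), in_serving_softirq(), in_interrupt() and
 * in_atomic() are all non-zero for such a value.
 */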
/*
 * Are we running in atomic context? WARNING: this macro cannot
 * always detect atomic context; in particular, it cannot know about
 * held spinlocks in non-preemptible kernels. Thus it should not be
 * used in the general case to determine whether sleeping is possible.
 * Do not use in_atomic() in driver code.
 */
#define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != 0)

/*
 * Check whether we were atomic before we did preempt_disable():
 * (used by the scheduler, *after* releasing the kernel lock)
 */
#define in_atomic_preempt_off() \
		((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)

#ifdef CONFIG_PREEMPT_COUNT
# define preemptible()	(preempt_count() == 0 && !irqs_disabled())
#else
# define preemptible()	0
#endif

#endif /* LINUX_PREEMPT_MASK_H */
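/*
 * Usage sketch (illustrative note, not part of the original file):
 * code that can run in both process and interrupt context sometimes
 * selects its allocation mode from these predicates, e.g.
 *
 *	gfp_t gfp = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
 *	buf = kmalloc(len, gfp);
 *
 * Per the warning above, this idiom is unreliable in the general
 * case (in_atomic() and friends cannot see spinlocks held on
 * non-preemptible kernels), so kernel style prefers passing the
 * required gfp_t down from the caller explicitly.
 */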