Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/linkage.h>
#include <linux/list.h>

/*
 * We use the MSB mostly because it's available; see <linux/preempt_mask.h>
 * for the other bits -- we can't include that header due to inclusion hell.
 */
#define PREEMPT_NEED_RESCHED	0x80000000

#include <asm/preempt.h>

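/*
 * Illustrative sketch (an assumption, not the actual <asm/preempt.h>
 * code, which is architecture specific): when an architecture stores
 * PREEMPT_NEED_RESCHED inverted inside its preempt counter, a single
 * decrement-and-test-for-zero answers both "did the nesting count reach
 * zero?" and "was a reschedule requested?" in one operation:
 *
 *	static __always_inline bool my_preempt_count_dec_and_test(void)
 *	{
 *		return !--*my_preempt_count_ptr();
 *	}
 *
 * my_preempt_count_dec_and_test() and my_preempt_count_ptr() are
 * hypothetical names; only PREEMPT_NEED_RESCHED itself comes from this
 * header.
 */
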
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
#else
#define preempt_count_add(val)	__preempt_count_add(val)
#define preempt_count_sub(val)	__preempt_count_sub(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif

#define __preempt_count_inc() __preempt_count_add(1)
#define __preempt_count_dec() __preempt_count_sub(1)

#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)

#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
do { \
	preempt_count_inc(); \
	barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_no_resched() sched_preempt_enable_no_resched()

#ifdef CONFIG_PREEMPT
#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)

#define preempt_check_resched() \
do { \
	if (should_resched()) \
		__preempt_schedule(); \
} while (0)

#else
#define preempt_enable() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)
#define preempt_check_resched() do { } while (0)
#endif

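/*
 * Usage sketch (illustrative, not part of the original header): the
 * canonical pattern brackets a short per-CPU critical section so the
 * task can neither be preempted nor migrated while inside it.
 * my_event_count and my_count_event() are hypothetical names;
 * DEFINE_PER_CPU and __this_cpu_inc() come from <linux/percpu.h>.
 *
 *	static DEFINE_PER_CPU(unsigned long, my_event_count);
 *
 *	static void my_count_event(void)
 *	{
 *		preempt_disable();
 *		__this_cpu_inc(my_event_count);
 *		preempt_enable();
 *	}
 *
 * preempt_enable() is where a pending reschedule, if any, is acted upon
 * via __preempt_schedule() when CONFIG_PREEMPT is set.
 */
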
#define preempt_disable_notrace() \
do { \
	__preempt_count_inc(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#ifdef CONFIG_PREEMPT

#ifndef CONFIG_CONTEXT_TRACKING
#define __preempt_schedule_context() __preempt_schedule()
#endif

#define preempt_enable_notrace() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule_context(); \
} while (0)
#else
#define preempt_enable_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)
#endif

#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that things like get_user()/put_user(), which can
 * fault and schedule, do not migrate into our preempt-protected region
 * (an illustrative sketch of this hazard follows the #endif below).
 */
#define preempt_disable()			barrier()
#define sched_preempt_enable_no_resched()	barrier()
#define preempt_enable_no_resched()		barrier()
#define preempt_enable()			barrier()
#define preempt_check_resched()			do { } while (0)

#define preempt_disable_notrace()		barrier()
#define preempt_enable_no_resched_notrace()	barrier()
#define preempt_enable_notrace()		barrier()

#endif /* CONFIG_PREEMPT_COUNT */
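
/*
 * Illustrative sketch of the hazard described in the comment above
 * (hypothetical code, not from the kernel): even when preempt_disable()
 * does not touch a counter, it must still be a compiler barrier so that
 * memory accesses cannot be reordered across it.
 *
 *	preempt_disable();
 *	p = this_cpu_ptr(&my_state);
 *	p->hits++;
 *	preempt_enable();
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *
 * Without barrier() in the stubs above, the compiler could sink the
 * per-CPU accesses past preempt_enable() or hoist later code into the
 * region, letting a call that can fault and schedule (get_user() here)
 * interleave with the supposedly preempt-protected accesses.  my_state,
 * p, val and uptr are hypothetical; this_cpu_ptr() and get_user() are
 * existing kernel helpers.
 */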

#ifdef MODULE
/*
 * Modules have no business playing preemption tricks.
 */
#undef sched_preempt_enable_no_resched
#undef preempt_enable_no_resched
#undef preempt_enable_no_resched_notrace
#undef preempt_check_resched
#endif

#define preempt_set_need_resched() \
do { \
	set_preempt_need_resched(); \
} while (0)
#define preempt_fold_need_resched() \
do { \
	if (tif_need_resched()) \
		set_preempt_need_resched(); \
} while (0)

#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and sched_out are called in different
 * contexts: sched_out is called with the rq lock held and interrupts
 * disabled, while sched_in is called without the rq lock and with
 * interrupts enabled.  This difference is intentional and depended upon
 * by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};

void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

static inline void preempt_notifier_init(struct preempt_notifier *notifier,
					 struct preempt_ops *ops)
{
	INIT_HLIST_NODE(&notifier->link);
	notifier->ops = ops;
}
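
/*
 * Usage sketch (illustrative, not part of the original header): a
 * preempt_notifier is typically embedded in a larger per-task object and
 * recovered with container_of() from the callbacks.  struct my_vcpu,
 * my_sched_in() and my_sched_out() are hypothetical names; the
 * registration calls are the ones declared above, and
 * preempt_notifier_register() must run in the task that the notifier is
 * meant to track.
 *
 *	struct my_vcpu {
 *		struct preempt_notifier pn;
 *		int last_cpu;
 *	};
 *
 *	static void my_sched_in(struct preempt_notifier *n, int cpu)
 *	{
 *		struct my_vcpu *v = container_of(n, struct my_vcpu, pn);
 *
 *		v->last_cpu = cpu;
 *	}
 *
 *	static void my_sched_out(struct preempt_notifier *n,
 *				 struct task_struct *next)
 *	{
 *		struct my_vcpu *v = container_of(n, struct my_vcpu, pn);
 *
 *		v->last_cpu = -1;
 *	}
 *
 *	static struct preempt_ops my_preempt_ops = {
 *		.sched_in  = my_sched_in,
 *		.sched_out = my_sched_out,
 *	};
 *
 *	preempt_notifier_init(&v->pn, &my_preempt_ops);
 *	preempt_notifier_register(&v->pn);
 */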

#endif

#endif /* __LINUX_PREEMPT_H */