#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/thread_info.h>
#include <linux/linkage.h>
#include <linux/list.h>

#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
  extern void add_preempt_count(int val);
  extern void sub_preempt_count(int val);
#else
# define add_preempt_count(val)	do { preempt_count() += (val); } while (0)
# define sub_preempt_count(val)	do { preempt_count() -= (val); } while (0)
#endif

#define inc_preempt_count() add_preempt_count(1)
#define dec_preempt_count() sub_preempt_count(1)

#define preempt_count()	(current_thread_info()->preempt_count)

#ifdef CONFIG_PREEMPT

asmlinkage void preempt_schedule(void);

#define preempt_check_resched() \
do { \
	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
		preempt_schedule(); \
} while (0)

#ifdef CONFIG_CONTEXT_TRACKING

void preempt_schedule_context(void);

#define preempt_check_resched_context() \
do { \
	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
		preempt_schedule_context(); \
} while (0)
#else

#define preempt_check_resched_context() preempt_check_resched()

#endif /* CONFIG_CONTEXT_TRACKING */

#else /* !CONFIG_PREEMPT */

#define preempt_check_resched()			do { } while (0)
#define preempt_check_resched_context()		do { } while (0)

#endif /* CONFIG_PREEMPT */


#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
do { \
	inc_preempt_count(); \
	barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	dec_preempt_count(); \
} while (0)

#define preempt_enable_no_resched()	sched_preempt_enable_no_resched()

#define preempt_enable() \
do { \
	preempt_enable_no_resched(); \
	barrier(); \
	preempt_check_resched(); \
} while (0)
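
/*
 * Illustrative usage sketch, not part of this header: a caller brackets a
 * short, non-sleeping critical section with preempt_disable()/preempt_enable()
 * so the task cannot be preempted or migrated while it runs.  The helper
 * do_something_on() below is hypothetical.
 *
 *	int cpu;
 *
 *	preempt_disable();
 *	cpu = smp_processor_id();	... safe: no migration until enable
 *	do_something_on(cpu);		... must not sleep in here
 *	preempt_enable();		... a pending reschedule may run now
 */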

/* For debugging and tracer internals only! */
#define add_preempt_count_notrace(val)			\
	do { preempt_count() += (val); } while (0)
#define sub_preempt_count_notrace(val)			\
	do { preempt_count() -= (val); } while (0)
#define inc_preempt_count_notrace() add_preempt_count_notrace(1)
#define dec_preempt_count_notrace() sub_preempt_count_notrace(1)

#define preempt_disable_notrace() \
do { \
	inc_preempt_count_notrace(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	dec_preempt_count_notrace(); \
} while (0)

/* preempt_check_resched is OK to trace */
#define preempt_enable_notrace() \
do { \
	preempt_enable_no_resched_notrace(); \
	barrier(); \
	preempt_check_resched_context(); \
} while (0)

#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that we don't have things like get_user/put_user
 * that can cause faults and scheduling migrate into our preempt-protected
 * region.
 */
#define preempt_disable()			barrier()
#define sched_preempt_enable_no_resched()	barrier()
#define preempt_enable_no_resched()		barrier()
#define preempt_enable()			barrier()

#define preempt_disable_notrace()		barrier()
#define preempt_enable_no_resched_notrace()	barrier()
#define preempt_enable_notrace()		barrier()

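/*
 * Illustrative sketch, not part of this header, of why these must still be
 * compiler barriers: a user access such as get_user() may fault, sleep and
 * migrate the task, so the compiler must not be allowed to reorder it into
 * the region the caller believes is preempt-protected.  The names val, uptr
 * and update_local_state() are hypothetical.
 *
 *	preempt_disable();
 *	update_local_state();		... assumes we stay on this CPU
 *	preempt_enable();
 *	if (get_user(val, uptr))	... may fault, sleep and migrate
 *		return -EFAULT;
 *
 * If preempt_disable()/preempt_enable() expanded to nothing at all, the
 * compiler would be free to move the get_user() in between them; keeping
 * them as barrier() preserves the ordering the caller wrote.
 */
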
#endif /* CONFIG_PREEMPT_COUNT */

#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and out are called under different
 * contexts.  sched_out is called with rq lock held and irq disabled
 * while sched_in is called without rq lock and irq enabled.  This
 * difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};
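
/*
 * Illustrative sketch, not part of this header: a user normally embeds the
 * preempt_notifier in its own per-task state and recovers that state with
 * container_of() in the callbacks.  As documented above, sched_out runs with
 * the rq lock held and irqs disabled, so it must not sleep.  The names
 * struct my_vcpu, my_sched_in() and my_sched_out() are hypothetical.
 *
 *	struct my_vcpu {
 *		struct preempt_notifier pn;
 *		int last_cpu;
 *	};
 *
 *	static void my_sched_in(struct preempt_notifier *n, int cpu)
 *	{
 *		struct my_vcpu *v = container_of(n, struct my_vcpu, pn);
 *
 *		v->last_cpu = cpu;	... reload per-cpu state, etc.
 *	}
 *
 *	static void my_sched_out(struct preempt_notifier *n,
 *				 struct task_struct *next)
 *	{
 *		struct my_vcpu *v = container_of(n, struct my_vcpu, pn);
 *
 *		... save state; must not sleep (rq lock held, irqs off)
 *	}
 *
 *	static struct preempt_ops my_preempt_ops = {
 *		.sched_in  = my_sched_in,
 *		.sched_out = my_sched_out,
 *	};
 */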

void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

static inline void preempt_notifier_init(struct preempt_notifier *notifier,
				     struct preempt_ops *ops)
{
	INIT_HLIST_NODE(&notifier->link);
	notifier->ops = ops;
}
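
/*
 * Continuing the hypothetical sketch above: the notifier is typically
 * initialised and registered by the owning task for itself (as KVM does for
 * its vcpu threads), and unregistered before the state it points to is freed.
 *
 *	static void my_vcpu_attach(struct my_vcpu *v)
 *	{
 *		preempt_notifier_init(&v->pn, &my_preempt_ops);
 *		preempt_notifier_register(&v->pn);	... registers current
 *	}
 *
 *	static void my_vcpu_detach(struct my_vcpu *v)
 *	{
 *		preempt_notifier_unregister(&v->pn);
 *	}
 */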

#endif

#endif /* __LINUX_PREEMPT_H */