/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <asm/current.h>
#include <linux/thread_info.h>
#include <asm/atomic_ops.h>
#include <asm/cmpxchg.h>
#include <asm/march.h>

/* We use the MSB mostly because it's available */
#define PREEMPT_NEED_RESCHED	0x80000000

/*
 * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
 * that a decrement hitting 0 means we can and should reschedule.
 */
#define PREEMPT_ENABLED	(0 + PREEMPT_NEED_RESCHED)

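/*
 * Illustration (added sketch, not from the kernel source; values follow
 * directly from the definitions above). With the inverted bit, the
 * interesting values of get_lowcore()->preempt_count are:
 *
 *	0x80000000	preemption enabled, no reschedule pending
 *	0x80000001	preempt_disable()d once, no reschedule pending
 *	0x00000001	preempt_disable()d once, reschedule pending
 *	0x00000000	reschedule pending and allowed: the single value
 *			a decrement can test for with one branch
 */
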
/*
 * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
 * that think a non-zero value indicates we cannot preempt.
 */
static __always_inline int preempt_count(void)
{
	return READ_ONCE(get_lowcore()->preempt_count) & ~PREEMPT_NEED_RESCHED;
}

static __always_inline void preempt_count_set(int pc)
{
	int old, new;

	old = READ_ONCE(get_lowcore()->preempt_count);
	do {
		new = (old & PREEMPT_NEED_RESCHED) | (pc & ~PREEMPT_NEED_RESCHED);
	} while (!arch_try_cmpxchg(&get_lowcore()->preempt_count, &old, new));
}

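/*
 * Sketch (added for illustration, hypothetical values): preempt_count_set()
 * must preserve the inverted PREEMPT_NEED_RESCHED bit while replacing the
 * count, e.g. for old = 0x80000002 and pc = 0:
 *
 *	new = (0x80000002 & 0x80000000) | (0 & 0x7fffffff) = 0x80000000
 *
 * i.e. the count is reset but "no reschedule pending" survives. The
 * arch_try_cmpxchg() loop retries if the word changed under us in between.
 */
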
/*
 * We fold the NEED_RESCHED bit into the preempt count such that
 * preempt_enable() can decrement and test for needing to reschedule with a
 * short instruction sequence.
 *
 * We invert the actual bit, so that when the decrement hits 0 we know we both
 * need to resched (the bit is cleared) and can resched (no preempt count).
 */

static __always_inline void set_preempt_need_resched(void)
{
	__atomic_and(~PREEMPT_NEED_RESCHED, &get_lowcore()->preempt_count);
}

static __always_inline void clear_preempt_need_resched(void)
{
	__atomic_or(PREEMPT_NEED_RESCHED, &get_lowcore()->preempt_count);
}

static __always_inline bool test_preempt_need_resched(void)
{
	return !(READ_ONCE(get_lowcore()->preempt_count) & PREEMPT_NEED_RESCHED);
}

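/*
 * Illustration of the inverted helpers above (added sketch, hypothetical
 * values):
 *
 *	count = 0x80000001;		// disabled once, no resched pending
 *	set_preempt_need_resched();	// AND ~bit  -> count == 0x00000001
 *	test_preempt_need_resched();	// bit clear -> returns true
 *	clear_preempt_need_resched();	// OR bit    -> count == 0x80000001
 *
 * "set" clears the hardware bit and "clear" sets it, because the bit is
 * stored inverted.
 */
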
static __always_inline void __preempt_count_add(int val)
{
	/*
	 * With some obscure config options and CONFIG_PROFILE_ALL_BRANCHES
	 * enabled, gcc 12 fails to handle __builtin_constant_p().
	 */
	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES)) {
		if (__builtin_constant_p(val) && (val >= -128) && (val <= 127)) {
			__atomic_add_const(val, &get_lowcore()->preempt_count);
			return;
		}
	}
	__atomic_add(val, &get_lowcore()->preempt_count);
}

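/*
 * Note (added; an assumption about the s390 backend, not stated here): the
 * -128..127 guard matches a signed 8-bit immediate, so __atomic_add_const()
 * can presumably use a single add-immediate-to-storage instruction for
 * compile-time constants such as the +1/-1 of preempt_disable() and
 * preempt_enable(); anything else falls back to __atomic_add(). E.g.:
 *
 *	__preempt_count_add(1);		// constant, in range: fast path
 *	__preempt_count_add(val);	// runtime value: __atomic_add()
 */
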
static __always_inline void __preempt_count_sub(int val)
{
	__preempt_count_add(-val);
}

/*
 * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to
 * reschedule, a decrement which hits zero means we have no preempt_count and
 * should reschedule.
 */
static __always_inline bool __preempt_count_dec_and_test(void)
{
	return __atomic_add_const_and_test(-1, &get_lowcore()->preempt_count);
}

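/*
 * Worked example (added sketch, hypothetical sequence): starting from
 * PREEMPT_ENABLED, with a reschedule request arriving inside a
 * preempt-disabled section:
 *
 *	0x80000000	PREEMPT_ENABLED
 *	0x80000001	after __preempt_count_add(1)
 *	0x00000001	after set_preempt_need_resched()
 *	0x00000000	__preempt_count_dec_and_test() hits zero -> true,
 *			so preempt_enable() knows to call __preempt_schedule()
 */
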
/*
 * Returns true when we need to resched and can (barring IRQ state).
 */
static __always_inline bool should_resched(int preempt_offset)
{
	return unlikely(READ_ONCE(get_lowcore()->preempt_count) == preempt_offset);
}

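/*
 * Illustration (added sketch, hypothetical values): because the resched bit
 * is inverted, a single equality test covers both conditions at once:
 *
 *	count = 0x00000000; should_resched(0)	-> true  (resched pending, count 0)
 *	count = 0x80000000; should_resched(0)	-> false (no resched pending)
 *	count = 0x00000001; should_resched(1)	-> true  (resched at expected offset)
 */
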
#define init_task_preempt_count(p)	do { } while (0)
/* Deferred to CPU bringup time */
#define init_idle_preempt_count(p, cpu)	do { } while (0)

#ifdef CONFIG_PREEMPTION

void preempt_schedule(void);
void preempt_schedule_notrace(void);

#ifdef CONFIG_PREEMPT_DYNAMIC

void dynamic_preempt_schedule(void);
void dynamic_preempt_schedule_notrace(void);
#define __preempt_schedule()		dynamic_preempt_schedule()
#define __preempt_schedule_notrace()	dynamic_preempt_schedule_notrace()

#else /* CONFIG_PREEMPT_DYNAMIC */

#define __preempt_schedule()		preempt_schedule()
#define __preempt_schedule_notrace()	preempt_schedule_notrace()

#endif /* CONFIG_PREEMPT_DYNAMIC */

#endif /* CONFIG_PREEMPTION */

#endif /* __ASM_PREEMPT_H */