/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * Documentation/RCU
 */
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <asm/param.h> /* for HZ */

unsigned long get_state_synchronize_rcu(void);
unsigned long start_poll_synchronize_rcu(void);
bool poll_state_synchronize_rcu(unsigned long oldstate);
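
/*
 * A minimal usage sketch of the polled grace-period API declared above,
 * assuming a hypothetical unlink_thing() helper and pointer "p":
 *
 *	unsigned long cookie;
 *
 *	unlink_thing(p);			// make p unreachable to new readers
 *	cookie = start_poll_synchronize_rcu();	// start a GP, snapshot its state
 *	if (!poll_state_synchronize_rcu(cookie))
 *		cond_synchronize_rcu(cookie);	// block until that GP has elapsed
 *	kfree(p);				// no pre-existing readers remain
 */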

/*
 * On a !SMP, !PREEMPTION kernel, the mere fact that the caller may
 * legally sleep implies that no RCU reader can be running on the one
 * and only CPU, so any needed grace period has already elapsed.
 * Checking for an illegal sleeping context is all that remains.
 */
static inline void cond_synchronize_rcu(unsigned long oldstate)
{
	might_sleep();
}

/*
 * Tiny RCU grace periods cannot be further expedited, so the expedited
 * variants map directly onto the normal ones.
 */
static inline unsigned long start_poll_synchronize_rcu_expedited(void)
{
	return start_poll_synchronize_rcu();
}

static inline void cond_synchronize_rcu_expedited(unsigned long oldstate)
{
	cond_synchronize_rcu(oldstate);
}

extern void rcu_barrier(void);

static inline void synchronize_rcu_expedited(void)
{
	synchronize_rcu();
}
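
/*
 * A minimal sketch of the updater-side pattern these functions support,
 * assuming a hypothetical RCU-protected pointer "gp" guarded by gp_lock
 * and a hypothetical make_copy() helper:
 *
 *	old_p = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	new_p = make_copy(old_p);		// copy, then modify the copy
 *	rcu_assign_pointer(gp, new_p);		// publish the new version
 *	synchronize_rcu();			// wait out pre-existing readers
 *	kfree(old_p);				// now safe to reclaim the old one
 *
 * Substituting synchronize_rcu_expedited() yields identical behavior in
 * Tiny RCU, as the wrapper above shows.
 */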

/*
 * Add one more declaration of kvfree() here. It is not straightforward
 * to simply include <linux/mm.h>, where it is declared, because that
 * include causes many compile errors.
 */
extern void kvfree(const void *addr);

static inline void __kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	if (head) {
		call_rcu(head, func);
		return;
	}

	// Single-argument kvfree_rcu() call: "func" is really the pointer to free.
	might_sleep();
	synchronize_rcu();
	kvfree((void *) func);
}
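
/*
 * A minimal sketch of how the kvfree_rcu() wrapper lands in the function
 * above, assuming a hypothetical struct foo instance "p" whose rcu_head
 * member is named "rh":
 *
 *	kvfree_rcu(p, rh);	// double-argument form: head != NULL, so the
 *				// memory is freed after a later grace period
 *	kvfree_rcu(p);		// single-argument form: head == NULL, so this
 *				// sleeps in synchronize_rcu() and frees "p"
 */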

#ifdef CONFIG_KASAN_GENERIC
void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
#else
static inline void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	__kvfree_call_rcu(head, func);
}
#endif

void rcu_qs(void);

static inline void rcu_softirq_qs(void)
{
	rcu_qs();
}

#define rcu_note_context_switch(preempt) \
	do { \
		rcu_qs(); \
		rcu_tasks_qs(current, (preempt)); \
	} while (0)

static inline int rcu_needs_cpu(void)
{
	return 0;
}

/*
 * Take advantage of the fact that there is only one CPU, which
 * allows us to ignore virtualization-based context switches.
 */
static inline void rcu_virt_note_context_switch(int cpu) { }
static inline void rcu_cpu_stall_reset(void) { }
static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; }
static inline void rcu_irq_exit_check_preempt(void) { }
#define rcu_is_idle_cpu(cpu) \
	(is_idle_task(current) && !in_nmi() && !in_hardirq() && !in_serving_softirq())
static inline void exit_rcu(void) { }
static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
	return false;
}
static inline void rcu_preempt_deferred_qs(struct task_struct *t) { }
#ifdef CONFIG_SRCU
void rcu_scheduler_starting(void);
#else /* #ifdef CONFIG_SRCU */
static inline void rcu_scheduler_starting(void) { }
#endif /* #else #ifdef CONFIG_SRCU */
static inline void rcu_end_inkernel_boot(void) { }
static inline bool rcu_inkernel_boot_has_ended(void) { return true; }
static inline bool rcu_is_watching(void) { return true; }
static inline void rcu_momentary_dyntick_idle(void) { }
static inline void kfree_rcu_scheduler_running(void) { }
static inline bool rcu_gp_might_be_stalled(void) { return false; }

/* Avoid RCU read-side critical sections leaking across. */
static inline void rcu_all_qs(void) { barrier(); }

/* RCUtree hotplug events */
#define rcutree_prepare_cpu NULL
#define rcutree_online_cpu NULL
#define rcutree_offline_cpu NULL
#define rcutree_dead_cpu NULL
#define rcutree_dying_cpu NULL
static inline void rcu_cpu_starting(unsigned int cpu) { }

#endif /* __LINUX_TINY_H */