/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion,
 *	tiny variant.
 *
 * Copyright (C) IBM Corporation, 2017
 *
 * Author: Paul McKenney <paulmck@linux.ibm.com>
 */

#ifndef _LINUX_SRCU_TINY_H
#define _LINUX_SRCU_TINY_H

#include <linux/swait.h>

struct srcu_struct {
	short srcu_lock_nesting[2];	/* srcu_read_lock() nesting depth. */
	u8 srcu_gp_running;		/* GP workqueue running? */
	u8 srcu_gp_waiting;		/* GP waiting for readers? */
	unsigned long srcu_idx;		/* Current reader array element in bit 0x2. */
	unsigned long srcu_idx_max;	/* Furthest future srcu_idx request. */
	struct swait_queue_head srcu_wq;
					/* Last srcu_read_unlock() wakes GP. */
	struct rcu_head *srcu_cb_head;	/* Pending callbacks: Head. */
	struct rcu_head **srcu_cb_tail;	/* Pending callbacks: Tail. */
	struct work_struct srcu_work;	/* For driving grace periods. */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
};
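
/*
 * Illustrative sketch (not part of this header): an srcu_struct may also be
 * embedded in another structure and initialized at runtime with
 * init_srcu_struct()/cleanup_srcu_struct() from <linux/srcu.h>.  "my_dev"
 * is a hypothetical structure name.
 *
 *	struct my_dev {
 *		struct srcu_struct srcu;
 *	};
 *
 *	static int my_dev_setup(struct my_dev *md)
 *	{
 *		return init_srcu_struct(&md->srcu);	// Returns 0 on success.
 *	}
 *
 *	static void my_dev_teardown(struct my_dev *md)
 *	{
 *		cleanup_srcu_struct(&md->srcu);
 *	}
 */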

void srcu_drive_gp(struct work_struct *wp);

#define __SRCU_STRUCT_INIT(name, __ignored, ___ignored, ____ignored)	\
{									\
	.srcu_wq = __SWAIT_QUEUE_HEAD_INITIALIZER(name.srcu_wq),	\
	.srcu_cb_tail = &name.srcu_cb_head,				\
	.srcu_work = __WORK_INITIALIZER(name.srcu_work, srcu_drive_gp),	\
	__SRCU_DEP_MAP_INIT(name)					\
}

/*
 * This odd _STATIC_ arrangement is needed for API compatibility with
 * Tree SRCU, which needs some per-CPU data.
 */
#define DEFINE_SRCU(name) \
	struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name, name)
#define DEFINE_STATIC_SRCU(name) \
	static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name, name)
#define DEFINE_SRCU_FAST(name) DEFINE_SRCU(name)
#define DEFINE_STATIC_SRCU_FAST(name) \
	static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name, name)
#define DEFINE_SRCU_FAST_UPDOWN(name) DEFINE_SRCU(name)
#define DEFINE_STATIC_SRCU_FAST_UPDOWN(name) \
	static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name, name)
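
/*
 * Illustrative sketch (not part of this header): defining an srcu_struct at
 * compile time.  DEFINE_STATIC_SRCU() gives the variable static linkage; in
 * Tiny SRCU both forms expand to __SRCU_STRUCT_INIT() alone, whereas Tree
 * SRCU also sets up per-CPU data.  "my_srcu" is a hypothetical name.
 *
 *	DEFINE_STATIC_SRCU(my_srcu);
 *
 * The resulting srcu_struct is immediately usable with srcu_read_lock(),
 * srcu_read_unlock(), and synchronize_srcu().
 */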

// Dummy structure for srcu_notifier_head.
struct srcu_usage { };
#define __SRCU_USAGE_INIT(name) { }
#define __init_srcu_struct_fast __init_srcu_struct
#define __init_srcu_struct_fast_updown __init_srcu_struct
#ifndef CONFIG_DEBUG_LOCK_ALLOC
#define init_srcu_struct_fast init_srcu_struct
#define init_srcu_struct_fast_updown init_srcu_struct
#endif // #ifndef CONFIG_DEBUG_LOCK_ALLOC

void synchronize_srcu(struct srcu_struct *ssp);

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.  Can be invoked from irq/bh handlers, but the matching
 * __srcu_read_unlock() must be in the same handler instance.  Returns an
 * index that must be passed to the matching srcu_read_unlock().
 */
static inline int __srcu_read_lock(struct srcu_struct *ssp)
{
	int idx;

	preempt_disable();  // Needed for PREEMPT_LAZY
	idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
	WRITE_ONCE(ssp->srcu_lock_nesting[idx], READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1);
	preempt_enable();
	return idx;
}
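
/*
 * Illustrative sketch (not part of this header): the usual reader/updater
 * pattern layered on this primitive.  Code normally uses srcu_read_lock(),
 * srcu_dereference(), and srcu_read_unlock() from <linux/srcu.h> rather
 * than calling __srcu_read_lock() directly.  "my_data", "my_srcu", and
 * "gp" are hypothetical names.
 *
 *	struct my_data {
 *		int val;
 *	};
 *	static struct my_data __rcu *gp;
 *	DEFINE_STATIC_SRCU(my_srcu);
 *
 *	static int my_reader(void)
 *	{
 *		struct my_data *p;
 *		int idx, val = 0;
 *
 *		idx = srcu_read_lock(&my_srcu);		// SRCU readers may sleep.
 *		p = srcu_dereference(gp, &my_srcu);
 *		if (p)
 *			val = p->val;
 *		srcu_read_unlock(&my_srcu, idx);	// Pass back the returned index.
 *		return val;
 *	}
 *
 *	static void my_updater(struct my_data *newp)
 *	{
 *		struct my_data *oldp;
 *
 *		oldp = rcu_dereference_protected(gp, 1);
 *		rcu_assign_pointer(gp, newp);
 *		synchronize_srcu(&my_srcu);		// Wait for pre-existing readers.
 *		kfree(oldp);
 *	}
 */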

struct srcu_ctr;

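/*
 * Tiny SRCU keeps no per-CPU counters, so the "fast" interfaces below simply
 * encode the integer index returned by __srcu_read_lock() in the struct
 * srcu_ctr pointer value.  The two helpers that follow convert between the
 * index and pointer representations for API compatibility with Tree SRCU.
 */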
static inline int __srcu_ptr_to_ctr(struct srcu_struct *ssp, struct srcu_ctr __percpu *scpp)
{
	return (int)(intptr_t)(struct srcu_ctr __force __kernel *)scpp;
}

static inline struct srcu_ctr __percpu *__srcu_ctr_to_ptr(struct srcu_struct *ssp, int idx)
{
	return (struct srcu_ctr __percpu *)(intptr_t)idx;
}

static inline struct srcu_ctr __percpu *__srcu_read_lock_fast(struct srcu_struct *ssp)
{
	return __srcu_ctr_to_ptr(ssp, __srcu_read_lock(ssp));
}

static inline void __srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
{
	__srcu_read_unlock(ssp, __srcu_ptr_to_ctr(ssp, scp));
}

static inline struct srcu_ctr __percpu *__srcu_read_lock_fast_updown(struct srcu_struct *ssp)
{
	return __srcu_ctr_to_ptr(ssp, __srcu_read_lock(ssp));
}

static inline
void __srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
{
	__srcu_read_unlock(ssp, __srcu_ptr_to_ctr(ssp, scp));
}

static inline void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
	synchronize_srcu(ssp);
}

static inline void srcu_barrier(struct srcu_struct *ssp)
{
	synchronize_srcu(ssp);
}
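
/*
 * Illustrative sketch (not part of this header): in Tiny SRCU, srcu_barrier()
 * maps to synchronize_srcu().  Its job is to wait for callbacks previously
 * queued with call_srcu() to be invoked, for example before tearing down the
 * srcu_struct.  "my_srcu", "my_data", and "my_free_cb" are hypothetical.
 *
 *	static void my_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_data, rh));
 *	}
 *
 *	// Updater: call_srcu(&my_srcu, &oldp->rh, my_free_cb);
 *
 *	static void my_exit(void)
 *	{
 *		srcu_barrier(&my_srcu);		// Wait for pending callbacks.
 *		cleanup_srcu_struct(&my_srcu);
 *	}
 */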

static inline void srcu_expedite_current(struct srcu_struct *ssp) { }
#define srcu_check_read_flavor(ssp, read_flavor) do { } while (0)

/* Defined here to avoid size increase for non-torture kernels. */
static inline void srcu_torture_stats_print(struct srcu_struct *ssp,
					    char *tt, char *tf)
{
	int idx;

	idx = ((data_race(READ_ONCE(ssp->srcu_idx)) + 1) & 0x2) >> 1;
	pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd) gp: %lu->%lu\n",
		 tt, tf, idx,
		 data_race(READ_ONCE(ssp->srcu_lock_nesting[!idx])),
		 data_race(READ_ONCE(ssp->srcu_lock_nesting[idx])),
		 data_race(READ_ONCE(ssp->srcu_idx)),
		 data_race(READ_ONCE(ssp->srcu_idx_max)));
}

#endif