/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE. 4 kbytes per counter on 32-way P4.
 */

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>

/* percpu_counter batch for local add or sub */
#define PERCPU_COUNTER_LOCAL_BATCH	INT_MAX

#ifdef CONFIG_SMP

struct percpu_counter {
	raw_spinlock_t lock;
	s64 count;
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;
};

extern int percpu_counter_batch;

int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount,
			       gfp_t gfp, u32 nr_counters,
			       struct lock_class_key *key);

#define percpu_counter_init_many(fbc, value, gfp, nr_counters)		\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init_many(fbc, value, gfp, nr_counters,\
					   &__key);			\
	})

#define percpu_counter_init(fbc, value, gfp)				\
	percpu_counter_init_many(fbc, value, gfp, 1)

void percpu_counter_destroy_many(struct percpu_counter *fbc, u32 nr_counters);
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
	percpu_counter_destroy_many(fbc, 1);
}
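
/*
 * Illustrative sketch of the usual lifecycle; the names ctr, ctrs and
 * NR_CTRS are made up for the example:
 *
 *	struct percpu_counter ctr;
 *	struct percpu_counter ctrs[NR_CTRS];
 *
 *	if (percpu_counter_init(&ctr, 0, GFP_KERNEL))
 *		return -ENOMEM;
 *	if (percpu_counter_init_many(ctrs, 0, GFP_KERNEL, NR_CTRS)) {
 *		percpu_counter_destroy(&ctr);
 *		return -ENOMEM;
 *	}
 *	...
 *	percpu_counter_destroy_many(ctrs, NR_CTRS);
 *	percpu_counter_destroy(&ctr);
 */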

void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
			      s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
void percpu_counter_sync(struct percpu_counter *fbc);

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}
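
/*
 * percpu_counter_compare() first compares fbc->count against rhs with a
 * slack of about batch * num_online_cpus(); only when the two are closer
 * than that does it pay for an exact __percpu_counter_sum().  A typical
 * use is a cheap limit check; free_blocks and nblocks below are made-up
 * names:
 *
 *	if (percpu_counter_compare(&free_blocks, nblocks) < 0)
 *		return -ENOSPC;
 */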

static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}
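
/*
 * On SMP the add fast path stays on the local CPU: the amount is applied
 * to this CPU's s32 counter, and only once that local value reaches the
 * batch is it folded into fbc->count under the lock.  As a result,
 * percpu_counter_read() can lag the true value by up to roughly
 * batch * num_online_cpus().
 */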

/*
 * With percpu_counter_add_local() and percpu_counter_sub_local(), counts
 * are accumulated in the local per-CPU counter and are not transferred to
 * fbc->count until the local count overflows PERCPU_COUNTER_LOCAL_BATCH.
 * This makes counter writes efficient.
 * But percpu_counter_sum(), rather than percpu_counter_read(), must then
 * be used to add up the counts from each CPU and account for all the
 * local counts. So percpu_counter_add_local() and percpu_counter_sub_local()
 * should be used when a counter is updated frequently and read rarely.
 */
static inline void
percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_batch(fbc, amount, PERCPU_COUNTER_LOCAL_BATCH);
}
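
/*
 * Sketch of the update-often/read-rarely pattern described above;
 * nr_dirty and total are made-up names:
 *
 *	// hot path: cheap, stays in this CPU's counter
 *	percpu_counter_add_local(&nr_dirty, 1);
 *
 *	// read side: must sum all CPUs to see the local counts
 *	total = percpu_counter_sum(&nr_dirty);
 */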

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	s64 ret = __percpu_counter_sum(fbc);
	return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for a counter which should never be negative: per-CPU deltas
 * that have not yet been folded into fbc->count are invisible to it.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	/* Prevent reloads of fbc->count */
	s64 ret = READ_ONCE(fbc->count);

	if (ret >= 0)
		return ret;
	return 0;
}
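
/*
 * A made-up scenario showing how the negative read can happen, with
 * batch = 32 and three CPUs: 31 increments on CPU0 and 31 on CPU1 stay
 * in their local counters (fbc->count is still 0, true value 62); 32
 * decrements on CPU2 then hit the batch and get folded, leaving
 * fbc->count at -32 while the true value is 30.  The counter was never
 * logically negative, yet percpu_counter_read() returns -32.
 */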

static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return (fbc->counters != NULL);
}

#else /* !CONFIG_SMP */

struct percpu_counter {
	s64 count;
};

static inline int percpu_counter_init_many(struct percpu_counter *fbc,
					   s64 amount, gfp_t gfp,
					   u32 nr_counters)
{
	u32 i;

	for (i = 0; i < nr_counters; i++)
		fbc[i].count = amount;

	return 0;
}

static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
				      gfp_t gfp)
{
	return percpu_counter_init_many(fbc, amount, gfp, 1);
}

static inline void percpu_counter_destroy_many(struct percpu_counter *fbc,
					       u32 nr_counters)
{
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	if (fbc->count > rhs)
		return 1;
	else if (fbc->count < rhs)
		return -1;
	else
		return 0;
}

static inline int
__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	return percpu_counter_compare(fbc, rhs);
}

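/*
 * On UP the only concurrency that can corrupt the read-modify-write of
 * fbc->count below is an interrupt handler updating the same counter,
 * so disabling interrupts is sufficient and no lock is needed.
 */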
static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	unsigned long flags;

	local_irq_save(flags);
	fbc->count += amount;
	local_irq_restore(flags);
}

/* non-SMP percpu_counter_add_local is the same as percpu_counter_add */
static inline void
percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, amount);
}

static inline void
percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * percpu_counter is intended to track positive numbers. In the UP case the
 * number should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}

static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return true;
}

static inline void percpu_counter_sync(struct percpu_counter *fbc)
{
}
#endif /* CONFIG_SMP */

static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}

static inline void
percpu_counter_sub_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_local(fbc, -amount);
}

#endif /* _LINUX_PERCPU_COUNTER_H */