Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _LINUX_PERCPU_COUNTER_H
3#define _LINUX_PERCPU_COUNTER_H
4/*
5 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
6 *
7 * WARNING: these things are HUGE. 4 kbytes per counter on 32-way P4.
8 */
9
10#include <linux/spinlock.h>
11#include <linux/smp.h>
12#include <linux/list.h>
13#include <linux/threads.h>
14#include <linux/percpu.h>
15#include <linux/types.h>
16#include <linux/gfp.h>
17
18/* percpu_counter batch for local add or sub */
19#define PERCPU_COUNTER_LOCAL_BATCH INT_MAX
20
21#ifdef CONFIG_SMP
22
/*
 * SMP percpu_counter: a central s64 total plus a small per-CPU delta.
 * Cheap-to-update approximate counter; per-CPU deltas are folded into
 * ->count once they exceed the batch threshold.
 */
struct percpu_counter {
	raw_spinlock_t lock;	/* protects ->count when folding per-CPU deltas */
	s64 count;		/* central (approximate) total */
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;	/* per-CPU deltas; NULL until init succeeds */
};
31
32extern int percpu_counter_batch;
33
34int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
35 struct lock_class_key *key);
36
/*
 * Initialize @fbc to @value, allocating the per-CPU counters with @gfp.
 * A static lock_class_key per call site gives each counter its own
 * lockdep class, so unrelated counters don't share lock dependency chains.
 * Returns 0 on success, negative errno on allocation failure.
 */
#define percpu_counter_init(fbc, value, gfp) \
	({ \
		static struct lock_class_key __key; \
		\
		__percpu_counter_init(fbc, value, gfp, &__key); \
	})
43
44void percpu_counter_destroy(struct percpu_counter *fbc);
45void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
46void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
47 s32 batch);
48s64 __percpu_counter_sum(struct percpu_counter *fbc);
49int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
50void percpu_counter_sync(struct percpu_counter *fbc);
51
52static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
53{
54 return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
55}
56
57static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
58{
59 percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
60}
61
62/*
63 * With percpu_counter_add_local() and percpu_counter_sub_local(), counts
64 * are accumulated in local per cpu counter and not in fbc->count until
65 * local count overflows PERCPU_COUNTER_LOCAL_BATCH. This makes counter
66 * write efficient.
67 * But percpu_counter_sum(), instead of percpu_counter_read(), needs to be
68 * used to add up the counts from each CPU to account for all the local
69 * counts. So percpu_counter_add_local() and percpu_counter_sub_local()
70 * should be used when a counter is updated frequently and read rarely.
71 */
72static inline void
73percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
74{
75 percpu_counter_add_batch(fbc, amount, PERCPU_COUNTER_LOCAL_BATCH);
76}
77
78static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
79{
80 s64 ret = __percpu_counter_sum(fbc);
81 return ret < 0 ? 0 : ret;
82}
83
84static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
85{
86 return __percpu_counter_sum(fbc);
87}
88
/*
 * Fast, approximate read of the central count only. Per-CPU deltas that
 * have not yet been folded in are not included, so the result can lag the
 * true sum (see the percpu_counter_add_local() comment above).
 */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}
93
94/*
95 * It is possible for the percpu_counter_read() to return a small negative
96 * number for some counter which should never be negative.
97 *
98 */
99static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
100{
101 /* Prevent reloads of fbc->count */
102 s64 ret = READ_ONCE(fbc->count);
103
104 if (ret >= 0)
105 return ret;
106 return 0;
107}
108
109static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
110{
111 return (fbc->counters != NULL);
112}
113
114#else /* !CONFIG_SMP */
115
/* UP variant: a single CPU needs no per-CPU deltas, just the plain total. */
struct percpu_counter {
	s64 count;	/* the exact counter value */
};
119
120static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
121 gfp_t gfp)
122{
123 fbc->count = amount;
124 return 0;
125}
126
/* UP destroy: no per-CPU storage was allocated, so nothing to free. */
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}
130
/* Set the counter to an exact value. */
static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}
135
136static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
137{
138 if (fbc->count > rhs)
139 return 1;
140 else if (fbc->count < rhs)
141 return -1;
142 else
143 return 0;
144}
145
/*
 * UP variant: the count is exact, so @batch is irrelevant and ignored;
 * simply defer to the exact compare.
 */
static inline int
__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	return percpu_counter_compare(fbc, rhs);
}
151
/*
 * UP add. Preemption is disabled around the read-modify-write so another
 * context on this CPU cannot interleave with the (possibly multi-word on
 * 32-bit) s64 update.
 */
static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}
159
160/* non-SMP percpu_counter_add_local is the same with percpu_counter_add */
161static inline void
162percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
163{
164 percpu_counter_add(fbc, amount);
165}
166
167static inline void
168percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
169{
170 percpu_counter_add(fbc, amount);
171}
172
173static inline s64 percpu_counter_read(struct percpu_counter *fbc)
174{
175 return fbc->count;
176}
177
/*
 * percpu_counter is intended to track positive numbers. In the UP case the
 * number should never be negative (there are no unfolded per-CPU deltas),
 * so no clamping is done here — the count is returned as-is.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}
186
187static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
188{
189 return percpu_counter_read_positive(fbc);
190}
191
192static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
193{
194 return percpu_counter_read(fbc);
195}
196
/* UP init never allocates, so a counter is always "initialized". */
static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return true;
}
201
/* UP: nothing to sync — the count is always exact. */
static inline void percpu_counter_sync(struct percpu_counter *fbc)
{
}
205#endif /* CONFIG_SMP */
206
/* Bump the counter by one. */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}
211
/* Drop the counter by one. */
static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}
216
217static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
218{
219 percpu_counter_add(fbc, -amount);
220}
221
222static inline void
223percpu_counter_sub_local(struct percpu_counter *fbc, s64 amount)
224{
225 percpu_counter_add_local(fbc, -amount);
226}
227
228#endif /* _LINUX_PERCPU_COUNTER_H */