#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */
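
/*
 * How it works (a summary of lib/percpu_counter.c, not normative): each
 * CPU accumulates updates in its own s32 slot and only folds them into
 * the shared s64 count, under fbc->lock, once the local delta reaches
 * the batch threshold.  Reads of fbc->count are therefore cheap but may
 * lag the true value by up to batch * num_online_cpus().
 */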

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>

#ifdef CONFIG_SMP

struct percpu_counter {
	spinlock_t lock;	/* protects count and the fold path */
	s64 count;		/* shared, possibly stale, total */
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;	/* per-CPU deltas not yet folded in */
};

/* Default fold threshold; scaled with the number of online CPUs. */
extern int percpu_counter_batch;

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
			  struct lock_class_key *key);

#define percpu_counter_init(fbc, value)					\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init(fbc, value, &__key);		\
	})
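
/*
 * Typical lifecycle (hypothetical caller; a sketch, not taken from this
 * file -- error handling trimmed to the allocation check):
 *
 *	struct percpu_counter free_blocks;
 *
 *	if (percpu_counter_init(&free_blocks, 0))
 *		return -ENOMEM;
 *	percpu_counter_add(&free_blocks, 128);
 *	pr_info("approx %lld\n", (long long)percpu_counter_read(&free_blocks));
 *	pr_info("exact  %lld\n", (long long)percpu_counter_sum(&free_blocks));
 *	percpu_counter_destroy(&free_blocks);
 */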

void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs);
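
/*
 * The out-of-line operations above live in lib/percpu_counter.c.  In
 * brief: _set() resets every per-CPU slot and installs the new count;
 * __percpu_counter_add() takes a caller-chosen fold batch;
 * __percpu_counter_sum() walks every CPU's slot under fbc->lock for an
 * exact total; percpu_counter_compare() trusts the cheap count when it
 * differs from rhs by more than the worst-case drift and only then pays
 * for an exact sum.
 */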

/* Add "amount" using the default batch. */
static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	__percpu_counter_add(fbc, amount, percpu_counter_batch);
}
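
/*
 * Callers that can tolerate more drift may pass a larger batch directly
 * to trade accuracy for less lock traffic (hypothetical tuning, not an
 * API defined here):
 *
 *	__percpu_counter_add(&dirty_pages, nr, 4 * percpu_counter_batch);
 */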

/* Exact sum, clamped so callers never see a transient negative value. */
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	s64 ret = __percpu_counter_sum(fbc);
	return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}
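
/*
 * percpu_counter_read() is O(1) but approximate: it returns only the
 * shared count, so it can lag the true value by up to
 * batch * num_online_cpus() (see the note at the top of this file).
 */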

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * percpu_counter_read() can transiently return a small negative number
 * for a counter that is logically non-negative, because per-CPU deltas
 * may not have been folded in yet.  This variant clamps such readings.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	s64 ret = fbc->count;

	barrier();		/* Prevent reloads of fbc->count */
	if (ret >= 0)
		return ret;
	return 0;
}
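
/*
 * A typical caller pattern (hypothetical): trust the cheap read while
 * the answer is clear-cut, and pay for an exact sum only near the limit:
 *
 *	if (percpu_counter_read_positive(&free_blocks) > nblocks + slack)
 *		return 1;
 *	return percpu_counter_sum_positive(&free_blocks) >= nblocks;
 */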

#else
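
/*
 * UP fallback: with a single CPU there is nothing to distribute, so the
 * counter collapses to a plain s64 and every read is exact.
 */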

struct percpu_counter {
	s64 count;
};

static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
	return 0;
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	if (fbc->count > rhs)
		return 1;
	else if (fbc->count < rhs)
		return -1;
	else
		return 0;
}

static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}

/* The batch has no meaning on UP; fall through to the plain add. */
static inline void
__percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/* No per-CPU drift on UP, so the plain read is already "positive". */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}

#endif	/* CONFIG_SMP */
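
/* Convenience wrappers shared by the SMP and UP implementations. */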

static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}

#endif	/* _LINUX_PERCPU_COUNTER_H */