#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4
 * (likely each CPU's s32 slot padded out to a 128-byte cache line:
 * 32 x 128 bytes = 4 kbytes).
 */

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>

#ifdef CONFIG_SMP

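/*
 * Layout: one central s64 "count", protected by "lock", plus a small
 * per-CPU s32 delta.  Writers normally touch only their local delta
 * and fold it into "count" once it crosses the batch threshold (see
 * lib/percpu_counter.c), which keeps cross-CPU cacheline traffic rare.
 */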
struct percpu_counter {
	spinlock_t lock;
	s64 count;
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;
};

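/*
 * Per-CPU deltas are folded into ->count once their magnitude reaches
 * this batch size, so percpu_counter_read() can lag the true value by
 * up to roughly batch * num_online_cpus(): e.g. batch = 32 on 4 CPUs
 * allows a snapshot to be off by ~128.  A larger batch means fewer
 * lock acquisitions but a sloppier read.
 */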
extern int percpu_counter_batch;

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
			  struct lock_class_key *key);

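/*
 * percpu_counter_init() is a macro so that every call site gets its own
 * static lock_class_key: lockdep then classifies each subsystem's
 * counter lock separately instead of lumping all fbc->lock instances
 * into one class and reporting false-positive deadlocks.
 */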
#define percpu_counter_init(fbc, value)					\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init(fbc, value, &__key);		\
	})

void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);

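/*
 * A sketch of what the out-of-line __percpu_counter_add() in
 * lib/percpu_counter.c does (illustrative only; see that file for the
 * authoritative version):
 *
 *	preempt_disable();
 *	count = __this_cpu_read(*fbc->counters) + amount;
 *	if (count >= batch || count <= -batch) {
 *		spin_lock(&fbc->lock);
 *		fbc->count += count;	(fold the local delta into the total)
 *		__this_cpu_write(*fbc->counters, 0);
 *		spin_unlock(&fbc->lock);
 *	} else {
 *		__this_cpu_write(*fbc->counters, count);
 *	}
 *	preempt_enable();
 */
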
static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	__percpu_counter_add(fbc, amount, percpu_counter_batch);
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	s64 ret = __percpu_counter_sum(fbc);
	return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}

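/*
 * A sketch of the out-of-line __percpu_counter_sum() in
 * lib/percpu_counter.c (illustrative only): take the lock and add every
 * CPU's pending delta to the central count, giving an accurate but
 * O(nr_cpus), lock-taking result:
 *
 *	spin_lock(&fbc->lock);
 *	ret = fbc->count;
 *	for_each_online_cpu(cpu)
 *		ret += *per_cpu_ptr(fbc->counters, cpu);
 *	spin_unlock(&fbc->lock);
 *	return ret;
 */
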
/*
 * Cheap, lockless snapshot of the central count.  Per-CPU deltas that
 * have not been folded in yet are missed, so the result is only
 * approximate.
 */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for a counter that should never be negative: unfolded per-CPU
 * deltas can make the central count transiently undershoot.  This
 * variant clamps such snapshots to a small positive value.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	s64 ret = fbc->count;

	barrier();		/* Prevent reloads of fbc->count */
	if (ret >= 0)
		return ret;
	return 1;
}

#else /* !CONFIG_SMP: the counter degenerates to a plain s64 */

struct percpu_counter {
	s64 count;
};

static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
	return 0;
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}

/*
 * No cross-CPU state on UP: disabling preemption is enough to make the
 * read-modify-write atomic w.r.t. other tasks (though not w.r.t. IRQs).
 */
static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}

static inline void
__percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}

#endif /* CONFIG_SMP */

static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}
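
/*
 * Typical usage, with a hypothetical "nr_widgets" counter:
 *
 *	static struct percpu_counter nr_widgets;
 *
 *	percpu_counter_init(&nr_widgets, 0);	(setup; can fail on SMP)
 *	percpu_counter_inc(&nr_widgets);	(hot path: cheap)
 *	percpu_counter_read(&nr_widgets);	(cheap but approximate)
 *	percpu_counter_sum(&nr_widgets);	(slow path: accurate)
 *	percpu_counter_destroy(&nr_widgets);	(teardown)
 */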

#endif /* _LINUX_PERCPU_COUNTER_H */