Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1#ifndef _LINUX_PERCPU_COUNTER_H
2#define _LINUX_PERCPU_COUNTER_H
3/*
4 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
5 *
6 * WARNING: these things are HUGE. 4 kbytes per counter on 32-way P4.
7 */
8
9#include <linux/spinlock.h>
10#include <linux/smp.h>
11#include <linux/list.h>
12#include <linux/threads.h>
13#include <linux/percpu.h>
14#include <linux/types.h>
15
16#ifdef CONFIG_SMP
17
/*
 * SMP flavour: a cheap-to-update approximate counter.  Each CPU batches
 * small updates in its own s32 slot and only folds them into 'count'
 * occasionally (see FBC_BATCH), so 'count' is approximate at any instant.
 */
struct percpu_counter {
	spinlock_t lock;	/* serializes folding of per-cpu deltas into 'count' */
	s64 count;		/* global (approximate) value */
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 *counters;		/* per-cpu delta slots — NOTE(review): presumably percpu-allocated; see lib implementation */
};
26
/*
 * Per-cpu slot threshold: once a CPU's local delta reaches +/-FBC_BATCH
 * it is folded into fbc->count.  Scaled with NR_CPUS so that the worst-case
 * drift of 'count' stays bounded relative to machine size.
 */
#if NR_CPUS >= 16
#define FBC_BATCH	(NR_CPUS*2)
#else
#define FBC_BATCH	(NR_CPUS*4)
#endif
32
/* Initialize to 'amount'; returns 0 or -ENOMEM-style failure from the allocator. */
int percpu_counter_init(struct percpu_counter *fbc, s64 amount);
/* As percpu_counter_init(), for counters updated from irq context. */
int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount);
/* Release the per-cpu storage; counter must not be used afterwards. */
void percpu_counter_destroy(struct percpu_counter *fbc);
/* Forcibly set the counter value (not safe against concurrent updaters). */
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
/* Add 'amount', folding into fbc->count when the local delta exceeds 'batch'. */
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
/* Accurate (all per-cpu slots folded in) — and expensive — sum. */
s64 __percpu_counter_sum(struct percpu_counter *fbc);
39
/* Add 'amount' using the default fold threshold (FBC_BATCH). */
static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	__percpu_counter_add(fbc, amount, FBC_BATCH);
}
44
45static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
46{
47 s64 ret = __percpu_counter_sum(fbc);
48 return ret < 0 ? 0 : ret;
49}
50
/* Accurate (expensive) sum; unlike _positive, may legitimately be negative. */
static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}
55
/*
 * Cheap, approximate read: just the global word, without folding in the
 * per-cpu deltas.  May be off by up to num_online_cpus() * FBC_BATCH.
 */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}
60
/*
 * It is possible for the cheap percpu_counter_read() to return a small
 * negative number for a counter which should never be negative, because
 * the unfolded per-cpu deltas are not accounted for.  This helper hides
 * that artifact from callers.
 *
 * NOTE(review): the negative case returns 1, not 0 — presumably so callers
 * that divide by the result never see zero; confirm intent before changing.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	s64 ret = fbc->count;

	barrier();		/* Prevent reloads of fbc->count */
	if (ret >= 0)
		return ret;
	return 1;
}
75
76#else
77
/* UP flavour: a single exact counter word, no per-cpu state needed. */
struct percpu_counter {
	s64 count;	/* exact value */
};
81
/* UP init: nothing to allocate, so this cannot fail — always returns 0. */
static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
	return 0;
}
87
/* UP: no lock to make irq-safe, so the irq variant is a plain alias. */
#define percpu_counter_init_irq percpu_counter_init
89
/* UP: nothing was allocated by init, so there is nothing to release. */
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}
93
/* Overwrite the counter value (no protection against concurrent updaters). */
static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}
98
/* UP: batching is meaningless with one CPU, so 'batch' is ignored. */
#define __percpu_counter_add(fbc, amount, batch) \
	percpu_counter_add(fbc, amount)
101
102static inline void
103percpu_counter_add(struct percpu_counter *fbc, s64 amount)
104{
105 preempt_disable();
106 fbc->count += amount;
107 preempt_enable();
108}
109
/* UP read: exact, since there are no unfolded per-cpu deltas. */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}
114
/*
 * UP read_positive: the value is exact, so no clamping artifact exists.
 * NOTE(review): unlike the SMP variant, this CAN return a negative value if
 * the counter actually goes negative — callers relying on a non-negative
 * result should be audited; confirm against SMP behavior before changing.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}
119
/* UP: the cheap read is already exact, so "sum" degenerates to "read". */
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}
124
/* UP: exact sum is just the single counter word. */
static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}
129
130#endif /* CONFIG_SMP */
131
/* Convenience wrapper: add +1. */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}
136
/* Convenience wrapper: add -1. */
static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}
141
/*
 * Subtract 'amount' by adding its negation.
 * NOTE(review): -amount overflows (UB) for amount == S64_MIN; no caller is
 * expected to pass that, but the wrapper itself does not guard against it.
 */
static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}
146
147#endif /* _LINUX_PERCPU_COUNTER_H */