Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1#ifndef _LINUX_PERCPU_COUNTER_H
2#define _LINUX_PERCPU_COUNTER_H
3/*
4 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
5 *
6 * WARNING: these things are HUGE. 4 kbytes per counter on 32-way P4.
7 */
8
9#include <linux/spinlock.h>
10#include <linux/smp.h>
11#include <linux/list.h>
12#include <linux/threads.h>
13#include <linux/percpu.h>
14#include <linux/types.h>
15
16#ifdef CONFIG_SMP
17
/*
 * SMP flavour: ->count holds the approximate aggregate value; ->counters
 * points at per-cpu deltas that are periodically folded into ->count
 * (see __percpu_counter_add() / lib/percpu_counter.c).
 */
struct percpu_counter {
	spinlock_t lock;	/* serializes updates of ->count */
	s64 count;		/* central, approximate total */
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 *counters;		/* per-cpu deltas, not yet folded in */
};
26
/*
 * Per-cpu batch threshold: a cpu-local delta is folded into the central
 * ->count once it reaches this size.  Scales with NR_CPUS; small systems
 * presumably get the larger multiplier so the batch stays useful —
 * see __percpu_counter_add() for how it is applied.
 */
#if NR_CPUS >= 16
#define FBC_BATCH	(NR_CPUS*2)
#else
#define FBC_BATCH	(NR_CPUS*4)
#endif
32
/*
 * Implemented in lib/percpu_counter.c.  Contract notes below are inferred
 * from the inline wrappers in this header — confirm against the .c file.
 */
/* Initialise the counter to 'amount'; returns 0 on success (per-cpu
 * storage is presumably allocated here and may fail). */
int percpu_counter_init(struct percpu_counter *fbc, s64 amount);
/* As percpu_counter_init(), for counters also updated from irq context. */
int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount);
/* Release the per-cpu storage set up by percpu_counter_init(). */
void percpu_counter_destroy(struct percpu_counter *fbc);
/* Overwrite the counter's value with 'amount'. */
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
/* Add 'amount'; cpu-local deltas are batched up to 'batch' before being
 * folded into ->count. */
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
/* Accurate sum across all cpus; 'set' != 0 also writes the result back
 * into ->count (see percpu_counter_sum_and_set()). */
s64 __percpu_counter_sum(struct percpu_counter *fbc, int set);
39
/* Add 'amount' to the counter, using the default FBC_BATCH batch size. */
static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	__percpu_counter_add(fbc, amount, FBC_BATCH);
}
44
45static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
46{
47 s64 ret = __percpu_counter_sum(fbc, 0);
48 return ret < 0 ? 0 : ret;
49}
50
/*
 * Accurate (all-cpu) sum that also folds the result back into ->count,
 * making subsequent percpu_counter_read() calls more precise.
 */
static inline s64 percpu_counter_sum_and_set(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc, 1);
}
55
56
/* Accurate (all-cpu) sum; may be negative.  Read-only: does not update
 * ->count (contrast percpu_counter_sum_and_set()). */
static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc, 0);
}
61
/*
 * Cheap, approximate read: returns only the central ->count, which may
 * lag the true value by the per-cpu deltas not yet folded in.
 */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}
66
67/*
68 * It is possible for the percpu_counter_read() to return a small negative
69 * number for some counter which should never be negative.
70 *
71 */
72static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
73{
74 s64 ret = fbc->count;
75
76 barrier(); /* Prevent reloads of fbc->count */
77 if (ret >= 0)
78 return ret;
79 return 1;
80}
81
82#else
83
/* UP flavour: a single exact count, no per-cpu state, no lock. */
struct percpu_counter {
	s64 count;	/* exact value */
};
87
/* UP: nothing to allocate — just seed the count.  Always succeeds (0). */
static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
	return 0;
}
93
/* UP has no lock to make irq-safe, so the _irq variant is the plain init. */
#define percpu_counter_init_irq percpu_counter_init
95
/* UP: no per-cpu storage was allocated, so there is nothing to free. */
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}
99
/* Overwrite the counter's value with 'amount'. */
static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}
104
/* UP: 'batch' is meaningless (nothing to fold) — degenerate to a plain add. */
#define __percpu_counter_add(fbc, amount, batch) \
	percpu_counter_add(fbc, amount)
107
/*
 * UP: the whole counter is one s64.  Disable preemption so the
 * read-modify-write of ->count cannot interleave with another task's
 * update on this cpu.
 */
static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}
115
/* UP: ->count is exact, so the "approximate" read is also exact. */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}
120
/*
 * UP: the value is exact, so a correctly used counter should never be
 * negative here.  NOTE(review): unlike the SMP variant, no clamping is
 * done — a misused counter would return its raw negative value.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}
125
/* UP: the read is already the exact sum. */
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}
130
/* UP: the read is already the exact sum. */
static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}
135
136#endif /* CONFIG_SMP */
137
/* Increment the counter by one. */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}
142
/* Decrement the counter by one. */
static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}
147
/*
 * Subtract 'amount' from the counter.
 * NOTE(review): negating S64_MIN is undefined behaviour; callers are
 * assumed to pass amounts well inside the s64 range.
 */
static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}
152
153#endif /* _LINUX_PERCPU_COUNTER_H */