include/linux/percpu_counter.h at v2.6.29
#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>

#ifdef CONFIG_SMP

struct percpu_counter {
	spinlock_t lock;
	s64 count;
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 *counters;
};

extern int percpu_counter_batch;

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
			  struct lock_class_key *key);

#define percpu_counter_init(fbc, value)					\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init(fbc, value, &__key);		\
	})

void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);

static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	__percpu_counter_add(fbc, amount, percpu_counter_batch);
}
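/*
 * Commentary (not in the original header): __percpu_counter_add(),
 * implemented in lib/percpu_counter.c, accumulates each delta in the
 * calling CPU's local s32 counter and only folds the local value into
 * fbc->count, under fbc->lock, once it reaches +/-batch. Reads of
 * fbc->count are therefore lock-free but may be off by up to roughly
 * batch * num_online_cpus().
 */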
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	s64 ret = __percpu_counter_sum(fbc);
	return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for a counter which should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	s64 ret = fbc->count;

	barrier();		/* Prevent reloads of fbc->count */
	if (ret >= 0)
		return ret;
	return 1;		/* transiently negative: report a small positive value */
}

#else

struct percpu_counter {
	s64 count;
};

static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
	return 0;
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}

#define __percpu_counter_add(fbc, amount, batch) \
	percpu_counter_add(fbc, amount)

static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}

#endif	/* CONFIG_SMP */

static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}

#endif /* _LINUX_PERCPU_COUNTER_H */
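A minimal usage sketch of the API above, in the spirit of the ext2/ext3
free-block counters; the my_* names are hypothetical and not part of the
kernel:

#include <linux/init.h>
#include <linux/percpu_counter.h>

static struct percpu_counter my_free_blocks;	/* hypothetical example counter */

static int __init my_counter_setup(void)
{
	/* percpu_counter_init() can fail (per-CPU allocation) on SMP. */
	return percpu_counter_init(&my_free_blocks, 1024);
}

static void my_alloc_block(void)
{
	percpu_counter_dec(&my_free_blocks);	/* cheap per-CPU fast path */
}

static void my_free_block(void)
{
	percpu_counter_inc(&my_free_blocks);
}

static s64 my_blocks_left(void)
{
	/*
	 * Cheap but approximate; use percpu_counter_sum(), which walks
	 * every CPU's local counter, when an accurate total matters.
	 */
	return percpu_counter_read_positive(&my_free_blocks);
}

static void my_counter_teardown(void)
{
	percpu_counter_destroy(&my_free_blocks);
}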