/* include/linux/percpu_counter.h — Linux kernel, as of tag v4.13 */
1#ifndef _LINUX_PERCPU_COUNTER_H 2#define _LINUX_PERCPU_COUNTER_H 3/* 4 * A simple "approximate counter" for use in ext2 and ext3 superblocks. 5 * 6 * WARNING: these things are HUGE. 4 kbytes per counter on 32-way P4. 7 */ 8 9#include <linux/spinlock.h> 10#include <linux/smp.h> 11#include <linux/list.h> 12#include <linux/threads.h> 13#include <linux/percpu.h> 14#include <linux/types.h> 15#include <linux/gfp.h> 16 17#ifdef CONFIG_SMP 18 19struct percpu_counter { 20 raw_spinlock_t lock; 21 s64 count; 22#ifdef CONFIG_HOTPLUG_CPU 23 struct list_head list; /* All percpu_counters are on a list */ 24#endif 25 s32 __percpu *counters; 26}; 27 28extern int percpu_counter_batch; 29 30int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp, 31 struct lock_class_key *key); 32 33#define percpu_counter_init(fbc, value, gfp) \ 34 ({ \ 35 static struct lock_class_key __key; \ 36 \ 37 __percpu_counter_init(fbc, value, gfp, &__key); \ 38 }) 39 40void percpu_counter_destroy(struct percpu_counter *fbc); 41void percpu_counter_set(struct percpu_counter *fbc, s64 amount); 42void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, 43 s32 batch); 44s64 __percpu_counter_sum(struct percpu_counter *fbc); 45int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch); 46 47static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) 48{ 49 return __percpu_counter_compare(fbc, rhs, percpu_counter_batch); 50} 51 52static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) 53{ 54 percpu_counter_add_batch(fbc, amount, percpu_counter_batch); 55} 56 57static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc) 58{ 59 s64 ret = __percpu_counter_sum(fbc); 60 return ret < 0 ? 
0 : ret; 61} 62 63static inline s64 percpu_counter_sum(struct percpu_counter *fbc) 64{ 65 return __percpu_counter_sum(fbc); 66} 67 68static inline s64 percpu_counter_read(struct percpu_counter *fbc) 69{ 70 return fbc->count; 71} 72 73/* 74 * It is possible for the percpu_counter_read() to return a small negative 75 * number for some counter which should never be negative. 76 * 77 */ 78static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc) 79{ 80 s64 ret = fbc->count; 81 82 barrier(); /* Prevent reloads of fbc->count */ 83 if (ret >= 0) 84 return ret; 85 return 0; 86} 87 88static inline int percpu_counter_initialized(struct percpu_counter *fbc) 89{ 90 return (fbc->counters != NULL); 91} 92 93#else /* !CONFIG_SMP */ 94 95struct percpu_counter { 96 s64 count; 97}; 98 99static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount, 100 gfp_t gfp) 101{ 102 fbc->count = amount; 103 return 0; 104} 105 106static inline void percpu_counter_destroy(struct percpu_counter *fbc) 107{ 108} 109 110static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount) 111{ 112 fbc->count = amount; 113} 114 115static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) 116{ 117 if (fbc->count > rhs) 118 return 1; 119 else if (fbc->count < rhs) 120 return -1; 121 else 122 return 0; 123} 124 125static inline int 126__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch) 127{ 128 return percpu_counter_compare(fbc, rhs); 129} 130 131static inline void 132percpu_counter_add(struct percpu_counter *fbc, s64 amount) 133{ 134 preempt_disable(); 135 fbc->count += amount; 136 preempt_enable(); 137} 138 139static inline void 140percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch) 141{ 142 percpu_counter_add(fbc, amount); 143} 144 145static inline s64 percpu_counter_read(struct percpu_counter *fbc) 146{ 147 return fbc->count; 148} 149 150/* 151 * percpu_counter is intended to track positive 
numbers. In the UP case the 152 * number should never be negative. 153 */ 154static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc) 155{ 156 return fbc->count; 157} 158 159static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc) 160{ 161 return percpu_counter_read_positive(fbc); 162} 163 164static inline s64 percpu_counter_sum(struct percpu_counter *fbc) 165{ 166 return percpu_counter_read(fbc); 167} 168 169static inline int percpu_counter_initialized(struct percpu_counter *fbc) 170{ 171 return 1; 172} 173 174#endif /* CONFIG_SMP */ 175 176static inline void percpu_counter_inc(struct percpu_counter *fbc) 177{ 178 percpu_counter_add(fbc, 1); 179} 180 181static inline void percpu_counter_dec(struct percpu_counter *fbc) 182{ 183 percpu_counter_add(fbc, -1); 184} 185 186static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount) 187{ 188 percpu_counter_add(fbc, -amount); 189} 190 191#endif /* _LINUX_PERCPU_COUNTER_H */