/* include/linux/vmstat.h, at v3.6-rc3 */
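/*
 * Declarations for the lightweight per-cpu VM event counters and for
 * zone-based page state accounting with per-cpu differentials.
 */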
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__this_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__this_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}

extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)

/*
 * Zone-based page accounting with per-cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
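/*
 * Illustrative usage (an added sketch, not part of the original file):
 * hot paths bump the event counters while readers sample the zone
 * counters, e.g.
 *
 *	count_vm_event(PGFAULT);
 *	if (global_page_state(NR_FREE_PAGES) < min_free)
 *		wake_reclaim();
 *
 * PGFAULT is defined in vm_event_item.h and NR_FREE_PAGES in mmzone.h;
 * min_free and wake_reclaim() are hypothetical stand-ins for a caller's
 * own threshold and response.
 */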
/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization, so the result is not exactly
 * accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}

extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);

#ifdef CONFIG_NUMA
/*
 * Determine the per-node value of a stat item. This function
 * is called frequently on NUMA machines, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}

extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z, gfp) do { } while (0)

#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
void refresh_zone_stat_thresholds(void);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
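/*
 * Illustrative usage (an added sketch, not part of the original file):
 * the "__" variants assume the caller has already made the context safe
 * (e.g. interrupts disabled), while the plain variants protect
 * themselves, e.g.
 *
 *	__inc_zone_page_state(page, NR_FILE_PAGES);	- irqs already off
 *	inc_zone_page_state(page, NR_FILE_PAGES);	- any context
 */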
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_zone_stat_thresholds(void) { }

#endif /* CONFIG_SMP */

extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */
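/*
 * Illustrative note (an added sketch, not part of the original file):
 * because the !CONFIG_SMP stubs above alias the plain modifiers to their
 * "__" counterparts, call sites compile unchanged in either
 * configuration, e.g. the page allocator's free-page accounting:
 *
 *	__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
 *
 * On SMP this feeds a per-cpu differential that is folded back into the
 * atomic counters; on UP it updates them directly via
 * zone_page_state_add().
 */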