/* include/linux/vmstat.h, as of Linux v2.6.22 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx)

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
		NR_VM_EVENT_ITEMS
};

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated is simply the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__get_cpu_var(vm_event_states).event[item]++;
}

static inline void count_vm_event(enum vm_event_item item)
{
	get_cpu_var(vm_event_states).event[item]++;
	put_cpu();
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__get_cpu_var(vm_event_states).event[item] += delta;
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	get_cpu_var(vm_event_states).event[item] += delta;
	put_cpu();
}

extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)
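/*
 * Illustrative sketch, assuming CONFIG_ZONE_DMA, CONFIG_ZONE_DMA32 and
 * CONFIG_HIGHMEM are all enabled: FOR_ALL_ZONES(PGALLOC) then expands to
 *
 *	PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL, PGALLOC_HIGH
 *
 * so each per-zone event block is laid out in zone index order. That
 * layout is what lets __count_zone_vm_events() select the item for an
 * arbitrary zone by offsetting from the _NORMAL entry. For example, a
 * page allocator caller such as
 *
 *	__count_zone_vm_events(PGALLOC, zone, 1 << order);
 *
 * resolves to
 *
 *	__count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + zone_idx(zone),
 *			  1 << order);
 */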
/*
 * Zone-based page accounting with per-cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
				enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

#ifdef CONFIG_NUMA
/*
 * Determine the per-node value of a stat item. This function is called
 * frequently on NUMA machines, so it tries to be as frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item);
}

extern void zone_statistics(struct zonelist *, struct zone *);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z) do { } while (0)

#endif /* CONFIG_NUMA */
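/*
 * Illustrative usage sketch; the variables here are hypothetical:
 *
 *	unsigned long pages = global_page_state(NR_FILE_PAGES);
 *	unsigned long node_pages = node_page_state(nid, NR_FILE_PAGES);
 *
 * On SMP the result is only approximate, because updates may still be
 * sitting in other CPUs' per-cpu differentials and a counter can
 * transiently read negative; that is why the readers above clamp
 * negative values to zero.
 */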
#define __add_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, __d)
#define __sub_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, -(__d))

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single-processor configuration;
 * the functions modify the zone and global counters directly.
 */
static inline void __mod_zone_page_state(struct zone *zone,
				enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
				enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
				enum zone_stat_item item)
{
	atomic_long_dec(&page_zone(page)->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

/*
 * We only use atomic operations to update counters, so there is no
 * need to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }
#endif

#endif /* _LINUX_VMSTAT_H */
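/*
 * Illustrative usage sketch; the calling context is hypothetical:
 *
 *	__inc_zone_page_state(page, NR_FILE_MAPPED);	caller keeps irqs off
 *	inc_zone_page_state(page, NR_FILE_MAPPED);	irq-safe variant
 *
 * On SMP the plain variants disable interrupts around the per-cpu
 * differential update, while the __ variants assume the caller already
 * has; on UP both map onto the same atomic_long operations, so no
 * interrupt disabling is needed at all.
 */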