/* include/linux/vmstat.h, as of Linux v2.6.39 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
#ifdef CONFIG_NUMA
		PGSCAN_ZONE_RECLAIM_FAILED,
#endif
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
		KSWAPD_SKIP_CONGESTION_WAIT,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_COMPACTION
		COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED,
		COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
#endif
#ifdef CONFIG_HUGETLB_PAGE
		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
		UNEVICTABLE_PGCULLED,	/* culled to noreclaim list */
		UNEVICTABLE_PGSCANNED,	/* scanned for reclaimability */
		UNEVICTABLE_PGRESCUED,	/* rescued from noreclaim list */
		UNEVICTABLE_PGMLOCKED,
		UNEVICTABLE_PGMUNLOCKED,
		UNEVICTABLE_PGCLEARED,	/* on COW, page truncate */
		UNEVICTABLE_PGSTRANDED,	/* unable to isolate on unlock */
		UNEVICTABLE_MLOCKFREED,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		THP_FAULT_ALLOC,
		THP_FAULT_FALLBACK,
		THP_COLLAPSE_ALLOC,
		THP_COLLAPSE_ALLOC_FAILED,
		THP_SPLIT,
#endif
		NR_VM_EVENT_ITEMS
};
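
/*
 * Illustration (not part of the header itself): with CONFIG_ZONE_DMA and
 * CONFIG_HIGHMEM set and CONFIG_ZONE_DMA32 unset, the line
 *
 *	FOR_ALL_ZONES(PGALLOC),
 *
 * in the enum above expands to
 *
 *	PGALLOC_DMA, PGALLOC_NORMAL, PGALLOC_HIGH, PGALLOC_MOVABLE,
 *
 * i.e. one event counter per compiled-in zone, in zone order.
 */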

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Light weight per cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/* Caller must already prevent preemption/cpu migration. */
static inline void __count_vm_event(enum vm_event_item item)
{
	__this_cpu_inc(vm_event_states.event[item]);
}

/* Preempt-safe variant, usable from preemptible context. */
static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__this_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}

extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Count an event for the zone a page lives in: item##_NORMAL is the
 * event for ZONE_NORMAL, and the per-zone events are declared in zone
 * order, so offsetting by zone_idx(zone) - ZONE_NORMAL selects the
 * event matching the zone.
 */
#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)

/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	/* Pending per-cpu deltas can make the sum transiently negative. */
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
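
/*
 * Usage sketch (illustrative; "min_ok" is a made-up helper, not part of
 * this header): cheap reads are fine when a counter is far from a limit,
 * while decisions near a limit may warrant the more expensive snapshot
 * that folds in the pending per-cpu deltas. The page allocator's safe
 * watermark check follows a similar pattern for NR_FREE_PAGES.
 *
 *	static bool min_ok(struct zone *zone, unsigned long min)
 *	{
 *		if (zone_page_state(zone, NR_FREE_PAGES) > min)
 *			return true;
 *		return zone_page_state_snapshot(zone, NR_FREE_PAGES) > min;
 *	}
 */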

extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
	zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
	zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
	zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
	zone_page_state(&zones[ZONE_NORMAL], item) +
	zone_page_state(&zones[ZONE_MOVABLE], item);
}

extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z, gfp) do { } while (0)

#endif /* CONFIG_NUMA */
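
/*
 * Example (illustrative only): comparing one node's free pages with the
 * machine-wide total. On !NUMA builds node_page_state() falls back to
 * global_page_state(), so the same caller code works in both
 * configurations.
 *
 *	unsigned long node_free  = node_page_state(0, NR_FREE_PAGES);
 *	unsigned long total_free = global_page_state(NR_FREE_PAGES);
 */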

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_cpu_vm_stats(int cpu) { }
#endif

#endif /* _LINUX_VMSTAT_H */
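
/*
 * Overall usage sketch (hypothetical function, for illustration only;
 * nothing below is declared by this header): callers typically bump a
 * zone counter for the affected page and, separately, any relevant
 * global event counter.
 *
 *	static void hypothetical_account(struct page *page)
 *	{
 *		inc_zone_page_state(page, NR_FILE_PAGES);
 *		count_vm_event(PGFAULT);
 *	}
 *
 * The __-prefixed variants are cheaper but assume the caller has already
 * taken care of preemption (and, for the SMP zone counters, interrupt)
 * safety; the plain variants are safe from any context.
 */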