/* linux/vmstat.h, as of Linux v2.6.26 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_HUGETLB_PAGE
		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
		NR_VM_EVENT_ITEMS
};

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__get_cpu_var(vm_event_states).event[item]++;
}

static inline void count_vm_event(enum vm_event_item item)
{
	get_cpu_var(vm_event_states).event[item]++;
	put_cpu();
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__get_cpu_var(vm_event_states).event[item] += delta;
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	get_cpu_var(vm_event_states).event[item] += delta;
	put_cpu();
}

extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)
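/*
 * Illustrative sketch, not part of the original header: how callers
 * typically bump these event counters.  The function and its use of
 * "order" are hypothetical stand-ins for page allocator code; only the
 * counter calls themselves match the declarations above.  Kept under
 * #if 0 so the header is unchanged for the compiler.
 */
#if 0
static void example_account_pages(struct zone *zone, int order)
{
	/* Per-zone event: resolves to PGALLOC_DMA/_NORMAL/... via zone_idx() */
	__count_zone_vm_events(PGALLOC, zone, 1 << order);

	/* Plain per-cpu event, preemption-safe variant */
	count_vm_events(PGFREE, 1 << order);
}
#endif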
/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
				enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}

extern void zone_statistics(struct zone *, struct zone *);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z) do { } while (0)

#endif /* CONFIG_NUMA */

#define __add_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, __d)
#define __sub_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, -(__d))

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);
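/*
 * Illustrative sketch, not part of the original header: reading the
 * counters back.  example_zone_share() is a hypothetical helper that
 * only uses the accessors defined above.  Note that on SMP the readers
 * clamp negative sums to zero, because unmerged per-cpu differentials
 * can make the global atomic transiently negative, so the results are
 * approximate by design.  Kept under #if 0 so the header is unchanged
 * for the compiler.
 */
#if 0
static unsigned long example_zone_share(struct zone *zone)
{
	unsigned long zone_free = zone_page_state(zone, NR_FREE_PAGES);
	unsigned long global_free = global_page_state(NR_FREE_PAGES);

	/* Percentage of all free pages that sit in this zone */
	return global_free ? zone_free * 100 / global_free : 0;
}
#endif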
#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }
#endif

#endif /* _LINUX_VMSTAT_H */