include/linux/vmstat.h at v4.13
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Light weight per cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
        unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
        raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
        this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
        raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
        this_cpu_add(vm_event_states.event[item], delta);
}

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)     count_vm_event(x)
#define count_vm_tlb_events(x, y) count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

#define __count_zid_vm_events(item, zid, delta) \
        __count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
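/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * header): callers that have already disabled preemption or interrupts can
 * use the cheaper __count_vm_event()/__count_vm_events(); otherwise the
 * this_cpu-based variants above are the safe default.  The helper name and
 * the swap-out framing below are hypothetical; only count_vm_event(),
 * count_vm_events(), PGFAULT and PSWPOUT come from the kernel itself.
 */
static inline void vmstat_example_count_events(unsigned long nr_swapped_out)
{
        /* One fault-style event: a single racy per-cpu increment. */
        count_vm_event(PGFAULT);

        /* Batched accounting, e.g. after writing several pages to swap. */
        if (nr_swapped_out)
                count_vm_events(PSWPOUT, nr_swapped_out);
}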
/*
 * Zone and node-based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
                                enum zone_stat_item item)
{
        atomic_long_add(x, &zone->vm_stat[item]);
        atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
                                enum node_stat_item item)
{
        atomic_long_add(x, &pgdat->vm_stat[item]);
        atomic_long_add(x, &vm_node_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
        long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
        long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
                                enum zone_stat_item item)
{
        long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
                                enum zone_stat_item item)
{
        long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
        int cpu;
        for_each_online_cpu(cpu)
                x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

        if (x < 0)
                x = 0;
#endif
        return x;
}

static inline unsigned long node_page_state_snapshot(pg_data_t *pgdat,
                                enum node_stat_item item)
{
        long x = atomic_long_read(&pgdat->vm_stat[item]);

#ifdef CONFIG_SMP
        int cpu;
        for_each_online_cpu(cpu)
                x += per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->vm_node_stat_diff[item];

        if (x < 0)
                x = 0;
#endif
        return x;
}

#ifdef CONFIG_NUMA
extern unsigned long sum_zone_node_page_state(int node,
                                enum zone_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
                                enum node_stat_item item);
#else
#define sum_zone_node_page_state(node, item) global_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
#define add_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, __d)
#define sub_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, -(__d))
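/*
 * Illustrative read-side sketch (editor's addition, not part of the original
 * header): the plain readers above are cheap but may lag behind by the
 * pending per-cpu deltas, while the _snapshot variants walk every online CPU
 * and are therefore reserved for slow paths or for decisions taken close to
 * a threshold.  The helper name and the near_watermark flag are
 * hypothetical; zone_page_state(), zone_page_state_snapshot() and
 * NR_FREE_PAGES are taken from this header and <linux/mmzone.h>.
 */
static inline unsigned long vmstat_example_free_pages(struct zone *zone,
                                                      bool near_watermark)
{
        if (near_watermark)
                /* Expensive but tighter estimate: fold in per-cpu deltas. */
                return zone_page_state_snapshot(zone, NR_FREE_PAGES);

        /* Fast path: the possibly stale global counter is good enough. */
        return zone_page_state(zone, NR_FREE_PAGES);
}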
#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write,
                   void __user *buffer, size_t *lenp, loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
                                int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
                        enum zone_stat_item item, long delta)
{
        zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
                        enum node_stat_item item, int delta)
{
        node_page_state_add(delta, pgdat, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
        atomic_long_inc(&zone->vm_stat[item]);
        atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
        atomic_long_inc(&pgdat->vm_stat[item]);
        atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
        atomic_long_dec(&zone->vm_stat[item]);
        atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
        atomic_long_dec(&pgdat->vm_stat[item]);
        atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
                        enum zone_stat_item item)
{
        __inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
                        enum node_stat_item item)
{
        __inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
                        enum zone_stat_item item)
{
        __dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
                        enum node_stat_item item)
{
        __dec_node_state(page_pgdat(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
                        struct per_cpu_pageset *pset) { }
#endif /* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
                                             int migratetype)
{
        __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
        if (is_migrate_cma(migratetype))
                __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
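/*
 * Illustrative update-side sketch (editor's addition, not part of the
 * original header).  Convention: the double-underscore modifiers are the raw
 * forms and expect the caller to already be in a safe context (for example
 * holding zone->lock with interrupts disabled), while the plain mod/inc/dec
 * variants may be used from any context on SMP; on UP builds the plain names
 * simply alias the raw ones.  The helper names and framing below are
 * hypothetical; the counter items and accessors are the ones declared above.
 */
static inline void vmstat_example_free_locked(struct zone *zone,
                                              unsigned int order,
                                              int migratetype)
{
        /*
         * Caller is assumed to hold zone->lock: the raw form is enough, and
         * it keeps NR_FREE_PAGES and NR_FREE_CMA_PAGES in sync for 2^order
         * pages, exactly as __mod_zone_freepage_state() above implements.
         */
        __mod_zone_freepage_state(zone, 1 << order, migratetype);
}

static inline void vmstat_example_mlock_unlocked(struct page *page)
{
        /* No context guarantee here: use the interrupt-safe plain variant. */
        inc_zone_page_state(page, NR_MLOCK);
}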
extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */