/* include/linux/vmstat.h, at Linux v4.11 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented, and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */
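/*
 * Usage sketch (illustrative only, not part of this header): bumping VM
 * event counters from a hypothetical fault-handling path. count_vm_event()
 * is safe from any context; the raw __count_vm_event() variant skips the
 * this_cpu_* protection and tolerates the occasional lost increment in
 * exchange for speed. PGFAULT and PGMAJFAULT come from vm_event_item.h.
 *
 *	static void hypothetical_fault_accounting(bool major)
 *	{
 *		count_vm_event(PGFAULT);
 *		if (major)
 *			count_vm_event(PGMAJFAULT);
 *	}
 */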
#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)	   count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)

/*
 * Zone and node-based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				       enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				       enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					    enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * A more accurate version that also considers the currently pending
 * deltas. For that we need to loop over all CPUs to find the current
 * deltas. There is no synchronization, so the result cannot be exactly
 * accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
						     enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long node_page_state_snapshot(pg_data_t *pgdat,
						     enum node_stat_item item)
{
	long x = atomic_long_read(&pgdat->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->vm_node_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
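/*
 * Usage sketch (illustrative only, not part of this header): the cheap
 * zone_page_state() reader may lag by up to the per-cpu stat threshold
 * on every CPU, so a caller close to a watermark can recheck with the
 * snapshot variant, much as zone_watermark_ok_safe() does. The helper
 * name below is hypothetical; min_wmark_pages() comes from mmzone.h.
 *
 *	static bool hypothetical_below_min_wmark(struct zone *zone)
 *	{
 *		if (zone_page_state(zone, NR_FREE_PAGES) > min_wmark_pages(zone))
 *			return false;
 *		return zone_page_state_snapshot(zone, NR_FREE_PAGES) <=
 *		       min_wmark_pages(zone);
 *	}
 */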
#ifdef CONFIG_NUMA
extern unsigned long sum_zone_node_page_state(int node,
					      enum zone_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
				     enum node_stat_item item);
#else
#define sum_zone_node_page_state(node, item) global_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
#define add_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, __d)
#define sub_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, -(__d))

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
					 enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
					 enum node_stat_item item, long delta)
{
	node_page_state_add(delta, pgdat, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
					 enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
					 enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
					 enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
					 enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) do { } while (0)

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
				  struct per_cpu_pageset *pset) { }
#endif /* CONFIG_SMP */
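/*
 * Usage sketch (illustrative only, not part of this header): the
 * __-prefixed modifiers assume the caller already runs with interrupts
 * disabled or is otherwise serialized; the plain variants are safe from
 * any context. On !SMP builds the #defines above collapse the two
 * families into one atomic update. In this kernel version NR_MLOCK is a
 * zone counter and NR_FILE_PAGES a node counter; the helper name below
 * is hypothetical.
 *
 *	static void hypothetical_account_page(struct page *page)
 *	{
 *		inc_zone_page_state(page, NR_MLOCK);
 *		dec_node_page_state(page, NR_FILE_PAGES);
 *	}
 */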
static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}

extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */
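/*
 * Usage sketch (illustrative only, not part of this header): accounting
 * pages returned to the buddy allocator under zone->lock with interrupts
 * off, hence the __ variant. Pages in CMA pageblocks must show up in
 * NR_FREE_CMA_PAGES as well as NR_FREE_PAGES, and
 * __mod_zone_freepage_state() handles both in one call. The helper name
 * below is hypothetical.
 *
 *	static void hypothetical_account_freed(struct zone *zone,
 *					       unsigned int order, int migratetype)
 *	{
 *		__mod_zone_freepage_state(zone, 1 << order, migratetype);
 *	}
 */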