#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the
 * generated code is simply an increment of a global address.
 */

struct vm_event_state {
        unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
        raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
        this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
        raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
        this_cpu_add(vm_event_states.event[item], delta);
}
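
/*
 * Example (illustrative only, not part of this header): a page-fault
 * path can bump the PGFAULT event with
 *
 *      count_vm_event(PGFAULT);
 *
 * or, when the caller already runs with preemption or interrupts
 * disabled, skip the this_cpu protection with
 *
 *      __count_vm_event(PGFAULT);
 *
 * The counters are racy by design, so an occasional lost increment
 * is acceptable.
 */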

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x) count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x) count_vm_event(x)
#define count_vm_tlb_events(x, y) count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif
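
/*
 * Note that when the config option is off, the two-argument forms
 * above still evaluate their delta argument via (void)(y), so callers
 * do not trip unused-variable warnings; the event argument itself
 * compiles away entirely.
 */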

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

#define __count_zid_vm_events(item, zid, delta) \
        __count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
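
/*
 * For example (illustrative): __count_zid_vm_events(PGALLOC, ZONE_DMA, 1)
 * expands to __count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + ZONE_DMA, 1).
 * This relies on the per-zone event items being laid out in the same
 * order as the zone enum, with the item##_NORMAL entry as the anchor.
 */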

/*
 * Zone and node-based page accounting with per-cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];
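
/*
 * Sketch of the scheme (a summary, not code from this header): each
 * zone/node keeps small signed per-cpu diffs alongside these global
 * atomics. Hot paths touch only the local diff; the diffs are folded
 * into the atomic_long_t totals when they cross a threshold or when
 * the vmstat worker refreshes. Readers of the bare atomics therefore
 * see slightly stale, and occasionally negative, sums.
 */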

#ifdef CONFIG_NUMA
static inline void zone_numa_state_add(long x, struct zone *zone,
                                enum numa_stat_item item)
{
        atomic_long_add(x, &zone->vm_numa_stat[item]);
        atomic_long_add(x, &vm_numa_stat[item]);
}

static inline unsigned long global_numa_state(enum numa_stat_item item)
{
        long x = atomic_long_read(&vm_numa_stat[item]);

        return x;
}

static inline unsigned long zone_numa_state_snapshot(struct zone *zone,
                                enum numa_stat_item item)
{
        long x = atomic_long_read(&zone->vm_numa_stat[item]);
        int cpu;

        for_each_online_cpu(cpu)
                x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item];

        return x;
}
#endif /* CONFIG_NUMA */

static inline void zone_page_state_add(long x, struct zone *zone,
                                enum zone_stat_item item)
{
        atomic_long_add(x, &zone->vm_stat[item]);
        atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
                                enum node_stat_item item)
{
        atomic_long_add(x, &pgdat->vm_stat[item]);
        atomic_long_add(x, &vm_node_stat[item]);
}

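/*
 * The readers below clamp negative sums to 0 on SMP: with unfolded
 * per-cpu diffs still in flight, the bare atomic total can transiently
 * dip below zero even though the true count never does.
 */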
static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
        long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
        long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
                                enum zone_stat_item item)
{
        long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization, so the result still cannot be
 * exactly accurate.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
                                enum zone_stat_item item)
{
        long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
        int cpu;
        for_each_online_cpu(cpu)
                x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

        if (x < 0)
                x = 0;
#endif
        return x;
}

static inline unsigned long node_page_state_snapshot(pg_data_t *pgdat,
                                enum node_stat_item item)
{
        long x = atomic_long_read(&pgdat->vm_stat[item]);

#ifdef CONFIG_SMP
        int cpu;
        for_each_online_cpu(cpu)
                x += per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->vm_node_stat_diff[item];

        if (x < 0)
                x = 0;
#endif
        return x;
}
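
/*
 * Rule of thumb (illustrative, not from this header): the plain
 * zone_page_state() is cheap but may be slightly stale, while
 * zone_page_state_snapshot() walks all online cpus and is better
 * reserved for decisions where the pending per-cpu deltas matter,
 * e.g. deciding whether a zone is genuinely out of free pages:
 *
 *      bool empty = zone_page_state_snapshot(zone, NR_FREE_PAGES) == 0;
 */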

#ifdef CONFIG_NUMA
extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item);
extern unsigned long sum_zone_node_page_state(int node,
                                enum zone_stat_item item);
extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
                                enum node_stat_item item);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
#define add_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, __d)
#define sub_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, -(__d))

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write,
                   void __user *buffer, size_t *lenp, loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
                                int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
                        enum zone_stat_item item, long delta)
{
        zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
                        enum node_stat_item item, long delta)
{
        node_page_state_add(delta, pgdat, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
        atomic_long_inc(&zone->vm_stat[item]);
        atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
        atomic_long_inc(&pgdat->vm_stat[item]);
        atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
        atomic_long_dec(&zone->vm_stat[item]);
        atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
        atomic_long_dec(&pgdat->vm_stat[item]);
        atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
                        enum zone_stat_item item)
{
        __inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
                        enum node_stat_item item)
{
        __inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
                        enum zone_stat_item item)
{
        __dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
                        enum node_stat_item item)
{
        __dec_node_state(page_pgdat(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no
 * need to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) do { } while (0)

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
                        struct per_cpu_pageset *pset) { }
#endif /* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
                                             int migratetype)
{
        __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
        if (is_migrate_cma(migratetype))
                __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
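
/*
 * Illustrative call (not from this header): returning 1 << order pages
 * to the free lists would credit NR_FREE_PAGES, and additionally
 * NR_FREE_CMA_PAGES when the pageblock is MIGRATE_CMA:
 *
 *      __mod_zone_freepage_state(zone, 1 << order,
 *                                get_pageblock_migratetype(page));
 */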

extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */