#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <asm/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/* Caller must have preemption (or interrupts) disabled. */
static inline void __count_vm_event(enum vm_event_item item)
{
	__this_cpu_inc(vm_event_states.event[item]);
}

/* Preemption safe; usable from any context. */
static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__this_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
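
/*
 * Illustrative sketch (editor's example, not kernel API): callers pick
 * the variant matching their context.  PGFAULT is one of the items
 * declared in <linux/vm_event_item.h>; the helper name is an example
 * only.
 */
static inline void example_count_faults(void)
{
	count_vm_event(PGFAULT);		/* safe in any context */

	preempt_disable();
	__count_vm_events(PGFAULT, 2);		/* caller serializes */
	preempt_enable();
}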

extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */
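
/*
 * Illustrative sketch (editor's example, not kernel API): all_vm_events()
 * fills a caller-supplied array with the events summed over all cpus.
 * The buffer is zeroed first because the !CONFIG_VM_EVENT_COUNTERS stub
 * above leaves it untouched.
 */
static inline unsigned long example_total_vm_events(enum vm_event_item item)
{
	unsigned long events[NR_VM_EVENT_ITEMS];

	memset(events, 0, sizeof(events));
	all_vm_events(events);
	return events[item];
}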

#define __count_zone_vm_events(item, zone, delta) \
			__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
			zone_idx(zone), delta)

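/*
 * Illustrative expansion (editor's note): the per-zone event items in
 * <linux/vm_event_item.h> are declared contiguously, in zone order, so
 * for the PGALLOC items
 *
 *	__count_zone_vm_events(PGALLOC, zone, 1);
 *
 * expands to
 *
 *	__count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + zone_idx(zone), 1);
 *
 * which picks PGALLOC_DMA, PGALLOC_NORMAL, PGALLOC_MOVABLE, ... to
 * match the zone's index.
 */
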
/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	/*
	 * The counter can transiently go negative while per-cpu deltas
	 * are still pending, so clamp the reported value at zero.
	 */
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * More accurate version that also considers the currently pending
 * per-cpu deltas. Gathering those requires a loop over all cpus, and
 * since there is no synchronization the result is still only
 * approximate.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
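
/*
 * Illustrative sketch (editor's example, not kernel API): the cheap
 * read suffices for reporting, while a decision taken near a boundary
 * may want the snapshot.  NR_FREE_PAGES is an existing zone_stat_item;
 * the helper names are examples only.
 */
static inline bool example_zone_roughly_below(struct zone *zone,
					unsigned long mark)
{
	return zone_page_state(zone, NR_FREE_PAGES) < mark;
}

static inline bool example_zone_precisely_below(struct zone *zone,
					unsigned long mark)
{
	return zone_page_state_snapshot(zone, NR_FREE_PAGES) < mark;
}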

extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}

extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z, gfp) do { } while (0)

#endif /* CONFIG_NUMA */
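
/*
 * Illustrative sketch (editor's example, not kernel API):
 * node_page_state() works whether CONFIG_NUMA is set (per-node sum) or
 * not (global value).  NR_FILE_PAGES is an existing zone_stat_item; the
 * helper name is an example only.
 */
static inline unsigned long example_node_file_pages(int node)
{
	return node_page_state(node, NR_FILE_PAGES);
}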

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
void refresh_zone_stat_thresholds(void);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define set_pgdat_percpu_threshold(pgdat, callback) do { } while (0)

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_zone_stat_thresholds(void) { }

#endif		/* CONFIG_SMP */

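/*
 * Illustrative sketch (editor's example, not kernel API): the same
 * entry points are used whichever CONFIG_SMP branch was compiled in.
 * NR_FILE_PAGES is an existing zone_stat_item; the helper names are
 * examples only.
 */
static inline void example_account_file_page(struct page *page)
{
	inc_zone_page_state(page, NR_FILE_PAGES);
}

static inline void example_unaccount_file_page(struct page *page)
{
	dec_zone_page_state(page, NR_FILE_PAGES);
}
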
extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */