/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>
#include <linux/static_key.h>
#include <linux/mmdebug.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_NUMA
#define ENABLE_NUMA_STAT 1
#define DISABLE_NUMA_STAT 0
extern int sysctl_vm_numa_stat;
DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
int sysctl_vm_numa_stat_handler(const struct ctl_table *table, int write,
                void *buffer, size_t *length, loff_t *ppos);
#endif

struct reclaim_stat {
        unsigned nr_dirty;
        unsigned nr_unqueued_dirty;
        unsigned nr_congested;
        unsigned nr_writeback;
        unsigned nr_immediate;
        unsigned nr_pageout;
        unsigned nr_activate[ANON_AND_FILE];
        unsigned nr_ref_keep;
        unsigned nr_unmap_fail;
        unsigned nr_lazyfree_fail;
        unsigned nr_demoted;
};

/* Stat data for system wide items */
enum vm_stat_item {
        NR_DIRTY_THRESHOLD,
        NR_DIRTY_BG_THRESHOLD,
        NR_MEMMAP_PAGES,        /* page metadata allocated through buddy allocator */
        NR_MEMMAP_BOOT_PAGES,   /* page metadata allocated through boot allocator */
        NR_VM_STAT_ITEMS,
};

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
        unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
        raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
        this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
        raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
        this_cpu_add(vm_event_states.event[item], delta);
}
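
/*
 * Usage sketch (illustrative; PGFAULT is one vm_event_item from
 * linux/vm_event_item.h):
 *
 *      count_vm_event(PGFAULT);        - safe from any context
 *      __count_vm_event(PGFAULT);      - only when the caller already
 *                                        prevents preemption/interrupt
 *                                        races on this CPU's counter
 */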

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)      count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_PER_VMA_LOCK_STATS
#define count_vm_vma_lock_event(x) count_vm_event(x)
#else
#define count_vm_vma_lock_event(x) do {} while (0)
#endif

#define __count_zid_vm_events(item, zid, delta) \
        __count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)

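/*
 * Example expansion (illustrative; the PGALLOC_* events exist per zone
 * in linux/vm_event_item.h, declared in zone order):
 *
 *      __count_zid_vm_events(PGALLOC, zid, 1)
 * expands to
 *      __count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + zid, 1)
 *
 * i.e. the event is addressed by its offset from the _NORMAL variant,
 * which works because the per-zone events mirror the zone order.
 */
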
/*
 * Zone and node-based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];
extern atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];

#ifdef CONFIG_NUMA
static inline void zone_numa_event_add(long x, struct zone *zone,
                                enum numa_stat_item item)
{
        atomic_long_add(x, &zone->vm_numa_event[item]);
        atomic_long_add(x, &vm_numa_event[item]);
}

static inline unsigned long zone_numa_event_state(struct zone *zone,
                                        enum numa_stat_item item)
{
        return atomic_long_read(&zone->vm_numa_event[item]);
}

static inline unsigned long
global_numa_event_state(enum numa_stat_item item)
{
        return atomic_long_read(&vm_numa_event[item]);
}
#endif /* CONFIG_NUMA */

static inline void zone_page_state_add(long x, struct zone *zone,
                                enum zone_stat_item item)
{
        atomic_long_add(x, &zone->vm_stat[item]);
        atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
                                enum node_stat_item item)
{
        atomic_long_add(x, &pgdat->vm_stat[item]);
        atomic_long_add(x, &vm_node_stat[item]);
}

static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
        long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

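/*
 * Note (descriptive): under SMP the global and per-zone/node counters
 * can be transiently negative while per-cpu deltas are still unfolded,
 * which is why the readers above and below clamp negative values to zero.
 */
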
static inline
unsigned long global_node_page_state_pages(enum node_stat_item item)
{
        long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
        VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

        return global_node_page_state_pages(item);
}

static inline unsigned long zone_page_state(struct zone *zone,
                                        enum zone_stat_item item)
{
        long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization, so the result is still only an
 * approximation.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
                                        enum zone_stat_item item)
{
        long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
        int cpu;
        for_each_online_cpu(cpu)
                x += per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_stat_diff[item];

        if (x < 0)
                x = 0;
#endif
        return x;
}
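
/*
 * Tradeoff sketch (descriptive): zone_page_state() is a single atomic
 * read but may lag by up to the per-cpu threshold on every CPU, while
 * zone_page_state_snapshot() pays an O(nr_cpus) walk for a tighter
 * estimate. Slow paths that make decisions on the value generally want
 * the snapshot; hot paths should use the cheap read.
 */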

#ifdef CONFIG_NUMA
/* See __count_vm_event comment on why raw_cpu_inc is used. */
static inline void
__count_numa_event(struct zone *zone, enum numa_stat_item item)
{
        struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;

        raw_cpu_inc(pzstats->vm_numa_event[item]);
}

static inline void
__count_numa_events(struct zone *zone, enum numa_stat_item item, long delta)
{
        struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;

        raw_cpu_add(pzstats->vm_numa_event[item], delta);
}

extern unsigned long sum_zone_node_page_state(int node,
                                              enum zone_stat_item item);
extern unsigned long sum_zone_numa_event_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
                                                enum node_stat_item item);
extern unsigned long node_page_state_pages(struct pglist_data *pgdat,
                                           enum node_stat_item item);
extern void fold_vm_numa_events(void);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#define node_page_state_pages(node, item) global_node_page_state_pages(item)
static inline void fold_vm_numa_events(void)
{
}
#endif /* CONFIG_NUMA */

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(const struct ctl_table *, int write, void *buffer, size_t *lenp,
                loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
                                int (*calculate_pressure)(struct zone *));
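
/*
 * Sketch of the threshold scheme (descriptive, not verbatim from this
 * file): per-cpu deltas are folded into the atomic counters only once
 * they exceed a threshold. calculate_normal_threshold() picks a generous
 * slack for the common case; under memory pressure the reclaim code can
 * install the tighter calculate_pressure_threshold() via
 * set_pgdat_percpu_threshold() so that watermark checks see fresher
 * counter values.
 */
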
#else           /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
                        enum zone_stat_item item, long delta)
{
        zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
                        enum node_stat_item item, int delta)
{
        if (vmstat_item_in_bytes(item)) {
                /*
                 * Only cgroups use subpage accounting right now; at
                 * the global level, these items still change in
                 * multiples of whole pages. Store them as pages
                 * internally to keep the per-cpu counters compact.
                 */
                VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
                delta >>= PAGE_SHIFT;
        }

        node_page_state_add(delta, pgdat, item);
}
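
/*
 * Illustrative caller (NR_SLAB_RECLAIMABLE_B is a byte-counted
 * node_stat_item from linux/mmzone.h; the _B suffix marks items whose
 * deltas are passed in bytes):
 *
 *      __mod_node_page_state(pgdat, NR_SLAB_RECLAIMABLE_B, PAGE_SIZE);
 *
 * records a delta of exactly one page. A delta that is not a multiple
 * of PAGE_SIZE would trip the VM_WARN_ON_ONCE() above.
 */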

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
        atomic_long_inc(&zone->vm_stat[item]);
        atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
        atomic_long_inc(&pgdat->vm_stat[item]);
        atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
        atomic_long_dec(&zone->vm_stat[item]);
        atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
        atomic_long_dec(&pgdat->vm_stat[item]);
        atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
                        enum zone_stat_item item)
{
        __inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
                        enum node_stat_item item)
{
        __inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
                        enum zone_stat_item item)
{
        __dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
                        enum node_stat_item item)
{
        __dec_node_state(page_pgdat(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
                        struct per_cpu_zonestat *pzstats) { }
#endif          /* CONFIG_SMP */

static inline void __zone_stat_mod_folio(struct folio *folio,
                enum zone_stat_item item, long nr)
{
        __mod_zone_page_state(folio_zone(folio), item, nr);
}

static inline void __zone_stat_add_folio(struct folio *folio,
                enum zone_stat_item item)
{
        __mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));
}

static inline void __zone_stat_sub_folio(struct folio *folio,
                enum zone_stat_item item)
{
        __mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));
}

static inline void zone_stat_mod_folio(struct folio *folio,
                enum zone_stat_item item, long nr)
{
        mod_zone_page_state(folio_zone(folio), item, nr);
}

static inline void zone_stat_add_folio(struct folio *folio,
                enum zone_stat_item item)
{
        mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));
}

static inline void zone_stat_sub_folio(struct folio *folio,
                enum zone_stat_item item)
{
        mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));
}

static inline void __node_stat_mod_folio(struct folio *folio,
                enum node_stat_item item, long nr)
{
        __mod_node_page_state(folio_pgdat(folio), item, nr);
}

static inline void __node_stat_add_folio(struct folio *folio,
                enum node_stat_item item)
{
        __mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio));
}

static inline void __node_stat_sub_folio(struct folio *folio,
                enum node_stat_item item)
{
        __mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));
}

static inline void node_stat_mod_folio(struct folio *folio,
                enum node_stat_item item, long nr)
{
        mod_node_page_state(folio_pgdat(folio), item, nr);
}

static inline void node_stat_add_folio(struct folio *folio,
                enum node_stat_item item)
{
        mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio));
}

static inline void node_stat_sub_folio(struct folio *folio,
                enum node_stat_item item)
{
        mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));
}
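
/*
 * Usage sketch (illustrative; NR_FILE_PAGES is a node_stat_item from
 * linux/mmzone.h). The folio helpers scale the delta by the folio size,
 * so large folios need no special handling by the caller:
 *
 *      node_stat_add_folio(folio, NR_FILE_PAGES);
 * is equivalent to
 *      mod_node_page_state(folio_pgdat(folio), NR_FILE_PAGES,
 *                          folio_nr_pages(folio));
 */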

extern const char * const vmstat_text[];

static inline const char *zone_stat_name(enum zone_stat_item item)
{
        return vmstat_text[item];
}

#ifdef CONFIG_NUMA
static inline const char *numa_stat_name(enum numa_stat_item item)
{
        return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
                           item];
}
#endif /* CONFIG_NUMA */

static inline const char *node_stat_name(enum node_stat_item item)
{
        return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
                           NR_VM_NUMA_EVENT_ITEMS +
                           item];
}

static inline const char *lru_list_name(enum lru_list lru)
{
        return node_stat_name(NR_LRU_BASE + (enum node_stat_item)lru) + 3; // skip "nr_"
}
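
/*
 * Example (illustrative; the enum names come from linux/mmzone.h):
 * node_stat_name(NR_ACTIVE_ANON) is "nr_active_anon", so
 * lru_list_name(LRU_ACTIVE_ANON) yields "active_anon" - the "+ 3"
 * strips the "nr_" prefix.
 */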

#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
static inline const char *vm_event_name(enum vm_event_item item)
{
        return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
                           NR_VM_NUMA_EVENT_ITEMS +
                           NR_VM_NODE_STAT_ITEMS +
                           NR_VM_STAT_ITEMS +
                           item];
}
#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
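
/*
 * Layout assumed by the name helpers above (descriptive): vmstat_text is
 * one flat array holding, in order, the zone stat names, the NUMA event
 * names, the node stat names, the vm_stat_item names, and finally the vm
 * event names; each helper simply offsets past the preceding groups.
 */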

#ifdef CONFIG_MEMCG

void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
                        int val);

static inline void mod_lruvec_state(struct lruvec *lruvec,
                                    enum node_stat_item idx, int val)
{
        unsigned long flags;

        local_irq_save(flags);
        __mod_lruvec_state(lruvec, idx, val);
        local_irq_restore(flags);
}

void __lruvec_stat_mod_folio(struct folio *folio,
                             enum node_stat_item idx, int val);

static inline void lruvec_stat_mod_folio(struct folio *folio,
                                         enum node_stat_item idx, int val)
{
        unsigned long flags;

        local_irq_save(flags);
        __lruvec_stat_mod_folio(folio, idx, val);
        local_irq_restore(flags);
}

static inline void mod_lruvec_page_state(struct page *page,
                                         enum node_stat_item idx, int val)
{
        lruvec_stat_mod_folio(page_folio(page), idx, val);
}

#else

static inline void __mod_lruvec_state(struct lruvec *lruvec,
                                      enum node_stat_item idx, int val)
{
        __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
                                    enum node_stat_item idx, int val)
{
        mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void __lruvec_stat_mod_folio(struct folio *folio,
                                           enum node_stat_item idx, int val)
{
        __mod_node_page_state(folio_pgdat(folio), idx, val);
}

static inline void lruvec_stat_mod_folio(struct folio *folio,
                                         enum node_stat_item idx, int val)
{
        mod_node_page_state(folio_pgdat(folio), idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
                                         enum node_stat_item idx, int val)
{
        mod_node_page_state(page_pgdat(page), idx, val);
}

#endif /* CONFIG_MEMCG */
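
/*
 * Usage sketch (illustrative; NR_SHMEM is a node_stat_item from
 * linux/mmzone.h, and mapping_lock is a hypothetical lock): the
 * double-underscore variants assume the caller has already disabled
 * interrupts, e.g. under a spin_lock_irqsave()'d lock, while the plain
 * variants do the local_irq_save()/restore() themselves:
 *
 *      spin_lock_irqsave(&mapping_lock, flags);
 *      __lruvec_stat_mod_folio(folio, NR_SHMEM, folio_nr_pages(folio));
 *      spin_unlock_irqrestore(&mapping_lock, flags);
 */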

static inline void __lruvec_stat_add_folio(struct folio *folio,
                                           enum node_stat_item idx)
{
        __lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
}

static inline void __lruvec_stat_sub_folio(struct folio *folio,
                                           enum node_stat_item idx)
{
        __lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
}

static inline void lruvec_stat_add_folio(struct folio *folio,
                                         enum node_stat_item idx)
{
        lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
}

static inline void lruvec_stat_sub_folio(struct folio *folio,
                                         enum node_stat_item idx)
{
        lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
}

void memmap_boot_pages_add(long delta);
void memmap_pages_add(long delta);
#endif /* _LINUX_VMSTAT_H */