Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm, memcg: rename ambiguously named memory.stat counters and functions

I spent literally an hour trying to work out why an earlier version of
my memory.events aggregation code didn't work properly, only to find
out I was calling memcg->events instead of memcg->memory_events, which
is fairly confusing.

This naming seems in need of reworking, so make it harder to do the
wrong thing by using vmevents instead of events, which makes it more
clear that these are vm counters rather than memcg-specific counters.

There are also a few other inconsistent names in both the percpu and
aggregated structs, so these are all cleaned up to be more coherent and
easy to understand.

This commit contains code cleanup only: there are no logic changes.

[akpm@linux-foundation.org: fix it for preceding changes]
Link: http://lkml.kernel.org/r/20190208224319.GA23801@chrisdown.name
Signed-off-by: Chris Down <chris@chrisdown.name>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Roman Gushchin <guro@fb.com>
Cc: Dennis Zhou <dennis@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Chris Down and committed by Linus Torvalds
871789d4 b09e8936

+87 -83
+12 -12
include/linux/memcontrol.h
··· 94 94 MEM_CGROUP_NTARGETS, 95 95 }; 96 96 97 - struct mem_cgroup_stat_cpu { 98 - long count[MEMCG_NR_STAT]; 97 + struct memcg_vmstats_percpu { 98 + long stat[MEMCG_NR_STAT]; 99 99 unsigned long events[NR_VM_EVENT_ITEMS]; 100 100 unsigned long nr_page_events; 101 101 unsigned long targets[MEM_CGROUP_NTARGETS]; ··· 274 274 struct task_struct *move_lock_task; 275 275 276 276 /* memory.stat */ 277 - struct mem_cgroup_stat_cpu __percpu *stat_cpu; 277 + struct memcg_vmstats_percpu __percpu *vmstats_percpu; 278 278 279 279 MEMCG_PADDING(_pad2_); 280 280 281 - atomic_long_t stat[MEMCG_NR_STAT]; 282 - atomic_long_t events[NR_VM_EVENT_ITEMS]; 281 + atomic_long_t vmstats[MEMCG_NR_STAT]; 282 + atomic_long_t vmevents[NR_VM_EVENT_ITEMS]; 283 283 atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS]; 284 284 285 285 unsigned long socket_pressure; ··· 557 557 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, 558 558 int idx) 559 559 { 560 - long x = atomic_long_read(&memcg->stat[idx]); 560 + long x = atomic_long_read(&memcg->vmstats[idx]); 561 561 #ifdef CONFIG_SMP 562 562 if (x < 0) 563 563 x = 0; ··· 574 574 if (mem_cgroup_disabled()) 575 575 return; 576 576 577 - x = val + __this_cpu_read(memcg->stat_cpu->count[idx]); 577 + x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]); 578 578 if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) { 579 - atomic_long_add(x, &memcg->stat[idx]); 579 + atomic_long_add(x, &memcg->vmstats[idx]); 580 580 x = 0; 581 581 } 582 - __this_cpu_write(memcg->stat_cpu->count[idx], x); 582 + __this_cpu_write(memcg->vmstats_percpu->stat[idx], x); 583 583 } 584 584 585 585 /* idx can be of type enum memcg_stat_item or node_stat_item */ ··· 717 717 if (mem_cgroup_disabled()) 718 718 return; 719 719 720 - x = count + __this_cpu_read(memcg->stat_cpu->events[idx]); 720 + x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]); 721 721 if (unlikely(x > MEMCG_CHARGE_BATCH)) { 722 - atomic_long_add(x, &memcg->events[idx]); 722 + 
atomic_long_add(x, &memcg->vmevents[idx]); 723 723 x = 0; 724 724 } 725 - __this_cpu_write(memcg->stat_cpu->events[idx], x); 725 + __this_cpu_write(memcg->vmstats_percpu->events[idx], x); 726 726 } 727 727 728 728 static inline void count_memcg_events(struct mem_cgroup *memcg,
+75 -71
mm/memcontrol.c
··· 690 690 static unsigned long memcg_sum_events(struct mem_cgroup *memcg, 691 691 int event) 692 692 { 693 - return atomic_long_read(&memcg->events[event]); 693 + return atomic_long_read(&memcg->vmevents[event]); 694 694 } 695 695 696 696 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, ··· 722 722 nr_pages = -nr_pages; /* for event */ 723 723 } 724 724 725 - __this_cpu_add(memcg->stat_cpu->nr_page_events, nr_pages); 725 + __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages); 726 726 } 727 727 728 728 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg, ··· 730 730 { 731 731 unsigned long val, next; 732 732 733 - val = __this_cpu_read(memcg->stat_cpu->nr_page_events); 734 - next = __this_cpu_read(memcg->stat_cpu->targets[target]); 733 + val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events); 734 + next = __this_cpu_read(memcg->vmstats_percpu->targets[target]); 735 735 /* from time_after() in jiffies.h */ 736 736 if ((long)(next - val) < 0) { 737 737 switch (target) { ··· 747 747 default: 748 748 break; 749 749 } 750 - __this_cpu_write(memcg->stat_cpu->targets[target], next); 750 + __this_cpu_write(memcg->vmstats_percpu->targets[target], next); 751 751 return true; 752 752 } 753 753 return false; ··· 2088 2088 int nid; 2089 2089 long x; 2090 2090 2091 - x = this_cpu_xchg(memcg->stat_cpu->count[i], 0); 2091 + x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0); 2092 2092 if (x) 2093 - atomic_long_add(x, &memcg->stat[i]); 2093 + atomic_long_add(x, &memcg->vmstats[i]); 2094 2094 2095 2095 if (i >= NR_VM_NODE_STAT_ITEMS) 2096 2096 continue; ··· 2108 2108 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) { 2109 2109 long x; 2110 2110 2111 - x = this_cpu_xchg(memcg->stat_cpu->events[i], 0); 2111 + x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0); 2112 2112 if (x) 2113 - atomic_long_add(x, &memcg->events[i]); 2113 + atomic_long_add(x, &memcg->vmevents[i]); 2114 2114 } 2115 2115 } 2116 2116 ··· 2940 2940 return retval; 2941 2941 
} 2942 2942 2943 - struct accumulated_stats { 2944 - unsigned long stat[MEMCG_NR_STAT]; 2945 - unsigned long events[NR_VM_EVENT_ITEMS]; 2943 + struct accumulated_vmstats { 2944 + unsigned long vmstats[MEMCG_NR_STAT]; 2945 + unsigned long vmevents[NR_VM_EVENT_ITEMS]; 2946 2946 unsigned long lru_pages[NR_LRU_LISTS]; 2947 - const unsigned int *stats_array; 2948 - const unsigned int *events_array; 2949 - int stats_size; 2950 - int events_size; 2947 + 2948 + /* overrides for v1 */ 2949 + const unsigned int *vmstats_array; 2950 + const unsigned int *vmevents_array; 2951 + 2952 + int vmstats_size; 2953 + int vmevents_size; 2951 2954 }; 2952 2955 2953 - static void accumulate_memcg_tree(struct mem_cgroup *memcg, 2954 - struct accumulated_stats *acc) 2956 + static void accumulate_vmstats(struct mem_cgroup *memcg, 2957 + struct accumulated_vmstats *acc) 2955 2958 { 2956 2959 struct mem_cgroup *mi; 2957 2960 int i; 2958 2961 2959 2962 for_each_mem_cgroup_tree(mi, memcg) { 2960 - for (i = 0; i < acc->stats_size; i++) 2961 - acc->stat[i] += memcg_page_state(mi, 2962 - acc->stats_array ? acc->stats_array[i] : i); 2963 + for (i = 0; i < acc->vmstats_size; i++) 2964 + acc->vmstats[i] += memcg_page_state(mi, 2965 + acc->vmstats_array ? acc->vmstats_array[i] : i); 2963 2966 2964 - for (i = 0; i < acc->events_size; i++) 2965 - acc->events[i] += memcg_sum_events(mi, 2966 - acc->events_array ? acc->events_array[i] : i); 2967 + for (i = 0; i < acc->vmevents_size; i++) 2968 + acc->vmevents[i] += memcg_sum_events(mi, 2969 + acc->vmevents_array 2970 + ? 
acc->vmevents_array[i] : i); 2967 2971 2968 2972 for (i = 0; i < NR_LRU_LISTS; i++) 2969 2973 acc->lru_pages[i] += memcg_page_state(mi, ··· 3418 3414 unsigned long memory, memsw; 3419 3415 struct mem_cgroup *mi; 3420 3416 unsigned int i; 3421 - struct accumulated_stats acc; 3417 + struct accumulated_vmstats acc; 3422 3418 3423 3419 BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats)); 3424 3420 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS); ··· 3453 3449 (u64)memsw * PAGE_SIZE); 3454 3450 3455 3451 memset(&acc, 0, sizeof(acc)); 3456 - acc.stats_size = ARRAY_SIZE(memcg1_stats); 3457 - acc.stats_array = memcg1_stats; 3458 - acc.events_size = ARRAY_SIZE(memcg1_events); 3459 - acc.events_array = memcg1_events; 3460 - accumulate_memcg_tree(memcg, &acc); 3452 + acc.vmstats_size = ARRAY_SIZE(memcg1_stats); 3453 + acc.vmstats_array = memcg1_stats; 3454 + acc.vmevents_size = ARRAY_SIZE(memcg1_events); 3455 + acc.vmevents_array = memcg1_events; 3456 + accumulate_vmstats(memcg, &acc); 3461 3457 3462 3458 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 3463 3459 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 3464 3460 continue; 3465 3461 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], 3466 - (u64)acc.stat[i] * PAGE_SIZE); 3462 + (u64)acc.vmstats[i] * PAGE_SIZE); 3467 3463 } 3468 3464 3469 3465 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 3470 3466 seq_printf(m, "total_%s %llu\n", memcg1_event_names[i], 3471 - (u64)acc.events[i]); 3467 + (u64)acc.vmevents[i]); 3472 3468 3473 3469 for (i = 0; i < NR_LRU_LISTS; i++) 3474 3470 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], ··· 3905 3901 */ 3906 3902 static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx) 3907 3903 { 3908 - long x = atomic_long_read(&memcg->stat[idx]); 3904 + long x = atomic_long_read(&memcg->vmstats[idx]); 3909 3905 int cpu; 3910 3906 3911 3907 for_each_online_cpu(cpu) 3912 - x += per_cpu_ptr(memcg->stat_cpu, 
cpu)->count[idx]; 3908 + x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx]; 3913 3909 if (x < 0) 3914 3910 x = 0; 3915 3911 return x; ··· 4449 4445 4450 4446 for_each_node(node) 4451 4447 free_mem_cgroup_per_node_info(memcg, node); 4452 - free_percpu(memcg->stat_cpu); 4448 + free_percpu(memcg->vmstats_percpu); 4453 4449 kfree(memcg); 4454 4450 } 4455 4451 ··· 4478 4474 if (memcg->id.id < 0) 4479 4475 goto fail; 4480 4476 4481 - memcg->stat_cpu = alloc_percpu(struct mem_cgroup_stat_cpu); 4482 - if (!memcg->stat_cpu) 4477 + memcg->vmstats_percpu = alloc_percpu(struct memcg_vmstats_percpu); 4478 + if (!memcg->vmstats_percpu) 4483 4479 goto fail; 4484 4480 4485 4481 for_each_node(node) ··· 5565 5561 static int memory_stat_show(struct seq_file *m, void *v) 5566 5562 { 5567 5563 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); 5568 - struct accumulated_stats acc; 5564 + struct accumulated_vmstats acc; 5569 5565 int i; 5570 5566 5571 5567 /* ··· 5580 5576 */ 5581 5577 5582 5578 memset(&acc, 0, sizeof(acc)); 5583 - acc.stats_size = MEMCG_NR_STAT; 5584 - acc.events_size = NR_VM_EVENT_ITEMS; 5585 - accumulate_memcg_tree(memcg, &acc); 5579 + acc.vmstats_size = MEMCG_NR_STAT; 5580 + acc.vmevents_size = NR_VM_EVENT_ITEMS; 5581 + accumulate_vmstats(memcg, &acc); 5586 5582 5587 5583 seq_printf(m, "anon %llu\n", 5588 - (u64)acc.stat[MEMCG_RSS] * PAGE_SIZE); 5584 + (u64)acc.vmstats[MEMCG_RSS] * PAGE_SIZE); 5589 5585 seq_printf(m, "file %llu\n", 5590 - (u64)acc.stat[MEMCG_CACHE] * PAGE_SIZE); 5586 + (u64)acc.vmstats[MEMCG_CACHE] * PAGE_SIZE); 5591 5587 seq_printf(m, "kernel_stack %llu\n", 5592 - (u64)acc.stat[MEMCG_KERNEL_STACK_KB] * 1024); 5588 + (u64)acc.vmstats[MEMCG_KERNEL_STACK_KB] * 1024); 5593 5589 seq_printf(m, "slab %llu\n", 5594 - (u64)(acc.stat[NR_SLAB_RECLAIMABLE] + 5595 - acc.stat[NR_SLAB_UNRECLAIMABLE]) * PAGE_SIZE); 5590 + (u64)(acc.vmstats[NR_SLAB_RECLAIMABLE] + 5591 + acc.vmstats[NR_SLAB_UNRECLAIMABLE]) * PAGE_SIZE); 5596 5592 seq_printf(m, "sock %llu\n", 
5597 - (u64)acc.stat[MEMCG_SOCK] * PAGE_SIZE); 5593 + (u64)acc.vmstats[MEMCG_SOCK] * PAGE_SIZE); 5598 5594 5599 5595 seq_printf(m, "shmem %llu\n", 5600 - (u64)acc.stat[NR_SHMEM] * PAGE_SIZE); 5596 + (u64)acc.vmstats[NR_SHMEM] * PAGE_SIZE); 5601 5597 seq_printf(m, "file_mapped %llu\n", 5602 - (u64)acc.stat[NR_FILE_MAPPED] * PAGE_SIZE); 5598 + (u64)acc.vmstats[NR_FILE_MAPPED] * PAGE_SIZE); 5603 5599 seq_printf(m, "file_dirty %llu\n", 5604 - (u64)acc.stat[NR_FILE_DIRTY] * PAGE_SIZE); 5600 + (u64)acc.vmstats[NR_FILE_DIRTY] * PAGE_SIZE); 5605 5601 seq_printf(m, "file_writeback %llu\n", 5606 - (u64)acc.stat[NR_WRITEBACK] * PAGE_SIZE); 5602 + (u64)acc.vmstats[NR_WRITEBACK] * PAGE_SIZE); 5607 5603 5608 5604 /* 5609 5605 * TODO: We should eventually replace our own MEMCG_RSS_HUGE counter ··· 5612 5608 * where the page->mem_cgroup is set up and stable. 5613 5609 */ 5614 5610 seq_printf(m, "anon_thp %llu\n", 5615 - (u64)acc.stat[MEMCG_RSS_HUGE] * PAGE_SIZE); 5611 + (u64)acc.vmstats[MEMCG_RSS_HUGE] * PAGE_SIZE); 5616 5612 5617 5613 for (i = 0; i < NR_LRU_LISTS; i++) 5618 5614 seq_printf(m, "%s %llu\n", mem_cgroup_lru_names[i], 5619 5615 (u64)acc.lru_pages[i] * PAGE_SIZE); 5620 5616 5621 5617 seq_printf(m, "slab_reclaimable %llu\n", 5622 - (u64)acc.stat[NR_SLAB_RECLAIMABLE] * PAGE_SIZE); 5618 + (u64)acc.vmstats[NR_SLAB_RECLAIMABLE] * PAGE_SIZE); 5623 5619 seq_printf(m, "slab_unreclaimable %llu\n", 5624 - (u64)acc.stat[NR_SLAB_UNRECLAIMABLE] * PAGE_SIZE); 5620 + (u64)acc.vmstats[NR_SLAB_UNRECLAIMABLE] * PAGE_SIZE); 5625 5621 5626 5622 /* Accumulated memory events */ 5627 5623 5628 - seq_printf(m, "pgfault %lu\n", acc.events[PGFAULT]); 5629 - seq_printf(m, "pgmajfault %lu\n", acc.events[PGMAJFAULT]); 5624 + seq_printf(m, "pgfault %lu\n", acc.vmevents[PGFAULT]); 5625 + seq_printf(m, "pgmajfault %lu\n", acc.vmevents[PGMAJFAULT]); 5630 5626 5631 5627 seq_printf(m, "workingset_refault %lu\n", 5632 - acc.stat[WORKINGSET_REFAULT]); 5628 + acc.vmstats[WORKINGSET_REFAULT]); 5633 5629 
seq_printf(m, "workingset_activate %lu\n", 5634 - acc.stat[WORKINGSET_ACTIVATE]); 5630 + acc.vmstats[WORKINGSET_ACTIVATE]); 5635 5631 seq_printf(m, "workingset_nodereclaim %lu\n", 5636 - acc.stat[WORKINGSET_NODERECLAIM]); 5632 + acc.vmstats[WORKINGSET_NODERECLAIM]); 5637 5633 5638 - seq_printf(m, "pgrefill %lu\n", acc.events[PGREFILL]); 5639 - seq_printf(m, "pgscan %lu\n", acc.events[PGSCAN_KSWAPD] + 5640 - acc.events[PGSCAN_DIRECT]); 5641 - seq_printf(m, "pgsteal %lu\n", acc.events[PGSTEAL_KSWAPD] + 5642 - acc.events[PGSTEAL_DIRECT]); 5643 - seq_printf(m, "pgactivate %lu\n", acc.events[PGACTIVATE]); 5644 - seq_printf(m, "pgdeactivate %lu\n", acc.events[PGDEACTIVATE]); 5645 - seq_printf(m, "pglazyfree %lu\n", acc.events[PGLAZYFREE]); 5646 - seq_printf(m, "pglazyfreed %lu\n", acc.events[PGLAZYFREED]); 5634 + seq_printf(m, "pgrefill %lu\n", acc.vmevents[PGREFILL]); 5635 + seq_printf(m, "pgscan %lu\n", acc.vmevents[PGSCAN_KSWAPD] + 5636 + acc.vmevents[PGSCAN_DIRECT]); 5637 + seq_printf(m, "pgsteal %lu\n", acc.vmevents[PGSTEAL_KSWAPD] + 5638 + acc.vmevents[PGSTEAL_DIRECT]); 5639 + seq_printf(m, "pgactivate %lu\n", acc.vmevents[PGACTIVATE]); 5640 + seq_printf(m, "pgdeactivate %lu\n", acc.vmevents[PGDEACTIVATE]); 5641 + seq_printf(m, "pglazyfree %lu\n", acc.vmevents[PGLAZYFREE]); 5642 + seq_printf(m, "pglazyfreed %lu\n", acc.vmevents[PGLAZYFREED]); 5647 5643 5648 5644 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5649 - seq_printf(m, "thp_fault_alloc %lu\n", acc.events[THP_FAULT_ALLOC]); 5645 + seq_printf(m, "thp_fault_alloc %lu\n", acc.vmevents[THP_FAULT_ALLOC]); 5650 5646 seq_printf(m, "thp_collapse_alloc %lu\n", 5651 - acc.events[THP_COLLAPSE_ALLOC]); 5647 + acc.vmevents[THP_COLLAPSE_ALLOC]); 5652 5648 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 5653 5649 5654 5650 return 0; ··· 6084 6080 __mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge); 6085 6081 __mod_memcg_state(ug->memcg, NR_SHMEM, -ug->nr_shmem); 6086 6082 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); 6087 - 
__this_cpu_add(ug->memcg->stat_cpu->nr_page_events, nr_pages); 6083 + __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, nr_pages); 6088 6084 memcg_check_events(ug->memcg, ug->dummy_page); 6089 6085 local_irq_restore(flags); 6090 6086