Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: Simplify cgroup_hierarchical_stats selftest

The cgroup_hierarchical_stats selftest is complicated. It has to be,
because it tests an entire workflow of recording, aggregating, and
dumping cgroup stats. However, some of the complexity is unnecessary.
The test now enables the memory controller in a cgroup hierarchy, invokes
reclaim, measures reclaim time, THEN uses that reclaim time to test the
stats collection and aggregation. We don't need to use such a
complicated stat, as the context in which the stat is collected is
orthogonal.

Simplify the test by using a simple stat instead of reclaim time, the
total number of times a process has ever entered a cgroup. This makes
the test simpler and removes the dependency on the memory controller and
the memory reclaim interface.

Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: KP Singh <kpsingh@kernel.org>
Link: https://lore.kernel.org/bpf/20220919175330.890793-1-yosryahmed@google.com

authored by

Yosry Ahmed and committed by
Andrii Nakryiko
e0401dce ee9bb9b4

+129 -218
+75 -93
tools/testing/selftests/bpf/prog_tests/cgroup_hierarchical_stats.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 2 /* 3 - * Functions to manage eBPF programs attached to cgroup subsystems 3 + * This test makes sure BPF stats collection using rstat works correctly. 4 + * The test uses 3 BPF progs: 5 + * (a) counter: This BPF prog is invoked every time we attach a process to a 6 + * cgroup and locklessly increments a percpu counter. 7 + * The program then calls cgroup_rstat_updated() to inform rstat 8 + * of an update on the (cpu, cgroup) pair. 9 + * 10 + * (b) flusher: This BPF prog is invoked when an rstat flush is ongoing, it 11 + * aggregates all percpu counters to a total counter, and also 12 + * propagates the changes to the ancestor cgroups. 13 + * 14 + * (c) dumper: This BPF prog is a cgroup_iter. It is used to output the total 15 + * counter of a cgroup through reading a file in userspace. 16 + * 17 + * The test sets up a cgroup hierarchy, and the above programs. It spawns a few 18 + * processes in the leaf cgroups and makes sure all the counters are aggregated 19 + * correctly. 4 20 * 5 21 * Copyright 2022 Google LLC. 
6 22 */ ··· 37 21 #define PAGE_SIZE 4096 38 22 #define MB(x) (x << 20) 39 23 24 + #define PROCESSES_PER_CGROUP 3 25 + 40 26 #define BPFFS_ROOT "/sys/fs/bpf/" 41 - #define BPFFS_VMSCAN BPFFS_ROOT"vmscan/" 27 + #define BPFFS_ATTACH_COUNTERS BPFFS_ROOT "attach_counters/" 42 28 43 29 #define CG_ROOT_NAME "root" 44 30 #define CG_ROOT_ID 1 ··· 97 79 return err; 98 80 99 81 /* Create a directory to contain stat files in bpffs */ 100 - err = mkdir(BPFFS_VMSCAN, 0755); 82 + err = mkdir(BPFFS_ATTACH_COUNTERS, 0755); 101 83 if (!ASSERT_OK(err, "mkdir")) 102 84 return err; 103 85 ··· 107 89 static void cleanup_bpffs(void) 108 90 { 109 91 /* Remove created directory in bpffs */ 110 - ASSERT_OK(rmdir(BPFFS_VMSCAN), "rmdir "BPFFS_VMSCAN); 92 + ASSERT_OK(rmdir(BPFFS_ATTACH_COUNTERS), "rmdir "BPFFS_ATTACH_COUNTERS); 111 93 112 94 /* Unmount bpffs, if it wasn't already mounted when we started */ 113 95 if (mounted_bpffs) ··· 136 118 137 119 cgroups[i].fd = fd; 138 120 cgroups[i].id = get_cgroup_id(cgroups[i].path); 139 - 140 - /* 141 - * Enable memcg controller for the entire hierarchy. 142 - * Note that stats are collected for all cgroups in a hierarchy 143 - * with memcg enabled anyway, but are only exposed for cgroups 144 - * that have memcg enabled. 
145 - */ 146 - if (i < N_NON_LEAF_CGROUPS) { 147 - err = enable_controllers(cgroups[i].path, "memory"); 148 - if (!ASSERT_OK(err, "enable_controllers")) 149 - return err; 150 - } 151 121 } 152 122 return 0; 153 123 } ··· 160 154 cleanup_bpffs(); 161 155 } 162 156 163 - static int reclaimer(const char *cgroup_path, size_t size) 157 + static int attach_processes(void) 164 158 { 165 - static char size_buf[128]; 166 - char *buf, *ptr; 167 - int err; 159 + int i, j, status; 168 160 169 - /* Join cgroup in the parent process workdir */ 170 - if (join_parent_cgroup(cgroup_path)) 171 - return EACCES; 172 - 173 - /* Allocate memory */ 174 - buf = malloc(size); 175 - if (!buf) 176 - return ENOMEM; 177 - 178 - /* Write to memory to make sure it's actually allocated */ 179 - for (ptr = buf; ptr < buf + size; ptr += PAGE_SIZE) 180 - *ptr = 1; 181 - 182 - /* Try to reclaim memory */ 183 - snprintf(size_buf, 128, "%lu", size); 184 - err = write_cgroup_file_parent(cgroup_path, "memory.reclaim", size_buf); 185 - 186 - free(buf); 187 - /* memory.reclaim returns EAGAIN if the amount is not fully reclaimed */ 188 - if (err && errno != EAGAIN) 189 - return errno; 190 - 191 - return 0; 192 - } 193 - 194 - static int induce_vmscan(void) 195 - { 196 - int i, status; 197 - 198 - /* 199 - * In every leaf cgroup, run a child process that allocates some memory 200 - * and attempts to reclaim some of it. 
201 - */ 161 + /* In every leaf cgroup, attach 3 processes */ 202 162 for (i = N_NON_LEAF_CGROUPS; i < N_CGROUPS; i++) { 203 - pid_t pid; 163 + for (j = 0; j < PROCESSES_PER_CGROUP; j++) { 164 + pid_t pid; 204 165 205 - /* Create reclaimer child */ 206 - pid = fork(); 207 - if (pid == 0) { 208 - status = reclaimer(cgroups[i].path, MB(5)); 209 - exit(status); 166 + /* Create child and attach to cgroup */ 167 + pid = fork(); 168 + if (pid == 0) { 169 + if (join_parent_cgroup(cgroups[i].path)) 170 + exit(EACCES); 171 + exit(0); 172 + } 173 + 174 + /* Cleanup child */ 175 + waitpid(pid, &status, 0); 176 + if (!ASSERT_TRUE(WIFEXITED(status), "child process exited")) 177 + return 1; 178 + if (!ASSERT_EQ(WEXITSTATUS(status), 0, 179 + "child process exit code")) 180 + return 1; 210 181 } 211 - 212 - /* Cleanup reclaimer child */ 213 - waitpid(pid, &status, 0); 214 - ASSERT_TRUE(WIFEXITED(status), "reclaimer exited"); 215 - ASSERT_EQ(WEXITSTATUS(status), 0, "reclaim exit code"); 216 182 } 217 183 return 0; 218 184 } 219 185 220 186 static unsigned long long 221 - get_cgroup_vmscan_delay(unsigned long long cgroup_id, const char *file_name) 187 + get_attach_counter(unsigned long long cgroup_id, const char *file_name) 222 188 { 223 - unsigned long long vmscan = 0, id = 0; 189 + unsigned long long attach_counter = 0, id = 0; 224 190 static char buf[128], path[128]; 225 191 226 192 /* For every cgroup, read the file generated by cgroup_iter */ 227 - snprintf(path, 128, "%s%s", BPFFS_VMSCAN, file_name); 193 + snprintf(path, 128, "%s%s", BPFFS_ATTACH_COUNTERS, file_name); 228 194 if (!ASSERT_OK(read_from_file(path, buf, 128), "read cgroup_iter")) 229 195 return 0; 230 196 231 197 /* Check the output file formatting */ 232 - ASSERT_EQ(sscanf(buf, "cg_id: %llu, total_vmscan_delay: %llu\n", 233 - &id, &vmscan), 2, "output format"); 198 + ASSERT_EQ(sscanf(buf, "cg_id: %llu, attach_counter: %llu\n", 199 + &id, &attach_counter), 2, "output format"); 234 200 235 201 /* Check that the 
cgroup_id is displayed correctly */ 236 202 ASSERT_EQ(id, cgroup_id, "cgroup_id"); 237 - /* Check that the vmscan reading is non-zero */ 238 - ASSERT_GT(vmscan, 0, "vmscan_reading"); 239 - return vmscan; 203 + /* Check that the counter is non-zero */ 204 + ASSERT_GT(attach_counter, 0, "attach counter non-zero"); 205 + return attach_counter; 240 206 } 241 207 242 - static void check_vmscan_stats(void) 208 + static void check_attach_counters(void) 243 209 { 244 - unsigned long long vmscan_readings[N_CGROUPS], vmscan_root; 210 + unsigned long long attach_counters[N_CGROUPS], root_attach_counter; 245 211 int i; 246 212 247 - for (i = 0; i < N_CGROUPS; i++) { 248 - vmscan_readings[i] = get_cgroup_vmscan_delay(cgroups[i].id, 249 - cgroups[i].name); 250 - } 213 + for (i = 0; i < N_CGROUPS; i++) 214 + attach_counters[i] = get_attach_counter(cgroups[i].id, 215 + cgroups[i].name); 251 216 252 217 /* Read stats for root too */ 253 - vmscan_root = get_cgroup_vmscan_delay(CG_ROOT_ID, CG_ROOT_NAME); 218 + root_attach_counter = get_attach_counter(CG_ROOT_ID, CG_ROOT_NAME); 219 + 220 + /* Check that all leafs cgroups have an attach counter of 3 */ 221 + for (i = N_NON_LEAF_CGROUPS; i < N_CGROUPS; i++) 222 + ASSERT_EQ(attach_counters[i], PROCESSES_PER_CGROUP, 223 + "leaf cgroup attach counter"); 254 224 255 225 /* Check that child1 == child1_1 + child1_2 */ 256 - ASSERT_EQ(vmscan_readings[1], vmscan_readings[3] + vmscan_readings[4], 257 - "child1_vmscan"); 226 + ASSERT_EQ(attach_counters[1], attach_counters[3] + attach_counters[4], 227 + "child1_counter"); 258 228 /* Check that child2 == child2_1 + child2_2 */ 259 - ASSERT_EQ(vmscan_readings[2], vmscan_readings[5] + vmscan_readings[6], 260 - "child2_vmscan"); 229 + ASSERT_EQ(attach_counters[2], attach_counters[5] + attach_counters[6], 230 + "child2_counter"); 261 231 /* Check that test == child1 + child2 */ 262 - ASSERT_EQ(vmscan_readings[0], vmscan_readings[1] + vmscan_readings[2], 263 - "test_vmscan"); 232 + 
ASSERT_EQ(attach_counters[0], attach_counters[1] + attach_counters[2], 233 + "test_counter"); 264 234 /* Check that root >= test */ 265 - ASSERT_GE(vmscan_root, vmscan_readings[1], "root_vmscan"); 235 + ASSERT_GE(root_attach_counter, attach_counters[1], "root_counter"); 266 236 } 267 237 268 238 /* Creates iter link and pins in bpffs, returns 0 on success, -errno on failure. ··· 260 278 linfo.cgroup.order = BPF_CGROUP_ITER_SELF_ONLY; 261 279 opts.link_info = &linfo; 262 280 opts.link_info_len = sizeof(linfo); 263 - link = bpf_program__attach_iter(obj->progs.dump_vmscan, &opts); 281 + link = bpf_program__attach_iter(obj->progs.dumper, &opts); 264 282 if (!ASSERT_OK_PTR(link, "attach_iter")) 265 283 return -EFAULT; 266 284 267 285 /* Pin the link to a bpffs file */ 268 - snprintf(path, 128, "%s%s", BPFFS_VMSCAN, file_name); 286 + snprintf(path, 128, "%s%s", BPFFS_ATTACH_COUNTERS, file_name); 269 287 err = bpf_link__pin(link, path); 270 288 ASSERT_OK(err, "pin cgroup_iter"); 271 289 ··· 295 313 if (!ASSERT_OK(err, "setup_cgroup_iter")) 296 314 return err; 297 315 298 - bpf_program__set_autoattach((*skel)->progs.dump_vmscan, false); 316 + bpf_program__set_autoattach((*skel)->progs.dumper, false); 299 317 err = cgroup_hierarchical_stats__attach(*skel); 300 318 if (!ASSERT_OK(err, "attach")) 301 319 return err; ··· 310 328 311 329 for (i = 0; i < N_CGROUPS; i++) { 312 330 /* Delete files in bpffs that cgroup_iters are pinned in */ 313 - snprintf(path, 128, "%s%s", BPFFS_VMSCAN, 331 + snprintf(path, 128, "%s%s", BPFFS_ATTACH_COUNTERS, 314 332 cgroups[i].name); 315 333 ASSERT_OK(remove(path), "remove cgroup_iter pin"); 316 334 } 317 335 318 336 /* Delete root file in bpffs */ 319 - snprintf(path, 128, "%s%s", BPFFS_VMSCAN, CG_ROOT_NAME); 337 + snprintf(path, 128, "%s%s", BPFFS_ATTACH_COUNTERS, CG_ROOT_NAME); 320 338 ASSERT_OK(remove(path), "remove cgroup_iter root pin"); 321 339 cgroup_hierarchical_stats__destroy(skel); 322 340 } ··· 329 347 goto hierarchy_cleanup; 330 348 
if (setup_progs(&skel)) 331 349 goto cleanup; 332 - if (induce_vmscan()) 350 + if (attach_processes()) 333 351 goto cleanup; 334 - check_vmscan_stats(); 352 + check_attach_counters(); 335 353 cleanup: 336 354 destroy_progs(skel); 337 355 hierarchy_cleanup:
+54 -125
tools/testing/selftests/bpf/progs/cgroup_hierarchical_stats.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 2 /* 3 - * Functions to manage eBPF programs attached to cgroup subsystems 4 - * 5 3 * Copyright 2022 Google LLC. 6 4 */ 7 5 #include "vmlinux.h" ··· 9 11 10 12 char _license[] SEC("license") = "GPL"; 11 13 12 - /* 13 - * Start times are stored per-task, not per-cgroup, as multiple tasks in one 14 - * cgroup can perform reclaim concurrently. 15 - */ 16 - struct { 17 - __uint(type, BPF_MAP_TYPE_TASK_STORAGE); 18 - __uint(map_flags, BPF_F_NO_PREALLOC); 19 - __type(key, int); 20 - __type(value, __u64); 21 - } vmscan_start_time SEC(".maps"); 22 - 23 - struct vmscan_percpu { 14 + struct percpu_attach_counter { 24 15 /* Previous percpu state, to figure out if we have new updates */ 25 16 __u64 prev; 26 17 /* Current percpu state */ 27 18 __u64 state; 28 19 }; 29 20 30 - struct vmscan { 21 + struct attach_counter { 31 22 /* State propagated through children, pending aggregation */ 32 23 __u64 pending; 33 24 /* Total state, including all cpus and all children */ ··· 25 38 26 39 struct { 27 40 __uint(type, BPF_MAP_TYPE_PERCPU_HASH); 28 - __uint(max_entries, 100); 41 + __uint(max_entries, 1024); 29 42 __type(key, __u64); 30 - __type(value, struct vmscan_percpu); 31 - } pcpu_cgroup_vmscan_elapsed SEC(".maps"); 43 + __type(value, struct percpu_attach_counter); 44 + } percpu_attach_counters SEC(".maps"); 32 45 33 46 struct { 34 47 __uint(type, BPF_MAP_TYPE_HASH); 35 - __uint(max_entries, 100); 48 + __uint(max_entries, 1024); 36 49 __type(key, __u64); 37 - __type(value, struct vmscan); 38 - } cgroup_vmscan_elapsed SEC(".maps"); 50 + __type(value, struct attach_counter); 51 + } attach_counters SEC(".maps"); 39 52 40 53 extern void cgroup_rstat_updated(struct cgroup *cgrp, int cpu) __ksym; 41 54 extern void cgroup_rstat_flush(struct cgroup *cgrp) __ksym; 42 - 43 - static struct cgroup *task_memcg(struct task_struct *task) 44 - { 45 - int cgrp_id; 46 - 47 - #if __has_builtin(__builtin_preserve_enum_value) 48 - cgrp_id = 
bpf_core_enum_value(enum cgroup_subsys_id, memory_cgrp_id); 49 - #else 50 - cgrp_id = memory_cgrp_id; 51 - #endif 52 - return task->cgroups->subsys[cgrp_id]->cgroup; 53 - } 54 55 55 56 static uint64_t cgroup_id(struct cgroup *cgrp) 56 57 { 57 58 return cgrp->kn->id; 58 59 } 59 60 60 - static int create_vmscan_percpu_elem(__u64 cg_id, __u64 state) 61 + static int create_percpu_attach_counter(__u64 cg_id, __u64 state) 61 62 { 62 - struct vmscan_percpu pcpu_init = {.state = state, .prev = 0}; 63 + struct percpu_attach_counter pcpu_init = {.state = state, .prev = 0}; 63 64 64 - return bpf_map_update_elem(&pcpu_cgroup_vmscan_elapsed, &cg_id, 65 + return bpf_map_update_elem(&percpu_attach_counters, &cg_id, 65 66 &pcpu_init, BPF_NOEXIST); 66 67 } 67 68 68 - static int create_vmscan_elem(__u64 cg_id, __u64 state, __u64 pending) 69 + static int create_attach_counter(__u64 cg_id, __u64 state, __u64 pending) 69 70 { 70 - struct vmscan init = {.state = state, .pending = pending}; 71 + struct attach_counter init = {.state = state, .pending = pending}; 71 72 72 - return bpf_map_update_elem(&cgroup_vmscan_elapsed, &cg_id, 73 + return bpf_map_update_elem(&attach_counters, &cg_id, 73 74 &init, BPF_NOEXIST); 74 75 } 75 76 76 - SEC("tp_btf/mm_vmscan_memcg_reclaim_begin") 77 - int BPF_PROG(vmscan_start, int order, gfp_t gfp_flags) 77 + SEC("fentry/cgroup_attach_task") 78 + int BPF_PROG(counter, struct cgroup *dst_cgrp, struct task_struct *leader, 79 + bool threadgroup) 78 80 { 79 - struct task_struct *task = bpf_get_current_task_btf(); 80 - __u64 *start_time_ptr; 81 + __u64 cg_id = cgroup_id(dst_cgrp); 82 + struct percpu_attach_counter *pcpu_counter = bpf_map_lookup_elem( 83 + &percpu_attach_counters, 84 + &cg_id); 81 85 82 - start_time_ptr = bpf_task_storage_get(&vmscan_start_time, task, 0, 83 - BPF_LOCAL_STORAGE_GET_F_CREATE); 84 - if (start_time_ptr) 85 - *start_time_ptr = bpf_ktime_get_ns(); 86 - return 0; 87 - } 88 - 89 - SEC("tp_btf/mm_vmscan_memcg_reclaim_end") 90 - int 
BPF_PROG(vmscan_end, unsigned long nr_reclaimed) 91 - { 92 - struct vmscan_percpu *pcpu_stat; 93 - struct task_struct *current = bpf_get_current_task_btf(); 94 - struct cgroup *cgrp; 95 - __u64 *start_time_ptr; 96 - __u64 current_elapsed, cg_id; 97 - __u64 end_time = bpf_ktime_get_ns(); 98 - 99 - /* 100 - * cgrp is the first parent cgroup of current that has memcg enabled in 101 - * its subtree_control, or NULL if memcg is disabled in the entire tree. 102 - * In a cgroup hierarchy like this: 103 - * a 104 - * / \ 105 - * b c 106 - * If "a" has memcg enabled, while "b" doesn't, then processes in "b" 107 - * will accumulate their stats directly to "a". This makes sure that no 108 - * stats are lost from processes in leaf cgroups that don't have memcg 109 - * enabled, but only exposes stats for cgroups that have memcg enabled. 110 - */ 111 - cgrp = task_memcg(current); 112 - if (!cgrp) 86 + if (pcpu_counter) 87 + pcpu_counter->state += 1; 88 + else if (create_percpu_attach_counter(cg_id, 1)) 113 89 return 0; 114 90 115 - cg_id = cgroup_id(cgrp); 116 - start_time_ptr = bpf_task_storage_get(&vmscan_start_time, current, 0, 117 - BPF_LOCAL_STORAGE_GET_F_CREATE); 118 - if (!start_time_ptr) 119 - return 0; 120 - 121 - current_elapsed = end_time - *start_time_ptr; 122 - pcpu_stat = bpf_map_lookup_elem(&pcpu_cgroup_vmscan_elapsed, 123 - &cg_id); 124 - if (pcpu_stat) 125 - pcpu_stat->state += current_elapsed; 126 - else if (create_vmscan_percpu_elem(cg_id, current_elapsed)) 127 - return 0; 128 - 129 - cgroup_rstat_updated(cgrp, bpf_get_smp_processor_id()); 91 + cgroup_rstat_updated(dst_cgrp, bpf_get_smp_processor_id()); 130 92 return 0; 131 93 } 132 94 133 95 SEC("fentry/bpf_rstat_flush") 134 - int BPF_PROG(vmscan_flush, struct cgroup *cgrp, struct cgroup *parent, int cpu) 96 + int BPF_PROG(flusher, struct cgroup *cgrp, struct cgroup *parent, int cpu) 135 97 { 136 - struct vmscan_percpu *pcpu_stat; 137 - struct vmscan *total_stat, *parent_stat; 98 + struct 
percpu_attach_counter *pcpu_counter; 99 + struct attach_counter *total_counter, *parent_counter; 138 100 __u64 cg_id = cgroup_id(cgrp); 139 101 __u64 parent_cg_id = parent ? cgroup_id(parent) : 0; 140 - __u64 *pcpu_vmscan; 141 102 __u64 state; 142 103 __u64 delta = 0; 143 104 144 105 /* Add CPU changes on this level since the last flush */ 145 - pcpu_stat = bpf_map_lookup_percpu_elem(&pcpu_cgroup_vmscan_elapsed, 146 - &cg_id, cpu); 147 - if (pcpu_stat) { 148 - state = pcpu_stat->state; 149 - delta += state - pcpu_stat->prev; 150 - pcpu_stat->prev = state; 106 + pcpu_counter = bpf_map_lookup_percpu_elem(&percpu_attach_counters, 107 + &cg_id, cpu); 108 + if (pcpu_counter) { 109 + state = pcpu_counter->state; 110 + delta += state - pcpu_counter->prev; 111 + pcpu_counter->prev = state; 151 112 } 152 113 153 - total_stat = bpf_map_lookup_elem(&cgroup_vmscan_elapsed, &cg_id); 154 - if (!total_stat) { 155 - if (create_vmscan_elem(cg_id, delta, 0)) 114 + total_counter = bpf_map_lookup_elem(&attach_counters, &cg_id); 115 + if (!total_counter) { 116 + if (create_attach_counter(cg_id, delta, 0)) 156 117 return 0; 157 - 158 118 goto update_parent; 159 119 } 160 120 161 121 /* Collect pending stats from subtree */ 162 - if (total_stat->pending) { 163 - delta += total_stat->pending; 164 - total_stat->pending = 0; 122 + if (total_counter->pending) { 123 + delta += total_counter->pending; 124 + total_counter->pending = 0; 165 125 } 166 126 167 127 /* Propagate changes to this cgroup's total */ 168 - total_stat->state += delta; 128 + total_counter->state += delta; 169 129 170 130 update_parent: 171 131 /* Skip if there are no changes to propagate, or no parent */ ··· 120 186 return 0; 121 187 122 188 /* Propagate changes to cgroup's parent */ 123 - parent_stat = bpf_map_lookup_elem(&cgroup_vmscan_elapsed, 124 - &parent_cg_id); 125 - if (parent_stat) 126 - parent_stat->pending += delta; 189 + parent_counter = bpf_map_lookup_elem(&attach_counters, 190 + &parent_cg_id); 191 + if 
(parent_counter) 192 + parent_counter->pending += delta; 127 193 else 128 - create_vmscan_elem(parent_cg_id, 0, delta); 194 + create_attach_counter(parent_cg_id, 0, delta); 129 195 return 0; 130 196 } 131 197 132 198 SEC("iter.s/cgroup") 133 - int BPF_PROG(dump_vmscan, struct bpf_iter_meta *meta, struct cgroup *cgrp) 199 + int BPF_PROG(dumper, struct bpf_iter_meta *meta, struct cgroup *cgrp) 134 200 { 135 201 struct seq_file *seq = meta->seq; 136 - struct vmscan *total_stat; 202 + struct attach_counter *total_counter; 137 203 __u64 cg_id = cgrp ? cgroup_id(cgrp) : 0; 138 204 139 205 /* Do nothing for the terminal call */ ··· 143 209 /* Flush the stats to make sure we get the most updated numbers */ 144 210 cgroup_rstat_flush(cgrp); 145 211 146 - total_stat = bpf_map_lookup_elem(&cgroup_vmscan_elapsed, &cg_id); 147 - if (!total_stat) { 148 - BPF_SEQ_PRINTF(seq, "cg_id: %llu, total_vmscan_delay: 0\n", 212 + total_counter = bpf_map_lookup_elem(&attach_counters, &cg_id); 213 + if (!total_counter) { 214 + BPF_SEQ_PRINTF(seq, "cg_id: %llu, attach_counter: 0\n", 149 215 cg_id); 150 216 } else { 151 - BPF_SEQ_PRINTF(seq, "cg_id: %llu, total_vmscan_delay: %llu\n", 152 - cg_id, total_stat->state); 217 + BPF_SEQ_PRINTF(seq, "cg_id: %llu, attach_counter: %llu\n", 218 + cg_id, total_counter->state); 153 219 } 154 - 155 - /* 156 - * We only dump stats for one cgroup here, so return 1 to stop 157 - * iteration after the first cgroup. 158 - */ 159 - return 1; 220 + return 0; 160 221 }