Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf trace: Support --summary-mode=cgroup

Add a new summary mode to collect stats for each cgroup.

$ sudo ./perf trace -as --bpf-summary --summary-mode=cgroup -- sleep 1

Summary of events:

cgroup /user.slice/user-657345.slice/user@657345.service/session.slice/org.gnome.Shell@x11.service, 535 events

syscall calls errors total min avg max stddev
(msec) (msec) (msec) (msec) (%)
--------------- -------- ------ -------- --------- --------- --------- ------
ppoll 15 0 373.600 0.004 24.907 197.491 55.26%
poll 15 0 1.325 0.001 0.088 0.369 38.76%
close 66 0 0.567 0.007 0.009 0.026 3.55%
write 150 0 0.471 0.001 0.003 0.010 3.29%
recvmsg 94 83 0.290 0.000 0.003 0.037 16.39%
ioctl 26 0 0.237 0.001 0.009 0.096 50.13%
timerfd_create 66 0 0.236 0.003 0.004 0.024 8.92%
timerfd_settime 70 0 0.160 0.001 0.002 0.012 7.66%
writev 10 0 0.118 0.001 0.012 0.019 18.17%
read 9 0 0.021 0.001 0.002 0.004 14.07%
getpid 14 0 0.019 0.000 0.001 0.004 20.28%

cgroup /system.slice/polkit.service, 94 events

syscall calls errors total min avg max stddev
(msec) (msec) (msec) (msec) (%)
--------------- -------- ------ -------- --------- --------- --------- ------
ppoll 22 0 19.811 0.000 0.900 9.273 63.88%
write 30 0 0.040 0.001 0.001 0.003 12.09%
recvmsg 12 0 0.018 0.001 0.002 0.006 28.15%
read 18 0 0.013 0.000 0.001 0.003 21.99%
poll 12 0 0.006 0.000 0.001 0.001 4.48%

cgroup /user.slice/user-657345.slice/user@657345.service/app.slice/app-org.gnome.Terminal.slice/gnome-terminal-server.service, 21 events

syscall calls errors total min avg max stddev
(msec) (msec) (msec) (msec) (%)
--------------- -------- ------ -------- --------- --------- --------- ------
ppoll 4 0 17.476 0.003 4.369 13.298 69.65%
recvmsg 15 12 0.068 0.002 0.005 0.014 26.53%
writev 1 0 0.033 0.033 0.033 0.033 0.00%
poll 1 0 0.005 0.005 0.005 0.005 0.00%

...

It only works with --bpf-summary for now.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Howard Chu <howardchu95@gmail.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Song Liu <song@kernel.org>
Link: https://lore.kernel.org/r/20250501225337.928470-1-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

Authored by Namhyung Kim and committed by Arnaldo Carvalho de Melo.
ef60b8f5 39922dc5

+170 -12
+2 -1
tools/perf/Documentation/perf-trace.txt
··· 152 152 153 153 --summary-mode=mode:: 154 154 To be used with -s or -S, to select how to show summary. By default it'll 155 - show the syscall summary by thread. Possible values are: thread, total. 155 + show the syscall summary by thread. Possible values are: thread, total, 156 + cgroup. 156 157 157 158 --tool_stats:: 158 159 Show tool stats such as number of times fd->pathname was discovered thru
+9 -1
tools/perf/builtin-trace.c
··· 5302 5302 trace->summary_mode = SUMMARY__BY_THREAD; 5303 5303 } else if (!strcmp(str, "total")) { 5304 5304 trace->summary_mode = SUMMARY__BY_TOTAL; 5305 + } else if (!strcmp(str, "cgroup")) { 5306 + trace->summary_mode = SUMMARY__BY_CGROUP; 5305 5307 } else { 5306 5308 pr_err("Unknown summary mode: %s\n", str); 5307 5309 return -1; ··· 5463 5461 OPT_BOOLEAN(0, "errno-summary", &trace.errno_summary, 5464 5462 "Show errno stats per syscall, use with -s or -S"), 5465 5463 OPT_CALLBACK(0, "summary-mode", &trace, "mode", 5466 - "How to show summary: select thread (default) or total", 5464 + "How to show summary: select thread (default), total or cgroup", 5467 5465 trace__parse_summary_mode), 5468 5466 OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min", 5469 5467 "Trace pagefaults", parse_pagefaults, "maj"), ··· 5777 5775 symbol_conf.keep_exited_threads = true; 5778 5776 if (trace.summary_mode == SUMMARY__NONE) 5779 5777 trace.summary_mode = SUMMARY__BY_THREAD; 5778 + 5779 + if (!trace.summary_bpf && trace.summary_mode == SUMMARY__BY_CGROUP) { 5780 + pr_err("Error: --summary-mode=cgroup only works with --bpf-summary\n"); 5781 + err = -EINVAL; 5782 + goto out; 5783 + } 5780 5784 } 5781 5785 5782 5786 if (output_name != NULL) {
+117 -6
tools/perf/util/bpf-trace-summary.c
··· 6 6 7 7 #include "dwarf-regs.h" /* for EM_HOST */ 8 8 #include "syscalltbl.h" 9 + #include "util/cgroup.h" 9 10 #include "util/hashmap.h" 10 11 #include "util/trace.h" 11 12 #include "util/util.h" 12 13 #include <bpf/bpf.h> 14 + #include <linux/rbtree.h> 13 15 #include <linux/time64.h> 14 16 #include <tools/libc_compat.h> /* reallocarray */ 15 17 ··· 20 18 21 19 22 20 static struct syscall_summary_bpf *skel; 21 + static struct rb_root cgroups = RB_ROOT; 23 22 24 23 int trace_prepare_bpf_summary(enum trace_summary_mode mode) 25 24 { ··· 32 29 33 30 if (mode == SUMMARY__BY_THREAD) 34 31 skel->rodata->aggr_mode = SYSCALL_AGGR_THREAD; 32 + else if (mode == SUMMARY__BY_CGROUP) 33 + skel->rodata->aggr_mode = SYSCALL_AGGR_CGROUP; 35 34 else 36 35 skel->rodata->aggr_mode = SYSCALL_AGGR_CPU; 36 + 37 + if (cgroup_is_v2("perf_event") > 0) 38 + skel->rodata->use_cgroup_v2 = 1; 37 39 38 40 if (syscall_summary_bpf__load(skel) < 0) { 39 41 fprintf(stderr, "failed to load syscall summary bpf skeleton\n"); ··· 49 41 fprintf(stderr, "failed to attach syscall summary bpf skeleton\n"); 50 42 return -1; 51 43 } 44 + 45 + if (mode == SUMMARY__BY_CGROUP) 46 + read_all_cgroups(&cgroups); 52 47 53 48 return 0; 54 49 } ··· 99 88 * per-cpu analysis so it's keyed by the syscall number to combine stats 100 89 * from different CPUs. And syscall_data always has a syscall_node so 101 90 * it can effectively work as flat hierarchy. 91 + * 92 + * For per-cgroup stats, it uses two-level data structure like thread 93 + * syscall_data is keyed by CGROUP and has an array of node which 94 + * represents each syscall for the cgroup. 
102 95 */ 103 96 struct syscall_data { 104 - int key; /* tid if AGGR_THREAD, syscall-nr if AGGR_CPU */ 97 + u64 key; /* tid if AGGR_THREAD, syscall-nr if AGGR_CPU, cgroup if AGGR_CGROUP */ 105 98 int nr_events; 106 99 int nr_nodes; 107 100 u64 total_time; ··· 206 191 207 192 qsort(data->nodes, data->nr_nodes, sizeof(*data->nodes), nodecmp); 208 193 209 - printed += fprintf(fp, " thread (%d), ", data->key); 194 + printed += fprintf(fp, " thread (%d), ", (int)data->key); 210 195 printed += fprintf(fp, "%d events\n\n", data->nr_events); 211 196 212 197 printed += fprintf(fp, " syscall calls errors total min avg max stddev\n"); ··· 298 283 return printed; 299 284 } 300 285 286 + static int update_cgroup_stats(struct hashmap *hash, struct syscall_key *map_key, 287 + struct syscall_stats *map_data) 288 + { 289 + struct syscall_data *data; 290 + struct syscall_node *nodes; 291 + 292 + if (!hashmap__find(hash, map_key->cgroup, &data)) { 293 + data = zalloc(sizeof(*data)); 294 + if (data == NULL) 295 + return -ENOMEM; 296 + 297 + data->key = map_key->cgroup; 298 + if (hashmap__add(hash, data->key, data) < 0) { 299 + free(data); 300 + return -ENOMEM; 301 + } 302 + } 303 + 304 + /* update thread total stats */ 305 + data->nr_events += map_data->count; 306 + data->total_time += map_data->total_time; 307 + 308 + nodes = reallocarray(data->nodes, data->nr_nodes + 1, sizeof(*nodes)); 309 + if (nodes == NULL) 310 + return -ENOMEM; 311 + 312 + data->nodes = nodes; 313 + nodes = &data->nodes[data->nr_nodes++]; 314 + nodes->syscall_nr = map_key->nr; 315 + 316 + /* each thread has an entry for each syscall, just use the stat */ 317 + memcpy(&nodes->stats, map_data, sizeof(*map_data)); 318 + return 0; 319 + } 320 + 321 + static int print_cgroup_stat(struct syscall_data *data, FILE *fp) 322 + { 323 + int printed = 0; 324 + struct cgroup *cgrp = __cgroup__find(&cgroups, data->key); 325 + 326 + qsort(data->nodes, data->nr_nodes, sizeof(*data->nodes), nodecmp); 327 + 328 + if (cgrp) 329 + 
printed += fprintf(fp, " cgroup %s,", cgrp->name); 330 + else 331 + printed += fprintf(fp, " cgroup id:%lu,", (unsigned long)data->key); 332 + 333 + printed += fprintf(fp, " %d events\n\n", data->nr_events); 334 + 335 + printed += fprintf(fp, " syscall calls errors total min avg max stddev\n"); 336 + printed += fprintf(fp, " (msec) (msec) (msec) (msec) (%%)\n"); 337 + printed += fprintf(fp, " --------------- -------- ------ -------- --------- --------- --------- ------\n"); 338 + 339 + printed += print_common_stats(data, fp); 340 + printed += fprintf(fp, "\n\n"); 341 + 342 + return printed; 343 + } 344 + 345 + static int print_cgroup_stats(struct syscall_data **data, int nr_data, FILE *fp) 346 + { 347 + int printed = 0; 348 + 349 + for (int i = 0; i < nr_data; i++) 350 + printed += print_cgroup_stat(data[i], fp); 351 + 352 + return printed; 353 + } 354 + 301 355 int trace_print_bpf_summary(FILE *fp) 302 356 { 303 357 struct bpf_map *map = skel->maps.syscall_stats_map; ··· 389 305 struct syscall_stats stat; 390 306 391 307 if (!bpf_map__lookup_elem(map, &key, sizeof(key), &stat, sizeof(stat), 0)) { 392 - if (skel->rodata->aggr_mode == SYSCALL_AGGR_THREAD) 308 + switch (skel->rodata->aggr_mode) { 309 + case SYSCALL_AGGR_THREAD: 393 310 update_thread_stats(&schash, &key, &stat); 394 - else 311 + break; 312 + case SYSCALL_AGGR_CPU: 395 313 update_total_stats(&schash, &key, &stat); 314 + break; 315 + case SYSCALL_AGGR_CGROUP: 316 + update_cgroup_stats(&schash, &key, &stat); 317 + break; 318 + default: 319 + break; 320 + } 396 321 } 397 322 398 323 prev_key = &key; ··· 418 325 419 326 qsort(data, nr_data, sizeof(*data), datacmp); 420 327 421 - if (skel->rodata->aggr_mode == SYSCALL_AGGR_THREAD) 328 + switch (skel->rodata->aggr_mode) { 329 + case SYSCALL_AGGR_THREAD: 422 330 printed += print_thread_stats(data, nr_data, fp); 423 - else 331 + break; 332 + case SYSCALL_AGGR_CPU: 424 333 printed += print_total_stats(data, nr_data, fp); 334 + break; 335 + case 
SYSCALL_AGGR_CGROUP: 336 + printed += print_cgroup_stats(data, nr_data, fp); 337 + break; 338 + default: 339 + break; 340 + } 425 341 426 342 for (i = 0; i < nr_data && data; i++) { 427 343 free(data[i]->nodes); ··· 445 343 446 344 void trace_cleanup_bpf_summary(void) 447 345 { 346 + if (!RB_EMPTY_ROOT(&cgroups)) { 347 + struct cgroup *cgrp, *tmp; 348 + 349 + rbtree_postorder_for_each_entry_safe(cgrp, tmp, &cgroups, node) 350 + cgroup__put(cgrp); 351 + 352 + cgroups = RB_ROOT; 353 + } 354 + 448 355 syscall_summary_bpf__destroy(skel); 449 356 }
+39 -4
tools/perf/util/bpf_skel/syscall_summary.bpf.c
··· 8 8 9 9 #include <bpf/bpf_helpers.h> 10 10 #include <bpf/bpf_tracing.h> 11 + #include <bpf/bpf_core_read.h> 11 12 12 13 /* This is to calculate a delta between sys-enter and sys-exit for each thread */ 13 14 struct syscall_trace { ··· 36 35 int enabled; /* controlled from userspace */ 37 36 38 37 const volatile enum syscall_aggr_mode aggr_mode; 38 + const volatile int use_cgroup_v2; 39 39 40 - static void update_stats(int cpu_or_tid, int nr, s64 duration, long ret) 40 + int perf_subsys_id = -1; 41 + 42 + static inline __u64 get_current_cgroup_id(void) 41 43 { 42 - struct syscall_key key = { .cpu_or_tid = cpu_or_tid, .nr = nr, }; 44 + struct task_struct *task; 45 + struct cgroup *cgrp; 46 + 47 + if (use_cgroup_v2) 48 + return bpf_get_current_cgroup_id(); 49 + 50 + task = bpf_get_current_task_btf(); 51 + 52 + if (perf_subsys_id == -1) { 53 + #if __has_builtin(__builtin_preserve_enum_value) 54 + perf_subsys_id = bpf_core_enum_value(enum cgroup_subsys_id, 55 + perf_event_cgrp_id); 56 + #else 57 + perf_subsys_id = perf_event_cgrp_id; 58 + #endif 59 + } 60 + 61 + cgrp = BPF_CORE_READ(task, cgroups, subsys[perf_subsys_id], cgroup); 62 + return BPF_CORE_READ(cgrp, kn, id); 63 + } 64 + 65 + static void update_stats(int cpu_or_tid, u64 cgroup_id, int nr, s64 duration, 66 + long ret) 67 + { 68 + struct syscall_key key = { 69 + .cpu_or_tid = cpu_or_tid, 70 + .cgroup = cgroup_id, 71 + .nr = nr, 72 + }; 43 73 struct syscall_stats *stats; 44 74 45 75 stats = bpf_map_lookup_elem(&syscall_stats_map, &key); ··· 122 90 int sys_exit(u64 *ctx) 123 91 { 124 92 int tid; 125 - int key; 93 + int key = 0; 94 + u64 cgroup = 0; 126 95 long ret = ctx[1]; /* return value of the syscall */ 127 96 struct syscall_trace *st; 128 97 s64 delta; ··· 138 105 139 106 if (aggr_mode == SYSCALL_AGGR_THREAD) 140 107 key = tid; 108 + else if (aggr_mode == SYSCALL_AGGR_CGROUP) 109 + cgroup = get_current_cgroup_id(); 141 110 else 142 111 key = bpf_get_smp_processor_id(); 143 112 144 113 delta = 
bpf_ktime_get_ns() - st->timestamp; 145 - update_stats(key, st->nr, delta, ret); 114 + update_stats(key, cgroup, st->nr, delta, ret); 146 115 147 116 bpf_map_delete_elem(&syscall_trace_map, &tid); 148 117 return 0;
+2
tools/perf/util/bpf_skel/syscall_summary.h
··· 6 6 enum syscall_aggr_mode { 7 7 SYSCALL_AGGR_THREAD, 8 8 SYSCALL_AGGR_CPU, 9 + SYSCALL_AGGR_CGROUP, 9 10 }; 10 11 11 12 struct syscall_key { 13 + u64 cgroup; 12 14 int cpu_or_tid; 13 15 int nr; 14 16 };
+1
tools/perf/util/trace.h
··· 8 8 SUMMARY__NONE = 0, 9 9 SUMMARY__BY_TOTAL, 10 10 SUMMARY__BY_THREAD, 11 + SUMMARY__BY_CGROUP, 11 12 }; 12 13 13 14 #ifdef HAVE_BPF_SKEL