Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf stat: Skip evlist__[enable|disable] when all events use BPF

When all events of a perf-stat session use BPF, it is not necessary to
call evlist__enable() and evlist__disable(). Skip them when
all_counters_use_bpf is true.

Signed-off-by: Song Liu <song@kernel.org>
Reported-by: Jiri Olsa <jolsa@redhat.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

Authored by Song Liu; committed by Arnaldo Carvalho de Melo
Commit hashes: f8b61bd2 f42907e8

+10 -6
+10 -3
tools/perf/builtin-stat.c
··· 572 572 * - we have initial delay configured 573 573 */ 574 574 if (!target__none(&target) || stat_config.initial_delay) { 575 - evlist__enable(evsel_list); 575 + if (!all_counters_use_bpf) 576 + evlist__enable(evsel_list); 576 577 if (stat_config.initial_delay > 0) 577 578 pr_info(EVLIST_ENABLED_MSG); 578 579 } ··· 582 581 583 582 static void disable_counters(void) 584 583 { 584 + struct evsel *counter; 585 + 585 586 /* 586 587 * If we don't have tracee (attaching to task or cpu), counters may 587 588 * still be running. To get accurate group ratios, we must stop groups 588 589 * from counting before reading their constituent counters. 589 590 */ 590 - if (!target__none(&target)) 591 - evlist__disable(evsel_list); 591 + if (!target__none(&target)) { 592 + evlist__for_each_entry(evsel_list, counter) 593 + bpf_counter__disable(counter); 594 + if (!all_counters_use_bpf) 595 + evlist__disable(evsel_list); 596 + } 592 597 } 593 598 594 599 static volatile int workload_exec_errno;
-3
tools/perf/util/evlist.c
··· 425 425 if (affinity__setup(&affinity) < 0) 426 426 return; 427 427 428 - evlist__for_each_entry(evlist, pos) 429 - bpf_counter__disable(pos); 430 - 431 428 /* Disable 'immediate' events last */ 432 429 for (imm = 0; imm <= 1; imm++) { 433 430 evlist__for_each_cpu(evlist, i, cpu) {