Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf stat: Move create_perf_stat_counter() to builtin-stat.c

The function create_perf_stat_counter is only used in builtin-stat.c
and contains logic about retrying events specific to
builtin-stat.c.

Move the code to builtin-stat.c to tidy this up.

Reviewed-by: James Clark <james.clark@linaro.org>
Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Chun-Tse Shao <ctshao@google.com>
Cc: Dapeng Mi <dapeng1.mi@linux.intel.com>
Cc: Howard Chu <howardchu95@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Falcon <thomas.falcon@intel.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

Authored by Ian Rogers and committed by Arnaldo Carvalho de Melo.
6026ab65 79cc9b4b

+58 -62
+58 -2
tools/perf/builtin-stat.c
··· 676 676 return COUNTER_FATAL; 677 677 } 678 678 679 + static int create_perf_stat_counter(struct evsel *evsel, 680 + struct perf_stat_config *config, 681 + int cpu_map_idx) 682 + { 683 + struct perf_event_attr *attr = &evsel->core.attr; 684 + struct evsel *leader = evsel__leader(evsel); 685 + 686 + /* Reset supported flag as creating a stat counter is retried. */ 687 + attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | 688 + PERF_FORMAT_TOTAL_TIME_RUNNING; 689 + 690 + /* 691 + * The event is part of non trivial group, let's enable 692 + * the group read (for leader) and ID retrieval for all 693 + * members. 694 + */ 695 + if (leader->core.nr_members > 1) 696 + attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP; 697 + 698 + attr->inherit = !config->no_inherit && list_empty(&evsel->bpf_counter_list); 699 + 700 + /* 701 + * Some events get initialized with sample_(period/type) set, 702 + * like tracepoints. Clear it up for counting. 703 + */ 704 + attr->sample_period = 0; 705 + 706 + if (config->identifier) 707 + attr->sample_type = PERF_SAMPLE_IDENTIFIER; 708 + 709 + if (config->all_user) { 710 + attr->exclude_kernel = 1; 711 + attr->exclude_user = 0; 712 + } 713 + 714 + if (config->all_kernel) { 715 + attr->exclude_kernel = 0; 716 + attr->exclude_user = 1; 717 + } 718 + 719 + /* 720 + * Disabling all counters initially, they will be enabled 721 + * either manually by us or by kernel via enable_on_exec 722 + * set later. 
723 + */ 724 + if (evsel__is_group_leader(evsel)) { 725 + attr->disabled = 1; 726 + 727 + if (target__enable_on_exec(&target)) 728 + attr->enable_on_exec = 1; 729 + } 730 + 731 + return evsel__open_per_cpu_and_thread(evsel, evsel__cpus(evsel), cpu_map_idx, 732 + evsel->core.threads); 733 + } 734 + 679 735 static int __run_perf_stat(int argc, const char **argv, int run_idx) 680 736 { 681 737 int interval = stat_config.interval; ··· 792 736 if (evsel__is_bperf(counter)) 793 737 continue; 794 738 try_again: 795 - if (create_perf_stat_counter(counter, &stat_config, &target, 739 + if (create_perf_stat_counter(counter, &stat_config, 796 740 evlist_cpu_itr.cpu_map_idx) < 0) { 797 741 798 742 /* ··· 850 794 continue; 851 795 try_again_reset: 852 796 pr_debug2("reopening weak %s\n", evsel__name(counter)); 853 - if (create_perf_stat_counter(counter, &stat_config, &target, 797 + if (create_perf_stat_counter(counter, &stat_config, 854 798 evlist_cpu_itr.cpu_map_idx) < 0) { 855 799 856 800 switch (stat_handle_error(counter, errno)) {
-56
tools/perf/util/stat.c
··· 716 716 717 717 return ret; 718 718 } 719 - 720 - int create_perf_stat_counter(struct evsel *evsel, 721 - struct perf_stat_config *config, 722 - struct target *target, 723 - int cpu_map_idx) 724 - { 725 - struct perf_event_attr *attr = &evsel->core.attr; 726 - struct evsel *leader = evsel__leader(evsel); 727 - 728 - attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | 729 - PERF_FORMAT_TOTAL_TIME_RUNNING; 730 - 731 - /* 732 - * The event is part of non trivial group, let's enable 733 - * the group read (for leader) and ID retrieval for all 734 - * members. 735 - */ 736 - if (leader->core.nr_members > 1) 737 - attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP; 738 - 739 - attr->inherit = !config->no_inherit && list_empty(&evsel->bpf_counter_list); 740 - 741 - /* 742 - * Some events get initialized with sample_(period/type) set, 743 - * like tracepoints. Clear it up for counting. 744 - */ 745 - attr->sample_period = 0; 746 - 747 - if (config->identifier) 748 - attr->sample_type = PERF_SAMPLE_IDENTIFIER; 749 - 750 - if (config->all_user) { 751 - attr->exclude_kernel = 1; 752 - attr->exclude_user = 0; 753 - } 754 - 755 - if (config->all_kernel) { 756 - attr->exclude_kernel = 0; 757 - attr->exclude_user = 1; 758 - } 759 - 760 - /* 761 - * Disabling all counters initially, they will be enabled 762 - * either manually by us or by kernel via enable_on_exec 763 - * set later. 764 - */ 765 - if (evsel__is_group_leader(evsel)) { 766 - attr->disabled = 1; 767 - 768 - if (target__enable_on_exec(target)) 769 - attr->enable_on_exec = 1; 770 - } 771 - 772 - return evsel__open_per_cpu_and_thread(evsel, evsel__cpus(evsel), cpu_map_idx, 773 - evsel->core.threads); 774 - }
-4
tools/perf/util/stat.h
··· 223 223 size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp); 224 224 size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp); 225 225 226 - int create_perf_stat_counter(struct evsel *evsel, 227 - struct perf_stat_config *config, 228 - struct target *target, 229 - int cpu_map_idx); 230 226 void evlist__print_counters(struct evlist *evlist, struct perf_stat_config *config, 231 227 struct target *_target, struct timespec *ts, int argc, const char **argv); 232 228