Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf stat: Remove evlist__add_default_attrs, use strings

add_default_attributes would add evsels by having pre-created
perf_event_attr, however, this needed fixing for hybrid as the
extended PMU type was necessary for each core PMU. The logic for this
was in an arch specific x86 function and wasn't present for ARM,
meaning that default events weren't being opened on all PMUs on
ARM. Change the creation of the default events to use parse_events and
strings as that will open the events on all PMUs.

Rather than try to detect events on PMUs before parsing, parse the
event but skip its output in stat-display.

The previous order of hardware events was: cycles,
stalled-cycles-frontend, stalled-cycles-backend, instructions. As
instructions is a more fundamental concept the order is changed to:
instructions, cycles, stalled-cycles-frontend, stalled-cycles-backend.

Closes: https://lore.kernel.org/lkml/CAP-5=fVABSBZnsmtRn1uF-k-G1GWM-L5SgiinhPTfHbQsKXb_g@mail.gmail.com/
Acked-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Ian Rogers <irogers@google.com>
[Don't display unsupported default events except 'cycles']
Acked-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: James Clark <james.clark@linaro.org>
Cc: Yang Jihong <yangjihong@bytedance.com>
Cc: Dominique Martinet <asmadeus@codewreck.org>
Cc: Colin Ian King <colin.i.king@gmail.com>
Cc: Howard Chu <howardchu95@gmail.com>
Cc: Ze Gao <zegao2021@gmail.com>
Cc: Yicong Yang <yangyicong@hisilicon.com>
Cc: Weilin Wang <weilin.wang@intel.com>
Cc: Will Deacon <will@kernel.org>
Cc: Mike Leach <mike.leach@linaro.org>
Cc: Jing Zhang <renyu.zj@linux.alibaba.com>
Cc: Yang Li <yang.lee@linux.alibaba.com>
Cc: Leo Yan <leo.yan@linux.dev>
Cc: ak@linux.intel.com
Cc: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: linux-arm-kernel@lists.infradead.org
Cc: Sun Haiyong <sunhaiyong@loongson.cn>
Cc: John Garry <john.g.garry@oracle.com>
Link: https://lore.kernel.org/r/20240926144851.245903-4-james.clark@linaro.org
Signed-off-by: Namhyung Kim <namhyung@kernel.org>

authored by

Ian Rogers and committed by
Namhyung Kim
d38461e9 057f8bfc

+132 -308
+3 -71
tools/perf/arch/x86/util/evlist.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 - #include <stdio.h> 3 - #include "util/pmu.h" 4 - #include "util/pmus.h" 5 - #include "util/evlist.h" 6 - #include "util/parse-events.h" 7 - #include "util/event.h" 2 + #include <string.h> 3 + #include "../../../util/evlist.h" 4 + #include "../../../util/evsel.h" 8 5 #include "topdown.h" 9 6 #include "evsel.h" 10 - 11 - static int ___evlist__add_default_attrs(struct evlist *evlist, 12 - struct perf_event_attr *attrs, 13 - size_t nr_attrs) 14 - { 15 - LIST_HEAD(head); 16 - size_t i = 0; 17 - 18 - for (i = 0; i < nr_attrs; i++) 19 - event_attr_init(attrs + i); 20 - 21 - if (perf_pmus__num_core_pmus() == 1) 22 - return evlist__add_attrs(evlist, attrs, nr_attrs); 23 - 24 - for (i = 0; i < nr_attrs; i++) { 25 - struct perf_pmu *pmu = NULL; 26 - 27 - if (attrs[i].type == PERF_TYPE_SOFTWARE) { 28 - struct evsel *evsel = evsel__new(attrs + i); 29 - 30 - if (evsel == NULL) 31 - goto out_delete_partial_list; 32 - list_add_tail(&evsel->core.node, &head); 33 - continue; 34 - } 35 - 36 - while ((pmu = perf_pmus__scan_core(pmu)) != NULL) { 37 - struct perf_cpu_map *cpus; 38 - struct evsel *evsel; 39 - 40 - evsel = evsel__new(attrs + i); 41 - if (evsel == NULL) 42 - goto out_delete_partial_list; 43 - evsel->core.attr.config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT; 44 - cpus = perf_cpu_map__get(pmu->cpus); 45 - evsel->core.cpus = cpus; 46 - evsel->core.own_cpus = perf_cpu_map__get(cpus); 47 - evsel->pmu_name = strdup(pmu->name); 48 - list_add_tail(&evsel->core.node, &head); 49 - } 50 - } 51 - 52 - evlist__splice_list_tail(evlist, &head); 53 - 54 - return 0; 55 - 56 - out_delete_partial_list: 57 - { 58 - struct evsel *evsel, *n; 59 - 60 - __evlist__for_each_entry_safe(&head, n, evsel) 61 - evsel__delete(evsel); 62 - } 63 - return -1; 64 - } 65 - 66 - int arch_evlist__add_default_attrs(struct evlist *evlist, 67 - struct perf_event_attr *attrs, 68 - size_t nr_attrs) 69 - { 70 - if (!nr_attrs) 71 - return 0; 72 - 73 - return 
___evlist__add_default_attrs(evlist, attrs, nr_attrs); 74 - } 75 7 76 8 int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs) 77 9 {
+119 -180
tools/perf/builtin-stat.c
··· 1842 1842 } 1843 1843 1844 1844 /* 1845 - * Add default attributes, if there were no attributes specified or 1845 + * Add default events, if there were no attributes specified or 1846 1846 * if -d/--detailed, -d -d or -d -d -d is used: 1847 1847 */ 1848 - static int add_default_attributes(void) 1848 + static int add_default_events(void) 1849 1849 { 1850 - struct perf_event_attr default_attrs0[] = { 1851 - 1852 - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, 1853 - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES }, 1854 - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, 1855 - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, 1856 - 1857 - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, 1858 - }; 1859 - struct perf_event_attr frontend_attrs[] = { 1860 - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, 1861 - }; 1862 - struct perf_event_attr backend_attrs[] = { 1863 - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, 1864 - }; 1865 - struct perf_event_attr default_attrs1[] = { 1866 - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, 1867 - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, 1868 - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, 1869 - 1870 - }; 1871 - 1872 - /* 1873 - * Detailed stats (-d), covering the L1 and last level data caches: 1874 - */ 1875 - struct perf_event_attr detailed_attrs[] = { 1876 - 1877 - { .type = PERF_TYPE_HW_CACHE, 1878 - .config = 1879 - PERF_COUNT_HW_CACHE_L1D << 0 | 1880 - (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1881 - (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1882 - 1883 - { .type = PERF_TYPE_HW_CACHE, 1884 - .config = 1885 - PERF_COUNT_HW_CACHE_L1D << 0 | 1886 - (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1887 - (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1888 - 1889 - { .type = 
PERF_TYPE_HW_CACHE, 1890 - .config = 1891 - PERF_COUNT_HW_CACHE_LL << 0 | 1892 - (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1893 - (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1894 - 1895 - { .type = PERF_TYPE_HW_CACHE, 1896 - .config = 1897 - PERF_COUNT_HW_CACHE_LL << 0 | 1898 - (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1899 - (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1900 - }; 1901 - 1902 - /* 1903 - * Very detailed stats (-d -d), covering the instruction cache and the TLB caches: 1904 - */ 1905 - struct perf_event_attr very_detailed_attrs[] = { 1906 - 1907 - { .type = PERF_TYPE_HW_CACHE, 1908 - .config = 1909 - PERF_COUNT_HW_CACHE_L1I << 0 | 1910 - (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1911 - (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1912 - 1913 - { .type = PERF_TYPE_HW_CACHE, 1914 - .config = 1915 - PERF_COUNT_HW_CACHE_L1I << 0 | 1916 - (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1917 - (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1918 - 1919 - { .type = PERF_TYPE_HW_CACHE, 1920 - .config = 1921 - PERF_COUNT_HW_CACHE_DTLB << 0 | 1922 - (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1923 - (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1924 - 1925 - { .type = PERF_TYPE_HW_CACHE, 1926 - .config = 1927 - PERF_COUNT_HW_CACHE_DTLB << 0 | 1928 - (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1929 - (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1930 - 1931 - { .type = PERF_TYPE_HW_CACHE, 1932 - .config = 1933 - PERF_COUNT_HW_CACHE_ITLB << 0 | 1934 - (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1935 - (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1936 - 1937 - { .type = PERF_TYPE_HW_CACHE, 1938 - .config = 1939 - PERF_COUNT_HW_CACHE_ITLB << 0 | 1940 - (PERF_COUNT_HW_CACHE_OP_READ << 8) | 1941 - (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1942 - 1943 - }; 1944 - 1945 - /* 1946 - * Very, very detailed stats (-d -d -d), adding prefetch events: 1947 - */ 1948 - struct perf_event_attr very_very_detailed_attrs[] = { 1949 - 1950 - { .type = PERF_TYPE_HW_CACHE, 1951 - .config = 1952 - PERF_COUNT_HW_CACHE_L1D << 0 | 1953 - 
(PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | 1954 - (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, 1955 - 1956 - { .type = PERF_TYPE_HW_CACHE, 1957 - .config = 1958 - PERF_COUNT_HW_CACHE_L1D << 0 | 1959 - (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | 1960 - (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, 1961 - }; 1962 - 1963 - struct perf_event_attr default_null_attrs[] = {}; 1964 1850 const char *pmu = parse_events_option_args.pmu_filter ?: "all"; 1851 + struct parse_events_error err; 1852 + struct evlist *evlist = evlist__new(); 1853 + struct evsel *evsel; 1854 + int ret = 0; 1855 + 1856 + if (!evlist) 1857 + return -ENOMEM; 1858 + 1859 + parse_events_error__init(&err); 1965 1860 1966 1861 /* Set attrs if no event is selected and !null_run: */ 1967 1862 if (stat_config.null_run) 1968 - return 0; 1863 + goto out; 1969 1864 1970 1865 if (transaction_run) { 1971 1866 /* Handle -T as -M transaction. Once platform specific metrics ··· 1870 1975 */ 1871 1976 if (!metricgroup__has_metric(pmu, "transaction")) { 1872 1977 pr_err("Missing transaction metrics\n"); 1873 - return -1; 1978 + ret = -1; 1979 + goto out; 1874 1980 } 1875 - return metricgroup__parse_groups(evsel_list, pmu, "transaction", 1981 + ret = metricgroup__parse_groups(evlist, pmu, "transaction", 1876 1982 stat_config.metric_no_group, 1877 1983 stat_config.metric_no_merge, 1878 1984 stat_config.metric_no_threshold, ··· 1881 1985 stat_config.system_wide, 1882 1986 stat_config.hardware_aware_grouping, 1883 1987 &stat_config.metric_events); 1988 + goto out; 1884 1989 } 1885 1990 1886 1991 if (smi_cost) { ··· 1889 1992 1890 1993 if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) { 1891 1994 pr_err("freeze_on_smi is not supported.\n"); 1892 - return -1; 1995 + ret = -1; 1996 + goto out; 1893 1997 } 1894 1998 1895 1999 if (!smi) { 1896 2000 if (sysfs__write_int(FREEZE_ON_SMI_PATH, 1) < 0) { 1897 - fprintf(stderr, "Failed to set freeze_on_smi.\n"); 1898 - return -1; 2001 + pr_err("Failed to set freeze_on_smi.\n"); 2002 + ret = 
-1; 2003 + goto out; 1899 2004 } 1900 2005 smi_reset = true; 1901 2006 } 1902 2007 1903 2008 if (!metricgroup__has_metric(pmu, "smi")) { 1904 2009 pr_err("Missing smi metrics\n"); 1905 - return -1; 2010 + ret = -1; 2011 + goto out; 1906 2012 } 1907 2013 1908 2014 if (!force_metric_only) 1909 2015 stat_config.metric_only = true; 1910 2016 1911 - return metricgroup__parse_groups(evsel_list, pmu, "smi", 2017 + ret = metricgroup__parse_groups(evlist, pmu, "smi", 1912 2018 stat_config.metric_no_group, 1913 2019 stat_config.metric_no_merge, 1914 2020 stat_config.metric_no_threshold, ··· 1919 2019 stat_config.system_wide, 1920 2020 stat_config.hardware_aware_grouping, 1921 2021 &stat_config.metric_events); 2022 + goto out; 1922 2023 } 1923 2024 1924 2025 if (topdown_run) { ··· 1932 2031 if (!max_level) { 1933 2032 pr_err("Topdown requested but the topdown metric groups aren't present.\n" 1934 2033 "(See perf list the metric groups have names like TopdownL1)\n"); 1935 - return -1; 2034 + ret = -1; 2035 + goto out; 1936 2036 } 1937 2037 if (stat_config.topdown_level > max_level) { 1938 2038 pr_err("Invalid top-down metrics level. The max level is %u.\n", max_level); 1939 - return -1; 1940 - } else if (!stat_config.topdown_level) 2039 + ret = -1; 2040 + goto out; 2041 + } else if (!stat_config.topdown_level) { 1941 2042 stat_config.topdown_level = 1; 1942 - 2043 + } 1943 2044 if (!stat_config.interval && !stat_config.metric_only) { 1944 2045 fprintf(stat_config.output, 1945 2046 "Topdown accuracy may decrease when measuring long periods.\n" 1946 2047 "Please print the result regularly, e.g. 
-I1000\n"); 1947 2048 } 1948 2049 str[8] = stat_config.topdown_level + '0'; 1949 - if (metricgroup__parse_groups(evsel_list, 2050 + if (metricgroup__parse_groups(evlist, 1950 2051 pmu, str, 1951 2052 /*metric_no_group=*/false, 1952 2053 /*metric_no_merge=*/false, ··· 1956 2053 stat_config.user_requested_cpu_list, 1957 2054 stat_config.system_wide, 1958 2055 stat_config.hardware_aware_grouping, 1959 - &stat_config.metric_events) < 0) 1960 - return -1; 2056 + &stat_config.metric_events) < 0) { 2057 + ret = -1; 2058 + goto out; 2059 + } 1961 2060 } 1962 2061 1963 2062 if (!stat_config.topdown_level) 1964 2063 stat_config.topdown_level = 1; 1965 2064 1966 - if (!evsel_list->core.nr_entries) { 2065 + if (!evlist->core.nr_entries && !evsel_list->core.nr_entries) { 1967 2066 /* No events so add defaults. */ 1968 2067 if (target__has_cpu(&target)) 1969 - default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK; 2068 + ret = parse_events(evlist, "cpu-clock", &err); 2069 + else 2070 + ret = parse_events(evlist, "task-clock", &err); 2071 + if (ret) 2072 + goto out; 1970 2073 1971 - if (evlist__add_default_attrs(evsel_list, default_attrs0) < 0) 1972 - return -1; 1973 - if (perf_pmus__have_event("cpu", "stalled-cycles-frontend")) { 1974 - if (evlist__add_default_attrs(evsel_list, frontend_attrs) < 0) 1975 - return -1; 1976 - } 1977 - if (perf_pmus__have_event("cpu", "stalled-cycles-backend")) { 1978 - if (evlist__add_default_attrs(evsel_list, backend_attrs) < 0) 1979 - return -1; 1980 - } 1981 - if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0) 1982 - return -1; 2074 + ret = parse_events(evlist, 2075 + "context-switches," 2076 + "cpu-migrations," 2077 + "page-faults," 2078 + "instructions," 2079 + "cycles," 2080 + "stalled-cycles-frontend," 2081 + "stalled-cycles-backend," 2082 + "branches," 2083 + "branch-misses", 2084 + &err); 2085 + if (ret) 2086 + goto out; 2087 + 1983 2088 /* 1984 2089 * Add TopdownL1 metrics if they exist. 
To minimize 1985 2090 * multiplexing, don't request threshold computation. 1986 2091 */ 1987 2092 if (metricgroup__has_metric(pmu, "Default")) { 1988 2093 struct evlist *metric_evlist = evlist__new(); 1989 - struct evsel *metric_evsel; 1990 2094 1991 - if (!metric_evlist) 1992 - return -1; 1993 - 2095 + if (!metric_evlist) { 2096 + ret = -ENOMEM; 2097 + goto out; 2098 + } 1994 2099 if (metricgroup__parse_groups(metric_evlist, pmu, "Default", 1995 2100 /*metric_no_group=*/false, 1996 2101 /*metric_no_merge=*/false, ··· 2006 2095 stat_config.user_requested_cpu_list, 2007 2096 stat_config.system_wide, 2008 2097 stat_config.hardware_aware_grouping, 2009 - &stat_config.metric_events) < 0) 2010 - return -1; 2011 - 2012 - evlist__for_each_entry(metric_evlist, metric_evsel) { 2013 - metric_evsel->skippable = true; 2014 - metric_evsel->default_metricgroup = true; 2098 + &stat_config.metric_events) < 0) { 2099 + ret = -1; 2100 + goto out; 2015 2101 } 2016 - evlist__splice_list_tail(evsel_list, &metric_evlist->core.entries); 2102 + 2103 + evlist__for_each_entry(metric_evlist, evsel) 2104 + evsel->default_metricgroup = true; 2105 + 2106 + evlist__splice_list_tail(evlist, &metric_evlist->core.entries); 2017 2107 evlist__delete(metric_evlist); 2018 2108 } 2019 - 2020 - /* Platform specific attrs */ 2021 - if (evlist__add_default_attrs(evsel_list, default_null_attrs) < 0) 2022 - return -1; 2023 2109 } 2024 2110 2025 2111 /* Detailed events get appended to the event list: */ 2026 2112 2027 - if (detailed_run < 1) 2028 - return 0; 2029 - 2030 - /* Append detailed run extra attributes: */ 2031 - if (evlist__add_default_attrs(evsel_list, detailed_attrs) < 0) 2032 - return -1; 2033 - 2034 - if (detailed_run < 2) 2035 - return 0; 2036 - 2037 - /* Append very detailed run extra attributes: */ 2038 - if (evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0) 2039 - return -1; 2040 - 2041 - if (detailed_run < 3) 2042 - return 0; 2043 - 2044 - /* Append very, very detailed run 
extra attributes: */ 2045 - return evlist__add_default_attrs(evsel_list, very_very_detailed_attrs); 2113 + if (!ret && detailed_run >= 1) { 2114 + /* 2115 + * Detailed stats (-d), covering the L1 and last level data 2116 + * caches: 2117 + */ 2118 + ret = parse_events(evlist, 2119 + "L1-dcache-loads," 2120 + "L1-dcache-load-misses," 2121 + "LLC-loads," 2122 + "LLC-load-misses", 2123 + &err); 2124 + } 2125 + if (!ret && detailed_run >= 2) { 2126 + /* 2127 + * Very detailed stats (-d -d), covering the instruction cache 2128 + * and the TLB caches: 2129 + */ 2130 + ret = parse_events(evlist, 2131 + "L1-icache-loads," 2132 + "L1-icache-load-misses," 2133 + "dTLB-loads," 2134 + "dTLB-load-misses," 2135 + "iTLB-loads," 2136 + "iTLB-load-misses", 2137 + &err); 2138 + } 2139 + if (!ret && detailed_run >= 3) { 2140 + /* 2141 + * Very, very detailed stats (-d -d -d), adding prefetch events: 2142 + */ 2143 + ret = parse_events(evlist, 2144 + "L1-dcache-prefetches," 2145 + "L1-dcache-prefetch-misses", 2146 + &err); 2147 + } 2148 + out: 2149 + if (!ret) { 2150 + evlist__for_each_entry(evlist, evsel) { 2151 + /* 2152 + * Make at least one event non-skippable so fatal errors are visible. 2153 + * 'cycles' always used to be default and non-skippable, so use that. 2154 + */ 2155 + if (strcmp("cycles", evsel__name(evsel))) 2156 + evsel->skippable = true; 2157 + } 2158 + } 2159 + parse_events_error__exit(&err); 2160 + evlist__splice_list_tail(evsel_list, &evlist->core.entries); 2161 + evlist__delete(evlist); 2162 + return ret; 2046 2163 } 2047 2164 2048 2165 static const char * const stat_record_usage[] = { ··· 2727 2788 } 2728 2789 } 2729 2790 2730 - if (add_default_attributes()) 2791 + if (add_default_events()) 2731 2792 goto out; 2732 2793 2733 2794 if (stat_config.cgroup_list) {
-43
tools/perf/util/evlist.c
··· 320 320 } 321 321 #endif 322 322 323 - int evlist__add_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs) 324 - { 325 - struct evsel *evsel, *n; 326 - LIST_HEAD(head); 327 - size_t i; 328 - 329 - for (i = 0; i < nr_attrs; i++) { 330 - evsel = evsel__new_idx(attrs + i, evlist->core.nr_entries + i); 331 - if (evsel == NULL) 332 - goto out_delete_partial_list; 333 - list_add_tail(&evsel->core.node, &head); 334 - } 335 - 336 - evlist__splice_list_tail(evlist, &head); 337 - 338 - return 0; 339 - 340 - out_delete_partial_list: 341 - __evlist__for_each_entry_safe(&head, n, evsel) 342 - evsel__delete(evsel); 343 - return -1; 344 - } 345 - 346 - int __evlist__add_default_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs) 347 - { 348 - size_t i; 349 - 350 - for (i = 0; i < nr_attrs; i++) 351 - event_attr_init(attrs + i); 352 - 353 - return evlist__add_attrs(evlist, attrs, nr_attrs); 354 - } 355 - 356 - __weak int arch_evlist__add_default_attrs(struct evlist *evlist, 357 - struct perf_event_attr *attrs, 358 - size_t nr_attrs) 359 - { 360 - if (!nr_attrs) 361 - return 0; 362 - 363 - return __evlist__add_default_attrs(evlist, attrs, nr_attrs); 364 - } 365 - 366 323 struct evsel *evlist__find_tracepoint_by_id(struct evlist *evlist, int id) 367 324 { 368 325 struct evsel *evsel;
-12
tools/perf/util/evlist.h
··· 102 102 void evlist__add(struct evlist *evlist, struct evsel *entry); 103 103 void evlist__remove(struct evlist *evlist, struct evsel *evsel); 104 104 105 - int evlist__add_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs); 106 - 107 - int __evlist__add_default_attrs(struct evlist *evlist, 108 - struct perf_event_attr *attrs, size_t nr_attrs); 109 - 110 - int arch_evlist__add_default_attrs(struct evlist *evlist, 111 - struct perf_event_attr *attrs, 112 - size_t nr_attrs); 113 - 114 - #define evlist__add_default_attrs(evlist, array) \ 115 - arch_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array)) 116 - 117 105 int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs); 118 106 119 107 int evlist__add_dummy(struct evlist *evlist);
+8
tools/perf/util/stat-display.c
··· 7 7 #include <perf/cpumap.h> 8 8 #include "color.h" 9 9 #include "counts.h" 10 + #include "debug.h" 10 11 #include "evlist.h" 11 12 #include "evsel.h" 12 13 #include "stat.h" ··· 967 966 { 968 967 struct perf_cpu cpu; 969 968 int idx; 969 + 970 + /* 971 + * Skip unsupported default events when not verbose. (default events 972 + * are all marked 'skippable'). 973 + */ 974 + if (verbose == 0 && counter->skippable && !counter->supported) 975 + return true; 970 976 971 977 /* 972 978 * Skip value 0 when enabling --per-thread globally,
+2 -2
tools/perf/util/stat-shadow.c
··· 76 76 memset(&ru_stats, 0, sizeof(ru_stats)); 77 77 } 78 78 79 - static enum stat_type evsel__stat_type(const struct evsel *evsel) 79 + static enum stat_type evsel__stat_type(struct evsel *evsel) 80 80 { 81 81 /* Fake perf_hw_cache_op_id values for use with evsel__match. */ 82 82 u64 PERF_COUNT_hw_cache_l1d_miss = PERF_COUNT_HW_CACHE_L1D | ··· 152 152 153 153 static double find_stat(const struct evsel *evsel, int aggr_idx, enum stat_type type) 154 154 { 155 - const struct evsel *cur; 155 + struct evsel *cur; 156 156 int evsel_ctx = evsel_context(evsel); 157 157 158 158 evlist__for_each_entry(evsel->evlist, cur) {