Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf pmus: Remove perf_pmus__has_hybrid

perf_pmus__has_hybrid was used to detect when there was >1 core PMU;
this can be achieved with perf_pmus__num_core_pmus, which doesn't depend
upon is_pmu_hybrid and PMU name comparisons. When modifying the
function calls, take the opportunity to improve comments, to
enable/simplify tests that were previously failing for hybrid but now
pass, and to simplify generic code.

Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Ali Saidi <alisaidi@amazon.com>
Cc: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Dmitrii Dolgov <9erthalion6@gmail.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jing Zhang <renyu.zj@linux.alibaba.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.g.garry@oracle.com>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Kang Minchul <tegongkang@gmail.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mike Leach <mike.leach@linaro.org>
Cc: Ming Wang <wangming01@loongson.cn>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>
Cc: Rob Herring <robh@kernel.org>
Cc: Sandipan Das <sandipan.das@amd.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Suzuki Poulouse <suzuki.poulose@arm.com>
Cc: Thomas Richter <tmricht@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Cc: coresight@lists.linaro.org
Cc: linux-arm-kernel@lists.infradead.org
Link: https://lore.kernel.org/r/20230527072210.2900565-34-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

authored by

Ian Rogers and committed by
Arnaldo Carvalho de Melo
94f9eb95 002c4845

+31 -80
+1 -1
tools/perf/arch/x86/tests/hybrid.c
··· 281 281 282 282 int test__hybrid(struct test_suite *test __maybe_unused, int subtest __maybe_unused) 283 283 { 284 - if (!perf_pmus__has_hybrid()) 284 + if (perf_pmus__num_core_pmus() == 1) 285 285 return TEST_SKIP; 286 286 287 287 return test_events(test__hybrid_events, ARRAY_SIZE(test__hybrid_events));
+1 -1
tools/perf/arch/x86/util/evlist.c
··· 18 18 for (i = 0; i < nr_attrs; i++) 19 19 event_attr_init(attrs + i); 20 20 21 - if (!perf_pmus__has_hybrid()) 21 + if (perf_pmus__num_core_pmus() == 1) 22 22 return evlist__add_attrs(evlist, attrs, nr_attrs); 23 23 24 24 for (i = 0; i < nr_attrs; i++) {
+1 -1
tools/perf/arch/x86/util/perf_regs.c
··· 292 292 */ 293 293 attr.sample_period = 1; 294 294 295 - if (perf_pmus__has_hybrid()) { 295 + if (perf_pmus__num_core_pmus() > 1) { 296 296 struct perf_pmu *pmu = NULL; 297 297 __u64 type = PERF_TYPE_RAW; 298 298
+2 -2
tools/perf/builtin-record.c
··· 1294 1294 * of waiting or event synthesis. 1295 1295 */ 1296 1296 if (opts->target.initial_delay || target__has_cpu(&opts->target) || 1297 - perf_pmus__has_hybrid()) { 1297 + perf_pmus__num_core_pmus() > 1) { 1298 1298 pos = evlist__get_tracking_event(evlist); 1299 1299 if (!evsel__is_dummy_event(pos)) { 1300 1300 /* Set up dummy event. */ ··· 2193 2193 char *new_name; 2194 2194 int ret; 2195 2195 2196 - if (!perf_pmus__has_hybrid()) 2196 + if (perf_pmus__num_core_pmus() == 1) 2197 2197 return; 2198 2198 2199 2199 evlist__for_each_entry(evlist, pos) {
+8 -1
tools/perf/tests/attr.c
··· 185 185 char path_dir[PATH_MAX]; 186 186 char *exec_path; 187 187 188 - if (perf_pmus__has_hybrid()) 188 + if (perf_pmus__num_core_pmus() > 1) { 189 + /* 190 + * TODO: Attribute tests hard code the PMU type. If there are >1 191 + * core PMU then each PMU will have a different type which 192 + * requires additional support. 193 + */ 194 + pr_debug("Skip test on hybrid systems"); 189 195 return TEST_SKIP; 196 + } 190 197 191 198 /* First try development tree tests. */ 192 199 if (!lstat("./tests", &st))
+2 -5
tools/perf/tests/parse-metric.c
··· 302 302 TEST_ASSERT_VAL("DCache_L2 failed", test_dcache_l2() == 0); 303 303 TEST_ASSERT_VAL("recursion fail failed", test_recursion_fail() == 0); 304 304 TEST_ASSERT_VAL("Memory bandwidth", test_memory_bandwidth() == 0); 305 - 306 - if (!perf_pmus__has_hybrid()) { 307 - TEST_ASSERT_VAL("cache_miss_cycles failed", test_cache_miss_cycles() == 0); 308 - TEST_ASSERT_VAL("test metric group", test_metric_group() == 0); 309 - } 305 + TEST_ASSERT_VAL("cache_miss_cycles failed", test_cache_miss_cycles() == 0); 306 + TEST_ASSERT_VAL("test metric group", test_metric_group() == 0); 310 307 return 0; 311 308 } 312 309
+1 -11
tools/perf/tests/switch-tracking.c
··· 375 375 cpu_clocks_evsel = evlist__last(evlist); 376 376 377 377 /* Second event */ 378 - if (perf_pmus__has_hybrid()) { 379 - cycles = "cpu_core/cycles/u"; 380 - err = parse_event(evlist, cycles); 381 - if (err) { 382 - cycles = "cpu_atom/cycles/u"; 383 - pr_debug("Trying %s\n", cycles); 384 - err = parse_event(evlist, cycles); 385 - } 386 - } else { 387 - err = parse_event(evlist, cycles); 388 - } 378 + err = parse_event(evlist, cycles); 389 379 if (err) { 390 380 pr_debug("Failed to parse event %s\n", cycles); 391 381 goto out_err;
+2 -12
tools/perf/tests/topology.c
··· 41 41 session = perf_session__new(&data, NULL); 42 42 TEST_ASSERT_VAL("can't get session", !IS_ERR(session)); 43 43 44 - if (!perf_pmus__has_hybrid()) { 45 - session->evlist = evlist__new_default(); 46 - TEST_ASSERT_VAL("can't get evlist", session->evlist); 47 - } else { 48 - struct parse_events_error err; 49 - 50 - session->evlist = evlist__new(); 51 - TEST_ASSERT_VAL("can't get evlist", session->evlist); 52 - parse_events_error__init(&err); 53 - parse_events(session->evlist, "cpu_core/cycles/", &err); 54 - parse_events_error__exit(&err); 55 - } 44 + session->evlist = evlist__new_default(); 45 + TEST_ASSERT_VAL("can't get evlist", session->evlist); 56 46 57 47 perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY); 58 48 perf_header__set_feat(&session->header, HEADER_NRCPUS);
+2 -8
tools/perf/util/cputopo.c
··· 472 472 { 473 473 struct perf_pmu *pmu = NULL; 474 474 struct hybrid_topology *tp = NULL; 475 - u32 nr = 0, i = 0; 475 + int nr = perf_pmus__num_core_pmus(), i = 0; 476 476 477 - if (!perf_pmus__has_hybrid()) 478 - return NULL; 479 - 480 - while ((pmu = perf_pmus__scan_core(pmu)) != NULL) 481 - nr++; 482 - 483 - if (nr == 0) 477 + if (nr <= 1) 484 478 return NULL; 485 479 486 480 tp = zalloc(sizeof(*tp) + sizeof(tp->nodes[0]) * nr);
+1 -1
tools/perf/util/evsel.c
··· 3140 3140 */ 3141 3141 bool evsel__is_hybrid(const struct evsel *evsel) 3142 3142 { 3143 - if (!perf_pmus__has_hybrid()) 3143 + if (perf_pmus__num_core_pmus() == 1) 3144 3144 return false; 3145 3145 3146 3146 return evsel->core.is_pmu_core;
+1 -1
tools/perf/util/header.c
··· 1605 1605 * Write hybrid pmu caps first to maintain compatibility with 1606 1606 * older perf tool. 1607 1607 */ 1608 - if (perf_pmus__has_hybrid()) { 1608 + if (perf_pmus__num_core_pmus() > 1) { 1609 1609 pmu = NULL; 1610 1610 while ((pmu = perf_pmus__scan_core(pmu))) { 1611 1611 ret = __write_pmu_caps(ff, pmu, true);
+5 -13
tools/perf/util/mem-events.c
··· 121 121 for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) { 122 122 struct perf_mem_event *e = perf_mem_events__ptr(j); 123 123 char sysfs_name[100]; 124 + struct perf_pmu *pmu = NULL; 124 125 125 126 /* 126 127 * If the event entry isn't valid, skip initialization ··· 130 129 if (!e->tag) 131 130 continue; 132 131 133 - if (!perf_pmus__has_hybrid()) { 134 - scnprintf(sysfs_name, sizeof(sysfs_name), 135 - e->sysfs_name, "cpu"); 136 - e->supported = perf_mem_event__supported(mnt, sysfs_name); 137 - } else { 138 - struct perf_pmu *pmu = NULL; 139 - 140 - while ((pmu = perf_pmus__scan_core(pmu)) != NULL) { 141 - scnprintf(sysfs_name, sizeof(sysfs_name), 142 - e->sysfs_name, pmu->name); 143 - e->supported |= perf_mem_event__supported(mnt, sysfs_name); 144 - } 132 + while ((pmu = perf_pmus__scan_core(pmu)) != NULL) { 133 + scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name, pmu->name); 134 + e->supported |= perf_mem_event__supported(mnt, sysfs_name); 145 135 } 146 136 147 137 if (e->supported) ··· 188 196 if (!e->record) 189 197 continue; 190 198 191 - if (!perf_pmus__has_hybrid()) { 199 + if (perf_pmus__num_core_pmus() == 1) { 192 200 if (!e->supported) { 193 201 pr_err("failed: event '%s' not supported\n", 194 202 perf_mem_events__name(j, NULL));
+1 -1
tools/perf/util/metricgroup.c
··· 274 274 const char *metric_id; 275 275 struct evsel *ev; 276 276 size_t ids_size, matched_events, i; 277 - bool all_pmus = !strcmp(pmu, "all") || !perf_pmus__has_hybrid() || !is_pmu_hybrid(pmu); 277 + bool all_pmus = !strcmp(pmu, "all") || perf_pmus__num_core_pmus() == 1 || !is_pmu_core(pmu); 278 278 279 279 *out_metric_events = NULL; 280 280 ids_size = hashmap__size(ids);
-18
tools/perf/util/pmus.c
··· 464 464 return pmu && perf_pmu__have_event(pmu, name); 465 465 } 466 466 467 - bool perf_pmus__has_hybrid(void) 468 - { 469 - static bool hybrid_scanned, has_hybrid; 470 - 471 - if (!hybrid_scanned) { 472 - struct perf_pmu *pmu = NULL; 473 - 474 - while ((pmu = perf_pmus__scan_core(pmu)) != NULL) { 475 - if (is_pmu_hybrid(pmu->name)) { 476 - has_hybrid = true; 477 - break; 478 - } 479 - } 480 - hybrid_scanned = true; 481 - } 482 - return has_hybrid; 483 - } 484 - 485 467 int perf_pmus__num_core_pmus(void) 486 468 { 487 469 static int count;
-1
tools/perf/util/pmus.h
··· 18 18 int perf_pmus__num_mem_pmus(void); 19 19 void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *print_state); 20 20 bool perf_pmus__have_event(const char *pname, const char *name); 21 - bool perf_pmus__has_hybrid(void); 22 21 int perf_pmus__num_core_pmus(void); 23 22 24 23 #endif /* __PMUS_H */
+2 -2
tools/perf/util/python.c
··· 103 103 return EOF; 104 104 } 105 105 106 - bool perf_pmus__has_hybrid(void) 106 + int perf_pmus__num_core_pmus(void) 107 107 { 108 - return false; 108 + return 1; 109 109 } 110 110 111 111 bool evsel__is_aux_event(const struct evsel *evsel __maybe_unused)
+1 -1
tools/perf/util/stat-display.c
··· 696 696 { 697 697 struct evsel *evsel; 698 698 699 - if (!perf_pmus__has_hybrid()) 699 + if (perf_pmus__num_core_pmus() == 1) 700 700 return false; 701 701 702 702 evlist__for_each_entry(evlist, evsel) {