Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf parse-events: Warn if a cpu term is unsupported by a CPU

Factor requested CPU warning out of evlist and into evsel. At the end
of adding an event, perform the warning check. To avoid repeatedly
testing if the cpu_list is empty, add a local variable.

```
$ perf stat -e cpu_atom/cycles,cpu=1/ -a true
WARNING: A requested CPU in '1' is not supported by PMU 'cpu_atom' (CPUs 16-27) for event 'cpu_atom/cycles/'

Performance counter stats for 'system wide':

<not supported> cpu_atom/cycles/

0.000781511 seconds time elapsed
```

Reviewed-by: Thomas Falcon <thomas.falcon@intel.com>
Signed-off-by: Ian Rogers <irogers@google.com>
Tested-by: James Clark <james.clark@linaro.org>
Link: https://lore.kernel.org/r/20250719030517.1990983-2-irogers@google.com
Signed-off-by: Namhyung Kim <namhyung@kernel.org>

Authored by Ian Rogers and committed by Namhyung Kim.
62f45122 12d30725

+35 -18
+1 -14
tools/perf/util/evlist.c
```diff
@@ tools/perf/util/evlist.c @@
 		return;

 	evlist__for_each_entry(evlist, pos) {
-		struct perf_cpu_map *intersect, *to_test, *online = cpu_map__online();
-		const struct perf_pmu *pmu = evsel__find_pmu(pos);
-
-		to_test = pmu && pmu->is_core ? pmu->cpus : online;
-		intersect = perf_cpu_map__intersect(to_test, user_requested_cpus);
-		if (!perf_cpu_map__equal(intersect, user_requested_cpus)) {
-			char buf[128];
-
-			cpu_map__snprint(to_test, buf, sizeof(buf));
-			pr_warning("WARNING: A requested CPU in '%s' is not supported by PMU '%s' (CPUs %s) for event '%s'\n",
-				   cpu_list, pmu ? pmu->name : "cpu", buf, evsel__name(pos));
-		}
-		perf_cpu_map__put(intersect);
-		perf_cpu_map__put(online);
+		evsel__warn_user_requested_cpus(pos, user_requested_cpus);
 	}
 	perf_cpu_map__put(user_requested_cpus);
 }
```
+24
tools/perf/util/evsel.c
```diff
@@ tools/perf/util/evsel.c @@
 		counter->uniquified_name = false;
 	}
 }
+
+void evsel__warn_user_requested_cpus(struct evsel *evsel, struct perf_cpu_map *user_requested_cpus)
+{
+	struct perf_cpu_map *intersect, *online = NULL;
+	const struct perf_pmu *pmu = evsel__find_pmu(evsel);
+
+	if (pmu && pmu->is_core) {
+		intersect = perf_cpu_map__intersect(pmu->cpus, user_requested_cpus);
+	} else {
+		online = cpu_map__online();
+		intersect = perf_cpu_map__intersect(online, user_requested_cpus);
+	}
+	if (!perf_cpu_map__equal(intersect, user_requested_cpus)) {
+		char buf1[128];
+		char buf2[128];
+
+		cpu_map__snprint(user_requested_cpus, buf1, sizeof(buf1));
+		cpu_map__snprint(online ?: pmu->cpus, buf2, sizeof(buf2));
+		pr_warning("WARNING: A requested CPU in '%s' is not supported by PMU '%s' (CPUs %s) for event '%s'\n",
+			   buf1, pmu ? pmu->name : "cpu", buf2, evsel__name(evsel));
+	}
+	perf_cpu_map__put(intersect);
+	perf_cpu_map__put(online);
+}
```
+2
tools/perf/util/evsel.h
```diff
@@ tools/perf/util/evsel.h @@
 bool evsel__is_offcpu_event(struct evsel *evsel);

+void evsel__warn_user_requested_cpus(struct evsel *evsel, struct perf_cpu_map *user_requested_cpus);
+
 #endif /* __PERF_EVSEL_H */
```
+8 -4
tools/perf/util/parse-events.c
```diff
@@ tools/perf/util/parse-events.c @@
 	struct evsel *evsel;
 	bool is_pmu_core;
 	struct perf_cpu_map *cpus;
+	bool has_cpu_list = !perf_cpu_map__is_empty(cpu_list);

 	/*
 	 * Ensure the first_wildcard_match's PMU matches that of the new event
@@
 	if (pmu) {
 		is_pmu_core = pmu->is_core;
-		cpus = perf_cpu_map__get(perf_cpu_map__is_empty(cpu_list) ? pmu->cpus : cpu_list);
+		cpus = perf_cpu_map__get(has_cpu_list ? cpu_list : pmu->cpus);
 		perf_pmu__warn_invalid_formats(pmu);
 		if (attr->type == PERF_TYPE_RAW || attr->type >= PERF_TYPE_MAX) {
 			perf_pmu__warn_invalid_config(pmu, attr->config, name,
@@
 	} else {
 		is_pmu_core = (attr->type == PERF_TYPE_HARDWARE ||
 			       attr->type == PERF_TYPE_HW_CACHE);
-		if (perf_cpu_map__is_empty(cpu_list))
-			cpus = is_pmu_core ? perf_cpu_map__new_online_cpus() : NULL;
-		else
+		if (has_cpu_list)
 			cpus = perf_cpu_map__get(cpu_list);
+		else
+			cpus = is_pmu_core ? cpu_map__online() : NULL;
 	}
 	if (init_attr)
 		event_attr_init(attr);
@@
 	if (list)
 		list_add_tail(&evsel->core.node, list);
+
+	if (has_cpu_list)
+		evsel__warn_user_requested_cpus(evsel, cpu_list);

 	return evsel;
 }
```