// SPDX-License-Identifier: GPL-2.0
#include <linux/hw_breakpoint.h>
#include <linux/err.h>
#include <linux/list_sort.h>
#include <linux/zalloc.h>
#include <dirent.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/param.h>
#include "cpumap.h"
#include "term.h"
#include "env.h"
#include "evlist.h"
#include "evsel.h"
#include <subcmd/parse-options.h>
#include "parse-events.h"
#include "string2.h"
#include "strbuf.h"
#include "debug.h"
#include <perf/cpumap.h>
#include <util/parse-events-bison.h>
#include <util/parse-events-flex.h>
#include "pmu.h"
#include "pmus.h"
#include "tp_pmu.h"
#include "asm/bug.h"
#include "ui/ui.h"
#include "util/parse-branch-options.h"
#include "util/evsel_config.h"
#include "util/event.h"
#include "util/bpf-filter.h"
#include "util/stat.h"
#include "util/tool_pmu.h"
#include "util/util.h"
#include "tracepoint.h"
#include <api/fs/tracing_path.h>

#define MAX_NAME_LEN 100

static int get_config_terms(const struct parse_events_terms *head_config,
			    struct list_head *head_terms);
static int parse_events_terms__copy(const struct parse_events_terms *src,
				    struct parse_events_terms *dest);
static int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct strbuf *sb);

static const char *const event_types[] = {
	[PERF_TYPE_HARDWARE]	= "hardware",
	[PERF_TYPE_SOFTWARE]	= "software",
	[PERF_TYPE_TRACEPOINT]	= "tracepoint",
	[PERF_TYPE_HW_CACHE]	= "hardware-cache",
	[PERF_TYPE_RAW]		= "raw",
	[PERF_TYPE_BREAKPOINT]	= "breakpoint",
};

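/*
 * Give a human-readable name for a perf_event_attr type, e.g.
 * event_type(PERF_TYPE_HARDWARE) yields "hardware". Out-of-range values
 * yield "unknown".
 */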
const char *event_type(size_t type)
{
	if (type >= PERF_TYPE_MAX)
		return "unknown";

	return event_types[type];
}

static char *get_config_str(const struct parse_events_terms *head_terms,
			    enum parse_events__term_type type_term)
{
	struct parse_events_term *term;

	if (!head_terms)
		return NULL;

	list_for_each_entry(term, &head_terms->terms, list)
		if (term->type_term == type_term)
			return term->val.str;

	return NULL;
}

static char *get_config_metric_id(const struct parse_events_terms *head_terms)
{
	return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID);
}

static char *get_config_name(const struct parse_events_terms *head_terms)
{
	return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME);
}

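/*
 * Collect the CPUs requested via "cpu=" terms. A minimal sketch of the
 * semantics, assuming the usual term syntax: each term may be a CPU number
 * ("cpu=0"), a PMU name whose CPU mask is taken ("cpu=cpu_atom"), or a CPU
 * list string; the resulting maps are merged, so "cpu=0,cpu=2" yields {0,2}.
 */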
static struct perf_cpu_map *get_config_cpu(const struct parse_events_terms *head_terms,
					   bool fake_pmu)
{
	struct parse_events_term *term;
	struct perf_cpu_map *cpus = NULL;

	if (!head_terms)
		return NULL;

	list_for_each_entry(term, &head_terms->terms, list) {
		struct perf_cpu_map *term_cpus;

		if (term->type_term != PARSE_EVENTS__TERM_TYPE_CPU)
			continue;

		if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
			term_cpus = perf_cpu_map__new_int(term->val.num);
		} else {
			struct perf_pmu *pmu = perf_pmus__find(term->val.str);

			if (pmu) {
				term_cpus = pmu->is_core && perf_cpu_map__is_empty(pmu->cpus)
					    ? cpu_map__online()
					    : perf_cpu_map__get(pmu->cpus);
			} else {
				term_cpus = perf_cpu_map__new(term->val.str);
				if (!term_cpus && fake_pmu) {
					/*
					 * Assume the PMU string makes sense on a different
					 * machine and fake a value with all online CPUs.
					 */
					term_cpus = cpu_map__online();
				}
			}
		}
		perf_cpu_map__merge(&cpus, term_cpus);
		perf_cpu_map__put(term_cpus);
	}

	return cpus;
}

/**
 * fix_raw - For each raw term see if there is an event (aka alias) in pmu that
 *           matches the raw's string value. If the string value matches an
 *           event then change the term to be an event, if not then change it
 *           to be a config term. For example, "read" may be an event of the
 *           PMU or a raw hex encoding of 0xead. The fix-up is done late so the
 *           PMU of the event can be determined and we don't need to scan all
 *           PMUs ahead-of-time.
 * @config_terms: the list of terms that may contain a raw term.
 * @pmu: the PMU to scan for events from.
 */
static void fix_raw(struct parse_events_terms *config_terms, struct perf_pmu *pmu)
{
	struct parse_events_term *term;

	list_for_each_entry(term, &config_terms->terms, list) {
		u64 num;

		if (term->type_term != PARSE_EVENTS__TERM_TYPE_RAW)
			continue;

		if (perf_pmu__have_event(pmu, term->val.str)) {
			zfree(&term->config);
			term->config = term->val.str;
			term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
			term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
			term->val.num = 1;
			term->no_value = true;
			continue;
		}

		zfree(&term->config);
		term->config = strdup("config");
		errno = 0;
		num = strtoull(term->val.str + 1, NULL, 16);
		assert(errno == 0);
		free(term->val.str);
		term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
		term->type_term = PARSE_EVENTS__TERM_TYPE_CONFIG;
		term->val.num = num;
		term->no_value = false;
	}
}

static struct evsel *
__add_event(struct list_head *list, int *idx,
	    struct perf_event_attr *attr,
	    bool init_attr,
	    const char *name, const char *metric_id, struct perf_pmu *pmu,
	    struct list_head *config_terms, struct evsel *first_wildcard_match,
	    struct perf_cpu_map *user_cpus, u64 alternate_hw_config)
{
	struct evsel *evsel;
	bool is_pmu_core;
	struct perf_cpu_map *cpus, *pmu_cpus;
	bool has_user_cpus = !perf_cpu_map__is_empty(user_cpus);

	/*
	 * Ensure the first_wildcard_match's PMU matches that of the new event
	 * being added. Otherwise try to match with another event further down
	 * the evlist.
	 */
	if (first_wildcard_match) {
		struct evsel *pos = list_prev_entry(first_wildcard_match, core.node);

		first_wildcard_match = NULL;
		list_for_each_entry_continue(pos, list, core.node) {
			if (perf_pmu__name_no_suffix_match(pos->pmu, pmu->name)) {
				first_wildcard_match = pos;
				break;
			}
			if (pos->pmu->is_core && (!pmu || pmu->is_core)) {
				first_wildcard_match = pos;
				break;
			}
		}
	}

	if (pmu) {
		perf_pmu__warn_invalid_formats(pmu);
		if (attr->type == PERF_TYPE_RAW || attr->type >= PERF_TYPE_MAX) {
			perf_pmu__warn_invalid_config(pmu, attr->config, name,
						      PERF_PMU_FORMAT_VALUE_CONFIG, "config");
			perf_pmu__warn_invalid_config(pmu, attr->config1, name,
						      PERF_PMU_FORMAT_VALUE_CONFIG1, "config1");
			perf_pmu__warn_invalid_config(pmu, attr->config2, name,
						      PERF_PMU_FORMAT_VALUE_CONFIG2, "config2");
			perf_pmu__warn_invalid_config(pmu, attr->config3, name,
						      PERF_PMU_FORMAT_VALUE_CONFIG3, "config3");
			perf_pmu__warn_invalid_config(pmu, attr->config4, name,
						      PERF_PMU_FORMAT_VALUE_CONFIG4, "config4");
		}
	}
	/*
	 * If a PMU wasn't given, such as for legacy events, find one now, past
	 * the point where the invalid-config warnings would be generated.
	 */
	if (!pmu)
		pmu = perf_pmus__find_by_attr(attr);

	if (pmu) {
		is_pmu_core = pmu->is_core;
		pmu_cpus = perf_cpu_map__get(pmu->cpus);
		if (perf_cpu_map__is_empty(pmu_cpus)) {
			if (perf_pmu__is_tool(pmu))
				pmu_cpus = tool_pmu__cpus(attr);
			else
				pmu_cpus = cpu_map__online();
		}
	} else {
		is_pmu_core = (attr->type == PERF_TYPE_HARDWARE ||
			       attr->type == PERF_TYPE_HW_CACHE);
		pmu_cpus = is_pmu_core ? cpu_map__online() : NULL;
	}

	if (has_user_cpus)
		cpus = perf_cpu_map__get(user_cpus);
	else
		cpus = perf_cpu_map__get(pmu_cpus);

	if (init_attr)
		event_attr_init(attr);

	evsel = evsel__new_idx(attr, *idx);
	if (!evsel) {
		perf_cpu_map__put(cpus);
		perf_cpu_map__put(pmu_cpus);
		return NULL;
	}

	if (name) {
		evsel->name = strdup(name);
		if (!evsel->name)
			goto out_err;
	}

	if (metric_id) {
		evsel->metric_id = strdup(metric_id);
		if (!evsel->metric_id)
			goto out_err;
	}

	(*idx)++;
	evsel->core.cpus = cpus;
	evsel->core.pmu_cpus = pmu_cpus;
	evsel->core.requires_cpu = pmu ? pmu->is_uncore : false;
	evsel->core.is_pmu_core = is_pmu_core;
	evsel->pmu = pmu;
	evsel->alternate_hw_config = alternate_hw_config;
	evsel->first_wildcard_match = first_wildcard_match;

	if (config_terms)
		list_splice_init(config_terms, &evsel->config_terms);

	if (list)
		list_add_tail(&evsel->core.node, list);

	if (has_user_cpus)
		evsel__warn_user_requested_cpus(evsel, user_cpus);

	return evsel;
out_err:
	perf_cpu_map__put(cpus);
	perf_cpu_map__put(pmu_cpus);
	zfree(&evsel->name);
	zfree(&evsel->metric_id);
	free(evsel);
	return NULL;
}

struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
				      const char *name, const char *metric_id,
				      struct perf_pmu *pmu)
{
	return __add_event(/*list=*/NULL, &idx, attr, /*init_attr=*/false, name,
			   metric_id, pmu, /*config_terms=*/NULL,
			   /*first_wildcard_match=*/NULL, /*cpu_list=*/NULL,
			   /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
}

static int add_event(struct list_head *list, int *idx,
		     struct perf_event_attr *attr, const char *name,
		     const char *metric_id, struct list_head *config_terms,
		     u64 alternate_hw_config)
{
	return __add_event(list, idx, attr, /*init_attr*/true, name, metric_id,
			   /*pmu=*/NULL, config_terms,
			   /*first_wildcard_match=*/NULL, /*cpu_list=*/NULL,
			   alternate_hw_config) ? 0 : -ENOMEM;
}

/**
 * parse_aliases - search names for entries beginning with or equalling str,
 *                 ignoring case. If multiple entries in names match str then
 *                 the longest is chosen.
 * @str: The needle to look for.
 * @names: The haystack to search.
 * @size: The size of the haystack.
 * @longest: Out argument giving the length of the matching entry.
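 *
 * For example, searching evsel__hw_cache for "l1-dcache-load-misses" matches
 * the "L1-dcache" alias case-insensitively and returns its index, with
 * @longest set to strlen("L1-dcache").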
 */
static int parse_aliases(const char *str, const char *const names[][EVSEL__MAX_ALIASES], int size,
			 int *longest)
{
	*longest = -1;
	for (int i = 0; i < size; i++) {
		for (int j = 0; j < EVSEL__MAX_ALIASES && names[i][j]; j++) {
			int n = strlen(names[i][j]);

			if (n > *longest && !strncasecmp(str, names[i][j], n))
				*longest = n;
		}
		if (*longest > 0)
			return i;
	}

	return -1;
}

typedef int config_term_func_t(struct perf_event_attr *attr,
			       struct parse_events_term *term,
			       struct parse_events_state *parse_state);
static int config_term_common(struct perf_event_attr *attr,
			      struct parse_events_term *term,
			      struct parse_events_state *parse_state);
static int config_attr(struct perf_event_attr *attr,
		       const struct parse_events_terms *head,
		       struct parse_events_state *parse_state,
		       config_term_func_t config_term);

/**
 * parse_events__decode_legacy_cache - Search name for the legacy cache event
 *                                     name composed of 1, 2 or 3 hyphen
 *                                     separated sections. The first section is
 *                                     the cache type while the others are the
 *                                     optional op and optional result. To make
 *                                     life hard the names in the table also
 *                                     contain hyphens and the longest name
 *                                     should always be selected.
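 *
 * For example, "L1-dcache-load-misses" decodes to cache type L1D, op READ
 * (from the "load" alias) and result MISS, i.e. config
 * PERF_COUNT_HW_CACHE_L1D | (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 * (PERF_COUNT_HW_CACHE_RESULT_MISS << 16).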
 */
int parse_events__decode_legacy_cache(const char *name, int extended_pmu_type, __u64 *config)
{
	int len, cache_type = -1, cache_op = -1, cache_result = -1;
	const char *name_end = &name[strlen(name) + 1];
	const char *str = name;

	cache_type = parse_aliases(str, evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX, &len);
	if (cache_type == -1)
		return -EINVAL;
	str += len + 1;

	if (str < name_end) {
		cache_op = parse_aliases(str, evsel__hw_cache_op,
					 PERF_COUNT_HW_CACHE_OP_MAX, &len);
		if (cache_op >= 0) {
			if (!evsel__is_cache_op_valid(cache_type, cache_op))
				return -EINVAL;
			str += len + 1;
		} else {
			cache_result = parse_aliases(str, evsel__hw_cache_result,
						     PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
			if (cache_result >= 0)
				str += len + 1;
		}
	}
	if (str < name_end) {
		if (cache_op < 0) {
			cache_op = parse_aliases(str, evsel__hw_cache_op,
						 PERF_COUNT_HW_CACHE_OP_MAX, &len);
			if (cache_op >= 0) {
				if (!evsel__is_cache_op_valid(cache_type, cache_op))
					return -EINVAL;
			}
		} else if (cache_result < 0) {
			cache_result = parse_aliases(str, evsel__hw_cache_result,
						     PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
		}
	}

	/*
	 * Fall back to reads:
	 */
	if (cache_op == -1)
		cache_op = PERF_COUNT_HW_CACHE_OP_READ;

	/*
	 * Fall back to accesses:
	 */
	if (cache_result == -1)
		cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;

	*config = cache_type | (cache_op << 8) | (cache_result << 16);
	if (perf_pmus__supports_extended_type())
		*config |= (__u64)extended_pmu_type << PERF_PMU_TYPE_SHIFT;
	return 0;
}

/**
 * parse_events__filter_pmu - returns false if a wildcard PMU should be
 *                            considered, true if it should be filtered.
 */
bool parse_events__filter_pmu(const struct parse_events_state *parse_state,
			      const struct perf_pmu *pmu)
{
	if (parse_state->pmu_filter == NULL)
		return false;

	return strcmp(parse_state->pmu_filter, pmu->name) != 0;
}

static int parse_events_add_pmu(struct parse_events_state *parse_state,
				struct list_head *list, struct perf_pmu *pmu,
				const struct parse_events_terms *const_parsed_terms,
				struct evsel *first_wildcard_match);

static void tracepoint_error(struct parse_events_error *e, int err,
			     const char *sys, const char *name, int column)
{
	const char *str;
	char help[BUFSIZ];

	if (!e)
		return;

	/*
	 * The error comes either directly from a syscall's errno (> 0) or from
	 * an ERR_PTR-encoded pointer (< 0).
	 */
	err = abs(err);

	switch (err) {
	case EACCES:
		str = "can't access trace events";
		break;
	case ENOENT:
		str = "unknown tracepoint";
		break;
	default:
		str = "failed to add tracepoint";
		break;
	}

	tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name);
	parse_events_error__handle(e, column, strdup(str), strdup(help));
}

static int add_tracepoint(struct parse_events_state *parse_state,
			  struct list_head *list,
			  const char *sys_name, const char *evt_name,
			  struct parse_events_error *err,
			  struct parse_events_terms *head_config, void *loc_)
{
	YYLTYPE *loc = loc_;
	struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, parse_state->idx++,
					       !parse_state->fake_tp);

	if (IS_ERR(evsel)) {
		tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name, loc->first_column);
		return PTR_ERR(evsel);
	}

	if (head_config) {
		LIST_HEAD(config_terms);

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
		list_splice(&config_terms, &evsel->config_terms);
	}

	list_add_tail(&evsel->core.node, list);
	return 0;
}

struct add_tracepoint_multi_args {
	struct parse_events_state *parse_state;
	struct list_head *list;
	const char *sys_glob;
	const char *evt_glob;
	struct parse_events_error *err;
	struct parse_events_terms *head_config;
	YYLTYPE *loc;
	int found;
};

static int add_tracepoint_multi_event_cb(void *state, const char *sys_name, const char *evt_name)
{
	struct add_tracepoint_multi_args *args = state;
	int ret;

	if (!strglobmatch(evt_name, args->evt_glob))
		return 0;

	args->found++;
	ret = add_tracepoint(args->parse_state, args->list, sys_name, evt_name,
			     args->err, args->head_config, args->loc);

	return ret;
}

static int add_tracepoint_multi_event(struct add_tracepoint_multi_args *args, const char *sys_name)
{
	if (strpbrk(args->evt_glob, "*?") == NULL) {
		/* Not a glob. */
		args->found++;
		return add_tracepoint(args->parse_state, args->list, sys_name, args->evt_glob,
				      args->err, args->head_config, args->loc);
	}

	return tp_pmu__for_each_tp_event(sys_name, args, add_tracepoint_multi_event_cb);
}

static int add_tracepoint_multi_sys_cb(void *state, const char *sys_name)
{
	struct add_tracepoint_multi_args *args = state;

	if (!strglobmatch(sys_name, args->sys_glob))
		return 0;

	return add_tracepoint_multi_event(args, sys_name);
}

static int add_tracepoint_multi_sys(struct parse_events_state *parse_state,
				    struct list_head *list,
				    const char *sys_glob, const char *evt_glob,
				    struct parse_events_error *err,
				    struct parse_events_terms *head_config, YYLTYPE *loc)
{
	struct add_tracepoint_multi_args args = {
		.parse_state = parse_state,
		.list = list,
		.sys_glob = sys_glob,
		.evt_glob = evt_glob,
		.err = err,
		.head_config = head_config,
		.loc = loc,
		.found = 0,
	};
	int ret;

	if (strpbrk(sys_glob, "*?") == NULL) {
		/* Not a glob. */
		ret = add_tracepoint_multi_event(&args, sys_glob);
	} else {
		ret = tp_pmu__for_each_tp_sys(&args, add_tracepoint_multi_sys_cb);
	}
	if (args.found == 0) {
		tracepoint_error(err, ENOENT, sys_glob, evt_glob, loc->first_column);
		return -ENOENT;
	}
	return ret;
}

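/*
 * Default length of a hardware breakpoint when none is given. A sketch of the
 * rationale, inferred from the arch cases below: on arm64 an execution
 * breakpoint covers one 4-byte instruction; on i386 userland the kernel may
 * still be 64-bit, so the word size is probed via perf_env; elsewhere the
 * native long size is used.
 */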
size_t default_breakpoint_len(void)
{
#if defined(__i386__)
	static int len;

	if (len == 0) {
		struct perf_env env = {};

		perf_env__init(&env);
		len = perf_env__kernel_is_64_bit(&env) ? sizeof(u64) : sizeof(long);
		perf_env__exit(&env);
	}
	return len;
#elif defined(__aarch64__)
	return 4;
#else
	return sizeof(long);
#endif
}

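/*
 * Parse the r/w/x breakpoint type string into attr->bp_type, e.g. "rw" sets
 * HW_BREAKPOINT_R | HW_BREAKPOINT_W. Repeated letters such as "rr" are
 * rejected, and an empty type defaults to read-write.
 */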
static int
parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
{
	int i;

	for (i = 0; i < 3; i++) {
		if (!type || !type[i])
			break;

#define CHECK_SET_TYPE(bit)		\
do {					\
	if (attr->bp_type & bit)	\
		return -EINVAL;		\
	else				\
		attr->bp_type |= bit;	\
} while (0)

		switch (type[i]) {
		case 'r':
			CHECK_SET_TYPE(HW_BREAKPOINT_R);
			break;
		case 'w':
			CHECK_SET_TYPE(HW_BREAKPOINT_W);
			break;
		case 'x':
			CHECK_SET_TYPE(HW_BREAKPOINT_X);
			break;
		default:
			return -EINVAL;
		}
	}

#undef CHECK_SET_TYPE

	if (!attr->bp_type) /* Default */
		attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;

	return 0;
}

int parse_events_add_breakpoint(struct parse_events_state *parse_state,
				struct list_head *list,
				u64 addr, char *type, u64 len,
				struct parse_events_terms *head_config)
{
	struct perf_event_attr attr;
	LIST_HEAD(config_terms);
	const char *name;

	memset(&attr, 0, sizeof(attr));
	attr.bp_addr = addr;

	if (parse_breakpoint_type(type, &attr))
		return -EINVAL;

	/* Provide some defaults if len is not specified */
	if (!len) {
		if (attr.bp_type == HW_BREAKPOINT_X)
			len = default_breakpoint_len();
		else
			len = HW_BREAKPOINT_LEN_4;
	}

	attr.bp_len = len;

	attr.type = PERF_TYPE_BREAKPOINT;
	attr.sample_period = 1;

	if (head_config) {
		if (config_attr(&attr, head_config, parse_state, config_term_common))
			return -EINVAL;

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
	}

	name = get_config_name(head_config);

	return add_event(list, &parse_state->idx, &attr, name, /*metric_id=*/NULL,
			 &config_terms, /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
}

static int check_type_val(struct parse_events_term *term,
			  struct parse_events_error *err,
			  enum parse_events__term_val_type type)
{
	if (type == term->type_val)
		return 0;

	if (err) {
		parse_events_error__handle(err, term->err_val,
					   type == PARSE_EVENTS__TERM_TYPE_NUM
					   ? strdup("expected numeric value")
					   : strdup("expected string value"),
					   NULL);
	}
	return -EINVAL;
}

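/*
 * Set by parse_events__shrink_config_terms() (e.g. from perf stat) to shrink
 * the set of accepted terms to those that configure the event itself; other
 * terms are then rejected by config_term_avail() below.
 */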
static bool config_term_shrinked;

const char *parse_events__term_type_str(enum parse_events__term_type term_type)
{
	/*
	 * Update according to parse-events.l
	 */
	static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = {
		[PARSE_EVENTS__TERM_TYPE_USER]			= "<sysfs term>",
		[PARSE_EVENTS__TERM_TYPE_CONFIG]		= "config",
		[PARSE_EVENTS__TERM_TYPE_CONFIG1]		= "config1",
		[PARSE_EVENTS__TERM_TYPE_CONFIG2]		= "config2",
		[PARSE_EVENTS__TERM_TYPE_CONFIG3]		= "config3",
		[PARSE_EVENTS__TERM_TYPE_CONFIG4]		= "config4",
		[PARSE_EVENTS__TERM_TYPE_NAME]			= "name",
		[PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD]		= "period",
		[PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ]		= "freq",
		[PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE]	= "branch_type",
		[PARSE_EVENTS__TERM_TYPE_TIME]			= "time",
		[PARSE_EVENTS__TERM_TYPE_CALLGRAPH]		= "call-graph",
		[PARSE_EVENTS__TERM_TYPE_STACKSIZE]		= "stack-size",
		[PARSE_EVENTS__TERM_TYPE_NOINHERIT]		= "no-inherit",
		[PARSE_EVENTS__TERM_TYPE_INHERIT]		= "inherit",
		[PARSE_EVENTS__TERM_TYPE_MAX_STACK]		= "max-stack",
		[PARSE_EVENTS__TERM_TYPE_MAX_EVENTS]		= "nr",
		[PARSE_EVENTS__TERM_TYPE_OVERWRITE]		= "overwrite",
		[PARSE_EVENTS__TERM_TYPE_NOOVERWRITE]		= "no-overwrite",
		[PARSE_EVENTS__TERM_TYPE_DRV_CFG]		= "driver-config",
		[PARSE_EVENTS__TERM_TYPE_PERCORE]		= "percore",
		[PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT]		= "aux-output",
		[PARSE_EVENTS__TERM_TYPE_AUX_ACTION]		= "aux-action",
		[PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE]	= "aux-sample-size",
		[PARSE_EVENTS__TERM_TYPE_METRIC_ID]		= "metric-id",
		[PARSE_EVENTS__TERM_TYPE_RAW]			= "raw",
		[PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG] = "legacy-hardware-config",
		[PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG]	= "legacy-cache-config",
		[PARSE_EVENTS__TERM_TYPE_CPU]			= "cpu",
		[PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV]		= "ratio-to-prev",
	};
	if ((unsigned int)term_type >= __PARSE_EVENTS__TERM_TYPE_NR)
		return "unknown term";

	return config_term_names[term_type];
}

static bool
config_term_avail(enum parse_events__term_type term_type, struct parse_events_error *err)
{
	char *err_str;

	if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) {
		parse_events_error__handle(err, -1,
					   strdup("Invalid term_type"), NULL);
		return false;
	}
	if (!config_term_shrinked)
		return true;

	switch (term_type) {
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
	case PARSE_EVENTS__TERM_TYPE_CONFIG4:
	case PARSE_EVENTS__TERM_TYPE_NAME:
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
	case PARSE_EVENTS__TERM_TYPE_CPU:
		return true;
	case PARSE_EVENTS__TERM_TYPE_USER:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
	case PARSE_EVENTS__TERM_TYPE_TIME:
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
	case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
	case PARSE_EVENTS__TERM_TYPE_RAW:
	case PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG:
	default:
		if (!err)
			return false;

		/* term_type is validated so indexing is safe */
		if (asprintf(&err_str, "'%s' is not usable in 'perf stat'",
			     parse_events__term_type_str(term_type)) >= 0)
			parse_events_error__handle(err, -1, err_str, NULL);
		return false;
	}
}

void parse_events__shrink_config_terms(void)
{
	config_term_shrinked = true;
}

static int config_term_common(struct perf_event_attr *attr,
			      struct parse_events_term *term,
			      struct parse_events_state *parse_state)
{
#define CHECK_TYPE_VAL(type)							\
do {										\
	if (check_type_val(term, parse_state->error, PARSE_EVENTS__TERM_TYPE_ ## type)) \
		return -EINVAL;							\
} while (0)

	switch (term->type_term) {
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
		CHECK_TYPE_VAL(NUM);
		attr->config = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		CHECK_TYPE_VAL(NUM);
		attr->config1 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		CHECK_TYPE_VAL(NUM);
		attr->config2 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
		CHECK_TYPE_VAL(NUM);
		attr->config3 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG4:
		CHECK_TYPE_VAL(NUM);
		attr->config4 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
		CHECK_TYPE_VAL(STR);
		if (strcmp(term->val.str, "no") &&
		    parse_branch_str(term->val.str,
				     &attr->branch_sample_type)) {
			parse_events_error__handle(parse_state->error, term->err_val,
						   strdup("invalid branch sample type"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_TIME:
		CHECK_TYPE_VAL(NUM);
		if (term->val.num > 1) {
			parse_events_error__handle(parse_state->error, term->err_val,
						   strdup("expected 0 or 1"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NAME:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_RAW:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
		CHECK_TYPE_VAL(NUM);
		if ((unsigned int)term->val.num > 1) {
			parse_events_error__handle(parse_state->error, term->err_val,
						   strdup("expected 0 or 1"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		CHECK_TYPE_VAL(NUM);
		if (term->val.num > UINT_MAX) {
			parse_events_error__handle(parse_state->error, term->err_val,
						   strdup("too big"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_CPU: {
		struct perf_cpu_map *map;

		if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
			if (term->val.num >= (u64)cpu__max_present_cpu().cpu) {
				parse_events_error__handle(parse_state->error, term->err_val,
							   strdup("too big"),
							   /*help=*/NULL);
				return -EINVAL;
			}
			break;
		}
		assert(term->type_val == PARSE_EVENTS__TERM_TYPE_STR);
		if (perf_pmus__find(term->val.str) != NULL)
			break;

		map = perf_cpu_map__new(term->val.str);
		if (!map && !parse_state->fake_pmu) {
			parse_events_error__handle(parse_state->error, term->err_val,
						   strdup("not a valid PMU or CPU number"),
						   /*help=*/NULL);
			return -EINVAL;
		}
		perf_cpu_map__put(map);
		break;
	}
	case PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV:
		CHECK_TYPE_VAL(STR);
		/* Reset errno so a stale ERANGE isn't misreported below. */
		errno = 0;
		if (strtod(term->val.str, NULL) <= 0) {
			parse_events_error__handle(parse_state->error, term->err_val,
						   strdup("zero or negative"),
						   NULL);
			return -EINVAL;
		}
		if (errno == ERANGE) {
			parse_events_error__handle(parse_state->error, term->err_val,
						   strdup("too big"),
						   NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
	case PARSE_EVENTS__TERM_TYPE_USER:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG:
	default:
		parse_events_error__handle(parse_state->error, term->err_term,
					   strdup(parse_events__term_type_str(term->type_term)),
					   parse_events_formats_error_string(NULL));
		return -EINVAL;
	}

	/*
	 * Check term availability after the basic checks so that
	 * PARSE_EVENTS__TERM_TYPE_USER can be found and filtered.
	 *
	 * If availability were checked on entry to this function, the user
	 * would see "'<sysfs term>' is not usable in 'perf stat'" when an
	 * invalid config term is provided for a legacy event (for example,
	 * instructions/badterm/...), which is confusing.
	 */
	if (!config_term_avail(term->type_term, parse_state->error))
		return -EINVAL;
	return 0;
#undef CHECK_TYPE_VAL
}

static bool check_pmu_is_core(__u32 type, const struct parse_events_term *term,
			      struct parse_events_error *err)
{
	struct perf_pmu *pmu = NULL;

	/* Avoid loading all PMUs with perf_pmus__find_by_type, just scan the core ones. */
	while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
		if (pmu->type == type)
			return true;
	}
	parse_events_error__handle(err, term->err_val,
				   strdup("needs a core PMU"),
				   NULL);
	return false;
}

static int config_term_pmu(struct perf_event_attr *attr,
			   struct parse_events_term *term,
			   struct parse_events_state *parse_state)
{
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG) {
		if (check_type_val(term, parse_state->error, PARSE_EVENTS__TERM_TYPE_NUM))
			return -EINVAL;
		if (term->val.num >= PERF_COUNT_HW_MAX) {
			parse_events_error__handle(parse_state->error, term->err_val,
						   strdup("too big"),
						   NULL);
			return -EINVAL;
		}
		if (!check_pmu_is_core(attr->type, term, parse_state->error))
			return -EINVAL;
		attr->config = term->val.num;
		if (perf_pmus__supports_extended_type())
			attr->config |= (__u64)attr->type << PERF_PMU_TYPE_SHIFT;
		attr->type = PERF_TYPE_HARDWARE;
		return 0;
	}
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG) {
		int cache_type, cache_op, cache_result;

		if (check_type_val(term, parse_state->error, PARSE_EVENTS__TERM_TYPE_NUM))
			return -EINVAL;
		cache_type = term->val.num & 0xFF;
		cache_op = (term->val.num >> 8) & 0xFF;
		cache_result = (term->val.num >> 16) & 0xFF;
		if ((term->val.num & ~0xFFFFFF) ||
		    cache_type >= PERF_COUNT_HW_CACHE_MAX ||
		    cache_op >= PERF_COUNT_HW_CACHE_OP_MAX ||
		    cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) {
			parse_events_error__handle(parse_state->error, term->err_val,
						   strdup("too big"),
						   NULL);
			return -EINVAL;
		}
		if (!check_pmu_is_core(attr->type, term, parse_state->error))
			return -EINVAL;
		attr->config = term->val.num;
		if (perf_pmus__supports_extended_type())
			attr->config |= (__u64)attr->type << PERF_PMU_TYPE_SHIFT;
		attr->type = PERF_TYPE_HW_CACHE;
		return 0;
	}
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER ||
	    term->type_term == PARSE_EVENTS__TERM_TYPE_DRV_CFG) {
		/*
		 * Always succeed for sysfs terms, as we don't know at this
		 * point what type they need to have.
		 */
		return 0;
	}
	return config_term_common(attr, term, parse_state);
}

static int config_term_tracepoint(struct perf_event_attr *attr,
				  struct parse_events_term *term,
				  struct parse_events_state *parse_state)
{
	switch (term->type_term) {
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
	case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		return config_term_common(attr, term, parse_state);
	case PARSE_EVENTS__TERM_TYPE_USER:
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
	case PARSE_EVENTS__TERM_TYPE_CONFIG4:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_NAME:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
	case PARSE_EVENTS__TERM_TYPE_TIME:
	case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
	case PARSE_EVENTS__TERM_TYPE_RAW:
	case PARSE_EVENTS__TERM_TYPE_CPU:
	case PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV:
	default:
		parse_events_error__handle(parse_state->error, term->err_term,
					   strdup(parse_events__term_type_str(term->type_term)),
					   strdup("valid terms: call-graph,stack-size\n"));
		return -EINVAL;
	}

	return 0;
}

static int config_attr(struct perf_event_attr *attr,
		       const struct parse_events_terms *head,
		       struct parse_events_state *parse_state,
		       config_term_func_t config_term)
{
	struct parse_events_term *term;

	list_for_each_entry(term, &head->terms, list)
		if (config_term(attr, term, parse_state))
			return -EINVAL;

	return 0;
}

static int get_config_terms(const struct parse_events_terms *head_config,
			    struct list_head *head_terms)
{
#define ADD_CONFIG_TERM(__type, __weak)				\
	struct evsel_config_term *__t;				\
								\
	__t = zalloc(sizeof(*__t));				\
	if (!__t)						\
		return -ENOMEM;					\
								\
	INIT_LIST_HEAD(&__t->list);				\
	__t->type = EVSEL__CONFIG_TERM_ ## __type;		\
	__t->weak = __weak;					\
	list_add_tail(&__t->list, head_terms)

#define ADD_CONFIG_TERM_VAL(__type, __name, __val, __weak)	\
do {								\
	ADD_CONFIG_TERM(__type, __weak);			\
	__t->val.__name = __val;				\
} while (0)

#define ADD_CONFIG_TERM_STR(__type, __val, __weak)		\
do {								\
	ADD_CONFIG_TERM(__type, __weak);			\
	__t->val.str = strdup(__val);				\
	if (!__t->val.str) {					\
		zfree(&__t);					\
		return -ENOMEM;					\
	}							\
	__t->free_str = true;					\
} while (0)

	struct parse_events_term *term;

	list_for_each_entry(term, &head_config->terms, list) {
		switch (term->type_term) {
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
			ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
			ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_TIME:
			ADD_CONFIG_TERM_VAL(TIME, time, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
			ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
			ADD_CONFIG_TERM_STR(BRANCH, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
			ADD_CONFIG_TERM_VAL(STACK_USER, stack_user,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_INHERIT:
			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
					    term->val.num ? 0 : 1, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
			ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
			ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
					    term->val.num ? 0 : 1, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
			ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_PERCORE:
			ADD_CONFIG_TERM_VAL(PERCORE, percore,
					    term->val.num ? true : false, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
			ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
			ADD_CONFIG_TERM_STR(AUX_ACTION, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
			ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV:
			ADD_CONFIG_TERM_STR(RATIO_TO_PREV, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_USER:
		case PARSE_EVENTS__TERM_TYPE_CONFIG:
		case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		case PARSE_EVENTS__TERM_TYPE_CONFIG3:
		case PARSE_EVENTS__TERM_TYPE_CONFIG4:
		case PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG:
		case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG:
		case PARSE_EVENTS__TERM_TYPE_NAME:
		case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		case PARSE_EVENTS__TERM_TYPE_RAW:
		case PARSE_EVENTS__TERM_TYPE_CPU:
		default:
			break;
		}
	}
	return 0;
}

/*
 * Add EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for
 * each bit of attr->config that the user has changed.
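 *
 * For example, a sysfs format term such as "event=0x3c" marks only the bits
 * of the "event" field as changed, while an explicit "config=N" marks all
 * bits.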
 */
static int get_config_chgs(struct perf_pmu *pmu, struct parse_events_terms *head_config,
			   struct list_head *head_terms)
{
	struct parse_events_term *term;
	u64 bits = 0;
	int type;

	list_for_each_entry(term, &head_config->terms, list) {
		switch (term->type_term) {
		case PARSE_EVENTS__TERM_TYPE_USER:
			type = perf_pmu__format_type(pmu, term->config);
			if (type != PERF_PMU_FORMAT_VALUE_CONFIG)
				continue;
			bits |= perf_pmu__format_bits(pmu, term->config);
			break;
		case PARSE_EVENTS__TERM_TYPE_CONFIG:
			bits = ~(u64)0;
			break;
		case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		case PARSE_EVENTS__TERM_TYPE_CONFIG3:
		case PARSE_EVENTS__TERM_TYPE_CONFIG4:
		case PARSE_EVENTS__TERM_TYPE_LEGACY_HARDWARE_CONFIG:
		case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE_CONFIG:
		case PARSE_EVENTS__TERM_TYPE_NAME:
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
		case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
		case PARSE_EVENTS__TERM_TYPE_TIME:
		case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
		case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
		case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
		case PARSE_EVENTS__TERM_TYPE_INHERIT:
		case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
		case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
		case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
		case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
		case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
		case PARSE_EVENTS__TERM_TYPE_PERCORE:
		case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
		case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
		case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		case PARSE_EVENTS__TERM_TYPE_RAW:
		case PARSE_EVENTS__TERM_TYPE_CPU:
		case PARSE_EVENTS__TERM_TYPE_RATIO_TO_PREV:
		default:
			break;
		}
	}

	if (bits)
		ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits, false);

#undef ADD_CONFIG_TERM
	return 0;
}

int parse_events_add_tracepoint(struct parse_events_state *parse_state,
				struct list_head *list,
				const char *sys, const char *event,
				struct parse_events_error *err,
				struct parse_events_terms *head_config, void *loc_)
{
	YYLTYPE *loc = loc_;

	if (head_config) {
		struct perf_event_attr attr;

		if (config_attr(&attr, head_config, parse_state, config_term_tracepoint))
			return -EINVAL;
	}

	return add_tracepoint_multi_sys(parse_state, list, sys, event,
					err, head_config, loc);
}

static int __parse_events_add_numeric(struct parse_events_state *parse_state,
				      struct list_head *list,
				      struct perf_pmu *pmu, u32 type, u32 extended_type,
				      u64 config, const struct parse_events_terms *head_config,
				      struct evsel *first_wildcard_match)
{
	struct perf_event_attr attr;
	LIST_HEAD(config_terms);
	const char *name, *metric_id;
	struct perf_cpu_map *cpus;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.type = type;
	attr.config = config;
	if (extended_type && (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE)) {
		assert(perf_pmus__supports_extended_type());
		attr.config |= (u64)extended_type << PERF_PMU_TYPE_SHIFT;
	}

	if (head_config) {
		if (config_attr(&attr, head_config, parse_state, config_term_common))
			return -EINVAL;

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
	}

	name = get_config_name(head_config);
	metric_id = get_config_metric_id(head_config);
	cpus = get_config_cpu(head_config, parse_state->fake_pmu);
	ret = __add_event(list, &parse_state->idx, &attr, /*init_attr*/true, name,
			  metric_id, pmu, &config_terms, first_wildcard_match,
			  cpus, /*alternate_hw_config=*/PERF_COUNT_HW_MAX) ? 0 : -ENOMEM;
	perf_cpu_map__put(cpus);
	free_config_terms(&config_terms);
	return ret;
}

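/*
 * A sketch of the wildcard behaviour: on a hybrid system a legacy event such
 * as "cycles" is added once per core PMU (e.g. cpu_core and cpu_atom), with
 * each PMU's type encoded into the extended config bits; on homogeneous
 * systems a single event is added for the matching PMU.
 */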
int parse_events_add_numeric(struct parse_events_state *parse_state,
			     struct list_head *list,
			     u32 type, u64 config,
			     const struct parse_events_terms *head_config,
			     bool wildcard)
{
	struct perf_pmu *pmu = NULL;
	bool found_supported = false;

	/* Wildcards on numeric values are only supported by core PMUs. */
	if (wildcard && perf_pmus__supports_extended_type()) {
		struct evsel *first_wildcard_match = NULL;

		while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
			int ret;

			found_supported = true;
			if (parse_events__filter_pmu(parse_state, pmu))
				continue;

			ret = __parse_events_add_numeric(parse_state, list, pmu,
							 type, pmu->type,
							 config, head_config,
							 first_wildcard_match);
			if (ret)
				return ret;
			if (first_wildcard_match == NULL)
				first_wildcard_match =
					container_of(list->prev, struct evsel, core.node);
		}
		if (found_supported)
			return 0;
	}
	return __parse_events_add_numeric(parse_state, list, perf_pmus__find_by_type(type),
					  type, /*extended_type=*/0, config, head_config,
					  /*first_wildcard_match=*/NULL);
}

static bool config_term_percore(struct list_head *config_terms)
{
	struct evsel_config_term *term;

	list_for_each_entry(term, config_terms, list) {
		if (term->type == EVSEL__CONFIG_TERM_PERCORE)
			return term->val.percore;
	}

	return false;
}

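/*
 * Add an event for a single, known PMU. Roughly: copy the parsed terms, turn
 * raw terms into events or config terms (fix_raw), apply hardcoded terms to
 * the attr, rewrite any event/alias names into format terms
 * (perf_pmu__check_alias), re-apply the rewritten terms, then let
 * perf_pmu__config() fill in the remaining format-based fields before the
 * evsel is created.
 */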
static int parse_events_add_pmu(struct parse_events_state *parse_state,
				struct list_head *list, struct perf_pmu *pmu,
				const struct parse_events_terms *const_parsed_terms,
				struct evsel *first_wildcard_match)
{
	u64 alternate_hw_config = PERF_COUNT_HW_MAX;
	struct perf_event_attr attr;
	struct perf_pmu_info info;
	struct evsel *evsel;
	struct parse_events_error *err = parse_state->error;
	LIST_HEAD(config_terms);
	struct parse_events_terms parsed_terms;
	bool alias_rewrote_terms = false;
	struct perf_cpu_map *term_cpu = NULL;

	if (verbose > 1) {
		struct strbuf sb;

		strbuf_init(&sb, /*hint=*/ 0);
		if (pmu->selectable && const_parsed_terms &&
		    list_empty(&const_parsed_terms->terms)) {
			strbuf_addf(&sb, "%s//", pmu->name);
		} else {
			strbuf_addf(&sb, "%s/", pmu->name);
			parse_events_terms__to_strbuf(const_parsed_terms, &sb);
			strbuf_addch(&sb, '/');
		}
		fprintf(stderr, "Attempt to add: %s\n", sb.buf);
		strbuf_release(&sb);
	}

	memset(&attr, 0, sizeof(attr));
	if (pmu->perf_event_attr_init_default)
		pmu->perf_event_attr_init_default(pmu, &attr);

	attr.type = pmu->type;

	if (!const_parsed_terms || list_empty(&const_parsed_terms->terms)) {
		evsel = __add_event(list, &parse_state->idx, &attr,
				    /*init_attr=*/true, /*name=*/NULL,
				    /*metric_id=*/NULL, pmu,
				    /*config_terms=*/NULL, first_wildcard_match,
				    /*cpu_list=*/NULL, alternate_hw_config);
		return evsel ? 0 : -ENOMEM;
	}

	parse_events_terms__init(&parsed_terms);
	if (const_parsed_terms) {
		int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms);

		if (ret)
			return ret;
	}
	fix_raw(&parsed_terms, pmu);

	/* Configure attr/terms with a known PMU, this will set hardcoded terms. */
	if (config_attr(&attr, &parsed_terms, parse_state, config_term_pmu)) {
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	/* Look for event names in the terms and rewrite into format based terms. */
	if (perf_pmu__check_alias(pmu, &parsed_terms,
				  &info, &alias_rewrote_terms,
				  &alternate_hw_config, err)) {
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	if (verbose > 1) {
		struct strbuf sb;

		strbuf_init(&sb, /*hint=*/ 0);
		parse_events_terms__to_strbuf(&parsed_terms, &sb);
		fprintf(stderr, "..after resolving event: %s/%s/\n", pmu->name, sb.buf);
		strbuf_release(&sb);
	}

	/* Configure attr/terms again if an alias was expanded. */
	if (alias_rewrote_terms &&
	    config_attr(&attr, &parsed_terms, parse_state, config_term_pmu)) {
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	if (get_config_terms(&parsed_terms, &config_terms)) {
		parse_events_terms__exit(&parsed_terms);
		return -ENOMEM;
	}

	/*
	 * When using default config, record which bits of attr->config were
	 * changed by the user.
	 */
	if (pmu->perf_event_attr_init_default &&
	    get_config_chgs(pmu, &parsed_terms, &config_terms)) {
		parse_events_terms__exit(&parsed_terms);
		return -ENOMEM;
	}

	/* Skip configuring hard coded terms that were applied by config_attr. */
	if (perf_pmu__config(pmu, &attr, &parsed_terms, /*apply_hardcoded=*/false,
			     parse_state->error)) {
		free_config_terms(&config_terms);
		parse_events_terms__exit(&parsed_terms);
		return -EINVAL;
	}

	term_cpu = get_config_cpu(&parsed_terms, parse_state->fake_pmu);
	evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true,
			    get_config_name(&parsed_terms),
			    get_config_metric_id(&parsed_terms), pmu,
			    &config_terms, first_wildcard_match, term_cpu, alternate_hw_config);
	perf_cpu_map__put(term_cpu);
	if (!evsel) {
		parse_events_terms__exit(&parsed_terms);
		return -ENOMEM;
	}

	if (evsel->name)
		evsel->use_config_name = true;

	evsel->percore = config_term_percore(&evsel->config_terms);

	parse_events_terms__exit(&parsed_terms);
	free((char *)evsel->unit);
	evsel->unit = strdup(info.unit);
	evsel->scale = info.scale;
	evsel->per_pkg = info.per_pkg;
	evsel->snapshot = info.snapshot;
	evsel->retirement_latency.mean = info.retirement_latency_mean;
	evsel->retirement_latency.min = info.retirement_latency_min;
	evsel->retirement_latency.max = info.retirement_latency_max;

	return 0;
}

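/*
 * Add an event for every PMU that advertises event_name, e.g. an uncore event
 * present on several instances of the same uncore PMU becomes one evsel per
 * instance; a fake PMU is also tried when requested (used for testing).
 */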
int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
			       const char *event_name,
			       const struct parse_events_terms *const_parsed_terms,
			       struct list_head **listp, void *loc_)
{
	struct parse_events_term *term;
	struct list_head *list = NULL;
	struct perf_pmu *pmu = NULL;
	YYLTYPE *loc = loc_;
	int ok = 0;
	const char *config;
	struct parse_events_terms parsed_terms;
	struct evsel *first_wildcard_match = NULL;

	*listp = NULL;

	parse_events_terms__init(&parsed_terms);
	if (const_parsed_terms) {
		int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms);

		if (ret)
			return ret;
	}

	config = strdup(event_name);
	if (!config)
		goto out_err;

	if (parse_events_term__num(&term,
				   PARSE_EVENTS__TERM_TYPE_USER,
				   config, /*num=*/1, /*novalue=*/true,
				   loc, /*loc_val=*/NULL) < 0) {
		zfree(&config);
		goto out_err;
	}
	list_add_tail(&term->list, &parsed_terms.terms);

	/* Add it for all PMUs that support the alias */
	list = malloc(sizeof(struct list_head));
	if (!list)
		goto out_err;

	INIT_LIST_HEAD(list);

	while ((pmu = perf_pmus__scan_for_event(pmu, event_name)) != NULL) {

		if (parse_events__filter_pmu(parse_state, pmu))
			continue;

		if (!perf_pmu__have_event(pmu, event_name))
			continue;

		if (!parse_events_add_pmu(parse_state, list, pmu,
					  &parsed_terms, first_wildcard_match)) {
			struct strbuf sb;

			strbuf_init(&sb, /*hint=*/ 0);
			parse_events_terms__to_strbuf(&parsed_terms, &sb);
			pr_debug("%s -> %s/%s/\n", event_name, pmu->name, sb.buf);
			strbuf_release(&sb);
			ok++;
		}
		if (first_wildcard_match == NULL)
			first_wildcard_match = container_of(list->prev, struct evsel, core.node);
	}

	if (parse_state->fake_pmu) {
		if (!parse_events_add_pmu(parse_state, list, perf_pmus__fake_pmu(), &parsed_terms,
					  first_wildcard_match)) {
			struct strbuf sb;

			strbuf_init(&sb, /*hint=*/ 0);
			parse_events_terms__to_strbuf(&parsed_terms, &sb);
			pr_debug("%s -> fake/%s/\n", event_name, sb.buf);
			strbuf_release(&sb);
			ok++;
		}
	}

out_err:
	parse_events_terms__exit(&parsed_terms);
	if (ok)
		*listp = list;
	else
		free(list);

	return ok ? 0 : -1;
}

int parse_events_multi_pmu_add_or_add_pmu(struct parse_events_state *parse_state,
					  const char *event_or_pmu,
					  const struct parse_events_terms *const_parsed_terms,
					  struct list_head **listp,
					  void *loc_)
{
	YYLTYPE *loc = loc_;
	struct perf_pmu *pmu;
	int ok = 0;
	char *help;
	struct evsel *first_wildcard_match = NULL;

	*listp = malloc(sizeof(**listp));
	if (!*listp)
		return -ENOMEM;

	INIT_LIST_HEAD(*listp);

	/* Attempt to add to list assuming event_or_pmu is a PMU name. */
	pmu = perf_pmus__find(event_or_pmu);
	if (pmu && !parse_events_add_pmu(parse_state, *listp, pmu, const_parsed_terms,
					 first_wildcard_match))
		return 0;

	if (parse_state->fake_pmu) {
		if (!parse_events_add_pmu(parse_state, *listp, perf_pmus__fake_pmu(),
					  const_parsed_terms,
					  first_wildcard_match))
			return 0;
	}

	pmu = NULL;
	/* Failed to add, try wildcard expansion of event_or_pmu as a PMU name. */
	while ((pmu = perf_pmus__scan_matching_wildcard(pmu, event_or_pmu)) != NULL) {

		if (parse_events__filter_pmu(parse_state, pmu))
			continue;

		if (!parse_events_add_pmu(parse_state, *listp, pmu,
					  const_parsed_terms,
					  first_wildcard_match)) {
			ok++;
			parse_state->wild_card_pmus = true;
		}
		if (first_wildcard_match == NULL) {
			first_wildcard_match =
				container_of((*listp)->prev, struct evsel, core.node);
		}
	}
	if (ok)
		return 0;

	/* Failure to add, assume event_or_pmu is an event name. */
	zfree(listp);
	if (!parse_events_multi_pmu_add(parse_state, event_or_pmu,
					const_parsed_terms, listp, loc))
		return 0;

	if (asprintf(&help, "Unable to find PMU or event on a PMU of '%s'", event_or_pmu) < 0)
		help = NULL;
	parse_events_error__handle(parse_state->error, loc->first_column,
				   strdup("Bad event or PMU"),
				   help);
	zfree(listp);
	return -EINVAL;
}

void parse_events__set_leader(char *name, struct list_head *list)
{
	struct evsel *leader;

	if (list_empty(list)) {
		WARN_ONCE(true, "WARNING: failed to set leader: empty list");
		return;
	}

	leader = list_first_entry(list, struct evsel, core.node);
	__perf_evlist__set_leader(list, &leader->core);
	zfree(&leader->group_name);
	leader->group_name = name;
}

static int parse_events__modifier_list(struct parse_events_state *parse_state,
				       YYLTYPE *loc,
				       struct list_head *list,
				       struct parse_events_modifier mod,
				       bool group)
{
	struct evsel *evsel;

	if (!group && mod.weak) {
		parse_events_error__handle(parse_state->error, loc->first_column,
					   strdup("Weak modifier is for use with groups"), NULL);
		return -EINVAL;
	}

	__evlist__for_each_entry(list, evsel) {
		/* Translate modifiers into the equivalent evsel excludes. */
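		/*
		 * For example, "instructions:u" clears exclude_user and sets
		 * exclude_kernel and exclude_hv, so only user space is
		 * counted.
		 */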
1734 int eu = group ? evsel->core.attr.exclude_user : 0;
1735 int ek = group ? evsel->core.attr.exclude_kernel : 0;
1736 int eh = group ? evsel->core.attr.exclude_hv : 0;
1737 int eH = group ? evsel->core.attr.exclude_host : 0;
1738 int eG = group ? evsel->core.attr.exclude_guest : 0;
1739 int exclude = eu | ek | eh;
1740 int exclude_GH = eG | eH;
1741
1742 if (mod.user) {
1743 if (!exclude)
1744 exclude = eu = ek = eh = 1;
1745 eu = 0;
1746 }
1747 if (mod.kernel) {
1748 if (!exclude)
1749 exclude = eu = ek = eh = 1;
1750 ek = 0;
1751 }
1752 if (mod.hypervisor) {
1753 if (!exclude)
1754 exclude = eu = ek = eh = 1;
1755 eh = 0;
1756 }
1757 if (mod.guest) {
1758 if (!exclude_GH)
1759 exclude_GH = eG = eH = 1;
1760 eG = 0;
1761 }
1762 if (mod.host) {
1763 if (!exclude_GH)
1764 exclude_GH = eG = eH = 1;
1765 eH = 0;
1766 }
1767 if (!exclude_GH && exclude_GH_default) {
1768 if (perf_host)
1769 eG = 1;
1770 else if (perf_guest)
1771 eH = 1;
1772 }
1773
1774 evsel->core.attr.exclude_user = eu;
1775 evsel->core.attr.exclude_kernel = ek;
1776 evsel->core.attr.exclude_hv = eh;
1777 evsel->core.attr.exclude_host = eH;
1778 evsel->core.attr.exclude_guest = eG;
1779 evsel->exclude_GH = exclude_GH;
1780
1781 /* Simple modifiers copied to the evsel. */
1782 if (mod.precise) {
1783 u8 precise = evsel->core.attr.precise_ip + mod.precise;
1784 /*
1785 * precise ip:
1786 *
1787 * 0 - SAMPLE_IP can have arbitrary skid
1788 * 1 - SAMPLE_IP must have constant skid
1789 * 2 - SAMPLE_IP requested to have 0 skid
1790 * 3 - SAMPLE_IP must have 0 skid
1791 *
1792 * See also PERF_RECORD_MISC_EXACT_IP
1793 */
1794 if (precise > 3) {
1795 char *help;
1796
1797 if (asprintf(&help,
1798 "Maximum combined precise value is 3, adding precision to \"%s\"",
1799 evsel__name(evsel)) > 0) {
1800 parse_events_error__handle(parse_state->error,
1801 loc->first_column,
1802 help, NULL);
1803 }
1804 return -EINVAL;
1805 }
1806 evsel->core.attr.precise_ip = precise;
1807 }
1808 if (mod.precise_max)
1809 evsel->precise_max = 1;
1810 if (mod.non_idle)
1811 evsel->core.attr.exclude_idle = 1;
1812 if (mod.sample_read)
1813 evsel->sample_read = 1;
1814 if (mod.pinned && evsel__is_group_leader(evsel))
1815 evsel->core.attr.pinned = 1;
1816 if (mod.exclusive && evsel__is_group_leader(evsel))
1817 evsel->core.attr.exclusive = 1;
1818 if (mod.weak)
1819 evsel->weak_group = true;
1820 if (mod.bpf)
1821 evsel->bpf_counter = true;
1822 if (mod.retire_lat)
1823 evsel->retire_lat = true;
1824 if (mod.dont_regroup)
1825 evsel->dont_regroup = true;
1826 }
1827 return 0;
1828}

int parse_events__modifier_group(struct parse_events_state *parse_state, void *loc,
				 struct list_head *list,
				 struct parse_events_modifier mod)
{
	return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/true);
}

int parse_events__modifier_event(struct parse_events_state *parse_state, void *loc,
				 struct list_head *list,
				 struct parse_events_modifier mod)
{
	return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/false);
}

int parse_events__set_default_name(struct list_head *list, char *name)
{
	struct evsel *evsel;
	bool used_name = false;

	__evlist__for_each_entry(list, evsel) {
		if (!evsel->name) {
			evsel->name = used_name ? strdup(name) : name;
			used_name = true;
			if (!evsel->name)
				return -ENOMEM;
		}
	}
	if (!used_name)
		free(name);
	return 0;
}

static int parse_events__scanner(const char *str,
				 struct parse_events_state *parse_state)
{
	YY_BUFFER_STATE buffer;
	void *scanner;
	int ret;

	ret = parse_events_lex_init_extra(parse_state, &scanner);
	if (ret)
		return ret;

	buffer = parse_events__scan_string(str, scanner);

#ifdef PARSER_DEBUG
	parse_events_debug = 1;
	parse_events_set_debug(1, scanner);
#endif
	ret = parse_events_parse(parse_state, scanner);

	parse_events__flush_buffer(buffer, scanner);
	parse_events__delete_buffer(buffer, scanner);
	parse_events_lex_destroy(scanner);
	return ret;
}

/*
 * Parse an event config string and return a list of event terms.
 */
int parse_events_terms(struct parse_events_terms *terms, const char *str)
{
	struct parse_events_state parse_state = {
		.terms  = NULL,
		.stoken = PE_START_TERMS,
	};
	int ret;

	ret = parse_events__scanner(str, &parse_state);
	if (!ret)
		list_splice(&parse_state.terms->terms, &terms->terms);

	zfree(&parse_state.terms);
	return ret;
}
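
/*
 * A minimal usage sketch (hypothetical caller) of the above, parsing a raw
 * term list and walking the result:
 *
 *	struct parse_events_terms terms;
 *	struct parse_events_term *term;
 *
 *	parse_events_terms__init(&terms);
 *	if (!parse_events_terms(&terms, "config=0x1,period=0x1000")) {
 *		list_for_each_entry(term, &terms.terms, list)
 *			pr_debug("parsed term '%s'\n", term->config);
 *	}
 *	parse_events_terms__exit(&terms);
 */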

static int evsel__compute_group_pmu_name(struct evsel *evsel,
					 const struct list_head *head)
{
	struct evsel *leader = evsel__leader(evsel);
	struct evsel *pos;
	const char *group_pmu_name;
	struct perf_pmu *pmu = evsel__find_pmu(evsel);

	if (!pmu) {
		/*
		 * For PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE types the PMU
		 * is a core PMU, but in heterogeneous systems this is
		 * unknown. For now pick the first core PMU.
		 */
		pmu = perf_pmus__scan_core(NULL);
	}
	if (!pmu) {
		pr_debug("No PMU found for '%s'\n", evsel__name(evsel));
		return -EINVAL;
	}
	group_pmu_name = pmu->name;
	/*
	 * Software events may be in a group with other uncore PMU events. Use
	 * the pmu_name of the first non-software event to avoid breaking the
	 * software event out of the group.
	 *
	 * Aux event leaders, like intel_pt, expect a group with events from
	 * other PMUs, so substitute the AUX event's PMU in this case.
	 */
	if (perf_pmu__is_software(pmu) || evsel__is_aux_event(leader)) {
		struct perf_pmu *leader_pmu = evsel__find_pmu(leader);

		if (!leader_pmu) {
			/* As with determining pmu above. */
			leader_pmu = perf_pmus__scan_core(NULL);
		}
		/*
		 * Starting with the leader, find the first event with a named
		 * non-software PMU. for_each_group_(member|evsel) isn't used as
		 * the list isn't yet sorted to put evsels in the same group
		 * together.
		 */
		if (leader_pmu && !perf_pmu__is_software(leader_pmu)) {
			group_pmu_name = leader_pmu->name;
		} else if (leader->core.nr_members > 1) {
			list_for_each_entry(pos, head, core.node) {
				struct perf_pmu *pos_pmu;

				if (pos == leader || evsel__leader(pos) != leader)
					continue;
				pos_pmu = evsel__find_pmu(pos);
				if (!pos_pmu) {
					/* As with determining pmu above. */
					pos_pmu = perf_pmus__scan_core(NULL);
				}
				if (pos_pmu && !perf_pmu__is_software(pos_pmu)) {
					group_pmu_name = pos_pmu->name;
					break;
				}
			}
		}
	}
	/* Record computed name. */
	evsel->group_pmu_name = strdup(group_pmu_name);
	return evsel->group_pmu_name ? 0 : -ENOMEM;
}
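
/*
 * For example, in the group "{intel_pt//,cycles}" on a machine with an
 * intel_pt PMU, the leader is an aux event, so the branch above assigns the
 * cycles evsel the group_pmu_name "intel_pt" rather than its own core PMU
 * name, keeping the group together when events are later sorted by PMU.
 */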

__weak int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs)
{
	/* Order by insertion index. */
	return lhs->core.idx - rhs->core.idx;
}

static int evlist__cmp(void *_fg_idx, const struct list_head *l, const struct list_head *r)
{
	const struct perf_evsel *lhs_core = container_of(l, struct perf_evsel, node);
	const struct evsel *lhs = container_of(lhs_core, struct evsel, core);
	const struct perf_evsel *rhs_core = container_of(r, struct perf_evsel, node);
	const struct evsel *rhs = container_of(rhs_core, struct evsel, core);
	int *force_grouped_idx = _fg_idx;
	int lhs_sort_idx, rhs_sort_idx, ret;
	const char *lhs_pmu_name, *rhs_pmu_name;

	/*
	 * Get the indexes of the 2 events to sort. If the events are
	 * in groups then the leader's index is used otherwise the
	 * event's index is used. An index may be forced for events that
	 * must be in the same group, namely Intel topdown events.
	 */
	if (lhs->dont_regroup) {
		lhs_sort_idx = lhs_core->idx;
	} else if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(lhs)) {
		lhs_sort_idx = *force_grouped_idx;
	} else {
		bool lhs_has_group = lhs_core->leader != lhs_core || lhs_core->nr_members > 1;

		lhs_sort_idx = lhs_has_group ? lhs_core->leader->idx : lhs_core->idx;
	}
	if (rhs->dont_regroup) {
		rhs_sort_idx = rhs_core->idx;
	} else if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(rhs)) {
		rhs_sort_idx = *force_grouped_idx;
	} else {
		bool rhs_has_group = rhs_core->leader != rhs_core || rhs_core->nr_members > 1;

		rhs_sort_idx = rhs_has_group ? rhs_core->leader->idx : rhs_core->idx;
	}

	/* If the indices differ then respect the insertion order. */
	if (lhs_sort_idx != rhs_sort_idx)
		return lhs_sort_idx - rhs_sort_idx;

	/*
	 * Ignoring forcing, lhs_sort_idx == rhs_sort_idx so lhs and rhs should
	 * be in the same group. Events in the same group need to be ordered by
	 * their grouping PMU name as the group will be broken to ensure only
	 * events on the same PMU are programmed together.
	 *
	 * With forcing the lhs_sort_idx == rhs_sort_idx shows that one or both
	 * events are being forced to be at force_group_index. If only one event
	 * is being forced then the other event is the group leader of the group
	 * we're trying to force the event into. Ensure for the force grouped
	 * case that the PMU name ordering is also respected.
	 */
	lhs_pmu_name = lhs->group_pmu_name;
	rhs_pmu_name = rhs->group_pmu_name;
	ret = strcmp(lhs_pmu_name, rhs_pmu_name);
	if (ret)
		return ret;

	/*
	 * Architecture specific sorting, by default sort events in the same
	 * group with the same PMU by their insertion index. On Intel topdown
	 * constraints must be adhered to - slots first, etc.
	 */
	return arch_evlist__cmp(lhs, rhs);
}

int __weak arch_evlist__add_required_events(struct list_head *list __always_unused)
{
	return 0;
}

static int parse_events__sort_events_and_fix_groups(struct list_head *list)
{
	int idx = 0, force_grouped_idx = -1;
	struct evsel *pos, *cur_leader = NULL;
	struct perf_evsel *cur_leaders_grp = NULL;
	bool idx_changed = false;
	int orig_num_leaders = 0, num_leaders = 0;
	int ret;
	struct evsel *force_grouped_leader = NULL;
	bool last_event_was_forced_leader = false;

	/* On x86 topdown metrics events require a slots event. */
	ret = arch_evlist__add_required_events(list);
	if (ret)
		return ret;

	/*
	 * Compute index to insert ungrouped events at. Place them where the
	 * first ungrouped event appears.
	 */
	list_for_each_entry(pos, list, core.node) {
		const struct evsel *pos_leader = evsel__leader(pos);

		ret = evsel__compute_group_pmu_name(pos, list);
		if (ret)
			return ret;

		if (pos == pos_leader)
			orig_num_leaders++;

		/*
		 * Ensure indexes are sequential, in particular for multiple
		 * event lists being merged. The indexes are used to detect when
		 * the user order is modified.
		 */
		pos->core.idx = idx++;

		/*
		 * Remember an index to sort all forced grouped events
		 * together to. Use the group leader as some events
		 * must appear first within the group.
		 */
		if (force_grouped_idx == -1 && arch_evsel__must_be_in_group(pos))
			force_grouped_idx = pos_leader->core.idx;
	}

	/* Sort events. */
	list_sort(&force_grouped_idx, list, evlist__cmp);

	/*
	 * Recompute groups, splitting for PMUs and adding groups for events
	 * that require them.
	 */
	idx = 0;
	list_for_each_entry(pos, list, core.node) {
		struct evsel *pos_leader = evsel__leader(pos);
		const char *pos_pmu_name = pos->group_pmu_name;
		const char *cur_leader_pmu_name;
		bool pos_force_grouped = force_grouped_idx != -1 && !pos->dont_regroup &&
					 arch_evsel__must_be_in_group(pos);

		/* Reset index and nr_members. */
		if (pos->core.idx != idx)
			idx_changed = true;
		pos->core.idx = idx++;
		pos->core.nr_members = 0;

		/*
		 * Set the group leader respecting the given groupings and that
		 * groups can't span PMUs.
		 */
		if (!cur_leader || pos->dont_regroup) {
			cur_leader = pos->dont_regroup ? pos_leader : pos;
			cur_leaders_grp = &cur_leader->core;
			if (pos_force_grouped)
				force_grouped_leader = pos;
		}
		cur_leader_pmu_name = cur_leader->group_pmu_name;
		if (strcmp(cur_leader_pmu_name, pos_pmu_name)) {
			/* PMU changed so the group/leader must change. */
			cur_leader = pos;
			cur_leaders_grp = pos->core.leader;
			if (pos_force_grouped && force_grouped_leader == NULL)
				force_grouped_leader = pos;
		} else if (cur_leaders_grp != pos->core.leader) {
			bool split_even_if_last_leader_was_forced = true;

			/*
			 * Event is for a different group. If the last event was
			 * the forced group leader then subsequent group events
			 * and forced events should be in the same group. If
			 * there are no other forced group events then the
			 * forced group leader wasn't really being forced into a
			 * group, it just set arch_evsel__must_be_in_group, and
			 * we don't want the group to split here.
			 */
			if (force_grouped_idx != -1 && last_event_was_forced_leader) {
				struct evsel *pos2 = pos;
				/*
				 * Search the whole list as the group leaders
				 * aren't currently valid.
				 */
				list_for_each_entry_continue(pos2, list, core.node) {
					if (pos->core.leader == pos2->core.leader &&
					    arch_evsel__must_be_in_group(pos2)) {
						split_even_if_last_leader_was_forced = false;
						break;
					}
				}
			}
			if (!last_event_was_forced_leader || split_even_if_last_leader_was_forced) {
				if (pos_force_grouped) {
					if (force_grouped_leader) {
						cur_leader = force_grouped_leader;
						cur_leaders_grp = force_grouped_leader->core.leader;
					} else {
						cur_leader = force_grouped_leader = pos;
						cur_leaders_grp = &pos->core;
					}
				} else {
					cur_leader = pos;
					cur_leaders_grp = pos->core.leader;
				}
			}
		}
		if (pos_leader != cur_leader) {
			/* The leader changed so update it. */
			evsel__set_leader(pos, cur_leader);
		}
		last_event_was_forced_leader = (force_grouped_leader == pos);
	}
	list_for_each_entry(pos, list, core.node) {
		struct evsel *pos_leader = evsel__leader(pos);

		if (pos == pos_leader)
			num_leaders++;
		pos_leader->core.nr_members++;
	}
	return (idx_changed || num_leaders != orig_num_leaders) ? 1 : 0;
}
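
/*
 * For example (hypothetical PMU/event names), the single group
 * "{uncore_a/e1/,cpu/e2/,uncore_a/e3/}" is sorted so that events sharing a
 * group PMU name become adjacent and is then split into one group per PMU,
 * i.e. "{cpu/e2/},{uncore_a/e1/,uncore_a/e3/}". The return value of 1 lets
 * callers warn that regrouping happened.
 */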

int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filter,
		   struct parse_events_error *err, bool fake_pmu,
		   bool warn_if_reordered, bool fake_tp)
{
	struct parse_events_state parse_state = {
		.list = LIST_HEAD_INIT(parse_state.list),
		.idx = evlist->core.nr_entries,
		.error = err,
		.stoken = PE_START_EVENTS,
		.fake_pmu = fake_pmu,
		.fake_tp = fake_tp,
		.pmu_filter = pmu_filter,
		.match_legacy_cache_terms = true,
	};
	int ret, ret2;

	ret = parse_events__scanner(str, &parse_state);

	if (!ret && list_empty(&parse_state.list)) {
		WARN_ONCE(true, "WARNING: event parser found nothing\n");
		return -1;
	}

	ret2 = parse_events__sort_events_and_fix_groups(&parse_state.list);
	if (ret2 < 0)
		return ret ? ret : ret2;

	/*
	 * Add list to the evlist even with errors to allow callers to clean up.
	 */
	evlist__splice_list_tail(evlist, &parse_state.list);

	if (ret2 && warn_if_reordered && !parse_state.wild_card_pmus) {
		pr_warning("WARNING: events were regrouped to match PMUs\n");

		if (verbose > 0) {
			struct strbuf sb = STRBUF_INIT;

			evlist__format_evsels(evlist, &sb, 2048);
			pr_debug("evlist after sorting/fixing: '%s'\n", sb.buf);
			strbuf_release(&sb);
		}
	}
	if (!ret) {
		struct evsel *last;

		last = evlist__last(evlist);
		last->cmdline_group_boundary = true;

		return 0;
	}

	/*
	 * There are 2 users - builtin-record and builtin-test objects.
	 * Both call evlist__delete in case of error, so we don't
	 * need to bother.
	 */
	return ret;
}

int parse_event(struct evlist *evlist, const char *str)
{
	struct parse_events_error err;
	int ret;

	parse_events_error__init(&err);
	ret = parse_events(evlist, str, &err);
	if (ret && verbose > 0)
		parse_events_error__print(&err, str);
	parse_events_error__exit(&err);
	return ret;
}

struct parse_events_error_entry {
	/** @list: The list the error is part of. */
	struct list_head list;
	/** @idx: index in the parsed string */
	int idx;
	/** @str: string to display at the index */
	char *str;
	/** @help: optional help string */
	char *help;
};

void parse_events_error__init(struct parse_events_error *err)
{
	INIT_LIST_HEAD(&err->list);
}

void parse_events_error__exit(struct parse_events_error *err)
{
	struct parse_events_error_entry *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, &err->list, list) {
		zfree(&pos->str);
		zfree(&pos->help);
		list_del_init(&pos->list);
		free(pos);
	}
}

void parse_events_error__handle(struct parse_events_error *err, int idx,
				char *str, char *help)
{
	struct parse_events_error_entry *entry;

	if (WARN(!str || !err, "WARNING: failed to provide error string or struct\n"))
		goto out_free;

	entry = zalloc(sizeof(*entry));
	if (!entry) {
		pr_err("Failed to allocate memory for event parsing error: %s (%s)\n",
			str, help ?: "<no help>");
		goto out_free;
	}
	entry->idx = idx;
	entry->str = str;
	entry->help = help;
	list_add(&entry->list, &err->list);
	return;
out_free:
	free(str);
	free(help);
}
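
/*
 * Callers hand over ownership of both strings, e.g. (sketch of the pattern
 * used throughout this file):
 *
 *	parse_events_error__handle(err, loc->first_column,
 *				   strdup("unknown term"),
 *				   parse_events_formats_error_string(NULL));
 *
 * On failure the strings are freed here rather than by the caller.
 */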

#define MAX_WIDTH 1000
static int get_term_width(void)
{
	struct winsize ws;

	get_term_dimensions(&ws);
	return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col;
}

static void __parse_events_error__print(int err_idx, const char *err_str,
					const char *err_help, const char *event)
{
	const char *str = "invalid or unsupported event: ";
	char _buf[MAX_WIDTH];
	char *buf = (char *) event;
	int idx = 0;
	if (err_str) {
		/* -2 for extra '' in the final fprintf */
		int width = get_term_width() - 2;
		int len_event = strlen(event);
		int len_str, max_len, cut = 0;

		/*
		 * Maximum error index indent, we will cut
		 * the event string if it's bigger.
		 */
		int max_err_idx = 13;

		/*
		 * Let's be specific with the message when
		 * we have the precise error.
		 */
		str = "event syntax error: ";
		len_str = strlen(str);
		max_len = width - len_str;

		buf = _buf;

		/* We're cutting from the beginning. */
		if (err_idx > max_err_idx)
			cut = err_idx - max_err_idx;

		strncpy(buf, event + cut, max_len);

		/* Mark cut parts with '..' on both sides. */
		if (cut)
			buf[0] = buf[1] = '.';

		if ((len_event - cut) > max_len) {
			buf[max_len - 1] = buf[max_len - 2] = '.';
			buf[max_len] = 0;
		}

		idx = len_str + err_idx - cut;
	}

	fprintf(stderr, "%s'%s'\n", str, buf);
	if (idx) {
		fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err_str);
		if (err_help)
			fprintf(stderr, "\n%s\n", err_help);
	}
}
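
/*
 * The output looks roughly like this (a sketch; exact indentation depends on
 * the error index and terminal width):
 *
 *	event syntax error: 'cycles/bogus=3/'
 *	                           \___ unknown term 'bogus'
 *
 * with long event strings cut down and marked with '..' on either side.
 */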

void parse_events_error__print(const struct parse_events_error *err,
			       const char *event)
{
	struct parse_events_error_entry *pos;
	bool first = true;

	list_for_each_entry(pos, &err->list, list) {
		if (!first)
			fputs("\n", stderr);
		__parse_events_error__print(pos->idx, pos->str, pos->help, event);
		first = false;
	}
}

/*
 * In the list of errors err, do any of the error strings (str) contain the
 * given needle string?
 */
bool parse_events_error__contains(const struct parse_events_error *err,
				  const char *needle)
{
	struct parse_events_error_entry *pos;

	list_for_each_entry(pos, &err->list, list) {
		if (strstr(pos->str, needle) != NULL)
			return true;
	}
	return false;
}

#undef MAX_WIDTH

int parse_events_option(const struct option *opt, const char *str,
			int unset __maybe_unused)
{
	struct parse_events_option_args *args = opt->value;
	struct parse_events_error err;
	int ret;

	parse_events_error__init(&err);
	ret = __parse_events(*args->evlistp, str, args->pmu_filter, &err,
			     /*fake_pmu=*/false, /*warn_if_reordered=*/true,
			     /*fake_tp=*/false);

	if (ret) {
		parse_events_error__print(&err, str);
		fprintf(stderr, "Run 'perf list' for a list of valid events\n");
	}
	parse_events_error__exit(&err);

	return ret;
}

int parse_events_option_new_evlist(const struct option *opt, const char *str, int unset)
{
	struct parse_events_option_args *args = opt->value;
	int ret;

	if (*args->evlistp == NULL) {
		*args->evlistp = evlist__new();

		if (*args->evlistp == NULL) {
			fprintf(stderr, "Not enough memory to create evlist\n");
			return -1;
		}
	}
	ret = parse_events_option(opt, str, unset);
	if (ret) {
		evlist__delete(*args->evlistp);
		*args->evlistp = NULL;
	}

	return ret;
}

static int
foreach_evsel_in_last_glob(struct evlist *evlist,
			   int (*func)(struct evsel *evsel,
				       const void *arg),
			   const void *arg)
{
	struct evsel *last = NULL;
	int err;

	/*
	 * Don't return when list_empty, give func a chance to report
	 * error when it found last == NULL.
	 *
	 * So no need to WARN here, let *func do this.
	 */
	if (evlist->core.nr_entries > 0)
		last = evlist__last(evlist);

	do {
		err = (*func)(last, arg);
		if (err)
			return -1;
		if (!last)
			return 0;

		if (last->core.node.prev == &evlist->core.entries)
			return 0;
		last = list_entry(last->core.node.prev, struct evsel, core.node);
	} while (!last->cmdline_group_boundary);

	return 0;
}
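
/*
 * For example, after "-e sched:sched_switch,sched:sched_wakeup" both
 * tracepoints belong to the last parsed list, so a following --filter option
 * applies func to each of them, walking backwards from the last evsel until
 * the previous cmdline_group_boundary is crossed.
 */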

/* Will a tracepoint filter work for str or should a BPF filter be used? */
static bool is_possible_tp_filter(const char *str)
{
	return strstr(str, "uid") == NULL;
}
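
/*
 * For example, "common_pid != 1234" can be handled by the kernel's
 * tracepoint filtering, while "uid == 1000" mentions "uid" and so is routed
 * to a BPF filter by set_filter() below.
 */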

static int set_filter(struct evsel *evsel, const void *arg)
{
	const char *str = arg;
	int nr_addr_filters = 0;
	struct perf_pmu *pmu;

	if (evsel == NULL) {
		fprintf(stderr,
			"--filter option should follow a -e tracepoint or HW tracer option\n");
		return -1;
	}

	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT && is_possible_tp_filter(str)) {
		if (evsel__append_tp_filter(evsel, str) < 0) {
			fprintf(stderr,
				"not enough memory to hold filter string\n");
			return -1;
		}

		return 0;
	}

	pmu = evsel__find_pmu(evsel);
	if (pmu) {
		perf_pmu__scan_file(pmu, "nr_addr_filters",
				    "%d", &nr_addr_filters);
	}
	if (!nr_addr_filters)
		return perf_bpf_filter__parse(&evsel->bpf_filters, str);

	if (evsel__append_addr_filter(evsel, str) < 0) {
		fprintf(stderr,
			"not enough memory to hold filter string\n");
		return -1;
	}

	return 0;
}

int parse_filter(const struct option *opt, const char *str,
		 int unset __maybe_unused)
{
	struct evlist *evlist = *(struct evlist **)opt->value;

	return foreach_evsel_in_last_glob(evlist, set_filter,
					  (const void *)str);
}

int parse_uid_filter(struct evlist *evlist, uid_t uid)
{
	struct option opt = {
		.value = &evlist,
	};
	char buf[128];
	int ret;

	snprintf(buf, sizeof(buf), "uid == %d", uid);
	ret = parse_filter(&opt, buf, /*unset=*/0);
	if (ret) {
		if (use_browser >= 1) {
			/*
			 * Use ui__warning so a pop up appears above the
			 * underlying BPF error message.
			 */
			ui__warning("Failed to add UID filtering that uses BPF filtering.\n");
		} else {
			fprintf(stderr, "Failed to add UID filtering that uses BPF filtering.\n");
		}
	}
	return ret;
}

static int add_exclude_perf_filter(struct evsel *evsel,
				   const void *arg __maybe_unused)
{
	char new_filter[64];

	if (evsel == NULL || evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
		fprintf(stderr,
			"--exclude-perf option should follow a -e tracepoint option\n");
		return -1;
	}

	snprintf(new_filter, sizeof(new_filter), "common_pid != %d", getpid());

	if (evsel__append_tp_filter(evsel, new_filter) < 0) {
		fprintf(stderr,
			"not enough memory to hold filter string\n");
		return -1;
	}

	return 0;
}

int exclude_perf(const struct option *opt,
		 const char *arg __maybe_unused,
		 int unset __maybe_unused)
{
	struct evlist *evlist = *(struct evlist **)opt->value;

	return foreach_evsel_in_last_glob(evlist, add_exclude_perf_filter,
					  NULL);
}

int parse_events__is_hardcoded_term(struct parse_events_term *term)
{
	return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
}

static int new_term(struct parse_events_term **_term,
		    struct parse_events_term *temp,
		    char *str, u64 num)
{
	struct parse_events_term *term;

	term = malloc(sizeof(*term));
	if (!term)
		return -ENOMEM;

	*term = *temp;
	INIT_LIST_HEAD(&term->list);
	term->weak = false;

	switch (term->type_val) {
	case PARSE_EVENTS__TERM_TYPE_NUM:
		term->val.num = num;
		break;
	case PARSE_EVENTS__TERM_TYPE_STR:
		term->val.str = str;
		break;
	default:
		free(term);
		return -EINVAL;
	}

	*_term = term;
	return 0;
}

int parse_events_term__num(struct parse_events_term **term,
			   enum parse_events__term_type type_term,
			   const char *config, u64 num,
			   bool no_value,
			   void *loc_term_, void *loc_val_)
{
	YYLTYPE *loc_term = loc_term_;
	YYLTYPE *loc_val = loc_val_;

	struct parse_events_term temp = {
		.type_val = PARSE_EVENTS__TERM_TYPE_NUM,
		.type_term = type_term,
		.config = config ? : strdup(parse_events__term_type_str(type_term)),
		.no_value = no_value,
		.err_term = loc_term ? loc_term->first_column : 0,
		.err_val = loc_val ? loc_val->first_column : 0,
	};

	return new_term(term, &temp, /*str=*/NULL, num);
}
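
/*
 * For example (a sketch of what the bison grammar ends up doing), a term
 * such as "period=0x1000" becomes roughly:
 *
 *	parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD,
 *			       NULL, 0x1000, false, &term_loc, &val_loc);
 *
 * with term->config left as a strdup of "period" since config is NULL.
 */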

int parse_events_term__str(struct parse_events_term **term,
			   enum parse_events__term_type type_term,
			   char *config, char *str,
			   void *loc_term_, void *loc_val_)
{
	YYLTYPE *loc_term = loc_term_;
	YYLTYPE *loc_val = loc_val_;

	struct parse_events_term temp = {
		.type_val = PARSE_EVENTS__TERM_TYPE_STR,
		.type_term = type_term,
		.config = config,
		.err_term = loc_term ? loc_term->first_column : 0,
		.err_val = loc_val ? loc_val->first_column : 0,
	};

	return new_term(term, &temp, str, /*num=*/0);
}

int parse_events_term__term(struct parse_events_term **term,
			    enum parse_events__term_type term_lhs,
			    enum parse_events__term_type term_rhs,
			    void *loc_term, void *loc_val)
{
	return parse_events_term__str(term, term_lhs, NULL,
				      strdup(parse_events__term_type_str(term_rhs)),
				      loc_term, loc_val);
}

int parse_events_term__clone(struct parse_events_term **new,
			     const struct parse_events_term *term)
{
	char *str;
	struct parse_events_term temp = *term;

	temp.used = false;
	if (term->config) {
		temp.config = strdup(term->config);
		if (!temp.config)
			return -ENOMEM;
	}
	if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
		return new_term(new, &temp, /*str=*/NULL, term->val.num);

	str = strdup(term->val.str);
	if (!str) {
		zfree(&temp.config);
		return -ENOMEM;
	}
	return new_term(new, &temp, str, /*num=*/0);
}

void parse_events_term__delete(struct parse_events_term *term)
{
	if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
		zfree(&term->val.str);

	zfree(&term->config);
	free(term);
}

static int parse_events_terms__copy(const struct parse_events_terms *src,
				    struct parse_events_terms *dest)
{
	struct parse_events_term *term;

	list_for_each_entry(term, &src->terms, list) {
		struct parse_events_term *n;
		int ret;

		ret = parse_events_term__clone(&n, term);
		if (ret)
			return ret;

		list_add_tail(&n->list, &dest->terms);
	}
	return 0;
}

void parse_events_terms__init(struct parse_events_terms *terms)
{
	INIT_LIST_HEAD(&terms->terms);
}

void parse_events_terms__exit(struct parse_events_terms *terms)
{
	struct parse_events_term *term, *h;

	list_for_each_entry_safe(term, h, &terms->terms, list) {
		list_del_init(&term->list);
		parse_events_term__delete(term);
	}
}

void parse_events_terms__delete(struct parse_events_terms *terms)
{
	if (!terms)
		return;
	parse_events_terms__exit(terms);
	free(terms);
}

static int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct strbuf *sb)
{
	struct parse_events_term *term;
	bool first = true;

	if (!terms)
		return 0;

	list_for_each_entry(term, &terms->terms, list) {
		int ret;

		if (!first) {
			ret = strbuf_addch(sb, ',');
			if (ret < 0)
				return ret;
		}
		first = false;

		if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
			if (term->no_value) {
				assert(term->val.num == 1);
				ret = strbuf_addf(sb, "%s", term->config);
			} else
				ret = strbuf_addf(sb, "%s=%#"PRIx64, term->config, term->val.num);
		else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) {
			if (term->config) {
				ret = strbuf_addf(sb, "%s=", term->config);
				if (ret < 0)
					return ret;
			} else if ((unsigned int)term->type_term < __PARSE_EVENTS__TERM_TYPE_NR) {
				ret = strbuf_addf(sb, "%s=",
						  parse_events__term_type_str(term->type_term));
				if (ret < 0)
					return ret;
			}
			assert(!term->no_value);
			ret = strbuf_addf(sb, "%s", term->val.str);
		}
		if (ret < 0)
			return ret;
	}
	return 0;
}
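
/*
 * For example, terms parsed from "config=0x1,period" (where "period" is a
 * no_value term with val.num == 1) round-trip through this function back to
 * the string "config=0x1,period".
 */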

static void config_terms_list(char *buf, size_t buf_sz)
{
	int i;
	bool first = true;

	buf[0] = '\0';
	for (i = 0; i < __PARSE_EVENTS__TERM_TYPE_NR; i++) {
		const char *name = parse_events__term_type_str(i);

		if (!config_term_avail(i, NULL))
			continue;
		if (!name)
			continue;
		if (name[0] == '<')
			continue;

		if (strlen(buf) + strlen(name) + 2 >= buf_sz)
			return;

		if (!first)
			strcat(buf, ",");
		else
			first = false;
		strcat(buf, name);
	}
}

/*
 * Return a string containing the valid config terms of an event.
 * @additional_terms: For terms such as PMU sysfs terms.
 */
char *parse_events_formats_error_string(char *additional_terms)
{
	char *str;
	/* "no-overwrite" is the longest name */
	char static_terms[__PARSE_EVENTS__TERM_TYPE_NR *
			  (sizeof("no-overwrite") - 1)];

	config_terms_list(static_terms, sizeof(static_terms));
	/* valid terms */
	if (additional_terms) {
		if (asprintf(&str, "valid terms: %s,%s",
			     additional_terms, static_terms) < 0)
			goto fail;
	} else {
		if (asprintf(&str, "valid terms: %s", static_terms) < 0)
			goto fail;
	}
	return str;

fail:
	return NULL;
}
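
/*
 * For example, parse_events_formats_error_string("event,umask") yields
 * something like "valid terms: event,umask,config,config1,..." (a sketch;
 * the tail depends on which terms config_term_avail() accepts).
 */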