// SPDX-License-Identifier: GPL-2.0
#include <linux/hw_breakpoint.h>
#include <linux/err.h>
#include <linux/list_sort.h>
#include <linux/zalloc.h>
#include <dirent.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/param.h>
#include "cpumap.h"
#include "term.h"
#include "env.h"
#include "evlist.h"
#include "evsel.h"
#include <subcmd/parse-options.h>
#include "parse-events.h"
#include "string2.h"
#include "strbuf.h"
#include "debug.h"
#include <api/fs/tracing_path.h>
#include <api/io_dir.h>
#include <perf/cpumap.h>
#include <util/parse-events-bison.h>
#include <util/parse-events-flex.h>
#include "pmu.h"
#include "pmus.h"
#include "asm/bug.h"
#include "util/parse-branch-options.h"
#include "util/evsel_config.h"
#include "util/event.h"
#include "util/bpf-filter.h"
#include "util/stat.h"
#include "util/util.h"
#include "tracepoint.h"

#define MAX_NAME_LEN 100

static int get_config_terms(const struct parse_events_terms *head_config,
                            struct list_head *head_terms);
static int parse_events_terms__copy(const struct parse_events_terms *src,
                                    struct parse_events_terms *dest);

const struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
        [PERF_COUNT_HW_CPU_CYCLES] = {
                .symbol = "cpu-cycles",
                .alias  = "cycles",
        },
        [PERF_COUNT_HW_INSTRUCTIONS] = {
                .symbol = "instructions",
                .alias  = "",
        },
        [PERF_COUNT_HW_CACHE_REFERENCES] = {
                .symbol = "cache-references",
                .alias  = "",
        },
        [PERF_COUNT_HW_CACHE_MISSES] = {
                .symbol = "cache-misses",
                .alias  = "",
        },
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {
                .symbol = "branch-instructions",
                .alias  = "branches",
        },
        [PERF_COUNT_HW_BRANCH_MISSES] = {
                .symbol = "branch-misses",
                .alias  = "",
        },
        [PERF_COUNT_HW_BUS_CYCLES] = {
                .symbol = "bus-cycles",
                .alias  = "",
        },
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {
                .symbol = "stalled-cycles-frontend",
                .alias  = "idle-cycles-frontend",
        },
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {
                .symbol = "stalled-cycles-backend",
                .alias  = "idle-cycles-backend",
        },
        [PERF_COUNT_HW_REF_CPU_CYCLES] = {
                .symbol = "ref-cycles",
                .alias  = "",
        },
};

const struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
        [PERF_COUNT_SW_CPU_CLOCK] = {
                .symbol = "cpu-clock",
                .alias  = "",
        },
        [PERF_COUNT_SW_TASK_CLOCK] = {
                .symbol = "task-clock",
                .alias  = "",
        },
        [PERF_COUNT_SW_PAGE_FAULTS] = {
                .symbol = "page-faults",
                .alias  = "faults",
        },
        [PERF_COUNT_SW_CONTEXT_SWITCHES] = {
                .symbol = "context-switches",
                .alias  = "cs",
        },
        [PERF_COUNT_SW_CPU_MIGRATIONS] = {
                .symbol = "cpu-migrations",
                .alias  = "migrations",
        },
        [PERF_COUNT_SW_PAGE_FAULTS_MIN] = {
                .symbol = "minor-faults",
                .alias  = "",
        },
        [PERF_COUNT_SW_PAGE_FAULTS_MAJ] = {
                .symbol = "major-faults",
                .alias  = "",
        },
        [PERF_COUNT_SW_ALIGNMENT_FAULTS] = {
                .symbol = "alignment-faults",
                .alias  = "",
        },
        [PERF_COUNT_SW_EMULATION_FAULTS] = {
                .symbol = "emulation-faults",
                .alias  = "",
        },
        [PERF_COUNT_SW_DUMMY] = {
                .symbol = "dummy",
                .alias  = "",
        },
        [PERF_COUNT_SW_BPF_OUTPUT] = {
                .symbol = "bpf-output",
                .alias  = "",
        },
        [PERF_COUNT_SW_CGROUP_SWITCHES] = {
                .symbol = "cgroup-switches",
                .alias  = "",
        },
};
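
/*
 * Illustrative note (not part of the original source): the tables above back
 * the symbolic event names accepted on the command line, so for example
 * "perf stat -e cycles,faults" resolves "cycles" through the alias of
 * PERF_COUNT_HW_CPU_CYCLES and "faults" through the alias of
 * PERF_COUNT_SW_PAGE_FAULTS.
 */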

const char *event_type(int type)
{
        switch (type) {
        case PERF_TYPE_HARDWARE:
                return "hardware";

        case PERF_TYPE_SOFTWARE:
                return "software";

        case PERF_TYPE_TRACEPOINT:
                return "tracepoint";

        case PERF_TYPE_HW_CACHE:
                return "hardware-cache";

        default:
                break;
        }

        return "unknown";
}

static char *get_config_str(const struct parse_events_terms *head_terms,
                            enum parse_events__term_type type_term)
{
        struct parse_events_term *term;

        if (!head_terms)
                return NULL;

        list_for_each_entry(term, &head_terms->terms, list)
                if (term->type_term == type_term)
                        return term->val.str;

        return NULL;
}

static char *get_config_metric_id(const struct parse_events_terms *head_terms)
{
        return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID);
}

static char *get_config_name(const struct parse_events_terms *head_terms)
{
        return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME);
}

static struct perf_cpu_map *get_config_cpu(const struct parse_events_terms *head_terms)
{
        struct parse_events_term *term;
        struct perf_cpu_map *cpus = NULL;

        if (!head_terms)
                return NULL;

        list_for_each_entry(term, &head_terms->terms, list) {
                if (term->type_term == PARSE_EVENTS__TERM_TYPE_CPU) {
                        struct perf_cpu_map *cpu = perf_cpu_map__new_int(term->val.num);

                        perf_cpu_map__merge(&cpus, cpu);
                        perf_cpu_map__put(cpu);
                }
        }

        return cpus;
}
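
/*
 * Illustrative example (not part of the original source): with terms such as
 * "cpu=0,cpu=2" each CPU term becomes a single-CPU map that is merged into
 * the result, so get_config_cpu() returns a map covering CPUs 0 and 2.
 */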

/**
 * fix_raw - For each raw term see if there is an event (aka alias) in pmu that
 *           matches the raw's string value. If the string value matches an
 *           event then change the term to be an event, if not then change it
 *           to be a config term. For example, "read" may be an event of the
 *           PMU or a raw hex encoding of 0xead. The fix-up is done late so the
 *           PMU of the event can be determined and we don't need to scan all
 *           PMUs ahead-of-time.
 * @config_terms: the list of terms that may contain a raw term.
 * @pmu: the PMU to scan for events from.
 */
static void fix_raw(struct parse_events_terms *config_terms, struct perf_pmu *pmu)
{
        struct parse_events_term *term;

        list_for_each_entry(term, &config_terms->terms, list) {
                u64 num;

                if (term->type_term != PARSE_EVENTS__TERM_TYPE_RAW)
                        continue;

                if (perf_pmu__have_event(pmu, term->val.str)) {
                        zfree(&term->config);
                        term->config = term->val.str;
                        term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
                        term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
                        term->val.num = 1;
                        term->no_value = true;
                        continue;
                }

                zfree(&term->config);
                term->config = strdup("config");
                errno = 0;
                num = strtoull(term->val.str + 1, NULL, 16);
                assert(errno == 0);
                free(term->val.str);
                term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
                term->type_term = PARSE_EVENTS__TERM_TYPE_CONFIG;
                term->val.num = num;
                term->no_value = false;
        }
}

static struct evsel *
__add_event(struct list_head *list, int *idx,
            struct perf_event_attr *attr,
            bool init_attr,
            const char *name, const char *metric_id, struct perf_pmu *pmu,
            struct list_head *config_terms, struct evsel *first_wildcard_match,
            struct perf_cpu_map *cpu_list, u64 alternate_hw_config)
{
        struct evsel *evsel;
        bool is_pmu_core;
        struct perf_cpu_map *cpus;

        /*
         * Ensure the first_wildcard_match's PMU matches that of the new event
         * being added. Otherwise try to match with another event further down
         * the evlist.
         */
        if (first_wildcard_match) {
                struct evsel *pos = list_prev_entry(first_wildcard_match, core.node);

                first_wildcard_match = NULL;
                list_for_each_entry_continue(pos, list, core.node) {
                        if (perf_pmu__name_no_suffix_match(pos->pmu, pmu->name)) {
                                first_wildcard_match = pos;
                                break;
                        }
                        if (pos->pmu->is_core && (!pmu || pmu->is_core)) {
                                first_wildcard_match = pos;
                                break;
                        }
                }
        }

        if (pmu) {
                is_pmu_core = pmu->is_core;
                cpus = perf_cpu_map__get(perf_cpu_map__is_empty(cpu_list) ? pmu->cpus : cpu_list);
                perf_pmu__warn_invalid_formats(pmu);
                if (attr->type == PERF_TYPE_RAW || attr->type >= PERF_TYPE_MAX) {
                        perf_pmu__warn_invalid_config(pmu, attr->config, name,
                                                      PERF_PMU_FORMAT_VALUE_CONFIG, "config");
                        perf_pmu__warn_invalid_config(pmu, attr->config1, name,
                                                      PERF_PMU_FORMAT_VALUE_CONFIG1, "config1");
                        perf_pmu__warn_invalid_config(pmu, attr->config2, name,
                                                      PERF_PMU_FORMAT_VALUE_CONFIG2, "config2");
                        perf_pmu__warn_invalid_config(pmu, attr->config3, name,
                                                      PERF_PMU_FORMAT_VALUE_CONFIG3, "config3");
                }
        } else {
                is_pmu_core = (attr->type == PERF_TYPE_HARDWARE ||
                               attr->type == PERF_TYPE_HW_CACHE);
                if (perf_cpu_map__is_empty(cpu_list))
                        cpus = is_pmu_core ? perf_cpu_map__new_online_cpus() : NULL;
                else
                        cpus = perf_cpu_map__get(cpu_list);
        }
        if (init_attr)
                event_attr_init(attr);

        evsel = evsel__new_idx(attr, *idx);
        if (!evsel) {
                perf_cpu_map__put(cpus);
                return NULL;
        }

        (*idx)++;
        evsel->core.cpus = cpus;
        evsel->core.own_cpus = perf_cpu_map__get(cpus);
        evsel->core.requires_cpu = pmu ? pmu->is_uncore : false;
        evsel->core.is_pmu_core = is_pmu_core;
        evsel->pmu = pmu;
        evsel->alternate_hw_config = alternate_hw_config;
        evsel->first_wildcard_match = first_wildcard_match;

        if (name)
                evsel->name = strdup(name);

        if (metric_id)
                evsel->metric_id = strdup(metric_id);

        if (config_terms)
                list_splice_init(config_terms, &evsel->config_terms);

        if (list)
                list_add_tail(&evsel->core.node, list);

        return evsel;
}

struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
                                      const char *name, const char *metric_id,
                                      struct perf_pmu *pmu)
{
        return __add_event(/*list=*/NULL, &idx, attr, /*init_attr=*/false, name,
                           metric_id, pmu, /*config_terms=*/NULL,
                           /*first_wildcard_match=*/NULL, /*cpu_list=*/NULL,
                           /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
}

static int add_event(struct list_head *list, int *idx,
                     struct perf_event_attr *attr, const char *name,
                     const char *metric_id, struct list_head *config_terms,
                     u64 alternate_hw_config)
{
        return __add_event(list, idx, attr, /*init_attr=*/true, name, metric_id,
                           /*pmu=*/NULL, config_terms,
                           /*first_wildcard_match=*/NULL, /*cpu_list=*/NULL,
                           alternate_hw_config) ? 0 : -ENOMEM;
}

/**
 * parse_aliases - search names for entries beginning or equalling str ignoring
 *                 case. If multiple entries in names match str then the
 *                 longest is chosen.
 * @str: The needle to look for.
 * @names: The haystack to search.
 * @size: The size of the haystack.
 * @longest: Out argument giving the length of the matching entry.
 */
static int parse_aliases(const char *str, const char *const names[][EVSEL__MAX_ALIASES], int size,
                         int *longest)
{
        *longest = -1;
        for (int i = 0; i < size; i++) {
                for (int j = 0; j < EVSEL__MAX_ALIASES && names[i][j]; j++) {
                        int n = strlen(names[i][j]);

                        if (n > *longest && !strncasecmp(str, names[i][j], n))
                                *longest = n;
                }
                if (*longest > 0)
                        return i;
        }

        return -1;
}
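
/*
 * Illustrative example (not part of the original source): searching
 * evsel__hw_cache for "L1-dcache-load-misses" matches the "L1-dcache" alias
 * as a prefix, so parse_aliases() returns that cache type's index with
 * *longest set to strlen("L1-dcache").
 */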

typedef int config_term_func_t(struct perf_event_attr *attr,
                               struct parse_events_term *term,
                               struct parse_events_error *err);
static int config_term_common(struct perf_event_attr *attr,
                              struct parse_events_term *term,
                              struct parse_events_error *err);
static int config_attr(struct perf_event_attr *attr,
                       const struct parse_events_terms *head,
                       struct parse_events_error *err,
                       config_term_func_t config_term);

/**
 * parse_events__decode_legacy_cache - Search name for the legacy cache event
 *                                     name composed of 1, 2 or 3 hyphen
 *                                     separated sections. The first section is
 *                                     the cache type while the others are the
 *                                     optional op and optional result. To make
 *                                     life hard the names in the table also
 *                                     contain hyphens and the longest name
 *                                     should always be selected.
 */
int parse_events__decode_legacy_cache(const char *name, int extended_pmu_type, __u64 *config)
{
        int len, cache_type = -1, cache_op = -1, cache_result = -1;
        const char *name_end = &name[strlen(name) + 1];
        const char *str = name;

        cache_type = parse_aliases(str, evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX, &len);
        if (cache_type == -1)
                return -EINVAL;
        str += len + 1;

        if (str < name_end) {
                cache_op = parse_aliases(str, evsel__hw_cache_op,
                                         PERF_COUNT_HW_CACHE_OP_MAX, &len);
                if (cache_op >= 0) {
                        if (!evsel__is_cache_op_valid(cache_type, cache_op))
                                return -EINVAL;
                        str += len + 1;
                } else {
                        cache_result = parse_aliases(str, evsel__hw_cache_result,
                                                     PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
                        if (cache_result >= 0)
                                str += len + 1;
                }
        }
        if (str < name_end) {
                if (cache_op < 0) {
                        cache_op = parse_aliases(str, evsel__hw_cache_op,
                                                 PERF_COUNT_HW_CACHE_OP_MAX, &len);
                        if (cache_op >= 0) {
                                if (!evsel__is_cache_op_valid(cache_type, cache_op))
                                        return -EINVAL;
                        }
                } else if (cache_result < 0) {
                        cache_result = parse_aliases(str, evsel__hw_cache_result,
                                                     PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
                }
        }

        /*
         * Fall back to reads:
         */
        if (cache_op == -1)
                cache_op = PERF_COUNT_HW_CACHE_OP_READ;

        /*
         * Fall back to accesses:
         */
        if (cache_result == -1)
                cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;

        *config = cache_type | (cache_op << 8) | (cache_result << 16);
        if (perf_pmus__supports_extended_type())
                *config |= (__u64)extended_pmu_type << PERF_PMU_TYPE_SHIFT;
        return 0;
}
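
/*
 * Illustrative example (not part of the original source): for
 * "L1-dcache-load-misses" the three sections decode to cache_type
 * PERF_COUNT_HW_CACHE_L1D (0), cache_op PERF_COUNT_HW_CACHE_OP_READ (0) and
 * cache_result PERF_COUNT_HW_CACHE_RESULT_MISS (1), so *config becomes
 * 0 | (0 << 8) | (1 << 16) = 0x10000 before any extended PMU type is added.
 */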

/**
 * parse_events__filter_pmu - returns false if a wildcard PMU should be
 *                            considered, true if it should be filtered.
 */
bool parse_events__filter_pmu(const struct parse_events_state *parse_state,
                              const struct perf_pmu *pmu)
{
        if (parse_state->pmu_filter == NULL)
                return false;

        return strcmp(parse_state->pmu_filter, pmu->name) != 0;
}

static int parse_events_add_pmu(struct parse_events_state *parse_state,
                                struct list_head *list, struct perf_pmu *pmu,
                                const struct parse_events_terms *const_parsed_terms,
                                struct evsel *first_wildcard_match, u64 alternate_hw_config);

int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
                           struct parse_events_state *parse_state,
                           struct parse_events_terms *parsed_terms)
{
        struct perf_pmu *pmu = NULL;
        bool found_supported = false;
        const char *config_name = get_config_name(parsed_terms);
        const char *metric_id = get_config_metric_id(parsed_terms);
        struct perf_cpu_map *cpus = get_config_cpu(parsed_terms);
        int ret = 0;
        struct evsel *first_wildcard_match = NULL;

        while ((pmu = perf_pmus__scan(pmu)) != NULL) {
                LIST_HEAD(config_terms);
                struct perf_event_attr attr;

                if (parse_events__filter_pmu(parse_state, pmu))
                        continue;

                if (perf_pmu__have_event(pmu, name)) {
                        /*
                         * The PMU has the event so add as not a legacy cache
                         * event.
                         */
                        ret = parse_events_add_pmu(parse_state, list, pmu,
                                                   parsed_terms,
                                                   first_wildcard_match,
                                                   /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
                        if (ret)
                                goto out_err;
                        if (first_wildcard_match == NULL)
                                first_wildcard_match =
                                        container_of(list->prev, struct evsel, core.node);
                        continue;
                }

                if (!pmu->is_core) {
                        /* Legacy cache events are only supported by core PMUs. */
                        continue;
                }

                memset(&attr, 0, sizeof(attr));
                attr.type = PERF_TYPE_HW_CACHE;

                ret = parse_events__decode_legacy_cache(name, pmu->type, &attr.config);
                if (ret)
                        return ret;

                found_supported = true;

                if (parsed_terms) {
                        if (config_attr(&attr, parsed_terms, parse_state->error,
                                        config_term_common)) {
                                ret = -EINVAL;
                                goto out_err;
                        }
                        if (get_config_terms(parsed_terms, &config_terms)) {
                                ret = -ENOMEM;
                                goto out_err;
                        }
                }

                if (__add_event(list, idx, &attr, /*init_attr=*/true, config_name ?: name,
                                metric_id, pmu, &config_terms, first_wildcard_match,
                                cpus, /*alternate_hw_config=*/PERF_COUNT_HW_MAX) == NULL)
                        ret = -ENOMEM;

                if (first_wildcard_match == NULL)
                        first_wildcard_match = container_of(list->prev, struct evsel, core.node);
                free_config_terms(&config_terms);
                if (ret)
                        goto out_err;
        }
out_err:
        perf_cpu_map__put(cpus);
        return found_supported ? 0 : -EINVAL;
}

static void tracepoint_error(struct parse_events_error *e, int err,
                             const char *sys, const char *name, int column)
{
        const char *str;
        char help[BUFSIZ];

        if (!e)
                return;

        /*
         * We get error directly from syscall errno ( > 0),
         * or from encoded pointer's error ( < 0).
         */
        err = abs(err);

        switch (err) {
        case EACCES:
                str = "can't access trace events";
                break;
        case ENOENT:
                str = "unknown tracepoint";
                break;
        default:
                str = "failed to add tracepoint";
                break;
        }

        tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name);
        parse_events_error__handle(e, column, strdup(str), strdup(help));
}

static int add_tracepoint(struct parse_events_state *parse_state,
                          struct list_head *list,
                          const char *sys_name, const char *evt_name,
                          struct parse_events_error *err,
                          struct parse_events_terms *head_config, void *loc_)
{
        YYLTYPE *loc = loc_;
        struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, parse_state->idx++,
                                               !parse_state->fake_tp);

        if (IS_ERR(evsel)) {
                tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name, loc->first_column);
                return PTR_ERR(evsel);
        }

        if (head_config) {
                LIST_HEAD(config_terms);

                if (get_config_terms(head_config, &config_terms))
                        return -ENOMEM;
                list_splice(&config_terms, &evsel->config_terms);
        }

        list_add_tail(&evsel->core.node, list);
        return 0;
}

static int add_tracepoint_multi_event(struct parse_events_state *parse_state,
                                      struct list_head *list,
                                      const char *sys_name, const char *evt_name,
                                      struct parse_events_error *err,
                                      struct parse_events_terms *head_config, YYLTYPE *loc)
{
        char *evt_path;
        struct io_dirent64 *evt_ent;
        struct io_dir evt_dir;
        int ret = 0, found = 0;

        evt_path = get_events_file(sys_name);
        if (!evt_path) {
                tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
                return -1;
        }
        io_dir__init(&evt_dir, open(evt_path, O_CLOEXEC | O_DIRECTORY | O_RDONLY));
        if (evt_dir.dirfd < 0) {
                put_events_file(evt_path);
                tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
                return -1;
        }

        while (!ret && (evt_ent = io_dir__readdir(&evt_dir))) {
                if (!strcmp(evt_ent->d_name, ".")
                    || !strcmp(evt_ent->d_name, "..")
                    || !strcmp(evt_ent->d_name, "enable")
                    || !strcmp(evt_ent->d_name, "filter"))
                        continue;

                if (!strglobmatch(evt_ent->d_name, evt_name))
                        continue;

                found++;

                ret = add_tracepoint(parse_state, list, sys_name, evt_ent->d_name,
                                     err, head_config, loc);
        }

        if (!found) {
                tracepoint_error(err, ENOENT, sys_name, evt_name, loc->first_column);
                ret = -1;
        }

        put_events_file(evt_path);
        close(evt_dir.dirfd);
        return ret;
}

static int add_tracepoint_event(struct parse_events_state *parse_state,
                                struct list_head *list,
                                const char *sys_name, const char *evt_name,
                                struct parse_events_error *err,
                                struct parse_events_terms *head_config, YYLTYPE *loc)
{
        return strpbrk(evt_name, "*?") ?
                add_tracepoint_multi_event(parse_state, list, sys_name, evt_name,
                                           err, head_config, loc) :
                add_tracepoint(parse_state, list, sys_name, evt_name,
                               err, head_config, loc);
}

static int add_tracepoint_multi_sys(struct parse_events_state *parse_state,
                                    struct list_head *list,
                                    const char *sys_name, const char *evt_name,
                                    struct parse_events_error *err,
                                    struct parse_events_terms *head_config, YYLTYPE *loc)
{
        struct io_dirent64 *events_ent;
        struct io_dir events_dir;
        int ret = 0;
        char *events_dir_path = get_tracing_file("events");

        if (!events_dir_path) {
                tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
                return -1;
        }
        io_dir__init(&events_dir, open(events_dir_path, O_CLOEXEC | O_DIRECTORY | O_RDONLY));
        put_events_file(events_dir_path);
        if (events_dir.dirfd < 0) {
                tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
                return -1;
        }

        while (!ret && (events_ent = io_dir__readdir(&events_dir))) {
                if (!strcmp(events_ent->d_name, ".")
                    || !strcmp(events_ent->d_name, "..")
                    || !strcmp(events_ent->d_name, "enable")
                    || !strcmp(events_ent->d_name, "header_event")
                    || !strcmp(events_ent->d_name, "header_page"))
                        continue;

                if (!strglobmatch(events_ent->d_name, sys_name))
                        continue;

                ret = add_tracepoint_event(parse_state, list, events_ent->d_name,
                                           evt_name, err, head_config, loc);
        }
        close(events_dir.dirfd);
        return ret;
}

size_t default_breakpoint_len(void)
{
#if defined(__i386__)
        static int len;

        if (len == 0) {
                struct perf_env env = {};

                perf_env__init(&env);
                len = perf_env__kernel_is_64_bit(&env) ? sizeof(u64) : sizeof(long);
                perf_env__exit(&env);
        }
        return len;
#elif defined(__aarch64__)
        return 4;
#else
        return sizeof(long);
#endif
}
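
/*
 * Illustrative note (not part of the original source): on arm64 the constant
 * 4 matches the fixed 4-byte AArch64 instruction size, while on i386 the
 * right length depends on whether 32-bit userspace is running on a 64-bit
 * kernel, hence the perf_env probe above.
 */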

static int
parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
{
        int i;

        for (i = 0; i < 3; i++) {
                if (!type || !type[i])
                        break;

#define CHECK_SET_TYPE(bit)                     \
do {                                            \
        if (attr->bp_type & bit)                \
                return -EINVAL;                 \
        else                                    \
                attr->bp_type |= bit;           \
} while (0)

                switch (type[i]) {
                case 'r':
                        CHECK_SET_TYPE(HW_BREAKPOINT_R);
                        break;
                case 'w':
                        CHECK_SET_TYPE(HW_BREAKPOINT_W);
                        break;
                case 'x':
                        CHECK_SET_TYPE(HW_BREAKPOINT_X);
                        break;
                default:
                        return -EINVAL;
                }
        }

#undef CHECK_SET_TYPE

        if (!attr->bp_type) /* Default */
                attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;

        return 0;
}
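
/*
 * Illustrative examples (not part of the original source): type "rw" yields
 * HW_BREAKPOINT_R | HW_BREAKPOINT_W, an empty type falls back to the same
 * read/write default, and a repeated letter such as "rr" fails with -EINVAL
 * because CHECK_SET_TYPE() rejects bits that are already set.
 */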

int parse_events_add_breakpoint(struct parse_events_state *parse_state,
                                struct list_head *list,
                                u64 addr, char *type, u64 len,
                                struct parse_events_terms *head_config)
{
        struct perf_event_attr attr;
        LIST_HEAD(config_terms);
        const char *name;

        memset(&attr, 0, sizeof(attr));
        attr.bp_addr = addr;

        if (parse_breakpoint_type(type, &attr))
                return -EINVAL;

        /* Provide some defaults if len is not specified */
        if (!len) {
                if (attr.bp_type == HW_BREAKPOINT_X)
                        len = default_breakpoint_len();
                else
                        len = HW_BREAKPOINT_LEN_4;
        }

        attr.bp_len = len;

        attr.type = PERF_TYPE_BREAKPOINT;
        attr.sample_period = 1;

        if (head_config) {
                if (config_attr(&attr, head_config, parse_state->error,
                                config_term_common))
                        return -EINVAL;

                if (get_config_terms(head_config, &config_terms))
                        return -ENOMEM;
        }

        name = get_config_name(head_config);

        return add_event(list, &parse_state->idx, &attr, name, /*metric_id=*/NULL,
                         &config_terms, /*alternate_hw_config=*/PERF_COUNT_HW_MAX);
}
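
/*
 * Illustrative usage (not part of the original source): this is the path
 * taken for command lines such as "perf record -e mem:0x1000:rw", where the
 * address, access type and optional length come from the event string.
 */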

static int check_type_val(struct parse_events_term *term,
                          struct parse_events_error *err,
                          enum parse_events__term_val_type type)
{
        if (type == term->type_val)
                return 0;

        if (err) {
                parse_events_error__handle(err, term->err_val,
                                           type == PARSE_EVENTS__TERM_TYPE_NUM
                                           ? strdup("expected numeric value")
                                           : strdup("expected string value"),
                                           NULL);
        }
        return -EINVAL;
}

static bool config_term_shrinked;

const char *parse_events__term_type_str(enum parse_events__term_type term_type)
{
        /*
         * Update according to parse-events.l
         */
        static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = {
                [PARSE_EVENTS__TERM_TYPE_USER] = "<sysfs term>",
                [PARSE_EVENTS__TERM_TYPE_CONFIG] = "config",
                [PARSE_EVENTS__TERM_TYPE_CONFIG1] = "config1",
                [PARSE_EVENTS__TERM_TYPE_CONFIG2] = "config2",
                [PARSE_EVENTS__TERM_TYPE_CONFIG3] = "config3",
                [PARSE_EVENTS__TERM_TYPE_NAME] = "name",
                [PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD] = "period",
                [PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ] = "freq",
                [PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE] = "branch_type",
                [PARSE_EVENTS__TERM_TYPE_TIME] = "time",
                [PARSE_EVENTS__TERM_TYPE_CALLGRAPH] = "call-graph",
                [PARSE_EVENTS__TERM_TYPE_STACKSIZE] = "stack-size",
                [PARSE_EVENTS__TERM_TYPE_NOINHERIT] = "no-inherit",
                [PARSE_EVENTS__TERM_TYPE_INHERIT] = "inherit",
                [PARSE_EVENTS__TERM_TYPE_MAX_STACK] = "max-stack",
                [PARSE_EVENTS__TERM_TYPE_MAX_EVENTS] = "nr",
                [PARSE_EVENTS__TERM_TYPE_OVERWRITE] = "overwrite",
                [PARSE_EVENTS__TERM_TYPE_NOOVERWRITE] = "no-overwrite",
                [PARSE_EVENTS__TERM_TYPE_DRV_CFG] = "driver-config",
                [PARSE_EVENTS__TERM_TYPE_PERCORE] = "percore",
                [PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT] = "aux-output",
                [PARSE_EVENTS__TERM_TYPE_AUX_ACTION] = "aux-action",
                [PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE] = "aux-sample-size",
                [PARSE_EVENTS__TERM_TYPE_METRIC_ID] = "metric-id",
                [PARSE_EVENTS__TERM_TYPE_RAW] = "raw",
                [PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE] = "legacy-cache",
                [PARSE_EVENTS__TERM_TYPE_HARDWARE] = "hardware",
                [PARSE_EVENTS__TERM_TYPE_CPU] = "cpu",
        };
        if ((unsigned int)term_type >= __PARSE_EVENTS__TERM_TYPE_NR)
                return "unknown term";

        return config_term_names[term_type];
}

static bool
config_term_avail(enum parse_events__term_type term_type, struct parse_events_error *err)
{
        char *err_str;

        if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) {
                parse_events_error__handle(err, -1,
                                           strdup("Invalid term_type"), NULL);
                return false;
        }
        if (!config_term_shrinked)
                return true;

        switch (term_type) {
        case PARSE_EVENTS__TERM_TYPE_CONFIG:
        case PARSE_EVENTS__TERM_TYPE_CONFIG1:
        case PARSE_EVENTS__TERM_TYPE_CONFIG2:
        case PARSE_EVENTS__TERM_TYPE_CONFIG3:
        case PARSE_EVENTS__TERM_TYPE_NAME:
        case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
        case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
        case PARSE_EVENTS__TERM_TYPE_PERCORE:
        case PARSE_EVENTS__TERM_TYPE_CPU:
                return true;
        case PARSE_EVENTS__TERM_TYPE_USER:
        case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
        case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
        case PARSE_EVENTS__TERM_TYPE_TIME:
        case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
        case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
        case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
        case PARSE_EVENTS__TERM_TYPE_INHERIT:
        case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
        case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
        case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
        case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
        case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
        case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
        case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
        case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
        case PARSE_EVENTS__TERM_TYPE_RAW:
        case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
        case PARSE_EVENTS__TERM_TYPE_HARDWARE:
        default:
                if (!err)
                        return false;

                /* term_type is validated so indexing is safe */
                if (asprintf(&err_str, "'%s' is not usable in 'perf stat'",
                             parse_events__term_type_str(term_type)) >= 0)
                        parse_events_error__handle(err, -1, err_str, NULL);
                return false;
        }
}

void parse_events__shrink_config_terms(void)
{
        config_term_shrinked = true;
}

static int config_term_common(struct perf_event_attr *attr,
                              struct parse_events_term *term,
                              struct parse_events_error *err)
{
#define CHECK_TYPE_VAL(type)                                              \
do {                                                                      \
        if (check_type_val(term, err, PARSE_EVENTS__TERM_TYPE_ ## type))  \
                return -EINVAL;                                           \
} while (0)

        switch (term->type_term) {
        case PARSE_EVENTS__TERM_TYPE_CONFIG:
                CHECK_TYPE_VAL(NUM);
                attr->config = term->val.num;
                break;
        case PARSE_EVENTS__TERM_TYPE_CONFIG1:
                CHECK_TYPE_VAL(NUM);
                attr->config1 = term->val.num;
                break;
        case PARSE_EVENTS__TERM_TYPE_CONFIG2:
                CHECK_TYPE_VAL(NUM);
                attr->config2 = term->val.num;
                break;
        case PARSE_EVENTS__TERM_TYPE_CONFIG3:
                CHECK_TYPE_VAL(NUM);
                attr->config3 = term->val.num;
                break;
        case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
                CHECK_TYPE_VAL(NUM);
                break;
        case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
                CHECK_TYPE_VAL(NUM);
                break;
        case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
                CHECK_TYPE_VAL(STR);
                if (strcmp(term->val.str, "no") &&
                    parse_branch_str(term->val.str,
                                     &attr->branch_sample_type)) {
                        parse_events_error__handle(err, term->err_val,
                                                   strdup("invalid branch sample type"),
                                                   NULL);
                        return -EINVAL;
                }
                break;
        case PARSE_EVENTS__TERM_TYPE_TIME:
                CHECK_TYPE_VAL(NUM);
                if (term->val.num > 1) {
                        parse_events_error__handle(err, term->err_val,
                                                   strdup("expected 0 or 1"),
                                                   NULL);
                        return -EINVAL;
                }
                break;
        case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
                CHECK_TYPE_VAL(STR);
                break;
        case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
                CHECK_TYPE_VAL(NUM);
                break;
        case PARSE_EVENTS__TERM_TYPE_INHERIT:
                CHECK_TYPE_VAL(NUM);
                break;
        case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
                CHECK_TYPE_VAL(NUM);
                break;
        case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
                CHECK_TYPE_VAL(NUM);
                break;
        case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
                CHECK_TYPE_VAL(NUM);
                break;
        case PARSE_EVENTS__TERM_TYPE_NAME:
                CHECK_TYPE_VAL(STR);
                break;
        case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
                CHECK_TYPE_VAL(STR);
                break;
        case PARSE_EVENTS__TERM_TYPE_RAW:
                CHECK_TYPE_VAL(STR);
                break;
        case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
                CHECK_TYPE_VAL(NUM);
                break;
        case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
                CHECK_TYPE_VAL(NUM);
                break;
        case PARSE_EVENTS__TERM_TYPE_PERCORE:
                CHECK_TYPE_VAL(NUM);
                if ((unsigned int)term->val.num > 1) {
                        parse_events_error__handle(err, term->err_val,
                                                   strdup("expected 0 or 1"),
                                                   NULL);
                        return -EINVAL;
                }
                break;
        case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
                CHECK_TYPE_VAL(NUM);
                break;
        case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
                CHECK_TYPE_VAL(STR);
                break;
        case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
                CHECK_TYPE_VAL(NUM);
                if (term->val.num > UINT_MAX) {
                        parse_events_error__handle(err, term->err_val,
                                                   strdup("too big"),
                                                   NULL);
                        return -EINVAL;
                }
                break;
        case PARSE_EVENTS__TERM_TYPE_CPU:
                CHECK_TYPE_VAL(NUM);
                if (term->val.num >= (u64)cpu__max_present_cpu().cpu) {
                        parse_events_error__handle(err, term->err_val,
                                                   strdup("too big"),
                                                   NULL);
                        return -EINVAL;
                }
                break;
        case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
        case PARSE_EVENTS__TERM_TYPE_USER:
        case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
        case PARSE_EVENTS__TERM_TYPE_HARDWARE:
        default:
                parse_events_error__handle(err, term->err_term,
                                           strdup(parse_events__term_type_str(term->type_term)),
                                           parse_events_formats_error_string(NULL));
                return -EINVAL;
        }

        /*
         * Check term availability after basic checking so
         * PARSE_EVENTS__TERM_TYPE_USER can be found and filtered.
         *
         * If we checked availability at the entry of this function,
         * the user would see "'<sysfs term>' is not usable in 'perf stat'"
         * when an invalid config term is provided for a legacy event
         * (for example, instructions/badterm/...), which is confusing.
         */
        if (!config_term_avail(term->type_term, err))
                return -EINVAL;
        return 0;
#undef CHECK_TYPE_VAL
}

static int config_term_pmu(struct perf_event_attr *attr,
                           struct parse_events_term *term,
                           struct parse_events_error *err)
{
        if (term->type_term == PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE) {
                struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);

                if (!pmu) {
                        char *err_str;

                        if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0)
                                parse_events_error__handle(err, term->err_term,
                                                           err_str, /*help=*/NULL);
                        return -EINVAL;
                }
                /*
                 * Rewrite the PMU event to a legacy cache one unless the PMU
                 * doesn't support legacy cache events or the event is present
                 * within the PMU.
                 */
                if (perf_pmu__supports_legacy_cache(pmu) &&
                    !perf_pmu__have_event(pmu, term->config)) {
                        attr->type = PERF_TYPE_HW_CACHE;
                        return parse_events__decode_legacy_cache(term->config, pmu->type,
                                                                 &attr->config);
                } else {
                        term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
                        term->no_value = true;
                }
        }
        if (term->type_term == PARSE_EVENTS__TERM_TYPE_HARDWARE) {
                struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);

                if (!pmu) {
                        char *err_str;

                        if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0)
                                parse_events_error__handle(err, term->err_term,
                                                           err_str, /*help=*/NULL);
                        return -EINVAL;
                }
                /*
                 * If the PMU has a sysfs or json event prefer it over
                 * legacy. ARM requires this.
                 */
                if (perf_pmu__have_event(pmu, term->config)) {
                        term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
                        term->no_value = true;
                        term->alternate_hw_config = true;
                } else {
                        attr->type = PERF_TYPE_HARDWARE;
                        attr->config = term->val.num;
                        if (perf_pmus__supports_extended_type())
                                attr->config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT;
                }
                return 0;
        }
        if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER ||
            term->type_term == PARSE_EVENTS__TERM_TYPE_DRV_CFG) {
                /*
                 * Always succeed for sysfs terms, as we don't know
                 * at this point what type they need to have.
                 */
                return 0;
        }
        return config_term_common(attr, term, err);
}

static int config_term_tracepoint(struct perf_event_attr *attr,
                                  struct parse_events_term *term,
                                  struct parse_events_error *err)
{
        switch (term->type_term) {
        case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
        case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
        case PARSE_EVENTS__TERM_TYPE_INHERIT:
        case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
        case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
        case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
        case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
        case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
        case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
        case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
        case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
                return config_term_common(attr, term, err);
        case PARSE_EVENTS__TERM_TYPE_USER:
        case PARSE_EVENTS__TERM_TYPE_CONFIG:
        case PARSE_EVENTS__TERM_TYPE_CONFIG1:
        case PARSE_EVENTS__TERM_TYPE_CONFIG2:
        case PARSE_EVENTS__TERM_TYPE_CONFIG3:
        case PARSE_EVENTS__TERM_TYPE_NAME:
        case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
        case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
        case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
        case PARSE_EVENTS__TERM_TYPE_TIME:
        case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
        case PARSE_EVENTS__TERM_TYPE_PERCORE:
        case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
        case PARSE_EVENTS__TERM_TYPE_RAW:
        case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
        case PARSE_EVENTS__TERM_TYPE_HARDWARE:
        case PARSE_EVENTS__TERM_TYPE_CPU:
        default:
                if (err) {
                        parse_events_error__handle(err, term->err_term,
                                                   strdup(parse_events__term_type_str(term->type_term)),
                                                   strdup("valid terms: call-graph,stack-size\n"));
                }
                return -EINVAL;
        }

        return 0;
}

static int config_attr(struct perf_event_attr *attr,
                       const struct parse_events_terms *head,
                       struct parse_events_error *err,
                       config_term_func_t config_term)
{
        struct parse_events_term *term;

        list_for_each_entry(term, &head->terms, list)
                if (config_term(attr, term, err))
                        return -EINVAL;

        return 0;
}

static int get_config_terms(const struct parse_events_terms *head_config,
                            struct list_head *head_terms)
{
#define ADD_CONFIG_TERM(__type, __weak)                         \
        struct evsel_config_term *__t;                          \
                                                                \
        __t = zalloc(sizeof(*__t));                             \
        if (!__t)                                               \
                return -ENOMEM;                                 \
                                                                \
        INIT_LIST_HEAD(&__t->list);                             \
        __t->type = EVSEL__CONFIG_TERM_ ## __type;              \
        __t->weak = __weak;                                     \
        list_add_tail(&__t->list, head_terms)

#define ADD_CONFIG_TERM_VAL(__type, __name, __val, __weak)      \
do {                                                            \
        ADD_CONFIG_TERM(__type, __weak);                        \
        __t->val.__name = __val;                                \
} while (0)

#define ADD_CONFIG_TERM_STR(__type, __val, __weak)              \
do {                                                            \
        ADD_CONFIG_TERM(__type, __weak);                        \
        __t->val.str = strdup(__val);                           \
        if (!__t->val.str) {                                    \
                zfree(&__t);                                    \
                return -ENOMEM;                                 \
        }                                                       \
        __t->free_str = true;                                   \
} while (0)
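
/*
 * Illustrative note (not part of the original source): ADD_CONFIG_TERM()
 * deliberately declares __t without its own scope so the _VAL and _STR
 * wrappers can assign into the freshly allocated term; _STR additionally
 * duplicates the string and sets free_str so the later free of the config
 * terms releases it.
 */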

        struct parse_events_term *term;

        list_for_each_entry(term, &head_config->terms, list) {
                switch (term->type_term) {
                case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
                        ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
                        ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_TIME:
                        ADD_CONFIG_TERM_VAL(TIME, time, term->val.num, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
                        ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
                        ADD_CONFIG_TERM_STR(BRANCH, term->val.str, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
                        ADD_CONFIG_TERM_VAL(STACK_USER, stack_user,
                                            term->val.num, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_INHERIT:
                        ADD_CONFIG_TERM_VAL(INHERIT, inherit,
                                            term->val.num ? 1 : 0, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
                        ADD_CONFIG_TERM_VAL(INHERIT, inherit,
                                            term->val.num ? 0 : 1, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
                        ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack,
                                            term->val.num, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
                        ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events,
                                            term->val.num, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
                        ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
                                            term->val.num ? 1 : 0, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
                        ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
                                            term->val.num ? 0 : 1, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
                        ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_PERCORE:
                        ADD_CONFIG_TERM_VAL(PERCORE, percore,
                                            term->val.num ? true : false, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
                        ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output,
                                            term->val.num ? 1 : 0, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
                        ADD_CONFIG_TERM_STR(AUX_ACTION, term->val.str, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
                        ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size,
                                            term->val.num, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_USER:
                case PARSE_EVENTS__TERM_TYPE_CONFIG:
                case PARSE_EVENTS__TERM_TYPE_CONFIG1:
                case PARSE_EVENTS__TERM_TYPE_CONFIG2:
                case PARSE_EVENTS__TERM_TYPE_CONFIG3:
                case PARSE_EVENTS__TERM_TYPE_NAME:
                case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
                case PARSE_EVENTS__TERM_TYPE_RAW:
                case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
                case PARSE_EVENTS__TERM_TYPE_HARDWARE:
                case PARSE_EVENTS__TERM_TYPE_CPU:
                default:
                        break;
                }
        }
        return 0;
}

/*
 * Add EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for
 * each bit of attr->config that the user has changed.
 */
static int get_config_chgs(struct perf_pmu *pmu, struct parse_events_terms *head_config,
                           struct list_head *head_terms)
{
        struct parse_events_term *term;
        u64 bits = 0;
        int type;

        list_for_each_entry(term, &head_config->terms, list) {
                switch (term->type_term) {
                case PARSE_EVENTS__TERM_TYPE_USER:
                        type = perf_pmu__format_type(pmu, term->config);
                        if (type != PERF_PMU_FORMAT_VALUE_CONFIG)
                                continue;
                        bits |= perf_pmu__format_bits(pmu, term->config);
                        break;
                case PARSE_EVENTS__TERM_TYPE_CONFIG:
                        bits = ~(u64)0;
                        break;
                case PARSE_EVENTS__TERM_TYPE_CONFIG1:
                case PARSE_EVENTS__TERM_TYPE_CONFIG2:
                case PARSE_EVENTS__TERM_TYPE_CONFIG3:
                case PARSE_EVENTS__TERM_TYPE_NAME:
                case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
                case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
                case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
                case PARSE_EVENTS__TERM_TYPE_TIME:
                case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
                case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
                case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
                case PARSE_EVENTS__TERM_TYPE_INHERIT:
                case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
                case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
                case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
                case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
                case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
                case PARSE_EVENTS__TERM_TYPE_PERCORE:
                case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
                case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
                case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
                case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
                case PARSE_EVENTS__TERM_TYPE_RAW:
                case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
                case PARSE_EVENTS__TERM_TYPE_HARDWARE:
                case PARSE_EVENTS__TERM_TYPE_CPU:
                default:
                        break;
                }
        }

        if (bits)
                ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits, false);

#undef ADD_CONFIG_TERM
        return 0;
}

int parse_events_add_tracepoint(struct parse_events_state *parse_state,
                                struct list_head *list,
                                const char *sys, const char *event,
                                struct parse_events_error *err,
                                struct parse_events_terms *head_config, void *loc_)
{
        YYLTYPE *loc = loc_;

        if (head_config) {
                struct perf_event_attr attr;

                if (config_attr(&attr, head_config, err,
                                config_term_tracepoint))
                        return -EINVAL;
        }

        if (strpbrk(sys, "*?"))
                return add_tracepoint_multi_sys(parse_state, list, sys, event,
                                                err, head_config, loc);
        else
                return add_tracepoint_event(parse_state, list, sys, event,
                                            err, head_config, loc);
}

static int __parse_events_add_numeric(struct parse_events_state *parse_state,
                                      struct list_head *list,
                                      struct perf_pmu *pmu, u32 type, u32 extended_type,
                                      u64 config, const struct parse_events_terms *head_config,
                                      struct evsel *first_wildcard_match)
{
        struct perf_event_attr attr;
        LIST_HEAD(config_terms);
        const char *name, *metric_id;
        struct perf_cpu_map *cpus;
        int ret;

        memset(&attr, 0, sizeof(attr));
        attr.type = type;
        attr.config = config;
        if (extended_type && (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE)) {
                assert(perf_pmus__supports_extended_type());
                attr.config |= (u64)extended_type << PERF_PMU_TYPE_SHIFT;
        }

        if (head_config) {
                if (config_attr(&attr, head_config, parse_state->error,
                                config_term_common))
                        return -EINVAL;

                if (get_config_terms(head_config, &config_terms))
                        return -ENOMEM;
        }

        name = get_config_name(head_config);
        metric_id = get_config_metric_id(head_config);
        cpus = get_config_cpu(head_config);
        ret = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true, name,
                          metric_id, pmu, &config_terms, first_wildcard_match,
                          cpus, /*alternate_hw_config=*/PERF_COUNT_HW_MAX) ? 0 : -ENOMEM;
        perf_cpu_map__put(cpus);
        free_config_terms(&config_terms);
        return ret;
}

int parse_events_add_numeric(struct parse_events_state *parse_state,
                             struct list_head *list,
                             u32 type, u64 config,
                             const struct parse_events_terms *head_config,
                             bool wildcard)
{
        struct perf_pmu *pmu = NULL;
        bool found_supported = false;

        /* Wildcards on numeric values are only supported by core PMUs. */
        if (wildcard && perf_pmus__supports_extended_type()) {
                struct evsel *first_wildcard_match = NULL;

                while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
                        int ret;

                        found_supported = true;
                        if (parse_events__filter_pmu(parse_state, pmu))
                                continue;

                        ret = __parse_events_add_numeric(parse_state, list, pmu,
                                                         type, pmu->type,
                                                         config, head_config,
                                                         first_wildcard_match);
                        if (ret)
                                return ret;
                        if (first_wildcard_match == NULL)
                                first_wildcard_match =
                                        container_of(list->prev, struct evsel, core.node);
                }
                if (found_supported)
                        return 0;
        }
        return __parse_events_add_numeric(parse_state, list, perf_pmus__find_by_type(type),
                                          type, /*extended_type=*/0, config, head_config,
                                          /*first_wildcard_match=*/NULL);
}

static bool config_term_percore(struct list_head *config_terms)
{
        struct evsel_config_term *term;

        list_for_each_entry(term, config_terms, list) {
                if (term->type == EVSEL__CONFIG_TERM_PERCORE)
                        return term->val.percore;
        }

        return false;
}

static int parse_events_add_pmu(struct parse_events_state *parse_state,
                                struct list_head *list, struct perf_pmu *pmu,
                                const struct parse_events_terms *const_parsed_terms,
                                struct evsel *first_wildcard_match, u64 alternate_hw_config)
{
        struct perf_event_attr attr;
        struct perf_pmu_info info;
        struct evsel *evsel;
        struct parse_events_error *err = parse_state->error;
        LIST_HEAD(config_terms);
        struct parse_events_terms parsed_terms;
        bool alias_rewrote_terms = false;
        struct perf_cpu_map *term_cpu = NULL;

        if (verbose > 1) {
                struct strbuf sb;

                strbuf_init(&sb, /*hint=*/ 0);
                if (pmu->selectable && const_parsed_terms &&
                    list_empty(&const_parsed_terms->terms)) {
                        strbuf_addf(&sb, "%s//", pmu->name);
                } else {
                        strbuf_addf(&sb, "%s/", pmu->name);
                        parse_events_terms__to_strbuf(const_parsed_terms, &sb);
                        strbuf_addch(&sb, '/');
                }
                fprintf(stderr, "Attempt to add: %s\n", sb.buf);
                strbuf_release(&sb);
        }

        memset(&attr, 0, sizeof(attr));
        if (pmu->perf_event_attr_init_default)
                pmu->perf_event_attr_init_default(pmu, &attr);

        attr.type = pmu->type;

        if (!const_parsed_terms || list_empty(&const_parsed_terms->terms)) {
                evsel = __add_event(list, &parse_state->idx, &attr,
                                    /*init_attr=*/true, /*name=*/NULL,
                                    /*metric_id=*/NULL, pmu,
                                    /*config_terms=*/NULL, first_wildcard_match,
                                    /*cpu_list=*/NULL, alternate_hw_config);
                return evsel ? 0 : -ENOMEM;
        }

        parse_events_terms__init(&parsed_terms);
        if (const_parsed_terms) {
                int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms);

                if (ret)
                        return ret;
        }
        fix_raw(&parsed_terms, pmu);

        /* Configure attr/terms with a known PMU, this will set hardcoded terms. */
        if (config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) {
                parse_events_terms__exit(&parsed_terms);
                return -EINVAL;
        }

        /* Look for event names in the terms and rewrite into format based terms. */
        if (perf_pmu__check_alias(pmu, &parsed_terms,
                                  &info, &alias_rewrote_terms,
                                  &alternate_hw_config, err)) {
                parse_events_terms__exit(&parsed_terms);
                return -EINVAL;
        }

        if (verbose > 1) {
                struct strbuf sb;

                strbuf_init(&sb, /*hint=*/ 0);
                parse_events_terms__to_strbuf(&parsed_terms, &sb);
                fprintf(stderr, "..after resolving event: %s/%s/\n", pmu->name, sb.buf);
                strbuf_release(&sb);
        }

        /* Configure attr/terms again if an alias was expanded. */
        if (alias_rewrote_terms &&
            config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) {
                parse_events_terms__exit(&parsed_terms);
                return -EINVAL;
        }

        if (get_config_terms(&parsed_terms, &config_terms)) {
                parse_events_terms__exit(&parsed_terms);
                return -ENOMEM;
        }

        /*
         * When using default config, record which bits of attr->config were
         * changed by the user.
         */
        if (pmu->perf_event_attr_init_default &&
            get_config_chgs(pmu, &parsed_terms, &config_terms)) {
                parse_events_terms__exit(&parsed_terms);
                return -ENOMEM;
        }

        /* Skip configuring hard coded terms that were applied by config_attr. */
        if (perf_pmu__config(pmu, &attr, &parsed_terms, /*apply_hardcoded=*/false,
                             parse_state->error)) {
                free_config_terms(&config_terms);
                parse_events_terms__exit(&parsed_terms);
                return -EINVAL;
        }

        term_cpu = get_config_cpu(&parsed_terms);
        evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true,
                            get_config_name(&parsed_terms),
                            get_config_metric_id(&parsed_terms), pmu,
                            &config_terms, first_wildcard_match, term_cpu, alternate_hw_config);
        perf_cpu_map__put(term_cpu);
        if (!evsel) {
                parse_events_terms__exit(&parsed_terms);
                return -ENOMEM;
        }

        if (evsel->name)
                evsel->use_config_name = true;

        evsel->percore = config_term_percore(&evsel->config_terms);

        parse_events_terms__exit(&parsed_terms);
        free((char *)evsel->unit);
        evsel->unit = strdup(info.unit);
        evsel->scale = info.scale;
        evsel->per_pkg = info.per_pkg;
        evsel->snapshot = info.snapshot;
        evsel->retirement_latency.mean = info.retirement_latency_mean;
        evsel->retirement_latency.min = info.retirement_latency_min;
        evsel->retirement_latency.max = info.retirement_latency_max;

        return 0;
}

int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
                               const char *event_name, u64 hw_config,
                               const struct parse_events_terms *const_parsed_terms,
                               struct list_head **listp, void *loc_)
{
        struct parse_events_term *term;
        struct list_head *list = NULL;
        struct perf_pmu *pmu = NULL;
        YYLTYPE *loc = loc_;
        int ok = 0;
        const char *config;
        struct parse_events_terms parsed_terms;
        struct evsel *first_wildcard_match = NULL;

        *listp = NULL;

        parse_events_terms__init(&parsed_terms);
        if (const_parsed_terms) {
                int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms);

                if (ret)
                        return ret;
        }

        config = strdup(event_name);
        if (!config)
                goto out_err;

        if (parse_events_term__num(&term,
                                   PARSE_EVENTS__TERM_TYPE_USER,
                                   config, /*num=*/1, /*novalue=*/true,
                                   loc, /*loc_val=*/NULL) < 0) {
                zfree(&config);
                goto out_err;
        }
        list_add_tail(&term->list, &parsed_terms.terms);

        /* Add it for all PMUs that support the alias */
        list = malloc(sizeof(struct list_head));
        if (!list)
                goto out_err;

        INIT_LIST_HEAD(list);

        while ((pmu = perf_pmus__scan(pmu)) != NULL) {
                if (parse_events__filter_pmu(parse_state, pmu))
                        continue;

                if (!perf_pmu__have_event(pmu, event_name))
                        continue;

                if (!parse_events_add_pmu(parse_state, list, pmu,
                                          &parsed_terms, first_wildcard_match, hw_config)) {
                        struct strbuf sb;

                        strbuf_init(&sb, /*hint=*/ 0);
                        parse_events_terms__to_strbuf(&parsed_terms, &sb);
                        pr_debug("%s -> %s/%s/\n", event_name, pmu->name, sb.buf);
                        strbuf_release(&sb);
                        ok++;
                }
                if (first_wildcard_match == NULL)
                        first_wildcard_match = container_of(list->prev, struct evsel, core.node);
        }

        if (parse_state->fake_pmu) {
                if (!parse_events_add_pmu(parse_state, list, perf_pmus__fake_pmu(), &parsed_terms,
                                          first_wildcard_match, hw_config)) {
                        struct strbuf sb;

                        strbuf_init(&sb, /*hint=*/ 0);
                        parse_events_terms__to_strbuf(&parsed_terms, &sb);
                        pr_debug("%s -> fake/%s/\n", event_name, sb.buf);
                        strbuf_release(&sb);
                        ok++;
                }
        }

out_err:
        parse_events_terms__exit(&parsed_terms);
        if (ok)
                *listp = list;
        else
                free(list);

        return ok ? 0 : -1;
}

int parse_events_multi_pmu_add_or_add_pmu(struct parse_events_state *parse_state,
                                          const char *event_or_pmu,
                                          const struct parse_events_terms *const_parsed_terms,
                                          struct list_head **listp,
                                          void *loc_)
{
        YYLTYPE *loc = loc_;
        struct perf_pmu *pmu;
        int ok = 0;
        char *help;
        struct evsel *first_wildcard_match = NULL;

        *listp = malloc(sizeof(**listp));
        if (!*listp)
                return -ENOMEM;

        INIT_LIST_HEAD(*listp);

        /* Attempt to add to list assuming event_or_pmu is a PMU name. */
        pmu = perf_pmus__find(event_or_pmu);
        if (pmu && !parse_events_add_pmu(parse_state, *listp, pmu, const_parsed_terms,
                                         first_wildcard_match,
                                         /*alternate_hw_config=*/PERF_COUNT_HW_MAX))
                return 0;

        if (parse_state->fake_pmu) {
                if (!parse_events_add_pmu(parse_state, *listp, perf_pmus__fake_pmu(),
                                          const_parsed_terms,
                                          first_wildcard_match,
                                          /*alternate_hw_config=*/PERF_COUNT_HW_MAX))
                        return 0;
        }

        pmu = NULL;
        /* Failed to add, try wildcard expansion of event_or_pmu as a PMU name. */
        while ((pmu = perf_pmus__scan(pmu)) != NULL) {
                if (!parse_events__filter_pmu(parse_state, pmu) &&
                    perf_pmu__wildcard_match(pmu, event_or_pmu)) {
                        if (!parse_events_add_pmu(parse_state, *listp, pmu,
                                                  const_parsed_terms,
                                                  first_wildcard_match,
                                                  /*alternate_hw_config=*/PERF_COUNT_HW_MAX)) {
                                ok++;
                                parse_state->wild_card_pmus = true;
                        }
                        if (first_wildcard_match == NULL)
                                first_wildcard_match =
                                        container_of((*listp)->prev, struct evsel, core.node);
                }
        }
        if (ok)
                return 0;

        /* Failure to add, assume event_or_pmu is an event name. */
        zfree(listp);
        if (!parse_events_multi_pmu_add(parse_state, event_or_pmu, PERF_COUNT_HW_MAX,
                                        const_parsed_terms, listp, loc))
                return 0;

        if (asprintf(&help, "Unable to find PMU or event on a PMU of '%s'", event_or_pmu) < 0)
                help = NULL;
        parse_events_error__handle(parse_state->error, loc->first_column,
                                   strdup("Bad event or PMU"),
                                   help);
        zfree(listp);
        return -EINVAL;
}

void parse_events__set_leader(char *name, struct list_head *list)
{
        struct evsel *leader;

        if (list_empty(list)) {
                WARN_ONCE(true, "WARNING: failed to set leader: empty list");
                return;
        }

        leader = list_first_entry(list, struct evsel, core.node);
        __perf_evlist__set_leader(list, &leader->core);
        zfree(&leader->group_name);
        leader->group_name = name;
}

static int parse_events__modifier_list(struct parse_events_state *parse_state,
                                       YYLTYPE *loc,
                                       struct list_head *list,
                                       struct parse_events_modifier mod,
                                       bool group)
{
        struct evsel *evsel;

        if (!group && mod.weak) {
                parse_events_error__handle(parse_state->error, loc->first_column,
                                           strdup("Weak modifier is for use with groups"), NULL);
                return -EINVAL;
        }

        __evlist__for_each_entry(list, evsel) {
                /* Translate modifiers into the equivalent evsel excludes. */
                int eu = group ? evsel->core.attr.exclude_user : 0;
                int ek = group ? evsel->core.attr.exclude_kernel : 0;
                int eh = group ? evsel->core.attr.exclude_hv : 0;
                int eH = group ? evsel->core.attr.exclude_host : 0;
                int eG = group ? evsel->core.attr.exclude_guest : 0;
                int exclude = eu | ek | eh;
                int exclude_GH = group ? evsel->exclude_GH : 0;

                if (mod.user) {
                        if (!exclude)
                                exclude = eu = ek = eh = 1;
                        if (!exclude_GH && !perf_guest && exclude_GH_default)
                                eG = 1;
                        eu = 0;
                }
                if (mod.kernel) {
                        if (!exclude)
                                exclude = eu = ek = eh = 1;
                        ek = 0;
                }
                if (mod.hypervisor) {
                        if (!exclude)
                                exclude = eu = ek = eh = 1;
                        eh = 0;
                }
                if (mod.guest) {
                        if (!exclude_GH)
                                exclude_GH = eG = eH = 1;
                        eG = 0;
                }
                if (mod.host) {
                        if (!exclude_GH)
                                exclude_GH = eG = eH = 1;
                        eH = 0;
                }
                evsel->core.attr.exclude_user = eu;
                evsel->core.attr.exclude_kernel = ek;
                evsel->core.attr.exclude_hv = eh;
                evsel->core.attr.exclude_host = eH;
                evsel->core.attr.exclude_guest = eG;
                evsel->exclude_GH = exclude_GH;

                /* Simple modifiers copied to the evsel. */
                if (mod.precise) {
                        u8 precise = evsel->core.attr.precise_ip + mod.precise;
                        /*
                         * precise ip:
                         *
                         *  0 - SAMPLE_IP can have arbitrary skid
                         *  1 - SAMPLE_IP must have constant skid
                         *  2 - SAMPLE_IP requested to have 0 skid
                         *  3 - SAMPLE_IP must have 0 skid
                         *
                         * See also PERF_RECORD_MISC_EXACT_IP
                         */
1881 if (precise > 3) {
1882 char *help;
1883
1884 if (asprintf(&help,
1885 "Maximum combined precise value is 3, adding precision to \"%s\"",
1886 evsel__name(evsel)) > 0) {
1887 parse_events_error__handle(parse_state->error,
1888 loc->first_column,
1889 help, NULL);
1890 }
1891 return -EINVAL;
1892 }
1893 evsel->core.attr.precise_ip = precise;
1894 }
1895 if (mod.precise_max)
1896 evsel->precise_max = 1;
1897 if (mod.non_idle)
1898 evsel->core.attr.exclude_idle = 1;
1899 if (mod.sample_read)
1900 evsel->sample_read = 1;
1901 if (mod.pinned && evsel__is_group_leader(evsel))
1902 evsel->core.attr.pinned = 1;
1903 if (mod.exclusive && evsel__is_group_leader(evsel))
1904 evsel->core.attr.exclusive = 1;
1905 if (mod.weak)
1906 evsel->weak_group = true;
1907 if (mod.bpf)
1908 evsel->bpf_counter = true;
1909 if (mod.retire_lat)
1910 evsel->retire_lat = true;
1911 }
1912 return 0;
1913}
1914
1915int parse_events__modifier_group(struct parse_events_state *parse_state, void *loc,
1916 struct list_head *list,
1917 struct parse_events_modifier mod)
1918{
1919 return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/true);
1920}
1921
1922int parse_events__modifier_event(struct parse_events_state *parse_state, void *loc,
1923 struct list_head *list,
1924 struct parse_events_modifier mod)
1925{
1926 return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/false);
1927}
1928
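/*
 * Give @name to every evsel in @list that doesn't already have a name. The
 * first unnamed evsel takes ownership of @name; later ones get a strdup()ed
 * copy. If nothing used the name, it is freed here.
 */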
int parse_events__set_default_name(struct list_head *list, char *name)
{
	struct evsel *evsel;
	bool used_name = false;

	__evlist__for_each_entry(list, evsel) {
		if (!evsel->name) {
			evsel->name = used_name ? strdup(name) : name;
			used_name = true;
			if (!evsel->name)
				return -ENOMEM;
		}
	}
	if (!used_name)
		free(name);
	return 0;
}

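/*
 * Run the flex/bison event parser over either @str or the stream @input.
 * parse_state->stoken selects the grammar entry point (PE_START_EVENTS or
 * PE_START_TERMS).
 */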
static int parse_events__scanner(const char *str,
				 FILE *input,
				 struct parse_events_state *parse_state)
{
	YY_BUFFER_STATE buffer;
	void *scanner;
	int ret;

	ret = parse_events_lex_init_extra(parse_state, &scanner);
	if (ret)
		return ret;

	if (str)
		buffer = parse_events__scan_string(str, scanner);
	else
		parse_events_set_in(input, scanner);

#ifdef PARSER_DEBUG
	parse_events_debug = 1;
	parse_events_set_debug(1, scanner);
#endif
	ret = parse_events_parse(parse_state, scanner);

	if (str) {
		parse_events__flush_buffer(buffer, scanner);
		parse_events__delete_buffer(buffer, scanner);
	}
	parse_events_lex_destroy(scanner);
	return ret;
}

/*
 * Parse an event config string and return a list of event terms.
 */
int parse_events_terms(struct parse_events_terms *terms, const char *str, FILE *input)
{
	struct parse_events_state parse_state = {
		.terms  = NULL,
		.stoken = PE_START_TERMS,
	};
	int ret;

	ret = parse_events__scanner(str, input, &parse_state);
	if (!ret)
		list_splice(&parse_state.terms->terms, &terms->terms);

	zfree(&parse_state.terms);
	return ret;
}

static int evsel__compute_group_pmu_name(struct evsel *evsel,
					 const struct list_head *head)
{
	struct evsel *leader = evsel__leader(evsel);
	struct evsel *pos;
	const char *group_pmu_name;
	struct perf_pmu *pmu = evsel__find_pmu(evsel);

	if (!pmu) {
		/*
		 * For PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE types the PMU
		 * is a core PMU, but in heterogeneous systems this is
		 * unknown. For now pick the first core PMU.
		 */
		pmu = perf_pmus__scan_core(NULL);
	}
	if (!pmu) {
		pr_debug("No PMU found for '%s'\n", evsel__name(evsel));
		return -EINVAL;
	}
	group_pmu_name = pmu->name;
	/*
	 * Software events may be in a group with other uncore PMU events. Use
	 * the pmu_name of the first non-software event to avoid breaking the
	 * software event out of the group.
	 *
	 * Aux event leaders, like intel_pt, expect a group with events from
	 * other PMUs, so substitute the AUX event's PMU in this case.
	 */
	if (perf_pmu__is_software(pmu) || evsel__is_aux_event(leader)) {
		struct perf_pmu *leader_pmu = evsel__find_pmu(leader);

		if (!leader_pmu) {
			/* As with determining pmu above. */
			leader_pmu = perf_pmus__scan_core(NULL);
		}
		/*
		 * Starting with the leader, find the first event with a named
		 * non-software PMU. for_each_group_(member|evsel) isn't used as
		 * the list isn't yet sorted to put evsels in the same group
		 * together.
		 */
		if (leader_pmu && !perf_pmu__is_software(leader_pmu)) {
			group_pmu_name = leader_pmu->name;
		} else if (leader->core.nr_members > 1) {
			list_for_each_entry(pos, head, core.node) {
				struct perf_pmu *pos_pmu;

				if (pos == leader || evsel__leader(pos) != leader)
					continue;
				pos_pmu = evsel__find_pmu(pos);
				if (!pos_pmu) {
					/* As with determining pmu above. */
					pos_pmu = perf_pmus__scan_core(NULL);
				}
				if (pos_pmu && !perf_pmu__is_software(pos_pmu)) {
					group_pmu_name = pos_pmu->name;
					break;
				}
			}
		}
	}
	/* Record computed name. */
	evsel->group_pmu_name = strdup(group_pmu_name);
	return evsel->group_pmu_name ? 0 : -ENOMEM;
}

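/*
 * Default comparison for sorting evsels; architectures may override this,
 * e.g. to keep Intel topdown events in their required order.
 */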
__weak int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs)
{
	/* Order by insertion index. */
	return lhs->core.idx - rhs->core.idx;
}

static int evlist__cmp(void *_fg_idx, const struct list_head *l, const struct list_head *r)
{
	const struct perf_evsel *lhs_core = container_of(l, struct perf_evsel, node);
	const struct evsel *lhs = container_of(lhs_core, struct evsel, core);
	const struct perf_evsel *rhs_core = container_of(r, struct perf_evsel, node);
	const struct evsel *rhs = container_of(rhs_core, struct evsel, core);
	int *force_grouped_idx = _fg_idx;
	int lhs_sort_idx, rhs_sort_idx, ret;
	const char *lhs_pmu_name, *rhs_pmu_name;

	/*
	 * Get the indexes of the 2 events to sort. If the events are
	 * in groups then the leader's index is used, otherwise the
	 * event's index is used. An index may be forced for events that
	 * must be in the same group, namely Intel topdown events.
	 */
	if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(lhs)) {
		lhs_sort_idx = *force_grouped_idx;
	} else {
		bool lhs_has_group = lhs_core->leader != lhs_core || lhs_core->nr_members > 1;

		lhs_sort_idx = lhs_has_group ? lhs_core->leader->idx : lhs_core->idx;
	}
	if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(rhs)) {
		rhs_sort_idx = *force_grouped_idx;
	} else {
		bool rhs_has_group = rhs_core->leader != rhs_core || rhs_core->nr_members > 1;

		rhs_sort_idx = rhs_has_group ? rhs_core->leader->idx : rhs_core->idx;
	}

	/* If the indices differ then respect the insertion order. */
	if (lhs_sort_idx != rhs_sort_idx)
		return lhs_sort_idx - rhs_sort_idx;

	/*
	 * Ignoring forcing, lhs_sort_idx == rhs_sort_idx means lhs and rhs
	 * should be in the same group. Events in the same group need to be
	 * ordered by their grouping PMU name as the group will be broken to
	 * ensure only events on the same PMU are programmed together.
	 *
	 * With forcing, lhs_sort_idx == rhs_sort_idx means that one or both
	 * events are being forced to be at force_grouped_idx. If only one
	 * event is being forced then the other event is the group leader of
	 * the group we're trying to force the event into. Ensure for the
	 * force grouped case that the PMU name ordering is also respected.
	 */
	lhs_pmu_name = lhs->group_pmu_name;
	rhs_pmu_name = rhs->group_pmu_name;
	ret = strcmp(lhs_pmu_name, rhs_pmu_name);
	if (ret)
		return ret;

	/*
	 * Architecture-specific sorting: by default, sort events in the same
	 * group with the same PMU by their insertion index. On Intel, topdown
	 * constraints must be adhered to - slots first, etc.
	 */
	return arch_evlist__cmp(lhs, rhs);
}

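/*
 * Sort the event list so that events on the same PMU that must share a group
 * are adjacent, then rebuild group leaders and member counts. Returns 1 if
 * the order or grouping changed, 0 if untouched, negative on error.
 */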
static int parse_events__sort_events_and_fix_groups(struct list_head *list)
{
	int idx = 0, force_grouped_idx = -1;
	struct evsel *pos, *cur_leader = NULL;
	struct perf_evsel *cur_leaders_grp = NULL;
	bool idx_changed = false;
	int orig_num_leaders = 0, num_leaders = 0;
	int ret;
	struct evsel *force_grouped_leader = NULL;
	bool last_event_was_forced_leader = false;

	/*
	 * First pass: compute each event's group PMU name, renumber insertion
	 * indices sequentially and note the index that force-grouped events
	 * should be sorted to.
	 */
	list_for_each_entry(pos, list, core.node) {
		const struct evsel *pos_leader = evsel__leader(pos);

		ret = evsel__compute_group_pmu_name(pos, list);
		if (ret)
			return ret;

		if (pos == pos_leader)
			orig_num_leaders++;

		/*
		 * Ensure indexes are sequential, in particular for multiple
		 * event lists being merged. The indexes are used to detect when
		 * the user order is modified.
		 */
		pos->core.idx = idx++;

		/*
		 * Remember an index that all force-grouped events will be
		 * sorted to. Use the group leader's index as some events
		 * must appear first within the group.
		 */
		if (force_grouped_idx == -1 && arch_evsel__must_be_in_group(pos))
			force_grouped_idx = pos_leader->core.idx;
	}

	/* Sort events. */
	list_sort(&force_grouped_idx, list, evlist__cmp);

	/*
	 * Recompute groups, splitting for PMUs and adding groups for events
	 * that require them.
	 */
	idx = 0;
	list_for_each_entry(pos, list, core.node) {
		const struct evsel *pos_leader = evsel__leader(pos);
		const char *pos_pmu_name = pos->group_pmu_name;
		const char *cur_leader_pmu_name;
		bool pos_force_grouped = force_grouped_idx != -1 &&
					 arch_evsel__must_be_in_group(pos);

		/* Reset index and nr_members. */
		if (pos->core.idx != idx)
			idx_changed = true;
		pos->core.idx = idx++;
		pos->core.nr_members = 0;

		/*
		 * Set the group leader respecting the given groupings and that
		 * groups can't span PMUs.
		 */
		if (!cur_leader) {
			cur_leader = pos;
			cur_leaders_grp = &pos->core;
			if (pos_force_grouped)
				force_grouped_leader = pos;
		}

		cur_leader_pmu_name = cur_leader->group_pmu_name;
		if (strcmp(cur_leader_pmu_name, pos_pmu_name)) {
			/* PMU changed so the group/leader must change. */
			cur_leader = pos;
			cur_leaders_grp = pos->core.leader;
			if (pos_force_grouped && force_grouped_leader == NULL)
				force_grouped_leader = pos;
		} else if (cur_leaders_grp != pos->core.leader) {
			bool split_even_if_last_leader_was_forced = true;

			/*
			 * Event is for a different group. If the last event was
			 * the forced group leader then subsequent group events
			 * and forced events should be in the same group. If
			 * there are no other forced group events then the
			 * forced group leader wasn't really being forced into a
			 * group, it just set arch_evsel__must_be_in_group, and
			 * we don't want the group to split here.
			 */
			if (force_grouped_idx != -1 && last_event_was_forced_leader) {
				struct evsel *pos2 = pos;
				/*
				 * Search the whole list as the group leaders
				 * aren't currently valid.
				 */
				list_for_each_entry_continue(pos2, list, core.node) {
					if (pos->core.leader == pos2->core.leader &&
					    arch_evsel__must_be_in_group(pos2)) {
						split_even_if_last_leader_was_forced = false;
						break;
					}
				}
			}
			if (!last_event_was_forced_leader || split_even_if_last_leader_was_forced) {
				if (pos_force_grouped) {
					if (force_grouped_leader) {
						cur_leader = force_grouped_leader;
						cur_leaders_grp = force_grouped_leader->core.leader;
					} else {
						cur_leader = force_grouped_leader = pos;
						cur_leaders_grp = &pos->core;
					}
				} else {
					cur_leader = pos;
					cur_leaders_grp = pos->core.leader;
				}
			}
		}
		if (pos_leader != cur_leader) {
			/* The leader changed so update it. */
			evsel__set_leader(pos, cur_leader);
		}
		last_event_was_forced_leader = (force_grouped_leader == pos);
	}
	list_for_each_entry(pos, list, core.node) {
		struct evsel *pos_leader = evsel__leader(pos);

		if (pos == pos_leader)
			num_leaders++;
		pos_leader->core.nr_members++;
	}
	return (idx_changed || num_leaders != orig_num_leaders) ? 1 : 0;
}

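/*
 * Parse @str into evsels appended to @evlist. @fake_pmu and @fake_tp allow
 * testing of PMU and tracepoint events that may not exist on this machine,
 * @pmu_filter optionally restricts events to a single PMU name, and
 * @warn_if_reordered controls the warning printed when events are regrouped.
 */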
int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filter,
		   struct parse_events_error *err, bool fake_pmu,
		   bool warn_if_reordered, bool fake_tp)
{
	struct parse_events_state parse_state = {
		.list = LIST_HEAD_INIT(parse_state.list),
		.idx = evlist->core.nr_entries,
		.error = err,
		.stoken = PE_START_EVENTS,
		.fake_pmu = fake_pmu,
		.fake_tp = fake_tp,
		.pmu_filter = pmu_filter,
		.match_legacy_cache_terms = true,
	};
	int ret, ret2;

	ret = parse_events__scanner(str, /*input=*/NULL, &parse_state);

	if (!ret && list_empty(&parse_state.list)) {
		WARN_ONCE(true, "WARNING: event parser found nothing\n");
		return -1;
	}

	ret2 = parse_events__sort_events_and_fix_groups(&parse_state.list);
	if (ret2 < 0)
		return ret;

	/*
	 * Add list to the evlist even with errors to allow callers to clean up.
	 */
	evlist__splice_list_tail(evlist, &parse_state.list);

	if (ret2 && warn_if_reordered && !parse_state.wild_card_pmus) {
		pr_warning("WARNING: events were regrouped to match PMUs\n");

		if (verbose > 0) {
			struct strbuf sb = STRBUF_INIT;

			evlist__uniquify_evsel_names(evlist, &stat_config);
			evlist__format_evsels(evlist, &sb, 2048);
			pr_debug("evlist after sorting/fixing: '%s'\n", sb.buf);
			strbuf_release(&sb);
		}
	}
	if (!ret) {
		struct evsel *last;

		last = evlist__last(evlist);
		last->cmdline_group_boundary = true;

		return 0;
	}

	/*
	 * There are two users - builtin-record and builtin-test objects.
	 * Both call evlist__delete in case of error, so we don't
	 * need to bother.
	 */
	return ret;
}

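/*
 * Convenience wrapper around parse_events() for a single event string,
 * e.g. parse_event(evlist, "cycles").
 */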
int parse_event(struct evlist *evlist, const char *str)
{
	struct parse_events_error err;
	int ret;

	parse_events_error__init(&err);
	ret = parse_events(evlist, str, &err);
	parse_events_error__exit(&err);
	return ret;
}

struct parse_events_error_entry {
	/** @list: The list the error is part of. */
	struct list_head list;
	/** @idx: index in the parsed string */
	int idx;
	/** @str: string to display at the index */
	char *str;
	/** @help: optional help string */
	char *help;
};

void parse_events_error__init(struct parse_events_error *err)
{
	INIT_LIST_HEAD(&err->list);
}

void parse_events_error__exit(struct parse_events_error *err)
{
	struct parse_events_error_entry *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, &err->list, list) {
		zfree(&pos->str);
		zfree(&pos->help);
		list_del_init(&pos->list);
		free(pos);
	}
}

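/*
 * Queue an error for later printing. Takes ownership of @str and @help; both
 * are freed here if the entry can't be recorded.
 */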
void parse_events_error__handle(struct parse_events_error *err, int idx,
				char *str, char *help)
{
	struct parse_events_error_entry *entry;

	if (WARN(!str || !err, "WARNING: failed to provide error string or struct\n"))
		goto out_free;

	entry = zalloc(sizeof(*entry));
	if (!entry) {
		pr_err("Failed to allocate memory for event parsing error: %s (%s)\n",
		       str, help ?: "<no help>");
		goto out_free;
	}
	entry->idx = idx;
	entry->str = str;
	entry->help = help;
	list_add(&entry->list, &err->list);
	return;
out_free:
	free(str);
	free(help);
}

#define MAX_WIDTH 1000
static int get_term_width(void)
{
	struct winsize ws;

	get_term_dimensions(&ws);
	return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col;
}

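/*
 * Print one queued error. For example (illustrative), a parse error at
 * index 7 of "cycles:x" renders roughly as:
 *
 *   event syntax error: 'cycles:x'
 *                              \___ parser error
 */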
static void __parse_events_error__print(int err_idx, const char *err_str,
					const char *err_help, const char *event)
{
	const char *str = "invalid or unsupported event: ";
	char _buf[MAX_WIDTH];
	char *buf = (char *) event;
	int idx = 0;

	if (err_str) {
		/* -2 for extra '' in the final fprintf. */
		int width = get_term_width() - 2;
		int len_event = strlen(event);
		int len_str, max_len, cut = 0;

		/*
		 * Maximum error index indent; we will cut
		 * the event string if it's bigger.
		 */
		int max_err_idx = 13;

		/*
		 * Let's be specific with the message when
		 * we have the precise error.
		 */
		str = "event syntax error: ";
		len_str = strlen(str);
		max_len = width - len_str;

		buf = _buf;

		/* We're cutting from the beginning. */
		if (err_idx > max_err_idx)
			cut = err_idx - max_err_idx;

		strncpy(buf, event + cut, max_len);

		/* Mark cut parts with '..' on both sides. */
		if (cut)
			buf[0] = buf[1] = '.';

		if ((len_event - cut) > max_len) {
			buf[max_len - 1] = buf[max_len - 2] = '.';
			buf[max_len] = 0;
		}

		idx = len_str + err_idx - cut;
	}

	fprintf(stderr, "%s'%s'\n", str, buf);
	if (idx) {
		fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err_str);
		if (err_help)
			fprintf(stderr, "\n%s\n", err_help);
	}
}

void parse_events_error__print(const struct parse_events_error *err,
			       const char *event)
{
	struct parse_events_error_entry *pos;
	bool first = true;

	list_for_each_entry(pos, &err->list, list) {
		if (!first)
			fputs("\n", stderr);
		__parse_events_error__print(pos->idx, pos->str, pos->help, event);
		first = false;
	}
}

/*
 * In the list of errors err, do any of the error strings (str) contain the
 * given needle string?
 */
bool parse_events_error__contains(const struct parse_events_error *err,
				  const char *needle)
{
	struct parse_events_error_entry *pos;

	list_for_each_entry(pos, &err->list, list) {
		if (strstr(pos->str, needle) != NULL)
			return true;
	}
	return false;
}

#undef MAX_WIDTH

int parse_events_option(const struct option *opt, const char *str,
			int unset __maybe_unused)
{
	struct parse_events_option_args *args = opt->value;
	struct parse_events_error err;
	int ret;

	parse_events_error__init(&err);
	ret = __parse_events(*args->evlistp, str, args->pmu_filter, &err,
			     /*fake_pmu=*/false, /*warn_if_reordered=*/true,
			     /*fake_tp=*/false);

	if (ret) {
		parse_events_error__print(&err, str);
		fprintf(stderr, "Run 'perf list' for a list of valid events\n");
	}
	parse_events_error__exit(&err);

	return ret;
}

int parse_events_option_new_evlist(const struct option *opt, const char *str, int unset)
{
	struct parse_events_option_args *args = opt->value;
	int ret;

	if (*args->evlistp == NULL) {
		*args->evlistp = evlist__new();

		if (*args->evlistp == NULL) {
			fprintf(stderr, "Not enough memory to create evlist\n");
			return -1;
		}
	}
	ret = parse_events_option(opt, str, unset);
	if (ret) {
		evlist__delete(*args->evlistp);
		*args->evlistp = NULL;
	}

	return ret;
}

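/*
 * Walk backwards over the evsels belonging to the most recently parsed '-e'
 * argument, calling @func on each and stopping at the previous command-line
 * group boundary.
 */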
static int
foreach_evsel_in_last_glob(struct evlist *evlist,
			   int (*func)(struct evsel *evsel,
				       const void *arg),
			   const void *arg)
{
	struct evsel *last = NULL;
	int err;

	/*
	 * Don't return when the list is empty; give func a chance to report
	 * an error when it finds last == NULL.
	 *
	 * So no need to WARN here, let *func do this.
	 */
	if (evlist->core.nr_entries > 0)
		last = evlist__last(evlist);

	do {
		err = (*func)(last, arg);
		if (err)
			return -1;
		if (!last)
			return 0;

		if (last->core.node.prev == &evlist->core.entries)
			return 0;
		last = list_entry(last->core.node.prev, struct evsel, core.node);
	} while (!last->cmdline_group_boundary);

	return 0;
}

static int set_filter(struct evsel *evsel, const void *arg)
{
	const char *str = arg;
	bool found = false;
	int nr_addr_filters = 0;
	struct perf_pmu *pmu = NULL;

	if (evsel == NULL) {
		fprintf(stderr,
			"--filter option should follow a -e tracepoint or HW tracer option\n");
		return -1;
	}

	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
		if (evsel__append_tp_filter(evsel, str) < 0) {
			fprintf(stderr,
				"not enough memory to hold filter string\n");
			return -1;
		}

		return 0;
	}

	while ((pmu = perf_pmus__scan(pmu)) != NULL)
		if (pmu->type == evsel->core.attr.type) {
			found = true;
			break;
		}

	if (found)
		perf_pmu__scan_file(pmu, "nr_addr_filters",
				    "%d", &nr_addr_filters);

	if (!nr_addr_filters)
		return perf_bpf_filter__parse(&evsel->bpf_filters, str);

	if (evsel__append_addr_filter(evsel, str) < 0) {
		fprintf(stderr,
			"not enough memory to hold filter string\n");
		return -1;
	}

	return 0;
}

int parse_filter(const struct option *opt, const char *str,
		 int unset __maybe_unused)
{
	struct evlist *evlist = *(struct evlist **)opt->value;

	return foreach_evsel_in_last_glob(evlist, set_filter,
					  (const void *)str);
}

static int add_exclude_perf_filter(struct evsel *evsel,
				   const void *arg __maybe_unused)
{
	char new_filter[64];

	if (evsel == NULL || evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
		fprintf(stderr,
			"--exclude-perf option should follow a -e tracepoint option\n");
		return -1;
	}

	snprintf(new_filter, sizeof(new_filter), "common_pid != %d", getpid());

	if (evsel__append_tp_filter(evsel, new_filter) < 0) {
		fprintf(stderr,
			"not enough memory to hold filter string\n");
		return -1;
	}

	return 0;
}

int exclude_perf(const struct option *opt,
		 const char *arg __maybe_unused,
		 int unset __maybe_unused)
{
	struct evlist *evlist = *(struct evlist **)opt->value;

	return foreach_evsel_in_last_glob(evlist, add_exclude_perf_filter,
					  NULL);
}

int parse_events__is_hardcoded_term(struct parse_events_term *term)
{
	return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
}

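/*
 * Allocate a term from the template @temp. For string terms, ownership of
 * @str passes to the new term; @str is unused for numeric terms.
 */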
static int new_term(struct parse_events_term **_term,
		    struct parse_events_term *temp,
		    char *str, u64 num)
{
	struct parse_events_term *term;

	term = malloc(sizeof(*term));
	if (!term)
		return -ENOMEM;

	*term = *temp;
	INIT_LIST_HEAD(&term->list);
	term->weak = false;

	switch (term->type_val) {
	case PARSE_EVENTS__TERM_TYPE_NUM:
		term->val.num = num;
		break;
	case PARSE_EVENTS__TERM_TYPE_STR:
		term->val.str = str;
		break;
	default:
		free(term);
		return -EINVAL;
	}

	*_term = term;
	return 0;
}

int parse_events_term__num(struct parse_events_term **term,
			   enum parse_events__term_type type_term,
			   const char *config, u64 num,
			   bool no_value,
			   void *loc_term_, void *loc_val_)
{
	YYLTYPE *loc_term = loc_term_;
	YYLTYPE *loc_val = loc_val_;

	struct parse_events_term temp = {
		.type_val = PARSE_EVENTS__TERM_TYPE_NUM,
		.type_term = type_term,
		.config = config ? : strdup(parse_events__term_type_str(type_term)),
		.no_value = no_value,
		.err_term = loc_term ? loc_term->first_column : 0,
		.err_val = loc_val ? loc_val->first_column : 0,
	};

	return new_term(term, &temp, /*str=*/NULL, num);
}

int parse_events_term__str(struct parse_events_term **term,
			   enum parse_events__term_type type_term,
			   char *config, char *str,
			   void *loc_term_, void *loc_val_)
{
	YYLTYPE *loc_term = loc_term_;
	YYLTYPE *loc_val = loc_val_;

	struct parse_events_term temp = {
		.type_val = PARSE_EVENTS__TERM_TYPE_STR,
		.type_term = type_term,
		.config = config,
		.err_term = loc_term ? loc_term->first_column : 0,
		.err_val = loc_val ? loc_val->first_column : 0,
	};

	return new_term(term, &temp, str, /*num=*/0);
}

int parse_events_term__term(struct parse_events_term **term,
			    enum parse_events__term_type term_lhs,
			    enum parse_events__term_type term_rhs,
			    void *loc_term, void *loc_val)
{
	return parse_events_term__str(term, term_lhs, NULL,
				      strdup(parse_events__term_type_str(term_rhs)),
				      loc_term, loc_val);
}

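/*
 * Deep-copy @term into *@new, duplicating the config string and, for string
 * terms, the value. The copy is marked as not yet used.
 */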
int parse_events_term__clone(struct parse_events_term **new,
			     const struct parse_events_term *term)
{
	char *str;
	struct parse_events_term temp = *term;

	temp.used = false;
	if (term->config) {
		temp.config = strdup(term->config);
		if (!temp.config)
			return -ENOMEM;
	}
	if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
		return new_term(new, &temp, /*str=*/NULL, term->val.num);

	str = strdup(term->val.str);
	if (!str) {
		zfree(&temp.config);
		return -ENOMEM;
	}
	return new_term(new, &temp, str, /*num=*/0);
}

void parse_events_term__delete(struct parse_events_term *term)
{
	if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
		zfree(&term->val.str);

	zfree(&term->config);
	free(term);
}

static int parse_events_terms__copy(const struct parse_events_terms *src,
				    struct parse_events_terms *dest)
{
	struct parse_events_term *term;

	list_for_each_entry(term, &src->terms, list) {
		struct parse_events_term *n;
		int ret;

		ret = parse_events_term__clone(&n, term);
		if (ret)
			return ret;

		list_add_tail(&n->list, &dest->terms);
	}
	return 0;
}

void parse_events_terms__init(struct parse_events_terms *terms)
{
	INIT_LIST_HEAD(&terms->terms);
}

void parse_events_terms__exit(struct parse_events_terms *terms)
{
	struct parse_events_term *term, *h;

	list_for_each_entry_safe(term, h, &terms->terms, list) {
		list_del_init(&term->list);
		parse_events_term__delete(term);
	}
}

void parse_events_terms__delete(struct parse_events_terms *terms)
{
	if (!terms)
		return;
	parse_events_terms__exit(terms);
	free(terms);
}

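/*
 * Render the terms back into event-string syntax, e.g. a numeric term
 * config=0x10 and a string term name=foo become "config=0x10,name=foo".
 */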
int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct strbuf *sb)
{
	struct parse_events_term *term;
	bool first = true;

	if (!terms)
		return 0;

	list_for_each_entry(term, &terms->terms, list) {
		int ret;

		if (!first) {
			ret = strbuf_addch(sb, ',');
			if (ret < 0)
				return ret;
		}
		first = false;

		if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
			if (term->no_value) {
				assert(term->val.num == 1);
				ret = strbuf_addf(sb, "%s", term->config);
			} else {
				ret = strbuf_addf(sb, "%s=%#"PRIx64, term->config, term->val.num);
			}
		} else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) {
			if (term->config) {
				ret = strbuf_addf(sb, "%s=", term->config);
				if (ret < 0)
					return ret;
			} else if ((unsigned int)term->type_term < __PARSE_EVENTS__TERM_TYPE_NR) {
				ret = strbuf_addf(sb, "%s=",
						  parse_events__term_type_str(term->type_term));
				if (ret < 0)
					return ret;
			}
			assert(!term->no_value);
			ret = strbuf_addf(sb, "%s", term->val.str);
		}
		if (ret < 0)
			return ret;
	}
	return 0;
}

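/*
 * Fill @buf with a comma-separated list of the generally available config
 * term names, skipping internal terms (those whose name starts with '<').
 */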
static void config_terms_list(char *buf, size_t buf_sz)
{
	int i;
	bool first = true;

	buf[0] = '\0';
	for (i = 0; i < __PARSE_EVENTS__TERM_TYPE_NR; i++) {
		const char *name = parse_events__term_type_str(i);

		if (!config_term_avail(i, NULL))
			continue;
		if (!name)
			continue;
		if (name[0] == '<')
			continue;

		if (strlen(buf) + strlen(name) + 2 >= buf_sz)
			return;

		if (!first)
			strcat(buf, ",");
		else
			first = false;
		strcat(buf, name);
	}
}

/*
 * Return a string listing the valid config terms of an event.
 * @additional_terms: For terms such as PMU sysfs terms.
 */
char *parse_events_formats_error_string(char *additional_terms)
{
	char *str;
	/* "no-overwrite" is the longest name. */
	char static_terms[__PARSE_EVENTS__TERM_TYPE_NR *
			  (sizeof("no-overwrite") - 1)];

	config_terms_list(static_terms, sizeof(static_terms));
	/* valid terms */
	if (additional_terms) {
		if (asprintf(&str, "valid terms: %s,%s",
			     additional_terms, static_terms) < 0)
			goto fail;
	} else {
		if (asprintf(&str, "valid terms: %s", static_terms) < 0)
			goto fail;
	}
	return str;

fail:
	return NULL;
}