// SPDX-License-Identifier: GPL-2.0
#include <linux/hw_breakpoint.h>
#include <linux/err.h>
#include <linux/list_sort.h>
#include <linux/zalloc.h>
#include <dirent.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/param.h>
#include "term.h"
#include "evlist.h"
#include "evsel.h"
#include <subcmd/parse-options.h>
#include "parse-events.h"
#include "string2.h"
#include "strlist.h"
#include "bpf-loader.h"
#include "debug.h"
#include <api/fs/tracing_path.h>
#include <perf/cpumap.h>
#include "parse-events-bison.h"
#include "parse-events-flex.h"
#include "pmu.h"
#include "asm/bug.h"
#include "util/parse-branch-options.h"
#include "util/evsel_config.h"
#include "util/event.h"
#include "util/parse-events-hybrid.h"
#include "util/pmu-hybrid.h"
#include "util/bpf-filter.h"
#include "util/util.h"
#include "tracepoint.h"
#include "thread_map.h"

#define MAX_NAME_LEN 100

struct perf_pmu_event_symbol {
	char *symbol;
	enum perf_pmu_event_symbol_type type;
};

#ifdef PARSER_DEBUG
extern int parse_events_debug;
#endif
int parse_events_parse(void *parse_state, void *scanner);
static int get_config_terms(struct list_head *head_config,
			    struct list_head *head_terms __maybe_unused);
static int parse_events__with_hybrid_pmu(struct parse_events_state *parse_state,
					 const char *str, char *pmu_name,
					 struct list_head *list);

static struct perf_pmu_event_symbol *perf_pmu_events_list;
/*
 * Number of supported PMU event symbols:
 * 0 means not yet initialized and ready to init,
 * -1 means initialization failed, don't try again,
 * >0 is the number of supported PMU event symbols.
 */
static int perf_pmu_events_list_num;

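/*
 * Legacy symbolic hardware events; e.g. '-e cycles' and '-e branches'
 * resolve against the .symbol/.alias names in the tables below.
 */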
struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = {
		.symbol = "cpu-cycles",
		.alias  = "cycles",
	},
	[PERF_COUNT_HW_INSTRUCTIONS] = {
		.symbol = "instructions",
		.alias  = "",
	},
	[PERF_COUNT_HW_CACHE_REFERENCES] = {
		.symbol = "cache-references",
		.alias  = "",
	},
	[PERF_COUNT_HW_CACHE_MISSES] = {
		.symbol = "cache-misses",
		.alias  = "",
	},
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {
		.symbol = "branch-instructions",
		.alias  = "branches",
	},
	[PERF_COUNT_HW_BRANCH_MISSES] = {
		.symbol = "branch-misses",
		.alias  = "",
	},
	[PERF_COUNT_HW_BUS_CYCLES] = {
		.symbol = "bus-cycles",
		.alias  = "",
	},
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {
		.symbol = "stalled-cycles-frontend",
		.alias  = "idle-cycles-frontend",
	},
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {
		.symbol = "stalled-cycles-backend",
		.alias  = "idle-cycles-backend",
	},
	[PERF_COUNT_HW_REF_CPU_CYCLES] = {
		.symbol = "ref-cycles",
		.alias  = "",
	},
};

struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
	[PERF_COUNT_SW_CPU_CLOCK] = {
		.symbol = "cpu-clock",
		.alias  = "",
	},
	[PERF_COUNT_SW_TASK_CLOCK] = {
		.symbol = "task-clock",
		.alias  = "",
	},
	[PERF_COUNT_SW_PAGE_FAULTS] = {
		.symbol = "page-faults",
		.alias  = "faults",
	},
	[PERF_COUNT_SW_CONTEXT_SWITCHES] = {
		.symbol = "context-switches",
		.alias  = "cs",
	},
	[PERF_COUNT_SW_CPU_MIGRATIONS] = {
		.symbol = "cpu-migrations",
		.alias  = "migrations",
	},
	[PERF_COUNT_SW_PAGE_FAULTS_MIN] = {
		.symbol = "minor-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_PAGE_FAULTS_MAJ] = {
		.symbol = "major-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_ALIGNMENT_FAULTS] = {
		.symbol = "alignment-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_EMULATION_FAULTS] = {
		.symbol = "emulation-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_DUMMY] = {
		.symbol = "dummy",
		.alias  = "",
	},
	[PERF_COUNT_SW_BPF_OUTPUT] = {
		.symbol = "bpf-output",
		.alias  = "",
	},
	[PERF_COUNT_SW_CGROUP_SWITCHES] = {
		.symbol = "cgroup-switches",
		.alias  = "",
	},
};

bool is_event_supported(u8 type, u64 config)
{
	bool ret = true;
	int open_return;
	struct evsel *evsel;
	struct perf_event_attr attr = {
		.type = type,
		.config = config,
		.disabled = 1,
	};
	struct perf_thread_map *tmap = thread_map__new_by_tid(0);

	if (tmap == NULL)
		return false;

	evsel = evsel__new(&attr);
	if (evsel) {
		open_return = evsel__open(evsel, NULL, tmap);
		ret = open_return >= 0;

		if (open_return == -EACCES) {
			/*
			 * This happens if the paranoid value
			 * /proc/sys/kernel/perf_event_paranoid is set to 2.
			 * Re-run with exclude_kernel set; we don't do that
			 * by default as some ARM machines do not support it.
			 */
			evsel->core.attr.exclude_kernel = 1;
			ret = evsel__open(evsel, NULL, tmap) >= 0;
		}
		evsel__delete(evsel);
	}

	perf_thread_map__put(tmap);
	return ret;
}

const char *event_type(int type)
{
	switch (type) {
	case PERF_TYPE_HARDWARE:
		return "hardware";

	case PERF_TYPE_SOFTWARE:
		return "software";

	case PERF_TYPE_TRACEPOINT:
		return "tracepoint";

	case PERF_TYPE_HW_CACHE:
		return "hardware-cache";

	default:
		break;
	}

	return "unknown";
}

static char *get_config_str(struct list_head *head_terms, int type_term)
{
	struct parse_events_term *term;

	if (!head_terms)
		return NULL;

	list_for_each_entry(term, head_terms, list)
		if (term->type_term == type_term)
			return term->val.str;

	return NULL;
}

static char *get_config_metric_id(struct list_head *head_terms)
{
	return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID);
}

static char *get_config_name(struct list_head *head_terms)
{
	return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME);
}

static struct evsel *
__add_event(struct list_head *list, int *idx,
	    struct perf_event_attr *attr,
	    bool init_attr,
	    const char *name, const char *metric_id, struct perf_pmu *pmu,
	    struct list_head *config_terms, bool auto_merge_stats,
	    const char *cpu_list)
{
	struct evsel *evsel;
	struct perf_cpu_map *cpus = pmu ? perf_cpu_map__get(pmu->cpus) :
			       cpu_list ? perf_cpu_map__new(cpu_list) : NULL;

	if (pmu)
		perf_pmu__warn_invalid_formats(pmu);

	if (pmu && attr->type == PERF_TYPE_RAW)
		perf_pmu__warn_invalid_config(pmu, attr->config, name);

	if (init_attr)
		event_attr_init(attr);

	evsel = evsel__new_idx(attr, *idx);
	if (!evsel) {
		perf_cpu_map__put(cpus);
		return NULL;
	}

	(*idx)++;
	evsel->core.cpus = cpus;
	evsel->core.own_cpus = perf_cpu_map__get(cpus);
	evsel->core.requires_cpu = pmu ? pmu->is_uncore : false;
	evsel->auto_merge_stats = auto_merge_stats;
	evsel->pmu = pmu;

	if (name)
		evsel->name = strdup(name);

	if (metric_id)
		evsel->metric_id = strdup(metric_id);

	if (config_terms)
		list_splice_init(config_terms, &evsel->config_terms);

	if (list)
		list_add_tail(&evsel->core.node, list);

	return evsel;
}

struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
				      const char *name, const char *metric_id,
				      struct perf_pmu *pmu)
{
	return __add_event(/*list=*/NULL, &idx, attr, /*init_attr=*/false, name,
			   metric_id, pmu, /*config_terms=*/NULL,
			   /*auto_merge_stats=*/false, /*cpu_list=*/NULL);
}

static int add_event(struct list_head *list, int *idx,
		     struct perf_event_attr *attr, const char *name,
		     const char *metric_id, struct list_head *config_terms)
{
	return __add_event(list, idx, attr, /*init_attr=*/true, name, metric_id,
			   /*pmu=*/NULL, config_terms,
			   /*auto_merge_stats=*/false, /*cpu_list=*/NULL) ? 0 : -ENOMEM;
}

static int add_event_tool(struct list_head *list, int *idx,
			  enum perf_tool_event tool_event)
{
	struct evsel *evsel;
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_DUMMY,
	};

	evsel = __add_event(list, idx, &attr, /*init_attr=*/true, /*name=*/NULL,
			    /*metric_id=*/NULL, /*pmu=*/NULL,
			    /*config_terms=*/NULL, /*auto_merge_stats=*/false,
			    /*cpu_list=*/"0");
	if (!evsel)
		return -ENOMEM;
	evsel->tool_event = tool_event;
	if (tool_event == PERF_TOOL_DURATION_TIME
	    || tool_event == PERF_TOOL_USER_TIME
	    || tool_event == PERF_TOOL_SYSTEM_TIME) {
		free((char *)evsel->unit);
		evsel->unit = strdup("ns");
	}
	return 0;
}

static int parse_aliases(char *str, const char *const names[][EVSEL__MAX_ALIASES], int size)
{
	int i, j;
	int n, longest = -1;

	for (i = 0; i < size; i++) {
		for (j = 0; j < EVSEL__MAX_ALIASES && names[i][j]; j++) {
			n = strlen(names[i][j]);
			if (n > longest && !strncasecmp(str, names[i][j], n))
				longest = n;
		}
		if (longest > 0)
			return i;
	}

	return -1;
}

typedef int config_term_func_t(struct perf_event_attr *attr,
			       struct parse_events_term *term,
			       struct parse_events_error *err);
static int config_term_common(struct perf_event_attr *attr,
			      struct parse_events_term *term,
			      struct parse_events_error *err);
static int config_attr(struct perf_event_attr *attr,
		       struct list_head *head,
		       struct parse_events_error *err,
		       config_term_func_t config_term);

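/*
 * Parse legacy cache events of the form <type>[-<op>][-<result>], e.g.
 * "L1-dcache-load-misses": type "L1-dcache", op "load", result "misses".
 */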
int parse_events_add_cache(struct list_head *list, int *idx,
			   char *type, char *op_result1, char *op_result2,
			   struct parse_events_error *err,
			   struct list_head *head_config,
			   struct parse_events_state *parse_state)
{
	struct perf_event_attr attr;
	LIST_HEAD(config_terms);
	char name[MAX_NAME_LEN];
	const char *config_name, *metric_id;
	int cache_type = -1, cache_op = -1, cache_result = -1;
	char *op_result[2] = { op_result1, op_result2 };
	int i, n, ret;
	bool hybrid;

	/*
	 * No fallback - if we cannot get a clear cache type
	 * then bail out:
	 */
	cache_type = parse_aliases(type, evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX);
	if (cache_type == -1)
		return -EINVAL;

	config_name = get_config_name(head_config);
	n = snprintf(name, MAX_NAME_LEN, "%s", type);

	for (i = 0; (i < 2) && (op_result[i]); i++) {
		char *str = op_result[i];

		n += snprintf(name + n, MAX_NAME_LEN - n, "-%s", str);

		if (cache_op == -1) {
			cache_op = parse_aliases(str, evsel__hw_cache_op,
						 PERF_COUNT_HW_CACHE_OP_MAX);
			if (cache_op >= 0) {
				if (!evsel__is_cache_op_valid(cache_type, cache_op))
					return -EINVAL;
				continue;
			}
		}

		if (cache_result == -1) {
			cache_result = parse_aliases(str, evsel__hw_cache_result,
						     PERF_COUNT_HW_CACHE_RESULT_MAX);
			if (cache_result >= 0)
				continue;
		}
	}

	/*
	 * Fall back to reads:
	 */
	if (cache_op == -1)
		cache_op = PERF_COUNT_HW_CACHE_OP_READ;

	/*
	 * Fall back to accesses:
	 */
	if (cache_result == -1)
		cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;

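	/*
	 * Legacy cache event encoding (see uapi/linux/perf_event.h):
	 * type in bits 0-7, op in bits 8-15, result in bits 16-23 of
	 * attr.config.
	 */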
	memset(&attr, 0, sizeof(attr));
	attr.config = cache_type | (cache_op << 8) | (cache_result << 16);
	attr.type = PERF_TYPE_HW_CACHE;

	if (head_config) {
		if (config_attr(&attr, head_config, err,
				config_term_common))
			return -EINVAL;

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
	}

	metric_id = get_config_metric_id(head_config);
	ret = parse_events__add_cache_hybrid(list, idx, &attr,
					     config_name ? : name,
					     metric_id,
					     &config_terms,
					     &hybrid, parse_state);
	if (hybrid)
		goto out_free_terms;

	ret = add_event(list, idx, &attr, config_name ? : name, metric_id,
			&config_terms);
out_free_terms:
	free_config_terms(&config_terms);
	return ret;
}

#ifdef HAVE_LIBTRACEEVENT
static void tracepoint_error(struct parse_events_error *e, int err,
			     const char *sys, const char *name)
{
	const char *str;
	char help[BUFSIZ];

	if (!e)
		return;

	/*
	 * The error comes either directly from a syscall's errno (> 0)
	 * or from an error-encoded pointer (< 0).
	 */
	err = abs(err);

	switch (err) {
	case EACCES:
		str = "can't access trace events";
		break;
	case ENOENT:
		str = "unknown tracepoint";
		break;
	default:
		str = "failed to add tracepoint";
		break;
	}

	tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name);
	parse_events_error__handle(e, 0, strdup(str), strdup(help));
}

static int add_tracepoint(struct list_head *list, int *idx,
			  const char *sys_name, const char *evt_name,
			  struct parse_events_error *err,
			  struct list_head *head_config)
{
	struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, (*idx)++);

	if (IS_ERR(evsel)) {
		tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name);
		return PTR_ERR(evsel);
	}

	if (head_config) {
		LIST_HEAD(config_terms);

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
		list_splice(&config_terms, &evsel->config_terms);
	}

	list_add_tail(&evsel->core.node, list);
	return 0;
}

static int add_tracepoint_multi_event(struct list_head *list, int *idx,
				      const char *sys_name, const char *evt_name,
				      struct parse_events_error *err,
				      struct list_head *head_config)
{
	char *evt_path;
	struct dirent *evt_ent;
	DIR *evt_dir;
	int ret = 0, found = 0;

	evt_path = get_events_file(sys_name);
	if (!evt_path) {
		tracepoint_error(err, errno, sys_name, evt_name);
		return -1;
	}
	evt_dir = opendir(evt_path);
	if (!evt_dir) {
		put_events_file(evt_path);
		tracepoint_error(err, errno, sys_name, evt_name);
		return -1;
	}

	while (!ret && (evt_ent = readdir(evt_dir))) {
		if (!strcmp(evt_ent->d_name, ".")
		    || !strcmp(evt_ent->d_name, "..")
		    || !strcmp(evt_ent->d_name, "enable")
		    || !strcmp(evt_ent->d_name, "filter"))
			continue;

		if (!strglobmatch(evt_ent->d_name, evt_name))
			continue;

		found++;

		ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name,
				     err, head_config);
	}

	if (!found) {
		tracepoint_error(err, ENOENT, sys_name, evt_name);
		ret = -1;
	}

	put_events_file(evt_path);
	closedir(evt_dir);
	return ret;
}

static int add_tracepoint_event(struct list_head *list, int *idx,
				const char *sys_name, const char *evt_name,
				struct parse_events_error *err,
				struct list_head *head_config)
{
	return strpbrk(evt_name, "*?") ?
		add_tracepoint_multi_event(list, idx, sys_name, evt_name,
					   err, head_config) :
		add_tracepoint(list, idx, sys_name, evt_name,
			       err, head_config);
}

static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
				    const char *sys_name, const char *evt_name,
				    struct parse_events_error *err,
				    struct list_head *head_config)
{
	struct dirent *events_ent;
	DIR *events_dir;
	int ret = 0;

	events_dir = tracing_events__opendir();
	if (!events_dir) {
		tracepoint_error(err, errno, sys_name, evt_name);
		return -1;
	}

	while (!ret && (events_ent = readdir(events_dir))) {
		if (!strcmp(events_ent->d_name, ".")
		    || !strcmp(events_ent->d_name, "..")
		    || !strcmp(events_ent->d_name, "enable")
		    || !strcmp(events_ent->d_name, "header_event")
		    || !strcmp(events_ent->d_name, "header_page"))
			continue;

		if (!strglobmatch(events_ent->d_name, sys_name))
			continue;

		ret = add_tracepoint_event(list, idx, events_ent->d_name,
					   evt_name, err, head_config);
	}

	closedir(events_dir);
	return ret;
}
#endif /* HAVE_LIBTRACEEVENT */

#ifdef HAVE_LIBBPF_SUPPORT
struct __add_bpf_event_param {
	struct parse_events_state *parse_state;
	struct list_head *list;
	struct list_head *head_config;
};

static int add_bpf_event(const char *group, const char *event, int fd, struct bpf_object *obj,
			 void *_param)
{
	LIST_HEAD(new_evsels);
	struct __add_bpf_event_param *param = _param;
	struct parse_events_state *parse_state = param->parse_state;
	struct list_head *list = param->list;
	struct evsel *pos;
	int err;
	/*
	 * Check whether we should add the event: if it is a tracepoint
	 * whose group starts with '!', don't add it; it will be used for
	 * something else, like adding to a BPF_MAP_TYPE_PROG_ARRAY.
	 *
	 * See tools/perf/examples/bpf/augmented_raw_syscalls.c
	 */
	if (group[0] == '!')
		return 0;

	pr_debug("add bpf event %s:%s and attach bpf program %d\n",
		 group, event, fd);

	err = parse_events_add_tracepoint(&new_evsels, &parse_state->idx, group,
					  event, parse_state->error,
					  param->head_config);
	if (err) {
		struct evsel *evsel, *tmp;

		pr_debug("Failed to add BPF event %s:%s\n",
			 group, event);
		list_for_each_entry_safe(evsel, tmp, &new_evsels, core.node) {
			list_del_init(&evsel->core.node);
			evsel__delete(evsel);
		}
		return err;
	}
	pr_debug("adding %s:%s\n", group, event);

	list_for_each_entry(pos, &new_evsels, core.node) {
		pr_debug("adding %s:%s to %p\n",
			 group, event, pos);
		pos->bpf_fd = fd;
		pos->bpf_obj = obj;
	}
	list_splice(&new_evsels, list);
	return 0;
}

int parse_events_load_bpf_obj(struct parse_events_state *parse_state,
			      struct list_head *list,
			      struct bpf_object *obj,
			      struct list_head *head_config)
{
	int err;
	char errbuf[BUFSIZ];
	struct __add_bpf_event_param param = {parse_state, list, head_config};
	static bool registered_unprobe_atexit = false;

	if (IS_ERR(obj) || !obj) {
		snprintf(errbuf, sizeof(errbuf),
			 "Internal error: load bpf obj with NULL");
		err = -EINVAL;
		goto errout;
	}

	/*
	 * Register the atexit handler before calling bpf__probe() so
	 * bpf__probe() doesn't need to unprobe the probe points it has
	 * already created when it fails.
	 */
	if (!registered_unprobe_atexit) {
		atexit(bpf__clear);
		registered_unprobe_atexit = true;
	}

	err = bpf__probe(obj);
	if (err) {
		bpf__strerror_probe(obj, err, errbuf, sizeof(errbuf));
		goto errout;
	}

	err = bpf__load(obj);
	if (err) {
		bpf__strerror_load(obj, err, errbuf, sizeof(errbuf));
		goto errout;
	}

	err = bpf__foreach_event(obj, add_bpf_event, &param);
	if (err) {
		snprintf(errbuf, sizeof(errbuf),
			 "Attach events in BPF object failed");
		goto errout;
	}

	return 0;
errout:
	parse_events_error__handle(parse_state->error, 0,
				   strdup(errbuf), strdup("(add -v to see detail)"));
	return err;
}

static int
parse_events_config_bpf(struct parse_events_state *parse_state,
			struct bpf_object *obj,
			struct list_head *head_config)
{
	struct parse_events_term *term;
	int error_pos;

	if (!head_config || list_empty(head_config))
		return 0;

	list_for_each_entry(term, head_config, list) {
		int err;

		if (term->type_term != PARSE_EVENTS__TERM_TYPE_USER) {
			parse_events_error__handle(parse_state->error, term->err_term,
						strdup("Invalid config term for BPF object"),
						NULL);
			return -EINVAL;
		}

		err = bpf__config_obj(obj, term, parse_state->evlist, &error_pos);
		if (err) {
			char errbuf[BUFSIZ];
			int idx;

			bpf__strerror_config_obj(obj, term, parse_state->evlist,
						 &error_pos, err, errbuf,
						 sizeof(errbuf));

			if (err == -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE)
				idx = term->err_val;
			else
				idx = term->err_term + error_pos;

			parse_events_error__handle(parse_state->error, idx,
						strdup(errbuf),
						strdup(
"Hint:\tValid config terms:\n"
"     \tmap:[<arraymap>].value<indices>=[value]\n"
"     \tmap:[<eventmap>].event<indices>=[event]\n"
"\n"
"     \twhere <indices> is something like [0,3...5] or [all]\n"
"     \t(add -v to see detail)"));
			return err;
		}
	}
	return 0;
}

/*
 * Split config terms:
 * perf record -e bpf.c/call-graph=fp,map:array.value[0]=1/ ...
 * 'call-graph=fp' is an 'evt config' and should be applied to each
 * event in bpf.c.
 * 'map:array.value[0]=1' is an 'obj config' and should be processed
 * with parse_events_config_bpf.
 *
 * Move object config terms from the first list to obj_head_config.
 */
static void
split_bpf_config_terms(struct list_head *evt_head_config,
		       struct list_head *obj_head_config)
{
	struct parse_events_term *term, *temp;

	/*
	 * Currently, all possible user config terms
	 * belong to the bpf object. parse_events__is_hardcoded_term()
	 * happens to be a good flag.
	 *
	 * See parse_events_config_bpf() and
	 * config_term_tracepoint().
	 */
	list_for_each_entry_safe(term, temp, evt_head_config, list)
		if (!parse_events__is_hardcoded_term(term))
			list_move_tail(&term->list, obj_head_config);
}

int parse_events_load_bpf(struct parse_events_state *parse_state,
			  struct list_head *list,
			  char *bpf_file_name,
			  bool source,
			  struct list_head *head_config)
{
	int err;
	struct bpf_object *obj;
	LIST_HEAD(obj_head_config);

	if (head_config)
		split_bpf_config_terms(head_config, &obj_head_config);

	obj = bpf__prepare_load(bpf_file_name, source);
	if (IS_ERR(obj)) {
		char errbuf[BUFSIZ];

		err = PTR_ERR(obj);

		if (err == -ENOTSUP)
			snprintf(errbuf, sizeof(errbuf),
				 "BPF support is not compiled");
		else
			bpf__strerror_prepare_load(bpf_file_name,
						   source,
						   -err, errbuf,
						   sizeof(errbuf));

		parse_events_error__handle(parse_state->error, 0,
					strdup(errbuf), strdup("(add -v to see detail)"));
		return err;
	}

	err = parse_events_load_bpf_obj(parse_state, list, obj, head_config);
	if (err)
		return err;
	err = parse_events_config_bpf(parse_state, obj, &obj_head_config);

	/*
	 * Caller doesn't know anything about obj_head_config,
	 * so combine them together again before returning.
	 */
	if (head_config)
		list_splice_tail(&obj_head_config, head_config);
	return err;
}
#else // HAVE_LIBBPF_SUPPORT
int parse_events_load_bpf_obj(struct parse_events_state *parse_state,
			      struct list_head *list __maybe_unused,
			      struct bpf_object *obj __maybe_unused,
			      struct list_head *head_config __maybe_unused)
{
	parse_events_error__handle(parse_state->error, 0,
				   strdup("BPF support is not compiled"),
				   strdup("Make sure libbpf-devel is available at build time."));
	return -ENOTSUP;
}

int parse_events_load_bpf(struct parse_events_state *parse_state,
			  struct list_head *list __maybe_unused,
			  char *bpf_file_name __maybe_unused,
			  bool source __maybe_unused,
			  struct list_head *head_config __maybe_unused)
{
	parse_events_error__handle(parse_state->error, 0,
				   strdup("BPF support is not compiled"),
				   strdup("Make sure libbpf-devel is available at build time."));
	return -ENOTSUP;
}
#endif // HAVE_LIBBPF_SUPPORT

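/*
 * Parse the access-type part of a breakpoint spec ('r', 'w', 'x' in any
 * order, each at most once), e.g. the "rw" in 'mem:0x1000:rw'.
 */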
static int
parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
{
	int i;

	for (i = 0; i < 3; i++) {
		if (!type || !type[i])
			break;

#define CHECK_SET_TYPE(bit)		\
do {					\
	if (attr->bp_type & bit)	\
		return -EINVAL;		\
	else				\
		attr->bp_type |= bit;	\
} while (0)

		switch (type[i]) {
		case 'r':
			CHECK_SET_TYPE(HW_BREAKPOINT_R);
			break;
		case 'w':
			CHECK_SET_TYPE(HW_BREAKPOINT_W);
			break;
		case 'x':
			CHECK_SET_TYPE(HW_BREAKPOINT_X);
			break;
		default:
			return -EINVAL;
		}
	}

#undef CHECK_SET_TYPE

	if (!attr->bp_type) /* Default */
		attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;

	return 0;
}

int parse_events_add_breakpoint(struct list_head *list, int *idx,
				u64 addr, char *type, u64 len)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.bp_addr = addr;

	if (parse_breakpoint_type(type, &attr))
		return -EINVAL;

	/* Provide some defaults if len is not specified */
	if (!len) {
		if (attr.bp_type == HW_BREAKPOINT_X)
			len = sizeof(long);
		else
			len = HW_BREAKPOINT_LEN_4;
	}

	attr.bp_len = len;

	attr.type = PERF_TYPE_BREAKPOINT;
	attr.sample_period = 1;

	return add_event(list, idx, &attr, /*name=*/NULL, /*metric_id=*/NULL,
			 /*config_terms=*/NULL);
}

static int check_type_val(struct parse_events_term *term,
			  struct parse_events_error *err,
			  int type)
{
	if (type == term->type_val)
		return 0;

	if (err) {
		parse_events_error__handle(err, term->err_val,
					type == PARSE_EVENTS__TERM_TYPE_NUM
					? strdup("expected numeric value")
					: strdup("expected string value"),
					NULL);
	}
	return -EINVAL;
}

/*
 * Update according to parse-events.l
 */
static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = {
	[PARSE_EVENTS__TERM_TYPE_USER] = "<sysfs term>",
	[PARSE_EVENTS__TERM_TYPE_CONFIG] = "config",
	[PARSE_EVENTS__TERM_TYPE_CONFIG1] = "config1",
	[PARSE_EVENTS__TERM_TYPE_CONFIG2] = "config2",
	[PARSE_EVENTS__TERM_TYPE_CONFIG3] = "config3",
	[PARSE_EVENTS__TERM_TYPE_NAME] = "name",
	[PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD] = "period",
	[PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ] = "freq",
	[PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE] = "branch_type",
	[PARSE_EVENTS__TERM_TYPE_TIME] = "time",
	[PARSE_EVENTS__TERM_TYPE_CALLGRAPH] = "call-graph",
	[PARSE_EVENTS__TERM_TYPE_STACKSIZE] = "stack-size",
	[PARSE_EVENTS__TERM_TYPE_NOINHERIT] = "no-inherit",
	[PARSE_EVENTS__TERM_TYPE_INHERIT] = "inherit",
	[PARSE_EVENTS__TERM_TYPE_MAX_STACK] = "max-stack",
	[PARSE_EVENTS__TERM_TYPE_MAX_EVENTS] = "nr",
	[PARSE_EVENTS__TERM_TYPE_OVERWRITE] = "overwrite",
	[PARSE_EVENTS__TERM_TYPE_NOOVERWRITE] = "no-overwrite",
	[PARSE_EVENTS__TERM_TYPE_DRV_CFG] = "driver-config",
	[PARSE_EVENTS__TERM_TYPE_PERCORE] = "percore",
	[PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT] = "aux-output",
	[PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE] = "aux-sample-size",
	[PARSE_EVENTS__TERM_TYPE_METRIC_ID] = "metric-id",
};

static bool config_term_shrinked;

static bool
config_term_avail(int term_type, struct parse_events_error *err)
{
	char *err_str;

	if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) {
		parse_events_error__handle(err, -1,
					strdup("Invalid term_type"), NULL);
		return false;
	}
	if (!config_term_shrinked)
		return true;

	switch (term_type) {
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
	case PARSE_EVENTS__TERM_TYPE_NAME:
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
		return true;
	default:
		if (!err)
			return false;

		/* term_type is validated so indexing is safe */
		if (asprintf(&err_str, "'%s' is not usable in 'perf stat'",
			     config_term_names[term_type]) >= 0)
			parse_events_error__handle(err, -1, err_str, NULL);
		return false;
	}
}

void parse_events__shrink_config_terms(void)
{
	config_term_shrinked = true;
}

static int config_term_common(struct perf_event_attr *attr,
			      struct parse_events_term *term,
			      struct parse_events_error *err)
{
#define CHECK_TYPE_VAL(type) \
do { \
	if (check_type_val(term, err, PARSE_EVENTS__TERM_TYPE_ ## type)) \
		return -EINVAL; \
} while (0)

	switch (term->type_term) {
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
		CHECK_TYPE_VAL(NUM);
		attr->config = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		CHECK_TYPE_VAL(NUM);
		attr->config1 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		CHECK_TYPE_VAL(NUM);
		attr->config2 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
		CHECK_TYPE_VAL(NUM);
		attr->config3 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
		CHECK_TYPE_VAL(STR);
		if (strcmp(term->val.str, "no") &&
		    parse_branch_str(term->val.str,
				     &attr->branch_sample_type)) {
			parse_events_error__handle(err, term->err_val,
					strdup("invalid branch sample type"),
					NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_TIME:
		CHECK_TYPE_VAL(NUM);
		if (term->val.num > 1) {
			parse_events_error__handle(err, term->err_val,
						strdup("expected 0 or 1"),
						NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NAME:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
		CHECK_TYPE_VAL(NUM);
		if ((unsigned int)term->val.num > 1) {
			parse_events_error__handle(err, term->err_val,
						strdup("expected 0 or 1"),
						NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		CHECK_TYPE_VAL(NUM);
		if (term->val.num > UINT_MAX) {
			parse_events_error__handle(err, term->err_val,
						strdup("too big"),
						NULL);
			return -EINVAL;
		}
		break;
	default:
		parse_events_error__handle(err, term->err_term,
				strdup("unknown term"),
				parse_events_formats_error_string(NULL));
		return -EINVAL;
	}

	/*
	 * Check term availability after the basic checking, so that
	 * PARSE_EVENTS__TERM_TYPE_USER can be found and filtered.
	 *
	 * If we checked availability at the entry of this function,
	 * the user would see "'<sysfs term>' is not usable in 'perf stat'"
	 * whenever an invalid config term is provided for legacy events
	 * (for example, instructions/badterm/...), which is confusing.
	 */
	if (!config_term_avail(term->type_term, err))
		return -EINVAL;
	return 0;
#undef CHECK_TYPE_VAL
}

static int config_term_pmu(struct perf_event_attr *attr,
			   struct parse_events_term *term,
			   struct parse_events_error *err)
{
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER ||
	    term->type_term == PARSE_EVENTS__TERM_TYPE_DRV_CFG)
		/*
		 * Always succeed for sysfs terms, as we don't know
		 * at this point what type they need to have.
		 */
		return 0;
	else
		return config_term_common(attr, term, err);
}

#ifdef HAVE_LIBTRACEEVENT
static int config_term_tracepoint(struct perf_event_attr *attr,
				  struct parse_events_term *term,
				  struct parse_events_error *err)
{
	switch (term->type_term) {
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		return config_term_common(attr, term, err);
	default:
		if (err) {
			parse_events_error__handle(err, term->err_term,
				strdup("unknown term"),
				strdup("valid terms: call-graph,stack-size\n"));
		}
		return -EINVAL;
	}

	return 0;
}
#endif

static int config_attr(struct perf_event_attr *attr,
		       struct list_head *head,
		       struct parse_events_error *err,
		       config_term_func_t config_term)
{
	struct parse_events_term *term;

	list_for_each_entry(term, head, list)
		if (config_term(attr, term, err))
			return -EINVAL;

	return 0;
}

static int get_config_terms(struct list_head *head_config,
			    struct list_head *head_terms __maybe_unused)
{
#define ADD_CONFIG_TERM(__type, __weak) \
	struct evsel_config_term *__t; \
 \
	__t = zalloc(sizeof(*__t)); \
	if (!__t) \
		return -ENOMEM; \
 \
	INIT_LIST_HEAD(&__t->list); \
	__t->type = EVSEL__CONFIG_TERM_ ## __type; \
	__t->weak = __weak; \
	list_add_tail(&__t->list, head_terms)

#define ADD_CONFIG_TERM_VAL(__type, __name, __val, __weak) \
do { \
	ADD_CONFIG_TERM(__type, __weak); \
	__t->val.__name = __val; \
} while (0)

#define ADD_CONFIG_TERM_STR(__type, __val, __weak) \
do { \
	ADD_CONFIG_TERM(__type, __weak); \
	__t->val.str = strdup(__val); \
	if (!__t->val.str) { \
		zfree(&__t); \
		return -ENOMEM; \
	} \
	__t->free_str = true; \
} while (0)

	struct parse_events_term *term;

	list_for_each_entry(term, head_config, list) {
		switch (term->type_term) {
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
			ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
			ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_TIME:
			ADD_CONFIG_TERM_VAL(TIME, time, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
			ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
			ADD_CONFIG_TERM_STR(BRANCH, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
			ADD_CONFIG_TERM_VAL(STACK_USER, stack_user,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_INHERIT:
			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
					    term->val.num ? 0 : 1, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
			ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
			ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
					    term->val.num ? 0 : 1, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
			ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_PERCORE:
			ADD_CONFIG_TERM_VAL(PERCORE, percore,
					    term->val.num ? true : false, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
			ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
			ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size,
					    term->val.num, term->weak);
			break;
		default:
			break;
		}
	}
	return 0;
}

/*
 * Add EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for
 * each bit of attr->config that the user has changed.
 */
static int get_config_chgs(struct perf_pmu *pmu, struct list_head *head_config,
			   struct list_head *head_terms)
{
	struct parse_events_term *term;
	u64 bits = 0;
	int type;

	list_for_each_entry(term, head_config, list) {
		switch (term->type_term) {
		case PARSE_EVENTS__TERM_TYPE_USER:
			type = perf_pmu__format_type(&pmu->format, term->config);
			if (type != PERF_PMU_FORMAT_VALUE_CONFIG)
				continue;
			bits |= perf_pmu__format_bits(&pmu->format, term->config);
			break;
		case PARSE_EVENTS__TERM_TYPE_CONFIG:
			bits = ~(u64)0;
			break;
		default:
			break;
		}
	}

	if (bits)
		ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits, false);

#undef ADD_CONFIG_TERM
	return 0;
}

int parse_events_add_tracepoint(struct list_head *list, int *idx,
				const char *sys, const char *event,
				struct parse_events_error *err,
				struct list_head *head_config)
{
#ifdef HAVE_LIBTRACEEVENT
	if (head_config) {
		struct perf_event_attr attr;

		if (config_attr(&attr, head_config, err,
				config_term_tracepoint))
			return -EINVAL;
	}

	if (strpbrk(sys, "*?"))
		return add_tracepoint_multi_sys(list, idx, sys, event,
						err, head_config);
	else
		return add_tracepoint_event(list, idx, sys, event,
					    err, head_config);
#else
	(void)list;
	(void)idx;
	(void)sys;
	(void)event;
	(void)head_config;
	parse_events_error__handle(err, 0, strdup("unsupported tracepoint"),
				strdup("libtraceevent is necessary for tracepoint support"));
	return -1;
#endif
}

int parse_events_add_numeric(struct parse_events_state *parse_state,
			     struct list_head *list,
			     u32 type, u64 config,
			     struct list_head *head_config)
{
	struct perf_event_attr attr;
	LIST_HEAD(config_terms);
	const char *name, *metric_id;
	bool hybrid;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.type = type;
	attr.config = config;

	if (head_config) {
		if (config_attr(&attr, head_config, parse_state->error,
				config_term_common))
			return -EINVAL;

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
	}

	name = get_config_name(head_config);
	metric_id = get_config_metric_id(head_config);
	ret = parse_events__add_numeric_hybrid(parse_state, list, &attr,
					       name, metric_id,
					       &config_terms, &hybrid);
	if (hybrid)
		goto out_free_terms;

	ret = add_event(list, &parse_state->idx, &attr, name, metric_id,
			&config_terms);
out_free_terms:
	free_config_terms(&config_terms);
	return ret;
}

int parse_events_add_tool(struct parse_events_state *parse_state,
			  struct list_head *list,
			  int tool_event)
{
	return add_event_tool(list, &parse_state->idx, tool_event);
}

static bool config_term_percore(struct list_head *config_terms)
{
	struct evsel_config_term *term;

	list_for_each_entry(term, config_terms, list) {
		if (term->type == EVSEL__CONFIG_TERM_PERCORE)
			return term->val.percore;
	}

	return false;
}

static int parse_events__inside_hybrid_pmu(struct parse_events_state *parse_state,
					   struct list_head *list, char *name,
					   struct list_head *head_config)
{
	struct parse_events_term *term;
	int ret = -1;

	if (parse_state->fake_pmu || !head_config || list_empty(head_config) ||
	    !perf_pmu__is_hybrid(name)) {
		return -1;
	}

	/*
	 * Bail out if there is more than one term in the list.
	 */
	if (head_config->next && head_config->next->next != head_config)
		return -1;

	term = list_first_entry(head_config, struct parse_events_term, list);
	if (term && term->config && strcmp(term->config, "event")) {
		ret = parse_events__with_hybrid_pmu(parse_state, term->config,
						    name, list);
	}

	return ret;
}

int parse_events_add_pmu(struct parse_events_state *parse_state,
			 struct list_head *list, char *name,
			 struct list_head *head_config,
			 bool auto_merge_stats)
{
	struct perf_event_attr attr;
	struct perf_pmu_info info;
	struct perf_pmu *pmu;
	struct evsel *evsel;
	struct parse_events_error *err = parse_state->error;
	LIST_HEAD(config_terms);

	pmu = parse_state->fake_pmu ?: perf_pmu__find(name);

	if (verbose > 1 && !(pmu && pmu->selectable)) {
		fprintf(stderr, "Attempting to add event pmu '%s' with '",
			name);
		if (head_config) {
			struct parse_events_term *term;

			list_for_each_entry(term, head_config, list) {
				fprintf(stderr, "%s,", term->config);
			}
		}
		fprintf(stderr, "' that may result in non-fatal errors\n");
	}

	if (!pmu) {
		char *err_str;

		if (asprintf(&err_str,
			     "Cannot find PMU `%s'. Missing kernel support?",
			     name) >= 0)
			parse_events_error__handle(err, 0, err_str, NULL);
		return -EINVAL;
	}

	if (pmu->default_config) {
		memcpy(&attr, pmu->default_config,
		       sizeof(struct perf_event_attr));
	} else {
		memset(&attr, 0, sizeof(attr));
	}

	if (!head_config) {
		attr.type = pmu->type;
		evsel = __add_event(list, &parse_state->idx, &attr,
				    /*init_attr=*/true, /*name=*/NULL,
				    /*metric_id=*/NULL, pmu,
				    /*config_terms=*/NULL, auto_merge_stats,
				    /*cpu_list=*/NULL);
		if (evsel) {
			evsel->pmu_name = name ? strdup(name) : NULL;
			return 0;
		} else {
			return -ENOMEM;
		}
	}

	if (!parse_state->fake_pmu && perf_pmu__check_alias(pmu, head_config, &info))
		return -EINVAL;

	if (verbose > 1) {
		fprintf(stderr, "After aliases, add event pmu '%s' with '",
			name);
		if (head_config) {
			struct parse_events_term *term;

			list_for_each_entry(term, head_config, list) {
				fprintf(stderr, "%s,", term->config);
			}
		}
		fprintf(stderr, "' that may result in non-fatal errors\n");
	}

	/*
	 * Configure hardcoded terms first, no need to check
	 * return value when called with fail == 0 ;)
	 */
	if (config_attr(&attr, head_config, parse_state->error, config_term_pmu))
		return -EINVAL;

	if (get_config_terms(head_config, &config_terms))
		return -ENOMEM;

	/*
	 * When using default config, record which bits of attr->config were
	 * changed by the user.
	 */
	if (pmu->default_config && get_config_chgs(pmu, head_config, &config_terms))
		return -ENOMEM;

	if (!parse_events__inside_hybrid_pmu(parse_state, list, name,
					     head_config)) {
		return 0;
	}

	if (!parse_state->fake_pmu && perf_pmu__config(pmu, &attr, head_config, parse_state->error)) {
		free_config_terms(&config_terms);
		return -EINVAL;
	}

	evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true,
			    get_config_name(head_config),
			    get_config_metric_id(head_config), pmu,
			    &config_terms, auto_merge_stats, /*cpu_list=*/NULL);
	if (!evsel)
		return -ENOMEM;

	if (evsel->name)
		evsel->use_config_name = true;

	evsel->pmu_name = name ? strdup(name) : NULL;
	evsel->percore = config_term_percore(&evsel->config_terms);

	if (parse_state->fake_pmu)
		return 0;

	free((char *)evsel->unit);
	evsel->unit = strdup(info.unit);
	evsel->scale = info.scale;
	evsel->per_pkg = info.per_pkg;
	evsel->snapshot = info.snapshot;
	return 0;
}

int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
			       char *str, struct list_head *head,
			       struct list_head **listp)
{
	struct parse_events_term *term;
	struct list_head *list = NULL;
	struct list_head *orig_head = NULL;
	struct perf_pmu *pmu = NULL;
	int ok = 0;
	char *config;

	*listp = NULL;

	if (!head) {
		head = malloc(sizeof(struct list_head));
		if (!head)
			goto out_err;

		INIT_LIST_HEAD(head);
	}
	config = strdup(str);
	if (!config)
		goto out_err;

	if (parse_events_term__num(&term,
				   PARSE_EVENTS__TERM_TYPE_USER,
				   config, 1, false, NULL,
				   NULL) < 0) {
		free(config);
		goto out_err;
	}
	list_add_tail(&term->list, head);

	/* Add it for all PMUs that support the alias */
	list = malloc(sizeof(struct list_head));
	if (!list)
		goto out_err;

	INIT_LIST_HEAD(list);

	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
		struct perf_pmu_alias *alias;

		list_for_each_entry(alias, &pmu->aliases, list) {
			if (!strcasecmp(alias->name, str)) {
				parse_events_copy_term_list(head, &orig_head);
				if (!parse_events_add_pmu(parse_state, list,
							  pmu->name, orig_head,
							  /*auto_merge_stats=*/true)) {
					pr_debug("%s -> %s/%s/\n", str,
						 pmu->name, alias->str);
					ok++;
				}
				parse_events_terms__delete(orig_head);
			}
		}
	}

	if (parse_state->fake_pmu) {
		if (!parse_events_add_pmu(parse_state, list, str, head,
					  /*auto_merge_stats=*/true)) {
			pr_debug("%s -> %s/%s/\n", str, "fake_pmu", str);
			ok++;
		}
	}

out_err:
	if (ok)
		*listp = list;
	else
		free(list);

	parse_events_terms__delete(head);
	return ok ? 0 : -1;
}

int parse_events__modifier_group(struct list_head *list,
				 char *event_mod)
{
	return parse_events__modifier_event(list, event_mod, true);
}

void parse_events__set_leader(char *name, struct list_head *list)
{
	struct evsel *leader;

	if (list_empty(list)) {
		WARN_ONCE(true, "WARNING: failed to set leader: empty list");
		return;
	}

	leader = list_first_entry(list, struct evsel, core.node);
	__perf_evlist__set_leader(list, &leader->core);
	leader->group_name = name;
}

/* list_event is assumed to point to malloc'ed memory */
void parse_events_update_lists(struct list_head *list_event,
			       struct list_head *list_all)
{
	/*
	 * Called for a single event definition. Update the
	 * 'all events' list, and reinit the 'single event'
	 * list for the next event definition.
	 */
	list_splice_tail(list_event, list_all);
	free(list_event);
}

struct event_modifier {
	int eu;
	int ek;
	int eh;
	int eH;
	int eG;
	int eI;
	int precise;
	int precise_max;
	int exclude_GH;
	int sample_read;
	int pinned;
	int weak;
	int exclusive;
	int bpf_counter;
};

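/*
 * Event modifier letters handled below: u/k/h (exclude all but
 * user/kernel/hypervisor), G/H (guest/host), I (exclude idle),
 * p (precise, repeatable), P (max precise), S (sample read),
 * D (pinned), e (exclusive), W (weak group), b (bpf counter);
 * e.g. 'cycles:kppp'.
 */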
static int get_event_modifier(struct event_modifier *mod, char *str,
			      struct evsel *evsel)
{
	int eu = evsel ? evsel->core.attr.exclude_user : 0;
	int ek = evsel ? evsel->core.attr.exclude_kernel : 0;
	int eh = evsel ? evsel->core.attr.exclude_hv : 0;
	int eH = evsel ? evsel->core.attr.exclude_host : 0;
	int eG = evsel ? evsel->core.attr.exclude_guest : 0;
	int eI = evsel ? evsel->core.attr.exclude_idle : 0;
	int precise = evsel ? evsel->core.attr.precise_ip : 0;
	int precise_max = 0;
	int sample_read = 0;
	int pinned = evsel ? evsel->core.attr.pinned : 0;
	int exclusive = evsel ? evsel->core.attr.exclusive : 0;

	int exclude = eu | ek | eh;
	int exclude_GH = evsel ? evsel->exclude_GH : 0;
	int weak = 0;
	int bpf_counter = 0;

	memset(mod, 0, sizeof(*mod));

	while (*str) {
		if (*str == 'u') {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			if (!exclude_GH && !perf_guest)
				eG = 1;
			eu = 0;
		} else if (*str == 'k') {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			ek = 0;
		} else if (*str == 'h') {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			eh = 0;
		} else if (*str == 'G') {
			if (!exclude_GH)
				exclude_GH = eG = eH = 1;
			eG = 0;
		} else if (*str == 'H') {
			if (!exclude_GH)
				exclude_GH = eG = eH = 1;
			eH = 0;
		} else if (*str == 'I') {
			eI = 1;
		} else if (*str == 'p') {
			precise++;
			/* use of precise requires exclude_guest */
			if (!exclude_GH)
				eG = 1;
		} else if (*str == 'P') {
			precise_max = 1;
		} else if (*str == 'S') {
			sample_read = 1;
		} else if (*str == 'D') {
			pinned = 1;
		} else if (*str == 'e') {
			exclusive = 1;
		} else if (*str == 'W') {
			weak = 1;
		} else if (*str == 'b') {
			bpf_counter = 1;
		} else
			break;

		++str;
	}

	/*
	 * precise ip:
	 *
	 *  0 - SAMPLE_IP can have arbitrary skid
	 *  1 - SAMPLE_IP must have constant skid
	 *  2 - SAMPLE_IP requested to have 0 skid
	 *  3 - SAMPLE_IP must have 0 skid
	 *
	 * See also PERF_RECORD_MISC_EXACT_IP
	 */
	if (precise > 3)
		return -EINVAL;

	mod->eu = eu;
	mod->ek = ek;
	mod->eh = eh;
	mod->eH = eH;
	mod->eG = eG;
	mod->eI = eI;
	mod->precise = precise;
	mod->precise_max = precise_max;
	mod->exclude_GH = exclude_GH;
	mod->sample_read = sample_read;
	mod->pinned = pinned;
	mod->weak = weak;
	mod->bpf_counter = bpf_counter;
	mod->exclusive = exclusive;

	return 0;
}

/*
 * Basic modifier sanity check: validate that the string contains at
 * most one instance of each modifier (apart from 'p').
 */
static int check_modifier(char *str)
{
	char *p = str;

	/* The sizeof includes the terminating 0 byte as well. */
	if (strlen(str) > (sizeof("ukhGHpppPSDIWeb") - 1))
		return -1;

	while (*p) {
		if (*p != 'p' && strchr(p + 1, *p))
			return -1;
		p++;
	}

	return 0;
}
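/* Example: "ukpp" passes check_modifier() ('p' may repeat); "uu" fails. */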

int parse_events__modifier_event(struct list_head *list, char *str, bool add)
{
	struct evsel *evsel;
	struct event_modifier mod;

	if (str == NULL)
		return 0;

	if (check_modifier(str))
		return -EINVAL;

	if (!add && get_event_modifier(&mod, str, NULL))
		return -EINVAL;

	__evlist__for_each_entry(list, evsel) {
		if (add && get_event_modifier(&mod, str, evsel))
			return -EINVAL;

		evsel->core.attr.exclude_user = mod.eu;
		evsel->core.attr.exclude_kernel = mod.ek;
		evsel->core.attr.exclude_hv = mod.eh;
		evsel->core.attr.precise_ip = mod.precise;
		evsel->core.attr.exclude_host = mod.eH;
		evsel->core.attr.exclude_guest = mod.eG;
		evsel->core.attr.exclude_idle = mod.eI;
		evsel->exclude_GH = mod.exclude_GH;
		evsel->sample_read = mod.sample_read;
		evsel->precise_max = mod.precise_max;
		evsel->weak_group = mod.weak;
		evsel->bpf_counter = mod.bpf_counter;

		if (evsel__is_group_leader(evsel)) {
			evsel->core.attr.pinned = mod.pinned;
			evsel->core.attr.exclusive = mod.exclusive;
		}
	}

	return 0;
}

int parse_events_name(struct list_head *list, const char *name)
{
	struct evsel *evsel;

	__evlist__for_each_entry(list, evsel) {
		if (!evsel->name)
			evsel->name = strdup(name);
	}

	return 0;
}

static int
comp_pmu(const void *p1, const void *p2)
{
	struct perf_pmu_event_symbol *pmu1 = (struct perf_pmu_event_symbol *) p1;
	struct perf_pmu_event_symbol *pmu2 = (struct perf_pmu_event_symbol *) p2;

	return strcasecmp(pmu1->symbol, pmu2->symbol);
}

static void perf_pmu__parse_cleanup(void)
{
	if (perf_pmu_events_list_num > 0) {
		struct perf_pmu_event_symbol *p;
		int i;

		for (i = 0; i < perf_pmu_events_list_num; i++) {
			p = perf_pmu_events_list + i;
			zfree(&p->symbol);
		}
		zfree(&perf_pmu_events_list);
		perf_pmu_events_list_num = 0;
	}
}

#define SET_SYMBOL(str, stype) \
do { \
	p->symbol = str; \
	if (!p->symbol) \
		goto err; \
	p->type = stype; \
} while (0)

/*
 * Read the pmu events list from sysfs
 * Save it into perf_pmu_events_list
 */
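/*
 * Hyphenated alias names are split so the lexer can match them in
 * parts; e.g. (illustrative) "mem-loads-aux" becomes prefix "mem",
 * suffix "loads" and suffix2 "aux", while "mem-loads" becomes prefix
 * "mem" and suffix "loads".
 */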
static void perf_pmu__parse_init(void)
{
	struct perf_pmu *pmu = NULL;
	struct perf_pmu_alias *alias;
	int len = 0;

	pmu = NULL;
	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
		list_for_each_entry(alias, &pmu->aliases, list) {
			char *tmp = strchr(alias->name, '-');

			if (tmp) {
				char *tmp2 = NULL;

				tmp2 = strchr(tmp + 1, '-');
				len++;
				if (tmp2)
					len++;
			}

			len++;
		}
	}

	if (len == 0) {
		perf_pmu_events_list_num = -1;
		return;
	}
	perf_pmu_events_list = malloc(sizeof(struct perf_pmu_event_symbol) * len);
	if (!perf_pmu_events_list)
		return;
	perf_pmu_events_list_num = len;

	len = 0;
	pmu = NULL;
	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
		list_for_each_entry(alias, &pmu->aliases, list) {
			struct perf_pmu_event_symbol *p = perf_pmu_events_list + len;
			char *tmp = strchr(alias->name, '-');
			char *tmp2 = NULL;

			if (tmp)
				tmp2 = strchr(tmp + 1, '-');
			if (tmp2) {
				SET_SYMBOL(strndup(alias->name, tmp - alias->name),
					   PMU_EVENT_SYMBOL_PREFIX);
				p++;
				tmp++;
				SET_SYMBOL(strndup(tmp, tmp2 - tmp), PMU_EVENT_SYMBOL_SUFFIX);
				p++;
				SET_SYMBOL(strdup(++tmp2), PMU_EVENT_SYMBOL_SUFFIX2);
				len += 3;
			} else if (tmp) {
				SET_SYMBOL(strndup(alias->name, tmp - alias->name),
					   PMU_EVENT_SYMBOL_PREFIX);
				p++;
				SET_SYMBOL(strdup(++tmp), PMU_EVENT_SYMBOL_SUFFIX);
				len += 2;
			} else {
				SET_SYMBOL(strdup(alias->name), PMU_EVENT_SYMBOL);
				len++;
			}
		}
	}
	qsort(perf_pmu_events_list, len,
	      sizeof(struct perf_pmu_event_symbol), comp_pmu);

	return;
err:
	perf_pmu__parse_cleanup();
}

/*
 * This function injects special terms into
 * perf_pmu_events_list so the test code
 * can check this functionality.
 */
int perf_pmu__test_parse_init(void)
{
	struct perf_pmu_event_symbol *list, *tmp, symbols[] = {
		{(char *)"read", PMU_EVENT_SYMBOL},
		{(char *)"event", PMU_EVENT_SYMBOL_PREFIX},
		{(char *)"two", PMU_EVENT_SYMBOL_SUFFIX},
		{(char *)"hyphen", PMU_EVENT_SYMBOL_SUFFIX},
		{(char *)"hyph", PMU_EVENT_SYMBOL_SUFFIX2},
	};
	unsigned long i, j;

	tmp = list = malloc(sizeof(*list) * ARRAY_SIZE(symbols));
	if (!list)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(symbols); i++, tmp++) {
		tmp->type = symbols[i].type;
		tmp->symbol = strdup(symbols[i].symbol);
		if (!tmp->symbol)
			goto err_free;
	}

	perf_pmu_events_list = list;
	perf_pmu_events_list_num = ARRAY_SIZE(symbols);

	qsort(perf_pmu_events_list, ARRAY_SIZE(symbols),
	      sizeof(struct perf_pmu_event_symbol), comp_pmu);
	return 0;

err_free:
	for (j = 0, tmp = list; j < i; j++, tmp++)
		zfree(&tmp->symbol);
	free(list);
	return -ENOMEM;
}

enum perf_pmu_event_symbol_type
perf_pmu__parse_check(const char *name)
{
	struct perf_pmu_event_symbol p, *r;

	/* scan kernel pmu events from sysfs if needed */
	if (perf_pmu_events_list_num == 0)
		perf_pmu__parse_init();
	/*
	 * The name "cpu" could be a prefix of cpu-cycles or of cpu// events.
	 * cpu-cycles is handled by the hardcoded tables, so here it must be
	 * a cpu// event, not a kernel PMU event.
	 */
2041 if ((perf_pmu_events_list_num <= 0) || !strcmp(name, "cpu"))
2042 return PMU_EVENT_SYMBOL_ERR;
2043
2044 p.symbol = strdup(name);
2045 r = bsearch(&p, perf_pmu_events_list,
2046 (size_t) perf_pmu_events_list_num,
2047 sizeof(struct perf_pmu_event_symbol), comp_pmu);
2048 zfree(&p.symbol);
2049 return r ? r->type : PMU_EVENT_SYMBOL_ERR;
2050}
2051
static int parse_events__scanner(const char *str,
				 struct parse_events_state *parse_state)
{
	YY_BUFFER_STATE buffer;
	void *scanner;
	int ret;

	ret = parse_events_lex_init_extra(parse_state, &scanner);
	if (ret)
		return ret;

	buffer = parse_events__scan_string(str, scanner);

#ifdef PARSER_DEBUG
	parse_events_debug = 1;
	parse_events_set_debug(1, scanner);
#endif
	ret = parse_events_parse(parse_state, scanner);

	parse_events__flush_buffer(buffer, scanner);
	parse_events__delete_buffer(buffer, scanner);
	parse_events_lex_destroy(scanner);
	return ret;
}

2077/*
2078 * parse event config string, return a list of event terms.
2079 */
int parse_events_terms(struct list_head *terms, const char *str)
{
	struct parse_events_state parse_state = {
		.terms = NULL,
		.stoken = PE_START_TERMS,
	};
	int ret;

	ret = parse_events__scanner(str, &parse_state);
	perf_pmu__parse_cleanup();

	if (!ret) {
		list_splice(parse_state.terms, terms);
		zfree(&parse_state.terms);
		return 0;
	}

	parse_events_terms__delete(parse_state.terms);
	return ret;
}
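/*
 * Parse @str again in the context of the hybrid PMU @pmu_name, splicing the
 * resulting evsels onto @list and keeping @parse_state's index in sync.
 */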
static int parse_events__with_hybrid_pmu(struct parse_events_state *parse_state,
					 const char *str, char *pmu_name,
					 struct list_head *list)
{
	struct parse_events_state ps = {
		.list = LIST_HEAD_INIT(ps.list),
		.stoken = PE_START_EVENTS,
		.hybrid_pmu_name = pmu_name,
		.idx = parse_state->idx,
	};
	int ret;

	ret = parse_events__scanner(str, &ps);
	perf_pmu__parse_cleanup();

	if (!ret) {
		if (list_empty(&ps.list))
			return -1;

		list_splice(&ps.list, list);
		parse_state->idx = ps.idx;
		return 0;
	}

	return ret;
}

__weak int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs)
{
	/* Order by insertion index. */
	return lhs->core.idx - rhs->core.idx;
}

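/*
 * list_sort() comparison: order by group leader index first (keeping groups
 * contiguous), then by PMU name since groups can't span PMUs, and finally
 * by the architecture-specific rule. @state is the index that ungrouped
 * events sort to.
 */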
static int evlist__cmp(void *state, const struct list_head *l, const struct list_head *r)
{
	const struct perf_evsel *lhs_core = container_of(l, struct perf_evsel, node);
	const struct evsel *lhs = container_of(lhs_core, struct evsel, core);
	const struct perf_evsel *rhs_core = container_of(r, struct perf_evsel, node);
	const struct evsel *rhs = container_of(rhs_core, struct evsel, core);
	int *leader_idx = state;
	int lhs_leader_idx = *leader_idx, rhs_leader_idx = *leader_idx, ret;
	const char *lhs_pmu_name, *rhs_pmu_name;

	/*
	 * First sort by grouping/leader. Read the leader idx only if the evsel
	 * is part of a group, as -1 indicates no group.
	 */
	if (lhs_core->leader != lhs_core || lhs_core->nr_members > 1)
		lhs_leader_idx = lhs_core->leader->idx;
	if (rhs_core->leader != rhs_core || rhs_core->nr_members > 1)
		rhs_leader_idx = rhs_core->leader->idx;

	if (lhs_leader_idx != rhs_leader_idx)
		return lhs_leader_idx - rhs_leader_idx;

	/* Group by PMU. Groups can't span PMUs. */
	lhs_pmu_name = evsel__group_pmu_name(lhs);
	rhs_pmu_name = evsel__group_pmu_name(rhs);
	ret = strcmp(lhs_pmu_name, rhs_pmu_name);
	if (ret)
		return ret;

	/* Architecture specific sorting. */
	return arch_evlist__cmp(lhs, rhs);
}

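/*
 * Sort the event list so that groups are contiguous and don't span PMUs,
 * then rebuild the group leaders and member counts. Returns true if the
 * user-specified order or grouping was changed.
 */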
static bool parse_events__sort_events_and_fix_groups(struct list_head *list)
{
	int idx = 0, unsorted_idx = -1;
	struct evsel *pos, *cur_leader = NULL;
	struct perf_evsel *cur_leaders_grp = NULL;
	bool idx_changed = false;
	int orig_num_leaders = 0, num_leaders = 0;

	/*
	 * Compute index to insert ungrouped events at. Place them where the
	 * first ungrouped event appears.
	 */
	list_for_each_entry(pos, list, core.node) {
		const struct evsel *pos_leader = evsel__leader(pos);

		if (pos == pos_leader)
			orig_num_leaders++;

		/*
		 * Ensure indexes are sequential, in particular for multiple
		 * event lists being merged. The indexes are used to detect when
		 * the user order is modified.
		 */
		pos->core.idx = idx++;

		if (unsorted_idx == -1 && pos == pos_leader && pos->core.nr_members < 2)
			unsorted_idx = pos->core.idx;
	}

	/* Sort events. */
	list_sort(&unsorted_idx, list, evlist__cmp);

	/*
	 * Recompute groups, splitting for PMUs and adding groups for events
	 * that require them.
	 */
	idx = 0;
	list_for_each_entry(pos, list, core.node) {
		const struct evsel *pos_leader = evsel__leader(pos);
		const char *pos_pmu_name = evsel__group_pmu_name(pos);
		const char *cur_leader_pmu_name, *pos_leader_pmu_name;
		bool force_grouped = arch_evsel__must_be_in_group(pos);

		/* Reset index and nr_members. */
		if (pos->core.idx != idx)
			idx_changed = true;
		pos->core.idx = idx++;
		pos->core.nr_members = 0;

		/*
		 * Set the group leader respecting the given groupings and that
		 * groups can't span PMUs.
		 */
		if (!cur_leader)
			cur_leader = pos;

		cur_leader_pmu_name = evsel__group_pmu_name(cur_leader);
		if ((cur_leaders_grp != pos->core.leader && !force_grouped) ||
		    strcmp(cur_leader_pmu_name, pos_pmu_name)) {
			/* Event is for a different group/PMU than last. */
			cur_leader = pos;
			/*
			 * Remember the leader's group before it is overwritten,
			 * so that later events match as being in the same
			 * group.
			 */
			cur_leaders_grp = pos->core.leader;
		}
		pos_leader_pmu_name = evsel__group_pmu_name(pos_leader);
		if (strcmp(pos_leader_pmu_name, pos_pmu_name) || force_grouped) {
			/*
			 * Event's PMU differs from its leader's. Groups can't
			 * span PMUs, so update leader from the group/PMU
			 * tracker.
			 */
			evsel__set_leader(pos, cur_leader);
		}
	}
	list_for_each_entry(pos, list, core.node) {
		struct evsel *pos_leader = evsel__leader(pos);

		if (pos == pos_leader)
			num_leaders++;
		pos_leader->core.nr_members++;
	}
	return idx_changed || num_leaders != orig_num_leaders;
}

int __parse_events(struct evlist *evlist, const char *str,
		   struct parse_events_error *err, struct perf_pmu *fake_pmu,
		   bool warn_if_reordered)
{
	struct parse_events_state parse_state = {
		.list = LIST_HEAD_INIT(parse_state.list),
		.idx = evlist->core.nr_entries,
		.error = err,
		.evlist = evlist,
		.stoken = PE_START_EVENTS,
		.fake_pmu = fake_pmu,
	};
	int ret;

	ret = parse_events__scanner(str, &parse_state);
	perf_pmu__parse_cleanup();

	if (!ret && list_empty(&parse_state.list)) {
		WARN_ONCE(true, "WARNING: event parser found nothing\n");
		return -1;
	}

	if (parse_events__sort_events_and_fix_groups(&parse_state.list) &&
	    warn_if_reordered && !parse_state.wild_card_pmus)
		pr_warning("WARNING: events were regrouped to match PMUs\n");

	/*
	 * Add list to the evlist even with errors to allow callers to clean up.
	 */
	evlist__splice_list_tail(evlist, &parse_state.list);

	if (!ret) {
		struct evsel *last;

		last = evlist__last(evlist);
		last->cmdline_group_boundary = true;

		return 0;
	}

	/*
	 * There are two users, builtin-record and builtin-test; both call
	 * evlist__delete() on error, so we don't need to clean up here.
	 */
	return ret;
}

int parse_event(struct evlist *evlist, const char *str)
{
	struct parse_events_error err;
	int ret;

	parse_events_error__init(&err);
	ret = parse_events(evlist, str, &err);
	parse_events_error__exit(&err);
	return ret;
}

void parse_events_error__init(struct parse_events_error *err)
{
	bzero(err, sizeof(*err));
}

void parse_events_error__exit(struct parse_events_error *err)
{
	zfree(&err->str);
	zfree(&err->help);
	zfree(&err->first_str);
	zfree(&err->first_help);
}

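/*
 * Record a parse error, taking ownership of @str and @help. Only the first
 * and the most recent error are kept; intermediate ones are dropped with a
 * debug message.
 */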
void parse_events_error__handle(struct parse_events_error *err, int idx,
				char *str, char *help)
{
	if (WARN(!str || !err, "WARNING: failed to provide error string or struct\n"))
		goto out_free;
	switch (err->num_errors) {
	case 0:
		err->idx = idx;
		err->str = str;
		err->help = help;
		break;
	case 1:
		err->first_idx = err->idx;
		err->idx = idx;
		err->first_str = err->str;
		err->str = str;
		err->first_help = err->help;
		err->help = help;
		break;
	default:
		pr_debug("Multiple errors dropping message: %s (%s)\n",
			err->str, err->help);
		free(err->str);
		err->str = str;
		free(err->help);
		err->help = help;
		break;
	}
	err->num_errors++;
	return;

out_free:
	free(str);
	free(help);
}

#define MAX_WIDTH 1000
static int get_term_width(void)
{
	struct winsize ws;

	get_term_dimensions(&ws);
	return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col;
}

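/*
 * Print @event quoted, with a marker under the error column; e.g. for a
 * hypothetical input "cycles:X" with err_idx 7 the output is roughly:
 *
 *	event syntax error: 'cycles:X'
 *	                            \___ unknown modifier
 *
 * Overlong strings are cut to the terminal width and the removed parts are
 * marked with '..'.
 */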
static void __parse_events_error__print(int err_idx, const char *err_str,
					const char *err_help, const char *event)
{
	const char *str = "invalid or unsupported event: ";
	char _buf[MAX_WIDTH];
	char *buf = (char *) event;
	int idx = 0;

	if (err_str) {
		/* -2 for the two quote characters in the final fprintf */
		int width = get_term_width() - 2;
		int len_event = strlen(event);
		int len_str, max_len, cut = 0;

		/*
		 * Maximum error index indent, we will cut
		 * the event string if it's bigger.
		 */
		int max_err_idx = 13;

		/*
		 * Let's be specific with the message when
		 * we have the precise error.
		 */
		str = "event syntax error: ";
		len_str = strlen(str);
		max_len = width - len_str;

		buf = _buf;

		/* We're cutting from the beginning. */
		if (err_idx > max_err_idx)
			cut = err_idx - max_err_idx;

		strncpy(buf, event + cut, max_len);

		/* Mark cut parts with '..' on both sides. */
		if (cut)
			buf[0] = buf[1] = '.';

		if ((len_event - cut) > max_len) {
			buf[max_len - 1] = buf[max_len - 2] = '.';
			buf[max_len] = 0;
		}

		idx = len_str + err_idx - cut;
	}

	fprintf(stderr, "%s'%s'\n", str, buf);
	if (idx) {
		fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err_str);
		if (err_help)
			fprintf(stderr, "\n%s\n", err_help);
	}
}

void parse_events_error__print(struct parse_events_error *err,
			       const char *event)
{
	if (!err->num_errors)
		return;

	__parse_events_error__print(err->idx, err->str, err->help, event);

	if (err->num_errors > 1) {
		fputs("\nInitial error:\n", stderr);
		__parse_events_error__print(err->first_idx, err->first_str,
					    err->first_help, event);
	}
}

#undef MAX_WIDTH

int parse_events_option(const struct option *opt, const char *str,
			int unset __maybe_unused)
{
	struct evlist *evlist = *(struct evlist **)opt->value;
	struct parse_events_error err;
	int ret;

	parse_events_error__init(&err);
	ret = parse_events(evlist, str, &err);

	if (ret) {
		parse_events_error__print(&err, str);
		fprintf(stderr, "Run 'perf list' for a list of valid events\n");
	}
	parse_events_error__exit(&err);

	return ret;
}

int parse_events_option_new_evlist(const struct option *opt, const char *str, int unset)
{
	struct evlist **evlistp = opt->value;
	int ret;

	if (*evlistp == NULL) {
		*evlistp = evlist__new();

		if (*evlistp == NULL) {
			fprintf(stderr, "Not enough memory to create evlist\n");
			return -1;
		}
	}

	ret = parse_events_option(opt, str, unset);
	if (ret) {
		evlist__delete(*evlistp);
		*evlistp = NULL;
	}

	return ret;
}

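/*
 * Walk backwards from the last evsel to the previous cmdline_group_boundary,
 * i.e. over the events added by the most recent -e option, calling @func on
 * each. @func is also called once with NULL if the list is empty.
 */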
static int
foreach_evsel_in_last_glob(struct evlist *evlist,
			   int (*func)(struct evsel *evsel,
				       const void *arg),
			   const void *arg)
{
	struct evsel *last = NULL;
	int err;

	/*
	 * Don't return early when the list is empty; give func a chance to
	 * report an error when it finds last == NULL.
	 *
	 * So no need to WARN here, let *func do that.
	 */
	if (evlist->core.nr_entries > 0)
		last = evlist__last(evlist);

	do {
		err = (*func)(last, arg);
		if (err)
			return -1;
		if (!last)
			return 0;

		if (last->core.node.prev == &evlist->core.entries)
			return 0;
		last = list_entry(last->core.node.prev, struct evsel, core.node);
	} while (!last->cmdline_group_boundary);

	return 0;
}

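/*
 * Attach @str as a filter to @evsel: tracepoint events get a tracepoint
 * filter, PMUs advertising nr_addr_filters get an address filter, and
 * everything else falls back to a BPF filter.
 */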
static int set_filter(struct evsel *evsel, const void *arg)
{
	const char *str = arg;
	bool found = false;
	int nr_addr_filters = 0;
	struct perf_pmu *pmu = NULL;

	if (evsel == NULL) {
		fprintf(stderr,
			"--filter option should follow a -e tracepoint or HW tracer option\n");
		return -1;
	}

	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
		if (evsel__append_tp_filter(evsel, str) < 0) {
			fprintf(stderr,
				"not enough memory to hold filter string\n");
			return -1;
		}

		return 0;
	}

	while ((pmu = perf_pmu__scan(pmu)) != NULL)
		if (pmu->type == evsel->core.attr.type) {
			found = true;
			break;
		}

	if (found)
		perf_pmu__scan_file(pmu, "nr_addr_filters",
				    "%d", &nr_addr_filters);

	if (!nr_addr_filters)
		return perf_bpf_filter__parse(&evsel->bpf_filters, str);

	if (evsel__append_addr_filter(evsel, str) < 0) {
		fprintf(stderr,
			"not enough memory to hold filter string\n");
		return -1;
	}

	return 0;
}

int parse_filter(const struct option *opt, const char *str,
		 int unset __maybe_unused)
{
	struct evlist *evlist = *(struct evlist **)opt->value;

	return foreach_evsel_in_last_glob(evlist, set_filter,
					  (const void *)str);
}

static int add_exclude_perf_filter(struct evsel *evsel,
				   const void *arg __maybe_unused)
{
	char new_filter[64];

	if (evsel == NULL || evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
		fprintf(stderr,
			"--exclude-perf option should follow a -e tracepoint option\n");
		return -1;
	}

	snprintf(new_filter, sizeof(new_filter), "common_pid != %d", getpid());

	if (evsel__append_tp_filter(evsel, new_filter) < 0) {
		fprintf(stderr,
			"not enough memory to hold filter string\n");
		return -1;
	}

	return 0;
}

int exclude_perf(const struct option *opt,
		 const char *arg __maybe_unused,
		 int unset __maybe_unused)
{
	struct evlist *evlist = *(struct evlist **)opt->value;

	return foreach_evsel_in_last_glob(evlist, add_exclude_perf_filter,
					  NULL);
}

int parse_events__is_hardcoded_term(struct parse_events_term *term)
{
	return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
}

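/*
 * Allocate a new term from the template @temp and set its value from @str
 * or @num according to type_val. Note that @str is not freed on failure;
 * that is left to the callers.
 */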
static int new_term(struct parse_events_term **_term,
		    struct parse_events_term *temp,
		    char *str, u64 num)
{
	struct parse_events_term *term;

	term = malloc(sizeof(*term));
	if (!term)
		return -ENOMEM;

	*term = *temp;
	INIT_LIST_HEAD(&term->list);
	term->weak = false;

	switch (term->type_val) {
	case PARSE_EVENTS__TERM_TYPE_NUM:
		term->val.num = num;
		break;
	case PARSE_EVENTS__TERM_TYPE_STR:
		term->val.str = str;
		break;
	default:
		free(term);
		return -EINVAL;
	}

	*_term = term;
	return 0;
}

int parse_events_term__num(struct parse_events_term **term,
			   int type_term, char *config, u64 num,
			   bool no_value,
			   void *loc_term_, void *loc_val_)
{
	YYLTYPE *loc_term = loc_term_;
	YYLTYPE *loc_val = loc_val_;

	struct parse_events_term temp = {
		.type_val = PARSE_EVENTS__TERM_TYPE_NUM,
		.type_term = type_term,
		.config = config ? : strdup(config_term_names[type_term]),
		.no_value = no_value,
		.err_term = loc_term ? loc_term->first_column : 0,
		.err_val = loc_val ? loc_val->first_column : 0,
	};

	return new_term(term, &temp, NULL, num);
}

int parse_events_term__str(struct parse_events_term **term,
			   int type_term, char *config, char *str,
			   void *loc_term_, void *loc_val_)
{
	YYLTYPE *loc_term = loc_term_;
	YYLTYPE *loc_val = loc_val_;

	struct parse_events_term temp = {
		.type_val = PARSE_EVENTS__TERM_TYPE_STR,
		.type_term = type_term,
		.config = config,
		.err_term = loc_term ? loc_term->first_column : 0,
		.err_val = loc_val ? loc_val->first_column : 0,
	};

	return new_term(term, &temp, str, 0);
}

int parse_events_term__sym_hw(struct parse_events_term **term,
			      char *config, unsigned idx)
{
	struct event_symbol *sym;
	char *str;
	struct parse_events_term temp = {
		.type_val = PARSE_EVENTS__TERM_TYPE_STR,
		.type_term = PARSE_EVENTS__TERM_TYPE_USER,
		.config = config,
	};

	if (!temp.config) {
		temp.config = strdup("event");
		if (!temp.config)
			return -ENOMEM;
	}
	BUG_ON(idx >= PERF_COUNT_HW_MAX);
	sym = &event_symbols_hw[idx];

	str = strdup(sym->symbol);
	if (!str)
		return -ENOMEM;
	return new_term(term, &temp, str, 0);
}

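/*
 * Deep-copy @term into @new, duplicating the config and any string value
 * so the clone can be freed independently of the original.
 */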
int parse_events_term__clone(struct parse_events_term **new,
			     struct parse_events_term *term)
{
	char *str;
	struct parse_events_term temp = {
		.type_val = term->type_val,
		.type_term = term->type_term,
		.config = NULL,
		.err_term = term->err_term,
		.err_val = term->err_val,
	};

	if (term->config) {
		temp.config = strdup(term->config);
		if (!temp.config)
			return -ENOMEM;
	}
	if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
		return new_term(new, &temp, NULL, term->val.num);

	str = strdup(term->val.str);
	if (!str)
		return -ENOMEM;
	return new_term(new, &temp, str, 0);
}

void parse_events_term__delete(struct parse_events_term *term)
{
	if (term->array.nr_ranges)
		zfree(&term->array.ranges);

	if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
		zfree(&term->val.str);

	zfree(&term->config);
	free(term);
}

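/*
 * Clone every term on @old into a newly allocated list at *@new. On clone
 * failure the partially built list is freed and *@new is set to NULL.
 */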
int parse_events_copy_term_list(struct list_head *old,
				struct list_head **new)
{
	struct parse_events_term *term, *n;
	int ret;

	if (!old) {
		*new = NULL;
		return 0;
	}

	*new = malloc(sizeof(struct list_head));
	if (!*new)
		return -ENOMEM;
	INIT_LIST_HEAD(*new);

	list_for_each_entry (term, old, list) {
		ret = parse_events_term__clone(&n, term);
		if (ret) {
			/* Don't leak the terms cloned so far. */
			parse_events_terms__delete(*new);
			*new = NULL;
			return ret;
		}
		list_add_tail(&n->list, *new);
	}
	return 0;
}

void parse_events_terms__purge(struct list_head *terms)
{
	struct parse_events_term *term, *h;

	list_for_each_entry_safe(term, h, terms, list) {
		list_del_init(&term->list);
		parse_events_term__delete(term);
	}
}

void parse_events_terms__delete(struct list_head *terms)
{
	if (!terms)
		return;
	parse_events_terms__purge(terms);
	free(terms);
}

void parse_events__clear_array(struct parse_events_array *a)
{
	zfree(&a->ranges);
}

void parse_events_evlist_error(struct parse_events_state *parse_state,
			       int idx, const char *str)
{
	if (!parse_state->error)
		return;

	parse_events_error__handle(parse_state->error, idx, strdup(str), NULL);
}

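/*
 * Fill @buf with a comma-separated list of the generic config term names
 * that are currently available, skipping internal names (those starting
 * with '<') and terms that config_term_avail() rejects.
 */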
static void config_terms_list(char *buf, size_t buf_sz)
{
	int i;
	bool first = true;

	buf[0] = '\0';
	for (i = 0; i < __PARSE_EVENTS__TERM_TYPE_NR; i++) {
		const char *name = config_term_names[i];

		if (!config_term_avail(i, NULL))
			continue;
		if (!name)
			continue;
		if (name[0] == '<')
			continue;

		if (strlen(buf) + strlen(name) + 2 >= buf_sz)
			return;

		if (!first)
			strcat(buf, ",");
		else
			first = false;
		strcat(buf, name);
	}
}

2825/*
2826 * Return string contains valid config terms of an event.
2827 * @additional_terms: For terms such as PMU sysfs terms.
2828 */
char *parse_events_formats_error_string(char *additional_terms)
{
	char *str;
	/* "no-overwrite" is the longest name */
	char static_terms[__PARSE_EVENTS__TERM_TYPE_NR *
			  (sizeof("no-overwrite") - 1)];

	config_terms_list(static_terms, sizeof(static_terms));
	/* valid terms */
	if (additional_terms) {
		if (asprintf(&str, "valid terms: %s,%s",
			     additional_terms, static_terms) < 0)
			goto fail;
	} else {
		if (asprintf(&str, "valid terms: %s", static_terms) < 0)
			goto fail;
	}
	return str;

fail:
	return NULL;
}

struct evsel *parse_events__add_event_hybrid(struct list_head *list, int *idx,
					     struct perf_event_attr *attr,
					     const char *name,
					     const char *metric_id,
					     struct perf_pmu *pmu,
					     struct list_head *config_terms)
{
	return __add_event(list, idx, attr, /*init_attr=*/true, name, metric_id,
			   pmu, config_terms, /*auto_merge_stats=*/false,
			   /*cpu_list=*/NULL);
}