// SPDX-License-Identifier: GPL-2.0
#include <linux/hw_breakpoint.h>
#include <linux/err.h>
#include <linux/list_sort.h>
#include <linux/zalloc.h>
#include <dirent.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/param.h>
#include "term.h"
#include "evlist.h"
#include "evsel.h"
#include <subcmd/parse-options.h>
#include "parse-events.h"
#include "string2.h"
#include "strlist.h"
#include "bpf-loader.h"
#include "debug.h"
#include <api/fs/tracing_path.h>
#include <perf/cpumap.h>
#include "parse-events-bison.h"
#include "parse-events-flex.h"
#include "pmu.h"
#include "pmus.h"
#include "asm/bug.h"
#include "util/parse-branch-options.h"
#include "util/evsel_config.h"
#include "util/event.h"
#include "util/bpf-filter.h"
#include "util/util.h"
#include "tracepoint.h"

#define MAX_NAME_LEN 100

#ifdef PARSER_DEBUG
extern int parse_events_debug;
#endif
int parse_events_parse(void *parse_state, void *scanner);
static int get_config_terms(struct list_head *head_config,
			    struct list_head *head_terms __maybe_unused);

struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = {
		.symbol = "cpu-cycles",
		.alias  = "cycles",
	},
	[PERF_COUNT_HW_INSTRUCTIONS] = {
		.symbol = "instructions",
		.alias  = "",
	},
	[PERF_COUNT_HW_CACHE_REFERENCES] = {
		.symbol = "cache-references",
		.alias  = "",
	},
	[PERF_COUNT_HW_CACHE_MISSES] = {
		.symbol = "cache-misses",
		.alias  = "",
	},
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {
		.symbol = "branch-instructions",
		.alias  = "branches",
	},
	[PERF_COUNT_HW_BRANCH_MISSES] = {
		.symbol = "branch-misses",
		.alias  = "",
	},
	[PERF_COUNT_HW_BUS_CYCLES] = {
		.symbol = "bus-cycles",
		.alias  = "",
	},
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {
		.symbol = "stalled-cycles-frontend",
		.alias  = "idle-cycles-frontend",
	},
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {
		.symbol = "stalled-cycles-backend",
		.alias  = "idle-cycles-backend",
	},
	[PERF_COUNT_HW_REF_CPU_CYCLES] = {
		.symbol = "ref-cycles",
		.alias  = "",
	},
};

struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
	[PERF_COUNT_SW_CPU_CLOCK] = {
		.symbol = "cpu-clock",
		.alias  = "",
	},
	[PERF_COUNT_SW_TASK_CLOCK] = {
		.symbol = "task-clock",
		.alias  = "",
	},
	[PERF_COUNT_SW_PAGE_FAULTS] = {
		.symbol = "page-faults",
		.alias  = "faults",
	},
	[PERF_COUNT_SW_CONTEXT_SWITCHES] = {
		.symbol = "context-switches",
		.alias  = "cs",
	},
	[PERF_COUNT_SW_CPU_MIGRATIONS] = {
		.symbol = "cpu-migrations",
		.alias  = "migrations",
	},
	[PERF_COUNT_SW_PAGE_FAULTS_MIN] = {
		.symbol = "minor-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_PAGE_FAULTS_MAJ] = {
		.symbol = "major-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_ALIGNMENT_FAULTS] = {
		.symbol = "alignment-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_EMULATION_FAULTS] = {
		.symbol = "emulation-faults",
		.alias  = "",
	},
	[PERF_COUNT_SW_DUMMY] = {
		.symbol = "dummy",
		.alias  = "",
	},
	[PERF_COUNT_SW_BPF_OUTPUT] = {
		.symbol = "bpf-output",
		.alias  = "",
	},
	[PERF_COUNT_SW_CGROUP_SWITCHES] = {
		.symbol = "cgroup-switches",
		.alias  = "",
	},
};
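
/*
 * Illustrative note (not in the original source): these tables back the
 * legacy event names on the command line. For example, "perf stat -e cycles"
 * is the alias spelling of "perf stat -e cpu-cycles"
 * (PERF_COUNT_HW_CPU_CYCLES), and "-e faults" of "-e page-faults"
 * (PERF_COUNT_SW_PAGE_FAULTS).
 */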

const char *event_type(int type)
{
	switch (type) {
	case PERF_TYPE_HARDWARE:
		return "hardware";

	case PERF_TYPE_SOFTWARE:
		return "software";

	case PERF_TYPE_TRACEPOINT:
		return "tracepoint";

	case PERF_TYPE_HW_CACHE:
		return "hardware-cache";

	default:
		break;
	}

	return "unknown";
}

static char *get_config_str(struct list_head *head_terms, int type_term)
{
	struct parse_events_term *term;

	if (!head_terms)
		return NULL;

	list_for_each_entry(term, head_terms, list)
		if (term->type_term == type_term)
			return term->val.str;

	return NULL;
}

static char *get_config_metric_id(struct list_head *head_terms)
{
	return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID);
}

static char *get_config_name(struct list_head *head_terms)
{
	return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME);
}

/**
 * fix_raw - For each raw term see if there is an event (aka alias) in pmu that
 *           matches the raw's string value. If the string value matches an
 *           event then change the term to be an event, if not then change it to
 *           be a config term. For example, "read" may be an event of the PMU or
 *           a raw hex encoding of 0xead. The fix-up is done late so the PMU of
 *           the event can be determined and we don't need to scan all PMUs
 *           ahead-of-time.
 * @config_terms: the list of terms that may contain a raw term.
 * @pmu: the PMU to scan for events from.
 */
static void fix_raw(struct list_head *config_terms, struct perf_pmu *pmu)
{
	struct parse_events_term *term;

	list_for_each_entry(term, config_terms, list) {
		struct perf_pmu_alias *alias;
		bool matched = false;

		if (term->type_term != PARSE_EVENTS__TERM_TYPE_RAW)
			continue;

		list_for_each_entry(alias, &pmu->aliases, list) {
			if (!strcmp(alias->name, term->val.str)) {
				free(term->config);
				term->config = term->val.str;
				term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
				term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
				term->val.num = 1;
				term->no_value = true;
				matched = true;
				break;
			}
		}
		if (!matched) {
			u64 num;

			free(term->config);
			term->config = strdup("config");
			errno = 0;
			num = strtoull(term->val.str + 1, NULL, 16);
			assert(errno == 0);
			free(term->val.str);
			term->type_val = PARSE_EVENTS__TERM_TYPE_NUM;
			term->type_term = PARSE_EVENTS__TERM_TYPE_CONFIG;
			term->val.num = num;
			term->no_value = false;
		}
	}
}
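
/*
 * Illustrative example (not in the original source): given the raw term
 * string "read" on some PMU, fix_raw() rewrites the term to the user event
 * term read=1 if the PMU has an alias named "read"; otherwise it parses the
 * characters after the leading 'r' as hex, yielding the equivalent of
 * config=0xead.
 */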

static struct evsel *
__add_event(struct list_head *list, int *idx,
	    struct perf_event_attr *attr,
	    bool init_attr,
	    const char *name, const char *metric_id, struct perf_pmu *pmu,
	    struct list_head *config_terms, bool auto_merge_stats,
	    const char *cpu_list)
{
	struct evsel *evsel;
	struct perf_cpu_map *cpus = pmu ? perf_cpu_map__get(pmu->cpus) :
			       cpu_list ? perf_cpu_map__new(cpu_list) : NULL;

	if (pmu)
		perf_pmu__warn_invalid_formats(pmu);

	if (pmu && (attr->type == PERF_TYPE_RAW || attr->type >= PERF_TYPE_MAX)) {
		perf_pmu__warn_invalid_config(pmu, attr->config, name,
					      PERF_PMU_FORMAT_VALUE_CONFIG, "config");
		perf_pmu__warn_invalid_config(pmu, attr->config1, name,
					      PERF_PMU_FORMAT_VALUE_CONFIG1, "config1");
		perf_pmu__warn_invalid_config(pmu, attr->config2, name,
					      PERF_PMU_FORMAT_VALUE_CONFIG2, "config2");
		perf_pmu__warn_invalid_config(pmu, attr->config3, name,
					      PERF_PMU_FORMAT_VALUE_CONFIG3, "config3");
	}
	if (init_attr)
		event_attr_init(attr);

	evsel = evsel__new_idx(attr, *idx);
	if (!evsel) {
		perf_cpu_map__put(cpus);
		return NULL;
	}

	(*idx)++;
	evsel->core.cpus = cpus;
	evsel->core.own_cpus = perf_cpu_map__get(cpus);
	evsel->core.requires_cpu = pmu ? pmu->is_uncore : false;
	evsel->core.is_pmu_core = pmu ? pmu->is_core : false;
	evsel->auto_merge_stats = auto_merge_stats;
	evsel->pmu = pmu;
	evsel->pmu_name = pmu && pmu->name ? strdup(pmu->name) : NULL;

	if (name)
		evsel->name = strdup(name);

	if (metric_id)
		evsel->metric_id = strdup(metric_id);

	if (config_terms)
		list_splice_init(config_terms, &evsel->config_terms);

	if (list)
		list_add_tail(&evsel->core.node, list);

	return evsel;
}

struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
				      const char *name, const char *metric_id,
				      struct perf_pmu *pmu)
{
	return __add_event(/*list=*/NULL, &idx, attr, /*init_attr=*/false, name,
			   metric_id, pmu, /*config_terms=*/NULL,
			   /*auto_merge_stats=*/false, /*cpu_list=*/NULL);
}

static int add_event(struct list_head *list, int *idx,
		     struct perf_event_attr *attr, const char *name,
		     const char *metric_id, struct list_head *config_terms)
{
	return __add_event(list, idx, attr, /*init_attr*/true, name, metric_id,
			   /*pmu=*/NULL, config_terms,
			   /*auto_merge_stats=*/false, /*cpu_list=*/NULL) ? 0 : -ENOMEM;
}

static int add_event_tool(struct list_head *list, int *idx,
			  enum perf_tool_event tool_event)
{
	struct evsel *evsel;
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_DUMMY,
	};

	evsel = __add_event(list, idx, &attr, /*init_attr=*/true, /*name=*/NULL,
			    /*metric_id=*/NULL, /*pmu=*/NULL,
			    /*config_terms=*/NULL, /*auto_merge_stats=*/false,
			    /*cpu_list=*/"0");
	if (!evsel)
		return -ENOMEM;
	evsel->tool_event = tool_event;
	if (tool_event == PERF_TOOL_DURATION_TIME
	    || tool_event == PERF_TOOL_USER_TIME
	    || tool_event == PERF_TOOL_SYSTEM_TIME) {
		free((char *)evsel->unit);
		evsel->unit = strdup("ns");
	}
	return 0;
}
/**
 * parse_aliases - search names for entries beginning with or equal to str,
 *                 ignoring case. If multiple entries in names match str then
 *                 the longest is chosen.
 * @str: The needle to look for.
 * @names: The haystack to search.
 * @size: The size of the haystack.
 * @longest: Out argument giving the length of the matching entry.
 */
static int parse_aliases(const char *str, const char *const names[][EVSEL__MAX_ALIASES], int size,
			 int *longest)
{
	*longest = -1;
	for (int i = 0; i < size; i++) {
		for (int j = 0; j < EVSEL__MAX_ALIASES && names[i][j]; j++) {
			int n = strlen(names[i][j]);

			if (n > *longest && !strncasecmp(str, names[i][j], n))
				*longest = n;
		}
		if (*longest > 0)
			return i;
	}

	return -1;
}
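
/*
 * Illustrative example (not in the original source): with
 * str = "l1-dcache-load-misses" and names = evsel__hw_cache, the aliases in
 * the PERF_COUNT_HW_CACHE_L1D row that are prefixes of str include "l1-d"
 * and "L1-dcache"; the longest, "L1-dcache", wins, so that row's index is
 * returned with *longest == 9.
 */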

typedef int config_term_func_t(struct perf_event_attr *attr,
			       struct parse_events_term *term,
			       struct parse_events_error *err);
static int config_term_common(struct perf_event_attr *attr,
			      struct parse_events_term *term,
			      struct parse_events_error *err);
static int config_attr(struct perf_event_attr *attr,
		       struct list_head *head,
		       struct parse_events_error *err,
		       config_term_func_t config_term);

/**
 * parse_events__decode_legacy_cache - Search name for the legacy cache event
 *                                     name composed of 1, 2 or 3 hyphen
 *                                     separated sections. The first section is
 *                                     the cache type while the others are the
 *                                     optional op and optional result. To make
 *                                     life hard the names in the table also
 *                                     contain hyphens and the longest name
 *                                     should always be selected.
 */
int parse_events__decode_legacy_cache(const char *name, int extended_pmu_type, __u64 *config)
{
	int len, cache_type = -1, cache_op = -1, cache_result = -1;
	const char *name_end = &name[strlen(name) + 1];
	const char *str = name;

	cache_type = parse_aliases(str, evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX, &len);
	if (cache_type == -1)
		return -EINVAL;
	str += len + 1;

	if (str < name_end) {
		cache_op = parse_aliases(str, evsel__hw_cache_op,
					 PERF_COUNT_HW_CACHE_OP_MAX, &len);
		if (cache_op >= 0) {
			if (!evsel__is_cache_op_valid(cache_type, cache_op))
				return -EINVAL;
			str += len + 1;
		} else {
			cache_result = parse_aliases(str, evsel__hw_cache_result,
						     PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
			if (cache_result >= 0)
				str += len + 1;
		}
	}
	if (str < name_end) {
		if (cache_op < 0) {
			cache_op = parse_aliases(str, evsel__hw_cache_op,
						 PERF_COUNT_HW_CACHE_OP_MAX, &len);
			if (cache_op >= 0) {
				if (!evsel__is_cache_op_valid(cache_type, cache_op))
					return -EINVAL;
			}
		} else if (cache_result < 0) {
			cache_result = parse_aliases(str, evsel__hw_cache_result,
						     PERF_COUNT_HW_CACHE_RESULT_MAX, &len);
		}
	}

	/*
	 * Fall back to reads:
	 */
	if (cache_op == -1)
		cache_op = PERF_COUNT_HW_CACHE_OP_READ;

	/*
	 * Fall back to accesses:
	 */
	if (cache_result == -1)
		cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;

	*config = cache_type | (cache_op << 8) | (cache_result << 16);
	if (perf_pmus__supports_extended_type())
		*config |= (__u64)extended_pmu_type << PERF_PMU_TYPE_SHIFT;
	return 0;
}
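
/*
 * Illustrative example (not in the original source): decoding
 * "L1-dcache-load-misses" yields cache_type PERF_COUNT_HW_CACHE_L1D (0)
 * from "L1-dcache", cache_op PERF_COUNT_HW_CACHE_OP_READ (0) from "load"
 * and cache_result PERF_COUNT_HW_CACHE_RESULT_MISS (1) from "misses",
 * so *config = 0 | (0 << 8) | (1 << 16) = 0x10000 before any extended
 * PMU type is OR'ed in.
 */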

/**
 * parse_events__filter_pmu - returns false if a wildcard PMU should be
 *                            considered, true if it should be filtered.
 */
bool parse_events__filter_pmu(const struct parse_events_state *parse_state,
			      const struct perf_pmu *pmu)
{
	if (parse_state->pmu_filter == NULL)
		return false;

	if (pmu->name == NULL)
		return true;

	return strcmp(parse_state->pmu_filter, pmu->name) != 0;
}

int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
			   struct parse_events_state *parse_state,
			   struct list_head *head_config)
{
	struct perf_pmu *pmu = NULL;
	bool found_supported = false;
	const char *config_name = get_config_name(head_config);
	const char *metric_id = get_config_metric_id(head_config);

	/* Legacy cache events are only supported by core PMUs. */
	while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
		LIST_HEAD(config_terms);
		struct perf_event_attr attr;
		int ret;

		if (parse_events__filter_pmu(parse_state, pmu))
			continue;

		memset(&attr, 0, sizeof(attr));
		attr.type = PERF_TYPE_HW_CACHE;

		ret = parse_events__decode_legacy_cache(name, pmu->type, &attr.config);
		if (ret)
			return ret;

		found_supported = true;

		if (head_config) {
			if (config_attr(&attr, head_config, parse_state->error, config_term_common))
				return -EINVAL;

			if (get_config_terms(head_config, &config_terms))
				return -ENOMEM;
		}

		if (__add_event(list, idx, &attr, /*init_attr*/true, config_name ?: name,
				metric_id, pmu, &config_terms, /*auto_merge_stats=*/false,
				/*cpu_list=*/NULL) == NULL)
			return -ENOMEM;

		free_config_terms(&config_terms);
	}
	return found_supported ? 0 : -EINVAL;
}

#ifdef HAVE_LIBTRACEEVENT
static void tracepoint_error(struct parse_events_error *e, int err,
			     const char *sys, const char *name)
{
	const char *str;
	char help[BUFSIZ];

	if (!e)
		return;

	/*
	 * We get the error directly from the syscall's errno (> 0),
	 * or from an encoded pointer's error (< 0).
	 */
	err = abs(err);

	switch (err) {
	case EACCES:
		str = "can't access trace events";
		break;
	case ENOENT:
		str = "unknown tracepoint";
		break;
	default:
		str = "failed to add tracepoint";
		break;
	}

	tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name);
	parse_events_error__handle(e, 0, strdup(str), strdup(help));
}

static int add_tracepoint(struct list_head *list, int *idx,
			  const char *sys_name, const char *evt_name,
			  struct parse_events_error *err,
			  struct list_head *head_config)
{
	struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, (*idx)++);

	if (IS_ERR(evsel)) {
		tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name);
		return PTR_ERR(evsel);
	}

	if (head_config) {
		LIST_HEAD(config_terms);

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
		list_splice(&config_terms, &evsel->config_terms);
	}

	list_add_tail(&evsel->core.node, list);
	return 0;
}

static int add_tracepoint_multi_event(struct list_head *list, int *idx,
				      const char *sys_name, const char *evt_name,
				      struct parse_events_error *err,
				      struct list_head *head_config)
{
	char *evt_path;
	struct dirent *evt_ent;
	DIR *evt_dir;
	int ret = 0, found = 0;

	evt_path = get_events_file(sys_name);
	if (!evt_path) {
		tracepoint_error(err, errno, sys_name, evt_name);
		return -1;
	}
	evt_dir = opendir(evt_path);
	if (!evt_dir) {
		put_events_file(evt_path);
		tracepoint_error(err, errno, sys_name, evt_name);
		return -1;
	}

	while (!ret && (evt_ent = readdir(evt_dir))) {
		if (!strcmp(evt_ent->d_name, ".")
		    || !strcmp(evt_ent->d_name, "..")
		    || !strcmp(evt_ent->d_name, "enable")
		    || !strcmp(evt_ent->d_name, "filter"))
			continue;

		if (!strglobmatch(evt_ent->d_name, evt_name))
			continue;

		found++;

		ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name,
				     err, head_config);
	}

	if (!found) {
		tracepoint_error(err, ENOENT, sys_name, evt_name);
		ret = -1;
	}

	put_events_file(evt_path);
	closedir(evt_dir);
	return ret;
}

static int add_tracepoint_event(struct list_head *list, int *idx,
				const char *sys_name, const char *evt_name,
				struct parse_events_error *err,
				struct list_head *head_config)
{
	return strpbrk(evt_name, "*?") ?
	       add_tracepoint_multi_event(list, idx, sys_name, evt_name,
					  err, head_config) :
	       add_tracepoint(list, idx, sys_name, evt_name,
			      err, head_config);
}

static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
				    const char *sys_name, const char *evt_name,
				    struct parse_events_error *err,
				    struct list_head *head_config)
{
	struct dirent *events_ent;
	DIR *events_dir;
	int ret = 0;

	events_dir = tracing_events__opendir();
	if (!events_dir) {
		tracepoint_error(err, errno, sys_name, evt_name);
		return -1;
	}

	while (!ret && (events_ent = readdir(events_dir))) {
		if (!strcmp(events_ent->d_name, ".")
		    || !strcmp(events_ent->d_name, "..")
		    || !strcmp(events_ent->d_name, "enable")
		    || !strcmp(events_ent->d_name, "header_event")
		    || !strcmp(events_ent->d_name, "header_page"))
			continue;

		if (!strglobmatch(events_ent->d_name, sys_name))
			continue;

		ret = add_tracepoint_event(list, idx, events_ent->d_name,
					   evt_name, err, head_config);
	}

	closedir(events_dir);
	return ret;
}
#endif /* HAVE_LIBTRACEEVENT */

#ifdef HAVE_LIBBPF_SUPPORT
struct __add_bpf_event_param {
	struct parse_events_state *parse_state;
	struct list_head *list;
	struct list_head *head_config;
};

static int add_bpf_event(const char *group, const char *event, int fd, struct bpf_object *obj,
			 void *_param)
{
	LIST_HEAD(new_evsels);
	struct __add_bpf_event_param *param = _param;
	struct parse_events_state *parse_state = param->parse_state;
	struct list_head *list = param->list;
	struct evsel *pos;
	int err;
	/*
	 * Check if we should add the event, i.e. if it is a tracepoint whose
	 * group name starts with a '!'. If so, don't add it: it will be used
	 * for something else, like adding to a BPF_MAP_TYPE_PROG_ARRAY.
	 *
	 * See tools/perf/examples/bpf/augmented_raw_syscalls.c
	 */
	if (group[0] == '!')
		return 0;

	pr_debug("add bpf event %s:%s and attach bpf program %d\n",
		 group, event, fd);

	err = parse_events_add_tracepoint(&new_evsels, &parse_state->idx, group,
					  event, parse_state->error,
					  param->head_config);
	if (err) {
		struct evsel *evsel, *tmp;

		pr_debug("Failed to add BPF event %s:%s\n",
			 group, event);
		list_for_each_entry_safe(evsel, tmp, &new_evsels, core.node) {
			list_del_init(&evsel->core.node);
			evsel__delete(evsel);
		}
		return err;
	}
	pr_debug("adding %s:%s\n", group, event);

	list_for_each_entry(pos, &new_evsels, core.node) {
		pr_debug("adding %s:%s to %p\n",
			 group, event, pos);
		pos->bpf_fd = fd;
		pos->bpf_obj = obj;
	}
	list_splice(&new_evsels, list);
	return 0;
}

int parse_events_load_bpf_obj(struct parse_events_state *parse_state,
			      struct list_head *list,
			      struct bpf_object *obj,
			      struct list_head *head_config)
{
	int err;
	char errbuf[BUFSIZ];
	struct __add_bpf_event_param param = {parse_state, list, head_config};
	static bool registered_unprobe_atexit = false;

	if (IS_ERR(obj) || !obj) {
		snprintf(errbuf, sizeof(errbuf),
			 "Internal error: load bpf obj with NULL");
		err = -EINVAL;
		goto errout;
	}

	/*
	 * Register the atexit handler before calling bpf__probe() so that
	 * bpf__probe() doesn't need to unprobe the probe points it has
	 * already created when it fails.
	 */
	if (!registered_unprobe_atexit) {
		atexit(bpf__clear);
		registered_unprobe_atexit = true;
	}

	err = bpf__probe(obj);
	if (err) {
		bpf__strerror_probe(obj, err, errbuf, sizeof(errbuf));
		goto errout;
	}

	err = bpf__load(obj);
	if (err) {
		bpf__strerror_load(obj, err, errbuf, sizeof(errbuf));
		goto errout;
	}

	err = bpf__foreach_event(obj, add_bpf_event, &param);
	if (err) {
		snprintf(errbuf, sizeof(errbuf),
			 "Attach events in BPF object failed");
		goto errout;
	}

	return 0;
errout:
	parse_events_error__handle(parse_state->error, 0,
				   strdup(errbuf), strdup("(add -v to see detail)"));
	return err;
}

static int
parse_events_config_bpf(struct parse_events_state *parse_state,
			struct bpf_object *obj,
			struct list_head *head_config)
{
	struct parse_events_term *term;
	int error_pos;

	if (!head_config || list_empty(head_config))
		return 0;

	list_for_each_entry(term, head_config, list) {
		int err;

		if (term->type_term != PARSE_EVENTS__TERM_TYPE_USER) {
			parse_events_error__handle(parse_state->error, term->err_term,
						strdup("Invalid config term for BPF object"),
						NULL);
			return -EINVAL;
		}

		err = bpf__config_obj(obj, term, parse_state->evlist, &error_pos);
		if (err) {
			char errbuf[BUFSIZ];
			int idx;

			bpf__strerror_config_obj(obj, term, parse_state->evlist,
						 &error_pos, err, errbuf,
						 sizeof(errbuf));

			if (err == -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE)
				idx = term->err_val;
			else
				idx = term->err_term + error_pos;

			parse_events_error__handle(parse_state->error, idx,
						strdup(errbuf),
						strdup(
"Hint:\tValid config terms:\n"
"     \tmap:[<arraymap>].value<indices>=[value]\n"
"     \tmap:[<eventmap>].event<indices>=[event]\n"
"\n"
"     \twhere <indices> is something like [0,3...5] or [all]\n"
"     \t(add -v to see detail)"));
			return err;
		}
	}
	return 0;
}
/*
 * Split config terms:
 * perf record -e bpf.c/call-graph=fp,map:array.value[0]=1/ ...
 *  'call-graph=fp' is an 'evt config' term and should be applied to each
 *  event in bpf.c.
 *  'map:array.value[0]=1' is an 'obj config' term and should be processed
 *  with parse_events_config_bpf.
 *
 * Move object config terms from the first list to obj_head_config.
 */
static void
split_bpf_config_terms(struct list_head *evt_head_config,
		       struct list_head *obj_head_config)
{
	struct parse_events_term *term, *temp;

	/*
	 * Currently, all possible user config terms
	 * belong to the bpf object. parse_events__is_hardcoded_term()
	 * happens to be a good flag.
	 *
	 * See parse_events_config_bpf() and
	 * config_term_tracepoint().
	 */
	list_for_each_entry_safe(term, temp, evt_head_config, list)
		if (!parse_events__is_hardcoded_term(term))
			list_move_tail(&term->list, obj_head_config);
}

int parse_events_load_bpf(struct parse_events_state *parse_state,
			  struct list_head *list,
			  char *bpf_file_name,
			  bool source,
			  struct list_head *head_config)
{
	int err;
	struct bpf_object *obj;
	LIST_HEAD(obj_head_config);

	if (head_config)
		split_bpf_config_terms(head_config, &obj_head_config);

	obj = bpf__prepare_load(bpf_file_name, source);
	if (IS_ERR(obj)) {
		char errbuf[BUFSIZ];

		err = PTR_ERR(obj);

		if (err == -ENOTSUP)
			snprintf(errbuf, sizeof(errbuf),
				 "BPF support is not compiled");
		else
			bpf__strerror_prepare_load(bpf_file_name,
						   source,
						   -err, errbuf,
						   sizeof(errbuf));

		parse_events_error__handle(parse_state->error, 0,
					strdup(errbuf), strdup("(add -v to see detail)"));
		return err;
	}

	err = parse_events_load_bpf_obj(parse_state, list, obj, head_config);
	if (err)
		return err;
	err = parse_events_config_bpf(parse_state, obj, &obj_head_config);

	/*
	 * Caller doesn't know anything about obj_head_config,
	 * so combine them together again before returning.
	 */
	if (head_config)
		list_splice_tail(&obj_head_config, head_config);
	return err;
}
#else // HAVE_LIBBPF_SUPPORT
int parse_events_load_bpf_obj(struct parse_events_state *parse_state,
			      struct list_head *list __maybe_unused,
			      struct bpf_object *obj __maybe_unused,
			      struct list_head *head_config __maybe_unused)
{
	parse_events_error__handle(parse_state->error, 0,
				   strdup("BPF support is not compiled"),
				   strdup("Make sure libbpf-devel is available at build time."));
	return -ENOTSUP;
}

int parse_events_load_bpf(struct parse_events_state *parse_state,
			  struct list_head *list __maybe_unused,
			  char *bpf_file_name __maybe_unused,
			  bool source __maybe_unused,
			  struct list_head *head_config __maybe_unused)
{
	parse_events_error__handle(parse_state->error, 0,
				   strdup("BPF support is not compiled"),
				   strdup("Make sure libbpf-devel is available at build time."));
	return -ENOTSUP;
}
#endif // HAVE_LIBBPF_SUPPORT

static int
parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
{
	int i;

	for (i = 0; i < 3; i++) {
		if (!type || !type[i])
			break;

#define CHECK_SET_TYPE(bit)		\
do {					\
	if (attr->bp_type & bit)	\
		return -EINVAL;		\
	else				\
		attr->bp_type |= bit;	\
} while (0)

		switch (type[i]) {
		case 'r':
			CHECK_SET_TYPE(HW_BREAKPOINT_R);
			break;
		case 'w':
			CHECK_SET_TYPE(HW_BREAKPOINT_W);
			break;
		case 'x':
			CHECK_SET_TYPE(HW_BREAKPOINT_X);
			break;
		default:
			return -EINVAL;
		}
	}

#undef CHECK_SET_TYPE

	if (!attr->bp_type) /* Default */
		attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;

	return 0;
}
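
/*
 * Illustrative example (not in the original source): for an event like
 * "mem:0x1000:rw" the type string "rw" sets HW_BREAKPOINT_R and
 * HW_BREAKPOINT_W; "rr" fails with -EINVAL because each bit may only be
 * set once, and an absent type string falls back to the read|write
 * default above.
 */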

int parse_events_add_breakpoint(struct parse_events_state *parse_state,
				struct list_head *list,
				u64 addr, char *type, u64 len,
				struct list_head *head_config __maybe_unused)
{
	struct perf_event_attr attr;
	LIST_HEAD(config_terms);
	const char *name;

	memset(&attr, 0, sizeof(attr));
	attr.bp_addr = addr;

	if (parse_breakpoint_type(type, &attr))
		return -EINVAL;

	/* Provide some defaults if len is not specified */
	if (!len) {
		if (attr.bp_type == HW_BREAKPOINT_X)
			len = sizeof(long);
		else
			len = HW_BREAKPOINT_LEN_4;
	}

	attr.bp_len = len;

	attr.type = PERF_TYPE_BREAKPOINT;
	attr.sample_period = 1;

	if (head_config) {
		if (config_attr(&attr, head_config, parse_state->error,
				config_term_common))
			return -EINVAL;

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
	}

	name = get_config_name(head_config);

	return add_event(list, &parse_state->idx, &attr, name, /*metric_id=*/NULL,
			 &config_terms);
}

static int check_type_val(struct parse_events_term *term,
			  struct parse_events_error *err,
			  int type)
{
	if (type == term->type_val)
		return 0;

	if (err) {
		parse_events_error__handle(err, term->err_val,
					type == PARSE_EVENTS__TERM_TYPE_NUM
					? strdup("expected numeric value")
					: strdup("expected string value"),
					NULL);
	}
	return -EINVAL;
}

/*
 * Update according to parse-events.l
 */
static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = {
	[PARSE_EVENTS__TERM_TYPE_USER] = "<sysfs term>",
	[PARSE_EVENTS__TERM_TYPE_CONFIG] = "config",
	[PARSE_EVENTS__TERM_TYPE_CONFIG1] = "config1",
	[PARSE_EVENTS__TERM_TYPE_CONFIG2] = "config2",
	[PARSE_EVENTS__TERM_TYPE_CONFIG3] = "config3",
	[PARSE_EVENTS__TERM_TYPE_NAME] = "name",
	[PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD] = "period",
	[PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ] = "freq",
	[PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE] = "branch_type",
	[PARSE_EVENTS__TERM_TYPE_TIME] = "time",
	[PARSE_EVENTS__TERM_TYPE_CALLGRAPH] = "call-graph",
	[PARSE_EVENTS__TERM_TYPE_STACKSIZE] = "stack-size",
	[PARSE_EVENTS__TERM_TYPE_NOINHERIT] = "no-inherit",
	[PARSE_EVENTS__TERM_TYPE_INHERIT] = "inherit",
	[PARSE_EVENTS__TERM_TYPE_MAX_STACK] = "max-stack",
	[PARSE_EVENTS__TERM_TYPE_MAX_EVENTS] = "nr",
	[PARSE_EVENTS__TERM_TYPE_OVERWRITE] = "overwrite",
	[PARSE_EVENTS__TERM_TYPE_NOOVERWRITE] = "no-overwrite",
	[PARSE_EVENTS__TERM_TYPE_DRV_CFG] = "driver-config",
	[PARSE_EVENTS__TERM_TYPE_PERCORE] = "percore",
	[PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT] = "aux-output",
	[PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE] = "aux-sample-size",
	[PARSE_EVENTS__TERM_TYPE_METRIC_ID] = "metric-id",
	[PARSE_EVENTS__TERM_TYPE_RAW] = "raw",
	[PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE] = "legacy-cache",
	[PARSE_EVENTS__TERM_TYPE_HARDWARE] = "hardware",
};

static bool config_term_shrinked;

static bool
config_term_avail(int term_type, struct parse_events_error *err)
{
	char *err_str;

	if (term_type < 0 || term_type >= __PARSE_EVENTS__TERM_TYPE_NR) {
		parse_events_error__handle(err, -1,
					strdup("Invalid term_type"), NULL);
		return false;
	}
	if (!config_term_shrinked)
		return true;

	switch (term_type) {
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
	case PARSE_EVENTS__TERM_TYPE_NAME:
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
		return true;
	default:
		if (!err)
			return false;

		/* term_type is validated so indexing is safe */
		if (asprintf(&err_str, "'%s' is not usable in 'perf stat'",
			     config_term_names[term_type]) >= 0)
			parse_events_error__handle(err, -1, err_str, NULL);
		return false;
	}
}

void parse_events__shrink_config_terms(void)
{
	config_term_shrinked = true;
}

static int config_term_common(struct perf_event_attr *attr,
			      struct parse_events_term *term,
			      struct parse_events_error *err)
{
#define CHECK_TYPE_VAL(type)						   \
do {									   \
	if (check_type_val(term, err, PARSE_EVENTS__TERM_TYPE_ ## type))  \
		return -EINVAL;						   \
} while (0)

	switch (term->type_term) {
	case PARSE_EVENTS__TERM_TYPE_CONFIG:
		CHECK_TYPE_VAL(NUM);
		attr->config = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG1:
		CHECK_TYPE_VAL(NUM);
		attr->config1 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG2:
		CHECK_TYPE_VAL(NUM);
		attr->config2 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_CONFIG3:
		CHECK_TYPE_VAL(NUM);
		attr->config3 = term->val.num;
		break;
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
		CHECK_TYPE_VAL(STR);
		if (strcmp(term->val.str, "no") &&
		    parse_branch_str(term->val.str,
				     &attr->branch_sample_type)) {
			parse_events_error__handle(err, term->err_val,
					strdup("invalid branch sample type"),
					NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_TIME:
		CHECK_TYPE_VAL(NUM);
		if (term->val.num > 1) {
			parse_events_error__handle(err, term->err_val,
						strdup("expected 0 or 1"),
						NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_NAME:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_RAW:
		CHECK_TYPE_VAL(STR);
		break;
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_PERCORE:
		CHECK_TYPE_VAL(NUM);
		if ((unsigned int)term->val.num > 1) {
			parse_events_error__handle(err, term->err_val,
						strdup("expected 0 or 1"),
						NULL);
			return -EINVAL;
		}
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
		CHECK_TYPE_VAL(NUM);
		break;
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		CHECK_TYPE_VAL(NUM);
		if (term->val.num > UINT_MAX) {
			parse_events_error__handle(err, term->err_val,
						strdup("too big"),
						NULL);
			return -EINVAL;
		}
		break;
	default:
		parse_events_error__handle(err, term->err_term,
				strdup("unknown term"),
				parse_events_formats_error_string(NULL));
		return -EINVAL;
	}

	/*
	 * Check term availability after basic checking so
	 * PARSE_EVENTS__TERM_TYPE_USER can be found and filtered.
	 *
	 * If we checked availability at the entry of this function,
	 * the user would see "'<sysfs term>' is not usable in 'perf stat'"
	 * whenever an invalid config term is provided for a legacy event
	 * (for example, instructions/badterm/...), which is confusing.
	 */
	if (!config_term_avail(term->type_term, err))
		return -EINVAL;
	return 0;
#undef CHECK_TYPE_VAL
}

static int config_term_pmu(struct perf_event_attr *attr,
			   struct parse_events_term *term,
			   struct parse_events_error *err)
{
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE) {
		const struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);

		if (!pmu) {
			char *err_str;

			if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0)
				parse_events_error__handle(err, term->err_term,
							   err_str, /*help=*/NULL);
			return -EINVAL;
		}
		if (perf_pmu__supports_legacy_cache(pmu)) {
			attr->type = PERF_TYPE_HW_CACHE;
			return parse_events__decode_legacy_cache(term->config, pmu->type,
								 &attr->config);
		} else
			term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
	}
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_HARDWARE) {
		const struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);

		if (!pmu) {
			char *err_str;

			if (asprintf(&err_str, "Failed to find PMU for type %d", attr->type) >= 0)
				parse_events_error__handle(err, term->err_term,
							   err_str, /*help=*/NULL);
			return -EINVAL;
		}
		attr->type = PERF_TYPE_HARDWARE;
		attr->config = term->val.num;
		if (perf_pmus__supports_extended_type())
			attr->config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT;
		return 0;
	}
	if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER ||
	    term->type_term == PARSE_EVENTS__TERM_TYPE_DRV_CFG) {
		/*
		 * Always succeed for sysfs terms, as we don't know
		 * at this point what type they need to have.
		 */
		return 0;
	}
	return config_term_common(attr, term, err);
}

#ifdef HAVE_LIBTRACEEVENT
static int config_term_tracepoint(struct perf_event_attr *attr,
				  struct parse_events_term *term,
				  struct parse_events_error *err)
{
	switch (term->type_term) {
	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
	case PARSE_EVENTS__TERM_TYPE_INHERIT:
	case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
	case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
	case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
	case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
	case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
	case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
		return config_term_common(attr, term, err);
	default:
		if (err) {
			parse_events_error__handle(err, term->err_term,
					strdup("unknown term"),
					strdup("valid terms: call-graph,stack-size\n"));
		}
		return -EINVAL;
	}

	return 0;
}
#endif

static int config_attr(struct perf_event_attr *attr,
		       struct list_head *head,
		       struct parse_events_error *err,
		       config_term_func_t config_term)
{
	struct parse_events_term *term;

	list_for_each_entry(term, head, list)
		if (config_term(attr, term, err))
			return -EINVAL;

	return 0;
}

static int get_config_terms(struct list_head *head_config,
			    struct list_head *head_terms __maybe_unused)
{
#define ADD_CONFIG_TERM(__type, __weak)				\
	struct evsel_config_term *__t;				\
								\
	__t = zalloc(sizeof(*__t));				\
	if (!__t)						\
		return -ENOMEM;					\
								\
	INIT_LIST_HEAD(&__t->list);				\
	__t->type = EVSEL__CONFIG_TERM_ ## __type;		\
	__t->weak = __weak;					\
	list_add_tail(&__t->list, head_terms)

#define ADD_CONFIG_TERM_VAL(__type, __name, __val, __weak)	\
do {								\
	ADD_CONFIG_TERM(__type, __weak);			\
	__t->val.__name = __val;				\
} while (0)

#define ADD_CONFIG_TERM_STR(__type, __val, __weak)		\
do {								\
	ADD_CONFIG_TERM(__type, __weak);			\
	__t->val.str = strdup(__val);				\
	if (!__t->val.str) {					\
		zfree(&__t);					\
		return -ENOMEM;					\
	}							\
	__t->free_str = true;					\
} while (0)

	struct parse_events_term *term;

	list_for_each_entry(term, head_config, list) {
		switch (term->type_term) {
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
			ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
			ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_TIME:
			ADD_CONFIG_TERM_VAL(TIME, time, term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
			ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
			ADD_CONFIG_TERM_STR(BRANCH, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
			ADD_CONFIG_TERM_VAL(STACK_USER, stack_user,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_INHERIT:
			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
			ADD_CONFIG_TERM_VAL(INHERIT, inherit,
					    term->val.num ? 0 : 1, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
			ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
			ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events,
					    term->val.num, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
			ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
					    term->val.num ? 0 : 1, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
			ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_PERCORE:
			ADD_CONFIG_TERM_VAL(PERCORE, percore,
					    term->val.num ? true : false, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
			ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output,
					    term->val.num ? 1 : 0, term->weak);
			break;
		case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
			ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size,
					    term->val.num, term->weak);
			break;
		default:
			break;
		}
	}
	return 0;
}

/*
 * Add EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for
 * each bit of attr->config that the user has changed.
 */
static int get_config_chgs(struct perf_pmu *pmu, struct list_head *head_config,
			   struct list_head *head_terms)
{
	struct parse_events_term *term;
	u64 bits = 0;
	int type;

	list_for_each_entry(term, head_config, list) {
		switch (term->type_term) {
		case PARSE_EVENTS__TERM_TYPE_USER:
			type = perf_pmu__format_type(&pmu->format, term->config);
			if (type != PERF_PMU_FORMAT_VALUE_CONFIG)
				continue;
			bits |= perf_pmu__format_bits(&pmu->format, term->config);
			break;
		case PARSE_EVENTS__TERM_TYPE_CONFIG:
			bits = ~(u64)0;
			break;
		default:
			break;
		}
	}

	if (bits)
		ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits, false);

#undef ADD_CONFIG_TERM
	return 0;
}
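
/*
 * Illustrative example (not in the original source; the format fields are
 * hypothetical): for "mypmu/event=0x3c,umask=0x1/" on a PMU whose sysfs
 * format maps "event" to config:0-7 and "umask" to config:8-15, the two
 * user terms set bits 0-15 of cfg_chg; a literal "config=..." term marks
 * every bit as user-changed.
 */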

int parse_events_add_tracepoint(struct list_head *list, int *idx,
				const char *sys, const char *event,
				struct parse_events_error *err,
				struct list_head *head_config)
{
#ifdef HAVE_LIBTRACEEVENT
	if (head_config) {
		struct perf_event_attr attr;

		if (config_attr(&attr, head_config, err,
				config_term_tracepoint))
			return -EINVAL;
	}

	if (strpbrk(sys, "*?"))
		return add_tracepoint_multi_sys(list, idx, sys, event,
						err, head_config);
	else
		return add_tracepoint_event(list, idx, sys, event,
					    err, head_config);
#else
	(void)list;
	(void)idx;
	(void)sys;
	(void)event;
	(void)head_config;
	parse_events_error__handle(err, 0, strdup("unsupported tracepoint"),
				strdup("libtraceevent is necessary for tracepoint support"));
	return -1;
#endif
}

static int __parse_events_add_numeric(struct parse_events_state *parse_state,
				struct list_head *list,
				struct perf_pmu *pmu, u32 type, u32 extended_type,
				u64 config, struct list_head *head_config)
{
	struct perf_event_attr attr;
	LIST_HEAD(config_terms);
	const char *name, *metric_id;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.type = type;
	attr.config = config;
	if (extended_type && (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE)) {
		assert(perf_pmus__supports_extended_type());
		attr.config |= (u64)extended_type << PERF_PMU_TYPE_SHIFT;
	}

	if (head_config) {
		if (config_attr(&attr, head_config, parse_state->error,
				config_term_common))
			return -EINVAL;

		if (get_config_terms(head_config, &config_terms))
			return -ENOMEM;
	}

	name = get_config_name(head_config);
	metric_id = get_config_metric_id(head_config);
	ret = __add_event(list, &parse_state->idx, &attr, /*init_attr*/true, name,
			  metric_id, pmu, &config_terms, /*auto_merge_stats=*/false,
			  /*cpu_list=*/NULL) ? 0 : -ENOMEM;
	free_config_terms(&config_terms);
	return ret;
}

int parse_events_add_numeric(struct parse_events_state *parse_state,
			     struct list_head *list,
			     u32 type, u64 config,
			     struct list_head *head_config,
			     bool wildcard)
{
	struct perf_pmu *pmu = NULL;
	bool found_supported = false;

	/* Wildcards on numeric values are only supported by core PMUs. */
	if (wildcard && perf_pmus__supports_extended_type()) {
		while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
			int ret;

			found_supported = true;
			if (parse_events__filter_pmu(parse_state, pmu))
				continue;

			ret = __parse_events_add_numeric(parse_state, list, pmu,
							 type, pmu->type,
							 config, head_config);
			if (ret)
				return ret;
		}
		if (found_supported)
			return 0;
	}
	return __parse_events_add_numeric(parse_state, list, perf_pmus__find_by_type(type),
					  type, /*extended_type=*/0, config, head_config);
}
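
/*
 * Illustrative example (not in the original source): on a hybrid system
 * with core PMUs cpu_core and cpu_atom, a wildcard legacy event such as
 * "cycles" is added once per core PMU, each instance carrying that PMU's
 * type in the upper config bits (PERF_PMU_TYPE_SHIFT) so the kernel can
 * route the event to the right PMU.
 */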

int parse_events_add_tool(struct parse_events_state *parse_state,
			  struct list_head *list,
			  int tool_event)
{
	return add_event_tool(list, &parse_state->idx, tool_event);
}

static bool config_term_percore(struct list_head *config_terms)
{
	struct evsel_config_term *term;

	list_for_each_entry(term, config_terms, list) {
		if (term->type == EVSEL__CONFIG_TERM_PERCORE)
			return term->val.percore;
	}

	return false;
}

int parse_events_add_pmu(struct parse_events_state *parse_state,
			 struct list_head *list, char *name,
			 struct list_head *head_config,
			 bool auto_merge_stats)
{
	struct perf_event_attr attr;
	struct perf_pmu_info info;
	struct perf_pmu *pmu;
	struct evsel *evsel;
	struct parse_events_error *err = parse_state->error;
	LIST_HEAD(config_terms);

	pmu = parse_state->fake_pmu ?: perf_pmus__find(name);

	if (verbose > 1 && !(pmu && pmu->selectable)) {
		fprintf(stderr, "Attempting to add event pmu '%s' with '",
			name);
		if (head_config) {
			struct parse_events_term *term;

			list_for_each_entry(term, head_config, list) {
				fprintf(stderr, "%s,", term->config);
			}
		}
		fprintf(stderr, "' that may result in non-fatal errors\n");
	}

	if (!pmu) {
		char *err_str;

		if (asprintf(&err_str,
			     "Cannot find PMU `%s'. Missing kernel support?",
			     name) >= 0)
			parse_events_error__handle(err, 0, err_str, NULL);
		return -EINVAL;
	}
	if (head_config)
		fix_raw(head_config, pmu);

	if (pmu->default_config) {
		memcpy(&attr, pmu->default_config,
		       sizeof(struct perf_event_attr));
	} else {
		memset(&attr, 0, sizeof(attr));
	}
	attr.type = pmu->type;

	if (!head_config) {
		evsel = __add_event(list, &parse_state->idx, &attr,
				    /*init_attr=*/true, /*name=*/NULL,
				    /*metric_id=*/NULL, pmu,
				    /*config_terms=*/NULL, auto_merge_stats,
				    /*cpu_list=*/NULL);
		return evsel ? 0 : -ENOMEM;
	}

	if (!parse_state->fake_pmu && perf_pmu__check_alias(pmu, head_config, &info))
		return -EINVAL;

	if (verbose > 1) {
		fprintf(stderr, "After aliases, add event pmu '%s' with '",
			name);
		if (head_config) {
			struct parse_events_term *term;

			list_for_each_entry(term, head_config, list) {
				fprintf(stderr, "%s,", term->config);
			}
		}
		fprintf(stderr, "' that may result in non-fatal errors\n");
	}

	/*
	 * Configure hardcoded terms first, no need to check
	 * return value when called with fail == 0 ;)
	 */
	if (config_attr(&attr, head_config, parse_state->error, config_term_pmu))
		return -EINVAL;

	if (get_config_terms(head_config, &config_terms))
		return -ENOMEM;

	/*
	 * When using default config, record which bits of attr->config were
	 * changed by the user.
	 */
	if (pmu->default_config && get_config_chgs(pmu, head_config, &config_terms))
		return -ENOMEM;

	if (!parse_state->fake_pmu && perf_pmu__config(pmu, &attr, head_config, parse_state->error)) {
		free_config_terms(&config_terms);
		return -EINVAL;
	}

	evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true,
			    get_config_name(head_config),
			    get_config_metric_id(head_config), pmu,
			    &config_terms, auto_merge_stats, /*cpu_list=*/NULL);
	if (!evsel)
		return -ENOMEM;

	if (evsel->name)
		evsel->use_config_name = true;

	evsel->percore = config_term_percore(&evsel->config_terms);

	if (parse_state->fake_pmu)
		return 0;

	free((char *)evsel->unit);
	evsel->unit = strdup(info.unit);
	evsel->scale = info.scale;
	evsel->per_pkg = info.per_pkg;
	evsel->snapshot = info.snapshot;
	return 0;
}
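
/*
 * Illustrative example (not in the original source; the event value is
 * hypothetical): a spec like "cpu/event=0x3c,name=my_cycles/" reaches
 * parse_events_add_pmu() with name "cpu" and two terms; aliases are
 * resolved, the terms are encoded into the attr via the PMU's sysfs
 * format, and the resulting evsel is named "my_cycles".
 */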

int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
			       char *str, struct list_head *head,
			       struct list_head **listp)
{
	struct parse_events_term *term;
	struct list_head *list = NULL;
	struct list_head *orig_head = NULL;
	struct perf_pmu *pmu = NULL;
	int ok = 0;
	char *config;

	*listp = NULL;

	if (!head) {
		head = malloc(sizeof(struct list_head));
		if (!head)
			goto out_err;

		INIT_LIST_HEAD(head);
	}
	config = strdup(str);
	if (!config)
		goto out_err;

	if (parse_events_term__num(&term,
				   PARSE_EVENTS__TERM_TYPE_USER,
				   config, 1, false, NULL,
				   NULL) < 0) {
		free(config);
		goto out_err;
	}
	list_add_tail(&term->list, head);

	/* Add it for all PMUs that support the alias */
	list = malloc(sizeof(struct list_head));
	if (!list)
		goto out_err;

	INIT_LIST_HEAD(list);

	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		struct perf_pmu_alias *alias;
		bool auto_merge_stats;

		if (parse_events__filter_pmu(parse_state, pmu))
			continue;

		auto_merge_stats = perf_pmu__auto_merge_stats(pmu);

		list_for_each_entry(alias, &pmu->aliases, list) {
			if (!strcasecmp(alias->name, str)) {
				parse_events_copy_term_list(head, &orig_head);
				if (!parse_events_add_pmu(parse_state, list,
							  pmu->name, orig_head,
							  auto_merge_stats)) {
					pr_debug("%s -> %s/%s/\n", str,
						 pmu->name, alias->str);
					ok++;
				}
				parse_events_terms__delete(orig_head);
			}
		}
	}

	if (parse_state->fake_pmu) {
		if (!parse_events_add_pmu(parse_state, list, str, head,
					  /*auto_merge_stats=*/true)) {
			pr_debug("%s -> %s/%s/\n", str, "fake_pmu", str);
			ok++;
		}
	}

out_err:
	if (ok)
		*listp = list;
	else
		free(list);

	parse_events_terms__delete(head);
	return ok ? 0 : -1;
}

int parse_events__modifier_group(struct list_head *list,
				 char *event_mod)
{
	return parse_events__modifier_event(list, event_mod, true);
}

void parse_events__set_leader(char *name, struct list_head *list)
{
	struct evsel *leader;

	if (list_empty(list)) {
		WARN_ONCE(true, "WARNING: failed to set leader: empty list");
		return;
	}

	leader = list_first_entry(list, struct evsel, core.node);
	__perf_evlist__set_leader(list, &leader->core);
	leader->group_name = name;
}

/* list_event is assumed to point to malloc'ed memory */
void parse_events_update_lists(struct list_head *list_event,
			       struct list_head *list_all)
{
	/*
	 * Called for single event definition. Update the
	 * 'all event' list, and reinit the 'single event'
	 * list, for next event definition.
	 */
	list_splice_tail(list_event, list_all);
	free(list_event);
}

struct event_modifier {
	int eu;
	int ek;
	int eh;
	int eH;
	int eG;
	int eI;
	int precise;
	int precise_max;
	int exclude_GH;
	int sample_read;
	int pinned;
	int weak;
	int exclusive;
	int bpf_counter;
};

static int get_event_modifier(struct event_modifier *mod, char *str,
			      struct evsel *evsel)
{
	int eu = evsel ? evsel->core.attr.exclude_user : 0;
	int ek = evsel ? evsel->core.attr.exclude_kernel : 0;
	int eh = evsel ? evsel->core.attr.exclude_hv : 0;
	int eH = evsel ? evsel->core.attr.exclude_host : 0;
	int eG = evsel ? evsel->core.attr.exclude_guest : 0;
	int eI = evsel ? evsel->core.attr.exclude_idle : 0;
	int precise = evsel ? evsel->core.attr.precise_ip : 0;
	int precise_max = 0;
	int sample_read = 0;
	int pinned = evsel ? evsel->core.attr.pinned : 0;
	int exclusive = evsel ? evsel->core.attr.exclusive : 0;

	int exclude = eu | ek | eh;
	int exclude_GH = evsel ? evsel->exclude_GH : 0;
	int weak = 0;
	int bpf_counter = 0;

	memset(mod, 0, sizeof(*mod));

	while (*str) {
		if (*str == 'u') {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			if (!exclude_GH && !perf_guest)
				eG = 1;
			eu = 0;
		} else if (*str == 'k') {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			ek = 0;
		} else if (*str == 'h') {
			if (!exclude)
				exclude = eu = ek = eh = 1;
			eh = 0;
		} else if (*str == 'G') {
			if (!exclude_GH)
				exclude_GH = eG = eH = 1;
			eG = 0;
		} else if (*str == 'H') {
			if (!exclude_GH)
				exclude_GH = eG = eH = 1;
			eH = 0;
		} else if (*str == 'I') {
			eI = 1;
		} else if (*str == 'p') {
			precise++;
			/* use of precise requires exclude_guest */
			if (!exclude_GH)
				eG = 1;
		} else if (*str == 'P') {
			precise_max = 1;
		} else if (*str == 'S') {
			sample_read = 1;
		} else if (*str == 'D') {
			pinned = 1;
		} else if (*str == 'e') {
			exclusive = 1;
		} else if (*str == 'W') {
			weak = 1;
		} else if (*str == 'b') {
			bpf_counter = 1;
		} else
			break;

		++str;
	}

	/*
	 * precise ip:
	 *
	 *  0 - SAMPLE_IP can have arbitrary skid
	 *  1 - SAMPLE_IP must have constant skid
	 *  2 - SAMPLE_IP requested to have 0 skid
	 *  3 - SAMPLE_IP must have 0 skid
	 *
	 * See also PERF_RECORD_MISC_EXACT_IP
	 */
	if (precise > 3)
		return -EINVAL;

	mod->eu = eu;
	mod->ek = ek;
	mod->eh = eh;
	mod->eH = eH;
	mod->eG = eG;
	mod->eI = eI;
	mod->precise = precise;
	mod->precise_max = precise_max;
	mod->exclude_GH = exclude_GH;
	mod->sample_read = sample_read;
	mod->pinned = pinned;
	mod->weak = weak;
	mod->bpf_counter = bpf_counter;
	mod->exclusive = exclusive;

	return 0;
}
/*
 * Basic modifier sanity check: validate that str contains at most one
 * instance of each modifier (apart from 'p').
 */
static int check_modifier(char *str)
{
	char *p = str;

	/* The sizeof includes 0 byte as well. */
	if (strlen(str) > (sizeof("ukhGHpppPSDIWeb") - 1))
		return -1;

	while (*p) {
		if (*p != 'p' && strchr(p + 1, *p))
			return -1;
		p++;
	}

	return 0;
}
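
/*
 * Illustrative example (not in the original source): the modifier string
 * "ukpp" passes (repeated 'p's are allowed to raise precision), while
 * "uu" fails the strchr() duplicate scan, and anything longer than
 * "ukhGHpppPSDIWeb" is rejected by the length check.
 */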
1929
1930int parse_events__modifier_event(struct list_head *list, char *str, bool add)
1931{
1932 struct evsel *evsel;
1933 struct event_modifier mod;
1934
1935 if (str == NULL)
1936 return 0;
1937
1938 if (check_modifier(str))
1939 return -EINVAL;
1940
1941 if (!add && get_event_modifier(&mod, str, NULL))
1942 return -EINVAL;
1943
1944 __evlist__for_each_entry(list, evsel) {
1945 if (add && get_event_modifier(&mod, str, evsel))
1946 return -EINVAL;
1947
1948 evsel->core.attr.exclude_user = mod.eu;
1949 evsel->core.attr.exclude_kernel = mod.ek;
1950 evsel->core.attr.exclude_hv = mod.eh;
1951 evsel->core.attr.precise_ip = mod.precise;
1952 evsel->core.attr.exclude_host = mod.eH;
1953 evsel->core.attr.exclude_guest = mod.eG;
1954 evsel->core.attr.exclude_idle = mod.eI;
1955 evsel->exclude_GH = mod.exclude_GH;
1956 evsel->sample_read = mod.sample_read;
1957 evsel->precise_max = mod.precise_max;
1958 evsel->weak_group = mod.weak;
1959 evsel->bpf_counter = mod.bpf_counter;
1960
1961 if (evsel__is_group_leader(evsel)) {
1962 evsel->core.attr.pinned = mod.pinned;
1963 evsel->core.attr.exclusive = mod.exclusive;
1964 }
1965 }
1966
1967 return 0;
1968}
1969
1970int parse_events_name(struct list_head *list, const char *name)
1971{
1972 struct evsel *evsel;
1973
1974 __evlist__for_each_entry(list, evsel) {
1975 if (!evsel->name)
1976 evsel->name = strdup(name);
1977 }
1978
1979 return 0;
1980}
1981
1982static int parse_events__scanner(const char *str,
1983 struct parse_events_state *parse_state)
1984{
1985 YY_BUFFER_STATE buffer;
1986 void *scanner;
1987 int ret;
1988
1989 ret = parse_events_lex_init_extra(parse_state, &scanner);
1990 if (ret)
1991 return ret;
1992
1993 buffer = parse_events__scan_string(str, scanner);
1994
1995#ifdef PARSER_DEBUG
1996 parse_events_debug = 1;
1997 parse_events_set_debug(1, scanner);
1998#endif
1999 ret = parse_events_parse(parse_state, scanner);
2000
2001 parse_events__flush_buffer(buffer, scanner);
2002 parse_events__delete_buffer(buffer, scanner);
2003 parse_events_lex_destroy(scanner);
2004 return ret;
2005}
2006
2007/*
2008 * parse event config string, return a list of event terms.
2009 */
2010int parse_events_terms(struct list_head *terms, const char *str)
2011{
2012 struct parse_events_state parse_state = {
2013 .terms = NULL,
2014 .stoken = PE_START_TERMS,
2015 };
2016 int ret;
2017
2018 ret = parse_events__scanner(str, &parse_state);
2019
2020 if (!ret) {
2021 list_splice(parse_state.terms, terms);
2022 zfree(&parse_state.terms);
2023 return 0;
2024 }
2025
2026 parse_events_terms__delete(parse_state.terms);
2027 return ret;
2028}
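
/*
 * For example, parse_events_terms(&terms, "config=0x1,name=foo,period=1000")
 * produces a three-entry list: NUM terms for config and period, and a STR
 * term for name.
 */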
2029
2030static int evsel__compute_group_pmu_name(struct evsel *evsel,
2031 const struct list_head *head)
2032{
2033 struct evsel *leader = evsel__leader(evsel);
2034 struct evsel *pos;
2035 const char *group_pmu_name;
2036 struct perf_pmu *pmu = evsel__find_pmu(evsel);
2037
2038 if (!pmu) {
2039 /*
2040 * For PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE types the PMU
2041 * is a core PMU, but in heterogeneous systems this is
2042 * unknown. For now pick the first core PMU.
2043 */
2044 pmu = perf_pmus__scan_core(NULL);
2045 }
2046 if (!pmu) {
2047 pr_debug("No PMU found for '%s'\n", evsel__name(evsel));
2048 return -EINVAL;
2049 }
2050 group_pmu_name = pmu->name;
2051 /*
2052 * Software events may be in a group with other uncore PMU events. Use
2053 * the pmu_name of the first non-software event to avoid breaking the
2054 * software event out of the group.
2055 *
2056 * Aux event leaders, like intel_pt, expect a group with events from
2057 * other PMUs, so substitute the AUX event's PMU in this case.
2058 */
2059 if (perf_pmu__is_software(pmu) || evsel__is_aux_event(leader)) {
2060 struct perf_pmu *leader_pmu = evsel__find_pmu(leader);
2061
2062 if (!leader_pmu) {
2063 /* As with determining pmu above. */
2064 leader_pmu = perf_pmus__scan_core(NULL);
2065 }
2066 /*
2067 * Starting with the leader, find the first event with a named
2068 * non-software PMU. for_each_group_(member|evsel) isn't used as the
2069 * list isn't yet sorted; sorting is what places evsels of the same
2070 * group next to each other.
2071 */
2072 if (leader_pmu && !perf_pmu__is_software(leader_pmu)) {
2073 group_pmu_name = leader_pmu->name;
2074 } else if (leader->core.nr_members > 1) {
2075 list_for_each_entry(pos, head, core.node) {
2076 struct perf_pmu *pos_pmu;
2077
2078 if (pos == leader || evsel__leader(pos) != leader)
2079 continue;
2080 pos_pmu = evsel__find_pmu(pos);
2081 if (!pos_pmu) {
2082 /* As with determining pmu above. */
2083 pos_pmu = perf_pmus__scan_core(NULL);
2084 }
2085 if (pos_pmu && !perf_pmu__is_software(pos_pmu)) {
2086 group_pmu_name = pos_pmu->name;
2087 break;
2088 }
2089 }
2090 }
2091 }
2092 /* Assign the actual name taking care that the fake PMU lacks a name. */
2093 evsel->group_pmu_name = strdup(group_pmu_name ?: "fake");
2094 return evsel->group_pmu_name ? 0 : -ENOMEM;
2095}
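
/*
 * For example (illustrative PMU/event names), in the group
 * "{uncore_imc_0/event=0x1/,dummy}" the software dummy event takes
 * "uncore_imc_0" as its group_pmu_name, so later sorting keeps it inside
 * the uncore group rather than splitting it out.
 */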
2096
2097__weak int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs)
2098{
2099 /* Order by insertion index. */
2100 return lhs->core.idx - rhs->core.idx;
2101}
2102
2103static int evlist__cmp(void *state, const struct list_head *l, const struct list_head *r)
2104{
2105 const struct perf_evsel *lhs_core = container_of(l, struct perf_evsel, node);
2106 const struct evsel *lhs = container_of(lhs_core, struct evsel, core);
2107 const struct perf_evsel *rhs_core = container_of(r, struct perf_evsel, node);
2108 const struct evsel *rhs = container_of(rhs_core, struct evsel, core);
2109 int *leader_idx = state;
2110 int lhs_leader_idx = *leader_idx, rhs_leader_idx = *leader_idx, ret;
2111 const char *lhs_pmu_name, *rhs_pmu_name;
2112 bool lhs_has_group = false, rhs_has_group = false;
2113
2114 /*
2115 * First sort by grouping/leader. Read the leader idx only if the evsel
2116 * is part of a group; by default ungrouped events will be sorted
2117 * relative to grouped events based on where the first ungrouped event
2118 * occurs. If neither event has a group we want to fall through to the
2119 * arch-specific sorting, which can reorder and fix things like Intel's
2120 * topdown events.
2121 */
2122 if (lhs_core->leader != lhs_core || lhs_core->nr_members > 1) {
2123 lhs_has_group = true;
2124 lhs_leader_idx = lhs_core->leader->idx;
2125 }
2126 if (rhs_core->leader != rhs_core || rhs_core->nr_members > 1) {
2127 rhs_has_group = true;
2128 rhs_leader_idx = rhs_core->leader->idx;
2129 }
2130
2131 if (lhs_leader_idx != rhs_leader_idx)
2132 return lhs_leader_idx - rhs_leader_idx;
2133
2134 /* Group by PMU if there is a group. Groups can't span PMUs. */
2135 if (lhs_has_group && rhs_has_group) {
2136 lhs_pmu_name = lhs->group_pmu_name;
2137 rhs_pmu_name = rhs->group_pmu_name;
2138 ret = strcmp(lhs_pmu_name, rhs_pmu_name);
2139 if (ret)
2140 return ret;
2141 }
2142
2143 /* Architecture specific sorting. */
2144 return arch_evlist__cmp(lhs, rhs);
2145}
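
/*
 * The net sort key is (leader idx, group PMU name, arch order): events are
 * first clustered around their leader (ungrouped events adopt the position
 * of the first ungrouped event), members of a group that spans PMUs then
 * sort into per-PMU runs, and finally the arch hook may reorder, e.g. x86
 * places an Intel topdown group's slots event first.
 */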
2146
2147static int parse_events__sort_events_and_fix_groups(struct list_head *list)
2148{
2149 int idx = 0, unsorted_idx = -1;
2150 struct evsel *pos, *cur_leader = NULL;
2151 struct perf_evsel *cur_leaders_grp = NULL;
2152 bool idx_changed = false;
2153 int orig_num_leaders = 0, num_leaders = 0;
2154 int ret;
2155
2156 /*
2157 * Compute index to insert ungrouped events at. Place them where the
2158 * first ungrouped event appears.
2159 */
2160 list_for_each_entry(pos, list, core.node) {
2161 const struct evsel *pos_leader = evsel__leader(pos);
2162
2163 ret = evsel__compute_group_pmu_name(pos, list);
2164 if (ret)
2165 return ret;
2166
2167 if (pos == pos_leader)
2168 orig_num_leaders++;
2169
2170 /*
2171 * Ensure indexes are sequential, in particular for multiple
2172 * event lists being merged. The indexes are used to detect when
2173 * the user order is modified.
2174 */
2175 pos->core.idx = idx++;
2176
2177 if (unsorted_idx == -1 && pos == pos_leader && pos->core.nr_members < 2)
2178 unsorted_idx = pos->core.idx;
2179 }
2180
2181 /* Sort events. */
2182 list_sort(&unsorted_idx, list, evlist__cmp);
2183
2184 /*
2185 * Recompute groups, splitting for PMUs and adding groups for events
2186 * that require them.
2187 */
2188 idx = 0;
2189 list_for_each_entry(pos, list, core.node) {
2190 const struct evsel *pos_leader = evsel__leader(pos);
2191 const char *pos_pmu_name = pos->group_pmu_name;
2192 const char *cur_leader_pmu_name, *pos_leader_pmu_name;
2193 bool force_grouped = arch_evsel__must_be_in_group(pos);
2194
2195 /* Reset index and nr_members. */
2196 if (pos->core.idx != idx)
2197 idx_changed = true;
2198 pos->core.idx = idx++;
2199 pos->core.nr_members = 0;
2200
2201 /*
2202 * Set the group leader respecting the given groupings and the
2203 * constraint that groups can't span PMUs.
2204 */
2205 if (!cur_leader)
2206 cur_leader = pos;
2207
2208 cur_leader_pmu_name = cur_leader->group_pmu_name;
2209 if ((cur_leaders_grp != pos->core.leader && !force_grouped) ||
2210 strcmp(cur_leader_pmu_name, pos_pmu_name)) {
2211 /* Event is for a different group/PMU than last. */
2212 cur_leader = pos;
2213 /*
2214 * Remember the leader's group before it is overwritten,
2215 * so that later events match as being in the same
2216 * group.
2217 */
2218 cur_leaders_grp = pos->core.leader;
2219 }
2220 pos_leader_pmu_name = pos_leader->group_pmu_name;
2221 if (strcmp(pos_leader_pmu_name, pos_pmu_name) || force_grouped) {
2222 /*
2223 * Event's PMU differs from its leader's. Groups can't
2224 * span PMUs, so update leader from the group/PMU
2225 * tracker.
2226 */
2227 evsel__set_leader(pos, cur_leader);
2228 }
2229 }
2230 list_for_each_entry(pos, list, core.node) {
2231 struct evsel *pos_leader = evsel__leader(pos);
2232
2233 if (pos == pos_leader)
2234 num_leaders++;
2235 pos_leader->core.nr_members++;
2236 }
2237 return (idx_changed || num_leaders != orig_num_leaders) ? 1 : 0;
2238}
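
/*
 * A non-zero return feeds the "events were regrouped" warning in
 * __parse_events() below. For example (hybrid system, illustrative names),
 * "{cpu_core/cycles/,cpu_atom/cycles/}" is split into two groups here
 * because a group cannot span the two core PMUs.
 */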
2239
2240int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filter,
2241 struct parse_events_error *err, struct perf_pmu *fake_pmu,
2242 bool warn_if_reordered)
2243{
2244 struct parse_events_state parse_state = {
2245 .list = LIST_HEAD_INIT(parse_state.list),
2246 .idx = evlist->core.nr_entries,
2247 .error = err,
2248 .evlist = evlist,
2249 .stoken = PE_START_EVENTS,
2250 .fake_pmu = fake_pmu,
2251 .pmu_filter = pmu_filter,
2252 .match_legacy_cache_terms = true,
2253 };
2254 int ret, ret2;
2255
2256 ret = parse_events__scanner(str, &parse_state);
2257
2258 if (!ret && list_empty(&parse_state.list)) {
2259 WARN_ONCE(true, "WARNING: event parser found nothing\n");
2260 return -1;
2261 }
2262
2263 ret2 = parse_events__sort_events_and_fix_groups(&parse_state.list);
2264 if (ret2 < 0)
2265 return ret ? ret : ret2; /* don't report success if regrouping failed */
2266
2267 if (ret2 && warn_if_reordered && !parse_state.wild_card_pmus)
2268 pr_warning("WARNING: events were regrouped to match PMUs\n");
2269
2270 /*
2271 * Add list to the evlist even with errors to allow callers to clean up.
2272 */
2273 evlist__splice_list_tail(evlist, &parse_state.list);
2274
2275 if (!ret) {
2276 struct evsel *last;
2277
2278 last = evlist__last(evlist);
2279 last->cmdline_group_boundary = true;
2280
2281 return 0;
2282 }
2283
2284 /*
2285 * There are 2 users - builtin-record and builtin-test objects.
2286 * Both call evlist__delete() in case of error, so we don't
2287 * need to bother.
2288 */
2289 return ret;
2290}
2291
2292int parse_event(struct evlist *evlist, const char *str)
2293{
2294 struct parse_events_error err;
2295 int ret;
2296
2297 parse_events_error__init(&err);
2298 ret = parse_events(evlist, str, &err);
2299 parse_events_error__exit(&err);
2300 return ret;
2301}
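
/*
 * parse_event() is a convenience wrapper for callers that want the parse
 * result but not the detailed error state, e.g.
 * parse_event(evlist, "cycles:u").
 */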
2302
2303void parse_events_error__init(struct parse_events_error *err)
2304{
2305 bzero(err, sizeof(*err));
2306}
2307
2308void parse_events_error__exit(struct parse_events_error *err)
2309{
2310 zfree(&err->str);
2311 zfree(&err->help);
2312 zfree(&err->first_str);
2313 zfree(&err->first_help);
2314}
2315
2316void parse_events_error__handle(struct parse_events_error *err, int idx,
2317 char *str, char *help)
2318{
2319 if (WARN(!str || !err, "WARNING: failed to provide error string or struct\n"))
2320 goto out_free;
2321 switch (err->num_errors) {
2322 case 0:
2323 err->idx = idx;
2324 err->str = str;
2325 err->help = help;
2326 break;
2327 case 1:
2328 err->first_idx = err->idx;
2329 err->idx = idx;
2330 err->first_str = err->str;
2331 err->str = str;
2332 err->first_help = err->help;
2333 err->help = help;
2334 break;
2335 default:
2336 pr_debug("Multiple errors dropping message: %s (%s)\n",
2337 err->str, err->help);
2338 free(err->str);
2339 err->str = str;
2340 free(err->help);
2341 err->help = help;
2342 break;
2343 }
2344 err->num_errors++;
2345 return;
2346
2347out_free:
2348 free(str);
2349 free(help);
2350}
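
/*
 * Note that only the first and the most recent error survive: with three
 * failures, first_str keeps the first message, str ends up with the third,
 * and the second is dropped after the pr_debug() above.
 */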
2351
2352#define MAX_WIDTH 1000
2353static int get_term_width(void)
2354{
2355 struct winsize ws;
2356
2357 get_term_dimensions(&ws);
2358 return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col;
2359}
2360
2361static void __parse_events_error__print(int err_idx, const char *err_str,
2362 const char *err_help, const char *event)
2363{
2364 const char *str = "invalid or unsupported event: ";
2365 char _buf[MAX_WIDTH];
2366 char *buf = (char *) event;
2367 int idx = 0;

2368 if (err_str) {
2369 /* -2 for the two quote characters in the final fprintf() */
2370 int width = get_term_width() - 2;
2371 int len_event = strlen(event);
2372 int len_str, max_len, cut = 0;
2373
2374 /*
2375 * Maximum error index indent; we will cut
2376 * the event string if it's bigger.
2377 */
2378 int max_err_idx = 13;
2379
2380 /*
2381 * Let's be specific with the message when
2382 * we have the precise error.
2383 */
2384 str = "event syntax error: ";
2385 len_str = strlen(str);
2386 max_len = width - len_str;
2387
2388 buf = _buf;
2389
2390 /* We're cutting from the beginning. */
2391 if (err_idx > max_err_idx)
2392 cut = err_idx - max_err_idx;
2393
2394 strncpy(buf, event + cut, max_len);
2395
2396 /* Mark cut parts with '..' on both sides. */
2397 if (cut)
2398 buf[0] = buf[1] = '.';
2399
2400 if ((len_event - cut) >= max_len) { /* '==' case: strncpy() adds no NUL */
2401 buf[max_len - 1] = buf[max_len - 2] = '.';
2402 buf[max_len] = 0;
2403 }
2404
2405 idx = len_str + err_idx - cut;
2406 }
2407
2408 fprintf(stderr, "%s'%s'\n", str, buf);
2409 if (idx) {
2410 fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err_str);
2411 if (err_help)
2412 fprintf(stderr, "\n%s\n", err_help);
2413 }
2414}
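
/*
 * Illustrative output (the exact message text comes from the parser):
 *
 *   event syntax error: 'cycles/bad_term/'
 *                               \___ unknown term 'bad_term'
 *
 * When err_idx exceeds max_err_idx the event string is cut and the cut
 * ends are marked with "..".
 */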
2415
2416void parse_events_error__print(struct parse_events_error *err,
2417 const char *event)
2418{
2419 if (!err->num_errors)
2420 return;
2421
2422 __parse_events_error__print(err->idx, err->str, err->help, event);
2423
2424 if (err->num_errors > 1) {
2425 fputs("\nInitial error:\n", stderr);
2426 __parse_events_error__print(err->first_idx, err->first_str,
2427 err->first_help, event);
2428 }
2429}
2430
2431#undef MAX_WIDTH
2432
2433int parse_events_option(const struct option *opt, const char *str,
2434 int unset __maybe_unused)
2435{
2436 struct parse_events_option_args *args = opt->value;
2437 struct parse_events_error err;
2438 int ret;
2439
2440 parse_events_error__init(&err);
2441 ret = __parse_events(*args->evlistp, str, args->pmu_filter, &err,
2442 /*fake_pmu=*/NULL, /*warn_if_reordered=*/true);
2443
2444 if (ret) {
2445 parse_events_error__print(&err, str);
2446 fprintf(stderr, "Run 'perf list' for a list of valid events\n");
2447 }
2448 parse_events_error__exit(&err);
2449
2450 return ret;
2451}
2452
2453int parse_events_option_new_evlist(const struct option *opt, const char *str, int unset)
2454{
2455 struct parse_events_option_args *args = opt->value;
2456 int ret;
2457
2458 if (*args->evlistp == NULL) {
2459 *args->evlistp = evlist__new();
2460
2461 if (*args->evlistp == NULL) {
2462 fprintf(stderr, "Not enough memory to create evlist\n");
2463 return -1;
2464 }
2465 }
2466 ret = parse_events_option(opt, str, unset);
2467 if (ret) {
2468 evlist__delete(*args->evlistp);
2469 *args->evlistp = NULL;
2470 }
2471
2472 return ret;
2473}
2474
2475static int
2476foreach_evsel_in_last_glob(struct evlist *evlist,
2477 int (*func)(struct evsel *evsel,
2478 const void *arg),
2479 const void *arg)
2480{
2481 struct evsel *last = NULL;
2482 int err;
2483
2484 /*
2485 * Don't return when the list is empty; give func a chance to report
2486 * an error when it finds last == NULL.
2487 *
2488 * So there is no need to WARN here; let *func do it.
2489 */
2490 if (evlist->core.nr_entries > 0)
2491 last = evlist__last(evlist);
2492
2493 do {
2494 err = (*func)(last, arg);
2495 if (err)
2496 return -1;
2497 if (!last)
2498 return 0;
2499
2500 if (last->core.node.prev == &evlist->core.entries)
2501 return 0;
2502 last = list_entry(last->core.node.prev, struct evsel, core.node);
2503 } while (!last->cmdline_group_boundary);
2504
2505 return 0;
2506}
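
/*
 * The backwards walk stops at the previous cmdline_group_boundary, so, for
 * example, "-e sched:sched_switch,sched:sched_wakeup --filter ..." applies
 * the filter to both tracepoints created by that single -e option.
 */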
2507
2508static int set_filter(struct evsel *evsel, const void *arg)
2509{
2510 const char *str = arg;
2511 bool found = false;
2512 int nr_addr_filters = 0;
2513 struct perf_pmu *pmu = NULL;
2514
2515 if (evsel == NULL) {
2516 fprintf(stderr,
2517 "--filter option should follow a -e tracepoint or HW tracer option\n");
2518 return -1;
2519 }
2520
2521 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
2522 if (evsel__append_tp_filter(evsel, str) < 0) {
2523 fprintf(stderr,
2524 "not enough memory to hold filter string\n");
2525 return -1;
2526 }
2527
2528 return 0;
2529 }
2530
2531 while ((pmu = perf_pmus__scan(pmu)) != NULL) {
2532 if (pmu->type == evsel->core.attr.type) {
2533 found = true;
2534 break;
2535 }
 }
2536
2537 if (found)
2538 perf_pmu__scan_file(pmu, "nr_addr_filters",
2539 "%d", &nr_addr_filters);
2540
2541 if (!nr_addr_filters)
2542 return perf_bpf_filter__parse(&evsel->bpf_filters, str);
2543
2544 if (evsel__append_addr_filter(evsel, str) < 0) {
2545 fprintf(stderr,
2546 "not enough memory to hold filter string\n");
2547 return -1;
2548 }
2549
2550 return 0;
2551}
2552
2553int parse_filter(const struct option *opt, const char *str,
2554 int unset __maybe_unused)
2555{
2556 struct evlist *evlist = *(struct evlist **)opt->value;
2557
2558 return foreach_evsel_in_last_glob(evlist, set_filter,
2559 (const void *)str);
2560}
2561
2562static int add_exclude_perf_filter(struct evsel *evsel,
2563 const void *arg __maybe_unused)
2564{
2565 char new_filter[64];
2566
2567 if (evsel == NULL || evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
2568 fprintf(stderr,
2569 "--exclude-perf option should follow a -e tracepoint option\n");
2570 return -1;
2571 }
2572
2573 snprintf(new_filter, sizeof(new_filter), "common_pid != %d", getpid());
2574
2575 if (evsel__append_tp_filter(evsel, new_filter) < 0) {
2576 fprintf(stderr,
2577 "not enough memory to hold filter string\n");
2578 return -1;
2579 }
2580
2581 return 0;
2582}
2583
2584int exclude_perf(const struct option *opt,
2585 const char *arg __maybe_unused,
2586 int unset __maybe_unused)
2587{
2588 struct evlist *evlist = *(struct evlist **)opt->value;
2589
2590 return foreach_evsel_in_last_glob(evlist, add_exclude_perf_filter,
2591 NULL);
2592}
2593
2594int parse_events__is_hardcoded_term(struct parse_events_term *term)
2595{
2596 return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
2597}
2598
2599static int new_term(struct parse_events_term **_term,
2600 struct parse_events_term *temp,
2601 char *str, u64 num)
2602{
2603 struct parse_events_term *term;
2604
2605 term = malloc(sizeof(*term));
2606 if (!term)
2607 return -ENOMEM;
2608
2609 *term = *temp;
2610 INIT_LIST_HEAD(&term->list);
2611 term->weak = false;
2612
2613 switch (term->type_val) {
2614 case PARSE_EVENTS__TERM_TYPE_NUM:
2615 term->val.num = num;
2616 break;
2617 case PARSE_EVENTS__TERM_TYPE_STR:
2618 term->val.str = str;
2619 break;
2620 default:
2621 free(term);
2622 return -EINVAL;
2623 }
2624
2625 *_term = term;
2626 return 0;
2627}
2628
2629int parse_events_term__num(struct parse_events_term **term,
2630 int type_term, char *config, u64 num,
2631 bool no_value,
2632 void *loc_term_, void *loc_val_)
2633{
2634 YYLTYPE *loc_term = loc_term_;
2635 YYLTYPE *loc_val = loc_val_;
2636
2637 struct parse_events_term temp = {
2638 .type_val = PARSE_EVENTS__TERM_TYPE_NUM,
2639 .type_term = type_term,
2640 .config = config ? : strdup(config_term_names[type_term]),
2641 .no_value = no_value,
2642 .err_term = loc_term ? loc_term->first_column : 0,
2643 .err_val = loc_val ? loc_val->first_column : 0,
2644 };
2645
2646 return new_term(term, &temp, NULL, num);
2647}
2648
2649int parse_events_term__str(struct parse_events_term **term,
2650 int type_term, char *config, char *str,
2651 void *loc_term_, void *loc_val_)
2652{
2653 YYLTYPE *loc_term = loc_term_;
2654 YYLTYPE *loc_val = loc_val_;
2655
2656 struct parse_events_term temp = {
2657 .type_val = PARSE_EVENTS__TERM_TYPE_STR,
2658 .type_term = type_term,
2659 .config = config,
2660 .err_term = loc_term ? loc_term->first_column : 0,
2661 .err_val = loc_val ? loc_val->first_column : 0,
2662 };
2663
2664 return new_term(term, &temp, str, 0);
2665}
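
/*
 * For example, the grammar maps the term "period=1000" to
 * parse_events_term__num() with type_term PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD
 * and num 1000, and "name=foo" to parse_events_term__str() with a strdup'd
 * "foo".
 */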
2666
2667int parse_events_term__term(struct parse_events_term **term,
2668 int term_lhs, int term_rhs,
2669 void *loc_term, void *loc_val)
2670{
2671 return parse_events_term__str(term, term_lhs, NULL,
2672 strdup(config_term_names[term_rhs]),
2673 loc_term, loc_val);
2674}
2675
2676int parse_events_term__clone(struct parse_events_term **new,
2677 struct parse_events_term *term)
2678{
2679 char *str;
2680 struct parse_events_term temp = {
2681 .type_val = term->type_val,
2682 .type_term = term->type_term,
2683 .config = NULL,
2684 .err_term = term->err_term,
2685 .err_val = term->err_val,
2686 };
2687
2688 if (term->config) {
2689 temp.config = strdup(term->config);
2690 if (!temp.config)
2691 return -ENOMEM;
2692 }
2693 if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
2694 return new_term(new, &temp, NULL, term->val.num);
2695
2696 str = strdup(term->val.str);
2697 if (!str)
2698 return -ENOMEM;
2699 return new_term(new, &temp, str, 0);
2700}
2701
2702void parse_events_term__delete(struct parse_events_term *term)
2703{
2704 if (term->array.nr_ranges)
2705 zfree(&term->array.ranges);
2706
2707 if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
2708 zfree(&term->val.str);
2709
2710 zfree(&term->config);
2711 free(term);
2712}
2713
2714int parse_events_copy_term_list(struct list_head *old,
2715 struct list_head **new)
2716{
2717 struct parse_events_term *term, *n;
2718 int ret;
2719
2720 if (!old) {
2721 *new = NULL;
2722 return 0;
2723 }
2724
2725 *new = malloc(sizeof(struct list_head));
2726 if (!*new)
2727 return -ENOMEM;
2728 INIT_LIST_HEAD(*new);
2729
2730 list_for_each_entry(term, old, list) {
2731 ret = parse_events_term__clone(&n, term);
2732 if (ret) {
 /* Free the terms cloned so far rather than leaking them. */
 parse_events_terms__delete(*new);
 *new = NULL;
2733 return ret;
 }
2734 list_add_tail(&n->list, *new);
2735 }
2736 return 0;
2737}
2738
2739void parse_events_terms__purge(struct list_head *terms)
2740{
2741 struct parse_events_term *term, *h;
2742
2743 list_for_each_entry_safe(term, h, terms, list) {
2744 list_del_init(&term->list);
2745 parse_events_term__delete(term);
2746 }
2747}
2748
2749void parse_events_terms__delete(struct list_head *terms)
2750{
2751 if (!terms)
2752 return;
2753 parse_events_terms__purge(terms);
2754 free(terms);
2755}
2756
2757void parse_events__clear_array(struct parse_events_array *a)
2758{
2759 zfree(&a->ranges);
2760}
2761
2762void parse_events_evlist_error(struct parse_events_state *parse_state,
2763 int idx, const char *str)
2764{
2765 if (!parse_state->error)
2766 return;
2767
2768 parse_events_error__handle(parse_state->error, idx, strdup(str), NULL);
2769}
2770
2771static void config_terms_list(char *buf, size_t buf_sz)
2772{
2773 int i;
2774 bool first = true;
2775
2776 buf[0] = '\0';
2777 for (i = 0; i < __PARSE_EVENTS__TERM_TYPE_NR; i++) {
2778 const char *name = config_term_names[i];
2779
2780 if (!config_term_avail(i, NULL))
2781 continue;
2782 if (!name)
2783 continue;
2784 if (name[0] == '<')
2785 continue;
2786
2787 if (strlen(buf) + strlen(name) + 2 >= buf_sz)
2788 return;
2789
2790 if (!first)
2791 strcat(buf, ",");
2792 else
2793 first = false;
2794 strcat(buf, name);
2795 }
2796}
2797
2798/*
2799 * Return a string containing the valid config terms for an event.
2800 * @additional_terms: For terms such as PMU sysfs terms.
2801 */
2802char *parse_events_formats_error_string(char *additional_terms)
2803{
2804 char *str;
2805 /* "no-overwrite" is the longest name */
2806 char static_terms[__PARSE_EVENTS__TERM_TYPE_NR *
2807 (sizeof("no-overwrite") - 1)];
2808
2809 config_terms_list(static_terms, sizeof(static_terms));
2810 /* valid terms */
2811 if (additional_terms) {
2812 if (asprintf(&str, "valid terms: %s,%s",
2813 additional_terms, static_terms) < 0)
2814 goto fail;
2815 } else {
2816 if (asprintf(&str, "valid terms: %s", static_terms) < 0)
2817 goto fail;
2818 }
2819 return str;
2820
2821fail:
2822 return NULL;
2823}