Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright(C) 2015 Linaro Limited. All rights reserved.
4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
5 */
6
7#include <api/fs/fs.h>
8#include <linux/bits.h>
9#include <linux/bitops.h>
10#include <linux/compiler.h>
11#include <linux/coresight-pmu.h>
12#include <linux/kernel.h>
13#include <linux/log2.h>
14#include <linux/types.h>
15
16#include "cs-etm.h"
17#include "../../perf.h"
18#include "../../util/auxtrace.h"
19#include "../../util/cpumap.h"
20#include "../../util/evlist.h"
21#include "../../util/evsel.h"
22#include "../../util/pmu.h"
23#include "../../util/thread_map.h"
24#include "../../util/cs-etm.h"
25
26#include <errno.h>
27#include <stdlib.h>
28#include <sys/stat.h>
29
/*
 * Per-session state for CoreSight ETM recording.  The generic
 * auxtrace_record is embedded so that callbacks can recover this
 * structure with container_of().
 */
struct cs_etm_recording {
	struct auxtrace_record	itr;
	struct perf_pmu		*cs_etm_pmu;	/* the cs_etm PMU, resolved at init time */
	struct perf_evlist	*evlist;	/* cached by cs_etm_recording_options() */
	bool			snapshot_mode;	/* true when -S/--snapshot was given */
	size_t			snapshot_size;	/* requested snapshot size, 0 = default */
};
37
38static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu);
39
40static int cs_etm_parse_snapshot_options(struct auxtrace_record *itr,
41 struct record_opts *opts,
42 const char *str)
43{
44 struct cs_etm_recording *ptr =
45 container_of(itr, struct cs_etm_recording, itr);
46 unsigned long long snapshot_size = 0;
47 char *endptr;
48
49 if (str) {
50 snapshot_size = strtoull(str, &endptr, 0);
51 if (*endptr || snapshot_size > SIZE_MAX)
52 return -1;
53 }
54
55 opts->auxtrace_snapshot_mode = true;
56 opts->auxtrace_snapshot_size = snapshot_size;
57 ptr->snapshot_size = snapshot_size;
58
59 return 0;
60}
61
62static int cs_etm_set_sink_attr(struct perf_pmu *pmu,
63 struct perf_evsel *evsel)
64{
65 char msg[BUFSIZ], path[PATH_MAX], *sink;
66 struct perf_evsel_config_term *term;
67 int ret = -EINVAL;
68 u32 hash;
69
70 if (evsel->attr.config2 & GENMASK(31, 0))
71 return 0;
72
73 list_for_each_entry(term, &evsel->config_terms, list) {
74 if (term->type != PERF_EVSEL__CONFIG_TERM_DRV_CFG)
75 continue;
76
77 sink = term->val.drv_cfg;
78 snprintf(path, PATH_MAX, "sinks/%s", sink);
79
80 ret = perf_pmu__scan_file(pmu, path, "%x", &hash);
81 if (ret != 1) {
82 pr_err("failed to set sink \"%s\" on event %s with %d (%s)\n",
83 sink, perf_evsel__name(evsel), errno,
84 str_error_r(errno, msg, sizeof(msg)));
85 return ret;
86 }
87
88 evsel->attr.config2 |= hash;
89 return 0;
90 }
91
92 /*
93 * No sink was provided on the command line - for _now_ treat
94 * this as an error.
95 */
96 return ret;
97}
98
/*
 * Validate and complete the record options for a CoreSight ETM session:
 * enforce a single ETM event, resolve its sink, size the AUX mmap area
 * for snapshot or full-trace mode, and append a tracking event.
 *
 * Returns 0 on success (including when no ETM event is present), a
 * negative error code on invalid option combinations.
 */
static int cs_etm_recording_options(struct auxtrace_record *itr,
				    struct perf_evlist *evlist,
				    struct record_opts *opts)
{
	int ret;
	struct cs_etm_recording *ptr =
				container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
	struct perf_evsel *evsel, *cs_etm_evsel = NULL;
	const struct cpu_map *cpus = evlist->cpus;
	/* Root or a relaxed perf_event_paranoid gets the larger buffers. */
	bool privileged = (geteuid() == 0 || perf_event_paranoid() < 0);

	ptr->evlist = evlist;
	ptr->snapshot_mode = opts->auxtrace_snapshot_mode;

	/* Find the (single) ETM event and make it count every occurrence. */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == cs_etm_pmu->type) {
			if (cs_etm_evsel) {
				pr_err("There may be only one %s event\n",
				       CORESIGHT_ETM_PMU_NAME);
				return -EINVAL;
			}
			evsel->attr.freq = 0;
			evsel->attr.sample_period = 1;
			cs_etm_evsel = evsel;
			opts->full_auxtrace = true;
		}
	}

	/* No need to continue if no event of interest was found. */
	if (!cs_etm_evsel)
		return 0;

	ret = cs_etm_set_sink_attr(cs_etm_pmu, cs_etm_evsel);
	if (ret)
		return ret;

	if (opts->use_clockid) {
		pr_err("Cannot use clockid (-k option) with %s\n",
		       CORESIGHT_ETM_PMU_NAME);
		return -EINVAL;
	}

	/* we are in snapshot mode */
	if (opts->auxtrace_snapshot_mode) {
		/*
		 * No size was given to '-S' or '-m,', so go with
		 * the default
		 */
		if (!opts->auxtrace_snapshot_size &&
		    !opts->auxtrace_mmap_pages) {
			if (privileged) {
				opts->auxtrace_mmap_pages = MiB(4) / page_size;
			} else {
				opts->auxtrace_mmap_pages =
							KiB(128) / page_size;
				if (opts->mmap_pages == UINT_MAX)
					opts->mmap_pages = KiB(256) / page_size;
			}
		} else if (!opts->auxtrace_mmap_pages && !privileged &&
						opts->mmap_pages == UINT_MAX) {
			opts->mmap_pages = KiB(256) / page_size;
		}

		/*
		 * '-m,xyz' was specified but no snapshot size, so make the
		 * snapshot size as big as the auxtrace mmap area.
		 */
		if (!opts->auxtrace_snapshot_size) {
			opts->auxtrace_snapshot_size =
				opts->auxtrace_mmap_pages * (size_t)page_size;
		}

		/*
		 * -Sxyz was specified but no auxtrace mmap area, so make the
		 * auxtrace mmap area big enough to fit the requested snapshot
		 * size.
		 */
		if (!opts->auxtrace_mmap_pages) {
			size_t sz = opts->auxtrace_snapshot_size;

			sz = round_up(sz, page_size) / page_size;
			opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
		}

		/* Snapshot size can't be bigger than the auxtrace area */
		if (opts->auxtrace_snapshot_size >
				opts->auxtrace_mmap_pages * (size_t)page_size) {
			pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
			       opts->auxtrace_snapshot_size,
			       opts->auxtrace_mmap_pages * (size_t)page_size);
			return -EINVAL;
		}

		/* Something went wrong somewhere - this shouldn't happen */
		if (!opts->auxtrace_snapshot_size ||
		    !opts->auxtrace_mmap_pages) {
			pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
			return -EINVAL;
		}
	}

	/* We are in full trace mode but '-m,xyz' wasn't specified */
	if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
		if (privileged) {
			opts->auxtrace_mmap_pages = MiB(4) / page_size;
		} else {
			opts->auxtrace_mmap_pages = KiB(128) / page_size;
			if (opts->mmap_pages == UINT_MAX)
				opts->mmap_pages = KiB(256) / page_size;
		}

	}

	/* Validate auxtrace_mmap_pages provided by user */
	if (opts->auxtrace_mmap_pages) {
		unsigned int max_page = (KiB(128) / page_size);
		size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;

		/* Unprivileged users are capped at 128KiB of AUX buffer. */
		if (!privileged &&
		    opts->auxtrace_mmap_pages > max_page) {
			opts->auxtrace_mmap_pages = max_page;
			pr_err("auxtrace too big, truncating to %d\n",
			       max_page);
		}

		if (!is_power_of_2(sz)) {
			pr_err("Invalid mmap size for %s: must be a power of 2\n",
			       CORESIGHT_ETM_PMU_NAME);
			return -EINVAL;
		}
	}

	if (opts->auxtrace_snapshot_mode)
		pr_debug2("%s snapshot size: %zu\n", CORESIGHT_ETM_PMU_NAME,
			  opts->auxtrace_snapshot_size);

	/*
	 * To obtain the auxtrace buffer file descriptor, the auxtrace
	 * event must come first.
	 */
	perf_evlist__to_front(evlist, cs_etm_evsel);

	/*
	 * In the case of per-cpu mmaps, we need the CPU on the
	 * AUX event.
	 */
	if (!cpu_map__empty(cpus))
		perf_evsel__set_sample_bit(cs_etm_evsel, CPU);

	/* Add dummy event to keep tracking */
	if (opts->full_auxtrace) {
		struct perf_evsel *tracking_evsel;
		int err;

		err = parse_events(evlist, "dummy:u", NULL);
		if (err)
			return err;

		tracking_evsel = perf_evlist__last(evlist);
		perf_evlist__set_tracking_event(evlist, tracking_evsel);

		tracking_evsel->attr.freq = 0;
		tracking_evsel->attr.sample_period = 1;

		/* In per-cpu case, always need the time of mmap events etc */
		if (!cpu_map__empty(cpus))
			perf_evsel__set_sample_bit(tracking_evsel, TIME);
	}

	return 0;
}
271
272static u64 cs_etm_get_config(struct auxtrace_record *itr)
273{
274 u64 config = 0;
275 struct cs_etm_recording *ptr =
276 container_of(itr, struct cs_etm_recording, itr);
277 struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
278 struct perf_evlist *evlist = ptr->evlist;
279 struct perf_evsel *evsel;
280
281 evlist__for_each_entry(evlist, evsel) {
282 if (evsel->attr.type == cs_etm_pmu->type) {
283 /*
284 * Variable perf_event_attr::config is assigned to
285 * ETMv3/PTM. The bit fields have been made to match
286 * the ETMv3.5 ETRMCR register specification. See the
287 * PMU_FORMAT_ATTR() declarations in
288 * drivers/hwtracing/coresight/coresight-perf.c for
289 * details.
290 */
291 config = evsel->attr.config;
292 break;
293 }
294 }
295
296 return config;
297}
298
/* Fallback in case <linux/bits.h> did not provide BIT(). */
#ifndef BIT
#define BIT(N) (1UL << (N))
#endif
302
303static u64 cs_etmv4_get_config(struct auxtrace_record *itr)
304{
305 u64 config = 0;
306 u64 config_opts = 0;
307
308 /*
309 * The perf event variable config bits represent both
310 * the command line options and register programming
311 * bits in ETMv3/PTM. For ETMv4 we must remap options
312 * to real bits
313 */
314 config_opts = cs_etm_get_config(itr);
315 if (config_opts & BIT(ETM_OPT_CYCACC))
316 config |= BIT(ETM4_CFG_BIT_CYCACC);
317 if (config_opts & BIT(ETM_OPT_TS))
318 config |= BIT(ETM4_CFG_BIT_TS);
319 if (config_opts & BIT(ETM_OPT_RETSTK))
320 config |= BIT(ETM4_CFG_BIT_RETSTK);
321
322 return config;
323}
324
325static size_t
326cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
327 struct perf_evlist *evlist __maybe_unused)
328{
329 int i;
330 int etmv3 = 0, etmv4 = 0;
331 struct cpu_map *event_cpus = evlist->cpus;
332 struct cpu_map *online_cpus = cpu_map__new(NULL);
333
334 /* cpu map is not empty, we have specific CPUs to work with */
335 if (!cpu_map__empty(event_cpus)) {
336 for (i = 0; i < cpu__max_cpu(); i++) {
337 if (!cpu_map__has(event_cpus, i) ||
338 !cpu_map__has(online_cpus, i))
339 continue;
340
341 if (cs_etm_is_etmv4(itr, i))
342 etmv4++;
343 else
344 etmv3++;
345 }
346 } else {
347 /* get configuration for all CPUs in the system */
348 for (i = 0; i < cpu__max_cpu(); i++) {
349 if (!cpu_map__has(online_cpus, i))
350 continue;
351
352 if (cs_etm_is_etmv4(itr, i))
353 etmv4++;
354 else
355 etmv3++;
356 }
357 }
358
359 cpu_map__put(online_cpus);
360
361 return (CS_ETM_HEADER_SIZE +
362 (etmv4 * CS_ETMV4_PRIV_SIZE) +
363 (etmv3 * CS_ETMV3_PRIV_SIZE));
364}
365
/* sysfs paths (relative to the PMU directory) of ETMv3 read-only registers */
static const char *metadata_etmv3_ro[CS_ETM_PRIV_MAX] = {
	[CS_ETM_ETMCCER]	= "mgmt/etmccer",
	[CS_ETM_ETMIDR]		= "mgmt/etmidr",
};
370
/* sysfs paths (relative to the PMU directory) of ETMv4 read-only registers */
static const char *metadata_etmv4_ro[CS_ETMV4_PRIV_MAX] = {
	[CS_ETMV4_TRCIDR0]		= "trcidr/trcidr0",
	[CS_ETMV4_TRCIDR1]		= "trcidr/trcidr1",
	[CS_ETMV4_TRCIDR2]		= "trcidr/trcidr2",
	[CS_ETMV4_TRCIDR8]		= "trcidr/trcidr8",
	[CS_ETMV4_TRCAUTHSTATUS]	= "mgmt/trcauthstatus",
};
378
379static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu)
380{
381 bool ret = false;
382 char path[PATH_MAX];
383 int scan;
384 unsigned int val;
385 struct cs_etm_recording *ptr =
386 container_of(itr, struct cs_etm_recording, itr);
387 struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
388
389 /* Take any of the RO files for ETMv4 and see if it present */
390 snprintf(path, PATH_MAX, "cpu%d/%s",
391 cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
392 scan = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);
393
394 /* The file was read successfully, we have a winner */
395 if (scan == 1)
396 ret = true;
397
398 return ret;
399}
400
401static int cs_etm_get_ro(struct perf_pmu *pmu, int cpu, const char *path)
402{
403 char pmu_path[PATH_MAX];
404 int scan;
405 unsigned int val = 0;
406
407 /* Get RO metadata from sysfs */
408 snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu, path);
409
410 scan = perf_pmu__scan_file(pmu, pmu_path, "%x", &val);
411 if (scan != 1)
412 pr_err("%s: error reading: %s\n", __func__, pmu_path);
413
414 return val;
415}
416
/*
 * Fill one per-CPU metadata record in the auxtrace info event.
 *
 * @cpu:    CPU the record describes
 * @offset: in/out index into info->priv[]; advanced past this record
 * @itr:    generic auxtrace state, embedded in struct cs_etm_recording
 * @info:   auxtrace info event being assembled
 *
 * The record layout (magic, config, traceID, RO registers) depends on
 * whether the CPU carries an ETMv3/PTM or an ETMv4 tracer.
 */
static void cs_etm_get_metadata(int cpu, u32 *offset,
				struct auxtrace_record *itr,
				struct auxtrace_info_event *info)
{
	u32 increment;
	u64 magic;
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	/* first see what kind of tracer this cpu is affined to */
	if (cs_etm_is_etmv4(itr, cpu)) {
		magic = __perf_cs_etmv4_magic;
		/* Get trace configuration register */
		info->priv[*offset + CS_ETMV4_TRCCONFIGR] =
						cs_etmv4_get_config(itr);
		/* Get traceID from the framework */
		info->priv[*offset + CS_ETMV4_TRCTRACEIDR] =
						coresight_get_trace_id(cpu);
		/* Get read-only information from sysFS */
		info->priv[*offset + CS_ETMV4_TRCIDR0] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
		info->priv[*offset + CS_ETMV4_TRCIDR1] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR1]);
		info->priv[*offset + CS_ETMV4_TRCIDR2] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR2]);
		info->priv[*offset + CS_ETMV4_TRCIDR8] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR8]);
		info->priv[*offset + CS_ETMV4_TRCAUTHSTATUS] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro
				      [CS_ETMV4_TRCAUTHSTATUS]);

		/* How much space was used */
		increment = CS_ETMV4_PRIV_MAX;
	} else {
		magic = __perf_cs_etmv3_magic;
		/* Get configuration register */
		info->priv[*offset + CS_ETM_ETMCR] = cs_etm_get_config(itr);
		/* Get traceID from the framework */
		info->priv[*offset + CS_ETM_ETMTRACEIDR] =
						coresight_get_trace_id(cpu);
		/* Get read-only information from sysFS */
		info->priv[*offset + CS_ETM_ETMCCER] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv3_ro[CS_ETM_ETMCCER]);
		info->priv[*offset + CS_ETM_ETMIDR] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv3_ro[CS_ETM_ETMIDR]);

		/* How much space was used */
		increment = CS_ETM_PRIV_MAX;
	}

	/* Build generic header portion */
	info->priv[*offset + CS_ETM_MAGIC] = magic;
	info->priv[*offset + CS_ETM_CPU] = cpu;
	/* Where the next CPU entry should start from */
	*offset += increment;
}
481
482static int cs_etm_info_fill(struct auxtrace_record *itr,
483 struct perf_session *session,
484 struct auxtrace_info_event *info,
485 size_t priv_size)
486{
487 int i;
488 u32 offset;
489 u64 nr_cpu, type;
490 struct cpu_map *cpu_map;
491 struct cpu_map *event_cpus = session->evlist->cpus;
492 struct cpu_map *online_cpus = cpu_map__new(NULL);
493 struct cs_etm_recording *ptr =
494 container_of(itr, struct cs_etm_recording, itr);
495 struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
496
497 if (priv_size != cs_etm_info_priv_size(itr, session->evlist))
498 return -EINVAL;
499
500 if (!session->evlist->nr_mmaps)
501 return -EINVAL;
502
503 /* If the cpu_map is empty all online CPUs are involved */
504 if (cpu_map__empty(event_cpus)) {
505 cpu_map = online_cpus;
506 } else {
507 /* Make sure all specified CPUs are online */
508 for (i = 0; i < cpu_map__nr(event_cpus); i++) {
509 if (cpu_map__has(event_cpus, i) &&
510 !cpu_map__has(online_cpus, i))
511 return -EINVAL;
512 }
513
514 cpu_map = event_cpus;
515 }
516
517 nr_cpu = cpu_map__nr(cpu_map);
518 /* Get PMU type as dynamically assigned by the core */
519 type = cs_etm_pmu->type;
520
521 /* First fill out the session header */
522 info->type = PERF_AUXTRACE_CS_ETM;
523 info->priv[CS_HEADER_VERSION_0] = 0;
524 info->priv[CS_PMU_TYPE_CPUS] = type << 32;
525 info->priv[CS_PMU_TYPE_CPUS] |= nr_cpu;
526 info->priv[CS_ETM_SNAPSHOT] = ptr->snapshot_mode;
527
528 offset = CS_ETM_SNAPSHOT + 1;
529
530 for (i = 0; i < cpu__max_cpu() && offset < priv_size; i++)
531 if (cpu_map__has(cpu_map, i))
532 cs_etm_get_metadata(i, &offset, itr, info);
533
534 cpu_map__put(online_cpus);
535
536 return 0;
537}
538
/*
 * Determine the data window for a snapshot.  No hardware head/tail
 * information is consulted here: the whole AUX mmap area is reported as
 * newly captured data (*old = previous head, *head advanced by mm->len).
 */
static int cs_etm_find_snapshot(struct auxtrace_record *itr __maybe_unused,
				int idx, struct auxtrace_mmap *mm,
				unsigned char *data __maybe_unused,
				u64 *head, u64 *old)
{
	pr_debug3("%s: mmap index %d old head %zu new head %zu size %zu\n",
		  __func__, idx, (size_t)*old, (size_t)*head, mm->len);

	*old = *head;
	*head += mm->len;

	return 0;
}
552
553static int cs_etm_snapshot_start(struct auxtrace_record *itr)
554{
555 struct cs_etm_recording *ptr =
556 container_of(itr, struct cs_etm_recording, itr);
557 struct perf_evsel *evsel;
558
559 evlist__for_each_entry(ptr->evlist, evsel) {
560 if (evsel->attr.type == ptr->cs_etm_pmu->type)
561 return perf_evsel__disable(evsel);
562 }
563 return -EINVAL;
564}
565
566static int cs_etm_snapshot_finish(struct auxtrace_record *itr)
567{
568 struct cs_etm_recording *ptr =
569 container_of(itr, struct cs_etm_recording, itr);
570 struct perf_evsel *evsel;
571
572 evlist__for_each_entry(ptr->evlist, evsel) {
573 if (evsel->attr.type == ptr->cs_etm_pmu->type)
574 return perf_evsel__enable(evsel);
575 }
576 return -EINVAL;
577}
578
579static u64 cs_etm_reference(struct auxtrace_record *itr __maybe_unused)
580{
581 return (((u64) rand() << 0) & 0x00000000FFFFFFFFull) |
582 (((u64) rand() << 32) & 0xFFFFFFFF00000000ull);
583}
584
585static void cs_etm_recording_free(struct auxtrace_record *itr)
586{
587 struct cs_etm_recording *ptr =
588 container_of(itr, struct cs_etm_recording, itr);
589 free(ptr);
590}
591
592static int cs_etm_read_finish(struct auxtrace_record *itr, int idx)
593{
594 struct cs_etm_recording *ptr =
595 container_of(itr, struct cs_etm_recording, itr);
596 struct perf_evsel *evsel;
597
598 evlist__for_each_entry(ptr->evlist, evsel) {
599 if (evsel->attr.type == ptr->cs_etm_pmu->type)
600 return perf_evlist__enable_event_idx(ptr->evlist,
601 evsel, idx);
602 }
603
604 return -EINVAL;
605}
606
607struct auxtrace_record *cs_etm_record_init(int *err)
608{
609 struct perf_pmu *cs_etm_pmu;
610 struct cs_etm_recording *ptr;
611
612 cs_etm_pmu = perf_pmu__find(CORESIGHT_ETM_PMU_NAME);
613
614 if (!cs_etm_pmu) {
615 *err = -EINVAL;
616 goto out;
617 }
618
619 ptr = zalloc(sizeof(struct cs_etm_recording));
620 if (!ptr) {
621 *err = -ENOMEM;
622 goto out;
623 }
624
625 ptr->cs_etm_pmu = cs_etm_pmu;
626 ptr->itr.parse_snapshot_options = cs_etm_parse_snapshot_options;
627 ptr->itr.recording_options = cs_etm_recording_options;
628 ptr->itr.info_priv_size = cs_etm_info_priv_size;
629 ptr->itr.info_fill = cs_etm_info_fill;
630 ptr->itr.find_snapshot = cs_etm_find_snapshot;
631 ptr->itr.snapshot_start = cs_etm_snapshot_start;
632 ptr->itr.snapshot_finish = cs_etm_snapshot_finish;
633 ptr->itr.reference = cs_etm_reference;
634 ptr->itr.free = cs_etm_recording_free;
635 ptr->itr.read_finish = cs_etm_read_finish;
636
637 *err = 0;
638 return &ptr->itr;
639out:
640 return NULL;
641}