Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include "string2.h"
#include <sys/param.h>
#include <sys/types.h>
#include <byteswap.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/stringify.h>
#include <linux/zalloc.h>
#include <sys/stat.h>
#include <sys/utsname.h>
#include <linux/time64.h>
#include <dirent.h>
#ifdef HAVE_LIBBPF_SUPPORT
#include <bpf/libbpf.h>
#endif
#include <perf/cpumap.h>

#include "dso.h"
#include "evlist.h"
#include "evsel.h"
#include "util/evsel_fprintf.h"
#include "header.h"
#include "memswap.h"
#include "trace-event.h"
#include "session.h"
#include "symbol.h"
#include "debug.h"
#include "cpumap.h"
#include "pmu.h"
#include "vdso.h"
#include "strbuf.h"
#include "build-id.h"
#include "data.h"
#include <api/fs/fs.h>
#include "asm/bug.h"
#include "tool.h"
#include "time-utils.h"
#include "units.h"
#include "util/util.h" // perf_exe()
#include "cputopo.h"
#include "bpf-event.h"
#include "clockid.h"

#include <linux/ctype.h>
#include <internal/lib.h>

/*
 * magic2 = "PERFILE2"
 * must be a numerical value to let the endianness
 * determine the memory layout. That way we are able
 * to detect endianness when reading the perf.data file
 * back.
 *
 * we check for legacy (PERFFILE) format.
 */
static const char *__perf_magic1 = "PERFFILE";
static const u64 __perf_magic2 = 0x32454c4946524550ULL;
static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;

#define PERF_MAGIC __perf_magic2

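/*
 * Illustrative sketch (added note, not part of the original source):
 * because the magic is stored as a 64-bit integer, a reader built for
 * either byte order can recognize the header and decide whether byte
 * swapping is needed by comparing against both constants above. A
 * hypothetical helper could look like:
 *
 *	static int check_magic_endian_sketch(u64 magic, bool *needs_swap)
 *	{
 *		if (magic == __perf_magic2) {		// native byte order
 *			*needs_swap = false;
 *			return 0;
 *		}
 *		if (magic == __perf_magic2_sw) {	// opposite byte order
 *			*needs_swap = true;
 *			return 0;
 *		}
 *		return -1;				// not a PERFILE2 header
 *	}
 */
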
const char perf_version_string[] = PERF_VERSION;

struct perf_file_attr {
	struct perf_event_attr	attr;
	struct perf_file_section	ids;
};

void perf_header__set_feat(struct perf_header *header, int feat)
{
	set_bit(feat, header->adds_features);
}

void perf_header__clear_feat(struct perf_header *header, int feat)
{
	clear_bit(feat, header->adds_features);
}

bool perf_header__has_feat(const struct perf_header *header, int feat)
{
	return test_bit(feat, header->adds_features);
}

static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size)
{
	ssize_t ret = writen(ff->fd, buf, size);

	if (ret != (ssize_t)size)
		return ret < 0 ? (int)ret : -1;
	return 0;
}

static int __do_write_buf(struct feat_fd *ff, const void *buf, size_t size)
{
	/* struct perf_event_header::size is u16 */
	const size_t max_size = 0xffff - sizeof(struct perf_event_header);
	size_t new_size = ff->size;
	void *addr;

	if (size + ff->offset > max_size)
		return -E2BIG;

	while (size > (new_size - ff->offset))
		new_size <<= 1;
	new_size = min(max_size, new_size);

	if (ff->size < new_size) {
		addr = realloc(ff->buf, new_size);
		if (!addr)
			return -ENOMEM;
		ff->buf = addr;
		ff->size = new_size;
	}

	memcpy(ff->buf + ff->offset, buf, size);
	ff->offset += size;

	return 0;
}

/* Return: 0 if succeeded, -ERR if failed. */
int do_write(struct feat_fd *ff, const void *buf, size_t size)
{
	if (!ff->buf)
		return __do_write_fd(ff, buf, size);
	return __do_write_buf(ff, buf, size);
}

/* Return: 0 if succeeded, -ERR if failed. */
static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size)
{
	u64 *p = (u64 *) set;
	int i, ret;

	ret = do_write(ff, &size, sizeof(size));
	if (ret < 0)
		return ret;

	for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
		ret = do_write(ff, p + i, sizeof(*p));
		if (ret < 0)
			return ret;
	}

	return 0;
}
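
/*
 * On-disk layout produced by do_write_bitmap() above (added note, not
 * original source): a u64 bit count followed by the bitmap payload
 * rounded up to whole u64 words. For size = 70 bits:
 *
 *	offset  0: u64 size     = 70
 *	offset  8: u64 words[0] = bits 0..63
 *	offset 16: u64 words[1] = bits 64..69, upper bits unused
 *
 * do_read_bitmap() below consumes the same BITS_TO_U64(size) words,
 * byte-swapping each one when the file's endianness differs.
 */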

/* Return: 0 if succeeded, -ERR if failed. */
int write_padded(struct feat_fd *ff, const void *bf,
		 size_t count, size_t count_aligned)
{
	static const char zero_buf[NAME_ALIGN];
	int err = do_write(ff, bf, count);

	if (!err)
		err = do_write(ff, zero_buf, count_aligned - count);

	return err;
}

#define string_size(str) \
	(PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))

/* Return: 0 if succeeded, -ERR if failed. */
static int do_write_string(struct feat_fd *ff, const char *str)
{
	u32 len, olen;
	int ret;

	olen = strlen(str) + 1;
	len = PERF_ALIGN(olen, NAME_ALIGN);

	/* write len, incl. \0 */
	ret = do_write(ff, &len, sizeof(len));
	if (ret < 0)
		return ret;

	return write_padded(ff, str, olen, len);
}
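
/*
 * Worked example of the string encoding (added note, not original
 * source): do_write_string("perf") first emits a u32 length that
 * already includes the terminating NUL plus NAME_ALIGN padding, then
 * the padded bytes. Assuming NAME_ALIGN is 64:
 *
 *	u32 len = 64
 *	'p' 'e' 'r' 'f' '\0' followed by 59 zero bytes
 *
 * This is why do_read_string() can simply allocate len bytes and rely
 * on the embedded NUL; string_size() accounts for the u32 as well.
 */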

static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size)
{
	ssize_t ret = readn(ff->fd, addr, size);

	if (ret != size)
		return ret < 0 ? (int)ret : -1;
	return 0;
}

static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size)
{
	if (size > (ssize_t)ff->size - ff->offset)
		return -1;

	memcpy(addr, ff->buf + ff->offset, size);
	ff->offset += size;

	return 0;
}

static int __do_read(struct feat_fd *ff, void *addr, ssize_t size)
{
	if (!ff->buf)
		return __do_read_fd(ff, addr, size);
	return __do_read_buf(ff, addr, size);
}

static int do_read_u32(struct feat_fd *ff, u32 *addr)
{
	int ret;

	ret = __do_read(ff, addr, sizeof(*addr));
	if (ret)
		return ret;

	if (ff->ph->needs_swap)
		*addr = bswap_32(*addr);
	return 0;
}

static int do_read_u64(struct feat_fd *ff, u64 *addr)
{
	int ret;

	ret = __do_read(ff, addr, sizeof(*addr));
	if (ret)
		return ret;

	if (ff->ph->needs_swap)
		*addr = bswap_64(*addr);
	return 0;
}

static char *do_read_string(struct feat_fd *ff)
{
	u32 len;
	char *buf;

	if (do_read_u32(ff, &len))
		return NULL;

	buf = malloc(len);
	if (!buf)
		return NULL;

	if (!__do_read(ff, buf, len)) {
		/*
		 * strings are padded by zeroes
		 * thus the actual strlen of buf
		 * may be less than len
		 */
		return buf;
	}

	free(buf);
	return NULL;
}

/* Return: 0 if succeeded, -ERR if failed. */
static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
{
	unsigned long *set;
	u64 size, *p;
	int i, ret;

	ret = do_read_u64(ff, &size);
	if (ret)
		return ret;

	set = bitmap_alloc(size);
	if (!set)
		return -ENOMEM;

	p = (u64 *) set;

	for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
		ret = do_read_u64(ff, p + i);
		if (ret < 0) {
			free(set);
			return ret;
		}
	}

	*pset = set;
	*psize = size;
	return 0;
}

static int write_tracing_data(struct feat_fd *ff,
			      struct evlist *evlist)
{
	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	return read_tracing_data(ff->fd, &evlist->core.entries);
}

static int write_build_id(struct feat_fd *ff,
			  struct evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	session = container_of(ff->ph, struct perf_session, header);

	if (!perf_session__read_build_ids(session, true))
		return -1;

	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	err = perf_session__write_buildid_table(session, ff);
	if (err < 0) {
		pr_debug("failed to write buildid table\n");
		return err;
	}
	perf_session__cache_build_ids(session);

	return 0;
}

static int write_hostname(struct feat_fd *ff,
			  struct evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(ff, uts.nodename);
}

static int write_osrelease(struct feat_fd *ff,
			   struct evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(ff, uts.release);
}

static int write_arch(struct feat_fd *ff,
		      struct evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(ff, uts.machine);
}

static int write_version(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
	return do_write_string(ff, perf_version_string);
}

static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc)
{
	FILE *file;
	char *buf = NULL;
	char *s, *p;
	const char *search = cpuinfo_proc;
	size_t len = 0;
	int ret = -1;

	if (!search)
		return -1;

	file = fopen("/proc/cpuinfo", "r");
	if (!file)
		return -1;

	while (getline(&buf, &len, file) > 0) {
		ret = strncmp(buf, search, strlen(search));
		if (!ret)
			break;
	}

	if (ret) {
		ret = -1;
		goto done;
	}

	s = buf;

	p = strchr(buf, ':');
	if (p && *(p+1) == ' ' && *(p+2))
		s = p + 2;
	p = strchr(s, '\n');
	if (p)
		*p = '\0';

	/* squash extra space characters (branding string) */
	p = s;
	while (*p) {
		if (isspace(*p)) {
			char *r = p + 1;
			char *q = skip_spaces(r);
			*p = ' ';
			if (q != (p+1))
				while ((*r++ = *q++));
		}
		p++;
	}
	ret = do_write_string(ff, s);
done:
	free(buf);
	fclose(file);
	return ret;
}

static int write_cpudesc(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
#if defined(__powerpc__) || defined(__hppa__) || defined(__sparc__)
#define CPUINFO_PROC	{ "cpu", }
#elif defined(__s390__)
#define CPUINFO_PROC	{ "vendor_id", }
#elif defined(__sh__)
#define CPUINFO_PROC	{ "cpu type", }
#elif defined(__alpha__) || defined(__mips__)
#define CPUINFO_PROC	{ "cpu model", }
#elif defined(__arm__)
#define CPUINFO_PROC	{ "model name", "Processor", }
#elif defined(__arc__)
#define CPUINFO_PROC	{ "Processor", }
#elif defined(__xtensa__)
#define CPUINFO_PROC	{ "core ID", }
#else
#define CPUINFO_PROC	{ "model name", }
#endif
	const char *cpuinfo_procs[] = CPUINFO_PROC;
#undef CPUINFO_PROC
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
		int ret;
		ret = __write_cpudesc(ff, cpuinfo_procs[i]);
		if (ret >= 0)
			return ret;
	}
	return -1;
}

static int write_nrcpus(struct feat_fd *ff,
			struct evlist *evlist __maybe_unused)
{
	long nr;
	u32 nrc, nra;
	int ret;

	nrc = cpu__max_present_cpu();

	nr = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr < 0)
		return -1;

	nra = (u32)(nr & UINT_MAX);

	ret = do_write(ff, &nrc, sizeof(nrc));
	if (ret < 0)
		return ret;

	return do_write(ff, &nra, sizeof(nra));
}

static int write_event_desc(struct feat_fd *ff,
			    struct evlist *evlist)
{
	struct evsel *evsel;
	u32 nre, nri, sz;
	int ret;

	nre = evlist->core.nr_entries;

	/*
	 * write number of events
	 */
	ret = do_write(ff, &nre, sizeof(nre));
	if (ret < 0)
		return ret;

	/*
	 * size of perf_event_attr struct
	 */
	sz = (u32)sizeof(evsel->core.attr);
	ret = do_write(ff, &sz, sizeof(sz));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		ret = do_write(ff, &evsel->core.attr, sz);
		if (ret < 0)
			return ret;
		/*
		 * write number of unique id per event
		 * there is one id per instance of an event
		 *
		 * copy into an nri to be independent of the
		 * type of ids,
		 */
		nri = evsel->core.ids;
		ret = do_write(ff, &nri, sizeof(nri));
		if (ret < 0)
			return ret;

		/*
		 * write event string as passed on cmdline
		 */
		ret = do_write_string(ff, evsel__name(evsel));
		if (ret < 0)
			return ret;
		/*
		 * write unique ids for this event
		 */
		ret = do_write(ff, evsel->core.id, evsel->core.ids * sizeof(u64));
		if (ret < 0)
			return ret;
	}
	return 0;
}
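
/*
 * Worked example of the EVENT_DESC layout written above (added note,
 * not original source). For a session with one "cycles" event opened
 * on two CPUs (hence two ids), the section would contain:
 *
 *	u32 nre = 1			// number of events
 *	u32 sz				// sizeof(struct perf_event_attr)
 *	struct perf_event_attr attr	// sz bytes
 *	u32 nri = 2			// number of ids for this event
 *	string "cycles"			// do_write_string() encoding
 *	u64 id[2]			// one id per event instance
 *
 * read_event_desc() below walks exactly this layout and tolerates attr
 * sizes that differ from the running tool's struct.
 */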

static int write_cmdline(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
	char pbuf[MAXPATHLEN], *buf;
	int i, ret, n;

	/* actual path to perf binary */
	buf = perf_exe(pbuf, MAXPATHLEN);

	/* account for binary path */
	n = perf_env.nr_cmdline + 1;

	ret = do_write(ff, &n, sizeof(n));
	if (ret < 0)
		return ret;

	ret = do_write_string(ff, buf);
	if (ret < 0)
		return ret;

	for (i = 0 ; i < perf_env.nr_cmdline; i++) {
		ret = do_write_string(ff, perf_env.cmdline_argv[i]);
		if (ret < 0)
			return ret;
	}
	return 0;
}

static int write_cpu_topology(struct feat_fd *ff,
			      struct evlist *evlist __maybe_unused)
{
	struct cpu_topology *tp;
	u32 i;
	int ret, j;

	tp = cpu_topology__new();
	if (!tp)
		return -1;

	ret = do_write(ff, &tp->core_sib, sizeof(tp->core_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->core_sib; i++) {
		ret = do_write_string(ff, tp->core_siblings[i]);
		if (ret < 0)
			goto done;
	}
	ret = do_write(ff, &tp->thread_sib, sizeof(tp->thread_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->thread_sib; i++) {
		ret = do_write_string(ff, tp->thread_siblings[i]);
		if (ret < 0)
			break;
	}

	ret = perf_env__read_cpu_topology_map(&perf_env);
	if (ret < 0)
		goto done;

	for (j = 0; j < perf_env.nr_cpus_avail; j++) {
		ret = do_write(ff, &perf_env.cpu[j].core_id,
			       sizeof(perf_env.cpu[j].core_id));
		if (ret < 0)
			return ret;
		ret = do_write(ff, &perf_env.cpu[j].socket_id,
			       sizeof(perf_env.cpu[j].socket_id));
		if (ret < 0)
			return ret;
	}

	if (!tp->die_sib)
		goto done;

	ret = do_write(ff, &tp->die_sib, sizeof(tp->die_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->die_sib; i++) {
		ret = do_write_string(ff, tp->die_siblings[i]);
		if (ret < 0)
			goto done;
	}

	for (j = 0; j < perf_env.nr_cpus_avail; j++) {
		ret = do_write(ff, &perf_env.cpu[j].die_id,
			       sizeof(perf_env.cpu[j].die_id));
		if (ret < 0)
			return ret;
	}

done:
	cpu_topology__delete(tp);
	return ret;
}

static int write_total_mem(struct feat_fd *ff,
			   struct evlist *evlist __maybe_unused)
{
	char *buf = NULL;
	FILE *fp;
	size_t len = 0;
	int ret = -1, n;
	uint64_t mem;

	fp = fopen("/proc/meminfo", "r");
	if (!fp)
		return -1;

	while (getline(&buf, &len, fp) > 0) {
		ret = strncmp(buf, "MemTotal:", 9);
		if (!ret)
			break;
	}
	if (!ret) {
		n = sscanf(buf, "%*s %"PRIu64, &mem);
		if (n == 1)
			ret = do_write(ff, &mem, sizeof(mem));
	} else
		ret = -1;
	free(buf);
	fclose(fp);
	return ret;
}

static int write_numa_topology(struct feat_fd *ff,
			       struct evlist *evlist __maybe_unused)
{
	struct numa_topology *tp;
	int ret = -1;
	u32 i;

	tp = numa_topology__new();
	if (!tp)
		return -ENOMEM;

	ret = do_write(ff, &tp->nr, sizeof(u32));
	if (ret < 0)
		goto err;

	for (i = 0; i < tp->nr; i++) {
		struct numa_topology_node *n = &tp->nodes[i];

		ret = do_write(ff, &n->node, sizeof(u32));
		if (ret < 0)
			goto err;

		ret = do_write(ff, &n->mem_total, sizeof(u64));
		if (ret)
			goto err;

		ret = do_write(ff, &n->mem_free, sizeof(u64));
		if (ret)
			goto err;

		ret = do_write_string(ff, n->cpus);
		if (ret < 0)
			goto err;
	}

	ret = 0;

err:
	numa_topology__delete(tp);
	return ret;
}

/*
 * File format:
 *
 * struct pmu_mappings {
 *	u32	pmu_num;
 *	struct pmu_map {
 *		u32	type;
 *		char	name[];
 *	}[pmu_num];
 * };
 */

static int write_pmu_mappings(struct feat_fd *ff,
			      struct evlist *evlist __maybe_unused)
{
	struct perf_pmu *pmu = NULL;
	u32 pmu_num = 0;
	int ret;

	/*
	 * Do a first pass to count number of pmu to avoid lseek so this
	 * works in pipe mode as well.
	 */
	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;
		pmu_num++;
	}

	ret = do_write(ff, &pmu_num, sizeof(pmu_num));
	if (ret < 0)
		return ret;

	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;

		ret = do_write(ff, &pmu->type, sizeof(pmu->type));
		if (ret < 0)
			return ret;

		ret = do_write_string(ff, pmu->name);
		if (ret < 0)
			return ret;
	}

	return 0;
}
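
/*
 * Worked example of the pmu_mappings layout (added note, not original
 * source). On a machine exposing the core PMU (type 4 on many x86
 * systems) and a hypothetical "uncore_imc" PMU of type 10, the section
 * would contain:
 *
 *	u32 pmu_num = 2
 *	u32 type = 4,  string "cpu"
 *	u32 type = 10, string "uncore_imc"
 *
 * with each name in the do_write_string() encoding, so readers can walk
 * the section using do_read_u32() and do_read_string().
 */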

/*
 * File format:
 *
 * struct group_descs {
 *	u32	nr_groups;
 *	struct group_desc {
 *		char	name[];
 *		u32	leader_idx;
 *		u32	nr_members;
 *	}[nr_groups];
 * };
 */
static int write_group_desc(struct feat_fd *ff,
			    struct evlist *evlist)
{
	u32 nr_groups = evlist->nr_groups;
	struct evsel *evsel;
	int ret;

	ret = do_write(ff, &nr_groups, sizeof(nr_groups));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_group_leader(evsel) && evsel->core.nr_members > 1) {
			const char *name = evsel->group_name ?: "{anon_group}";
			u32 leader_idx = evsel->idx;
			u32 nr_members = evsel->core.nr_members;

			ret = do_write_string(ff, name);
			if (ret < 0)
				return ret;

			ret = do_write(ff, &leader_idx, sizeof(leader_idx));
			if (ret < 0)
				return ret;

			ret = do_write(ff, &nr_members, sizeof(nr_members));
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}
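
/*
 * Example (added note, not original source): recording with
 * "perf record -e '{cycles,instructions}'" yields one group of two
 * members whose leader sits at evlist index 0, so the section would
 * hold:
 *
 *	u32 nr_groups  = 1
 *	string name    = "{anon_group}"		// no explicit group name
 *	u32 leader_idx = 0
 *	u32 nr_members = 2
 *
 * print_group_desc() below reconstructs the "{cycles,instructions}"
 * notation from this plus the EVENT_DESC feature.
 */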

/*
 * Return the CPU id as a raw string.
 *
 * Each architecture should provide a more precise id string that
 * can be used to match the architecture's "mapfile".
 */
char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
{
	return NULL;
}

/* Return zero when the cpuid from the mapfile.csv matches the
 * cpuid string generated on this platform.
 * Otherwise return non-zero.
 */
int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
{
	regex_t re;
	regmatch_t pmatch[1];
	int match;

	if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
		/* Warn that the mapfile cpuid pattern failed to compile. */
		pr_info("Invalid regular expression %s\n", mapcpuid);
		return 1;
	}

	match = !regexec(&re, cpuid, 1, pmatch, 0);
	regfree(&re);
	if (match) {
		size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so);

		/* Verify the entire string matched. */
		if (match_len == strlen(cpuid))
			return 0;
	}
	return 1;
}
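
/*
 * Usage sketch (added note, not original source): mapfile.csv entries
 * are extended regular expressions, so one row can cover a family of
 * parts. With illustrative x86-style strings:
 *
 *	strcmp_cpuid_str("GenuineIntel-6-55-[01234]",
 *			 "GenuineIntel-6-55-4")		// 0: whole string matched
 *	strcmp_cpuid_str("GenuineIntel-6-55",
 *			 "GenuineIntel-6-55-4")		// 1: only a prefix matched
 *
 * The exact format of the cpuid string is defined per architecture by
 * get_cpuid_str().
 */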

/*
 * default get_cpuid(): nothing gets recorded
 * actual implementation must be in arch/$(SRCARCH)/util/header.c
 */
int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
{
	return ENOSYS; /* Not implemented */
}

static int write_cpuid(struct feat_fd *ff,
		       struct evlist *evlist __maybe_unused)
{
	char buffer[64];
	int ret;

	ret = get_cpuid(buffer, sizeof(buffer));
	if (ret)
		return -1;

	return do_write_string(ff, buffer);
}

static int write_branch_stack(struct feat_fd *ff __maybe_unused,
			      struct evlist *evlist __maybe_unused)
{
	return 0;
}

static int write_auxtrace(struct feat_fd *ff,
			  struct evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	session = container_of(ff->ph, struct perf_session, header);

	err = auxtrace_index__write(ff->fd, &session->auxtrace_index);
	if (err < 0)
		pr_err("Failed to write auxtrace index\n");
	return err;
}

static int write_clockid(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
	return do_write(ff, &ff->ph->env.clock.clockid_res_ns,
			sizeof(ff->ph->env.clock.clockid_res_ns));
}

static int write_clock_data(struct feat_fd *ff,
			    struct evlist *evlist __maybe_unused)
{
	u64 *data64;
	u32 data32;
	int ret;

	/* version */
	data32 = 1;

	ret = do_write(ff, &data32, sizeof(data32));
	if (ret < 0)
		return ret;

	/* clockid */
	data32 = ff->ph->env.clock.clockid;

	ret = do_write(ff, &data32, sizeof(data32));
	if (ret < 0)
		return ret;

	/* TOD ref time */
	data64 = &ff->ph->env.clock.tod_ns;

	ret = do_write(ff, data64, sizeof(*data64));
	if (ret < 0)
		return ret;

	/* clockid ref time */
	data64 = &ff->ph->env.clock.clockid_ns;

	return do_write(ff, data64, sizeof(*data64));
}
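
/*
 * Added note, not original source: write_clock_data() therefore emits a
 * fixed version-1 record:
 *
 *	u32 version = 1
 *	u32 clockid			// e.g. CLOCK_MONOTONIC
 *	u64 tod_ns			// gettimeofday() at record time
 *	u64 clockid_ns			// the same instant on the session clock
 *
 * Capturing both reference timestamps for one instant is what lets
 * print_clock_data() below, and tools reading the file, relate session
 * timestamps to wall-clock time.
 */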

static int write_dir_format(struct feat_fd *ff,
			    struct evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	struct perf_data *data;

	session = container_of(ff->ph, struct perf_session, header);
	data = session->data;

	if (WARN_ON(!perf_data__is_dir(data)))
		return -1;

	return do_write(ff, &data->dir.version, sizeof(data->dir.version));
}

#ifdef HAVE_LIBBPF_SUPPORT
static int write_bpf_prog_info(struct feat_fd *ff,
			       struct evlist *evlist __maybe_unused)
{
	struct perf_env *env = &ff->ph->env;
	struct rb_root *root;
	struct rb_node *next;
	int ret;

	down_read(&env->bpf_progs.lock);

	ret = do_write(ff, &env->bpf_progs.infos_cnt,
		       sizeof(env->bpf_progs.infos_cnt));
	if (ret < 0)
		goto out;

	root = &env->bpf_progs.infos;
	next = rb_first(root);
	while (next) {
		struct bpf_prog_info_node *node;
		size_t len;

		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
		next = rb_next(&node->rb_node);
		len = sizeof(struct bpf_prog_info_linear) +
			node->info_linear->data_len;

		/* before writing to file, translate address to offset */
		bpf_program__bpil_addr_to_offs(node->info_linear);
		ret = do_write(ff, node->info_linear, len);
		/*
		 * translate back to address even when do_write() fails,
		 * so that this function never changes the data.
		 */
		bpf_program__bpil_offs_to_addr(node->info_linear);
		if (ret < 0)
			goto out;
	}
out:
	up_read(&env->bpf_progs.lock);
	return ret;
}

static int write_bpf_btf(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
	struct perf_env *env = &ff->ph->env;
	struct rb_root *root;
	struct rb_node *next;
	int ret;

	down_read(&env->bpf_progs.lock);

	ret = do_write(ff, &env->bpf_progs.btfs_cnt,
		       sizeof(env->bpf_progs.btfs_cnt));

	if (ret < 0)
		goto out;

	root = &env->bpf_progs.btfs;
	next = rb_first(root);
	while (next) {
		struct btf_node *node;

		node = rb_entry(next, struct btf_node, rb_node);
		next = rb_next(&node->rb_node);
		ret = do_write(ff, &node->id,
			       sizeof(u32) * 2 + node->data_size);
		if (ret < 0)
			goto out;
	}
out:
	up_read(&env->bpf_progs.lock);
	return ret;
}
#endif // HAVE_LIBBPF_SUPPORT

static int cpu_cache_level__sort(const void *a, const void *b)
{
	struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
	struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;

	return cache_a->level - cache_b->level;
}

static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
{
	if (a->level != b->level)
		return false;

	if (a->line_size != b->line_size)
		return false;

	if (a->sets != b->sets)
		return false;

	if (a->ways != b->ways)
		return false;

	if (strcmp(a->type, b->type))
		return false;

	if (strcmp(a->size, b->size))
		return false;

	if (strcmp(a->map, b->map))
		return false;

	return true;
}

static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
{
	char path[PATH_MAX], file[PATH_MAX];
	struct stat st;
	size_t len;

	scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
	scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);

	if (stat(file, &st))
		return 1;

	scnprintf(file, PATH_MAX, "%s/level", path);
	if (sysfs__read_int(file, (int *) &cache->level))
		return -1;

	scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
	if (sysfs__read_int(file, (int *) &cache->line_size))
		return -1;

	scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
	if (sysfs__read_int(file, (int *) &cache->sets))
		return -1;

	scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
	if (sysfs__read_int(file, (int *) &cache->ways))
		return -1;

	scnprintf(file, PATH_MAX, "%s/type", path);
	if (sysfs__read_str(file, &cache->type, &len))
		return -1;

	cache->type[len] = 0;
	cache->type = strim(cache->type);

	scnprintf(file, PATH_MAX, "%s/size", path);
	if (sysfs__read_str(file, &cache->size, &len)) {
		zfree(&cache->type);
		return -1;
	}

	cache->size[len] = 0;
	cache->size = strim(cache->size);

	scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
	if (sysfs__read_str(file, &cache->map, &len)) {
		zfree(&cache->size);
		zfree(&cache->type);
		return -1;
	}

	cache->map[len] = 0;
	cache->map = strim(cache->map);
	return 0;
}

static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
{
	fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
}

#define MAX_CACHE_LVL 4

static int build_caches(struct cpu_cache_level caches[], u32 *cntp)
{
	u32 i, cnt = 0;
	u32 nr, cpu;
	u16 level;

	nr = cpu__max_cpu();

	for (cpu = 0; cpu < nr; cpu++) {
		for (level = 0; level < MAX_CACHE_LVL; level++) {
			struct cpu_cache_level c;
			int err;

			err = cpu_cache_level__read(&c, cpu, level);
			if (err < 0)
				return err;

			if (err == 1)
				break;

			for (i = 0; i < cnt; i++) {
				if (cpu_cache_level__cmp(&c, &caches[i]))
					break;
			}

			if (i == cnt)
				caches[cnt++] = c;
			else
				cpu_cache_level__free(&c);
		}
	}
	*cntp = cnt;
	return 0;
}

static int write_cache(struct feat_fd *ff,
		       struct evlist *evlist __maybe_unused)
{
	u32 max_caches = cpu__max_cpu() * MAX_CACHE_LVL;
	struct cpu_cache_level caches[max_caches];
	u32 cnt = 0, i, version = 1;
	int ret;

	ret = build_caches(caches, &cnt);
	if (ret)
		goto out;

	qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);

	ret = do_write(ff, &version, sizeof(u32));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &cnt, sizeof(u32));
	if (ret < 0)
		goto out;

	for (i = 0; i < cnt; i++) {
		struct cpu_cache_level *c = &caches[i];

		#define _W(v)						\
			ret = do_write(ff, &c->v, sizeof(u32));		\
			if (ret < 0)					\
				goto out;

		_W(level)
		_W(line_size)
		_W(sets)
		_W(ways)
		#undef _W

		#define _W(v)						\
			ret = do_write_string(ff, (const char *) c->v);	\
			if (ret < 0)					\
				goto out;

		_W(type)
		_W(size)
		_W(map)
		#undef _W
	}

out:
	for (i = 0; i < cnt; i++)
		cpu_cache_level__free(&caches[i]);
	return ret;
}

static int write_stat(struct feat_fd *ff __maybe_unused,
		      struct evlist *evlist __maybe_unused)
{
	return 0;
}

static int write_sample_time(struct feat_fd *ff,
			     struct evlist *evlist)
{
	int ret;

	ret = do_write(ff, &evlist->first_sample_time,
		       sizeof(evlist->first_sample_time));
	if (ret < 0)
		return ret;

	return do_write(ff, &evlist->last_sample_time,
			sizeof(evlist->last_sample_time));
}

static int memory_node__read(struct memory_node *n, unsigned long idx)
{
	unsigned int phys, size = 0;
	char path[PATH_MAX];
	struct dirent *ent;
	DIR *dir;

#define for_each_memory(mem, dir)				\
	while ((ent = readdir(dir)))				\
		if (strcmp(ent->d_name, ".") &&			\
		    strcmp(ent->d_name, "..") &&		\
		    sscanf(ent->d_name, "memory%u", &mem) == 1)

	scnprintf(path, PATH_MAX,
		  "%s/devices/system/node/node%lu",
		  sysfs__mountpoint(), idx);

	dir = opendir(path);
	if (!dir) {
		pr_warning("failed: can't open memory sysfs data\n");
		return -1;
	}

	for_each_memory(phys, dir) {
		size = max(phys, size);
	}

	size++;

	n->set = bitmap_alloc(size);
	if (!n->set) {
		closedir(dir);
		return -ENOMEM;
	}

	n->node = idx;
	n->size = size;

	rewinddir(dir);

	for_each_memory(phys, dir) {
		set_bit(phys, n->set);
	}

	closedir(dir);
	return 0;
}

static int memory_node__sort(const void *a, const void *b)
{
	const struct memory_node *na = a;
	const struct memory_node *nb = b;

	return na->node - nb->node;
}

static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp)
{
	char path[PATH_MAX];
	struct dirent *ent;
	DIR *dir;
	u64 cnt = 0;
	int ret = 0;

	scnprintf(path, PATH_MAX, "%s/devices/system/node/",
		  sysfs__mountpoint());

	dir = opendir(path);
	if (!dir) {
		pr_debug2("%s: couldn't read %s, does this arch have topology information?\n",
			  __func__, path);
		return -1;
	}

	while (!ret && (ent = readdir(dir))) {
		unsigned int idx;
		int r;

		if (!strcmp(ent->d_name, ".") ||
		    !strcmp(ent->d_name, ".."))
			continue;

		r = sscanf(ent->d_name, "node%u", &idx);
		if (r != 1)
			continue;

		if (WARN_ONCE(cnt >= size,
			      "failed to write MEM_TOPOLOGY, way too many nodes\n")) {
			closedir(dir);
			return -1;
		}

		ret = memory_node__read(&nodes[cnt++], idx);
	}

	*cntp = cnt;
	closedir(dir);

	if (!ret)
		qsort(nodes, cnt, sizeof(nodes[0]), memory_node__sort);

	return ret;
}

#define MAX_MEMORY_NODES 2000

/*
 * The MEM_TOPOLOGY holds physical memory map for every
 * node in system. The format of data is as follows:
 *
 *  0 - version          | for future changes
 *  8 - block_size_bytes | /sys/devices/system/memory/block_size_bytes
 * 16 - count            | number of nodes
 *
 * For each node we store map of physical indexes for
 * each node:
 *
 * 32 - node id          | node index
 * 40 - size             | size of bitmap
 * 48 - bitmap           | bitmap of memory indexes that belongs to node
 */
static int write_mem_topology(struct feat_fd *ff __maybe_unused,
			      struct evlist *evlist __maybe_unused)
{
	static struct memory_node nodes[MAX_MEMORY_NODES];
	u64 bsize, version = 1, i, nr;
	int ret;

	ret = sysfs__read_xll("devices/system/memory/block_size_bytes",
			      (unsigned long long *) &bsize);
	if (ret)
		return ret;

	ret = build_mem_topology(&nodes[0], MAX_MEMORY_NODES, &nr);
	if (ret)
		return ret;

	ret = do_write(ff, &version, sizeof(version));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &bsize, sizeof(bsize));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &nr, sizeof(nr));
	if (ret < 0)
		goto out;

	for (i = 0; i < nr; i++) {
		struct memory_node *n = &nodes[i];

		#define _W(v)						\
			ret = do_write(ff, &n->v, sizeof(n->v));	\
			if (ret < 0)					\
				goto out;

		_W(node)
		_W(size)

		#undef _W

		ret = do_write_bitmap(ff, n->set, n->size);
		if (ret < 0)
			goto out;
	}

out:
	return ret;
}
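
/*
 * Worked example of the MEM_TOPOLOGY record (added note, not original
 * source). On a single-node machine with a 128 MB block size and memory
 * blocks memory0..memory15 present, the feature would serialize as:
 *
 *	u64 version = 1
 *	u64 bsize   = 0x8000000
 *	u64 nr      = 1
 *	u64 node    = 0
 *	u64 size    = 16		// bitmap length in bits
 *	bitmap with bits 0..15 set	// via do_write_bitmap()
 *
 * memory_node__fprintf() below prints this back, scaling the set-bit
 * count by bsize to report the node's memory.
 */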

static int write_compressed(struct feat_fd *ff __maybe_unused,
			    struct evlist *evlist __maybe_unused)
{
	int ret;

	ret = do_write(ff, &(ff->ph->env.comp_ver), sizeof(ff->ph->env.comp_ver));
	if (ret)
		return ret;

	ret = do_write(ff, &(ff->ph->env.comp_type), sizeof(ff->ph->env.comp_type));
	if (ret)
		return ret;

	ret = do_write(ff, &(ff->ph->env.comp_level), sizeof(ff->ph->env.comp_level));
	if (ret)
		return ret;

	ret = do_write(ff, &(ff->ph->env.comp_ratio), sizeof(ff->ph->env.comp_ratio));
	if (ret)
		return ret;

	return do_write(ff, &(ff->ph->env.comp_mmap_len), sizeof(ff->ph->env.comp_mmap_len));
}

static int write_cpu_pmu_caps(struct feat_fd *ff,
			      struct evlist *evlist __maybe_unused)
{
	struct perf_pmu *cpu_pmu = perf_pmu__find("cpu");
	struct perf_pmu_caps *caps = NULL;
	int nr_caps;
	int ret;

	if (!cpu_pmu)
		return -ENOENT;

	nr_caps = perf_pmu__caps_parse(cpu_pmu);
	if (nr_caps < 0)
		return nr_caps;

	ret = do_write(ff, &nr_caps, sizeof(nr_caps));
	if (ret < 0)
		return ret;

	list_for_each_entry(caps, &cpu_pmu->caps, list) {
		ret = do_write_string(ff, caps->name);
		if (ret < 0)
			return ret;

		ret = do_write_string(ff, caps->value);
		if (ret < 0)
			return ret;
	}

	return ret;
}

static void print_hostname(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
}

static void print_osrelease(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# os release : %s\n", ff->ph->env.os_release);
}

static void print_arch(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# arch : %s\n", ff->ph->env.arch);
}

static void print_cpudesc(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc);
}

static void print_nrcpus(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online);
	fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail);
}

static void print_version(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# perf version : %s\n", ff->ph->env.version);
}

static void print_cmdline(struct feat_fd *ff, FILE *fp)
{
	int nr, i;

	nr = ff->ph->env.nr_cmdline;

	fprintf(fp, "# cmdline : ");

	for (i = 0; i < nr; i++) {
		char *argv_i = strdup(ff->ph->env.cmdline_argv[i]);
		if (!argv_i) {
			fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]);
		} else {
			char *mem = argv_i;
			do {
				char *quote = strchr(argv_i, '\'');
				if (!quote)
					break;
				*quote++ = '\0';
				fprintf(fp, "%s\\\'", argv_i);
				argv_i = quote;
			} while (1);
			fprintf(fp, "%s ", argv_i);
			free(mem);
		}
	}
	fputc('\n', fp);
}

static void print_cpu_topology(struct feat_fd *ff, FILE *fp)
{
	struct perf_header *ph = ff->ph;
	int cpu_nr = ph->env.nr_cpus_avail;
	int nr, i;
	char *str;

	nr = ph->env.nr_sibling_cores;
	str = ph->env.sibling_cores;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling sockets : %s\n", str);
		str += strlen(str) + 1;
	}

	if (ph->env.nr_sibling_dies) {
		nr = ph->env.nr_sibling_dies;
		str = ph->env.sibling_dies;

		for (i = 0; i < nr; i++) {
			fprintf(fp, "# sibling dies : %s\n", str);
			str += strlen(str) + 1;
		}
	}

	nr = ph->env.nr_sibling_threads;
	str = ph->env.sibling_threads;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling threads : %s\n", str);
		str += strlen(str) + 1;
	}

	if (ph->env.nr_sibling_dies) {
		if (ph->env.cpu != NULL) {
			for (i = 0; i < cpu_nr; i++)
				fprintf(fp, "# CPU %d: Core ID %d, "
					    "Die ID %d, Socket ID %d\n",
					    i, ph->env.cpu[i].core_id,
					    ph->env.cpu[i].die_id,
					    ph->env.cpu[i].socket_id);
		} else
			fprintf(fp, "# Core ID, Die ID and Socket ID "
				    "information is not available\n");
	} else {
		if (ph->env.cpu != NULL) {
			for (i = 0; i < cpu_nr; i++)
				fprintf(fp, "# CPU %d: Core ID %d, "
					    "Socket ID %d\n",
					    i, ph->env.cpu[i].core_id,
					    ph->env.cpu[i].socket_id);
		} else
			fprintf(fp, "# Core ID and Socket ID "
				    "information is not available\n");
	}
}

static void print_clockid(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# clockid frequency: %"PRIu64" MHz\n",
		ff->ph->env.clock.clockid_res_ns * 1000);
}

static void print_clock_data(struct feat_fd *ff, FILE *fp)
{
	struct timespec clockid_ns;
	char tstr[64], date[64];
	struct timeval tod_ns;
	clockid_t clockid;
	struct tm ltime;
	u64 ref;

	if (!ff->ph->env.clock.enabled) {
		fprintf(fp, "# reference time disabled\n");
		return;
	}

	/* Compute TOD time. */
	ref = ff->ph->env.clock.tod_ns;
	tod_ns.tv_sec = ref / NSEC_PER_SEC;
	ref -= tod_ns.tv_sec * NSEC_PER_SEC;
	tod_ns.tv_usec = ref / NSEC_PER_USEC;

	/* Compute clockid time. */
	ref = ff->ph->env.clock.clockid_ns;
	clockid_ns.tv_sec = ref / NSEC_PER_SEC;
	ref -= clockid_ns.tv_sec * NSEC_PER_SEC;
	clockid_ns.tv_nsec = ref;

	clockid = ff->ph->env.clock.clockid;

	if (localtime_r(&tod_ns.tv_sec, &ltime) == NULL)
		snprintf(tstr, sizeof(tstr), "<error>");
	else {
		strftime(date, sizeof(date), "%F %T", &ltime);
		scnprintf(tstr, sizeof(tstr), "%s.%06d",
			  date, (int) tod_ns.tv_usec);
	}

	fprintf(fp, "# clockid: %s (%u)\n", clockid_name(clockid), clockid);
	fprintf(fp, "# reference time: %s = %ld.%06d (TOD) = %ld.%09ld (%s)\n",
		tstr, (long) tod_ns.tv_sec, (int) tod_ns.tv_usec,
		(long) clockid_ns.tv_sec, clockid_ns.tv_nsec,
		clockid_name(clockid));
}

static void print_dir_format(struct feat_fd *ff, FILE *fp)
{
	struct perf_session *session;
	struct perf_data *data;

	session = container_of(ff->ph, struct perf_session, header);
	data = session->data;

	fprintf(fp, "# directory data version : %"PRIu64"\n", data->dir.version);
}

#ifdef HAVE_LIBBPF_SUPPORT
static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
{
	struct perf_env *env = &ff->ph->env;
	struct rb_root *root;
	struct rb_node *next;

	down_read(&env->bpf_progs.lock);

	root = &env->bpf_progs.infos;
	next = rb_first(root);

	while (next) {
		struct bpf_prog_info_node *node;

		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
		next = rb_next(&node->rb_node);

		bpf_event__print_bpf_prog_info(&node->info_linear->info,
					       env, fp);
	}

	up_read(&env->bpf_progs.lock);
}

static void print_bpf_btf(struct feat_fd *ff, FILE *fp)
{
	struct perf_env *env = &ff->ph->env;
	struct rb_root *root;
	struct rb_node *next;

	down_read(&env->bpf_progs.lock);

	root = &env->bpf_progs.btfs;
	next = rb_first(root);

	while (next) {
		struct btf_node *node;

		node = rb_entry(next, struct btf_node, rb_node);
		next = rb_next(&node->rb_node);
		fprintf(fp, "# btf info of id %u\n", node->id);
	}

	up_read(&env->bpf_progs.lock);
}
#endif // HAVE_LIBBPF_SUPPORT

static void free_event_desc(struct evsel *events)
{
	struct evsel *evsel;

	if (!events)
		return;

	for (evsel = events; evsel->core.attr.size; evsel++) {
		zfree(&evsel->name);
		zfree(&evsel->core.id);
	}

	free(events);
}

static bool perf_attr_check(struct perf_event_attr *attr)
{
	if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3) {
		pr_warning("Reserved bits are set unexpectedly. "
			   "Please update perf tool.\n");
		return false;
	}

	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) {
		pr_warning("Unknown sample type (0x%llx) is detected. "
			   "Please update perf tool.\n",
			   attr->sample_type);
		return false;
	}

	if (attr->read_format & ~(PERF_FORMAT_MAX-1)) {
		pr_warning("Unknown read format (0x%llx) is detected. "
			   "Please update perf tool.\n",
			   attr->read_format);
		return false;
	}

	if ((attr->sample_type & PERF_SAMPLE_BRANCH_STACK) &&
	    (attr->branch_sample_type & ~(PERF_SAMPLE_BRANCH_MAX-1))) {
		pr_warning("Unknown branch sample type (0x%llx) is detected. "
			   "Please update perf tool.\n",
			   attr->branch_sample_type);

		return false;
	}

	return true;
}

static struct evsel *read_event_desc(struct feat_fd *ff)
{
	struct evsel *evsel, *events = NULL;
	u64 *id;
	void *buf = NULL;
	u32 nre, sz, nr, i, j;
	size_t msz;

	/* number of events */
	if (do_read_u32(ff, &nre))
		goto error;

	if (do_read_u32(ff, &sz))
		goto error;

	/* buffer to hold on file attr struct */
	buf = malloc(sz);
	if (!buf)
		goto error;

	/* the last event terminates with evsel->core.attr.size == 0: */
	events = calloc(nre + 1, sizeof(*events));
	if (!events)
		goto error;

	msz = sizeof(evsel->core.attr);
	if (sz < msz)
		msz = sz;

	for (i = 0, evsel = events; i < nre; evsel++, i++) {
		evsel->idx = i;

		/*
		 * must read entire on-file attr struct to
		 * sync up with layout.
		 */
		if (__do_read(ff, buf, sz))
			goto error;

		if (ff->ph->needs_swap)
			perf_event__attr_swap(buf);

		memcpy(&evsel->core.attr, buf, msz);

		if (!perf_attr_check(&evsel->core.attr))
			goto error;

		if (do_read_u32(ff, &nr))
			goto error;

		if (ff->ph->needs_swap)
			evsel->needs_swap = true;

		evsel->name = do_read_string(ff);
		if (!evsel->name)
			goto error;

		if (!nr)
			continue;

		id = calloc(nr, sizeof(*id));
		if (!id)
			goto error;
		evsel->core.ids = nr;
		evsel->core.id = id;

		for (j = 0 ; j < nr; j++) {
			if (do_read_u64(ff, id))
				goto error;
			id++;
		}
	}
out:
	free(buf);
	return events;
error:
	free_event_desc(events);
	events = NULL;
	goto out;
}

static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
				void *priv __maybe_unused)
{
	return fprintf(fp, ", %s = %s", name, val);
}

static void print_event_desc(struct feat_fd *ff, FILE *fp)
{
	struct evsel *evsel, *events;
	u32 j;
	u64 *id;

	if (ff->events)
		events = ff->events;
	else
		events = read_event_desc(ff);

	if (!events) {
		fprintf(fp, "# event desc: not available or unable to read\n");
		return;
	}

	for (evsel = events; evsel->core.attr.size; evsel++) {
		fprintf(fp, "# event : name = %s, ", evsel->name);

		if (evsel->core.ids) {
			fprintf(fp, ", id = {");
			for (j = 0, id = evsel->core.id; j < evsel->core.ids; j++, id++) {
				if (j)
					fputc(',', fp);
				fprintf(fp, " %"PRIu64, *id);
			}
			fprintf(fp, " }");
		}

		perf_event_attr__fprintf(fp, &evsel->core.attr, __desc_attr__fprintf, NULL);

		fputc('\n', fp);
	}

	free_event_desc(events);
	ff->events = NULL;
}

static void print_total_mem(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem);
}

static void print_numa_topology(struct feat_fd *ff, FILE *fp)
{
	int i;
	struct numa_node *n;

	for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) {
		n = &ff->ph->env.numa_nodes[i];

		fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
			    " free = %"PRIu64" kB\n",
			n->node, n->mem_total, n->mem_free);

		fprintf(fp, "# node%u cpu list : ", n->node);
		cpu_map__fprintf(n->map, fp);
	}
}

static void print_cpuid(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid);
}

static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains samples with branch stack\n");
}

static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
}

static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains stat data\n");
}

static void print_cache(struct feat_fd *ff, FILE *fp __maybe_unused)
{
	int i;

	fprintf(fp, "# CPU cache info:\n");
	for (i = 0; i < ff->ph->env.caches_cnt; i++) {
		fprintf(fp, "# ");
		cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]);
	}
}

static void print_compressed(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# compressed : %s, level = %d, ratio = %d\n",
		ff->ph->env.comp_type == PERF_COMP_ZSTD ? "Zstd" : "Unknown",
		ff->ph->env.comp_level, ff->ph->env.comp_ratio);
}

static void print_cpu_pmu_caps(struct feat_fd *ff, FILE *fp)
{
	const char *delimiter = "# cpu pmu capabilities: ";
	u32 nr_caps = ff->ph->env.nr_cpu_pmu_caps;
	char *str;

	if (!nr_caps) {
		fprintf(fp, "# cpu pmu capabilities: not available\n");
		return;
	}

	str = ff->ph->env.cpu_pmu_caps;
	while (nr_caps--) {
		fprintf(fp, "%s%s", delimiter, str);
		delimiter = ", ";
		str += strlen(str) + 1;
	}

	fprintf(fp, "\n");
}

static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
{
	const char *delimiter = "# pmu mappings: ";
	char *str, *tmp;
	u32 pmu_num;
	u32 type;

	pmu_num = ff->ph->env.nr_pmu_mappings;
	if (!pmu_num) {
		fprintf(fp, "# pmu mappings: not available\n");
		return;
	}

	str = ff->ph->env.pmu_mappings;

	while (pmu_num) {
		type = strtoul(str, &tmp, 0);
		if (*tmp != ':')
			goto error;

		str = tmp + 1;
		fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);

		delimiter = ", ";
		str += strlen(str) + 1;
		pmu_num--;
	}

	fprintf(fp, "\n");

	if (!pmu_num)
		return;
error:
	fprintf(fp, "# pmu mappings: unable to read\n");
}

static void print_group_desc(struct feat_fd *ff, FILE *fp)
{
	struct perf_session *session;
	struct evsel *evsel;
	u32 nr = 0;

	session = container_of(ff->ph, struct perf_session, header);

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel__is_group_leader(evsel) && evsel->core.nr_members > 1) {
			fprintf(fp, "# group: %s{%s", evsel->group_name ?: "", evsel__name(evsel));

			nr = evsel->core.nr_members - 1;
		} else if (nr) {
			fprintf(fp, ",%s", evsel__name(evsel));

			if (--nr == 0)
				fprintf(fp, "}\n");
		}
	}
}

static void print_sample_time(struct feat_fd *ff, FILE *fp)
{
	struct perf_session *session;
	char time_buf[32];
	double d;

	session = container_of(ff->ph, struct perf_session, header);

	timestamp__scnprintf_usec(session->evlist->first_sample_time,
				  time_buf, sizeof(time_buf));
	fprintf(fp, "# time of first sample : %s\n", time_buf);

	timestamp__scnprintf_usec(session->evlist->last_sample_time,
				  time_buf, sizeof(time_buf));
	fprintf(fp, "# time of last sample : %s\n", time_buf);

	d = (double)(session->evlist->last_sample_time -
		session->evlist->first_sample_time) / NSEC_PER_MSEC;

	fprintf(fp, "# sample duration : %10.3f ms\n", d);
}

static void memory_node__fprintf(struct memory_node *n,
				 unsigned long long bsize, FILE *fp)
{
	char buf_map[100], buf_size[50];
	unsigned long long size;

	size = bsize * bitmap_weight(n->set, n->size);
	unit_number__scnprintf(buf_size, 50, size);

	bitmap_scnprintf(n->set, n->size, buf_map, 100);
	fprintf(fp, "# %3" PRIu64 " [%s]: %s\n", n->node, buf_size, buf_map);
}

static void print_mem_topology(struct feat_fd *ff, FILE *fp)
{
	struct memory_node *nodes;
	int i, nr;

	nodes = ff->ph->env.memory_nodes;
	nr    = ff->ph->env.nr_memory_nodes;

	fprintf(fp, "# memory nodes (nr %d, block size 0x%llx):\n",
		nr, ff->ph->env.memory_bsize);

	for (i = 0; i < nr; i++) {
		memory_node__fprintf(&nodes[i], ff->ph->env.memory_bsize, fp);
	}
}

static int __event_process_build_id(struct perf_record_header_build_id *bev,
				    char *filename,
				    struct perf_session *session)
{
	int err = -1;
	struct machine *machine;
	u16 cpumode;
	struct dso *dso;
	enum dso_space_type dso_space;

	machine = perf_session__findnew_machine(session, bev->pid);
	if (!machine)
		goto out;

	cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		dso_space = DSO_SPACE__KERNEL;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		dso_space = DSO_SPACE__KERNEL_GUEST;
		break;
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_GUEST_USER:
		dso_space = DSO_SPACE__USER;
		break;
	default:
		goto out;
	}

	dso = machine__findnew_dso(machine, filename);
	if (dso != NULL) {
		char sbuild_id[SBUILD_ID_SIZE];
		struct build_id bid;
		size_t size = BUILD_ID_SIZE;

		if (bev->header.misc & PERF_RECORD_MISC_BUILD_ID_SIZE)
			size = bev->size;

		build_id__init(&bid, bev->data, size);
		dso__set_build_id(dso, &bid);

		if (dso_space != DSO_SPACE__USER) {
			struct kmod_path m = { .name = NULL, };

			if (!kmod_path__parse_name(&m, filename) && m.kmod)
				dso__set_module_info(dso, &m, machine);

			dso->kernel = dso_space;
			free(m.name);
		}

		build_id__sprintf(&dso->bid, sbuild_id);
		pr_debug("build id event received for %s: %s [%zu]\n",
			 dso->long_name, sbuild_id, size);
		dso__put(dso);
	}

	err = 0;
out:
	return err;
}

static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
						 int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct {
		struct perf_event_header header;
		u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
		char filename[0];
	} old_bev;
	struct perf_record_header_build_id bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
			return -1;

		if (header->needs_swap)
			perf_event_header__bswap(&old_bev.header);

		len = old_bev.header.size - sizeof(old_bev);
		if (readn(input, filename, len) != len)
			return -1;

		bev.header = old_bev.header;

		/*
		 * As the pid is the missing value, we need to fill it
		 * properly. The header.misc value gives us a nice hint.
		 */
		bev.pid = HOST_KERNEL_ID;
		if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
		    bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
			bev.pid = DEFAULT_GUEST_KERNEL_ID;

		memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}

	return 0;
}

static int perf_header__read_build_ids(struct perf_header *header,
				       int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct perf_record_header_build_id bev;
	char filename[PATH_MAX];
	u64 limit = offset + size, orig_offset = offset;
	int err = -1;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
			goto out;

		if (header->needs_swap)
			perf_event_header__bswap(&bev.header);

		len = bev.header.size - sizeof(bev);
		if (readn(input, filename, len) != len)
			goto out;
		/*
		 * The a1645ce1 changeset:
		 *
		 * "perf: 'perf kvm' tool for monitoring guest performance from host"
		 *
		 * Added a field to struct perf_record_header_build_id that broke the file
		 * format.
		 *
		 * Since the kernel build-id is the first entry, process the
		 * table using the old format if the well known
		 * '[kernel.kallsyms]' string for the kernel build-id has the
		 * first 4 characters chopped off (where the pid_t sits).
		 */
		if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
			if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
				return -1;
			return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
		}

		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}
	err = 0;
out:
	return err;
}
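
/*
 * Added note, not original source: the quirk detection above is pure
 * struct-layout arithmetic. The a1645ce1 change inserted a pid field,
 * growing the fixed-size part of each entry by 4 bytes:
 *
 *	old entry: [header][build_id]["[kernel.kallsyms]" ...]
 *	new entry: [header][pid][build_id][filename ...]
 *
 * Reading an old-format table with the new struct therefore swallows
 * the first 4 bytes of the filename ("[ker"), so the kernel entry shows
 * up as "nel.kallsyms]". Spotting that truncated string identifies the
 * old format, and the reader rewinds and reparses with the legacy
 * layout.
 */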
2207
2208/* Macro for features that simply need to read and store a string. */
2209#define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \
2210static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
2211{\
2212 ff->ph->env.__feat_env = do_read_string(ff); \
2213 return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
2214}
2215
2216FEAT_PROCESS_STR_FUN(hostname, hostname);
2217FEAT_PROCESS_STR_FUN(osrelease, os_release);
2218FEAT_PROCESS_STR_FUN(version, version);
2219FEAT_PROCESS_STR_FUN(arch, arch);
2220FEAT_PROCESS_STR_FUN(cpudesc, cpu_desc);
2221FEAT_PROCESS_STR_FUN(cpuid, cpuid);
2222
2223static int process_tracing_data(struct feat_fd *ff, void *data)
2224{
2225 ssize_t ret = trace_report(ff->fd, data, false);
2226
2227 return ret < 0 ? -1 : 0;
2228}
2229
2230static int process_build_id(struct feat_fd *ff, void *data __maybe_unused)
2231{
2232 if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size))
2233 pr_debug("Failed to read buildids, continuing...\n");
2234 return 0;
2235}
2236
2237static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused)
2238{
2239 int ret;
2240 u32 nr_cpus_avail, nr_cpus_online;
2241
2242 ret = do_read_u32(ff, &nr_cpus_avail);
2243 if (ret)
2244 return ret;
2245
2246 ret = do_read_u32(ff, &nr_cpus_online);
2247 if (ret)
2248 return ret;
2249 ff->ph->env.nr_cpus_avail = (int)nr_cpus_avail;
2250 ff->ph->env.nr_cpus_online = (int)nr_cpus_online;
2251 return 0;
2252}
2253
2254static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused)
2255{
2256 u64 total_mem;
2257 int ret;
2258
2259 ret = do_read_u64(ff, &total_mem);
2260 if (ret)
2261 return -1;
2262 ff->ph->env.total_mem = (unsigned long long)total_mem;
2263 return 0;
2264}
2265
2266static struct evsel *evlist__find_by_index(struct evlist *evlist, int idx)
2267{
2268 struct evsel *evsel;
2269
2270 evlist__for_each_entry(evlist, evsel) {
2271 if (evsel->idx == idx)
2272 return evsel;
2273 }
2274
2275 return NULL;
2276}
2277
2278static void evlist__set_event_name(struct evlist *evlist, struct evsel *event)
2279{
2280 struct evsel *evsel;
2281
2282 if (!event->name)
2283 return;
2284
2285 evsel = evlist__find_by_index(evlist, event->idx);
2286 if (!evsel)
2287 return;
2288
2289 if (evsel->name)
2290 return;
2291
2292 evsel->name = strdup(event->name);
2293}
2294
2295static int
2296process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
2297{
2298 struct perf_session *session;
2299 struct evsel *evsel, *events = read_event_desc(ff);
2300
2301 if (!events)
2302 return 0;
2303
2304 session = container_of(ff->ph, struct perf_session, header);
2305
2306 if (session->data->is_pipe) {
2307 /* Save events for reading later by print_event_desc,
2308 * since they can't be read again in pipe mode. */
2309 ff->events = events;
2310 }
2311
2312 for (evsel = events; evsel->core.attr.size; evsel++)
2313 evlist__set_event_name(session->evlist, evsel);
2314
2315 if (!session->data->is_pipe)
2316 free_event_desc(events);
2317
2318 return 0;
2319}
2320
2321static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused)
2322{
2323 char *str, *cmdline = NULL, **argv = NULL;
2324 u32 nr, i, len = 0;
2325
2326 if (do_read_u32(ff, &nr))
2327 return -1;
2328
2329 ff->ph->env.nr_cmdline = nr;
2330
2331 cmdline = zalloc(ff->size + nr + 1);
2332 if (!cmdline)
2333 return -1;
2334
2335 argv = zalloc(sizeof(char *) * (nr + 1));
2336 if (!argv)
2337 goto error;
2338
2339 for (i = 0; i < nr; i++) {
2340 str = do_read_string(ff);
2341 if (!str)
2342 goto error;
2343
2344 argv[i] = cmdline + len;
2345 memcpy(argv[i], str, strlen(str) + 1);
2346 len += strlen(str) + 1;
2347 free(str);
2348 }
2349 ff->ph->env.cmdline = cmdline;
2350 ff->ph->env.cmdline_argv = (const char **) argv;
2351 return 0;
2352
2353error:
2354 free(argv);
2355 free(cmdline);
2356 return -1;
2357}
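/*
 * Illustration (hypothetical record, not part of this file): after
 * process_cmdline() for "perf record -e cycles", the strings sit back to
 * back in env.cmdline and env.cmdline_argv points into that one buffer:
 *
 *	cmdline      = "perf\0record\0-e\0cycles\0"
 *	cmdline_argv = { &cmdline[0], &cmdline[5], &cmdline[12],
 *			 &cmdline[15], NULL }
 */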
2358
2359static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
2360{
2361 u32 nr, i;
2362 char *str;
2363 struct strbuf sb;
2364 int cpu_nr = ff->ph->env.nr_cpus_avail;
2365 u64 size = 0;
2366 struct perf_header *ph = ff->ph;
2367 bool do_core_id_test = true;
2368
2369 ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
2370 if (!ph->env.cpu)
2371 return -1;
2372
2373 if (do_read_u32(ff, &nr))
2374 goto free_cpu;
2375
2376 ph->env.nr_sibling_cores = nr;
2377 size += sizeof(u32);
2378 if (strbuf_init(&sb, 128) < 0)
2379 goto free_cpu;
2380
2381 for (i = 0; i < nr; i++) {
2382 str = do_read_string(ff);
2383 if (!str)
2384 goto error;
2385
2386 /* include a NULL character at the end */
2387 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2388 goto error;
2389 size += string_size(str);
2390 free(str);
2391 }
2392 ph->env.sibling_cores = strbuf_detach(&sb, NULL);
2393
2394 if (do_read_u32(ff, &nr))
2395		goto free_cpu;
2396
2397 ph->env.nr_sibling_threads = nr;
2398 size += sizeof(u32);
2399
2400 for (i = 0; i < nr; i++) {
2401 str = do_read_string(ff);
2402 if (!str)
2403 goto error;
2404
2405 /* include a NULL character at the end */
2406 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2407 goto error;
2408 size += string_size(str);
2409 free(str);
2410 }
2411 ph->env.sibling_threads = strbuf_detach(&sb, NULL);
2412
2413 /*
2414	 * The header may be from an old perf,
2415 * which doesn't include core id and socket id information.
2416 */
2417 if (ff->size <= size) {
2418 zfree(&ph->env.cpu);
2419 return 0;
2420 }
2421
2422	/* On s390 the socket_id number is not related to the number of cpus.
2423	 * The socket_id number might be higher than the number of cpus.
2424 * This depends on the configuration.
2425 * AArch64 is the same.
2426 */
2427 if (ph->env.arch && (!strncmp(ph->env.arch, "s390", 4)
2428 || !strncmp(ph->env.arch, "aarch64", 7)))
2429 do_core_id_test = false;
2430
2431 for (i = 0; i < (u32)cpu_nr; i++) {
2432 if (do_read_u32(ff, &nr))
2433 goto free_cpu;
2434
2435 ph->env.cpu[i].core_id = nr;
2436 size += sizeof(u32);
2437
2438 if (do_read_u32(ff, &nr))
2439 goto free_cpu;
2440
2441 if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) {
2442			pr_debug("socket_id number is too big. "
2443				 "You may need to upgrade the perf tool.\n");
2444 goto free_cpu;
2445 }
2446
2447 ph->env.cpu[i].socket_id = nr;
2448 size += sizeof(u32);
2449 }
2450
2451 /*
2452	 * The header may be from an old perf,
2453 * which doesn't include die information.
2454 */
2455 if (ff->size <= size)
2456 return 0;
2457
2458 if (do_read_u32(ff, &nr))
2459		goto free_cpu;
2460
2461 ph->env.nr_sibling_dies = nr;
2462 size += sizeof(u32);
2463
2464 for (i = 0; i < nr; i++) {
2465 str = do_read_string(ff);
2466 if (!str)
2467 goto error;
2468
2469 /* include a NULL character at the end */
2470 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2471 goto error;
2472 size += string_size(str);
2473 free(str);
2474 }
2475 ph->env.sibling_dies = strbuf_detach(&sb, NULL);
2476
2477 for (i = 0; i < (u32)cpu_nr; i++) {
2478 if (do_read_u32(ff, &nr))
2479 goto free_cpu;
2480
2481 ph->env.cpu[i].die_id = nr;
2482 }
2483
2484 return 0;
2485
2486error:
2487 strbuf_release(&sb);
2488free_cpu:
2489 zfree(&ph->env.cpu);
2490 return -1;
2491}
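/*
 * Sketch of the CPU_TOPOLOGY section layout as consumed above; the last
 * three parts are optional because older perf did not write them:
 *
 *	u32 nr_sibling_cores;   then that many NUL-terminated strings
 *	u32 nr_sibling_threads; then that many NUL-terminated strings
 *	{ u32 core_id; u32 socket_id; }  once per available CPU
 *	u32 nr_sibling_dies;    then that many NUL-terminated strings
 *	u32 die_id;             once per available CPU
 */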
2492
2493static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused)
2494{
2495 struct numa_node *nodes, *n;
2496 u32 nr, i;
2497 char *str;
2498
2499 /* nr nodes */
2500 if (do_read_u32(ff, &nr))
2501 return -1;
2502
2503 nodes = zalloc(sizeof(*nodes) * nr);
2504 if (!nodes)
2505 return -ENOMEM;
2506
2507 for (i = 0; i < nr; i++) {
2508 n = &nodes[i];
2509
2510 /* node number */
2511 if (do_read_u32(ff, &n->node))
2512 goto error;
2513
2514 if (do_read_u64(ff, &n->mem_total))
2515 goto error;
2516
2517 if (do_read_u64(ff, &n->mem_free))
2518 goto error;
2519
2520 str = do_read_string(ff);
2521 if (!str)
2522 goto error;
2523
2524		n->map = perf_cpu_map__new(str);
2525		free(str);
2526		if (!n->map)
2527			goto error;
2528
2529 }
2530 ff->ph->env.nr_numa_nodes = nr;
2531 ff->ph->env.numa_nodes = nodes;
2532 return 0;
2533
2534error:
2535 free(nodes);
2536 return -1;
2537}
2538
2539static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused)
2540{
2541 char *name;
2542 u32 pmu_num;
2543 u32 type;
2544 struct strbuf sb;
2545
2546 if (do_read_u32(ff, &pmu_num))
2547 return -1;
2548
2549 if (!pmu_num) {
2550 pr_debug("pmu mappings not available\n");
2551 return 0;
2552 }
2553
2554 ff->ph->env.nr_pmu_mappings = pmu_num;
2555 if (strbuf_init(&sb, 128) < 0)
2556 return -1;
2557
2558 while (pmu_num) {
2559 if (do_read_u32(ff, &type))
2560 goto error;
2561
2562 name = do_read_string(ff);
2563 if (!name)
2564 goto error;
2565
2566 if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
2567 goto error;
2568 /* include a NULL character at the end */
2569 if (strbuf_add(&sb, "", 1) < 0)
2570 goto error;
2571
2572 if (!strcmp(name, "msr"))
2573 ff->ph->env.msr_pmu_type = type;
2574
2575 free(name);
2576 pmu_num--;
2577 }
2578 ff->ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
2579 return 0;
2580
2581error:
2582 strbuf_release(&sb);
2583 return -1;
2584}
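/*
 * env.pmu_mappings ends up holding nr_pmu_mappings "type:name" strings
 * back to back, each NUL terminated. A minimal consumer sketch (for
 * illustration only, not an interface of this file):
 *
 *	const char *s = env->pmu_mappings;
 *
 *	for (u32 i = 0; i < env->nr_pmu_mappings; i++) {
 *		printf("%s\n", s);	// e.g. "4:cpu"
 *		s += strlen(s) + 1;
 *	}
 */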
2585
2586static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused)
2587{
2588	int ret = -1;
2589 u32 i, nr, nr_groups;
2590 struct perf_session *session;
2591 struct evsel *evsel, *leader = NULL;
2592 struct group_desc {
2593 char *name;
2594 u32 leader_idx;
2595 u32 nr_members;
2596 } *desc;
2597
2598 if (do_read_u32(ff, &nr_groups))
2599 return -1;
2600
2601 ff->ph->env.nr_groups = nr_groups;
2602 if (!nr_groups) {
2603 pr_debug("group desc not available\n");
2604 return 0;
2605 }
2606
2607 desc = calloc(nr_groups, sizeof(*desc));
2608 if (!desc)
2609 return -1;
2610
2611 for (i = 0; i < nr_groups; i++) {
2612 desc[i].name = do_read_string(ff);
2613 if (!desc[i].name)
2614 goto out_free;
2615
2616 if (do_read_u32(ff, &desc[i].leader_idx))
2617 goto out_free;
2618
2619 if (do_read_u32(ff, &desc[i].nr_members))
2620 goto out_free;
2621 }
2622
2623 /*
2624 * Rebuild group relationship based on the group_desc
2625 */
2626 session = container_of(ff->ph, struct perf_session, header);
2627 session->evlist->nr_groups = nr_groups;
2628
2629 i = nr = 0;
2630 evlist__for_each_entry(session->evlist, evsel) {
2631 if (evsel->idx == (int) desc[i].leader_idx) {
2632 evsel->leader = evsel;
2633 /* {anon_group} is a dummy name */
2634 if (strcmp(desc[i].name, "{anon_group}")) {
2635 evsel->group_name = desc[i].name;
2636 desc[i].name = NULL;
2637 }
2638 evsel->core.nr_members = desc[i].nr_members;
2639
2640 if (i >= nr_groups || nr > 0) {
2641 pr_debug("invalid group desc\n");
2642 goto out_free;
2643 }
2644
2645 leader = evsel;
2646 nr = evsel->core.nr_members - 1;
2647 i++;
2648 } else if (nr) {
2649 /* This is a group member */
2650 evsel->leader = leader;
2651
2652 nr--;
2653 }
2654 }
2655
2656 if (i != nr_groups || nr != 0) {
2657 pr_debug("invalid group desc\n");
2658 goto out_free;
2659 }
2660
2661 ret = 0;
2662out_free:
2663 for (i = 0; i < nr_groups; i++)
2664 zfree(&desc[i].name);
2665 free(desc);
2666
2667 return ret;
2668}
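/*
 * Worked example with hypothetical data: a two-event group recorded as
 * "{cycles,instructions}" is stored as one group_desc entry, e.g.
 * { name = "{anon_group}", leader_idx = 0, nr_members = 2 }. The walk
 * above makes the cycles evsel its own leader, drops the dummy name,
 * and points the instructions evsel's ->leader at the cycles evsel.
 */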
2669
2670static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused)
2671{
2672 struct perf_session *session;
2673 int err;
2674
2675 session = container_of(ff->ph, struct perf_session, header);
2676
2677 err = auxtrace_index__process(ff->fd, ff->size, session,
2678 ff->ph->needs_swap);
2679 if (err < 0)
2680 pr_err("Failed to process auxtrace index\n");
2681 return err;
2682}
2683
2684static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
2685{
2686 struct cpu_cache_level *caches;
2687 u32 cnt, i, version;
2688
2689 if (do_read_u32(ff, &version))
2690 return -1;
2691
2692 if (version != 1)
2693 return -1;
2694
2695 if (do_read_u32(ff, &cnt))
2696 return -1;
2697
2698 caches = zalloc(sizeof(*caches) * cnt);
2699 if (!caches)
2700 return -1;
2701
2702 for (i = 0; i < cnt; i++) {
2703 struct cpu_cache_level c;
2704
2705 #define _R(v) \
2706 if (do_read_u32(ff, &c.v))\
2707 goto out_free_caches; \
2708
2709 _R(level)
2710 _R(line_size)
2711 _R(sets)
2712 _R(ways)
2713 #undef _R
2714
2715 #define _R(v) \
2716 c.v = do_read_string(ff); \
2717 if (!c.v) \
2718 goto out_free_caches;
2719
2720 _R(type)
2721 _R(size)
2722 _R(map)
2723 #undef _R
2724
2725 caches[i] = c;
2726 }
2727
2728 ff->ph->env.caches = caches;
2729 ff->ph->env.caches_cnt = cnt;
2730 return 0;
2731out_free_caches:
2732 free(caches);
2733 return -1;
2734}
2735
2736static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused)
2737{
2738 struct perf_session *session;
2739 u64 first_sample_time, last_sample_time;
2740 int ret;
2741
2742 session = container_of(ff->ph, struct perf_session, header);
2743
2744 ret = do_read_u64(ff, &first_sample_time);
2745 if (ret)
2746 return -1;
2747
2748 ret = do_read_u64(ff, &last_sample_time);
2749 if (ret)
2750 return -1;
2751
2752 session->evlist->first_sample_time = first_sample_time;
2753 session->evlist->last_sample_time = last_sample_time;
2754 return 0;
2755}
2756
2757static int process_mem_topology(struct feat_fd *ff,
2758 void *data __maybe_unused)
2759{
2760 struct memory_node *nodes;
2761 u64 version, i, nr, bsize;
2762 int ret = -1;
2763
2764 if (do_read_u64(ff, &version))
2765 return -1;
2766
2767 if (version != 1)
2768 return -1;
2769
2770 if (do_read_u64(ff, &bsize))
2771 return -1;
2772
2773 if (do_read_u64(ff, &nr))
2774 return -1;
2775
2776 nodes = zalloc(sizeof(*nodes) * nr);
2777 if (!nodes)
2778 return -1;
2779
2780 for (i = 0; i < nr; i++) {
2781 struct memory_node n;
2782
2783 #define _R(v) \
2784 if (do_read_u64(ff, &n.v)) \
2785 goto out; \
2786
2787 _R(node)
2788 _R(size)
2789
2790 #undef _R
2791
2792 if (do_read_bitmap(ff, &n.set, &n.size))
2793 goto out;
2794
2795 nodes[i] = n;
2796 }
2797
2798 ff->ph->env.memory_bsize = bsize;
2799 ff->ph->env.memory_nodes = nodes;
2800 ff->ph->env.nr_memory_nodes = nr;
2801 ret = 0;
2802
2803out:
2804 if (ret)
2805 free(nodes);
2806 return ret;
2807}
2808
2809static int process_clockid(struct feat_fd *ff,
2810 void *data __maybe_unused)
2811{
2812 if (do_read_u64(ff, &ff->ph->env.clock.clockid_res_ns))
2813 return -1;
2814
2815 return 0;
2816}
2817
2818static int process_clock_data(struct feat_fd *ff,
2819 void *_data __maybe_unused)
2820{
2821 u32 data32;
2822 u64 data64;
2823
2824 /* version */
2825 if (do_read_u32(ff, &data32))
2826 return -1;
2827
2828 if (data32 != 1)
2829 return -1;
2830
2831 /* clockid */
2832 if (do_read_u32(ff, &data32))
2833 return -1;
2834
2835 ff->ph->env.clock.clockid = data32;
2836
2837 /* TOD ref time */
2838 if (do_read_u64(ff, &data64))
2839 return -1;
2840
2841 ff->ph->env.clock.tod_ns = data64;
2842
2843 /* clockid ref time */
2844 if (do_read_u64(ff, &data64))
2845 return -1;
2846
2847 ff->ph->env.clock.clockid_ns = data64;
2848 ff->ph->env.clock.enabled = true;
2849 return 0;
2850}
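/*
 * On-file layout of the CLOCK_DATA feature as parsed above:
 *
 *	u32 version;	// must be 1
 *	u32 clockid;	// e.g. CLOCK_MONOTONIC
 *	u64 tod_ns;	// CLOCK_REALTIME timestamp of a reference instant
 *	u64 clockid_ns;	// the same instant on the recording clockid
 *
 * The pair of reference timestamps lets report tools translate sample
 * times into wall-clock time.
 */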
2851
2852static int process_dir_format(struct feat_fd *ff,
2853 void *_data __maybe_unused)
2854{
2855 struct perf_session *session;
2856 struct perf_data *data;
2857
2858 session = container_of(ff->ph, struct perf_session, header);
2859 data = session->data;
2860
2861 if (WARN_ON(!perf_data__is_dir(data)))
2862 return -1;
2863
2864 return do_read_u64(ff, &data->dir.version);
2865}
2866
2867#ifdef HAVE_LIBBPF_SUPPORT
2868static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
2869{
2870 struct bpf_prog_info_linear *info_linear;
2871 struct bpf_prog_info_node *info_node;
2872 struct perf_env *env = &ff->ph->env;
2873 u32 count, i;
2874 int err = -1;
2875
2876 if (ff->ph->needs_swap) {
2877		pr_warning("interpreting bpf_prog_info from systems with a different endianness is not yet supported\n");
2878 return 0;
2879 }
2880
2881 if (do_read_u32(ff, &count))
2882 return -1;
2883
2884 down_write(&env->bpf_progs.lock);
2885
2886 for (i = 0; i < count; ++i) {
2887 u32 info_len, data_len;
2888
2889 info_linear = NULL;
2890 info_node = NULL;
2891 if (do_read_u32(ff, &info_len))
2892 goto out;
2893 if (do_read_u32(ff, &data_len))
2894 goto out;
2895
2896 if (info_len > sizeof(struct bpf_prog_info)) {
2897 pr_warning("detected invalid bpf_prog_info\n");
2898 goto out;
2899 }
2900
2901 info_linear = malloc(sizeof(struct bpf_prog_info_linear) +
2902 data_len);
2903 if (!info_linear)
2904 goto out;
2905 info_linear->info_len = sizeof(struct bpf_prog_info);
2906 info_linear->data_len = data_len;
2907 if (do_read_u64(ff, (u64 *)(&info_linear->arrays)))
2908 goto out;
2909 if (__do_read(ff, &info_linear->info, info_len))
2910 goto out;
2911 if (info_len < sizeof(struct bpf_prog_info))
2912 memset(((void *)(&info_linear->info)) + info_len, 0,
2913 sizeof(struct bpf_prog_info) - info_len);
2914
2915 if (__do_read(ff, info_linear->data, data_len))
2916 goto out;
2917
2918 info_node = malloc(sizeof(struct bpf_prog_info_node));
2919 if (!info_node)
2920 goto out;
2921
2922 /* after reading from file, translate offset to address */
2923 bpf_program__bpil_offs_to_addr(info_linear);
2924 info_node->info_linear = info_linear;
2925 perf_env__insert_bpf_prog_info(env, info_node);
2926 }
2927
2928 up_write(&env->bpf_progs.lock);
2929 return 0;
2930out:
2931 free(info_linear);
2932 free(info_node);
2933 up_write(&env->bpf_progs.lock);
2934 return err;
2935}
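/*
 * On-file layout of each BPF_PROG_INFO entry as parsed above (the entry
 * count is read first, then per program):
 *
 *	u32 info_len;	// bytes of struct bpf_prog_info present on file
 *	u32 data_len;	// bytes of the variable-size arrays that follow
 *	u64 arrays;	// bitmask of which arrays were dumped
 *	struct bpf_prog_info info;	// truncated to info_len on file
 *	u8 data[data_len];
 */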
2936
2937static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
2938{
2939 struct perf_env *env = &ff->ph->env;
2940 struct btf_node *node = NULL;
2941 u32 count, i;
2942 int err = -1;
2943
2944 if (ff->ph->needs_swap) {
2945		pr_warning("interpreting btf from systems with a different endianness is not yet supported\n");
2946 return 0;
2947 }
2948
2949 if (do_read_u32(ff, &count))
2950 return -1;
2951
2952 down_write(&env->bpf_progs.lock);
2953
2954 for (i = 0; i < count; ++i) {
2955 u32 id, data_size;
2956
2957 if (do_read_u32(ff, &id))
2958 goto out;
2959 if (do_read_u32(ff, &data_size))
2960 goto out;
2961
2962 node = malloc(sizeof(struct btf_node) + data_size);
2963 if (!node)
2964 goto out;
2965
2966 node->id = id;
2967 node->data_size = data_size;
2968
2969 if (__do_read(ff, node->data, data_size))
2970 goto out;
2971
2972 perf_env__insert_btf(env, node);
2973 node = NULL;
2974 }
2975
2976 err = 0;
2977out:
2978 up_write(&env->bpf_progs.lock);
2979 free(node);
2980 return err;
2981}
2982#endif // HAVE_LIBBPF_SUPPORT
2983
2984static int process_compressed(struct feat_fd *ff,
2985 void *data __maybe_unused)
2986{
2987 if (do_read_u32(ff, &(ff->ph->env.comp_ver)))
2988 return -1;
2989
2990 if (do_read_u32(ff, &(ff->ph->env.comp_type)))
2991 return -1;
2992
2993 if (do_read_u32(ff, &(ff->ph->env.comp_level)))
2994 return -1;
2995
2996 if (do_read_u32(ff, &(ff->ph->env.comp_ratio)))
2997 return -1;
2998
2999 if (do_read_u32(ff, &(ff->ph->env.comp_mmap_len)))
3000 return -1;
3001
3002 return 0;
3003}
3004
3005static int process_cpu_pmu_caps(struct feat_fd *ff,
3006 void *data __maybe_unused)
3007{
3008 char *name, *value;
3009 struct strbuf sb;
3010 u32 nr_caps;
3011
3012 if (do_read_u32(ff, &nr_caps))
3013 return -1;
3014
3015 if (!nr_caps) {
3016 pr_debug("cpu pmu capabilities not available\n");
3017 return 0;
3018 }
3019
3020 ff->ph->env.nr_cpu_pmu_caps = nr_caps;
3021
3022 if (strbuf_init(&sb, 128) < 0)
3023 return -1;
3024
3025 while (nr_caps--) {
3026 name = do_read_string(ff);
3027 if (!name)
3028 goto error;
3029
3030 value = do_read_string(ff);
3031 if (!value)
3032 goto free_name;
3033
3034 if (strbuf_addf(&sb, "%s=%s", name, value) < 0)
3035 goto free_value;
3036
3037 /* include a NULL character at the end */
3038 if (strbuf_add(&sb, "", 1) < 0)
3039 goto free_value;
3040
3041 if (!strcmp(name, "branches"))
3042 ff->ph->env.max_branches = atoi(value);
3043
3044 free(value);
3045 free(name);
3046 }
3047 ff->ph->env.cpu_pmu_caps = strbuf_detach(&sb, NULL);
3048 return 0;
3049
3050free_value:
3051 free(value);
3052free_name:
3053 free(name);
3054error:
3055 strbuf_release(&sb);
3056 return -1;
3057}
3058
3059#define FEAT_OPR(n, func, __full_only) \
3060 [HEADER_##n] = { \
3061 .name = __stringify(n), \
3062 .write = write_##func, \
3063 .print = print_##func, \
3064 .full_only = __full_only, \
3065 .process = process_##func, \
3066 .synthesize = true \
3067 }
3068
3069#define FEAT_OPN(n, func, __full_only) \
3070 [HEADER_##n] = { \
3071 .name = __stringify(n), \
3072 .write = write_##func, \
3073 .print = print_##func, \
3074 .full_only = __full_only, \
3075 .process = process_##func \
3076 }
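/*
 * For reference, FEAT_OPR(HOSTNAME, hostname, false) below expands to a
 * designated initializer along these lines (preprocessor sketch):
 *
 *	[HEADER_HOSTNAME] = {
 *		.name	    = "HOSTNAME",
 *		.write	    = write_hostname,
 *		.print	    = print_hostname,
 *		.full_only  = false,
 *		.process    = process_hostname,
 *		.synthesize = true
 *	}
 *
 * FEAT_OPN() is identical except that it leaves .synthesize unset.
 */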
3077
3078/* feature_ops not implemented: */
3079#define print_tracing_data NULL
3080#define print_build_id NULL
3081
3082#define process_branch_stack NULL
3083#define process_stat NULL
3084
3085// Only used in util/synthetic-events.c
3086const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];
3087
3088const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE] = {
3089 FEAT_OPN(TRACING_DATA, tracing_data, false),
3090 FEAT_OPN(BUILD_ID, build_id, false),
3091 FEAT_OPR(HOSTNAME, hostname, false),
3092 FEAT_OPR(OSRELEASE, osrelease, false),
3093 FEAT_OPR(VERSION, version, false),
3094 FEAT_OPR(ARCH, arch, false),
3095 FEAT_OPR(NRCPUS, nrcpus, false),
3096 FEAT_OPR(CPUDESC, cpudesc, false),
3097 FEAT_OPR(CPUID, cpuid, false),
3098 FEAT_OPR(TOTAL_MEM, total_mem, false),
3099 FEAT_OPR(EVENT_DESC, event_desc, false),
3100 FEAT_OPR(CMDLINE, cmdline, false),
3101 FEAT_OPR(CPU_TOPOLOGY, cpu_topology, true),
3102 FEAT_OPR(NUMA_TOPOLOGY, numa_topology, true),
3103 FEAT_OPN(BRANCH_STACK, branch_stack, false),
3104 FEAT_OPR(PMU_MAPPINGS, pmu_mappings, false),
3105 FEAT_OPR(GROUP_DESC, group_desc, false),
3106 FEAT_OPN(AUXTRACE, auxtrace, false),
3107 FEAT_OPN(STAT, stat, false),
3108 FEAT_OPN(CACHE, cache, true),
3109 FEAT_OPR(SAMPLE_TIME, sample_time, false),
3110 FEAT_OPR(MEM_TOPOLOGY, mem_topology, true),
3111 FEAT_OPR(CLOCKID, clockid, false),
3112 FEAT_OPN(DIR_FORMAT, dir_format, false),
3113#ifdef HAVE_LIBBPF_SUPPORT
3114 FEAT_OPR(BPF_PROG_INFO, bpf_prog_info, false),
3115 FEAT_OPR(BPF_BTF, bpf_btf, false),
3116#endif
3117 FEAT_OPR(COMPRESSED, compressed, false),
3118 FEAT_OPR(CPU_PMU_CAPS, cpu_pmu_caps, false),
3119 FEAT_OPR(CLOCK_DATA, clock_data, false),
3120};
3121
3122struct header_print_data {
3123 FILE *fp;
3124 bool full; /* extended list of headers */
3125};
3126
3127static int perf_file_section__fprintf_info(struct perf_file_section *section,
3128 struct perf_header *ph,
3129 int feat, int fd, void *data)
3130{
3131 struct header_print_data *hd = data;
3132 struct feat_fd ff;
3133
3134 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
3135 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
3136 "%d, continuing...\n", section->offset, feat);
3137 return 0;
3138 }
3139 if (feat >= HEADER_LAST_FEATURE) {
3140 pr_warning("unknown feature %d\n", feat);
3141 return 0;
3142 }
3143 if (!feat_ops[feat].print)
3144 return 0;
3145
3146 ff = (struct feat_fd) {
3147 .fd = fd,
3148 .ph = ph,
3149 };
3150
3151 if (!feat_ops[feat].full_only || hd->full)
3152 feat_ops[feat].print(&ff, hd->fp);
3153 else
3154 fprintf(hd->fp, "# %s info available, use -I to display\n",
3155 feat_ops[feat].name);
3156
3157 return 0;
3158}
3159
3160int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
3161{
3162 struct header_print_data hd;
3163 struct perf_header *header = &session->header;
3164 int fd = perf_data__fd(session->data);
3165 struct stat st;
3166 time_t stctime;
3167 int ret, bit;
3168
3169 hd.fp = fp;
3170 hd.full = full;
3171
3172 ret = fstat(fd, &st);
3173 if (ret == -1)
3174 return -1;
3175
3176 stctime = st.st_mtime;
3177 fprintf(fp, "# captured on : %s", ctime(&stctime));
3178
3179 fprintf(fp, "# header version : %u\n", header->version);
3180 fprintf(fp, "# data offset : %" PRIu64 "\n", header->data_offset);
3181 fprintf(fp, "# data size : %" PRIu64 "\n", header->data_size);
3182 fprintf(fp, "# feat offset : %" PRIu64 "\n", header->feat_offset);
3183
3184 perf_header__process_sections(header, fd, &hd,
3185 perf_file_section__fprintf_info);
3186
3187 if (session->data->is_pipe)
3188 return 0;
3189
3190 fprintf(fp, "# missing features: ");
3191 for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
3192 if (bit)
3193 fprintf(fp, "%s ", feat_ops[bit].name);
3194 }
3195
3196 fprintf(fp, "\n");
3197 return 0;
3198}
3199
3200static int do_write_feat(struct feat_fd *ff, int type,
3201 struct perf_file_section **p,
3202 struct evlist *evlist)
3203{
3204 int err;
3205 int ret = 0;
3206
3207 if (perf_header__has_feat(ff->ph, type)) {
3208 if (!feat_ops[type].write)
3209 return -1;
3210
3211 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
3212 return -1;
3213
3214 (*p)->offset = lseek(ff->fd, 0, SEEK_CUR);
3215
3216 err = feat_ops[type].write(ff, evlist);
3217 if (err < 0) {
3218 pr_debug("failed to write feature %s\n", feat_ops[type].name);
3219
3220 /* undo anything written */
3221 lseek(ff->fd, (*p)->offset, SEEK_SET);
3222
3223 return -1;
3224 }
3225 (*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset;
3226 (*p)++;
3227 }
3228 return ret;
3229}
3230
3231static int perf_header__adds_write(struct perf_header *header,
3232 struct evlist *evlist, int fd)
3233{
3234 int nr_sections;
3235 struct feat_fd ff;
3236 struct perf_file_section *feat_sec, *p;
3237 int sec_size;
3238 u64 sec_start;
3239 int feat;
3240 int err;
3241
3242 ff = (struct feat_fd){
3243 .fd = fd,
3244 .ph = header,
3245 };
3246
3247 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
3248 if (!nr_sections)
3249 return 0;
3250
3251 feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
3252 if (feat_sec == NULL)
3253 return -ENOMEM;
3254
3255 sec_size = sizeof(*feat_sec) * nr_sections;
3256
3257 sec_start = header->feat_offset;
3258 lseek(fd, sec_start + sec_size, SEEK_SET);
3259
3260 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
3261 if (do_write_feat(&ff, feat, &p, evlist))
3262 perf_header__clear_feat(header, feat);
3263 }
3264
3265 lseek(fd, sec_start, SEEK_SET);
3266 /*
3267	 * We may write more than needed due to dropped features, but
3268	 * this is okay: the reader will skip the missing entries.
3269 */
3270 err = do_write(&ff, feat_sec, sec_size);
3271 if (err < 0)
3272 pr_debug("failed to write feature section\n");
3273 free(feat_sec);
3274 return err;
3275}
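/*
 * Resulting on-disk arrangement at header->feat_offset (sketch; N is the
 * number of feature bits that survived the writes above):
 *
 *	feat_offset:	struct perf_file_section[N]	<- table written last
 *			payload of 1st written feature
 *			...
 *			payload of Nth written feature
 *
 * Each table entry carries the payload's absolute offset and size, which
 * is why a reader can simply skip entries of dropped features.
 */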
3276
3277int perf_header__write_pipe(int fd)
3278{
3279 struct perf_pipe_file_header f_header;
3280 struct feat_fd ff;
3281 int err;
3282
3283 ff = (struct feat_fd){ .fd = fd };
3284
3285 f_header = (struct perf_pipe_file_header){
3286 .magic = PERF_MAGIC,
3287 .size = sizeof(f_header),
3288 };
3289
3290 err = do_write(&ff, &f_header, sizeof(f_header));
3291 if (err < 0) {
3292 pr_debug("failed to write perf pipe header\n");
3293 return err;
3294 }
3295
3296 return 0;
3297}
3298
3299int perf_session__write_header(struct perf_session *session,
3300 struct evlist *evlist,
3301 int fd, bool at_exit)
3302{
3303 struct perf_file_header f_header;
3304 struct perf_file_attr f_attr;
3305 struct perf_header *header = &session->header;
3306 struct evsel *evsel;
3307 struct feat_fd ff;
3308 u64 attr_offset;
3309 int err;
3310
3311	ff = (struct feat_fd){ .fd = fd };
3312 lseek(fd, sizeof(f_header), SEEK_SET);
3313
3314 evlist__for_each_entry(session->evlist, evsel) {
3315 evsel->id_offset = lseek(fd, 0, SEEK_CUR);
3316 err = do_write(&ff, evsel->core.id, evsel->core.ids * sizeof(u64));
3317 if (err < 0) {
3318 pr_debug("failed to write perf header\n");
3319 return err;
3320 }
3321 }
3322
3323 attr_offset = lseek(ff.fd, 0, SEEK_CUR);
3324
3325 evlist__for_each_entry(evlist, evsel) {
3326 if (evsel->core.attr.size < sizeof(evsel->core.attr)) {
3327 /*
3328 * We are likely in "perf inject" and have read
3329			 * from an older file. Update the attr size so that
3330			 * the reader gets the right offset to the ids.
3331 */
3332 evsel->core.attr.size = sizeof(evsel->core.attr);
3333 }
3334 f_attr = (struct perf_file_attr){
3335 .attr = evsel->core.attr,
3336 .ids = {
3337 .offset = evsel->id_offset,
3338 .size = evsel->core.ids * sizeof(u64),
3339 }
3340 };
3341 err = do_write(&ff, &f_attr, sizeof(f_attr));
3342 if (err < 0) {
3343 pr_debug("failed to write perf header attribute\n");
3344 return err;
3345 }
3346 }
3347
3348 if (!header->data_offset)
3349 header->data_offset = lseek(fd, 0, SEEK_CUR);
3350 header->feat_offset = header->data_offset + header->data_size;
3351
3352 if (at_exit) {
3353 err = perf_header__adds_write(header, evlist, fd);
3354 if (err < 0)
3355 return err;
3356 }
3357
3358 f_header = (struct perf_file_header){
3359 .magic = PERF_MAGIC,
3360 .size = sizeof(f_header),
3361 .attr_size = sizeof(f_attr),
3362 .attrs = {
3363 .offset = attr_offset,
3364 .size = evlist->core.nr_entries * sizeof(f_attr),
3365 },
3366 .data = {
3367 .offset = header->data_offset,
3368 .size = header->data_size,
3369 },
3370 /* event_types is ignored, store zeros */
3371 };
3372
3373 memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
3374
3375 lseek(fd, 0, SEEK_SET);
3376 err = do_write(&ff, &f_header, sizeof(f_header));
3377 if (err < 0) {
3378 pr_debug("failed to write perf header\n");
3379 return err;
3380 }
3381 lseek(fd, header->data_offset + header->data_size, SEEK_SET);
3382
3383 return 0;
3384}
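/*
 * Overall perf.data layout produced above in non-pipe mode:
 *
 *	0:		struct perf_file_header ("PERFILE2" magic, offsets)
 *			sample id arrays, one u64 list per event
 *	attrs.offset:	struct perf_file_attr[nr_entries]
 *	data.offset:	the event stream, data.size bytes
 *	feat_offset:	feature sections, see perf_header__adds_write()
 */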
3385
3386static int perf_header__getbuffer64(struct perf_header *header,
3387 int fd, void *buf, size_t size)
3388{
3389 if (readn(fd, buf, size) <= 0)
3390 return -1;
3391
3392 if (header->needs_swap)
3393 mem_bswap_64(buf, size);
3394
3395 return 0;
3396}
3397
3398int perf_header__process_sections(struct perf_header *header, int fd,
3399 void *data,
3400 int (*process)(struct perf_file_section *section,
3401 struct perf_header *ph,
3402 int feat, int fd, void *data))
3403{
3404 struct perf_file_section *feat_sec, *sec;
3405 int nr_sections;
3406 int sec_size;
3407 int feat;
3408 int err;
3409
3410 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
3411 if (!nr_sections)
3412 return 0;
3413
3414 feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
3415 if (!feat_sec)
3416 return -1;
3417
3418 sec_size = sizeof(*feat_sec) * nr_sections;
3419
3420 lseek(fd, header->feat_offset, SEEK_SET);
3421
3422 err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
3423 if (err < 0)
3424 goto out_free;
3425
3426 for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
3427 err = process(sec++, header, feat, fd, data);
3428 if (err < 0)
3429 goto out_free;
3430 }
3431 err = 0;
3432out_free:
3433 free(feat_sec);
3434 return err;
3435}
3436
3437static const int attr_file_abi_sizes[] = {
3438 [0] = PERF_ATTR_SIZE_VER0,
3439 [1] = PERF_ATTR_SIZE_VER1,
3440 [2] = PERF_ATTR_SIZE_VER2,
3441 [3] = PERF_ATTR_SIZE_VER3,
3442 [4] = PERF_ATTR_SIZE_VER4,
3443 0,
3444};
3445
3446/*
3447 * In the legacy file format, the magic number is not used to encode endianness.
3448 * hdr_sz was used instead. But given that hdr_sz can vary based
3449 * on ABI revisions, we need to try all known header sizes in both
3450 * byte orders to detect the endianness.
3451 */
3452static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
3453{
3454 uint64_t ref_size, attr_size;
3455 int i;
3456
3457 for (i = 0 ; attr_file_abi_sizes[i]; i++) {
3458 ref_size = attr_file_abi_sizes[i]
3459 + sizeof(struct perf_file_section);
3460 if (hdr_sz != ref_size) {
3461 attr_size = bswap_64(hdr_sz);
3462 if (attr_size != ref_size)
3463 continue;
3464
3465 ph->needs_swap = true;
3466 }
3467 pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
3468 i,
3469 ph->needs_swap);
3470 return 0;
3471 }
3472 /* could not determine endianness */
3473 return -1;
3474}
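/*
 * Worked example: an ABI0 file stores hdr_sz == PERF_ATTR_SIZE_VER0 +
 * sizeof(struct perf_file_section) == 64 + 16 == 80 (0x50). Read on a
 * host of the opposite endianness the field shows up as bswap_64(0x50)
 * == 0x5000000000000000, which matches a reference size only after
 * swapping, so needs_swap gets set for that combination.
 */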
3475
3476#define PERF_PIPE_HDR_VER0 16
3477
3478static const size_t attr_pipe_abi_sizes[] = {
3479 [0] = PERF_PIPE_HDR_VER0,
3480 0,
3481};
3482
3483/*
3484 * In the legacy pipe format, there is an implicit assumption that the host
3485 * recording the samples and the host parsing them share the same endianness.
3486 * This is not always the case: the pipe output can be redirected into a file
3487 * and analyzed on a different machine with possibly a different endianness
3488 * and perf_event ABI revision in the perf tool itself.
3489 */
3490static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
3491{
3492 u64 attr_size;
3493 int i;
3494
3495 for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
3496 if (hdr_sz != attr_pipe_abi_sizes[i]) {
3497 attr_size = bswap_64(hdr_sz);
3498			if (attr_size != attr_pipe_abi_sizes[i])
3499 continue;
3500
3501 ph->needs_swap = true;
3502 }
3503 pr_debug("Pipe ABI%d perf.data file detected\n", i);
3504 return 0;
3505 }
3506 return -1;
3507}
3508
3509bool is_perf_magic(u64 magic)
3510{
3511 if (!memcmp(&magic, __perf_magic1, sizeof(magic))
3512 || magic == __perf_magic2
3513 || magic == __perf_magic2_sw)
3514 return true;
3515
3516 return false;
3517}
3518
3519static int check_magic_endian(u64 magic, uint64_t hdr_sz,
3520 bool is_pipe, struct perf_header *ph)
3521{
3522 int ret;
3523
3524 /* check for legacy format */
3525 ret = memcmp(&magic, __perf_magic1, sizeof(magic));
3526 if (ret == 0) {
3527 ph->version = PERF_HEADER_VERSION_1;
3528 pr_debug("legacy perf.data format\n");
3529 if (is_pipe)
3530 return try_all_pipe_abis(hdr_sz, ph);
3531
3532 return try_all_file_abis(hdr_sz, ph);
3533 }
3534 /*
3535 * the new magic number serves two purposes:
3536 * - unique number to identify actual perf.data files
3537 * - encode endianness of file
3538 */
3539 ph->version = PERF_HEADER_VERSION_2;
3540
3541 /* check magic number with one endianness */
3542 if (magic == __perf_magic2)
3543 return 0;
3544
3545 /* check magic number with opposite endianness */
3546 if (magic != __perf_magic2_sw)
3547 return -1;
3548
3549 ph->needs_swap = true;
3550
3551 return 0;
3552}
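/*
 * Worked example of the magic scheme: the bytes "PERFILE2" read as a u64
 * on a little-endian host give 0x32454c4946524550 ('P' == 0x50 in the
 * lowest byte, '2' == 0x32 in the highest), i.e. __perf_magic2. The same
 * bytes read on a big-endian host give 0x50455246494c4532, i.e.
 * __perf_magic2_sw, which is how the function above decides to set
 * needs_swap.
 */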
3553
3554int perf_file_header__read(struct perf_file_header *header,
3555 struct perf_header *ph, int fd)
3556{
3557 ssize_t ret;
3558
3559 lseek(fd, 0, SEEK_SET);
3560
3561 ret = readn(fd, header, sizeof(*header));
3562 if (ret <= 0)
3563 return -1;
3564
3565 if (check_magic_endian(header->magic,
3566 header->attr_size, false, ph) < 0) {
3567 pr_debug("magic/endian check failed\n");
3568 return -1;
3569 }
3570
3571 if (ph->needs_swap) {
3572 mem_bswap_64(header, offsetof(struct perf_file_header,
3573 adds_features));
3574 }
3575
3576 if (header->size != sizeof(*header)) {
3577 /* Support the previous format */
3578 if (header->size == offsetof(typeof(*header), adds_features))
3579 bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
3580 else
3581 return -1;
3582 } else if (ph->needs_swap) {
3583 /*
3584 * feature bitmap is declared as an array of unsigned longs --
3585 * not good since its size can differ between the host that
3586 * generated the data file and the host analyzing the file.
3587 *
3588 * We need to handle endianness, but we don't know the size of
3589 * the unsigned long where the file was generated. Take a best
3590 * guess at determining it: try 64-bit swap first (ie., file
3591 * created on a 64-bit host), and check if the hostname feature
3592 * bit is set (this feature bit is forced on as of fbe96f2).
3593		 * If the bit is not set, undo the 64-bit swap and try a 32-bit
3594 * swap. If the hostname bit is still not set (e.g., older data
3595 * file), punt and fallback to the original behavior --
3596 * clearing all feature bits and setting buildid.
3597 */
3598 mem_bswap_64(&header->adds_features,
3599 BITS_TO_U64(HEADER_FEAT_BITS));
3600
3601 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
3602 /* unswap as u64 */
3603 mem_bswap_64(&header->adds_features,
3604 BITS_TO_U64(HEADER_FEAT_BITS));
3605
3606 /* unswap as u32 */
3607 mem_bswap_32(&header->adds_features,
3608 BITS_TO_U32(HEADER_FEAT_BITS));
3609 }
3610
3611 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
3612 bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
3613 set_bit(HEADER_BUILD_ID, header->adds_features);
3614 }
3615 }
3616
3617 memcpy(&ph->adds_features, &header->adds_features,
3618 sizeof(ph->adds_features));
3619
3620 ph->data_offset = header->data.offset;
3621 ph->data_size = header->data.size;
3622 ph->feat_offset = header->data.offset + header->data.size;
3623 return 0;
3624}
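/*
 * Worked example for the bitmap heuristic above: a file written by a
 * 32-bit opposite-endian host has each u32 word of adds_features byte
 * swapped. mem_bswap_64() would additionally exchange each pair of
 * words, so the HEADER_HOSTNAME bit lands in the wrong half and the
 * test fails; undoing that and applying mem_bswap_32() swaps every word
 * in place, and the bit reappears where expected.
 */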
3625
3626static int perf_file_section__process(struct perf_file_section *section,
3627 struct perf_header *ph,
3628 int feat, int fd, void *data)
3629{
3630 struct feat_fd fdd = {
3631 .fd = fd,
3632 .ph = ph,
3633 .size = section->size,
3634 .offset = section->offset,
3635 };
3636
3637 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
3638 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
3639 "%d, continuing...\n", section->offset, feat);
3640 return 0;
3641 }
3642
3643 if (feat >= HEADER_LAST_FEATURE) {
3644 pr_debug("unknown feature %d, continuing...\n", feat);
3645 return 0;
3646 }
3647
3648 if (!feat_ops[feat].process)
3649 return 0;
3650
3651 return feat_ops[feat].process(&fdd, data);
3652}
3653
3654static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
3655 struct perf_header *ph,
3656 struct perf_data* data,
3657 bool repipe)
3658{
3659 struct feat_fd ff = {
3660 .fd = STDOUT_FILENO,
3661 .ph = ph,
3662 };
3663 ssize_t ret;
3664
3665 ret = perf_data__read(data, header, sizeof(*header));
3666 if (ret <= 0)
3667 return -1;
3668
3669 if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
3670 pr_debug("endian/magic failed\n");
3671 return -1;
3672 }
3673
3674 if (ph->needs_swap)
3675 header->size = bswap_64(header->size);
3676
3677 if (repipe && do_write(&ff, header, sizeof(*header)) < 0)
3678 return -1;
3679
3680 return 0;
3681}
3682
3683static int perf_header__read_pipe(struct perf_session *session)
3684{
3685 struct perf_header *header = &session->header;
3686 struct perf_pipe_file_header f_header;
3687
3688 if (perf_file_header__read_pipe(&f_header, header, session->data,
3689 session->repipe) < 0) {
3690 pr_debug("incompatible file format\n");
3691 return -EINVAL;
3692 }
3693
3694 return f_header.size == sizeof(f_header) ? 0 : -1;
3695}
3696
3697static int read_attr(int fd, struct perf_header *ph,
3698 struct perf_file_attr *f_attr)
3699{
3700 struct perf_event_attr *attr = &f_attr->attr;
3701 size_t sz, left;
3702 size_t our_sz = sizeof(f_attr->attr);
3703 ssize_t ret;
3704
3705 memset(f_attr, 0, sizeof(*f_attr));
3706
3707 /* read minimal guaranteed structure */
3708 ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
3709 if (ret <= 0) {
3710 pr_debug("cannot read %d bytes of header attr\n",
3711 PERF_ATTR_SIZE_VER0);
3712 return -1;
3713 }
3714
3715 /* on file perf_event_attr size */
3716 sz = attr->size;
3717
3718 if (ph->needs_swap)
3719 sz = bswap_32(sz);
3720
3721 if (sz == 0) {
3722 /* assume ABI0 */
3723 sz = PERF_ATTR_SIZE_VER0;
3724 } else if (sz > our_sz) {
3725 pr_debug("file uses a more recent and unsupported ABI"
3726 " (%zu bytes extra)\n", sz - our_sz);
3727 return -1;
3728 }
3729 /* what we have not yet read and that we know about */
3730 left = sz - PERF_ATTR_SIZE_VER0;
3731 if (left) {
3732 void *ptr = attr;
3733 ptr += PERF_ATTR_SIZE_VER0;
3734
3735 ret = readn(fd, ptr, left);
3736 }
3737 /* read perf_file_section, ids are read in caller */
3738 ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
3739
3740 return ret <= 0 ? -1 : 0;
3741}
3742
3743static int evsel__prepare_tracepoint_event(struct evsel *evsel, struct tep_handle *pevent)
3744{
3745 struct tep_event *event;
3746 char bf[128];
3747
3748 /* already prepared */
3749 if (evsel->tp_format)
3750 return 0;
3751
3752 if (pevent == NULL) {
3753 pr_debug("broken or missing trace data\n");
3754 return -1;
3755 }
3756
3757 event = tep_find_event(pevent, evsel->core.attr.config);
3758 if (event == NULL) {
3759 pr_debug("cannot find event format for %d\n", (int)evsel->core.attr.config);
3760 return -1;
3761 }
3762
3763 if (!evsel->name) {
3764 snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
3765 evsel->name = strdup(bf);
3766 if (evsel->name == NULL)
3767 return -1;
3768 }
3769
3770 evsel->tp_format = event;
3771 return 0;
3772}
3773
3774static int evlist__prepare_tracepoint_events(struct evlist *evlist, struct tep_handle *pevent)
3775{
3776 struct evsel *pos;
3777
3778 evlist__for_each_entry(evlist, pos) {
3779 if (pos->core.attr.type == PERF_TYPE_TRACEPOINT &&
3780 evsel__prepare_tracepoint_event(pos, pevent))
3781 return -1;
3782 }
3783
3784 return 0;
3785}
3786
3787int perf_session__read_header(struct perf_session *session)
3788{
3789 struct perf_data *data = session->data;
3790 struct perf_header *header = &session->header;
3791 struct perf_file_header f_header;
3792 struct perf_file_attr f_attr;
3793 u64 f_id;
3794 int nr_attrs, nr_ids, i, j, err;
3795 int fd = perf_data__fd(data);
3796
3797 session->evlist = evlist__new();
3798 if (session->evlist == NULL)
3799 return -ENOMEM;
3800
3801 session->evlist->env = &header->env;
3802 session->machines.host.env = &header->env;
3803
3804 /*
3805	 * We can read 'pipe' data events from a regular file as well,
3806	 * so check for the pipe header regardless of source.
3807 */
3808 err = perf_header__read_pipe(session);
3809 if (!err || perf_data__is_pipe(data)) {
3810 data->is_pipe = true;
3811 return err;
3812 }
3813
3814 if (perf_file_header__read(&f_header, header, fd) < 0)
3815 return -EINVAL;
3816
3817 /*
3818 * Sanity check that perf.data was written cleanly; data size is
3819 * initialized to 0 and updated only if the on_exit function is run.
3820 * If data size is still 0 then the file contains only partial
3821	 * information. Just warn the user and process as much as possible.
3822 */
3823 if (f_header.data.size == 0) {
3824 pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
3825 "Was the 'perf record' command properly terminated?\n",
3826 data->file.path);
3827 }
3828
3829 if (f_header.attr_size == 0) {
3830 pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
3831 "Was the 'perf record' command properly terminated?\n",
3832 data->file.path);
3833 return -EINVAL;
3834 }
3835
3836 nr_attrs = f_header.attrs.size / f_header.attr_size;
3837 lseek(fd, f_header.attrs.offset, SEEK_SET);
3838
3839 for (i = 0; i < nr_attrs; i++) {
3840 struct evsel *evsel;
3841 off_t tmp;
3842
3843 if (read_attr(fd, header, &f_attr) < 0)
3844 goto out_errno;
3845
3846 if (header->needs_swap) {
3847 f_attr.ids.size = bswap_64(f_attr.ids.size);
3848 f_attr.ids.offset = bswap_64(f_attr.ids.offset);
3849 perf_event__attr_swap(&f_attr.attr);
3850 }
3851
3852 tmp = lseek(fd, 0, SEEK_CUR);
3853 evsel = evsel__new(&f_attr.attr);
3854
3855 if (evsel == NULL)
3856 goto out_delete_evlist;
3857
3858 evsel->needs_swap = header->needs_swap;
3859 /*
3860 * Do it before so that if perf_evsel__alloc_id fails, this
3861 * entry gets purged too at evlist__delete().
3862 */
3863 evlist__add(session->evlist, evsel);
3864
3865 nr_ids = f_attr.ids.size / sizeof(u64);
3866 /*
3867		 * We don't have the cpu and thread maps in the header, so
3868 * for allocating the perf_sample_id table we fake 1 cpu and
3869 * hattr->ids threads.
3870 */
3871 if (perf_evsel__alloc_id(&evsel->core, 1, nr_ids))
3872 goto out_delete_evlist;
3873
3874 lseek(fd, f_attr.ids.offset, SEEK_SET);
3875
3876 for (j = 0; j < nr_ids; j++) {
3877 if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
3878 goto out_errno;
3879
3880 perf_evlist__id_add(&session->evlist->core, &evsel->core, 0, j, f_id);
3881 }
3882
3883 lseek(fd, tmp, SEEK_SET);
3884 }
3885
3886 perf_header__process_sections(header, fd, &session->tevent,
3887 perf_file_section__process);
3888
3889 if (evlist__prepare_tracepoint_events(session->evlist, session->tevent.pevent))
3890 goto out_delete_evlist;
3891
3892 return 0;
3893out_errno:
3894 return -errno;
3895
3896out_delete_evlist:
3897 evlist__delete(session->evlist);
3898 session->evlist = NULL;
3899 return -ENOMEM;
3900}
3901
3902int perf_event__process_feature(struct perf_session *session,
3903 union perf_event *event)
3904{
3905 struct perf_tool *tool = session->tool;
3906 struct feat_fd ff = { .fd = 0 };
3907 struct perf_record_header_feature *fe = (struct perf_record_header_feature *)event;
3908 int type = fe->header.type;
3909 u64 feat = fe->feat_id;
3910
3911 if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
3912 pr_warning("invalid record type %d in pipe-mode\n", type);
3913 return 0;
3914 }
3915 if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) {
3916		pr_warning("invalid feature id %" PRIu64 " in pipe-mode\n", feat);
3917 return -1;
3918 }
3919
3920 if (!feat_ops[feat].process)
3921 return 0;
3922
3923 ff.buf = (void *)fe->data;
3924 ff.size = event->header.size - sizeof(*fe);
3925 ff.ph = &session->header;
3926
3927 if (feat_ops[feat].process(&ff, NULL))
3928 return -1;
3929
3930 if (!feat_ops[feat].print || !tool->show_feat_hdr)
3931 return 0;
3932
3933 if (!feat_ops[feat].full_only ||
3934 tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
3935 feat_ops[feat].print(&ff, stdout);
3936 } else {
3937 fprintf(stdout, "# %s info available, use -I to display\n",
3938 feat_ops[feat].name);
3939 }
3940
3941 return 0;
3942}
3943
3944size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
3945{
3946 struct perf_record_event_update *ev = &event->event_update;
3947 struct perf_record_event_update_scale *ev_scale;
3948 struct perf_record_event_update_cpus *ev_cpus;
3949 struct perf_cpu_map *map;
3950 size_t ret;
3951
3952 ret = fprintf(fp, "\n... id: %" PRI_lu64 "\n", ev->id);
3953
3954 switch (ev->type) {
3955 case PERF_EVENT_UPDATE__SCALE:
3956 ev_scale = (struct perf_record_event_update_scale *)ev->data;
3957 ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
3958 break;
3959 case PERF_EVENT_UPDATE__UNIT:
3960 ret += fprintf(fp, "... unit: %s\n", ev->data);
3961 break;
3962 case PERF_EVENT_UPDATE__NAME:
3963 ret += fprintf(fp, "... name: %s\n", ev->data);
3964 break;
3965 case PERF_EVENT_UPDATE__CPUS:
3966 ev_cpus = (struct perf_record_event_update_cpus *)ev->data;
3967 ret += fprintf(fp, "... ");
3968
3969 map = cpu_map__new_data(&ev_cpus->cpus);
3970 if (map)
3971 ret += cpu_map__fprintf(map, fp);
3972 else
3973 ret += fprintf(fp, "failed to get cpus\n");
3974 break;
3975 default:
3976 ret += fprintf(fp, "... unknown type\n");
3977 break;
3978 }
3979
3980 return ret;
3981}
3982
3983int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
3984 union perf_event *event,
3985 struct evlist **pevlist)
3986{
3987 u32 i, ids, n_ids;
3988 struct evsel *evsel;
3989 struct evlist *evlist = *pevlist;
3990
3991 if (evlist == NULL) {
3992 *pevlist = evlist = evlist__new();
3993 if (evlist == NULL)
3994 return -ENOMEM;
3995 }
3996
3997 evsel = evsel__new(&event->attr.attr);
3998 if (evsel == NULL)
3999 return -ENOMEM;
4000
4001 evlist__add(evlist, evsel);
4002
4003 ids = event->header.size;
4004 ids -= (void *)&event->attr.id - (void *)event;
4005 n_ids = ids / sizeof(u64);
4006 /*
4007	 * We don't have the cpu and thread maps in the header, so
4008 * for allocating the perf_sample_id table we fake 1 cpu and
4009 * hattr->ids threads.
4010 */
4011 if (perf_evsel__alloc_id(&evsel->core, 1, n_ids))
4012 return -ENOMEM;
4013
4014 for (i = 0; i < n_ids; i++) {
4015 perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, event->attr.id[i]);
4016 }
4017
4018 return 0;
4019}
4020
4021int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
4022 union perf_event *event,
4023 struct evlist **pevlist)
4024{
4025 struct perf_record_event_update *ev = &event->event_update;
4026 struct perf_record_event_update_scale *ev_scale;
4027 struct perf_record_event_update_cpus *ev_cpus;
4028 struct evlist *evlist;
4029 struct evsel *evsel;
4030 struct perf_cpu_map *map;
4031
4032 if (!pevlist || *pevlist == NULL)
4033 return -EINVAL;
4034
4035 evlist = *pevlist;
4036
4037 evsel = evlist__id2evsel(evlist, ev->id);
4038 if (evsel == NULL)
4039 return -EINVAL;
4040
4041 switch (ev->type) {
4042 case PERF_EVENT_UPDATE__UNIT:
4043 evsel->unit = strdup(ev->data);
4044 break;
4045 case PERF_EVENT_UPDATE__NAME:
4046 evsel->name = strdup(ev->data);
4047 break;
4048 case PERF_EVENT_UPDATE__SCALE:
4049 ev_scale = (struct perf_record_event_update_scale *)ev->data;
4050 evsel->scale = ev_scale->scale;
4051 break;
4052 case PERF_EVENT_UPDATE__CPUS:
4053 ev_cpus = (struct perf_record_event_update_cpus *)ev->data;
4054
4055 map = cpu_map__new_data(&ev_cpus->cpus);
4056 if (map)
4057 evsel->core.own_cpus = map;
4058 else
4059 pr_err("failed to get event_update cpus\n");
4060 default:
4061 break;
4062 }
4063
4064 return 0;
4065}
4066
4067int perf_event__process_tracing_data(struct perf_session *session,
4068 union perf_event *event)
4069{
4070 ssize_t size_read, padding, size = event->tracing_data.size;
4071 int fd = perf_data__fd(session->data);
4072 char buf[BUFSIZ];
4073
4074 /*
4075	 * The pipe fd is already in the proper place and in any case
4076	 * we can't move it, and we'd break the case where we read
4077	 * 'pipe' data from a regular file. trace_report() reads
4078 * data from 'fd' so we need to set it directly behind the
4079 * event, where the tracing data starts.
4080 */
4081 if (!perf_data__is_pipe(session->data)) {
4082 off_t offset = lseek(fd, 0, SEEK_CUR);
4083
4084 /* setup for reading amidst mmap */
4085 lseek(fd, offset + sizeof(struct perf_record_header_tracing_data),
4086 SEEK_SET);
4087 }
4088
4089 size_read = trace_report(fd, &session->tevent,
4090 session->repipe);
4091 padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
4092
4093 if (readn(fd, buf, padding) < 0) {
4094		pr_err("%s: reading input file\n", __func__);
4095 return -1;
4096 }
4097 if (session->repipe) {
4098 int retw = write(STDOUT_FILENO, buf, padding);
4099 if (retw <= 0 || retw != padding) {
4100			pr_err("%s: repiping tracing data padding\n", __func__);
4101 return -1;
4102 }
4103 }
4104
4105 if (size_read + padding != size) {
4106		pr_err("%s: tracing data size mismatch\n", __func__);
4107 return -1;
4108 }
4109
4110 evlist__prepare_tracepoint_events(session->evlist, session->tevent.pevent);
4111
4112 return size_read + padding;
4113}
4114
4115int perf_event__process_build_id(struct perf_session *session,
4116 union perf_event *event)
4117{
4118 __event_process_build_id(&event->build_id,
4119 event->build_id.filename,
4120 session);
4121 return 0;
4122}