Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * builtin-inject.c
4 *
5 * Builtin inject command: Examine the live mode (stdin) event stream
6 * and repipe it to stdout while optionally injecting additional
7 * events into it.
8 */
9#include "builtin.h"
10
11#include "util/color.h"
12#include "util/dso.h"
13#include "util/vdso.h"
14#include "util/evlist.h"
15#include "util/evsel.h"
16#include "util/map.h"
17#include "util/session.h"
18#include "util/tool.h"
19#include "util/debug.h"
20#include "util/build-id.h"
21#include "util/data.h"
22#include "util/auxtrace.h"
23#include "util/jit.h"
24#include "util/symbol.h"
25#include "util/synthetic-events.h"
26#include "util/thread.h"
27#include "util/namespaces.h"
28#include "util/util.h"
29
30#include <internal/lib.h>
31
32#include <linux/err.h>
33#include <subcmd/parse-options.h>
34#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
35
36#include <linux/list.h>
37#include <linux/string.h>
38#include <errno.h>
39#include <signal.h>
40
/*
 * State for one 'perf inject' run: the event-processing callbacks, the
 * input session, the option flags parsed from the command line, the
 * output data file and bookkeeping used while rewriting the stream.
 */
struct perf_inject {
	struct perf_tool tool;		/* event callbacks; must stay first for container_of() */
	struct perf_session *session;	/* input session being read */
	bool build_ids;			/* -b: inject build-ids for DSOs hit by samples */
	bool build_id_all;		/* --buildid-all: inject build-ids for all mapped DSOs */
	bool sched_stat;		/* -s: merge sched-stat and sched-switch events */
	bool have_auxtrace;		/* set once an AUXTRACE event is seen */
	bool strip;			/* --strip: drop non-synthesized events */
	bool jit_mode;			/* -j: merge jitdump files */
	bool in_place_update;		/* --vm-time-correlation: rewrite input file itself */
	bool in_place_update_dry_run;	/* dry-run variant of the above */
	bool is_pipe;			/* input or output is a pipe */
	bool copy_kcore_dir;		/* copy kcore_dir* from input dir to output dir */
	const char *input_name;		/* input path ("-" for stdin) */
	struct perf_data output;	/* output data file */
	u64 bytes_written;		/* event bytes emitted so far (becomes header data_size) */
	u64 aux_id;			/* first AUX sample id seen (see perf_event__drop_aux) */
	struct list_head samples;	/* saved sched_switch events, keyed by tid */
	struct itrace_synth_opts itrace_synth_opts;	/* --itrace options */
	char event_copy[PERF_SAMPLE_MAX_SIZE];	/* scratch buffer for rewritten events */
	struct perf_file_section secs[HEADER_FEAT_BITS];	/* input header feature sections */
};
63
/*
 * Saved copy of a sched_switch sample, queued on perf_inject::samples
 * until the matching sched_stat_* event (or the task's exit) arrives.
 */
struct event_entry {
	struct list_head node;	/* link in perf_inject::samples */
	u32 tid;		/* tid the saved event belongs to */
	union perf_event event[];	/* flexible array holding the full record */
};
69
70static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
71 struct machine *machine, u8 cpumode, u32 flags);
72
73static int output_bytes(struct perf_inject *inject, void *buf, size_t sz)
74{
75 ssize_t size;
76
77 size = perf_data__write(&inject->output, buf, sz);
78 if (size < 0)
79 return -errno;
80
81 inject->bytes_written += size;
82 return 0;
83}
84
/* Copy one event verbatim to the output stream. */
static int perf_event__repipe_synth(struct perf_tool *tool,
				    union perf_event *event)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);

	return output_bytes(inject, event, event->header.size);
}
93
/* Ordered-events variant of repipe: the oe argument is not needed. */
static int perf_event__repipe_oe_synth(struct perf_tool *tool,
				       union perf_event *event,
				       struct ordered_events *oe __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}
100
#ifdef HAVE_JITDUMP
/* Discard an ordered event entirely (used in JIT mode). */
static int perf_event__drop_oe(struct perf_tool *tool __maybe_unused,
			       union perf_event *event __maybe_unused,
			       struct ordered_events *oe __maybe_unused)
{
	return 0;
}
#endif
109
/* Repipe a session-level (op2) synthesized event unchanged. */
static int perf_event__repipe_op2_synth(struct perf_session *session,
					union perf_event *event)
{
	return perf_event__repipe_synth(session->tool, event);
}
115
/* Repipe an op4 event (e.g. compressed records); extra args are unused. */
static int perf_event__repipe_op4_synth(struct perf_session *session,
					union perf_event *event,
					u64 data __maybe_unused,
					const char *str __maybe_unused)
{
	return perf_event__repipe_synth(session->tool, event);
}
123
124static int perf_event__repipe_attr(struct perf_tool *tool,
125 union perf_event *event,
126 struct evlist **pevlist)
127{
128 struct perf_inject *inject = container_of(tool, struct perf_inject,
129 tool);
130 int ret;
131
132 ret = perf_event__process_attr(tool, event, pevlist);
133 if (ret)
134 return ret;
135
136 if (!inject->is_pipe)
137 return 0;
138
139 return perf_event__repipe_synth(tool, event);
140}
141
/* Pass event-update records straight through without processing them. */
static int perf_event__repipe_event_update(struct perf_tool *tool,
					   union perf_event *event,
					   struct evlist **pevlist __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}
148
149#ifdef HAVE_AUXTRACE_SUPPORT
150
151static int copy_bytes(struct perf_inject *inject, int fd, off_t size)
152{
153 char buf[4096];
154 ssize_t ssz;
155 int ret;
156
157 while (size > 0) {
158 ssz = read(fd, buf, min(size, (off_t)sizeof(buf)));
159 if (ssz < 0)
160 return -errno;
161 ret = output_bytes(inject, buf, ssz);
162 if (ret)
163 return ret;
164 size -= ssz;
165 }
166
167 return 0;
168}
169
/*
 * Repipe an AUXTRACE event together with its trailing AUX data.
 *
 * For file output, record the event's output offset in the auxtrace
 * index first.  When the input is a pipe (or not fully mmapped) the AUX
 * data must be streamed from the input fd; otherwise the event and its
 * data are contiguous in memory and can be written in one go.
 *
 * Returns the AUX data size consumed, or a negative error code.
 */
static s64 perf_event__repipe_auxtrace(struct perf_session *session,
				       union perf_event *event)
{
	struct perf_tool *tool = session->tool;
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);
	int ret;

	inject->have_auxtrace = true;

	if (!inject->output.is_pipe) {
		off_t offset;

		offset = lseek(inject->output.file.fd, 0, SEEK_CUR);
		if (offset == -1)
			return -errno;
		ret = auxtrace_index__auxtrace_event(&session->auxtrace_index,
						     event, offset);
		if (ret < 0)
			return ret;
	}

	if (perf_data__is_pipe(session->data) || !session->one_mmap) {
		/* AUX data follows in the input stream; copy it separately. */
		ret = output_bytes(inject, event, event->header.size);
		if (ret < 0)
			return ret;
		ret = copy_bytes(inject, perf_data__fd(session->data),
				 event->auxtrace.size);
	} else {
		/* Event and AUX data are contiguous in the mmapped input. */
		ret = output_bytes(inject, event,
				   event->header.size + event->auxtrace.size);
	}
	if (ret < 0)
		return ret;

	return event->auxtrace.size;
}
207
#else

/* Stub used when perf was built without AUX area tracing support. */
static s64
perf_event__repipe_auxtrace(struct perf_session *session __maybe_unused,
			    union perf_event *event __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

#endif
219
/* Default handler: forward the event to the output unchanged. */
static int perf_event__repipe(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}
227
/* Discard the event entirely (used by --strip). */
static int perf_event__drop(struct perf_tool *tool __maybe_unused,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}
235
/*
 * Discard an AUX-related event, remembering the id of the first one seen
 * in inject->aux_id (installed for aux/itrace_start/aux_output_hw_id in
 * itrace mode, see __cmd_inject()).
 */
static int perf_event__drop_aux(struct perf_tool *tool,
				union perf_event *event __maybe_unused,
				struct perf_sample *sample,
				struct machine *machine __maybe_unused)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);

	if (!inject->aux_id)
		inject->aux_id = sample->id;

	return 0;
}
248
/*
 * Return a copy of @event with the AUX area sample data cut out, or the
 * original event if the recorded sizes look inconsistent.
 *
 * sz1 covers everything from the event start up to the aux_sample data
 * (including its preceding u64 size field); sz2 is whatever follows the
 * aux data.  The trimmed event is assembled in inject->event_copy.
 */
static union perf_event *
perf_inject__cut_auxtrace_sample(struct perf_inject *inject,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	size_t sz1 = sample->aux_sample.data - (void *)event;
	size_t sz2 = event->header.size - sample->aux_sample.size - sz1;
	union perf_event *ev = (union perf_event *)inject->event_copy;

	/* Sanity-check the sizes before trusting them for the memcpy()s. */
	if (sz1 > event->header.size || sz2 > event->header.size ||
	    sz1 + sz2 > event->header.size ||
	    sz1 < sizeof(struct perf_event_header) + sizeof(u64))
		return event;

	memcpy(ev, event, sz1);
	memcpy((void *)ev + sz1, (void *)event + event->header.size - sz2, sz2);
	ev->header.size = sz1 + sz2;
	/* Zero the aux_sample size field (the u64 just before the cut data). */
	((u64 *)((void *)ev + sz1))[-1] = 0;

	return ev;
}
270
/*
 * Per-evsel sample handler; installed in evsel->handler (see the
 * sched-stat wiring in __cmd_inject()) and dispatched from
 * perf_event__repipe_sample().
 */
typedef int (*inject_handler)(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample,
			      struct evsel *evsel,
			      struct machine *machine);
276
/*
 * Sample handler: dispatch to a per-evsel handler if one is installed;
 * otherwise mark the sample's DSO as hit (for build-id collection),
 * optionally strip the AUX area sample data, and repipe the event.
 */
static int perf_event__repipe_sample(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct evsel *evsel,
				     struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);

	if (evsel && evsel->handler) {
		inject_handler f = evsel->handler;
		return f(tool, event, sample, evsel, machine);
	}

	build_id__mark_dso_hit(tool, event, sample, evsel, machine);

	if (inject->itrace_synth_opts.set && sample->aux_sample.size)
		event = perf_inject__cut_auxtrace_sample(inject, event, sample);

	return perf_event__repipe_synth(tool, event);
}
298
/*
 * Update machine state from an MMAP event, then forward it unchanged.
 * The event is repiped even if processing failed; the processing error
 * is what gets returned.
 */
static int perf_event__repipe_mmap(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err = perf_event__process_mmap(tool, event, sample, machine);

	perf_event__repipe(tool, event, sample, machine);
	return err;
}
311
312#ifdef HAVE_JITDUMP
/*
 * MMAP handler for JIT mode: if the mapped file is a jitdump marker,
 * inject synthesized jit mmap events and ELF images instead of the
 * original event; otherwise fall through to the normal mmap repipe.
 */
static int perf_event__jit_repipe_mmap(struct perf_tool *tool,
				       union perf_event *event,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u64 n = 0;
	int ret;

	/*
	 * if jit marker, then inject jit mmaps and generate ELF images
	 */
	ret = jit_process(inject->session, &inject->output, machine,
			  event->mmap.filename, event->mmap.pid, event->mmap.tid, &n);
	if (ret < 0)
		return ret;
	if (ret) {
		/* jit_process() wrote @n bytes itself; just account them. */
		inject->bytes_written += n;
		return 0;
	}
	return perf_event__repipe_mmap(tool, event, sample, machine);
}
335#endif
336
/*
 * Look up or create the DSO for @filename in @machine, resolving it in
 * the namespace of the task pid/tid.  On success the returned DSO has
 * its nsinfo set (the reference is transferred to the DSO).  Returns
 * NULL if the thread cannot be found/created or DSO lookup fails.
 */
static struct dso *findnew_dso(int pid, int tid, const char *filename,
			       struct dso_id *id, struct machine *machine)
{
	struct thread *thread;
	struct nsinfo *nsi = NULL;
	struct nsinfo *nnsi;
	struct dso *dso;
	bool vdso;

	thread = machine__findnew_thread(machine, pid, tid);
	if (thread == NULL) {
		pr_err("cannot find or create a task %d/%d.\n", tid, pid);
		return NULL;
	}

	vdso = is_vdso_map(filename);
	nsi = nsinfo__get(thread->nsinfo);

	if (vdso) {
		/* The vdso maps are always on the host and not the
		 * container. Ensure that we don't use setns to look
		 * them up.
		 */
		nnsi = nsinfo__copy(nsi);
		if (nnsi) {
			nsinfo__put(nsi);
			nsinfo__clear_need_setns(nnsi);
			nsi = nnsi;
		}
		dso = machine__findnew_vdso(machine, thread);
	} else {
		dso = machine__findnew_dso_id(machine, filename, id);
	}

	if (dso) {
		/* Hand our nsinfo reference to the DSO, dropping its old one. */
		nsinfo__put(dso->nsinfo);
		dso->nsinfo = nsi;
	} else
		nsinfo__put(nsi);

	thread__put(thread);
	return dso;
}
380
/*
 * MMAP handler for --buildid-all: inject a build-id event for the mapped
 * DSO the first time it is seen, then repipe the mmap event itself.
 */
static int perf_event__repipe_buildid_mmap(struct perf_tool *tool,
					   union perf_event *event,
					   struct perf_sample *sample,
					   struct machine *machine)
{
	struct dso *dso;

	dso = findnew_dso(event->mmap.pid, event->mmap.tid,
			  event->mmap.filename, NULL, machine);

	if (dso && !dso->hit) {
		/* hit marks the DSO so its build-id is injected only once. */
		dso->hit = 1;
		dso__inject_build_id(dso, tool, machine, sample->cpumode, 0);
	}
	dso__put(dso);

	return perf_event__repipe(tool, event, sample, machine);
}
399
/*
 * MMAP2 handler: process and repipe the event.  If the record already
 * carries a build-id (PERF_RECORD_MISC_MMAP_BUILD_ID), mark the DSO as
 * hit so no duplicate build-id event is injected for it later.
 */
static int perf_event__repipe_mmap2(struct perf_tool *tool,
				    union perf_event *event,
				    struct perf_sample *sample,
				    struct machine *machine)
{
	int err;

	err = perf_event__process_mmap2(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
		struct dso *dso;

		dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
				  event->mmap2.filename, NULL, machine);
		if (dso) {
			/* mark it not to inject build-id */
			dso->hit = 1;
		}
		dso__put(dso);
	}

	return err;
}
424
425#ifdef HAVE_JITDUMP
/*
 * MMAP2 handler for JIT mode: same as perf_event__jit_repipe_mmap() but
 * for MMAP2 records.
 */
static int perf_event__jit_repipe_mmap2(struct perf_tool *tool,
					union perf_event *event,
					struct perf_sample *sample,
					struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u64 n = 0;
	int ret;

	/*
	 * if jit marker, then inject jit mmaps and generate ELF images
	 */
	ret = jit_process(inject->session, &inject->output, machine,
			  event->mmap2.filename, event->mmap2.pid, event->mmap2.tid, &n);
	if (ret < 0)
		return ret;
	if (ret) {
		/* jit_process() wrote @n bytes itself; just account them. */
		inject->bytes_written += n;
		return 0;
	}
	return perf_event__repipe_mmap2(tool, event, sample, machine);
}
448#endif
449
/*
 * MMAP2 handler for --buildid-all: inject a build-id event for the
 * mapped DSO on first sight and repipe the mmap2 event.  Records that
 * already carry a build-id only get the DSO marked as hit.
 */
static int perf_event__repipe_buildid_mmap2(struct perf_tool *tool,
					    union perf_event *event,
					    struct perf_sample *sample,
					    struct machine *machine)
{
	struct dso_id dso_id = {
		.maj = event->mmap2.maj,
		.min = event->mmap2.min,
		.ino = event->mmap2.ino,
		.ino_generation = event->mmap2.ino_generation,
	};
	struct dso *dso;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
		/* cannot use dso_id since it'd have invalid info */
		dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
				  event->mmap2.filename, NULL, machine);
		if (dso) {
			/* mark it not to inject build-id */
			dso->hit = 1;
		}
		dso__put(dso);
		return 0;
	}

	dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
			  event->mmap2.filename, &dso_id, machine);

	if (dso && !dso->hit) {
		dso->hit = 1;
		dso__inject_build_id(dso, tool, machine, sample->cpumode,
				     event->mmap2.flags);
	}
	dso__put(dso);

	perf_event__repipe(tool, event, sample, machine);

	return 0;
}
489
/*
 * Track the fork in machine state, then forward the event unchanged.
 * The event is repiped regardless; the processing error is returned.
 */
static int perf_event__repipe_fork(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err = perf_event__process_fork(tool, event, sample, machine);

	perf_event__repipe(tool, event, sample, machine);
	return err;
}
502
/*
 * Track the comm change in machine state, then forward the event
 * unchanged.  The event is repiped regardless; the processing error is
 * returned.
 */
static int perf_event__repipe_comm(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err = perf_event__process_comm(tool, event, sample, machine);

	perf_event__repipe(tool, event, sample, machine);
	return err;
}
515
/* Track the namespaces event in machine state, then repipe it. */
static int perf_event__repipe_namespaces(struct perf_tool *tool,
					 union perf_event *event,
					 struct perf_sample *sample,
					 struct machine *machine)
{
	int err = perf_event__process_namespaces(tool, event, sample, machine);

	perf_event__repipe(tool, event, sample, machine);

	return err;
}
527
/*
 * Track the task exit in machine state, then forward the event
 * unchanged.  The event is repiped regardless; the processing error is
 * returned.
 */
static int perf_event__repipe_exit(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err = perf_event__process_exit(tool, event, sample, machine);

	perf_event__repipe(tool, event, sample, machine);
	return err;
}
540
/*
 * Repipe the tracing-data event first, then process it — processing
 * consumes the trailing payload from the input stream, so the order
 * matters here.
 */
static int perf_event__repipe_tracing_data(struct perf_session *session,
					   union perf_event *event)
{
	perf_event__repipe_synth(session->tool, event);

	return perf_event__process_tracing_data(session, event);
}
548
/*
 * Ensure @dso has its build-id populated, reading it from the file on
 * disk if necessary (entering the DSO's mount namespace, and falling
 * back to a /proc/<pid>/root-prefixed path for namespaced processes).
 * Returns 0 when a build-id is available, -1 otherwise.
 */
static int dso__read_build_id(struct dso *dso)
{
	struct nscookie nsc;

	if (dso->has_build_id)
		return 0;

	nsinfo__mountns_enter(dso->nsinfo, &nsc);
	if (filename__read_build_id(dso->long_name, &dso->bid) > 0)
		dso->has_build_id = true;
	else if (dso->nsinfo) {
		char *new_name;

		new_name = filename_with_chroot(dso->nsinfo->pid,
						dso->long_name);
		if (new_name && filename__read_build_id(new_name, &dso->bid) > 0)
			dso->has_build_id = true;
		free(new_name);
	}
	nsinfo__mountns_exit(&nsc);

	return dso->has_build_id ? 0 : -1;
}
572
/*
 * Synthesize a build-id event for @dso and write it to the output via
 * perf_event__repipe.  Anonymous, hugetlb and pseudo (no-DSO) mappings
 * are skipped.  Returns 0 on success or skip, -1 on failure.
 */
static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
				struct machine *machine, u8 cpumode, u32 flags)
{
	int err;

	if (is_anon_memory(dso->long_name) || flags & MAP_HUGETLB)
		return 0;
	if (is_no_dso_memory(dso->long_name))
		return 0;

	if (dso__read_build_id(dso) < 0) {
		pr_debug("no build_id found for %s\n", dso->long_name);
		return -1;
	}

	err = perf_event__synthesize_build_id(tool, dso, cpumode,
					      perf_event__repipe, machine);
	if (err) {
		pr_err("Can't synthesize build_id event for %s\n", dso->long_name);
		return -1;
	}

	return 0;
}
597
/*
 * Sample handler for -b: resolve the sample's IP to a map, inject a
 * build-id event for that DSO the first time it is hit, and repipe the
 * sample.  Always returns 0; resolution failures only skip injection.
 */
int perf_event__inject_buildid(struct perf_tool *tool, union perf_event *event,
			       struct perf_sample *sample,
			       struct evsel *evsel __maybe_unused,
			       struct machine *machine)
{
	struct addr_location al;
	struct thread *thread;

	thread = machine__findnew_thread(machine, sample->pid, sample->tid);
	if (thread == NULL) {
		pr_err("problem processing %d event, skipping it.\n",
		       event->header.type);
		goto repipe;
	}

	if (thread__find_map(thread, sample->cpumode, sample->ip, &al)) {
		if (!al.map->dso->hit) {
			/* hit ensures each DSO's build-id is injected once. */
			al.map->dso->hit = 1;
			dso__inject_build_id(al.map->dso, tool, machine,
					     sample->cpumode, al.map->flags);
		}
	}

	thread__put(thread);
repipe:
	perf_event__repipe(tool, event, sample, machine);
	return 0;
}
626
/*
 * sched:sched_process_exit handler: drop any saved sched_switch event
 * for the exiting tid, since no sched_stat event will match it anymore.
 */
static int perf_inject__sched_process_exit(struct perf_tool *tool,
					   union perf_event *event __maybe_unused,
					   struct perf_sample *sample,
					   struct evsel *evsel __maybe_unused,
					   struct machine *machine __maybe_unused)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	struct event_entry *ent;

	list_for_each_entry(ent, &inject->samples, node) {
		if (sample->tid == ent->tid) {
			/* Safe: we break immediately after freeing the entry. */
			list_del_init(&ent->node);
			free(ent);
			break;
		}
	}

	return 0;
}
646
/*
 * sched:sched_switch handler: replace any previously saved switch event
 * for this tid, then queue a full copy of this one so a later
 * sched_stat_* event can be merged with it.
 */
static int perf_inject__sched_switch(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct evsel *evsel,
				     struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	struct event_entry *ent;

	/* Drop the stale saved event for this tid, if any. */
	perf_inject__sched_process_exit(tool, event, sample, evsel, machine);

	/* event[] is a flexible array; allocate header + full record. */
	ent = malloc(event->header.size + sizeof(struct event_entry));
	if (ent == NULL) {
		color_fprintf(stderr, PERF_COLOR_RED,
			     "Not enough memory to process sched switch event!");
		return -1;
	}

	ent->tid = sample->tid;
	memcpy(&ent->event, event, event->header.size);
	list_add(&ent->node, &inject->samples);
	return 0;
}
670
/*
 * sched:sched_stat_* handler: find the saved sched_switch event for the
 * pid this stat event refers to, give it the stat sample's period and
 * timestamp, and emit the re-synthesized switch event instead of the
 * stat event.  Stat events with no matching switch are silently dropped.
 */
static int perf_inject__sched_stat(struct perf_tool *tool,
				   union perf_event *event __maybe_unused,
				   struct perf_sample *sample,
				   struct evsel *evsel,
				   struct machine *machine)
{
	struct event_entry *ent;
	union perf_event *event_sw;
	struct perf_sample sample_sw;
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u32 pid = evsel__intval(evsel, sample, "pid");

	list_for_each_entry(ent, &inject->samples, node) {
		if (pid == ent->tid)
			goto found;
	}

	return 0;
found:
	event_sw = &ent->event[0];
	evsel__parse_sample(evsel, event_sw, &sample_sw);

	/* Carry the sleep duration and time over from the stat sample. */
	sample_sw.period = sample->period;
	sample_sw.time = sample->time;
	perf_event__synthesize_sample(event_sw, evsel->core.attr.sample_type,
				      evsel->core.attr.read_format, &sample_sw);
	build_id__mark_dso_hit(tool, event_sw, &sample_sw, evsel, machine);
	return perf_event__repipe(tool, event_sw, &sample_sw, machine);
}
700
/* SIGINT handler: ask the session event loop to stop cleanly. */
static void sig_handler(int sig __maybe_unused)
{
	session_done = 1;
}
705
706static int evsel__check_stype(struct evsel *evsel, u64 sample_type, const char *sample_msg)
707{
708 struct perf_event_attr *attr = &evsel->core.attr;
709 const char *name = evsel__name(evsel);
710
711 if (!(attr->sample_type & sample_type)) {
712 pr_err("Samples for %s event do not have %s attribute set.",
713 name, sample_msg);
714 return -EINVAL;
715 }
716
717 return 0;
718}
719
/* Per-evsel handler that discards the sample (used by --strip). */
static int drop_sample(struct perf_tool *tool __maybe_unused,
		       union perf_event *event __maybe_unused,
		       struct perf_sample *sample __maybe_unused,
		       struct evsel *evsel __maybe_unused,
		       struct machine *machine __maybe_unused)
{
	return 0;
}
728
/*
 * Set up --strip mode: drop context-switch events and install the
 * drop_sample handler on every evsel so original samples are discarded.
 */
static void strip_init(struct perf_inject *inject)
{
	struct evlist *evlist = inject->session->evlist;
	struct evsel *evsel;

	inject->tool.context_switch = perf_event__drop;

	evlist__for_each_entry(evlist, evsel)
		evsel->handler = drop_sample;
}
739
/*
 * Option callback for --vm-time-correlation.  Enables in-place update
 * mode, recognizes an optional leading "dry-run" keyword, and stores
 * the remaining option string for the itrace code to parse.
 * Returns 0 on success, -ENOMEM if duplicating the args fails.
 */
static int parse_vm_time_correlation(const struct option *opt, const char *str, int unset)
{
	struct perf_inject *inject = opt->value;
	const char *args;
	char *dry_run;

	if (unset)
		return 0;

	inject->itrace_synth_opts.set = true;
	inject->itrace_synth_opts.vm_time_correlation = true;
	inject->in_place_update = true;

	if (!str)
		return 0;

	dry_run = skip_spaces(str);
	if (!strncmp(dry_run, "dry-run", strlen("dry-run"))) {
		inject->itrace_synth_opts.vm_tm_corr_dry_run = true;
		inject->in_place_update_dry_run = true;
		args = dry_run + strlen("dry-run");
	} else {
		args = str;
	}

	/* Owned by inject; parsed later by the itrace implementation. */
	inject->itrace_synth_opts.vm_tm_corr_args = strdup(args);

	return inject->itrace_synth_opts.vm_tm_corr_args ? 0 : -ENOMEM;
}
769
/*
 * perf_header__process_sections() callback: record the offset/size of
 * each feature section of the input file in inject->secs for later
 * verbatim copying (see feat_copy_cb()).
 */
static int save_section_info_cb(struct perf_file_section *section,
				struct perf_header *ph __maybe_unused,
				int feat, int fd __maybe_unused, void *data)
{
	struct perf_inject *inject = data;

	inject->secs[feat] = *section;
	return 0;
}
779
/* Snapshot all feature-section locations from the input file header. */
static int save_section_info(struct perf_inject *inject)
{
	struct perf_header *header = &inject->session->header;
	int fd = perf_data__fd(inject->session->data);

	return perf_header__process_sections(header, fd, inject, save_section_info_cb);
}
787
788static bool keep_feat(int feat)
789{
790 switch (feat) {
791 /* Keep original information that describes the machine or software */
792 case HEADER_TRACING_DATA:
793 case HEADER_HOSTNAME:
794 case HEADER_OSRELEASE:
795 case HEADER_VERSION:
796 case HEADER_ARCH:
797 case HEADER_NRCPUS:
798 case HEADER_CPUDESC:
799 case HEADER_CPUID:
800 case HEADER_TOTAL_MEM:
801 case HEADER_CPU_TOPOLOGY:
802 case HEADER_NUMA_TOPOLOGY:
803 case HEADER_PMU_MAPPINGS:
804 case HEADER_CACHE:
805 case HEADER_MEM_TOPOLOGY:
806 case HEADER_CLOCKID:
807 case HEADER_BPF_PROG_INFO:
808 case HEADER_BPF_BTF:
809 case HEADER_CPU_PMU_CAPS:
810 case HEADER_CLOCK_DATA:
811 case HEADER_HYBRID_TOPOLOGY:
812 case HEADER_HYBRID_CPU_PMU_CAPS:
813 return true;
814 /* Information that can be updated */
815 case HEADER_BUILD_ID:
816 case HEADER_CMDLINE:
817 case HEADER_EVENT_DESC:
818 case HEADER_BRANCH_STACK:
819 case HEADER_GROUP_DESC:
820 case HEADER_AUXTRACE:
821 case HEADER_STAT:
822 case HEADER_SAMPLE_TIME:
823 case HEADER_DIR_FORMAT:
824 case HEADER_COMPRESSED:
825 default:
826 return false;
827 };
828}
829
830static int read_file(int fd, u64 offs, void *buf, size_t sz)
831{
832 ssize_t ret = preadn(fd, buf, sz, offs);
833
834 if (ret < 0)
835 return -errno;
836 if ((size_t)ret != sz)
837 return -EINVAL;
838 return 0;
839}
840
/*
 * Copy the raw contents of feature section @feat from the input file
 * into the output header via @fw.  Returns 0 on success or a negative
 * error code.
 */
static int feat_copy(struct perf_inject *inject, int feat, struct feat_writer *fw)
{
	int fd = perf_data__fd(inject->session->data);
	u64 offs = inject->secs[feat].offset;
	size_t sz = inject->secs[feat].size;
	void *buf = malloc(sz);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = read_file(fd, offs, buf, sz);
	if (ret)
		goto out_free;

	ret = fw->write(fw, buf, sz);
out_free:
	free(buf);
	return ret;
}
861
/* feat_copier wrapper so feat_copy_cb() can reach the perf_inject state. */
struct inject_fc {
	struct feat_copier fc;		/* embedded callback; must be recoverable via container_of() */
	struct perf_inject *inject;	/* back-pointer to the inject run */
};
866
/*
 * feat_copier callback: copy feature @feat from the input file when it
 * exists there and keep_feat() says it may be carried over verbatim.
 * Returns 1 when the section was copied, 0 to let it be regenerated,
 * or a negative error code.
 */
static int feat_copy_cb(struct feat_copier *fc, int feat, struct feat_writer *fw)
{
	struct inject_fc *inj_fc = container_of(fc, struct inject_fc, fc);
	struct perf_inject *inject = inj_fc->inject;
	int ret;

	if (!inject->secs[feat].offset ||
	    !keep_feat(feat))
		return 0;

	ret = feat_copy(inject, feat, fw);
	if (ret < 0)
		return ret;

	return 1; /* Feature section copied */
}
883
884static int copy_kcore_dir(struct perf_inject *inject)
885{
886 char *cmd;
887 int ret;
888
889 ret = asprintf(&cmd, "cp -r -n %s/kcore_dir* %s >/dev/null 2>&1",
890 inject->input_name, inject->output.path);
891 if (ret < 0)
892 return ret;
893 pr_debug("%s\n", cmd);
894 return system(cmd);
895}
896
/* Output fd for the run, or -1 when updating the input file in place. */
static int output_fd(struct perf_inject *inject)
{
	return inject->in_place_update ? -1 : perf_data__fd(&inject->output);
}
901
/*
 * Run the inject pass: wire up the tool callbacks according to the
 * selected options, process every event in the session, then (for
 * regular, non-in-place output) finalize the header.  Returns 0 on
 * success or a negative error code.
 */
static int __cmd_inject(struct perf_inject *inject)
{
	int ret = -EINVAL;
	struct perf_session *session = inject->session;
	int fd = output_fd(inject);
	u64 output_data_offset;

	signal(SIGINT, sig_handler);

	/* These modes need machine-state side effects while repiping. */
	if (inject->build_ids || inject->sched_stat ||
	    inject->itrace_synth_opts.set || inject->build_id_all) {
		inject->tool.mmap = perf_event__repipe_mmap;
		inject->tool.mmap2 = perf_event__repipe_mmap2;
		inject->tool.fork = perf_event__repipe_fork;
		inject->tool.tracing_data = perf_event__repipe_tracing_data;
	}

	output_data_offset = session->header.data_offset;

	if (inject->build_id_all) {
		/* Inject a build-id for every DSO as it is mapped. */
		inject->tool.mmap = perf_event__repipe_buildid_mmap;
		inject->tool.mmap2 = perf_event__repipe_buildid_mmap2;
	} else if (inject->build_ids) {
		/* Inject build-ids only for DSOs actually hit by samples. */
		inject->tool.sample = perf_event__inject_buildid;
	} else if (inject->sched_stat) {
		struct evsel *evsel;

		/* Install per-evsel handlers for the sched tracepoints. */
		evlist__for_each_entry(session->evlist, evsel) {
			const char *name = evsel__name(evsel);

			if (!strcmp(name, "sched:sched_switch")) {
				if (evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID"))
					return -EINVAL;

				evsel->handler = perf_inject__sched_switch;
			} else if (!strcmp(name, "sched:sched_process_exit"))
				evsel->handler = perf_inject__sched_process_exit;
			else if (!strncmp(name, "sched:sched_stat_", 17))
				evsel->handler = perf_inject__sched_stat;
		}
	} else if (inject->itrace_synth_opts.vm_time_correlation) {
		session->itrace_synth_opts = &inject->itrace_synth_opts;
		/* In-place update: zero all callbacks, keep only what itrace needs. */
		memset(&inject->tool, 0, sizeof(inject->tool));
		inject->tool.id_index = perf_event__process_id_index;
		inject->tool.auxtrace_info = perf_event__process_auxtrace_info;
		inject->tool.auxtrace = perf_event__process_auxtrace;
		inject->tool.auxtrace_error = perf_event__process_auxtrace_error;
		inject->tool.ordered_events = true;
		inject->tool.ordering_requires_timestamps = true;
	} else if (inject->itrace_synth_opts.set) {
		session->itrace_synth_opts = &inject->itrace_synth_opts;
		inject->itrace_synth_opts.inject = true;
		inject->tool.comm = perf_event__repipe_comm;
		inject->tool.namespaces = perf_event__repipe_namespaces;
		inject->tool.exit = perf_event__repipe_exit;
		inject->tool.id_index = perf_event__process_id_index;
		inject->tool.auxtrace_info = perf_event__process_auxtrace_info;
		inject->tool.auxtrace = perf_event__process_auxtrace;
		inject->tool.aux = perf_event__drop_aux;
		inject->tool.itrace_start = perf_event__drop_aux;
		inject->tool.aux_output_hw_id = perf_event__drop_aux;
		inject->tool.ordered_events = true;
		inject->tool.ordering_requires_timestamps = true;
		/* Allow space in the header for new attributes */
		output_data_offset = roundup(8192 + session->header.data_offset, 4096);
		if (inject->strip)
			strip_init(inject);
	}

	if (!inject->itrace_synth_opts.set)
		auxtrace_index__free(&session->auxtrace_index);

	if (!inject->is_pipe && !inject->in_place_update)
		lseek(fd, output_data_offset, SEEK_SET);

	ret = perf_session__process_events(session);
	if (ret)
		return ret;

	/* Header finalization only applies to regular-file output. */
	if (!inject->is_pipe && !inject->in_place_update) {
		struct inject_fc inj_fc = {
			.fc.copy = feat_copy_cb,
			.inject = inject,
		};

		if (inject->build_ids)
			perf_header__set_feat(&session->header,
					      HEADER_BUILD_ID);
		/*
		 * Keep all buildids when there is unprocessed AUX data because
		 * it is not known which ones the AUX trace hits.
		 */
		if (perf_header__has_feat(&session->header, HEADER_BUILD_ID) &&
		    inject->have_auxtrace && !inject->itrace_synth_opts.set)
			dsos__hit_all(session);
		/*
		 * The AUX areas have been removed and replaced with
		 * synthesized hardware events, so clear the feature flag.
		 */
		if (inject->itrace_synth_opts.set) {
			perf_header__clear_feat(&session->header,
						HEADER_AUXTRACE);
			if (inject->itrace_synth_opts.last_branch ||
			    inject->itrace_synth_opts.add_last_branch)
				perf_header__set_feat(&session->header,
						      HEADER_BRANCH_STACK);
		}
		session->header.data_offset = output_data_offset;
		session->header.data_size = inject->bytes_written;
		perf_session__inject_header(session, session->evlist, fd, &inj_fc.fc);

		if (inject->copy_kcore_dir) {
			ret = copy_kcore_dir(inject);
			if (ret)
				return ret;
		}
	}

	return ret;
}
1022
1023int cmd_inject(int argc, const char **argv)
1024{
1025 struct perf_inject inject = {
1026 .tool = {
1027 .sample = perf_event__repipe_sample,
1028 .read = perf_event__repipe_sample,
1029 .mmap = perf_event__repipe,
1030 .mmap2 = perf_event__repipe,
1031 .comm = perf_event__repipe,
1032 .namespaces = perf_event__repipe,
1033 .cgroup = perf_event__repipe,
1034 .fork = perf_event__repipe,
1035 .exit = perf_event__repipe,
1036 .lost = perf_event__repipe,
1037 .lost_samples = perf_event__repipe,
1038 .aux = perf_event__repipe,
1039 .itrace_start = perf_event__repipe,
1040 .aux_output_hw_id = perf_event__repipe,
1041 .context_switch = perf_event__repipe,
1042 .throttle = perf_event__repipe,
1043 .unthrottle = perf_event__repipe,
1044 .ksymbol = perf_event__repipe,
1045 .bpf = perf_event__repipe,
1046 .text_poke = perf_event__repipe,
1047 .attr = perf_event__repipe_attr,
1048 .event_update = perf_event__repipe_event_update,
1049 .tracing_data = perf_event__repipe_op2_synth,
1050 .finished_round = perf_event__repipe_oe_synth,
1051 .build_id = perf_event__repipe_op2_synth,
1052 .id_index = perf_event__repipe_op2_synth,
1053 .auxtrace_info = perf_event__repipe_op2_synth,
1054 .auxtrace_error = perf_event__repipe_op2_synth,
1055 .time_conv = perf_event__repipe_op2_synth,
1056 .thread_map = perf_event__repipe_op2_synth,
1057 .cpu_map = perf_event__repipe_op2_synth,
1058 .stat_config = perf_event__repipe_op2_synth,
1059 .stat = perf_event__repipe_op2_synth,
1060 .stat_round = perf_event__repipe_op2_synth,
1061 .feature = perf_event__repipe_op2_synth,
1062 .compressed = perf_event__repipe_op4_synth,
1063 .auxtrace = perf_event__repipe_auxtrace,
1064 },
1065 .input_name = "-",
1066 .samples = LIST_HEAD_INIT(inject.samples),
1067 .output = {
1068 .path = "-",
1069 .mode = PERF_DATA_MODE_WRITE,
1070 .use_stdio = true,
1071 },
1072 };
1073 struct perf_data data = {
1074 .mode = PERF_DATA_MODE_READ,
1075 .use_stdio = true,
1076 };
1077 int ret;
1078 bool repipe = true;
1079
1080 struct option options[] = {
1081 OPT_BOOLEAN('b', "build-ids", &inject.build_ids,
1082 "Inject build-ids into the output stream"),
1083 OPT_BOOLEAN(0, "buildid-all", &inject.build_id_all,
1084 "Inject build-ids of all DSOs into the output stream"),
1085 OPT_STRING('i', "input", &inject.input_name, "file",
1086 "input file name"),
1087 OPT_STRING('o', "output", &inject.output.path, "file",
1088 "output file name"),
1089 OPT_BOOLEAN('s', "sched-stat", &inject.sched_stat,
1090 "Merge sched-stat and sched-switch for getting events "
1091 "where and how long tasks slept"),
1092#ifdef HAVE_JITDUMP
1093 OPT_BOOLEAN('j', "jit", &inject.jit_mode, "merge jitdump files into perf.data file"),
1094#endif
1095 OPT_INCR('v', "verbose", &verbose,
1096 "be more verbose (show build ids, etc)"),
1097 OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
1098 "file", "vmlinux pathname"),
1099 OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
1100 "don't load vmlinux even if found"),
1101 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file",
1102 "kallsyms pathname"),
1103 OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
1104 OPT_CALLBACK_OPTARG(0, "itrace", &inject.itrace_synth_opts,
1105 NULL, "opts", "Instruction Tracing options\n"
1106 ITRACE_HELP,
1107 itrace_parse_synth_opts),
1108 OPT_BOOLEAN(0, "strip", &inject.strip,
1109 "strip non-synthesized events (use with --itrace)"),
1110 OPT_CALLBACK_OPTARG(0, "vm-time-correlation", &inject, NULL, "opts",
1111 "correlate time between VM guests and the host",
1112 parse_vm_time_correlation),
1113 OPT_END()
1114 };
1115 const char * const inject_usage[] = {
1116 "perf inject [<options>]",
1117 NULL
1118 };
1119#ifndef HAVE_JITDUMP
1120 set_option_nobuild(options, 'j', "jit", "NO_LIBELF=1", true);
1121#endif
1122 argc = parse_options(argc, argv, options, inject_usage, 0);
1123
1124 /*
1125 * Any (unrecognized) arguments left?
1126 */
1127 if (argc)
1128 usage_with_options(inject_usage, options);
1129
1130 if (inject.strip && !inject.itrace_synth_opts.set) {
1131 pr_err("--strip option requires --itrace option\n");
1132 return -1;
1133 }
1134
1135 if (symbol__validate_sym_arguments())
1136 return -1;
1137
1138 if (inject.in_place_update) {
1139 if (!strcmp(inject.input_name, "-")) {
1140 pr_err("Input file name required for in-place updating\n");
1141 return -1;
1142 }
1143 if (strcmp(inject.output.path, "-")) {
1144 pr_err("Output file name must not be specified for in-place updating\n");
1145 return -1;
1146 }
1147 if (!data.force && !inject.in_place_update_dry_run) {
1148 pr_err("The input file would be updated in place, "
1149 "the --force option is required.\n");
1150 return -1;
1151 }
1152 if (!inject.in_place_update_dry_run)
1153 data.in_place_update = true;
1154 } else {
1155 if (strcmp(inject.output.path, "-") && !inject.strip &&
1156 has_kcore_dir(inject.input_name)) {
1157 inject.output.is_dir = true;
1158 inject.copy_kcore_dir = true;
1159 }
1160 if (perf_data__open(&inject.output)) {
1161 perror("failed to create output file");
1162 return -1;
1163 }
1164 }
1165
1166 data.path = inject.input_name;
1167 if (!strcmp(inject.input_name, "-") || inject.output.is_pipe) {
1168 inject.is_pipe = true;
1169 /*
1170 * Do not repipe header when input is a regular file
1171 * since either it can rewrite the header at the end
1172 * or write a new pipe header.
1173 */
1174 if (strcmp(inject.input_name, "-"))
1175 repipe = false;
1176 }
1177
1178 inject.session = __perf_session__new(&data, repipe,
1179 output_fd(&inject),
1180 &inject.tool);
1181 if (IS_ERR(inject.session)) {
1182 ret = PTR_ERR(inject.session);
1183 goto out_close_output;
1184 }
1185
1186 if (zstd_init(&(inject.session->zstd_data), 0) < 0)
1187 pr_warning("Decompression initialization failed.\n");
1188
1189 /* Save original section info before feature bits change */
1190 ret = save_section_info(&inject);
1191 if (ret)
1192 goto out_delete;
1193
1194 if (!data.is_pipe && inject.output.is_pipe) {
1195 ret = perf_header__write_pipe(perf_data__fd(&inject.output));
1196 if (ret < 0) {
1197 pr_err("Couldn't write a new pipe header.\n");
1198 goto out_delete;
1199 }
1200
1201 ret = perf_event__synthesize_for_pipe(&inject.tool,
1202 inject.session,
1203 &inject.output,
1204 perf_event__repipe);
1205 if (ret < 0)
1206 goto out_delete;
1207 }
1208
1209 if (inject.build_ids && !inject.build_id_all) {
1210 /*
1211 * to make sure the mmap records are ordered correctly
1212 * and so that the correct especially due to jitted code
1213 * mmaps. We cannot generate the buildid hit list and
1214 * inject the jit mmaps at the same time for now.
1215 */
1216 inject.tool.ordered_events = true;
1217 inject.tool.ordering_requires_timestamps = true;
1218 }
1219
1220 if (inject.sched_stat) {
1221 inject.tool.ordered_events = true;
1222 }
1223
1224#ifdef HAVE_JITDUMP
1225 if (inject.jit_mode) {
1226 inject.tool.mmap2 = perf_event__jit_repipe_mmap2;
1227 inject.tool.mmap = perf_event__jit_repipe_mmap;
1228 inject.tool.ordered_events = true;
1229 inject.tool.ordering_requires_timestamps = true;
1230 /*
1231 * JIT MMAP injection injects all MMAP events in one go, so it
1232 * does not obey finished_round semantics.
1233 */
1234 inject.tool.finished_round = perf_event__drop_oe;
1235 }
1236#endif
1237 ret = symbol__init(&inject.session->header.env);
1238 if (ret < 0)
1239 goto out_delete;
1240
1241 ret = __cmd_inject(&inject);
1242
1243out_delete:
1244 zstd_fini(&(inject.session->zstd_data));
1245 perf_session__delete(inject.session);
1246out_close_output:
1247 if (!inject.in_place_update)
1248 perf_data__close(&inject.output);
1249 free(inject.itrace_synth_opts.vm_tm_corr_args);
1250 return ret;
1251}