// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_pt.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#include <inttypes.h>
#include <linux/perf_event.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/zalloc.h>

#include "session.h"
#include "machine.h"
#include "memswap.h"
#include "sort.h"
#include "tool.h"
#include "event.h"
#include "evlist.h"
#include "evsel.h"
#include "map.h"
#include "color.h"
#include "thread.h"
#include "thread-stack.h"
#include "symbol.h"
#include "callchain.h"
#include "dso.h"
#include "debug.h"
#include "auxtrace.h"
#include "tsc.h"
#include "intel-pt.h"
#include "config.h"
#include "util/perf_api_probe.h"
#include "util/synthetic-events.h"
#include "time-utils.h"

#include "../arch/x86/include/uapi/asm/perf_regs.h"

#include "intel-pt-decoder/intel-pt-log.h"
#include "intel-pt-decoder/intel-pt-decoder.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
#include "intel-pt-decoder/intel-pt-pkt-decoder.h"

#define MAX_TIMESTAMP (~0ULL)

#define INTEL_PT_CFG_PASS_THRU	BIT_ULL(0)
#define INTEL_PT_CFG_PWR_EVT_EN	BIT_ULL(4)
#define INTEL_PT_CFG_BRANCH_EN	BIT_ULL(13)
#define INTEL_PT_CFG_EVT_EN	BIT_ULL(31)
#define INTEL_PT_CFG_TNT_DIS	BIT_ULL(55)

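/*
 * Note: these correspond to bits of attr.config for the Intel PT PMU, as
 * exposed by the PMU format attributes (e.g. "pt", "pwr_evt", "branch",
 * "event", "notnt"), which largely pass through to the IA32_RTIT_CTL MSR.
 * An illustrative (not exhaustive) check, mirroring what
 * intel_pt_branch_enable() below does:
 *
 *	if ((config & INTEL_PT_CFG_PASS_THRU) &&
 *	    !(config & INTEL_PT_CFG_BRANCH_EN))
 *		// branch tracing was explicitly disabled
 */
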
struct range {
	u64 start;
	u64 end;
};

struct intel_pt {
	struct auxtrace auxtrace;
	struct auxtrace_queues queues;
	struct auxtrace_heap heap;
	u32 auxtrace_type;
	struct perf_session *session;
	struct machine *machine;
	struct evsel *switch_evsel;
	struct thread *unknown_thread;
	bool timeless_decoding;
	bool sampling_mode;
	bool snapshot_mode;
	bool per_cpu_mmaps;
	bool have_tsc;
	bool data_queued;
	bool est_tsc;
	bool sync_switch;
	bool sync_switch_not_supported;
	bool mispred_all;
	bool use_thread_stack;
	bool callstack;
	bool cap_event_trace;
	bool have_guest_sideband;
	unsigned int br_stack_sz;
	unsigned int br_stack_sz_plus;
	int have_sched_switch;
	u32 pmu_type;
	u64 kernel_start;
	u64 switch_ip;
	u64 ptss_ip;
	u64 first_timestamp;

	struct perf_tsc_conversion tc;
	bool cap_user_time_zero;

	struct itrace_synth_opts synth_opts;

	bool sample_instructions;
	u64 instructions_sample_type;
	u64 instructions_id;

	bool sample_cycles;
	u64 cycles_sample_type;
	u64 cycles_id;

	bool sample_branches;
	u32 branches_filter;
	u64 branches_sample_type;
	u64 branches_id;

	bool sample_transactions;
	u64 transactions_sample_type;
	u64 transactions_id;

	bool sample_ptwrites;
	u64 ptwrites_sample_type;
	u64 ptwrites_id;

	bool sample_pwr_events;
	u64 pwr_events_sample_type;
	u64 mwait_id;
	u64 pwre_id;
	u64 exstop_id;
	u64 pwrx_id;
	u64 cbr_id;
	u64 psb_id;

	bool single_pebs;
	bool sample_pebs;
	struct evsel *pebs_evsel;

	u64 evt_sample_type;
	u64 evt_id;

	u64 iflag_chg_sample_type;
	u64 iflag_chg_id;

	u64 tsc_bit;
	u64 mtc_bit;
	u64 mtc_freq_bits;
	u32 tsc_ctc_ratio_n;
	u32 tsc_ctc_ratio_d;
	u64 cyc_bit;
	u64 noretcomp_bit;
	unsigned max_non_turbo_ratio;
	unsigned cbr2khz;
	int max_loops;

	unsigned long num_events;

	char *filter;
	struct addr_filters filts;

	struct range *time_ranges;
	unsigned int range_cnt;

	struct ip_callchain *chain;
	struct branch_stack *br_stack;

	u64 dflt_tsc_offset;
	struct rb_root vmcs_info;
};

enum switch_state {
	INTEL_PT_SS_NOT_TRACING,
	INTEL_PT_SS_UNKNOWN,
	INTEL_PT_SS_TRACING,
	INTEL_PT_SS_EXPECTING_SWITCH_EVENT,
	INTEL_PT_SS_EXPECTING_SWITCH_IP,
};

/* applicable_counters is 64-bits */
#define INTEL_PT_MAX_PEBS 64

struct intel_pt_pebs_event {
	struct evsel *evsel;
	u64 id;
};

struct intel_pt_queue {
	struct intel_pt *pt;
	unsigned int queue_nr;
	struct auxtrace_buffer *buffer;
	struct auxtrace_buffer *old_buffer;
	void *decoder;
	const struct intel_pt_state *state;
	struct ip_callchain *chain;
	struct branch_stack *last_branch;
	union perf_event *event_buf;
	bool on_heap;
	bool stop;
	bool step_through_buffers;
	bool use_buffer_pid_tid;
	bool sync_switch;
	bool sample_ipc;
	pid_t pid, tid;
	int cpu;
	int switch_state;
	pid_t next_tid;
	struct thread *thread;
	struct machine *guest_machine;
	struct thread *guest_thread;
	struct thread *unknown_guest_thread;
	pid_t guest_machine_pid;
	pid_t guest_pid;
	pid_t guest_tid;
	int vcpu;
	bool exclude_kernel;
	bool have_sample;
	u64 time;
	u64 timestamp;
	u64 sel_timestamp;
	bool sel_start;
	unsigned int sel_idx;
	u32 flags;
	u16 insn_len;
	u64 last_insn_cnt;
	u64 ipc_insn_cnt;
	u64 ipc_cyc_cnt;
	u64 last_in_insn_cnt;
	u64 last_in_cyc_cnt;
	u64 last_cy_insn_cnt;
	u64 last_cy_cyc_cnt;
	u64 last_br_insn_cnt;
	u64 last_br_cyc_cnt;
	unsigned int cbr_seen;
	char insn[INTEL_PT_INSN_BUF_SZ];
	struct intel_pt_pebs_event pebs[INTEL_PT_MAX_PEBS];
};

static void intel_pt_dump(struct intel_pt *pt __maybe_unused,
			  unsigned char *buf, size_t len)
{
	struct intel_pt_pkt packet;
	size_t pos = 0;
	int ret, pkt_len, i;
	char desc[INTEL_PT_PKT_DESC_MAX];
	const char *color = PERF_COLOR_BLUE;
	enum intel_pt_pkt_ctx ctx = INTEL_PT_NO_CTX;

	color_fprintf(stdout, color,
		      ". ... Intel Processor Trace data: size %zu bytes\n",
		      len);

	while (len) {
		ret = intel_pt_get_packet(buf, len, &packet, &ctx);
		if (ret > 0)
			pkt_len = ret;
		else
			pkt_len = 1;
		printf(".");
		color_fprintf(stdout, color, "  %08x: ", pos);
		for (i = 0; i < pkt_len; i++)
			color_fprintf(stdout, color, " %02x", buf[i]);
		for (; i < 16; i++)
			color_fprintf(stdout, color, "   ");
		if (ret > 0) {
			ret = intel_pt_pkt_desc(&packet, desc,
						INTEL_PT_PKT_DESC_MAX);
			if (ret > 0)
				color_fprintf(stdout, color, " %s\n", desc);
		} else {
			color_fprintf(stdout, color, " Bad packet!\n");
		}
		pos += pkt_len;
		buf += pkt_len;
		len -= pkt_len;
	}
}

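/*
 * For reference, intel_pt_dump() prints one line per packet, hex bytes
 * followed by the packet description. Illustrative output, not from a
 * real trace:
 *
 * .  00000000:  02 82 02 82 02 82 02 82 02 82 02 82 02 82 02 82  PSB
 * .  00000010:  19 ...                                           TSC ...
 */
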
static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
				size_t len)
{
	printf(".\n");
	intel_pt_dump(pt, buf, len);
}

static void intel_pt_log_event(union perf_event *event)
{
	FILE *f = intel_pt_log_fp();

	if (!intel_pt_enable_logging || !f)
		return;

	perf_event__fprintf(event, NULL, f);
}

static void intel_pt_dump_sample(struct perf_session *session,
				 struct perf_sample *sample)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	printf("\n");
	intel_pt_dump(pt, sample->aux_sample.data, sample->aux_sample.size);
}

static bool intel_pt_log_events(struct intel_pt *pt, u64 tm)
{
	struct perf_time_interval *range = pt->synth_opts.ptime_range;
	int n = pt->synth_opts.range_num;

	if (pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_ALL_PERF_EVTS)
		return true;

	if (pt->synth_opts.log_minus_flags & AUXTRACE_LOG_FLG_ALL_PERF_EVTS)
		return false;

	/* perf_time__ranges_skip_sample does not work if time is zero */
	if (!tm)
		tm = 1;

	return !n || !perf_time__ranges_skip_sample(range, n, tm);
}

static struct intel_pt_vmcs_info *intel_pt_findnew_vmcs(struct rb_root *rb_root,
							u64 vmcs,
							u64 dflt_tsc_offset)
{
	struct rb_node **p = &rb_root->rb_node;
	struct rb_node *parent = NULL;
	struct intel_pt_vmcs_info *v;

	while (*p) {
		parent = *p;
		v = rb_entry(parent, struct intel_pt_vmcs_info, rb_node);

		if (v->vmcs == vmcs)
			return v;

		if (vmcs < v->vmcs)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	v = zalloc(sizeof(*v));
	if (v) {
		v->vmcs = vmcs;
		v->tsc_offset = dflt_tsc_offset;
		v->reliable = dflt_tsc_offset;

		rb_link_node(&v->rb_node, parent, p);
		rb_insert_color(&v->rb_node, rb_root);
	}

	return v;
}

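/*
 * Note: intel_pt_findnew_vmcs() is the usual rbtree find-or-insert idiom:
 * walk down remembering the parent and link slot, and if no node matches,
 * link a new node at that slot and rebalance. A zero dflt_tsc_offset also
 * leaves the new entry marked not 'reliable', so it can presumably be
 * updated later (that code is not shown here).
 */
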
static struct intel_pt_vmcs_info *intel_pt_findnew_vmcs_info(void *data, uint64_t vmcs)
{
	struct intel_pt_queue *ptq = data;
	struct intel_pt *pt = ptq->pt;

	if (!vmcs && !pt->dflt_tsc_offset)
		return NULL;

	return intel_pt_findnew_vmcs(&pt->vmcs_info, vmcs, pt->dflt_tsc_offset);
}

static void intel_pt_free_vmcs_info(struct intel_pt *pt)
{
	struct intel_pt_vmcs_info *v;
	struct rb_node *n;

	n = rb_first(&pt->vmcs_info);
	while (n) {
		v = rb_entry(n, struct intel_pt_vmcs_info, rb_node);
		n = rb_next(n);
		rb_erase(&v->rb_node, &pt->vmcs_info);
		free(v);
	}
}

static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
				   struct auxtrace_buffer *b)
{
	bool consecutive = false;
	void *start;

	start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
				      pt->have_tsc, &consecutive,
				      pt->synth_opts.vm_time_correlation);
	if (!start)
		return -EINVAL;
	/*
	 * In the case of vm_time_correlation, the overlap might contain TSC
	 * packets that will not be fixed, and that will then no longer work for
	 * overlap detection. Avoid that by zeroing out the overlap.
	 */
	if (pt->synth_opts.vm_time_correlation)
		memset(b->data, 0, start - b->data);
	b->use_size = b->data + b->size - start;
	b->use_data = start;
	if (b->use_size && consecutive)
		b->consecutive = true;
	return 0;
}

static int intel_pt_get_buffer(struct intel_pt_queue *ptq,
			       struct auxtrace_buffer *buffer,
			       struct auxtrace_buffer *old_buffer,
			       struct intel_pt_buffer *b)
{
	bool might_overlap;

	if (!buffer->data) {
		int fd = perf_data__fd(ptq->pt->session->data);

		buffer->data = auxtrace_buffer__get_data(buffer, fd);
		if (!buffer->data)
			return -ENOMEM;
	}

	might_overlap = ptq->pt->snapshot_mode || ptq->pt->sampling_mode;
	if (might_overlap && !buffer->consecutive && old_buffer &&
	    intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
		return -ENOMEM;

	if (buffer->use_data) {
		b->len = buffer->use_size;
		b->buf = buffer->use_data;
	} else {
		b->len = buffer->size;
		b->buf = buffer->data;
	}
	b->ref_timestamp = buffer->reference;

	if (!old_buffer || (might_overlap && !buffer->consecutive)) {
		b->consecutive = false;
		b->trace_nr = buffer->buffer_nr + 1;
	} else {
		b->consecutive = true;
	}

	return 0;
}

/* Do not drop buffers with references - refer intel_pt_get_trace() */
static void intel_pt_lookahead_drop_buffer(struct intel_pt_queue *ptq,
					   struct auxtrace_buffer *buffer)
{
	if (!buffer || buffer == ptq->buffer || buffer == ptq->old_buffer)
		return;

	auxtrace_buffer__drop_data(buffer);
}

/* Must be serialized with respect to intel_pt_get_trace() */
static int intel_pt_lookahead(void *data, intel_pt_lookahead_cb_t cb,
			      void *cb_data)
{
	struct intel_pt_queue *ptq = data;
	struct auxtrace_buffer *buffer = ptq->buffer;
	struct auxtrace_buffer *old_buffer = ptq->old_buffer;
	struct auxtrace_queue *queue;
	int err = 0;

	queue = &ptq->pt->queues.queue_array[ptq->queue_nr];

	while (1) {
		struct intel_pt_buffer b = { .len = 0 };

		buffer = auxtrace_buffer__next(queue, buffer);
		if (!buffer)
			break;

		err = intel_pt_get_buffer(ptq, buffer, old_buffer, &b);
		if (err)
			break;

		if (b.len) {
			intel_pt_lookahead_drop_buffer(ptq, old_buffer);
			old_buffer = buffer;
		} else {
			intel_pt_lookahead_drop_buffer(ptq, buffer);
			continue;
		}

		err = cb(&b, cb_data);
		if (err)
			break;
	}

	if (buffer != old_buffer)
		intel_pt_lookahead_drop_buffer(ptq, buffer);
	intel_pt_lookahead_drop_buffer(ptq, old_buffer);

	return err;
}

/*
 * This function assumes data is processed sequentially only.
 * Must be serialized with respect to intel_pt_lookahead()
 */
static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
{
	struct intel_pt_queue *ptq = data;
	struct auxtrace_buffer *buffer = ptq->buffer;
	struct auxtrace_buffer *old_buffer = ptq->old_buffer;
	struct auxtrace_queue *queue;
	int err;

	if (ptq->stop) {
		b->len = 0;
		return 0;
	}

	queue = &ptq->pt->queues.queue_array[ptq->queue_nr];

	buffer = auxtrace_buffer__next(queue, buffer);
	if (!buffer) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		b->len = 0;
		return 0;
	}

	ptq->buffer = buffer;

	err = intel_pt_get_buffer(ptq, buffer, old_buffer, b);
	if (err)
		return err;

	if (ptq->step_through_buffers)
		ptq->stop = true;

	if (b->len) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		ptq->old_buffer = buffer;
	} else {
		auxtrace_buffer__drop_data(buffer);
		return intel_pt_get_trace(b, data);
	}

	return 0;
}

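/*
 * Buffer lifecycle sketch (as implemented above): the queue keeps the
 * current buffer (ptq->buffer) and retains the previous one
 * (ptq->old_buffer) until new data has been fetched, because
 * intel_pt_do_fix_overlap() may need to compare against it. Empty buffers
 * are dropped immediately and intel_pt_get_trace() recurses to fetch the
 * next one.
 */
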
struct intel_pt_cache_entry {
	struct auxtrace_cache_entry entry;
	u64 insn_cnt;
	u64 byte_cnt;
	enum intel_pt_insn_op op;
	enum intel_pt_insn_branch branch;
	bool emulated_ptwrite;
	int length;
	int32_t rel;
	char insn[INTEL_PT_INSN_BUF_SZ];
};

static int intel_pt_config_div(const char *var, const char *value, void *data)
{
	int *d = data;
	long val;

	if (!strcmp(var, "intel-pt.cache-divisor")) {
		val = strtol(value, NULL, 0);
		if (val > 0 && val <= INT_MAX)
			*d = val;
	}

	return 0;
}

static int intel_pt_cache_divisor(void)
{
	static int d;

	if (d)
		return d;

	perf_config(intel_pt_config_div, &d);

	if (!d)
		d = 64;

	return d;
}

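/*
 * The divisor is read from perf config, e.g. in ~/.perfconfig
 * (the value shown is illustrative; the default is 64):
 *
 *	[intel-pt]
 *		cache-divisor = 32
 */
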
static unsigned int intel_pt_cache_size(struct dso *dso,
					struct machine *machine)
{
	off_t size;

	size = dso__data_size(dso, machine);
	size /= intel_pt_cache_divisor();
	if (size < 1000)
		return 10;
	if (size > (1 << 21))
		return 21;
	return 32 - __builtin_clz(size);
}

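/*
 * Worked example for intel_pt_cache_size(): a 16 MiB DSO with the default
 * divisor of 64 gives size = 256 KiB = 2^18, so the result is
 * 32 - __builtin_clz(1 << 18) = 32 - 13 = 19 bits of hash table, roughly
 * clamped to between 10 and 21 bits by the checks above.
 */
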
static struct auxtrace_cache *intel_pt_cache(struct dso *dso,
					     struct machine *machine)
{
	struct auxtrace_cache *c;
	unsigned int bits;

	if (dso->auxtrace_cache)
		return dso->auxtrace_cache;

	bits = intel_pt_cache_size(dso, machine);

	/* Ignoring cache creation failure */
	c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200);

	dso->auxtrace_cache = c;

	return c;
}

static int intel_pt_cache_add(struct dso *dso, struct machine *machine,
			      u64 offset, u64 insn_cnt, u64 byte_cnt,
			      struct intel_pt_insn *intel_pt_insn)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);
	struct intel_pt_cache_entry *e;
	int err;

	if (!c)
		return -ENOMEM;

	e = auxtrace_cache__alloc_entry(c);
	if (!e)
		return -ENOMEM;

	e->insn_cnt = insn_cnt;
	e->byte_cnt = byte_cnt;
	e->op = intel_pt_insn->op;
	e->branch = intel_pt_insn->branch;
	e->emulated_ptwrite = intel_pt_insn->emulated_ptwrite;
	e->length = intel_pt_insn->length;
	e->rel = intel_pt_insn->rel;
	memcpy(e->insn, intel_pt_insn->buf, INTEL_PT_INSN_BUF_SZ);

	err = auxtrace_cache__add(c, offset, &e->entry);
	if (err)
		auxtrace_cache__free_entry(c, e);

	return err;
}

static struct intel_pt_cache_entry *
intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);

	if (!c)
		return NULL;

	return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
}

static void intel_pt_cache_invalidate(struct dso *dso, struct machine *machine,
				      u64 offset)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);

	if (!c)
		return;

	auxtrace_cache__remove(dso->auxtrace_cache, offset);
}

static inline bool intel_pt_guest_kernel_ip(uint64_t ip)
{
	/* Assumes 64-bit kernel */
	return ip & (1ULL << 63);
}

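/*
 * This works because canonical x86_64 kernel addresses live in the top
 * half of the address space: bit 63 is set for kernel IPs and clear for
 * user IPs, so no guest-side kernel_start value is needed.
 */
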
static inline u8 intel_pt_nr_cpumode(struct intel_pt_queue *ptq, uint64_t ip, bool nr)
{
	if (nr) {
		return intel_pt_guest_kernel_ip(ip) ?
		       PERF_RECORD_MISC_GUEST_KERNEL :
		       PERF_RECORD_MISC_GUEST_USER;
	}

	return ip >= ptq->pt->kernel_start ?
	       PERF_RECORD_MISC_KERNEL :
	       PERF_RECORD_MISC_USER;
}

static inline u8 intel_pt_cpumode(struct intel_pt_queue *ptq, uint64_t from_ip, uint64_t to_ip)
{
	/* No support for non-zero CS base */
	if (from_ip)
		return intel_pt_nr_cpumode(ptq, from_ip, ptq->state->from_nr);
	return intel_pt_nr_cpumode(ptq, to_ip, ptq->state->to_nr);
}

static int intel_pt_get_guest(struct intel_pt_queue *ptq)
{
	struct machines *machines = &ptq->pt->session->machines;
	struct machine *machine;
	pid_t pid = ptq->pid <= 0 ? DEFAULT_GUEST_KERNEL_ID : ptq->pid;

	if (ptq->guest_machine && pid == ptq->guest_machine->pid)
		return 0;

	ptq->guest_machine = NULL;
	thread__zput(ptq->unknown_guest_thread);

	if (symbol_conf.guest_code) {
		thread__zput(ptq->guest_thread);
		ptq->guest_thread = machines__findnew_guest_code(machines, pid);
	}

	machine = machines__find_guest(machines, pid);
	if (!machine)
		return -1;

	ptq->unknown_guest_thread = machine__idle_thread(machine);
	if (!ptq->unknown_guest_thread)
		return -1;

	ptq->guest_machine = machine;

	return 0;
}

static inline bool intel_pt_jmp_16(struct intel_pt_insn *intel_pt_insn)
{
	return intel_pt_insn->rel == 16 && intel_pt_insn->branch == INTEL_PT_BR_UNCONDITIONAL;
}

#define PTWRITE_MAGIC		"\x0f\x0bperf,ptwrite "
#define PTWRITE_MAGIC_LEN	16

static bool intel_pt_emulated_ptwrite(struct dso *dso, struct machine *machine, u64 offset)
{
	unsigned char buf[PTWRITE_MAGIC_LEN];
	ssize_t len;

	len = dso__data_read_offset(dso, machine, offset, buf, PTWRITE_MAGIC_LEN);
	if (len == PTWRITE_MAGIC_LEN && !memcmp(buf, PTWRITE_MAGIC, PTWRITE_MAGIC_LEN)) {
		intel_pt_log("Emulated ptwrite signature found\n");
		return true;
	}
	intel_pt_log("Emulated ptwrite signature not found\n");
	return false;
}

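/*
 * Layout assumed for an emulated ptwrite site (see intel_pt_jmp_16()
 * above): a 2-byte unconditional jump with rel == 16 skips a 16-byte
 * signature consisting of UD2 (0x0f 0x0b), the ASCII bytes
 * "perf,ptwrite " and a NUL terminator. intel_pt_walk_next_insn() checks
 * for this pattern after each such jump.
 */
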
static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
				   uint64_t *insn_cnt_ptr, uint64_t *ip,
				   uint64_t to_ip, uint64_t max_insn_cnt,
				   void *data)
{
	struct intel_pt_queue *ptq = data;
	struct machine *machine = ptq->pt->machine;
	struct thread *thread;
	struct addr_location al;
	unsigned char buf[INTEL_PT_INSN_BUF_SZ];
	ssize_t len;
	int x86_64;
	u8 cpumode;
	u64 offset, start_offset, start_ip;
	u64 insn_cnt = 0;
	bool one_map = true;
	bool nr;

	intel_pt_insn->length = 0;

	if (to_ip && *ip == to_ip)
		goto out_no_cache;

	nr = ptq->state->to_nr;
	cpumode = intel_pt_nr_cpumode(ptq, *ip, nr);

	if (nr) {
		if (ptq->pt->have_guest_sideband) {
			if (!ptq->guest_machine || ptq->guest_machine_pid != ptq->pid) {
				intel_pt_log("ERROR: guest sideband but no guest machine\n");
				return -EINVAL;
			}
		} else if ((!symbol_conf.guest_code && cpumode != PERF_RECORD_MISC_GUEST_KERNEL) ||
			   intel_pt_get_guest(ptq)) {
			intel_pt_log("ERROR: no guest machine\n");
			return -EINVAL;
		}
		machine = ptq->guest_machine;
		thread = ptq->guest_thread;
		if (!thread) {
			if (cpumode != PERF_RECORD_MISC_GUEST_KERNEL) {
				intel_pt_log("ERROR: no guest thread\n");
				return -EINVAL;
			}
			thread = ptq->unknown_guest_thread;
		}
	} else {
		thread = ptq->thread;
		if (!thread) {
			if (cpumode != PERF_RECORD_MISC_KERNEL) {
				intel_pt_log("ERROR: no thread\n");
				return -EINVAL;
			}
			thread = ptq->pt->unknown_thread;
		}
	}

	while (1) {
		struct dso *dso;

		if (!thread__find_map(thread, cpumode, *ip, &al) || !map__dso(al.map)) {
			if (al.map)
				intel_pt_log("ERROR: thread has no dso for %#" PRIx64 "\n", *ip);
			else
				intel_pt_log("ERROR: thread has no map for %#" PRIx64 "\n", *ip);
			return -EINVAL;
		}
		dso = map__dso(al.map);

		if (dso->data.status == DSO_DATA_STATUS_ERROR &&
		    dso__data_status_seen(dso, DSO_DATA_STATUS_SEEN_ITRACE))
			return -ENOENT;

		offset = map__map_ip(al.map, *ip);

		if (!to_ip && one_map) {
			struct intel_pt_cache_entry *e;

			e = intel_pt_cache_lookup(dso, machine, offset);
			if (e &&
			    (!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) {
				*insn_cnt_ptr = e->insn_cnt;
				*ip += e->byte_cnt;
				intel_pt_insn->op = e->op;
				intel_pt_insn->branch = e->branch;
				intel_pt_insn->emulated_ptwrite = e->emulated_ptwrite;
				intel_pt_insn->length = e->length;
				intel_pt_insn->rel = e->rel;
				memcpy(intel_pt_insn->buf, e->insn, INTEL_PT_INSN_BUF_SZ);
				intel_pt_log_insn_no_data(intel_pt_insn, *ip);
				return 0;
			}
		}

		start_offset = offset;
		start_ip = *ip;

		/* Load maps to ensure dso->is_64_bit has been updated */
		map__load(al.map);

		x86_64 = dso->is_64_bit;

		while (1) {
			len = dso__data_read_offset(dso, machine,
						    offset, buf,
						    INTEL_PT_INSN_BUF_SZ);
			if (len <= 0) {
				intel_pt_log("ERROR: failed to read at offset %#" PRIx64 " ",
					     offset);
				if (intel_pt_enable_logging)
					dso__fprintf(dso, intel_pt_log_fp());
				return -EINVAL;
			}

			if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn))
				return -EINVAL;

			intel_pt_log_insn(intel_pt_insn, *ip);

			insn_cnt += 1;

			if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH) {
				bool eptw;
				u64 offs;

				if (!intel_pt_jmp_16(intel_pt_insn))
					goto out;
				/* Check for emulated ptwrite */
				offs = offset + intel_pt_insn->length;
				eptw = intel_pt_emulated_ptwrite(dso, machine, offs);
				intel_pt_insn->emulated_ptwrite = eptw;
				goto out;
			}

			if (max_insn_cnt && insn_cnt >= max_insn_cnt)
				goto out_no_cache;

			*ip += intel_pt_insn->length;

			if (to_ip && *ip == to_ip) {
				intel_pt_insn->length = 0;
				goto out_no_cache;
			}

			if (*ip >= map__end(al.map))
				break;

			offset += intel_pt_insn->length;
		}
		one_map = false;
	}
out:
	*insn_cnt_ptr = insn_cnt;

	if (!one_map)
		goto out_no_cache;

	/*
	 * Didn't lookup in the 'to_ip' case, so do it now to prevent duplicate
	 * entries.
	 */
	if (to_ip) {
		struct intel_pt_cache_entry *e;

		e = intel_pt_cache_lookup(map__dso(al.map), machine, start_offset);
		if (e)
			return 0;
	}

	/* Ignore cache errors */
	intel_pt_cache_add(map__dso(al.map), machine, start_offset, insn_cnt,
			   *ip - start_ip, intel_pt_insn);

	return 0;

out_no_cache:
	*insn_cnt_ptr = insn_cnt;
	return 0;
}

static bool intel_pt_match_pgd_ip(struct intel_pt *pt, uint64_t ip,
				  uint64_t offset, const char *filename)
{
	struct addr_filter *filt;
	bool have_filter = false;
	bool hit_tracestop = false;
	bool hit_filter = false;

	list_for_each_entry(filt, &pt->filts.head, list) {
		if (filt->start)
			have_filter = true;

		if ((filename && !filt->filename) ||
		    (!filename && filt->filename) ||
		    (filename && strcmp(filename, filt->filename)))
			continue;

		if (!(offset >= filt->addr && offset < filt->addr + filt->size))
			continue;

		intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s hit filter: %s offset %#"PRIx64" size %#"PRIx64"\n",
			     ip, offset, filename ? filename : "[kernel]",
			     filt->start ? "filter" : "stop",
			     filt->addr, filt->size);

		if (filt->start)
			hit_filter = true;
		else
			hit_tracestop = true;
	}

	if (!hit_tracestop && !hit_filter)
		intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s is not in a filter region\n",
			     ip, offset, filename ? filename : "[kernel]");

	return hit_tracestop || (have_filter && !hit_filter);
}

static int __intel_pt_pgd_ip(uint64_t ip, void *data)
{
	struct intel_pt_queue *ptq = data;
	struct thread *thread;
	struct addr_location al;
	u8 cpumode;
	u64 offset;

	if (ptq->state->to_nr) {
		if (intel_pt_guest_kernel_ip(ip))
			return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);
		/* No support for decoding guest user space */
		return -EINVAL;
	} else if (ip >= ptq->pt->kernel_start) {
		return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);
	}

	cpumode = PERF_RECORD_MISC_USER;

	thread = ptq->thread;
	if (!thread)
		return -EINVAL;

	if (!thread__find_map(thread, cpumode, ip, &al) || !map__dso(al.map))
		return -EINVAL;

	offset = map__map_ip(al.map, ip);

	return intel_pt_match_pgd_ip(ptq->pt, ip, offset, map__dso(al.map)->long_name);
}

static bool intel_pt_pgd_ip(uint64_t ip, void *data)
{
	return __intel_pt_pgd_ip(ip, data) > 0;
}

static bool intel_pt_get_config(struct intel_pt *pt,
				struct perf_event_attr *attr, u64 *config)
{
	if (attr->type == pt->pmu_type) {
		if (config)
			*config = attr->config;
		return true;
	}

	return false;
}

static bool intel_pt_exclude_kernel(struct intel_pt *pt)
{
	struct evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, NULL) &&
		    !evsel->core.attr.exclude_kernel)
			return false;
	}
	return true;
}

static bool intel_pt_return_compression(struct intel_pt *pt)
{
	struct evsel *evsel;
	u64 config;

	if (!pt->noretcomp_bit)
		return true;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
		    (config & pt->noretcomp_bit))
			return false;
	}
	return true;
}

static bool intel_pt_branch_enable(struct intel_pt *pt)
{
	struct evsel *evsel;
	u64 config;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
		    (config & INTEL_PT_CFG_PASS_THRU) &&
		    !(config & INTEL_PT_CFG_BRANCH_EN))
			return false;
	}
	return true;
}

static bool intel_pt_disabled_tnt(struct intel_pt *pt)
{
	struct evsel *evsel;
	u64 config;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
		    config & INTEL_PT_CFG_TNT_DIS)
			return true;
	}
	return false;
}

static unsigned int intel_pt_mtc_period(struct intel_pt *pt)
{
	struct evsel *evsel;
	unsigned int shift;
	u64 config;

	if (!pt->mtc_freq_bits)
		return 0;

	for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++)
		config >>= 1;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config))
			return (config & pt->mtc_freq_bits) >> shift;
	}
	return 0;
}

static bool intel_pt_timeless_decoding(struct intel_pt *pt)
{
	struct evsel *evsel;
	bool timeless_decoding = true;
	u64 config;

	if (!pt->tsc_bit || !pt->cap_user_time_zero || pt->synth_opts.timeless_decoding)
		return true;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
			return true;
		if (intel_pt_get_config(pt, &evsel->core.attr, &config)) {
			if (config & pt->tsc_bit)
				timeless_decoding = false;
			else
				return true;
		}
	}
	return timeless_decoding;
}

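/*
 * "Timeless" decoding means there are no usable timestamps: TSC packets
 * were not enabled, or some event lacks PERF_SAMPLE_TIME, or it was
 * requested explicitly. Queues are then processed in sequence rather than
 * interleaved by timestamp on the auxtrace heap (see
 * intel_pt_setup_queue() below).
 */
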
static bool intel_pt_tracing_kernel(struct intel_pt *pt)
{
	struct evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, NULL) &&
		    !evsel->core.attr.exclude_kernel)
			return true;
	}
	return false;
}

static bool intel_pt_have_tsc(struct intel_pt *pt)
{
	struct evsel *evsel;
	bool have_tsc = false;
	u64 config;

	if (!pt->tsc_bit)
		return false;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config)) {
			if (config & pt->tsc_bit)
				have_tsc = true;
			else
				return false;
		}
	}
	return have_tsc;
}

static bool intel_pt_have_mtc(struct intel_pt *pt)
{
	struct evsel *evsel;
	u64 config;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
		    (config & pt->mtc_bit))
			return true;
	}
	return false;
}

static bool intel_pt_sampling_mode(struct intel_pt *pt)
{
	struct evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if ((evsel->core.attr.sample_type & PERF_SAMPLE_AUX) &&
		    evsel->core.attr.aux_sample_size)
			return true;
	}
	return false;
}

static u64 intel_pt_ctl(struct intel_pt *pt)
{
	struct evsel *evsel;
	u64 config;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config))
			return config;
	}
	return 0;
}

static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns)
{
	u64 quot, rem;

	quot = ns / pt->tc.time_mult;
	rem = ns % pt->tc.time_mult;
	return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) /
		pt->tc.time_mult;
}

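/*
 * Inverse of the perf mult/shift time conversion
 * (ns = (ticks * time_mult) >> time_shift), i.e.
 * ticks = (ns << time_shift) / time_mult, split into quotient and
 * remainder so the intermediate value cannot overflow 64 bits.
 * Illustrative numbers: time_mult = 1000000, time_shift = 20 and
 * ns = 3000000 give quot = 3, rem = 0, hence 3 << 20 ticks.
 */
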
static struct ip_callchain *intel_pt_alloc_chain(struct intel_pt *pt)
{
	size_t sz = sizeof(struct ip_callchain);

	/* Add 1 to callchain_sz for callchain context */
	sz += (pt->synth_opts.callchain_sz + 1) * sizeof(u64);
	return zalloc(sz);
}

static int intel_pt_callchain_init(struct intel_pt *pt)
{
	struct evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CALLCHAIN))
			evsel->synth_sample_type |= PERF_SAMPLE_CALLCHAIN;
	}

	pt->chain = intel_pt_alloc_chain(pt);
	if (!pt->chain)
		return -ENOMEM;

	return 0;
}

static void intel_pt_add_callchain(struct intel_pt *pt,
				   struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(pt->machine,
							sample->pid,
							sample->tid);

	thread_stack__sample_late(thread, sample->cpu, pt->chain,
				  pt->synth_opts.callchain_sz + 1, sample->ip,
				  pt->kernel_start);

	sample->callchain = pt->chain;
}

static struct branch_stack *intel_pt_alloc_br_stack(unsigned int entry_cnt)
{
	size_t sz = sizeof(struct branch_stack);

	sz += entry_cnt * sizeof(struct branch_entry);
	return zalloc(sz);
}

static int intel_pt_br_stack_init(struct intel_pt *pt)
{
	struct evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK))
			evsel->synth_sample_type |= PERF_SAMPLE_BRANCH_STACK;
	}

	pt->br_stack = intel_pt_alloc_br_stack(pt->br_stack_sz);
	if (!pt->br_stack)
		return -ENOMEM;

	return 0;
}

static void intel_pt_add_br_stack(struct intel_pt *pt,
				  struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(pt->machine,
							sample->pid,
							sample->tid);

	thread_stack__br_sample_late(thread, sample->cpu, pt->br_stack,
				     pt->br_stack_sz, sample->ip,
				     pt->kernel_start);

	sample->branch_stack = pt->br_stack;
}

/* INTEL_PT_LBR_0, INTEL_PT_LBR_1 and INTEL_PT_LBR_2 */
#define LBRS_MAX (INTEL_PT_BLK_ITEM_ID_CNT * 3U)

static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
						   unsigned int queue_nr)
{
	struct intel_pt_params params = { .get_trace = 0, };
	struct perf_env *env = pt->machine->env;
	struct intel_pt_queue *ptq;

	ptq = zalloc(sizeof(struct intel_pt_queue));
	if (!ptq)
		return NULL;

	if (pt->synth_opts.callchain) {
		ptq->chain = intel_pt_alloc_chain(pt);
		if (!ptq->chain)
			goto out_free;
	}

	if (pt->synth_opts.last_branch || pt->synth_opts.other_events) {
		unsigned int entry_cnt = max(LBRS_MAX, pt->br_stack_sz);

		ptq->last_branch = intel_pt_alloc_br_stack(entry_cnt);
		if (!ptq->last_branch)
			goto out_free;
	}

	ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
	if (!ptq->event_buf)
		goto out_free;

	ptq->pt = pt;
	ptq->queue_nr = queue_nr;
	ptq->exclude_kernel = intel_pt_exclude_kernel(pt);
	ptq->pid = -1;
	ptq->tid = -1;
	ptq->cpu = -1;
	ptq->next_tid = -1;

	params.get_trace = intel_pt_get_trace;
	params.walk_insn = intel_pt_walk_next_insn;
	params.lookahead = intel_pt_lookahead;
	params.findnew_vmcs_info = intel_pt_findnew_vmcs_info;
	params.data = ptq;
	params.return_compression = intel_pt_return_compression(pt);
	params.branch_enable = intel_pt_branch_enable(pt);
	params.ctl = intel_pt_ctl(pt);
	params.max_non_turbo_ratio = pt->max_non_turbo_ratio;
	params.mtc_period = intel_pt_mtc_period(pt);
	params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
	params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d;
	params.quick = pt->synth_opts.quick;
	params.vm_time_correlation = pt->synth_opts.vm_time_correlation;
	params.vm_tm_corr_dry_run = pt->synth_opts.vm_tm_corr_dry_run;
	params.first_timestamp = pt->first_timestamp;
	params.max_loops = pt->max_loops;

	/* Cannot walk code without TNT, so force 'quick' mode */
	if (params.branch_enable && intel_pt_disabled_tnt(pt) && !params.quick)
		params.quick = 1;

	if (pt->filts.cnt > 0)
		params.pgd_ip = intel_pt_pgd_ip;

	if (pt->synth_opts.instructions || pt->synth_opts.cycles) {
		if (pt->synth_opts.period) {
			switch (pt->synth_opts.period_type) {
			case PERF_ITRACE_PERIOD_INSTRUCTIONS:
				params.period_type =
					INTEL_PT_PERIOD_INSTRUCTIONS;
				params.period = pt->synth_opts.period;
				break;
			case PERF_ITRACE_PERIOD_TICKS:
				params.period_type = INTEL_PT_PERIOD_TICKS;
				params.period = pt->synth_opts.period;
				break;
			case PERF_ITRACE_PERIOD_NANOSECS:
				params.period_type = INTEL_PT_PERIOD_TICKS;
				params.period = intel_pt_ns_to_ticks(pt,
							pt->synth_opts.period);
				break;
			default:
				break;
			}
		}

		if (!params.period) {
			params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS;
			params.period = 1;
		}
	}

	if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18))
		params.flags |= INTEL_PT_FUP_WITH_NLIP;

	ptq->decoder = intel_pt_decoder_new(&params);
	if (!ptq->decoder)
		goto out_free;

	return ptq;

out_free:
	zfree(&ptq->event_buf);
	zfree(&ptq->last_branch);
	zfree(&ptq->chain);
	free(ptq);
	return NULL;
}

static void intel_pt_free_queue(void *priv)
{
	struct intel_pt_queue *ptq = priv;

	if (!ptq)
		return;
	thread__zput(ptq->thread);
	thread__zput(ptq->guest_thread);
	thread__zput(ptq->unknown_guest_thread);
	intel_pt_decoder_free(ptq->decoder);
	zfree(&ptq->event_buf);
	zfree(&ptq->last_branch);
	zfree(&ptq->chain);
	free(ptq);
}

static void intel_pt_first_timestamp(struct intel_pt *pt, u64 timestamp)
{
	unsigned int i;

	pt->first_timestamp = timestamp;

	for (i = 0; i < pt->queues.nr_queues; i++) {
		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
		struct intel_pt_queue *ptq = queue->priv;

		if (ptq && ptq->decoder)
			intel_pt_set_first_timestamp(ptq->decoder, timestamp);
	}
}

static int intel_pt_get_guest_from_sideband(struct intel_pt_queue *ptq)
{
	struct machines *machines = &ptq->pt->session->machines;
	struct machine *machine;
	pid_t machine_pid = ptq->pid;
	pid_t tid;
	int vcpu;

	if (machine_pid <= 0)
		return 0; /* Not a guest machine */

	machine = machines__find(machines, machine_pid);
	if (!machine)
		return 0; /* Not a guest machine */

	if (ptq->guest_machine != machine) {
		ptq->guest_machine = NULL;
		thread__zput(ptq->guest_thread);
		thread__zput(ptq->unknown_guest_thread);

		ptq->unknown_guest_thread = machine__find_thread(machine, 0, 0);
		if (!ptq->unknown_guest_thread)
			return -1;
		ptq->guest_machine = machine;
	}

	vcpu = ptq->thread ? ptq->thread->guest_cpu : -1;
	if (vcpu < 0)
		return -1;

	tid = machine__get_current_tid(machine, vcpu);

	if (ptq->guest_thread && ptq->guest_thread->tid != tid)
		thread__zput(ptq->guest_thread);

	if (!ptq->guest_thread) {
		ptq->guest_thread = machine__find_thread(machine, -1, tid);
		if (!ptq->guest_thread)
			return -1;
	}

	ptq->guest_machine_pid = machine_pid;
	ptq->guest_pid = ptq->guest_thread->pid_;
	ptq->guest_tid = tid;
	ptq->vcpu = vcpu;

	return 0;
}

static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
				     struct auxtrace_queue *queue)
{
	struct intel_pt_queue *ptq = queue->priv;

	if (queue->tid == -1 || pt->have_sched_switch) {
		ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
		if (ptq->tid == -1)
			ptq->pid = -1;
		thread__zput(ptq->thread);
	}

	if (!ptq->thread && ptq->tid != -1)
		ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);

	if (ptq->thread) {
		ptq->pid = ptq->thread->pid_;
		if (queue->cpu == -1)
			ptq->cpu = ptq->thread->cpu;
	}

	if (pt->have_guest_sideband && intel_pt_get_guest_from_sideband(ptq)) {
		ptq->guest_machine_pid = 0;
		ptq->guest_pid = -1;
		ptq->guest_tid = -1;
		ptq->vcpu = -1;
	}
}

static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;

	ptq->insn_len = 0;
	if (ptq->state->flags & INTEL_PT_ABORT_TX) {
		ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
	} else if (ptq->state->flags & INTEL_PT_ASYNC) {
		if (!ptq->state->to_ip)
			ptq->flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_END;
		else if (ptq->state->from_nr && !ptq->state->to_nr)
			ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
				     PERF_IP_FLAG_VMEXIT;
		else
			ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
				     PERF_IP_FLAG_ASYNC |
				     PERF_IP_FLAG_INTERRUPT;
	} else {
		if (ptq->state->from_ip)
			ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
		else
			ptq->flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_BEGIN;
		if (ptq->state->flags & INTEL_PT_IN_TX)
			ptq->flags |= PERF_IP_FLAG_IN_TX;
		ptq->insn_len = ptq->state->insn_len;
		memcpy(ptq->insn, ptq->state->insn, INTEL_PT_INSN_BUF_SZ);
	}

	if (ptq->state->type & INTEL_PT_TRACE_BEGIN)
		ptq->flags |= PERF_IP_FLAG_TRACE_BEGIN;
	if (ptq->state->type & INTEL_PT_TRACE_END)
		ptq->flags |= PERF_IP_FLAG_TRACE_END;

	if (pt->cap_event_trace) {
		if (ptq->state->type & INTEL_PT_IFLAG_CHG) {
			if (!ptq->state->from_iflag)
				ptq->flags |= PERF_IP_FLAG_INTR_DISABLE;
			if (ptq->state->from_iflag != ptq->state->to_iflag)
				ptq->flags |= PERF_IP_FLAG_INTR_TOGGLE;
		} else if (!ptq->state->to_iflag) {
			ptq->flags |= PERF_IP_FLAG_INTR_DISABLE;
		}
	}
}

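/*
 * Summary of the flag mapping above: TSX aborts and asynchronous branches
 * (interrupts, VM exits) get fixed flag combinations; ordinary branches
 * take their type from the decoded instruction; trace begin/end and
 * interrupt-flag changes are ORed in afterwards.
 */
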
1530static void intel_pt_setup_time_range(struct intel_pt *pt,
1531 struct intel_pt_queue *ptq)
1532{
1533 if (!pt->range_cnt)
1534 return;
1535
1536 ptq->sel_timestamp = pt->time_ranges[0].start;
1537 ptq->sel_idx = 0;
1538
1539 if (ptq->sel_timestamp) {
1540 ptq->sel_start = true;
1541 } else {
1542 ptq->sel_timestamp = pt->time_ranges[0].end;
1543 ptq->sel_start = false;
1544 }
1545}
1546
1547static int intel_pt_setup_queue(struct intel_pt *pt,
1548 struct auxtrace_queue *queue,
1549 unsigned int queue_nr)
1550{
1551 struct intel_pt_queue *ptq = queue->priv;
1552
1553 if (list_empty(&queue->head))
1554 return 0;
1555
1556 if (!ptq) {
1557 ptq = intel_pt_alloc_queue(pt, queue_nr);
1558 if (!ptq)
1559 return -ENOMEM;
1560 queue->priv = ptq;
1561
1562 if (queue->cpu != -1)
1563 ptq->cpu = queue->cpu;
1564 ptq->tid = queue->tid;
1565
1566 ptq->cbr_seen = UINT_MAX;
1567
1568 if (pt->sampling_mode && !pt->snapshot_mode &&
1569 pt->timeless_decoding)
1570 ptq->step_through_buffers = true;
1571
1572 ptq->sync_switch = pt->sync_switch;
1573
1574 intel_pt_setup_time_range(pt, ptq);
1575 }
1576
1577 if (!ptq->on_heap &&
1578 (!ptq->sync_switch ||
1579 ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
1580 const struct intel_pt_state *state;
1581 int ret;
1582
1583 if (pt->timeless_decoding)
1584 return 0;
1585
1586 intel_pt_log("queue %u getting timestamp\n", queue_nr);
1587 intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
1588 queue_nr, ptq->cpu, ptq->pid, ptq->tid);
1589
1590 if (ptq->sel_start && ptq->sel_timestamp) {
1591 ret = intel_pt_fast_forward(ptq->decoder,
1592 ptq->sel_timestamp);
1593 if (ret)
1594 return ret;
1595 }
1596
1597 while (1) {
1598 state = intel_pt_decode(ptq->decoder);
1599 if (state->err) {
1600 if (state->err == INTEL_PT_ERR_NODATA) {
1601 intel_pt_log("queue %u has no timestamp\n",
1602 queue_nr);
1603 return 0;
1604 }
1605 continue;
1606 }
1607 if (state->timestamp)
1608 break;
1609 }
1610
1611 ptq->timestamp = state->timestamp;
1612 intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n",
1613 queue_nr, ptq->timestamp);
1614 ptq->state = state;
1615 ptq->have_sample = true;
1616 if (ptq->sel_start && ptq->sel_timestamp &&
1617 ptq->timestamp < ptq->sel_timestamp)
1618 ptq->have_sample = false;
1619 intel_pt_sample_flags(ptq);
1620 ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp);
1621 if (ret)
1622 return ret;
1623 ptq->on_heap = true;
1624 }
1625
1626 return 0;
1627}
1628
1629static int intel_pt_setup_queues(struct intel_pt *pt)
1630{
1631 unsigned int i;
1632 int ret;
1633
1634 for (i = 0; i < pt->queues.nr_queues; i++) {
1635 ret = intel_pt_setup_queue(pt, &pt->queues.queue_array[i], i);
1636 if (ret)
1637 return ret;
1638 }
1639 return 0;
1640}
1641
1642static inline bool intel_pt_skip_event(struct intel_pt *pt)
1643{
1644 return pt->synth_opts.initial_skip &&
1645 pt->num_events++ < pt->synth_opts.initial_skip;
1646}
1647
1648/*
1649 * Cannot count CBR as skipped because it won't go away until cbr == cbr_seen.
1650 * Also ensure CBR is first non-skipped event by allowing for 4 more samples
1651 * from this decoder state.
1652 */
1653static inline bool intel_pt_skip_cbr_event(struct intel_pt *pt)
1654{
1655 return pt->synth_opts.initial_skip &&
1656 pt->num_events + 4 < pt->synth_opts.initial_skip;
1657}
1658
1659static void intel_pt_prep_a_sample(struct intel_pt_queue *ptq,
1660 union perf_event *event,
1661 struct perf_sample *sample)
1662{
1663 event->sample.header.type = PERF_RECORD_SAMPLE;
1664 event->sample.header.size = sizeof(struct perf_event_header);
1665
1666 sample->pid = ptq->pid;
1667 sample->tid = ptq->tid;
1668
1669 if (ptq->pt->have_guest_sideband) {
1670 if ((ptq->state->from_ip && ptq->state->from_nr) ||
1671 (ptq->state->to_ip && ptq->state->to_nr)) {
1672 sample->pid = ptq->guest_pid;
1673 sample->tid = ptq->guest_tid;
1674 sample->machine_pid = ptq->guest_machine_pid;
1675 sample->vcpu = ptq->vcpu;
1676 }
1677 }
1678
1679 sample->cpu = ptq->cpu;
1680 sample->insn_len = ptq->insn_len;
1681 memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
1682}
1683
1684static void intel_pt_prep_b_sample(struct intel_pt *pt,
1685 struct intel_pt_queue *ptq,
1686 union perf_event *event,
1687 struct perf_sample *sample)
1688{
1689 intel_pt_prep_a_sample(ptq, event, sample);
1690
1691 if (!pt->timeless_decoding)
1692 sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
1693
1694 sample->ip = ptq->state->from_ip;
1695 sample->addr = ptq->state->to_ip;
1696 sample->cpumode = intel_pt_cpumode(ptq, sample->ip, sample->addr);
1697 sample->period = 1;
1698 sample->flags = ptq->flags;
1699
1700 event->sample.header.misc = sample->cpumode;
1701}
1702
1703static int intel_pt_inject_event(union perf_event *event,
1704 struct perf_sample *sample, u64 type)
1705{
1706 event->header.size = perf_event__sample_event_size(sample, type, 0);
1707 return perf_event__synthesize_sample(event, type, 0, sample);
1708}
1709
1710static inline int intel_pt_opt_inject(struct intel_pt *pt,
1711 union perf_event *event,
1712 struct perf_sample *sample, u64 type)
1713{
1714 if (!pt->synth_opts.inject)
1715 return 0;
1716
1717 return intel_pt_inject_event(event, sample, type);
1718}
1719
1720static int intel_pt_deliver_synth_event(struct intel_pt *pt,
1721 union perf_event *event,
1722 struct perf_sample *sample, u64 type)
1723{
1724 int ret;
1725
1726 ret = intel_pt_opt_inject(pt, event, sample, type);
1727 if (ret)
1728 return ret;
1729
1730 ret = perf_session__deliver_synth_event(pt->session, event, sample);
1731 if (ret)
1732 pr_err("Intel PT: failed to deliver event, error %d\n", ret);
1733
1734 return ret;
1735}
1736
1737static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
1738{
1739 struct intel_pt *pt = ptq->pt;
1740 union perf_event *event = ptq->event_buf;
1741 struct perf_sample sample = { .ip = 0, };
1742 struct dummy_branch_stack {
1743 u64 nr;
1744 u64 hw_idx;
1745 struct branch_entry entries;
1746 } dummy_bs;
1747
1748 if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
1749 return 0;
1750
1751 if (intel_pt_skip_event(pt))
1752 return 0;
1753
1754 intel_pt_prep_b_sample(pt, ptq, event, &sample);
1755
1756 sample.id = ptq->pt->branches_id;
1757 sample.stream_id = ptq->pt->branches_id;
1758
1759 /*
1760 * perf report cannot handle events without a branch stack when using
1761 * SORT_MODE__BRANCH so make a dummy one.
1762 */
1763 if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) {
1764 dummy_bs = (struct dummy_branch_stack){
1765 .nr = 1,
1766 .hw_idx = -1ULL,
1767 .entries = {
1768 .from = sample.ip,
1769 .to = sample.addr,
1770 },
1771 };
1772 sample.branch_stack = (struct branch_stack *)&dummy_bs;
1773 }
1774
1775 if (ptq->sample_ipc)
1776 sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt;
1777 if (sample.cyc_cnt) {
1778 sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_br_insn_cnt;
1779 ptq->last_br_insn_cnt = ptq->ipc_insn_cnt;
1780 ptq->last_br_cyc_cnt = ptq->ipc_cyc_cnt;
1781 }
1782
1783 return intel_pt_deliver_synth_event(pt, event, &sample,
1784 pt->branches_sample_type);
1785}
1786
1787static void intel_pt_prep_sample(struct intel_pt *pt,
1788 struct intel_pt_queue *ptq,
1789 union perf_event *event,
1790 struct perf_sample *sample)
1791{
1792 intel_pt_prep_b_sample(pt, ptq, event, sample);
1793
1794 if (pt->synth_opts.callchain) {
1795 thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
1796 pt->synth_opts.callchain_sz + 1,
1797 sample->ip, pt->kernel_start);
1798 sample->callchain = ptq->chain;
1799 }
1800
1801 if (pt->synth_opts.last_branch) {
1802 thread_stack__br_sample(ptq->thread, ptq->cpu, ptq->last_branch,
1803 pt->br_stack_sz);
1804 sample->branch_stack = ptq->last_branch;
1805 }
1806}
1807
1808static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
1809{
1810 struct intel_pt *pt = ptq->pt;
1811 union perf_event *event = ptq->event_buf;
1812 struct perf_sample sample = { .ip = 0, };
1813
1814 if (intel_pt_skip_event(pt))
1815 return 0;
1816
1817 intel_pt_prep_sample(pt, ptq, event, &sample);
1818
1819 sample.id = ptq->pt->instructions_id;
1820 sample.stream_id = ptq->pt->instructions_id;
1821 if (pt->synth_opts.quick)
1822 sample.period = 1;
1823 else
1824 sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;
1825
1826 if (ptq->sample_ipc)
1827 sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt;
1828 if (sample.cyc_cnt) {
1829 sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_in_insn_cnt;
1830 ptq->last_in_insn_cnt = ptq->ipc_insn_cnt;
1831 ptq->last_in_cyc_cnt = ptq->ipc_cyc_cnt;
1832 }
1833
1834 ptq->last_insn_cnt = ptq->state->tot_insn_cnt;
1835
1836 return intel_pt_deliver_synth_event(pt, event, &sample,
1837 pt->instructions_sample_type);
1838}
1839
1840static int intel_pt_synth_cycle_sample(struct intel_pt_queue *ptq)
1841{
1842 struct intel_pt *pt = ptq->pt;
1843 union perf_event *event = ptq->event_buf;
1844 struct perf_sample sample = { .ip = 0, };
1845 u64 period = 0;
1846
1847 if (ptq->sample_ipc)
1848 period = ptq->ipc_cyc_cnt - ptq->last_cy_cyc_cnt;
1849
1850 if (!period || intel_pt_skip_event(pt))
1851 return 0;
1852
1853 intel_pt_prep_sample(pt, ptq, event, &sample);
1854
1855 sample.id = ptq->pt->cycles_id;
1856 sample.stream_id = ptq->pt->cycles_id;
1857 sample.period = period;
1858
1859 sample.cyc_cnt = period;
1860 sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_cy_insn_cnt;
1861 ptq->last_cy_insn_cnt = ptq->ipc_insn_cnt;
1862 ptq->last_cy_cyc_cnt = ptq->ipc_cyc_cnt;
1863
1864 return intel_pt_deliver_synth_event(pt, event, &sample, pt->cycles_sample_type);
1865}
1866
1867static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
1868{
1869 struct intel_pt *pt = ptq->pt;
1870 union perf_event *event = ptq->event_buf;
1871 struct perf_sample sample = { .ip = 0, };
1872
1873 if (intel_pt_skip_event(pt))
1874 return 0;
1875
1876 intel_pt_prep_sample(pt, ptq, event, &sample);
1877
1878 sample.id = ptq->pt->transactions_id;
1879 sample.stream_id = ptq->pt->transactions_id;
1880
1881 return intel_pt_deliver_synth_event(pt, event, &sample,
1882 pt->transactions_sample_type);
1883}
1884
1885static void intel_pt_prep_p_sample(struct intel_pt *pt,
1886 struct intel_pt_queue *ptq,
1887 union perf_event *event,
1888 struct perf_sample *sample)
1889{
1890 intel_pt_prep_sample(pt, ptq, event, sample);
1891
1892 /*
1893 * Zero IP is used to mean "trace start" but that is not the case for
1894 * power or PTWRITE events with no IP, so clear the flags.
1895 */
1896 if (!sample->ip)
1897 sample->flags = 0;
1898}
1899
1900static int intel_pt_synth_ptwrite_sample(struct intel_pt_queue *ptq)
1901{
1902 struct intel_pt *pt = ptq->pt;
1903 union perf_event *event = ptq->event_buf;
1904 struct perf_sample sample = { .ip = 0, };
1905 struct perf_synth_intel_ptwrite raw;
1906
1907 if (intel_pt_skip_event(pt))
1908 return 0;
1909
1910 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1911
1912 sample.id = ptq->pt->ptwrites_id;
1913 sample.stream_id = ptq->pt->ptwrites_id;
1914
1915 raw.flags = 0;
1916 raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
1917 raw.payload = cpu_to_le64(ptq->state->ptw_payload);
1918
1919 sample.raw_size = perf_synth__raw_size(raw);
1920 sample.raw_data = perf_synth__raw_data(&raw);
1921
1922 return intel_pt_deliver_synth_event(pt, event, &sample,
1923 pt->ptwrites_sample_type);
1924}
1925
1926static int intel_pt_synth_cbr_sample(struct intel_pt_queue *ptq)
1927{
1928 struct intel_pt *pt = ptq->pt;
1929 union perf_event *event = ptq->event_buf;
1930 struct perf_sample sample = { .ip = 0, };
1931 struct perf_synth_intel_cbr raw;
1932 u32 flags;
1933
1934 if (intel_pt_skip_cbr_event(pt))
1935 return 0;
1936
1937 ptq->cbr_seen = ptq->state->cbr;
1938
1939 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1940
1941 sample.id = ptq->pt->cbr_id;
1942 sample.stream_id = ptq->pt->cbr_id;
1943
1944 flags = (u16)ptq->state->cbr_payload | (pt->max_non_turbo_ratio << 16);
1945 raw.flags = cpu_to_le32(flags);
1946 raw.freq = cpu_to_le32(raw.cbr * pt->cbr2khz);
1947 raw.reserved3 = 0;
1948
1949 sample.raw_size = perf_synth__raw_size(raw);
1950 sample.raw_data = perf_synth__raw_data(&raw);
1951
1952 return intel_pt_deliver_synth_event(pt, event, &sample,
1953 pt->pwr_events_sample_type);
1954}
1955
1956static int intel_pt_synth_psb_sample(struct intel_pt_queue *ptq)
1957{
1958 struct intel_pt *pt = ptq->pt;
1959 union perf_event *event = ptq->event_buf;
1960 struct perf_sample sample = { .ip = 0, };
1961 struct perf_synth_intel_psb raw;
1962
1963 if (intel_pt_skip_event(pt))
1964 return 0;
1965
1966 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1967
1968 sample.id = ptq->pt->psb_id;
1969 sample.stream_id = ptq->pt->psb_id;
1970 sample.flags = 0;
1971
1972 raw.reserved = 0;
1973 raw.offset = ptq->state->psb_offset;
1974
1975 sample.raw_size = perf_synth__raw_size(raw);
1976 sample.raw_data = perf_synth__raw_data(&raw);
1977
1978 return intel_pt_deliver_synth_event(pt, event, &sample,
1979 pt->pwr_events_sample_type);
1980}
1981
1982static int intel_pt_synth_mwait_sample(struct intel_pt_queue *ptq)
1983{
1984 struct intel_pt *pt = ptq->pt;
1985 union perf_event *event = ptq->event_buf;
1986 struct perf_sample sample = { .ip = 0, };
1987 struct perf_synth_intel_mwait raw;
1988
1989 if (intel_pt_skip_event(pt))
1990 return 0;
1991
1992 intel_pt_prep_p_sample(pt, ptq, event, &sample);
1993
1994 sample.id = ptq->pt->mwait_id;
1995 sample.stream_id = ptq->pt->mwait_id;
1996
1997 raw.reserved = 0;
1998 raw.payload = cpu_to_le64(ptq->state->mwait_payload);
1999
2000 sample.raw_size = perf_synth__raw_size(raw);
2001 sample.raw_data = perf_synth__raw_data(&raw);
2002
2003 return intel_pt_deliver_synth_event(pt, event, &sample,
2004 pt->pwr_events_sample_type);
2005}
2006
2007static int intel_pt_synth_pwre_sample(struct intel_pt_queue *ptq)
2008{
2009 struct intel_pt *pt = ptq->pt;
2010 union perf_event *event = ptq->event_buf;
2011 struct perf_sample sample = { .ip = 0, };
2012 struct perf_synth_intel_pwre raw;
2013
2014 if (intel_pt_skip_event(pt))
2015 return 0;
2016
2017 intel_pt_prep_p_sample(pt, ptq, event, &sample);
2018
2019 sample.id = ptq->pt->pwre_id;
2020 sample.stream_id = ptq->pt->pwre_id;
2021
2022 raw.reserved = 0;
2023 raw.payload = cpu_to_le64(ptq->state->pwre_payload);
2024
2025 sample.raw_size = perf_synth__raw_size(raw);
2026 sample.raw_data = perf_synth__raw_data(&raw);
2027
2028 return intel_pt_deliver_synth_event(pt, event, &sample,
2029 pt->pwr_events_sample_type);
2030}
2031
2032static int intel_pt_synth_exstop_sample(struct intel_pt_queue *ptq)
2033{
2034 struct intel_pt *pt = ptq->pt;
2035 union perf_event *event = ptq->event_buf;
2036 struct perf_sample sample = { .ip = 0, };
2037 struct perf_synth_intel_exstop raw;
2038
2039 if (intel_pt_skip_event(pt))
2040 return 0;
2041
2042 intel_pt_prep_p_sample(pt, ptq, event, &sample);
2043
2044 sample.id = ptq->pt->exstop_id;
2045 sample.stream_id = ptq->pt->exstop_id;
2046
2047 raw.flags = 0;
2048 raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
2049
2050 sample.raw_size = perf_synth__raw_size(raw);
2051 sample.raw_data = perf_synth__raw_data(&raw);
2052
2053 return intel_pt_deliver_synth_event(pt, event, &sample,
2054 pt->pwr_events_sample_type);
2055}
2056
2057static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq)
2058{
2059 struct intel_pt *pt = ptq->pt;
2060 union perf_event *event = ptq->event_buf;
2061 struct perf_sample sample = { .ip = 0, };
2062 struct perf_synth_intel_pwrx raw;
2063
2064 if (intel_pt_skip_event(pt))
2065 return 0;
2066
2067 intel_pt_prep_p_sample(pt, ptq, event, &sample);
2068
2069 sample.id = ptq->pt->pwrx_id;
2070 sample.stream_id = ptq->pt->pwrx_id;
2071
2072 raw.reserved = 0;
2073 raw.payload = cpu_to_le64(ptq->state->pwrx_payload);
2074
2075 sample.raw_size = perf_synth__raw_size(raw);
2076 sample.raw_data = perf_synth__raw_data(&raw);
2077
2078 return intel_pt_deliver_synth_event(pt, event, &sample,
2079 pt->pwr_events_sample_type);
2080}
2081
2082/*
2083 * PEBS gp_regs array indexes plus 1 so that 0 means not present. Refer
2084 * intel_pt_add_gp_regs().
2085 */
2086static const int pebs_gp_regs[] = {
2087 [PERF_REG_X86_FLAGS] = 1,
2088 [PERF_REG_X86_IP] = 2,
2089 [PERF_REG_X86_AX] = 3,
2090 [PERF_REG_X86_CX] = 4,
2091 [PERF_REG_X86_DX] = 5,
2092 [PERF_REG_X86_BX] = 6,
2093 [PERF_REG_X86_SP] = 7,
2094 [PERF_REG_X86_BP] = 8,
2095 [PERF_REG_X86_SI] = 9,
2096 [PERF_REG_X86_DI] = 10,
2097 [PERF_REG_X86_R8] = 11,
2098 [PERF_REG_X86_R9] = 12,
2099 [PERF_REG_X86_R10] = 13,
2100 [PERF_REG_X86_R11] = 14,
2101 [PERF_REG_X86_R12] = 15,
2102 [PERF_REG_X86_R13] = 16,
2103 [PERF_REG_X86_R14] = 17,
2104 [PERF_REG_X86_R15] = 18,
2105};
2106
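/*
 * Example: pebs_gp_regs[PERF_REG_X86_IP] is 2, so, when present, the IP
 * is items->val[INTEL_PT_GP_REGS_POS][1]. Registers absent from the
 * table have a zero entry, which decodes to index -1 and is skipped.
 */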
2107static u64 *intel_pt_add_gp_regs(struct regs_dump *intr_regs, u64 *pos,
2108 const struct intel_pt_blk_items *items,
2109 u64 regs_mask)
2110{
2111 const u64 *gp_regs = items->val[INTEL_PT_GP_REGS_POS];
2112 u32 mask = items->mask[INTEL_PT_GP_REGS_POS];
2113 u32 bit;
2114 int i;
2115
2116 for (i = 0, bit = 1; i < PERF_REG_X86_64_MAX; i++, bit <<= 1) {
2117 /* Get the PEBS gp_regs array index */
2118 int n = pebs_gp_regs[i] - 1;
2119
2120 if (n < 0)
2121 continue;
2122 /*
2123 * Add only registers that were requested (i.e. 'regs_mask') and
2124 * that were provided (i.e. 'mask'), and update the resulting
2125 * mask (i.e. 'intr_regs->mask') accordingly.
2126 */
2127 if (mask & 1 << n && regs_mask & bit) {
2128 intr_regs->mask |= bit;
2129 *pos++ = gp_regs[n];
2130 }
2131 }
2132
2133 return pos;
2134}
2135
2136#ifndef PERF_REG_X86_XMM0
2137#define PERF_REG_X86_XMM0 32
2138#endif
2139
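/*
 * In sample_regs_intr, the XMM registers occupy the mask bits from
 * PERF_REG_X86_XMM0 upwards, so shifting regs_mask right by
 * PERF_REG_X86_XMM0 aligns the requested registers with items->has_xmm.
 * Each remaining set bit selects one 64-bit value from items->xmm[].
 */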
2140static void intel_pt_add_xmm(struct regs_dump *intr_regs, u64 *pos,
2141 const struct intel_pt_blk_items *items,
2142 u64 regs_mask)
2143{
2144 u32 mask = items->has_xmm & (regs_mask >> PERF_REG_X86_XMM0);
2145 const u64 *xmm = items->xmm;
2146
2147 /*
2148 * If there are any XMM registers, then there should be all of them.
2149 * Nevertheless, follow the logic to add only registers that were
2150 * requested (i.e. 'regs_mask') and that were provided (i.e. 'mask'),
2151 * and update the resulting mask (i.e. 'intr_regs->mask') accordingly.
2152 */
2153 intr_regs->mask |= (u64)mask << PERF_REG_X86_XMM0;
2154
2155 for (; mask; mask >>= 1, xmm++) {
2156 if (mask & 1)
2157 *pos++ = *xmm;
2158 }
2159}
2160
2161#define LBR_INFO_MISPRED (1ULL << 63)
2162#define LBR_INFO_IN_TX (1ULL << 62)
2163#define LBR_INFO_ABORT (1ULL << 61)
2164#define LBR_INFO_CYCLES 0xffff
2165
2166/* Refer to the kernel's intel_pmu_store_pebs_lbrs() */
2167static u64 intel_pt_lbr_flags(u64 info)
2168{
2169 union {
2170 struct branch_flags flags;
2171 u64 result;
2172 } u;
2173
2174 u.result = 0;
2175 u.flags.mispred = !!(info & LBR_INFO_MISPRED);
2176 u.flags.predicted = !(info & LBR_INFO_MISPRED);
2177 u.flags.in_tx = !!(info & LBR_INFO_IN_TX);
2178 u.flags.abort = !!(info & LBR_INFO_ABORT);
2179 u.flags.cycles = info & LBR_INFO_CYCLES;
2180
2181 return u.result;
2182}
2183
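/*
 * Each of the three LBR block items supplies values in triples of
 * (branch from, branch to, LBR_INFO). A triple is copied to the branch
 * stack only when all three values are present, i.e. three consecutive
 * mask bits are set, and the info word is decoded into branch flags.
 */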
2184static void intel_pt_add_lbrs(struct branch_stack *br_stack,
2185 const struct intel_pt_blk_items *items)
2186{
2187 u64 *to;
2188 int i;
2189
2190 br_stack->nr = 0;
2191
2192 to = &br_stack->entries[0].from;
2193
2194 for (i = INTEL_PT_LBR_0_POS; i <= INTEL_PT_LBR_2_POS; i++) {
2195 u32 mask = items->mask[i];
2196 const u64 *from = items->val[i];
2197
2198 for (; mask; mask >>= 3, from += 3) {
2199 if ((mask & 7) == 7) {
2200 *to++ = from[0];
2201 *to++ = from[1];
2202 *to++ = intel_pt_lbr_flags(from[2]);
2203 br_stack->nr += 1;
2204 }
2205 }
2206 }
2207}
2208
2209static int intel_pt_do_synth_pebs_sample(struct intel_pt_queue *ptq, struct evsel *evsel, u64 id)
2210{
2211 const struct intel_pt_blk_items *items = &ptq->state->items;
2212 struct perf_sample sample = { .ip = 0, };
2213 union perf_event *event = ptq->event_buf;
2214 struct intel_pt *pt = ptq->pt;
2215 u64 sample_type = evsel->core.attr.sample_type;
2216 u8 cpumode;
2217 u64 regs[8 * sizeof(sample.intr_regs.mask)];
2218
2219 if (intel_pt_skip_event(pt))
2220 return 0;
2221
2222 intel_pt_prep_a_sample(ptq, event, &sample);
2223
2224 sample.id = id;
2225 sample.stream_id = id;
2226
2227 if (!evsel->core.attr.freq)
2228 sample.period = evsel->core.attr.sample_period;
2229
2230 /* No support for non-zero CS base */
2231 if (items->has_ip)
2232 sample.ip = items->ip;
2233 else if (items->has_rip)
2234 sample.ip = items->rip;
2235 else
2236 sample.ip = ptq->state->from_ip;
2237
2238 cpumode = intel_pt_cpumode(ptq, sample.ip, 0);
2239
2240 event->sample.header.misc = cpumode | PERF_RECORD_MISC_EXACT_IP;
2241
2242 sample.cpumode = cpumode;
2243
2244 if (sample_type & PERF_SAMPLE_TIME) {
2245 u64 timestamp = 0;
2246
2247 if (items->has_timestamp)
2248 timestamp = items->timestamp;
2249 else if (!pt->timeless_decoding)
2250 timestamp = ptq->timestamp;
2251 if (timestamp)
2252 sample.time = tsc_to_perf_time(timestamp, &pt->tc);
2253 }
2254
2255 if (sample_type & PERF_SAMPLE_CALLCHAIN &&
2256 pt->synth_opts.callchain) {
2257 thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
2258 pt->synth_opts.callchain_sz, sample.ip,
2259 pt->kernel_start);
2260 sample.callchain = ptq->chain;
2261 }
2262
2263 if (sample_type & PERF_SAMPLE_REGS_INTR &&
2264 (items->mask[INTEL_PT_GP_REGS_POS] ||
2265 items->mask[INTEL_PT_XMM_POS])) {
2266 u64 regs_mask = evsel->core.attr.sample_regs_intr;
2267 u64 *pos;
2268
2269 sample.intr_regs.abi = items->is_32_bit ?
2270 PERF_SAMPLE_REGS_ABI_32 :
2271 PERF_SAMPLE_REGS_ABI_64;
2272 sample.intr_regs.regs = regs;
2273
2274 pos = intel_pt_add_gp_regs(&sample.intr_regs, regs, items, regs_mask);
2275
2276 intel_pt_add_xmm(&sample.intr_regs, pos, items, regs_mask);
2277 }
2278
2279 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
2280 if (items->mask[INTEL_PT_LBR_0_POS] ||
2281 items->mask[INTEL_PT_LBR_1_POS] ||
2282 items->mask[INTEL_PT_LBR_2_POS]) {
2283 intel_pt_add_lbrs(ptq->last_branch, items);
2284 } else if (pt->synth_opts.last_branch) {
2285 thread_stack__br_sample(ptq->thread, ptq->cpu,
2286 ptq->last_branch,
2287 pt->br_stack_sz);
2288 } else {
2289 ptq->last_branch->nr = 0;
2290 }
2291 sample.branch_stack = ptq->last_branch;
2292 }
2293
2294 if (sample_type & PERF_SAMPLE_ADDR && items->has_mem_access_address)
2295 sample.addr = items->mem_access_address;
2296
2297 if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
2298 /*
2299 * Refer to the kernel's setup_pebs_adaptive_sample_data() and
2300 * intel_hsw_weight().
2301 */
2302 if (items->has_mem_access_latency) {
2303 u64 weight = items->mem_access_latency >> 32;
2304
2305 /*
2306 * Starting from SPR, the mem access latency field
2307 * contains both cache latency [47:32] and instruction
2308 * latency [15:0]. The cache latency is the same as the
2309 * mem access latency on previous platforms.
2310 *
2311 * In practice, no memory access could take longer than
2312 * 4G cycles, so latency >> 32 distinguishes the two
2313 * formats of the mem access latency field.
2314 */
2315 if (weight > 0) {
2316 sample.weight = weight & 0xffff;
2317 sample.ins_lat = items->mem_access_latency & 0xffff;
2318 } else
2319 sample.weight = items->mem_access_latency;
2320 }
2321 if (!sample.weight && items->has_tsx_aux_info) {
2322 /* Cycles last block */
2323 sample.weight = (u32)items->tsx_aux_info;
2324 }
2325 }
2326
2327 if (sample_type & PERF_SAMPLE_TRANSACTION && items->has_tsx_aux_info) {
2328 u64 ax = items->has_rax ? items->rax : 0;
2329 /* Refer to the kernel's intel_hsw_transaction() */
2330 u64 txn = (u8)(items->tsx_aux_info >> 32);
2331
2332 /* For RTM XABORTs also log the abort code from AX */
2333 if (txn & PERF_TXN_TRANSACTION && ax & 1)
2334 txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
2335 sample.transaction = txn;
2336 }
2337
2338 return intel_pt_deliver_synth_event(pt, event, &sample, sample_type);
2339}
2340
2341static int intel_pt_synth_single_pebs_sample(struct intel_pt_queue *ptq)
2342{
2343 struct intel_pt *pt = ptq->pt;
2344 struct evsel *evsel = pt->pebs_evsel;
2345 u64 id = evsel->core.id[0];
2346
2347 return intel_pt_do_synth_pebs_sample(ptq, evsel, id);
2348}
2349
2350static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
2351{
2352 const struct intel_pt_blk_items *items = &ptq->state->items;
2353 struct intel_pt_pebs_event *pe;
2354 struct intel_pt *pt = ptq->pt;
2355 int err = -EINVAL;
2356 int hw_id;
2357
2358 if (!items->has_applicable_counters || !items->applicable_counters) {
2359 if (!pt->single_pebs)
2360 pr_err("PEBS-via-PT record with no applicable_counters\n");
2361 return intel_pt_synth_single_pebs_sample(ptq);
2362 }
2363
2364 for_each_set_bit(hw_id, (unsigned long *)&items->applicable_counters, INTEL_PT_MAX_PEBS) {
2365 pe = &ptq->pebs[hw_id];
2366 if (!pe->evsel) {
2367 if (!pt->single_pebs)
2368 pr_err("PEBS-via-PT record with no matching event, hw_id %d\n",
2369 hw_id);
2370 return intel_pt_synth_single_pebs_sample(ptq);
2371 }
2372 err = intel_pt_do_synth_pebs_sample(ptq, pe->evsel, pe->id);
2373 if (err)
2374 return err;
2375 }
2376
2377 return err;
2378}
2379
2380static int intel_pt_synth_events_sample(struct intel_pt_queue *ptq)
2381{
2382 struct intel_pt *pt = ptq->pt;
2383 union perf_event *event = ptq->event_buf;
2384 struct perf_sample sample = { .ip = 0, };
2385 struct {
2386 struct perf_synth_intel_evt cfe;
2387 struct perf_synth_intel_evd evd[INTEL_PT_MAX_EVDS];
2388 } raw;
2389 int i;
2390
2391 if (intel_pt_skip_event(pt))
2392 return 0;
2393
2394 intel_pt_prep_p_sample(pt, ptq, event, &sample);
2395
2396 sample.id = ptq->pt->evt_id;
2397 sample.stream_id = ptq->pt->evt_id;
2398
2399 raw.cfe.type = ptq->state->cfe_type;
2400 raw.cfe.reserved = 0;
2401 raw.cfe.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
2402 raw.cfe.vector = ptq->state->cfe_vector;
2403 raw.cfe.evd_cnt = ptq->state->evd_cnt;
2404
2405 for (i = 0; i < ptq->state->evd_cnt; i++) {
2406 raw.evd[i].et = 0;
2407 raw.evd[i].evd_type = ptq->state->evd[i].type;
2408 raw.evd[i].payload = ptq->state->evd[i].payload;
2409 }
2410
2411 sample.raw_size = perf_synth__raw_size(raw) +
2412 ptq->state->evd_cnt * sizeof(struct perf_synth_intel_evd);
2413 sample.raw_data = perf_synth__raw_data(&raw);
2414
2415 return intel_pt_deliver_synth_event(pt, event, &sample,
2416 pt->evt_sample_type);
2417}
2418
2419static int intel_pt_synth_iflag_chg_sample(struct intel_pt_queue *ptq)
2420{
2421 struct intel_pt *pt = ptq->pt;
2422 union perf_event *event = ptq->event_buf;
2423 struct perf_sample sample = { .ip = 0, };
2424 struct perf_synth_intel_iflag_chg raw;
2425
2426 if (intel_pt_skip_event(pt))
2427 return 0;
2428
2429 intel_pt_prep_p_sample(pt, ptq, event, &sample);
2430
2431 sample.id = ptq->pt->iflag_chg_id;
2432 sample.stream_id = ptq->pt->iflag_chg_id;
2433
2434 raw.flags = 0;
2435 raw.iflag = ptq->state->to_iflag;
2436
2437 if (ptq->state->type & INTEL_PT_BRANCH) {
2438 raw.via_branch = 1;
2439 raw.branch_ip = ptq->state->to_ip;
2440 } else {
2441 sample.addr = 0;
2442 }
2443 sample.flags = ptq->flags;
2444
2445 sample.raw_size = perf_synth__raw_size(raw);
2446 sample.raw_data = perf_synth__raw_data(&raw);
2447
2448 return intel_pt_deliver_synth_event(pt, event, &sample,
2449 pt->iflag_chg_sample_type);
2450}
2451
2452static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
2453 pid_t pid, pid_t tid, u64 ip, u64 timestamp,
2454 pid_t machine_pid, int vcpu)
2455{
2456 bool dump_log_on_error = pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_ON_ERROR;
2457 bool log_on_stdout = pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_USE_STDOUT;
2458 union perf_event event;
2459 char msg[MAX_AUXTRACE_ERROR_MSG];
2460 int err;
2461
2462 if (pt->synth_opts.error_minus_flags) {
2463 if (code == INTEL_PT_ERR_OVR &&
2464 pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_OVERFLOW)
2465 return 0;
2466 if (code == INTEL_PT_ERR_LOST &&
2467 pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_DATA_LOST)
2468 return 0;
2469 }
2470
2471 intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);
2472
2473 auxtrace_synth_guest_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
2474 code, cpu, pid, tid, ip, msg, timestamp,
2475 machine_pid, vcpu);
2476
2477 if (intel_pt_enable_logging && !log_on_stdout) {
2478 FILE *fp = intel_pt_log_fp();
2479
2480 if (fp)
2481 perf_event__fprintf_auxtrace_error(&event, fp);
2482 }
2483
2484 if (code != INTEL_PT_ERR_LOST && dump_log_on_error)
2485 intel_pt_log_dump_buf();
2486
2487 err = perf_session__deliver_synth_event(pt->session, &event, NULL);
2488 if (err)
2489 pr_err("Intel Processor Trace: failed to deliver error event, error %d\n",
2490 err);
2491
2492 return err;
2493}
2494
2495static int intel_ptq_synth_error(struct intel_pt_queue *ptq,
2496 const struct intel_pt_state *state)
2497{
2498 struct intel_pt *pt = ptq->pt;
2499 u64 tm = ptq->timestamp;
2500 pid_t machine_pid = 0;
2501 pid_t pid = ptq->pid;
2502 pid_t tid = ptq->tid;
2503 int vcpu = -1;
2504
2505 tm = pt->timeless_decoding ? 0 : tsc_to_perf_time(tm, &pt->tc);
2506
2507 if (pt->have_guest_sideband && state->from_nr) {
2508 machine_pid = ptq->guest_machine_pid;
2509 vcpu = ptq->vcpu;
2510 pid = ptq->guest_pid;
2511 tid = ptq->guest_tid;
2512 }
2513
2514 return intel_pt_synth_error(pt, state->err, ptq->cpu, pid, tid,
2515 state->from_ip, tm, machine_pid, vcpu);
2516}
2517
2518static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
2519{
2520 struct auxtrace_queue *queue;
2521 pid_t tid = ptq->next_tid;
2522 int err;
2523
2524 if (tid == -1)
2525 return 0;
2526
2527 intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid);
2528
2529 err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid);
2530
2531 queue = &pt->queues.queue_array[ptq->queue_nr];
2532 intel_pt_set_pid_tid_cpu(pt, queue);
2533
2534 ptq->next_tid = -1;
2535
2536 return err;
2537}
2538
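/*
 * A switch ip match also requires an ordinary synchronous branch: calls
 * and jumps into __switch_to count, but conditional, asynchronous,
 * interrupt and transaction-abort branches do not.
 */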
2539static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
2540{
2541 struct intel_pt *pt = ptq->pt;
2542
2543 return ip == pt->switch_ip &&
2544 (ptq->flags & PERF_IP_FLAG_BRANCH) &&
2545 !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC |
2546 PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT));
2547}
2548
2549#define INTEL_PT_PWR_EVT (INTEL_PT_MWAIT_OP | INTEL_PT_PWR_ENTRY | \
2550 INTEL_PT_EX_STOP | INTEL_PT_PWR_EXIT)
2551
2552static int intel_pt_sample(struct intel_pt_queue *ptq)
2553{
2554 const struct intel_pt_state *state = ptq->state;
2555 struct intel_pt *pt = ptq->pt;
2556 int err;
2557
2558 if (!ptq->have_sample)
2559 return 0;
2560
2561 ptq->have_sample = false;
2562
2563 if (pt->synth_opts.approx_ipc) {
2564 ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
2565 ptq->ipc_cyc_cnt = ptq->state->cycles;
2566 ptq->sample_ipc = true;
2567 } else {
2568 ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
2569 ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt;
2570 ptq->sample_ipc = ptq->state->flags & INTEL_PT_SAMPLE_IPC;
2571 }
2572
2573 /* Ensure guest code maps are set up */
2574 if (symbol_conf.guest_code && (state->from_nr || state->to_nr))
2575 intel_pt_get_guest(ptq);
2576
2577 /*
2578 * Do PEBS first to allow for the possibility that the PEBS timestamp
2579 * precedes the current timestamp.
2580 */
2581 if (pt->sample_pebs && state->type & INTEL_PT_BLK_ITEMS) {
2582 err = intel_pt_synth_pebs_sample(ptq);
2583 if (err)
2584 return err;
2585 }
2586
2587 if (pt->synth_opts.intr_events) {
2588 if (state->type & INTEL_PT_EVT) {
2589 err = intel_pt_synth_events_sample(ptq);
2590 if (err)
2591 return err;
2592 }
2593 if (state->type & INTEL_PT_IFLAG_CHG) {
2594 err = intel_pt_synth_iflag_chg_sample(ptq);
2595 if (err)
2596 return err;
2597 }
2598 }
2599
2600 if (pt->sample_pwr_events) {
2601 if (state->type & INTEL_PT_PSB_EVT) {
2602 err = intel_pt_synth_psb_sample(ptq);
2603 if (err)
2604 return err;
2605 }
2606 if (ptq->state->cbr != ptq->cbr_seen) {
2607 err = intel_pt_synth_cbr_sample(ptq);
2608 if (err)
2609 return err;
2610 }
2611 if (state->type & INTEL_PT_PWR_EVT) {
2612 if (state->type & INTEL_PT_MWAIT_OP) {
2613 err = intel_pt_synth_mwait_sample(ptq);
2614 if (err)
2615 return err;
2616 }
2617 if (state->type & INTEL_PT_PWR_ENTRY) {
2618 err = intel_pt_synth_pwre_sample(ptq);
2619 if (err)
2620 return err;
2621 }
2622 if (state->type & INTEL_PT_EX_STOP) {
2623 err = intel_pt_synth_exstop_sample(ptq);
2624 if (err)
2625 return err;
2626 }
2627 if (state->type & INTEL_PT_PWR_EXIT) {
2628 err = intel_pt_synth_pwrx_sample(ptq);
2629 if (err)
2630 return err;
2631 }
2632 }
2633 }
2634
2635 if (state->type & INTEL_PT_INSTRUCTION) {
2636 if (pt->sample_instructions) {
2637 err = intel_pt_synth_instruction_sample(ptq);
2638 if (err)
2639 return err;
2640 }
2641 if (pt->sample_cycles) {
2642 err = intel_pt_synth_cycle_sample(ptq);
2643 if (err)
2644 return err;
2645 }
2646 }
2647
2648 if (pt->sample_transactions && (state->type & INTEL_PT_TRANSACTION)) {
2649 err = intel_pt_synth_transaction_sample(ptq);
2650 if (err)
2651 return err;
2652 }
2653
2654 if (pt->sample_ptwrites && (state->type & INTEL_PT_PTW)) {
2655 err = intel_pt_synth_ptwrite_sample(ptq);
2656 if (err)
2657 return err;
2658 }
2659
2660 if (!(state->type & INTEL_PT_BRANCH))
2661 return 0;
2662
2663 if (pt->use_thread_stack) {
2664 thread_stack__event(ptq->thread, ptq->cpu, ptq->flags,
2665 state->from_ip, state->to_ip, ptq->insn_len,
2666 state->trace_nr, pt->callstack,
2667 pt->br_stack_sz_plus,
2668 pt->mispred_all);
2669 } else {
2670 thread_stack__set_trace_nr(ptq->thread, ptq->cpu, state->trace_nr);
2671 }
2672
2673 if (pt->sample_branches) {
2674 if (state->from_nr != state->to_nr &&
2675 state->from_ip && state->to_ip) {
2676 struct intel_pt_state *st = (struct intel_pt_state *)state;
2677 u64 to_ip = st->to_ip;
2678 u64 from_ip = st->from_ip;
2679
2680 /*
2681 * perf cannot handle having different machines for ip
2682 * and addr, so create 2 branches.
2683 */
2684 st->to_ip = 0;
2685 err = intel_pt_synth_branch_sample(ptq);
2686 if (err)
2687 return err;
2688 st->from_ip = 0;
2689 st->to_ip = to_ip;
2690 err = intel_pt_synth_branch_sample(ptq);
2691 st->from_ip = from_ip;
2692 } else {
2693 err = intel_pt_synth_branch_sample(ptq);
2694 }
2695 if (err)
2696 return err;
2697 }
2698
2699 if (!ptq->sync_switch)
2700 return 0;
2701
2702 if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
2703 switch (ptq->switch_state) {
2704 case INTEL_PT_SS_NOT_TRACING:
2705 case INTEL_PT_SS_UNKNOWN:
2706 case INTEL_PT_SS_EXPECTING_SWITCH_IP:
2707 err = intel_pt_next_tid(pt, ptq);
2708 if (err)
2709 return err;
2710 ptq->switch_state = INTEL_PT_SS_TRACING;
2711 break;
2712 default:
2713 ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT;
2714 return 1;
2715 }
2716 } else if (!state->to_ip) {
2717 ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
2718 } else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) {
2719 ptq->switch_state = INTEL_PT_SS_UNKNOWN;
2720 } else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
2721 state->to_ip == pt->ptss_ip &&
2722 (ptq->flags & PERF_IP_FLAG_CALL)) {
2723 ptq->switch_state = INTEL_PT_SS_TRACING;
2724 }
2725
2726 return 0;
2727}
2728
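/*
 * Find the address of the scheduler's __switch_to() from kernel symbols,
 * and also of the function used to report the switch (perf_trace_sched_switch()
 * when sched_switch tracepoint events are in use, otherwise
 * __perf_event_task_sched_out()), a call to which, seen while the switch
 * state is unknown, shows that tracing is active again.
 */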
2729static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
2730{
2731 struct machine *machine = pt->machine;
2732 struct map *map;
2733 struct symbol *sym, *start;
2734 u64 ip, switch_ip = 0;
2735 const char *ptss;
2736
2737 if (ptss_ip)
2738 *ptss_ip = 0;
2739
2740 map = machine__kernel_map(machine);
2741 if (!map)
2742 return 0;
2743
2744 if (map__load(map))
2745 return 0;
2746
2747 start = dso__first_symbol(map__dso(map));
2748
2749 for (sym = start; sym; sym = dso__next_symbol(sym)) {
2750 if (sym->binding == STB_GLOBAL &&
2751 !strcmp(sym->name, "__switch_to")) {
2752 ip = map__unmap_ip(map, sym->start);
2753 if (ip >= map__start(map) && ip < map__end(map)) {
2754 switch_ip = ip;
2755 break;
2756 }
2757 }
2758 }
2759
2760 if (!switch_ip || !ptss_ip)
2761 return 0;
2762
2763 if (pt->have_sched_switch == 1)
2764 ptss = "perf_trace_sched_switch";
2765 else
2766 ptss = "__perf_event_task_sched_out";
2767
2768 for (sym = start; sym; sym = dso__next_symbol(sym)) {
2769 if (!strcmp(sym->name, ptss)) {
2770 ip = map__unmap_ip(map, sym->start);
2771 if (ip >= map__start(map) && ip < map__end(map)) {
2772 *ptss_ip = ip;
2773 break;
2774 }
2775 }
2776 }
2777
2778 return switch_ip;
2779}
2780
2781static void intel_pt_enable_sync_switch(struct intel_pt *pt)
2782{
2783 unsigned int i;
2784
2785 if (pt->sync_switch_not_supported)
2786 return;
2787
2788 pt->sync_switch = true;
2789
2790 for (i = 0; i < pt->queues.nr_queues; i++) {
2791 struct auxtrace_queue *queue = &pt->queues.queue_array[i];
2792 struct intel_pt_queue *ptq = queue->priv;
2793
2794 if (ptq)
2795 ptq->sync_switch = true;
2796 }
2797}
2798
2799static void intel_pt_disable_sync_switch(struct intel_pt *pt)
2800{
2801 unsigned int i;
2802
2803 pt->sync_switch = false;
2804
2805 for (i = 0; i < pt->queues.nr_queues; i++) {
2806 struct auxtrace_queue *queue = &pt->queues.queue_array[i];
2807 struct intel_pt_queue *ptq = queue->priv;
2808
2809 if (ptq) {
2810 ptq->sync_switch = false;
2811 intel_pt_next_tid(pt, ptq);
2812 }
2813 }
2814}
2815
2816/*
2817 * To filter against time ranges, it is only necessary to look at the next start
2818 * or end time.
2819 */
2820static bool intel_pt_next_time(struct intel_pt_queue *ptq)
2821{
2822 struct intel_pt *pt = ptq->pt;
2823
2824 if (ptq->sel_start) {
2825 /* Next time is an end time */
2826 ptq->sel_start = false;
2827 ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].end;
2828 return true;
2829 } else if (ptq->sel_idx + 1 < pt->range_cnt) {
2830 /* Next time is a start time */
2831 ptq->sel_start = true;
2832 ptq->sel_idx += 1;
2833 ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].start;
2834 return true;
2835 }
2836
2837 /* No next time */
2838 return false;
2839}
2840
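/*
 * Returns 0 to continue decoding, 1 to stop, or a negative error code.
 * When the current position is before the next selected time range,
 * decoding is fast forwarded to the start of that range, which also
 * invalidates the current sample and, with sync_switch, the assumed
 * switch state.
 */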
2841static int intel_pt_time_filter(struct intel_pt_queue *ptq, u64 *ff_timestamp)
2842{
2843 int err;
2844
2845 while (1) {
2846 if (ptq->sel_start) {
2847 if (ptq->timestamp >= ptq->sel_timestamp) {
2848 /* After start time, so consider next time */
2849 intel_pt_next_time(ptq);
2850 if (!ptq->sel_timestamp) {
2851 /* No end time */
2852 return 0;
2853 }
2854 /* Check against end time */
2855 continue;
2856 }
2857 /* Before start time, so fast forward */
2858 ptq->have_sample = false;
2859 if (ptq->sel_timestamp > *ff_timestamp) {
2860 if (ptq->sync_switch) {
2861 intel_pt_next_tid(ptq->pt, ptq);
2862 ptq->switch_state = INTEL_PT_SS_UNKNOWN;
2863 }
2864 *ff_timestamp = ptq->sel_timestamp;
2865 err = intel_pt_fast_forward(ptq->decoder,
2866 ptq->sel_timestamp);
2867 if (err)
2868 return err;
2869 }
2870 return 0;
2871 } else if (ptq->timestamp > ptq->sel_timestamp) {
2872 /* After end time, so consider next time */
2873 if (!intel_pt_next_time(ptq)) {
2874 /* No next time range, so stop decoding */
2875 ptq->have_sample = false;
2876 ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
2877 return 1;
2878 }
2879 /* Check against next start time */
2880 continue;
2881 } else {
2882 /* Before end time */
2883 return 0;
2884 }
2885 }
2886}
2887
2888static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
2889{
2890 const struct intel_pt_state *state = ptq->state;
2891 struct intel_pt *pt = ptq->pt;
2892 u64 ff_timestamp = 0;
2893 int err;
2894
2895 if (!pt->kernel_start) {
2896 pt->kernel_start = machine__kernel_start(pt->machine);
2897 if (pt->per_cpu_mmaps &&
2898 (pt->have_sched_switch == 1 || pt->have_sched_switch == 3) &&
2899 !pt->timeless_decoding && intel_pt_tracing_kernel(pt) &&
2900 !pt->sampling_mode && !pt->synth_opts.vm_time_correlation) {
2901 pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip);
2902 if (pt->switch_ip) {
2903 intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
2904 pt->switch_ip, pt->ptss_ip);
2905 intel_pt_enable_sync_switch(pt);
2906 }
2907 }
2908 }
2909
2910 intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
2911 ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
2912 while (1) {
2913 err = intel_pt_sample(ptq);
2914 if (err)
2915 return err;
2916
2917 state = intel_pt_decode(ptq->decoder);
2918 if (state->err) {
2919 if (state->err == INTEL_PT_ERR_NODATA)
2920 return 1;
2921 if (ptq->sync_switch &&
2922 state->from_ip >= pt->kernel_start) {
2923 ptq->sync_switch = false;
2924 intel_pt_next_tid(pt, ptq);
2925 }
2926 ptq->timestamp = state->est_timestamp;
2927 if (pt->synth_opts.errors) {
2928 err = intel_ptq_synth_error(ptq, state);
2929 if (err)
2930 return err;
2931 }
2932 continue;
2933 }
2934
2935 ptq->state = state;
2936 ptq->have_sample = true;
2937 intel_pt_sample_flags(ptq);
2938
2939 /* Use estimated TSC upon return to user space */
2940 if (pt->est_tsc &&
2941 (state->from_ip >= pt->kernel_start || !state->from_ip) &&
2942 state->to_ip && state->to_ip < pt->kernel_start) {
2943 intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
2944 state->timestamp, state->est_timestamp);
2945 ptq->timestamp = state->est_timestamp;
2946 /* Use estimated TSC in unknown switch state */
2947 } else if (ptq->sync_switch &&
2948 ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
2949 intel_pt_is_switch_ip(ptq, state->to_ip) &&
2950 ptq->next_tid == -1) {
2951 intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
2952 state->timestamp, state->est_timestamp);
2953 ptq->timestamp = state->est_timestamp;
2954 } else if (state->timestamp > ptq->timestamp) {
2955 ptq->timestamp = state->timestamp;
2956 }
2957
2958 if (ptq->sel_timestamp) {
2959 err = intel_pt_time_filter(ptq, &ff_timestamp);
2960 if (err)
2961 return err;
2962 }
2963
2964 if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) {
2965 *timestamp = ptq->timestamp;
2966 return 0;
2967 }
2968 }
2969 return 0;
2970}
2971
2972static inline int intel_pt_update_queues(struct intel_pt *pt)
2973{
2974 if (pt->queues.new_data) {
2975 pt->queues.new_data = false;
2976 return intel_pt_setup_queues(pt);
2977 }
2978 return 0;
2979}
2980
2981static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp)
2982{
2983 unsigned int queue_nr;
2984 u64 ts;
2985 int ret;
2986
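/*
 * Queues are kept on a min-heap ordered by the timestamp ("ordinal") of
 * their next sample, so decoding always resumes on the queue that is
 * furthest behind. Each queue is decoded only up to the next queue's
 * ordinal (plus 1) or the requested timestamp, whichever is smaller, and
 * is then pushed back on the heap unless decoding is suspended (no more
 * data, or waiting for a context switch event).
 */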
2987 while (1) {
2988 struct auxtrace_queue *queue;
2989 struct intel_pt_queue *ptq;
2990
2991 if (!pt->heap.heap_cnt)
2992 return 0;
2993
2994 if (pt->heap.heap_array[0].ordinal >= timestamp)
2995 return 0;
2996
2997 queue_nr = pt->heap.heap_array[0].queue_nr;
2998 queue = &pt->queues.queue_array[queue_nr];
2999 ptq = queue->priv;
3000
3001 intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n",
3002 queue_nr, pt->heap.heap_array[0].ordinal,
3003 timestamp);
3004
3005 auxtrace_heap__pop(&pt->heap);
3006
3007 if (pt->heap.heap_cnt) {
3008 ts = pt->heap.heap_array[0].ordinal + 1;
3009 if (ts > timestamp)
3010 ts = timestamp;
3011 } else {
3012 ts = timestamp;
3013 }
3014
3015 intel_pt_set_pid_tid_cpu(pt, queue);
3016
3017 ret = intel_pt_run_decoder(ptq, &ts);
3018
3019 if (ret < 0) {
3020 auxtrace_heap__add(&pt->heap, queue_nr, ts);
3021 return ret;
3022 }
3023
3024 if (!ret) {
3025 ret = auxtrace_heap__add(&pt->heap, queue_nr, ts);
3026 if (ret < 0)
3027 return ret;
3028 } else {
3029 ptq->on_heap = false;
3030 }
3031 }
3032
3033 return 0;
3034}
3035
3036static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid,
3037 u64 time_)
3038{
3039 struct auxtrace_queues *queues = &pt->queues;
3040 unsigned int i;
3041 u64 ts = 0;
3042
3043 for (i = 0; i < queues->nr_queues; i++) {
3044 struct auxtrace_queue *queue = &pt->queues.queue_array[i];
3045 struct intel_pt_queue *ptq = queue->priv;
3046
3047 if (ptq && (tid == -1 || ptq->tid == tid)) {
3048 ptq->time = time_;
3049 intel_pt_set_pid_tid_cpu(pt, queue);
3050 intel_pt_run_decoder(ptq, &ts);
3051 }
3052 }
3053 return 0;
3054}
3055
3056static void intel_pt_sample_set_pid_tid_cpu(struct intel_pt_queue *ptq,
3057 struct auxtrace_queue *queue,
3058 struct perf_sample *sample)
3059{
3060 struct machine *m = ptq->pt->machine;
3061
3062 ptq->pid = sample->pid;
3063 ptq->tid = sample->tid;
3064 ptq->cpu = queue->cpu;
3065
3066 intel_pt_log("queue %u cpu %d pid %d tid %d\n",
3067 ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
3068
3069 thread__zput(ptq->thread);
3070
3071 if (ptq->tid == -1)
3072 return;
3073
3074 if (ptq->pid == -1) {
3075 ptq->thread = machine__find_thread(m, -1, ptq->tid);
3076 if (ptq->thread)
3077 ptq->pid = ptq->thread->pid_;
3078 return;
3079 }
3080
3081 ptq->thread = machine__findnew_thread(m, ptq->pid, ptq->tid);
3082}
3083
3084static int intel_pt_process_timeless_sample(struct intel_pt *pt,
3085 struct perf_sample *sample)
3086{
3087 struct auxtrace_queue *queue;
3088 struct intel_pt_queue *ptq;
3089 u64 ts = 0;
3090
3091 queue = auxtrace_queues__sample_queue(&pt->queues, sample, pt->session);
3092 if (!queue)
3093 return -EINVAL;
3094
3095 ptq = queue->priv;
3096 if (!ptq)
3097 return 0;
3098
3099 ptq->stop = false;
3100 ptq->time = sample->time;
3101 intel_pt_sample_set_pid_tid_cpu(ptq, queue, sample);
3102 intel_pt_run_decoder(ptq, &ts);
3103 return 0;
3104}
3105
3106static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
3107{
3108 return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
3109 sample->pid, sample->tid, 0, sample->time,
3110 sample->machine_pid, sample->vcpu);
3111}
3112
3113static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
3114{
3115 unsigned i, j;
3116
3117 if (cpu < 0 || !pt->queues.nr_queues)
3118 return NULL;
3119
3120 if ((unsigned)cpu >= pt->queues.nr_queues)
3121 i = pt->queues.nr_queues - 1;
3122 else
3123 i = cpu;
3124
3125 if (pt->queues.queue_array[i].cpu == cpu)
3126 return pt->queues.queue_array[i].priv;
3127
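	/*
	 * Queues are normally created per-cpu in cpu order, so search
	 * backwards from the expected array slot first, then forwards from
	 * where that search began.
	 */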
3128 for (j = 0; i > 0; j++) {
3129 if (pt->queues.queue_array[--i].cpu == cpu)
3130 return pt->queues.queue_array[i].priv;
3131 }
3132
3133 for (; j < pt->queues.nr_queues; j++) {
3134 if (pt->queues.queue_array[j].cpu == cpu)
3135 return pt->queues.queue_array[j].priv;
3136 }
3137
3138 return NULL;
3139}
3140
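/*
 * Returns 1 if the caller should update the current tid directly, 0 if
 * the tid change has been deferred (via ptq->next_tid) until the switch
 * ip is reached, or a negative error code.
 */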
3141static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
3142 u64 timestamp)
3143{
3144 struct intel_pt_queue *ptq;
3145 int err;
3146
3147 if (!pt->sync_switch)
3148 return 1;
3149
3150 ptq = intel_pt_cpu_to_ptq(pt, cpu);
3151 if (!ptq || !ptq->sync_switch)
3152 return 1;
3153
3154 switch (ptq->switch_state) {
3155 case INTEL_PT_SS_NOT_TRACING:
3156 break;
3157 case INTEL_PT_SS_UNKNOWN:
3158 case INTEL_PT_SS_TRACING:
3159 ptq->next_tid = tid;
3160 ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP;
3161 return 0;
3162 case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
3163 if (!ptq->on_heap) {
3164 ptq->timestamp = perf_time_to_tsc(timestamp,
3165 &pt->tc);
3166 err = auxtrace_heap__add(&pt->heap, ptq->queue_nr,
3167 ptq->timestamp);
3168 if (err)
3169 return err;
3170 ptq->on_heap = true;
3171 }
3172 ptq->switch_state = INTEL_PT_SS_TRACING;
3173 break;
3174 case INTEL_PT_SS_EXPECTING_SWITCH_IP:
3175 intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu);
3176 break;
3177 default:
3178 break;
3179 }
3180
3181 ptq->next_tid = -1;
3182
3183 return 1;
3184}
3185
3186#ifdef HAVE_LIBTRACEEVENT
3187static int intel_pt_process_switch(struct intel_pt *pt,
3188 struct perf_sample *sample)
3189{
3190 pid_t tid;
3191 int cpu, ret;
3192 struct evsel *evsel = evlist__id2evsel(pt->session->evlist, sample->id);
3193
3194 if (evsel != pt->switch_evsel)
3195 return 0;
3196
3197 tid = evsel__intval(evsel, sample, "next_pid");
3198 cpu = sample->cpu;
3199
3200 intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
3201 cpu, tid, sample->time, perf_time_to_tsc(sample->time,
3202 &pt->tc));
3203
3204 ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
3205 if (ret <= 0)
3206 return ret;
3207
3208 return machine__set_current_tid(pt->machine, cpu, -1, tid);
3209}
3210#endif /* HAVE_LIBTRACEEVENT */
3211
3212static int intel_pt_context_switch_in(struct intel_pt *pt,
3213 struct perf_sample *sample)
3214{
3215 pid_t pid = sample->pid;
3216 pid_t tid = sample->tid;
3217 int cpu = sample->cpu;
3218
3219 if (pt->sync_switch) {
3220 struct intel_pt_queue *ptq;
3221
3222 ptq = intel_pt_cpu_to_ptq(pt, cpu);
3223 if (ptq && ptq->sync_switch) {
3224 ptq->next_tid = -1;
3225 switch (ptq->switch_state) {
3226 case INTEL_PT_SS_NOT_TRACING:
3227 case INTEL_PT_SS_UNKNOWN:
3228 case INTEL_PT_SS_TRACING:
3229 break;
3230 case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
3231 case INTEL_PT_SS_EXPECTING_SWITCH_IP:
3232 ptq->switch_state = INTEL_PT_SS_TRACING;
3233 break;
3234 default:
3235 break;
3236 }
3237 }
3238 }
3239
3240 /*
3241 * If the current tid has not been updated yet, ensure it is now that
3242 * a "switch in" event has occurred.
3243 */
3244 if (machine__get_current_tid(pt->machine, cpu) == tid)
3245 return 0;
3246
3247 return machine__set_current_tid(pt->machine, cpu, pid, tid);
3248}
3249
3250static int intel_pt_guest_context_switch(struct intel_pt *pt,
3251 union perf_event *event,
3252 struct perf_sample *sample)
3253{
3254 bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
3255 struct machines *machines = &pt->session->machines;
3256 struct machine *machine = machines__find(machines, sample->machine_pid);
3257
3258 pt->have_guest_sideband = true;
3259
3260 /*
3261 * sync_switch cannot handle guest machines at present, so just disable
3262 * it.
3263 */
3264 pt->sync_switch_not_supported = true;
3265 if (pt->sync_switch)
3266 intel_pt_disable_sync_switch(pt);
3267
3268 if (out)
3269 return 0;
3270
3271 if (!machine)
3272 return -EINVAL;
3273
3274 return machine__set_current_tid(machine, sample->vcpu, sample->pid, sample->tid);
3275}
3276
3277static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
3278 struct perf_sample *sample)
3279{
3280 bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
3281 pid_t pid, tid;
3282 int cpu, ret;
3283
3284 if (perf_event__is_guest(event))
3285 return intel_pt_guest_context_switch(pt, event, sample);
3286
3287 cpu = sample->cpu;
3288
3289 if (pt->have_sched_switch == 3) {
3290 if (!out)
3291 return intel_pt_context_switch_in(pt, sample);
3292 if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) {
3293 pr_err("Expecting CPU-wide context switch event\n");
3294 return -EINVAL;
3295 }
3296 pid = event->context_switch.next_prev_pid;
3297 tid = event->context_switch.next_prev_tid;
3298 } else {
3299 if (out)
3300 return 0;
3301 pid = sample->pid;
3302 tid = sample->tid;
3303 }
3304
3305 if (tid == -1)
3306 intel_pt_log("context_switch event has no tid\n");
3307
3308 ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
3309 if (ret <= 0)
3310 return ret;
3311
3312 return machine__set_current_tid(pt->machine, cpu, pid, tid);
3313}
3314
3315static int intel_pt_process_itrace_start(struct intel_pt *pt,
3316 union perf_event *event,
3317 struct perf_sample *sample)
3318{
3319 if (!pt->per_cpu_mmaps)
3320 return 0;
3321
3322 intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
3323 sample->cpu, event->itrace_start.pid,
3324 event->itrace_start.tid, sample->time,
3325 perf_time_to_tsc(sample->time, &pt->tc));
3326
3327 return machine__set_current_tid(pt->machine, sample->cpu,
3328 event->itrace_start.pid,
3329 event->itrace_start.tid);
3330}
3331
3332static int intel_pt_process_aux_output_hw_id(struct intel_pt *pt,
3333 union perf_event *event,
3334 struct perf_sample *sample)
3335{
3336 u64 hw_id = event->aux_output_hw_id.hw_id;
3337 struct auxtrace_queue *queue;
3338 struct intel_pt_queue *ptq;
3339 struct evsel *evsel;
3340
3341 queue = auxtrace_queues__sample_queue(&pt->queues, sample, pt->session);
3342 evsel = evlist__id2evsel_strict(pt->session->evlist, sample->id);
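	/* hw_id indexes ptq->pebs[], an array of INTEL_PT_MAX_PEBS entries */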
3343 if (!queue || !queue->priv || !evsel || hw_id >= INTEL_PT_MAX_PEBS) {
3344 pr_err("Bad AUX output hardware ID\n");
3345 return -EINVAL;
3346 }
3347
3348 ptq = queue->priv;
3349
3350 ptq->pebs[hw_id].evsel = evsel;
3351 ptq->pebs[hw_id].id = sample->id;
3352
3353 return 0;
3354}
3355
3356static int intel_pt_find_map(struct thread *thread, u8 cpumode, u64 addr,
3357 struct addr_location *al)
3358{
3359 if (!al->map || addr < map__start(al->map) || addr >= map__end(al->map)) {
3360 if (!thread__find_map(thread, cpumode, addr, al))
3361 return -1;
3362 }
3363
3364 return 0;
3365}
3366
3367/* Invalidate all instruction cache entries that overlap the text poke */
3368static int intel_pt_text_poke(struct intel_pt *pt, union perf_event *event)
3369{
3370 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
3371 u64 addr = event->text_poke.addr + event->text_poke.new_len - 1;
3372 /* Assume the text poke begins in a basic block of no more than 4096 bytes */
3373 int cnt = 4096 + event->text_poke.new_len;
3374 struct thread *thread = pt->unknown_thread;
3375 struct addr_location al = { .map = NULL };
3376 struct machine *machine = pt->machine;
3377 struct intel_pt_cache_entry *e;
3378 u64 offset;
3379
3380 if (!event->text_poke.new_len)
3381 return 0;
3382
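	/*
	 * The walk below covers the poked bytes plus up to 4096 bytes before
	 * them, matching the basic block size assumption above.
	 */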
3383 for (; cnt; cnt--, addr--) {
3384 struct dso *dso;
3385
3386 if (intel_pt_find_map(thread, cpumode, addr, &al)) {
3387 if (addr < event->text_poke.addr)
3388 return 0;
3389 continue;
3390 }
3391
3392 dso = map__dso(al.map);
3393 if (!dso || !dso->auxtrace_cache)
3394 continue;
3395
3396 offset = map__map_ip(al.map, addr);
3397
3398 e = intel_pt_cache_lookup(dso, machine, offset);
3399 if (!e)
3400 continue;
3401
3402 if (addr + e->byte_cnt + e->length <= event->text_poke.addr) {
3403 /*
3404 * No overlap. Working backwards there cannot be another
3405 * basic block that overlaps the text poke if there is a
3406 * branch instruction before the text poke address.
3407 */
3408 if (e->branch != INTEL_PT_BR_NO_BRANCH)
3409 return 0;
3410 } else {
3411 intel_pt_cache_invalidate(dso, machine, offset);
3412 intel_pt_log("Invalidated instruction cache for %s at %#"PRIx64"\n",
3413 dso->long_name, addr);
3414 }
3415 }
3416
3417 return 0;
3418}
3419
3420static int intel_pt_process_event(struct perf_session *session,
3421 union perf_event *event,
3422 struct perf_sample *sample,
3423 struct perf_tool *tool)
3424{
3425 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3426 auxtrace);
3427 u64 timestamp;
3428 int err = 0;
3429
3430 if (dump_trace)
3431 return 0;
3432
3433 if (!tool->ordered_events) {
3434 pr_err("Intel Processor Trace requires ordered events\n");
3435 return -EINVAL;
3436 }
3437
3438 if (sample->time && sample->time != (u64)-1)
3439 timestamp = perf_time_to_tsc(sample->time, &pt->tc);
3440 else
3441 timestamp = 0;
3442
3443 if (timestamp || pt->timeless_decoding) {
3444 err = intel_pt_update_queues(pt);
3445 if (err)
3446 return err;
3447 }
3448
3449 if (pt->timeless_decoding) {
3450 if (pt->sampling_mode) {
3451 if (sample->aux_sample.size)
3452 err = intel_pt_process_timeless_sample(pt,
3453 sample);
3454 } else if (event->header.type == PERF_RECORD_EXIT) {
3455 err = intel_pt_process_timeless_queues(pt,
3456 event->fork.tid,
3457 sample->time);
3458 }
3459 } else if (timestamp) {
3460 if (!pt->first_timestamp)
3461 intel_pt_first_timestamp(pt, timestamp);
3462 err = intel_pt_process_queues(pt, timestamp);
3463 }
3464 if (err)
3465 return err;
3466
3467 if (event->header.type == PERF_RECORD_SAMPLE) {
3468 if (pt->synth_opts.add_callchain && !sample->callchain)
3469 intel_pt_add_callchain(pt, sample);
3470 if (pt->synth_opts.add_last_branch && !sample->branch_stack)
3471 intel_pt_add_br_stack(pt, sample);
3472 }
3473
3474 if (event->header.type == PERF_RECORD_AUX &&
3475 (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
3476 pt->synth_opts.errors) {
3477 err = intel_pt_lost(pt, sample);
3478 if (err)
3479 return err;
3480 }
3481
3482#ifdef HAVE_LIBTRACEEVENT
3483 if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE)
3484 err = intel_pt_process_switch(pt, sample);
3485 else
3486#endif
3487 if (event->header.type == PERF_RECORD_ITRACE_START)
3488 err = intel_pt_process_itrace_start(pt, event, sample);
3489 else if (event->header.type == PERF_RECORD_AUX_OUTPUT_HW_ID)
3490 err = intel_pt_process_aux_output_hw_id(pt, event, sample);
3491 else if (event->header.type == PERF_RECORD_SWITCH ||
3492 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
3493 err = intel_pt_context_switch(pt, event, sample);
3494
3495 if (!err && event->header.type == PERF_RECORD_TEXT_POKE)
3496 err = intel_pt_text_poke(pt, event);
3497
3498 if (intel_pt_enable_logging && intel_pt_log_events(pt, sample->time)) {
3499 intel_pt_log("event %u: cpu %d time %"PRIu64" tsc %#"PRIx64" ",
3500 event->header.type, sample->cpu, sample->time, timestamp);
3501 intel_pt_log_event(event);
3502 }
3503
3504 return err;
3505}
3506
3507static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool)
3508{
3509 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3510 auxtrace);
3511 int ret;
3512
3513 if (dump_trace)
3514 return 0;
3515
3516 if (!tool->ordered_events)
3517 return -EINVAL;
3518
3519 ret = intel_pt_update_queues(pt);
3520 if (ret < 0)
3521 return ret;
3522
3523 if (pt->timeless_decoding)
3524 return intel_pt_process_timeless_queues(pt, -1,
3525 MAX_TIMESTAMP - 1);
3526
3527 return intel_pt_process_queues(pt, MAX_TIMESTAMP);
3528}
3529
3530static void intel_pt_free_events(struct perf_session *session)
3531{
3532 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3533 auxtrace);
3534 struct auxtrace_queues *queues = &pt->queues;
3535 unsigned int i;
3536
3537 for (i = 0; i < queues->nr_queues; i++) {
3538 intel_pt_free_queue(queues->queue_array[i].priv);
3539 queues->queue_array[i].priv = NULL;
3540 }
3541 intel_pt_log_disable();
3542 auxtrace_queues__free(queues);
3543}
3544
3545static void intel_pt_free(struct perf_session *session)
3546{
3547 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3548 auxtrace);
3549
3550 auxtrace_heap__free(&pt->heap);
3551 intel_pt_free_events(session);
3552 session->auxtrace = NULL;
3553 intel_pt_free_vmcs_info(pt);
3554 thread__put(pt->unknown_thread);
3555 addr_filters__exit(&pt->filts);
3556 zfree(&pt->chain);
3557 zfree(&pt->filter);
3558 zfree(&pt->time_ranges);
3559 free(pt);
3560}
3561
3562static bool intel_pt_evsel_is_auxtrace(struct perf_session *session,
3563 struct evsel *evsel)
3564{
3565 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3566 auxtrace);
3567
3568 return evsel->core.attr.type == pt->pmu_type;
3569}
3570
3571static int intel_pt_process_auxtrace_event(struct perf_session *session,
3572 union perf_event *event,
3573 struct perf_tool *tool __maybe_unused)
3574{
3575 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3576 auxtrace);
3577
3578 if (!pt->data_queued) {
3579 struct auxtrace_buffer *buffer;
3580 off_t data_offset;
3581 int fd = perf_data__fd(session->data);
3582 int err;
3583
3584 if (perf_data__is_pipe(session->data)) {
3585 data_offset = 0;
3586 } else {
3587 data_offset = lseek(fd, 0, SEEK_CUR);
3588 if (data_offset == -1)
3589 return -errno;
3590 }
3591
3592 err = auxtrace_queues__add_event(&pt->queues, session, event,
3593 data_offset, &buffer);
3594 if (err)
3595 return err;
3596
3597 /* Dump here now that we have copied a piped trace out of the pipe */
3598 if (dump_trace) {
3599 if (auxtrace_buffer__get_data(buffer, fd)) {
3600 intel_pt_dump_event(pt, buffer->data,
3601 buffer->size);
3602 auxtrace_buffer__put_data(buffer);
3603 }
3604 }
3605 }
3606
3607 return 0;
3608}
3609
3610static int intel_pt_queue_data(struct perf_session *session,
3611 struct perf_sample *sample,
3612 union perf_event *event, u64 data_offset)
3613{
3614 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3615 auxtrace);
3616 u64 timestamp;
3617
3618 if (event) {
3619 return auxtrace_queues__add_event(&pt->queues, session, event,
3620 data_offset, NULL);
3621 }
3622
3623 if (sample->time && sample->time != (u64)-1)
3624 timestamp = perf_time_to_tsc(sample->time, &pt->tc);
3625 else
3626 timestamp = 0;
3627
3628 return auxtrace_queues__add_sample(&pt->queues, session, sample,
3629 data_offset, timestamp);
3630}
3631
3632struct intel_pt_synth {
3633 struct perf_tool dummy_tool;
3634 struct perf_session *session;
3635};
3636
3637static int intel_pt_event_synth(struct perf_tool *tool,
3638 union perf_event *event,
3639 struct perf_sample *sample __maybe_unused,
3640 struct machine *machine __maybe_unused)
3641{
3642 struct intel_pt_synth *intel_pt_synth =
3643 container_of(tool, struct intel_pt_synth, dummy_tool);
3644
3645 return perf_session__deliver_synth_event(intel_pt_synth->session, event,
3646 NULL);
3647}
3648
3649static int intel_pt_synth_event(struct perf_session *session, const char *name,
3650 struct perf_event_attr *attr, u64 id)
3651{
3652 struct intel_pt_synth intel_pt_synth;
3653 int err;
3654
3655 pr_debug("Synthesizing '%s' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
3656 name, id, (u64)attr->sample_type);
3657
3658 memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth));
3659 intel_pt_synth.session = session;
3660
3661 err = perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
3662 &id, intel_pt_event_synth);
3663 if (err)
3664 pr_err("%s: failed to synthesize '%s' event type\n",
3665 __func__, name);
3666
3667 return err;
3668}
3669
3670static void intel_pt_set_event_name(struct evlist *evlist, u64 id,
3671 const char *name)
3672{
3673 struct evsel *evsel;
3674
3675 evlist__for_each_entry(evlist, evsel) {
3676 if (evsel->core.id && evsel->core.id[0] == id) {
3677 if (evsel->name)
3678 zfree(&evsel->name);
3679 evsel->name = strdup(name);
3680 break;
3681 }
3682 }
3683}
3684
3685static struct evsel *intel_pt_evsel(struct intel_pt *pt,
3686 struct evlist *evlist)
3687{
3688 struct evsel *evsel;
3689
3690 evlist__for_each_entry(evlist, evsel) {
3691 if (evsel->core.attr.type == pt->pmu_type && evsel->core.ids)
3692 return evsel;
3693 }
3694
3695 return NULL;
3696}
3697
3698static int intel_pt_synth_events(struct intel_pt *pt,
3699 struct perf_session *session)
3700{
3701 struct evlist *evlist = session->evlist;
3702 struct evsel *evsel = intel_pt_evsel(pt, evlist);
3703 struct perf_event_attr attr;
3704 u64 id;
3705 int err;
3706
3707 if (!evsel) {
3708 pr_debug("There are no selected events with Intel Processor Trace data\n");
3709 return 0;
3710 }
3711
3712 memset(&attr, 0, sizeof(struct perf_event_attr));
3713 attr.size = sizeof(struct perf_event_attr);
3714 attr.type = PERF_TYPE_HARDWARE;
3715 attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
3716 attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
3717 PERF_SAMPLE_PERIOD;
3718 if (pt->timeless_decoding)
3719 attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
3720 else
3721 attr.sample_type |= PERF_SAMPLE_TIME;
3722 if (!pt->per_cpu_mmaps)
3723 attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
3724 attr.exclude_user = evsel->core.attr.exclude_user;
3725 attr.exclude_kernel = evsel->core.attr.exclude_kernel;
3726 attr.exclude_hv = evsel->core.attr.exclude_hv;
3727 attr.exclude_host = evsel->core.attr.exclude_host;
3728 attr.exclude_guest = evsel->core.attr.exclude_guest;
3729 attr.sample_id_all = evsel->core.attr.sample_id_all;
3730 attr.read_format = evsel->core.attr.read_format;
3731
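	/*
	 * Base synthesized event ids well away from the ids already in use to
	 * avoid clashing with them; 0 is avoided because it would look like
	 * no id.
	 */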
3732 id = evsel->core.id[0] + 1000000000;
3733 if (!id)
3734 id = 1;
3735
3736 if (pt->synth_opts.branches) {
3737 attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
3738 attr.sample_period = 1;
3739 attr.sample_type |= PERF_SAMPLE_ADDR;
3740 err = intel_pt_synth_event(session, "branches", &attr, id);
3741 if (err)
3742 return err;
3743 pt->sample_branches = true;
3744 pt->branches_sample_type = attr.sample_type;
3745 pt->branches_id = id;
3746 id += 1;
3747 attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
3748 }
3749
3750 if (pt->synth_opts.callchain)
3751 attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
3752 if (pt->synth_opts.last_branch) {
3753 attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
3754 /*
3755 * We don't use the hardware index, but the sample generation
3756 * code uses the new format branch_stack with this field,
3757 * so the event attributes must indicate that it's present.
3758 */
3759 attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
3760 }
3761
3762 if (pt->synth_opts.instructions) {
3763 attr.config = PERF_COUNT_HW_INSTRUCTIONS;
3764 if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
3765 attr.sample_period =
3766 intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
3767 else
3768 attr.sample_period = pt->synth_opts.period;
3769 err = intel_pt_synth_event(session, "instructions", &attr, id);
3770 if (err)
3771 return err;
3772 pt->sample_instructions = true;
3773 pt->instructions_sample_type = attr.sample_type;
3774 pt->instructions_id = id;
3775 id += 1;
3776 }
3777
3778 if (pt->synth_opts.cycles) {
3779 attr.config = PERF_COUNT_HW_CPU_CYCLES;
3780 if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
3781 attr.sample_period =
3782 intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
3783 else
3784 attr.sample_period = pt->synth_opts.period;
3785 err = intel_pt_synth_event(session, "cycles", &attr, id);
3786 if (err)
3787 return err;
3788 pt->sample_cycles = true;
3789 pt->cycles_sample_type = attr.sample_type;
3790 pt->cycles_id = id;
3791 id += 1;
3792 }
3793
3794 attr.sample_type &= ~(u64)PERF_SAMPLE_PERIOD;
3795 attr.sample_period = 1;
3796
3797 if (pt->synth_opts.transactions) {
3798 attr.config = PERF_COUNT_HW_INSTRUCTIONS;
3799 err = intel_pt_synth_event(session, "transactions", &attr, id);
3800 if (err)
3801 return err;
3802 pt->sample_transactions = true;
3803 pt->transactions_sample_type = attr.sample_type;
3804 pt->transactions_id = id;
3805 intel_pt_set_event_name(evlist, id, "transactions");
3806 id += 1;
3807 }
3808
3809 attr.type = PERF_TYPE_SYNTH;
3810 attr.sample_type |= PERF_SAMPLE_RAW;
3811
3812 if (pt->synth_opts.ptwrites) {
3813 attr.config = PERF_SYNTH_INTEL_PTWRITE;
3814 err = intel_pt_synth_event(session, "ptwrite", &attr, id);
3815 if (err)
3816 return err;
3817 pt->sample_ptwrites = true;
3818 pt->ptwrites_sample_type = attr.sample_type;
3819 pt->ptwrites_id = id;
3820 intel_pt_set_event_name(evlist, id, "ptwrite");
3821 id += 1;
3822 }
3823
3824 if (pt->synth_opts.pwr_events) {
3825 pt->sample_pwr_events = true;
3826 pt->pwr_events_sample_type = attr.sample_type;
3827
3828 attr.config = PERF_SYNTH_INTEL_CBR;
3829 err = intel_pt_synth_event(session, "cbr", &attr, id);
3830 if (err)
3831 return err;
3832 pt->cbr_id = id;
3833 intel_pt_set_event_name(evlist, id, "cbr");
3834 id += 1;
3835
3836 attr.config = PERF_SYNTH_INTEL_PSB;
3837 err = intel_pt_synth_event(session, "psb", &attr, id);
3838 if (err)
3839 return err;
3840 pt->psb_id = id;
3841 intel_pt_set_event_name(evlist, id, "psb");
3842 id += 1;
3843 }
3844
3845 if (pt->synth_opts.pwr_events && (evsel->core.attr.config & INTEL_PT_CFG_PWR_EVT_EN)) {
3846 attr.config = PERF_SYNTH_INTEL_MWAIT;
3847 err = intel_pt_synth_event(session, "mwait", &attr, id);
3848 if (err)
3849 return err;
3850 pt->mwait_id = id;
3851 intel_pt_set_event_name(evlist, id, "mwait");
3852 id += 1;
3853
3854 attr.config = PERF_SYNTH_INTEL_PWRE;
3855 err = intel_pt_synth_event(session, "pwre", &attr, id);
3856 if (err)
3857 return err;
3858 pt->pwre_id = id;
3859 intel_pt_set_event_name(evlist, id, "pwre");
3860 id += 1;
3861
3862 attr.config = PERF_SYNTH_INTEL_EXSTOP;
3863 err = intel_pt_synth_event(session, "exstop", &attr, id);
3864 if (err)
3865 return err;
3866 pt->exstop_id = id;
3867 intel_pt_set_event_name(evlist, id, "exstop");
3868 id += 1;
3869
3870 attr.config = PERF_SYNTH_INTEL_PWRX;
3871 err = intel_pt_synth_event(session, "pwrx", &attr, id);
3872 if (err)
3873 return err;
3874 pt->pwrx_id = id;
3875 intel_pt_set_event_name(evlist, id, "pwrx");
3876 id += 1;
3877 }
3878
3879 if (pt->synth_opts.intr_events && (evsel->core.attr.config & INTEL_PT_CFG_EVT_EN)) {
3880 attr.config = PERF_SYNTH_INTEL_EVT;
3881 err = intel_pt_synth_event(session, "evt", &attr, id);
3882 if (err)
3883 return err;
3884 pt->evt_sample_type = attr.sample_type;
3885 pt->evt_id = id;
3886 intel_pt_set_event_name(evlist, id, "evt");
3887 id += 1;
3888 }
3889
3890 if (pt->synth_opts.intr_events && pt->cap_event_trace) {
3891 attr.config = PERF_SYNTH_INTEL_IFLAG_CHG;
3892 err = intel_pt_synth_event(session, "iflag", &attr, id);
3893 if (err)
3894 return err;
3895 pt->iflag_chg_sample_type = attr.sample_type;
3896 pt->iflag_chg_id = id;
3897 intel_pt_set_event_name(evlist, id, "iflag");
3898 id += 1;
3899 }
3900
3901 return 0;
3902}
3903
3904static void intel_pt_setup_pebs_events(struct intel_pt *pt)
3905{
3906 struct evsel *evsel;
3907
3908 if (!pt->synth_opts.other_events)
3909 return;
3910
3911 evlist__for_each_entry(pt->session->evlist, evsel) {
3912 if (evsel->core.attr.aux_output && evsel->core.id) {
3913 if (pt->single_pebs) {
3914 pt->single_pebs = false;
3915 return;
3916 }
3917 pt->single_pebs = true;
3918 pt->sample_pebs = true;
3919 pt->pebs_evsel = evsel;
3920 }
3921 }
3922}
3923
3924static struct evsel *intel_pt_find_sched_switch(struct evlist *evlist)
3925{
3926 struct evsel *evsel;
3927
3928 evlist__for_each_entry_reverse(evlist, evsel) {
3929 const char *name = evsel__name(evsel);
3930
3931 if (!strcmp(name, "sched:sched_switch"))
3932 return evsel;
3933 }
3934
3935 return NULL;
3936}
3937
3938static bool intel_pt_find_switch(struct evlist *evlist)
3939{
3940 struct evsel *evsel;
3941
3942 evlist__for_each_entry(evlist, evsel) {
3943 if (evsel->core.attr.context_switch)
3944 return true;
3945 }
3946
3947 return false;
3948}
3949
3950static int intel_pt_perf_config(const char *var, const char *value, void *data)
3951{
3952 struct intel_pt *pt = data;
3953
3954 if (!strcmp(var, "intel-pt.mispred-all"))
3955 pt->mispred_all = perf_config_bool(var, value);
3956
3957 if (!strcmp(var, "intel-pt.max-loops"))
3958 perf_config_int(&pt->max_loops, var, value);
3959
3960 return 0;
3961}
3962
3963/* Find least TSC which converts to ns or later */
3964static u64 intel_pt_tsc_start(u64 ns, struct intel_pt *pt)
3965{
3966 u64 tsc, tm;
3967
3968 tsc = perf_time_to_tsc(ns, &pt->tc);
3969
3970 while (1) {
3971 tm = tsc_to_perf_time(tsc, &pt->tc);
3972 if (tm < ns)
3973 break;
3974 tsc -= 1;
3975 }
3976
3977 while (tm < ns)
3978 tm = tsc_to_perf_time(++tsc, &pt->tc);
3979
3980 return tsc;
3981}
3982
3983/* Find greatest TSC which converts to ns or earlier */
3984static u64 intel_pt_tsc_end(u64 ns, struct intel_pt *pt)
3985{
3986 u64 tsc, tm;
3987
3988 tsc = perf_time_to_tsc(ns, &pt->tc);
3989
3990 while (1) {
3991 tm = tsc_to_perf_time(tsc, &pt->tc);
3992 if (tm > ns)
3993 break;
3994 tsc += 1;
3995 }
3996
3997 while (tm > ns)
3998 tm = tsc_to_perf_time(--tsc, &pt->tc);
3999
4000 return tsc;
4001}
4002
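/*
 * Together, intel_pt_tsc_start() and intel_pt_tsc_end() round a perf-time
 * range inwards to TSC values, so that converting the TSC range back to
 * perf time never selects anything outside the requested range.
 */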
4003static int intel_pt_setup_time_ranges(struct intel_pt *pt,
4004 struct itrace_synth_opts *opts)
4005{
4006 struct perf_time_interval *p = opts->ptime_range;
4007 int n = opts->range_num;
4008 int i;
4009
4010 if (!n || !p || pt->timeless_decoding)
4011 return 0;
4012
4013 pt->time_ranges = calloc(n, sizeof(struct range));
4014 if (!pt->time_ranges)
4015 return -ENOMEM;
4016
4017 pt->range_cnt = n;
4018
4019 intel_pt_log("%s: %u range(s)\n", __func__, n);
4020
4021 for (i = 0; i < n; i++) {
4022 struct range *r = &pt->time_ranges[i];
4023 u64 ts = p[i].start;
4024 u64 te = p[i].end;
4025
4026 /*
4027 * Take care to ensure the TSC range matches the perf-time range
4028 * when converted back to perf-time.
4029 */
4030 r->start = ts ? intel_pt_tsc_start(ts, pt) : 0;
4031 r->end = te ? intel_pt_tsc_end(te, pt) : 0;
4032
4033 intel_pt_log("range %d: perf time interval: %"PRIu64" to %"PRIu64"\n",
4034 i, ts, te);
4035 intel_pt_log("range %d: TSC time interval: %#"PRIx64" to %#"PRIx64"\n",
4036 i, r->start, r->end);
4037 }
4038
4039 return 0;
4040}
4041
4042static int intel_pt_parse_vm_tm_corr_arg(struct intel_pt *pt, char **args)
4043{
4044 struct intel_pt_vmcs_info *vmcs_info;
4045 u64 tsc_offset, vmcs;
4046 char *p = *args;
4047
4048 errno = 0;
4049
4050 p = skip_spaces(p);
4051 if (!*p)
4052 return 1;
4053
4054 tsc_offset = strtoull(p, &p, 0);
4055 if (errno)
4056 return -errno;
4057 p = skip_spaces(p);
4058 if (*p != ':') {
4059 pt->dflt_tsc_offset = tsc_offset;
4060 *args = p;
4061 return 0;
4062 }
4063 p += 1;
4064 while (1) {
4065 vmcs = strtoull(p, &p, 0);
4066 if (errno)
4067 return -errno;
4068 if (!vmcs)
4069 return -EINVAL;
4070 vmcs_info = intel_pt_findnew_vmcs(&pt->vmcs_info, vmcs, tsc_offset);
4071 if (!vmcs_info)
4072 return -ENOMEM;
4073 p = skip_spaces(p);
4074 if (*p != ',')
4075 break;
4076 p += 1;
4077 }
4078 *args = p;
4079 return 0;
4080}
4081
4082static int intel_pt_parse_vm_tm_corr_args(struct intel_pt *pt)
4083{
4084 char *args = pt->synth_opts.vm_tm_corr_args;
4085 int ret;
4086
4087 if (!args)
4088 return 0;
4089
4090 do {
4091 ret = intel_pt_parse_vm_tm_corr_arg(pt, &args);
4092 } while (!ret);
4093
4094 if (ret < 0) {
4095 pr_err("Failed to parse VM Time Correlation options\n");
4096 return ret;
4097 }
4098
4099 return 0;
4100}
4101
4102static const char * const intel_pt_info_fmts[] = {
4103 [INTEL_PT_PMU_TYPE] = " PMU Type %"PRId64"\n",
4104 [INTEL_PT_TIME_SHIFT] = " Time Shift %"PRIu64"\n",
4105 [INTEL_PT_TIME_MULT] = " Time Multiplier %"PRIu64"\n",
4106 [INTEL_PT_TIME_ZERO] = " Time Zero %"PRIu64"\n",
4107 [INTEL_PT_CAP_USER_TIME_ZERO] = " Cap Time Zero %"PRId64"\n",
4108 [INTEL_PT_TSC_BIT] = " TSC bit %#"PRIx64"\n",
4109 [INTEL_PT_NORETCOMP_BIT] = " NoRETComp bit %#"PRIx64"\n",
4110 [INTEL_PT_HAVE_SCHED_SWITCH] = " Have sched_switch %"PRId64"\n",
4111 [INTEL_PT_SNAPSHOT_MODE] = " Snapshot mode %"PRId64"\n",
4112 [INTEL_PT_PER_CPU_MMAPS] = " Per-cpu maps %"PRId64"\n",
4113 [INTEL_PT_MTC_BIT] = " MTC bit %#"PRIx64"\n",
4114 [INTEL_PT_MTC_FREQ_BITS] = " MTC freq bits %#"PRIx64"\n",
4115 [INTEL_PT_TSC_CTC_N] = " TSC:CTC numerator %"PRIu64"\n",
4116 [INTEL_PT_TSC_CTC_D] = " TSC:CTC denominator %"PRIu64"\n",
4117 [INTEL_PT_CYC_BIT] = " CYC bit %#"PRIx64"\n",
4118 [INTEL_PT_MAX_NONTURBO_RATIO] = " Max non-turbo ratio %"PRIu64"\n",
4119 [INTEL_PT_FILTER_STR_LEN] = " Filter string len. %"PRIu64"\n",
4120};
4121
4122static void intel_pt_print_info(__u64 *arr, int start, int finish)
4123{
4124 int i;
4125
4126 if (!dump_trace)
4127 return;
4128
4129 for (i = start; i <= finish; i++) {
4130 const char *fmt = intel_pt_info_fmts[i];
4131
4132 if (fmt)
4133 fprintf(stdout, fmt, arr[i]);
4134 }
4135}
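
/*
 * Editor's illustration (values hypothetical): with dump_trace set
 * (e.g. perf report -D), the loop above prints one line per populated
 * slot using the formats in intel_pt_info_fmts[], along the lines of:
 *
 *	  PMU Type            8
 *	  Time Shift          10
 *	  Time Multiplier     642
 *	  Time Zero           142663164243487340
 */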

static void intel_pt_print_info_str(const char *name, const char *str)
{
	if (!dump_trace)
		return;

	fprintf(stdout, "  %-20s%s\n", name, str ? str : "");
}

static bool intel_pt_has(struct perf_record_auxtrace_info *auxtrace_info, int pos)
{
	return auxtrace_info->header.size >=
		sizeof(struct perf_record_auxtrace_info) + (sizeof(u64) * (pos + 1));
}
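
/*
 * Editor's note: intel_pt_has() is the backward-compatibility check that
 * lets newer perf read data files recorded by older perf. priv[] slots
 * were appended over time, so slot 'pos' is only read if the event is
 * large enough to contain it:
 *
 *	header.size >= sizeof(struct perf_record_auxtrace_info) +
 *		       (pos + 1) * sizeof(u64)
 *
 * A file recorded before, say, the CYC capability slot existed simply
 * fails the INTEL_PT_CYC_BIT check below and that block is skipped.
 */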

int intel_pt_process_auxtrace_info(union perf_event *event,
				   struct perf_session *session)
{
	struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
	size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS;
	struct intel_pt *pt;
	void *info_end;
	__u64 *info;
	int err;

	if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) +
					min_sz)
		return -EINVAL;

	pt = zalloc(sizeof(struct intel_pt));
	if (!pt)
		return -ENOMEM;

	pt->vmcs_info = RB_ROOT;

	addr_filters__init(&pt->filts);

	err = perf_config(intel_pt_perf_config, pt);
	if (err)
		goto err_free;

	err = auxtrace_queues__init(&pt->queues);
	if (err)
		goto err_free;

	if (session->itrace_synth_opts->set) {
		pt->synth_opts = *session->itrace_synth_opts;
	} else {
		struct itrace_synth_opts *opts = session->itrace_synth_opts;

		itrace_synth_opts__set_default(&pt->synth_opts, opts->default_no_sample);
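		/*
		 * Editor's note: with no explicit --itrace options, prefer
		 * synthesized callchains over branch samples, except when
		 * injecting or when no-sample defaults are requested.
		 */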
		if (!opts->default_no_sample && !opts->inject) {
			pt->synth_opts.branches = false;
			pt->synth_opts.callchain = true;
			pt->synth_opts.add_callchain = true;
		}
		pt->synth_opts.thread_stack = opts->thread_stack;
	}

	if (!(pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_USE_STDOUT))
		intel_pt_log_set_name(INTEL_PT_PMU_NAME);

	pt->session = session;
	pt->machine = &session->machines.host; /* No kvm support */
	pt->auxtrace_type = auxtrace_info->type;
	pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE];
	pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT];
	pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT];
	pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO];
	pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO];
	pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT];
	pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT];
	pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH];
	pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE];
	pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS];
	intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE,
			    INTEL_PT_PER_CPU_MMAPS);

	if (intel_pt_has(auxtrace_info, INTEL_PT_CYC_BIT)) {
		pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT];
		pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS];
		pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N];
		pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D];
		pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT];
		intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT,
				    INTEL_PT_CYC_BIT);
	}

	if (intel_pt_has(auxtrace_info, INTEL_PT_MAX_NONTURBO_RATIO)) {
		pt->max_non_turbo_ratio =
			auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO];
		intel_pt_print_info(&auxtrace_info->priv[0],
				    INTEL_PT_MAX_NONTURBO_RATIO,
				    INTEL_PT_MAX_NONTURBO_RATIO);
	}

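	/*
	 * Editor's note: past the fixed priv[] slots the event carries
	 * variable-length data (the address filter string, then the event
	 * trace capability flag), so it is walked with a pointer that is
	 * bounds-checked against the event's header.size.
	 */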
	info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;
	info_end = (void *)auxtrace_info + auxtrace_info->header.size;

	if (intel_pt_has(auxtrace_info, INTEL_PT_FILTER_STR_LEN)) {
		size_t len;

		len = auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN];
		intel_pt_print_info(&auxtrace_info->priv[0],
				    INTEL_PT_FILTER_STR_LEN,
				    INTEL_PT_FILTER_STR_LEN);
		if (len) {
			const char *filter = (const char *)info;

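			/*
			 * Editor's note: the recorded length excludes the
			 * NUL, and the string is stored padded to a multiple
			 * of 8 bytes so the following u64 data stays
			 * aligned; hence the roundup and the >> 3.
			 */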
			len = roundup(len + 1, 8);
			info += len >> 3;
			if ((void *)info > info_end) {
				pr_err("%s: bad filter string length\n", __func__);
				err = -EINVAL;
				goto err_free_queues;
			}
			pt->filter = memdup(filter, len);
			if (!pt->filter) {
				err = -ENOMEM;
				goto err_free_queues;
			}
			if (session->header.needs_swap)
				mem_bswap_64(pt->filter, len);
			if (pt->filter[len - 1]) {
				pr_err("%s: filter string not null terminated\n", __func__);
				err = -EINVAL;
				goto err_free_queues;
			}
			err = addr_filters__parse_bare_filter(&pt->filts,
							      filter);
			if (err)
				goto err_free_queues;
		}
		intel_pt_print_info_str("Filter string", pt->filter);
	}

	if ((void *)info < info_end) {
		pt->cap_event_trace = *info++;
		if (dump_trace)
			fprintf(stdout,
				"  Cap Event Trace     %d\n",
				pt->cap_event_trace);
	}

	pt->timeless_decoding = intel_pt_timeless_decoding(pt);
	if (pt->timeless_decoding && !pt->tc.time_mult)
		pt->tc.time_mult = 1;
	pt->have_tsc = intel_pt_have_tsc(pt);
	pt->sampling_mode = intel_pt_sampling_mode(pt);
	pt->est_tsc = !pt->timeless_decoding;

	if (pt->synth_opts.vm_time_correlation) {
		if (pt->timeless_decoding) {
			pr_err("Intel PT has no time information for VM Time Correlation\n");
			err = -EINVAL;
			goto err_free_queues;
		}
		if (session->itrace_synth_opts->ptime_range) {
			pr_err("Time ranges cannot be specified with VM Time Correlation\n");
			err = -EINVAL;
			goto err_free_queues;
		}
		/* Currently TSC Offset is calculated using MTC packets */
		if (!intel_pt_have_mtc(pt)) {
			pr_err("MTC packets must have been enabled for VM Time Correlation\n");
			err = -EINVAL;
			goto err_free_queues;
		}
		err = intel_pt_parse_vm_tm_corr_args(pt);
		if (err)
			goto err_free_queues;
	}

	pt->unknown_thread = thread__new(999999999, 999999999);
	if (!pt->unknown_thread) {
		err = -ENOMEM;
		goto err_free_queues;
	}

	/*
	 * Since this thread will not be kept in any rbtree nor in a
	 * list, initialize its list node so that at thread__put() the
	 * current thread lifetime assumption is kept and we don't segfault
	 * at list_del_init().
	 */
	INIT_LIST_HEAD(&pt->unknown_thread->node);

	err = thread__set_comm(pt->unknown_thread, "unknown", 0);
	if (err)
		goto err_delete_thread;
	if (thread__init_maps(pt->unknown_thread, pt->machine)) {
		err = -ENOMEM;
		goto err_delete_thread;
	}

	pt->auxtrace.process_event = intel_pt_process_event;
	pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event;
	pt->auxtrace.queue_data = intel_pt_queue_data;
	pt->auxtrace.dump_auxtrace_sample = intel_pt_dump_sample;
	pt->auxtrace.flush_events = intel_pt_flush;
	pt->auxtrace.free_events = intel_pt_free_events;
	pt->auxtrace.free = intel_pt_free;
	pt->auxtrace.evsel_is_auxtrace = intel_pt_evsel_is_auxtrace;
	session->auxtrace = &pt->auxtrace;

	if (dump_trace)
		return 0;

	if (pt->have_sched_switch == 1) {
		pt->switch_evsel = intel_pt_find_sched_switch(session->evlist);
		if (!pt->switch_evsel) {
			pr_err("%s: missing sched_switch event\n", __func__);
			err = -EINVAL;
			goto err_delete_thread;
		}
	} else if (pt->have_sched_switch == 2 &&
		   !intel_pt_find_switch(session->evlist)) {
		pr_err("%s: missing context_switch attribute flag\n", __func__);
		err = -EINVAL;
		goto err_delete_thread;
	}

	if (pt->synth_opts.log) {
		bool log_on_error = pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_ON_ERROR;
		unsigned int log_on_error_size = pt->synth_opts.log_on_error_size;

		intel_pt_log_enable(log_on_error, log_on_error_size);
	}

	/* Maximum non-turbo ratio is TSC freq / 100 MHz */
	if (pt->tc.time_mult) {
		u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000);

		if (!pt->max_non_turbo_ratio)
			pt->max_non_turbo_ratio =
					(tsc_freq + 50000000) / 100000000;
		intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
		intel_pt_log("Maximum non-turbo ratio %u\n",
			     pt->max_non_turbo_ratio);
		pt->cbr2khz = tsc_freq / pt->max_non_turbo_ratio / 1000;
	}
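	/*
	 * Editor's worked example (hypothetical 3 GHz TSC): tsc_freq is
	 * 3000000000, so max_non_turbo_ratio defaults to
	 * (3000000000 + 50000000) / 100000000 = 30, and cbr2khz is
	 * 3000000000 / 30 / 1000 = 100000, i.e. one core-bus-ratio step
	 * corresponds to 100 MHz.
	 */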

	err = intel_pt_setup_time_ranges(pt, session->itrace_synth_opts);
	if (err)
		goto err_delete_thread;

	if (pt->synth_opts.calls)
		pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
				       PERF_IP_FLAG_TRACE_END;
	if (pt->synth_opts.returns)
		pt->branches_filter |= PERF_IP_FLAG_RETURN |
				       PERF_IP_FLAG_TRACE_BEGIN;

	if ((pt->synth_opts.callchain || pt->synth_opts.add_callchain) &&
	    !symbol_conf.use_callchain) {
		symbol_conf.use_callchain = true;
		if (callchain_register_param(&callchain_param) < 0) {
			symbol_conf.use_callchain = false;
			pt->synth_opts.callchain = false;
			pt->synth_opts.add_callchain = false;
		}
	}

	if (pt->synth_opts.add_callchain) {
		err = intel_pt_callchain_init(pt);
		if (err)
			goto err_delete_thread;
	}

	if (pt->synth_opts.last_branch || pt->synth_opts.add_last_branch) {
		pt->br_stack_sz = pt->synth_opts.last_branch_sz;
		pt->br_stack_sz_plus = pt->br_stack_sz;
	}

	if (pt->synth_opts.add_last_branch) {
		err = intel_pt_br_stack_init(pt);
		if (err)
			goto err_delete_thread;
		/*
		 * Additional branch stack size to cater for tracing from the
		 * actual sample ip to where the sample time is recorded.
		 * Measured at about 200 branches, but generously set to 1024.
		 * If kernel space is not being traced, then add just 1 for the
		 * branch to kernel space.
		 */
		if (intel_pt_tracing_kernel(pt))
			pt->br_stack_sz_plus += 1024;
		else
			pt->br_stack_sz_plus += 1;
	}
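	/*
	 * Editor's worked example (hypothetical sizes): with a last-branch
	 * stack of 64 entries, br_stack_sz = 64 and, when kernel space is
	 * traced, br_stack_sz_plus = 64 + 1024 = 1088; otherwise
	 * br_stack_sz_plus = 64 + 1 = 65.
	 */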

	pt->use_thread_stack = pt->synth_opts.callchain ||
			       pt->synth_opts.add_callchain ||
			       pt->synth_opts.thread_stack ||
			       pt->synth_opts.last_branch ||
			       pt->synth_opts.add_last_branch;

	pt->callstack = pt->synth_opts.callchain ||
			pt->synth_opts.add_callchain ||
			pt->synth_opts.thread_stack;

	err = intel_pt_synth_events(pt, session);
	if (err)
		goto err_delete_thread;

	intel_pt_setup_pebs_events(pt);

	if (perf_data__is_pipe(session->data)) {
		pr_warning("WARNING: Intel PT with pipe mode is not recommended.\n"
			   "         The output cannot be relied upon.  In particular,\n"
			   "         timestamps and the order of events may be incorrect.\n");
	}

	if (pt->sampling_mode || list_empty(&session->auxtrace_index))
		err = auxtrace_queue_data(session, true, true);
	else
		err = auxtrace_queues__process_index(&pt->queues, session);
	if (err)
		goto err_delete_thread;

	if (pt->queues.populated)
		pt->data_queued = true;

	if (pt->timeless_decoding)
		pr_debug2("Intel PT decoding without timestamps\n");

	return 0;

err_delete_thread:
	zfree(&pt->chain);
	thread__zput(pt->unknown_thread);
err_free_queues:
	intel_pt_log_disable();
	auxtrace_queues__free(&pt->queues);
	session->auxtrace = NULL;
err_free:
	addr_filters__exit(&pt->filts);
	zfree(&pt->filter);
	zfree(&pt->time_ranges);
	free(pt);
	return err;
}