// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_pt.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#include <inttypes.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/zalloc.h>

#include "session.h"
#include "machine.h"
#include "memswap.h"
#include "sort.h"
#include "tool.h"
#include "event.h"
#include "evlist.h"
#include "evsel.h"
#include "map.h"
#include "color.h"
#include "thread.h"
#include "thread-stack.h"
#include "symbol.h"
#include "callchain.h"
#include "dso.h"
#include "debug.h"
#include "auxtrace.h"
#include "tsc.h"
#include "intel-pt.h"
#include "config.h"
#include "util/perf_api_probe.h"
#include "util/synthetic-events.h"
#include "time-utils.h"

#include "../arch/x86/include/uapi/asm/perf_regs.h"

#include "intel-pt-decoder/intel-pt-log.h"
#include "intel-pt-decoder/intel-pt-decoder.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
#include "intel-pt-decoder/intel-pt-pkt-decoder.h"

#define MAX_TIMESTAMP (~0ULL)

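/*
 * Bits of the Intel PT event config (perf_event_attr::config). These are
 * understood to mirror fields of the IA32_RTIT_CTL MSR as exposed through
 * the intel_pt PMU format attributes, e.g. BRANCH_EN corresponds to
 * RTIT_CTL.BranchEn (bit 13).
 */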
#define INTEL_PT_CFG_PASS_THRU	BIT_ULL(0)
#define INTEL_PT_CFG_PWR_EVT_EN	BIT_ULL(4)
#define INTEL_PT_CFG_BRANCH_EN	BIT_ULL(13)
#define INTEL_PT_CFG_EVT_EN	BIT_ULL(31)
#define INTEL_PT_CFG_TNT_DIS	BIT_ULL(55)

struct range {
	u64 start;
	u64 end;
};

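/*
 * Per-session decoder state: one struct intel_pt covers the AUX trace data
 * of a perf session, with one struct intel_pt_queue (below) per auxtrace
 * queue, i.e. per CPU or per thread depending on how the trace was recorded.
 */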
struct intel_pt {
	struct auxtrace auxtrace;
	struct auxtrace_queues queues;
	struct auxtrace_heap heap;
	u32 auxtrace_type;
	struct perf_session *session;
	struct machine *machine;
	struct evsel *switch_evsel;
	struct thread *unknown_thread;
	bool timeless_decoding;
	bool sampling_mode;
	bool snapshot_mode;
	bool per_cpu_mmaps;
	bool have_tsc;
	bool data_queued;
	bool est_tsc;
	bool sync_switch;
	bool mispred_all;
	bool use_thread_stack;
	bool callstack;
	bool cap_event_trace;
	unsigned int br_stack_sz;
	unsigned int br_stack_sz_plus;
	int have_sched_switch;
	u32 pmu_type;
	u64 kernel_start;
	u64 switch_ip;
	u64 ptss_ip;
	u64 first_timestamp;

	struct perf_tsc_conversion tc;
	bool cap_user_time_zero;

	struct itrace_synth_opts synth_opts;

	bool sample_instructions;
	u64 instructions_sample_type;
	u64 instructions_id;

	bool sample_branches;
	u32 branches_filter;
	u64 branches_sample_type;
	u64 branches_id;

	bool sample_transactions;
	u64 transactions_sample_type;
	u64 transactions_id;

	bool sample_ptwrites;
	u64 ptwrites_sample_type;
	u64 ptwrites_id;

	bool sample_pwr_events;
	u64 pwr_events_sample_type;
	u64 mwait_id;
	u64 pwre_id;
	u64 exstop_id;
	u64 pwrx_id;
	u64 cbr_id;
	u64 psb_id;

	bool single_pebs;
	bool sample_pebs;
	struct evsel *pebs_evsel;

	u64 evt_sample_type;
	u64 evt_id;

	u64 iflag_chg_sample_type;
	u64 iflag_chg_id;

	u64 tsc_bit;
	u64 mtc_bit;
	u64 mtc_freq_bits;
	u32 tsc_ctc_ratio_n;
	u32 tsc_ctc_ratio_d;
	u64 cyc_bit;
	u64 noretcomp_bit;
	unsigned max_non_turbo_ratio;
	unsigned cbr2khz;
	int max_loops;

	unsigned long num_events;

	char *filter;
	struct addr_filters filts;

	struct range *time_ranges;
	unsigned int range_cnt;

	struct ip_callchain *chain;
	struct branch_stack *br_stack;

	u64 dflt_tsc_offset;
	struct rb_root vmcs_info;
};

enum switch_state {
	INTEL_PT_SS_NOT_TRACING,
	INTEL_PT_SS_UNKNOWN,
	INTEL_PT_SS_TRACING,
	INTEL_PT_SS_EXPECTING_SWITCH_EVENT,
	INTEL_PT_SS_EXPECTING_SWITCH_IP,
};

/* applicable_counters is 64-bits */
#define INTEL_PT_MAX_PEBS 64

struct intel_pt_pebs_event {
	struct evsel *evsel;
	u64 id;
};

struct intel_pt_queue {
	struct intel_pt *pt;
	unsigned int queue_nr;
	struct auxtrace_buffer *buffer;
	struct auxtrace_buffer *old_buffer;
	void *decoder;
	const struct intel_pt_state *state;
	struct ip_callchain *chain;
	struct branch_stack *last_branch;
	union perf_event *event_buf;
	bool on_heap;
	bool stop;
	bool step_through_buffers;
	bool use_buffer_pid_tid;
	bool sync_switch;
	bool sample_ipc;
	pid_t pid, tid;
	int cpu;
	int switch_state;
	pid_t next_tid;
	struct thread *thread;
	struct machine *guest_machine;
	struct thread *unknown_guest_thread;
	pid_t guest_machine_pid;
	bool exclude_kernel;
	bool have_sample;
	u64 time;
	u64 timestamp;
	u64 sel_timestamp;
	bool sel_start;
	unsigned int sel_idx;
	u32 flags;
	u16 insn_len;
	u64 last_insn_cnt;
	u64 ipc_insn_cnt;
	u64 ipc_cyc_cnt;
	u64 last_in_insn_cnt;
	u64 last_in_cyc_cnt;
	u64 last_br_insn_cnt;
	u64 last_br_cyc_cnt;
	unsigned int cbr_seen;
	char insn[INTEL_PT_INSN_BUF_SZ];
	struct intel_pt_pebs_event pebs[INTEL_PT_MAX_PEBS];
};

static void intel_pt_dump(struct intel_pt *pt __maybe_unused,
			  unsigned char *buf, size_t len)
{
	struct intel_pt_pkt packet;
	size_t pos = 0;
	int ret, pkt_len, i;
	char desc[INTEL_PT_PKT_DESC_MAX];
	const char *color = PERF_COLOR_BLUE;
	enum intel_pt_pkt_ctx ctx = INTEL_PT_NO_CTX;

	color_fprintf(stdout, color,
		      ". ... Intel Processor Trace data: size %zu bytes\n",
		      len);

	while (len) {
		ret = intel_pt_get_packet(buf, len, &packet, &ctx);
		if (ret > 0)
			pkt_len = ret;
		else
			pkt_len = 1;
		printf(".");
		color_fprintf(stdout, color, "  %08x: ", pos);
		for (i = 0; i < pkt_len; i++)
			color_fprintf(stdout, color, " %02x", buf[i]);
		for (; i < 16; i++)
			color_fprintf(stdout, color, "   ");
		if (ret > 0) {
			ret = intel_pt_pkt_desc(&packet, desc,
						INTEL_PT_PKT_DESC_MAX);
			if (ret > 0)
				color_fprintf(stdout, color, " %s\n", desc);
		} else {
			color_fprintf(stdout, color, " Bad packet!\n");
		}
		pos += pkt_len;
		buf += pkt_len;
		len -= pkt_len;
	}
}

static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
				size_t len)
{
	printf(".\n");
	intel_pt_dump(pt, buf, len);
}

static void intel_pt_log_event(union perf_event *event)
{
	FILE *f = intel_pt_log_fp();

	if (!intel_pt_enable_logging || !f)
		return;

	perf_event__fprintf(event, NULL, f);
}

static void intel_pt_dump_sample(struct perf_session *session,
				 struct perf_sample *sample)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	printf("\n");
	intel_pt_dump(pt, sample->aux_sample.data, sample->aux_sample.size);
}

static bool intel_pt_log_events(struct intel_pt *pt, u64 tm)
{
	struct perf_time_interval *range = pt->synth_opts.ptime_range;
	int n = pt->synth_opts.range_num;

	if (pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_ALL_PERF_EVTS)
		return true;

	if (pt->synth_opts.log_minus_flags & AUXTRACE_LOG_FLG_ALL_PERF_EVTS)
		return false;

	/* perf_time__ranges_skip_sample does not work if time is zero */
	if (!tm)
		tm = 1;

	return !n || !perf_time__ranges_skip_sample(range, n, tm);
}

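/*
 * Find, or create with the default TSC offset, the node tracking the guest
 * TSC offset for a given VMCS pointer. The rb-tree is keyed by VMCS address.
 */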
static struct intel_pt_vmcs_info *intel_pt_findnew_vmcs(struct rb_root *rb_root,
							u64 vmcs,
							u64 dflt_tsc_offset)
{
	struct rb_node **p = &rb_root->rb_node;
	struct rb_node *parent = NULL;
	struct intel_pt_vmcs_info *v;

	while (*p) {
		parent = *p;
		v = rb_entry(parent, struct intel_pt_vmcs_info, rb_node);

		if (v->vmcs == vmcs)
			return v;

		if (vmcs < v->vmcs)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	v = zalloc(sizeof(*v));
	if (v) {
		v->vmcs = vmcs;
		v->tsc_offset = dflt_tsc_offset;
		v->reliable = dflt_tsc_offset;

		rb_link_node(&v->rb_node, parent, p);
		rb_insert_color(&v->rb_node, rb_root);
	}

	return v;
}

static struct intel_pt_vmcs_info *intel_pt_findnew_vmcs_info(void *data, uint64_t vmcs)
{
	struct intel_pt_queue *ptq = data;
	struct intel_pt *pt = ptq->pt;

	if (!vmcs && !pt->dflt_tsc_offset)
		return NULL;

	return intel_pt_findnew_vmcs(&pt->vmcs_info, vmcs, pt->dflt_tsc_offset);
}

static void intel_pt_free_vmcs_info(struct intel_pt *pt)
{
	struct intel_pt_vmcs_info *v;
	struct rb_node *n;

	n = rb_first(&pt->vmcs_info);
	while (n) {
		v = rb_entry(n, struct intel_pt_vmcs_info, rb_node);
		n = rb_next(n);
		rb_erase(&v->rb_node, &pt->vmcs_info);
		free(v);
	}
}

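/*
 * In snapshot or sampling mode, successive AUX buffers can overlap. Trim
 * buffer 'b' to begin where the data in 'a' ends, noting whether the two
 * buffers are known to contain consecutive trace.
 */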
static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
				   struct auxtrace_buffer *b)
{
	bool consecutive = false;
	void *start;

	start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
				      pt->have_tsc, &consecutive,
				      pt->synth_opts.vm_time_correlation);
	if (!start)
		return -EINVAL;
	/*
	 * In the case of vm_time_correlation, the overlap might contain TSC
	 * packets that will not be fixed, and that will then no longer work for
	 * overlap detection. Avoid that by zeroing out the overlap.
	 */
	if (pt->synth_opts.vm_time_correlation)
		memset(b->data, 0, start - b->data);
	b->use_size = b->data + b->size - start;
	b->use_data = start;
	if (b->use_size && consecutive)
		b->consecutive = true;
	return 0;
}

static int intel_pt_get_buffer(struct intel_pt_queue *ptq,
			       struct auxtrace_buffer *buffer,
			       struct auxtrace_buffer *old_buffer,
			       struct intel_pt_buffer *b)
{
	bool might_overlap;

	if (!buffer->data) {
		int fd = perf_data__fd(ptq->pt->session->data);

		buffer->data = auxtrace_buffer__get_data(buffer, fd);
		if (!buffer->data)
			return -ENOMEM;
	}

	might_overlap = ptq->pt->snapshot_mode || ptq->pt->sampling_mode;
	if (might_overlap && !buffer->consecutive && old_buffer &&
	    intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
		return -ENOMEM;

	if (buffer->use_data) {
		b->len = buffer->use_size;
		b->buf = buffer->use_data;
	} else {
		b->len = buffer->size;
		b->buf = buffer->data;
	}
	b->ref_timestamp = buffer->reference;

	if (!old_buffer || (might_overlap && !buffer->consecutive)) {
		b->consecutive = false;
		b->trace_nr = buffer->buffer_nr + 1;
	} else {
		b->consecutive = true;
	}

	return 0;
}

/* Do not drop buffers with references - refer intel_pt_get_trace() */
static void intel_pt_lookahead_drop_buffer(struct intel_pt_queue *ptq,
					   struct auxtrace_buffer *buffer)
{
	if (!buffer || buffer == ptq->buffer || buffer == ptq->old_buffer)
		return;

	auxtrace_buffer__drop_data(buffer);
}

/* Must be serialized with respect to intel_pt_get_trace() */
static int intel_pt_lookahead(void *data, intel_pt_lookahead_cb_t cb,
			      void *cb_data)
{
	struct intel_pt_queue *ptq = data;
	struct auxtrace_buffer *buffer = ptq->buffer;
	struct auxtrace_buffer *old_buffer = ptq->old_buffer;
	struct auxtrace_queue *queue;
	int err = 0;

	queue = &ptq->pt->queues.queue_array[ptq->queue_nr];

	while (1) {
		struct intel_pt_buffer b = { .len = 0 };

		buffer = auxtrace_buffer__next(queue, buffer);
		if (!buffer)
			break;

		err = intel_pt_get_buffer(ptq, buffer, old_buffer, &b);
		if (err)
			break;

		if (b.len) {
			intel_pt_lookahead_drop_buffer(ptq, old_buffer);
			old_buffer = buffer;
		} else {
			intel_pt_lookahead_drop_buffer(ptq, buffer);
			continue;
		}

		err = cb(&b, cb_data);
		if (err)
			break;
	}

	if (buffer != old_buffer)
		intel_pt_lookahead_drop_buffer(ptq, buffer);
	intel_pt_lookahead_drop_buffer(ptq, old_buffer);

	return err;
}

/*
 * This function assumes data is processed sequentially only.
 * Must be serialized with respect to intel_pt_lookahead()
 */
static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
{
	struct intel_pt_queue *ptq = data;
	struct auxtrace_buffer *buffer = ptq->buffer;
	struct auxtrace_buffer *old_buffer = ptq->old_buffer;
	struct auxtrace_queue *queue;
	int err;

	if (ptq->stop) {
		b->len = 0;
		return 0;
	}

	queue = &ptq->pt->queues.queue_array[ptq->queue_nr];

	buffer = auxtrace_buffer__next(queue, buffer);
	if (!buffer) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		b->len = 0;
		return 0;
	}

	ptq->buffer = buffer;

	err = intel_pt_get_buffer(ptq, buffer, old_buffer, b);
	if (err)
		return err;

	if (ptq->step_through_buffers)
		ptq->stop = true;

	if (b->len) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		ptq->old_buffer = buffer;
	} else {
		auxtrace_buffer__drop_data(buffer);
		return intel_pt_get_trace(b, data);
	}

	return 0;
}

struct intel_pt_cache_entry {
	struct auxtrace_cache_entry entry;
	u64 insn_cnt;
	u64 byte_cnt;
	enum intel_pt_insn_op op;
	enum intel_pt_insn_branch branch;
	int length;
	int32_t rel;
	char insn[INTEL_PT_INSN_BUF_SZ];
};

static int intel_pt_config_div(const char *var, const char *value, void *data)
{
	int *d = data;
	long val;

	if (!strcmp(var, "intel-pt.cache-divisor")) {
		val = strtol(value, NULL, 0);
		if (val > 0 && val <= INT_MAX)
			*d = val;
	}

	return 0;
}

static int intel_pt_cache_divisor(void)
{
	static int d;

	if (d)
		return d;

	perf_config(intel_pt_config_div, &d);

	if (!d)
		d = 64;

	return d;
}

static unsigned int intel_pt_cache_size(struct dso *dso,
					struct machine *machine)
{
	off_t size;

	size = dso__data_size(dso, machine);
	size /= intel_pt_cache_divisor();
	if (size < 1000)
		return 10;
	if (size > (1 << 21))
		return 21;
	return 32 - __builtin_clz(size);
}

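/*
 * Instruction decode results are cached per DSO. The number of hash bits is
 * derived from the DSO size divided by intel-pt.cache-divisor (default 64),
 * clamped by intel_pt_cache_size() above to the range 2^10 to 2^21 entries.
 */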
static struct auxtrace_cache *intel_pt_cache(struct dso *dso,
					     struct machine *machine)
{
	struct auxtrace_cache *c;
	unsigned int bits;

	if (dso->auxtrace_cache)
		return dso->auxtrace_cache;

	bits = intel_pt_cache_size(dso, machine);

	/* Ignoring cache creation failure */
	c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200);

	dso->auxtrace_cache = c;

	return c;
}

static int intel_pt_cache_add(struct dso *dso, struct machine *machine,
			      u64 offset, u64 insn_cnt, u64 byte_cnt,
			      struct intel_pt_insn *intel_pt_insn)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);
	struct intel_pt_cache_entry *e;
	int err;

	if (!c)
		return -ENOMEM;

	e = auxtrace_cache__alloc_entry(c);
	if (!e)
		return -ENOMEM;

	e->insn_cnt = insn_cnt;
	e->byte_cnt = byte_cnt;
	e->op = intel_pt_insn->op;
	e->branch = intel_pt_insn->branch;
	e->length = intel_pt_insn->length;
	e->rel = intel_pt_insn->rel;
	memcpy(e->insn, intel_pt_insn->buf, INTEL_PT_INSN_BUF_SZ);

	err = auxtrace_cache__add(c, offset, &e->entry);
	if (err)
		auxtrace_cache__free_entry(c, e);

	return err;
}

static struct intel_pt_cache_entry *
intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);

	if (!c)
		return NULL;

	return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
}

static void intel_pt_cache_invalidate(struct dso *dso, struct machine *machine,
				      u64 offset)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);

	if (!c)
		return;

	auxtrace_cache__remove(dso->auxtrace_cache, offset);
}

static inline bool intel_pt_guest_kernel_ip(uint64_t ip)
{
	/* Assumes 64-bit kernel */
	return ip & (1ULL << 63);
}

static inline u8 intel_pt_nr_cpumode(struct intel_pt_queue *ptq, uint64_t ip, bool nr)
{
	if (nr) {
		return intel_pt_guest_kernel_ip(ip) ?
		       PERF_RECORD_MISC_GUEST_KERNEL :
		       PERF_RECORD_MISC_GUEST_USER;
	}

	return ip >= ptq->pt->kernel_start ?
	       PERF_RECORD_MISC_KERNEL :
	       PERF_RECORD_MISC_USER;
}

static inline u8 intel_pt_cpumode(struct intel_pt_queue *ptq, uint64_t from_ip, uint64_t to_ip)
{
	/* No support for non-zero CS base */
	if (from_ip)
		return intel_pt_nr_cpumode(ptq, from_ip, ptq->state->from_nr);
	return intel_pt_nr_cpumode(ptq, to_ip, ptq->state->to_nr);
}

static int intel_pt_get_guest(struct intel_pt_queue *ptq)
{
	struct machines *machines = &ptq->pt->session->machines;
	struct machine *machine;
	pid_t pid = ptq->pid <= 0 ? DEFAULT_GUEST_KERNEL_ID : ptq->pid;

	if (ptq->guest_machine && pid == ptq->guest_machine_pid)
		return 0;

	ptq->guest_machine = NULL;
	thread__zput(ptq->unknown_guest_thread);

	machine = machines__find_guest(machines, pid);
	if (!machine)
		return -1;

	ptq->unknown_guest_thread = machine__idle_thread(machine);
	if (!ptq->unknown_guest_thread)
		return -1;

	ptq->guest_machine = machine;
	ptq->guest_machine_pid = pid;

	return 0;
}

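/*
 * Walk object code from *ip, decoding one instruction at a time from the DSO
 * image, until a branch (or 'to_ip', or 'max_insn_cnt' instructions) is
 * reached. Walked blocks are added to the per-DSO cache so that repeated
 * walks of the same code are answered by intel_pt_cache_lookup() instead.
 */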
static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
				   uint64_t *insn_cnt_ptr, uint64_t *ip,
				   uint64_t to_ip, uint64_t max_insn_cnt,
				   void *data)
{
	struct intel_pt_queue *ptq = data;
	struct machine *machine = ptq->pt->machine;
	struct thread *thread;
	struct addr_location al;
	unsigned char buf[INTEL_PT_INSN_BUF_SZ];
	ssize_t len;
	int x86_64;
	u8 cpumode;
	u64 offset, start_offset, start_ip;
	u64 insn_cnt = 0;
	bool one_map = true;
	bool nr;

	intel_pt_insn->length = 0;

	if (to_ip && *ip == to_ip)
		goto out_no_cache;

	nr = ptq->state->to_nr;
	cpumode = intel_pt_nr_cpumode(ptq, *ip, nr);

	if (nr) {
		if (cpumode != PERF_RECORD_MISC_GUEST_KERNEL ||
		    intel_pt_get_guest(ptq))
			return -EINVAL;
		machine = ptq->guest_machine;
		thread = ptq->unknown_guest_thread;
	} else {
		thread = ptq->thread;
		if (!thread) {
			if (cpumode != PERF_RECORD_MISC_KERNEL)
				return -EINVAL;
			thread = ptq->pt->unknown_thread;
		}
	}

	while (1) {
		if (!thread__find_map(thread, cpumode, *ip, &al) || !al.map->dso)
			return -EINVAL;

		if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
		    dso__data_status_seen(al.map->dso,
					  DSO_DATA_STATUS_SEEN_ITRACE))
			return -ENOENT;

		offset = al.map->map_ip(al.map, *ip);

		if (!to_ip && one_map) {
			struct intel_pt_cache_entry *e;

			e = intel_pt_cache_lookup(al.map->dso, machine, offset);
			if (e &&
			    (!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) {
				*insn_cnt_ptr = e->insn_cnt;
				*ip += e->byte_cnt;
				intel_pt_insn->op = e->op;
				intel_pt_insn->branch = e->branch;
				intel_pt_insn->length = e->length;
				intel_pt_insn->rel = e->rel;
				memcpy(intel_pt_insn->buf, e->insn,
				       INTEL_PT_INSN_BUF_SZ);
				intel_pt_log_insn_no_data(intel_pt_insn, *ip);
				return 0;
			}
		}

		start_offset = offset;
		start_ip = *ip;

		/* Load maps to ensure dso->is_64_bit has been updated */
		map__load(al.map);

		x86_64 = al.map->dso->is_64_bit;

		while (1) {
			len = dso__data_read_offset(al.map->dso, machine,
						    offset, buf,
						    INTEL_PT_INSN_BUF_SZ);
			if (len <= 0)
				return -EINVAL;

			if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn))
				return -EINVAL;

			intel_pt_log_insn(intel_pt_insn, *ip);

			insn_cnt += 1;

			if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH)
				goto out;

			if (max_insn_cnt && insn_cnt >= max_insn_cnt)
				goto out_no_cache;

			*ip += intel_pt_insn->length;

			if (to_ip && *ip == to_ip) {
				intel_pt_insn->length = 0;
				goto out_no_cache;
			}

			if (*ip >= al.map->end)
				break;

			offset += intel_pt_insn->length;
		}
		one_map = false;
	}
out:
	*insn_cnt_ptr = insn_cnt;

	if (!one_map)
		goto out_no_cache;

	/*
	 * Didn't look up in the 'to_ip' case, so do it now to prevent duplicate
	 * entries.
	 */
	if (to_ip) {
		struct intel_pt_cache_entry *e;

		e = intel_pt_cache_lookup(al.map->dso, machine, start_offset);
		if (e)
			return 0;
	}

	/* Ignore cache errors */
	intel_pt_cache_add(al.map->dso, machine, start_offset, insn_cnt,
			   *ip - start_ip, intel_pt_insn);

	return 0;

out_no_cache:
	*insn_cnt_ptr = insn_cnt;
	return 0;
}

static bool intel_pt_match_pgd_ip(struct intel_pt *pt, uint64_t ip,
				  uint64_t offset, const char *filename)
{
	struct addr_filter *filt;
	bool have_filter = false;
	bool hit_tracestop = false;
	bool hit_filter = false;

	list_for_each_entry(filt, &pt->filts.head, list) {
		if (filt->start)
			have_filter = true;

		if ((filename && !filt->filename) ||
		    (!filename && filt->filename) ||
		    (filename && strcmp(filename, filt->filename)))
			continue;

		if (!(offset >= filt->addr && offset < filt->addr + filt->size))
			continue;

		intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s hit filter: %s offset %#"PRIx64" size %#"PRIx64"\n",
			     ip, offset, filename ? filename : "[kernel]",
			     filt->start ? "filter" : "stop",
			     filt->addr, filt->size);

		if (filt->start)
			hit_filter = true;
		else
			hit_tracestop = true;
	}

	if (!hit_tracestop && !hit_filter)
		intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s is not in a filter region\n",
			     ip, offset, filename ? filename : "[kernel]");

	return hit_tracestop || (have_filter && !hit_filter);
}

static int __intel_pt_pgd_ip(uint64_t ip, void *data)
{
	struct intel_pt_queue *ptq = data;
	struct thread *thread;
	struct addr_location al;
	u8 cpumode;
	u64 offset;

	if (ptq->state->to_nr) {
		if (intel_pt_guest_kernel_ip(ip))
			return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);
		/* No support for decoding guest user space */
		return -EINVAL;
	} else if (ip >= ptq->pt->kernel_start) {
		return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);
	}

	cpumode = PERF_RECORD_MISC_USER;

	thread = ptq->thread;
	if (!thread)
		return -EINVAL;

	if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso)
		return -EINVAL;

	offset = al.map->map_ip(al.map, ip);

	return intel_pt_match_pgd_ip(ptq->pt, ip, offset,
				     al.map->dso->long_name);
}

static bool intel_pt_pgd_ip(uint64_t ip, void *data)
{
	return __intel_pt_pgd_ip(ip, data) > 0;
}

static bool intel_pt_get_config(struct intel_pt *pt,
				struct perf_event_attr *attr, u64 *config)
{
	if (attr->type == pt->pmu_type) {
		if (config)
			*config = attr->config;
		return true;
	}

	return false;
}

static bool intel_pt_exclude_kernel(struct intel_pt *pt)
{
	struct evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, NULL) &&
		    !evsel->core.attr.exclude_kernel)
			return false;
	}
	return true;
}

static bool intel_pt_return_compression(struct intel_pt *pt)
{
	struct evsel *evsel;
	u64 config;

	if (!pt->noretcomp_bit)
		return true;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
		    (config & pt->noretcomp_bit))
			return false;
	}
	return true;
}

static bool intel_pt_branch_enable(struct intel_pt *pt)
{
	struct evsel *evsel;
	u64 config;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
		    (config & INTEL_PT_CFG_PASS_THRU) &&
		    !(config & INTEL_PT_CFG_BRANCH_EN))
			return false;
	}
	return true;
}

static bool intel_pt_disabled_tnt(struct intel_pt *pt)
{
	struct evsel *evsel;
	u64 config;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
		    config & INTEL_PT_CFG_TNT_DIS)
			return true;
	}
	return false;
}

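/*
 * The MTC period is a bit-field within the event config; mtc_freq_bits is
 * the mask for that field, so the loop below shifts by the position of the
 * mask's lowest set bit to extract the value. A minimal sketch of the
 * extraction, assuming a hypothetical mask of 0x3c00 and a config value
 * with the field set to 3:
 *
 *	shift  = ffs(0x3c00) - 1;		// 10
 *	period = (config & 0x3c00) >> shift;	// 3
 */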
static unsigned int intel_pt_mtc_period(struct intel_pt *pt)
{
	struct evsel *evsel;
	unsigned int shift;
	u64 config;

	if (!pt->mtc_freq_bits)
		return 0;

	for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++)
		config >>= 1;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config))
			return (config & pt->mtc_freq_bits) >> shift;
	}
	return 0;
}

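/*
 * Decoding is "timeless" when the trace cannot provide usable timestamps
 * (no TSC packets or no TSC-to-time conversion information), in which case
 * queues are processed whole, in recorded order, instead of being
 * interleaved by timestamp via the auxtrace heap.
 */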
static bool intel_pt_timeless_decoding(struct intel_pt *pt)
{
	struct evsel *evsel;
	bool timeless_decoding = true;
	u64 config;

	if (!pt->tsc_bit || !pt->cap_user_time_zero || pt->synth_opts.timeless_decoding)
		return true;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
			return true;
		if (intel_pt_get_config(pt, &evsel->core.attr, &config)) {
			if (config & pt->tsc_bit)
				timeless_decoding = false;
			else
				return true;
		}
	}
	return timeless_decoding;
}

static bool intel_pt_tracing_kernel(struct intel_pt *pt)
{
	struct evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, NULL) &&
		    !evsel->core.attr.exclude_kernel)
			return true;
	}
	return false;
}

static bool intel_pt_have_tsc(struct intel_pt *pt)
{
	struct evsel *evsel;
	bool have_tsc = false;
	u64 config;

	if (!pt->tsc_bit)
		return false;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config)) {
			if (config & pt->tsc_bit)
				have_tsc = true;
			else
				return false;
		}
	}
	return have_tsc;
}

static bool intel_pt_have_mtc(struct intel_pt *pt)
{
	struct evsel *evsel;
	u64 config;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
		    (config & pt->mtc_bit))
			return true;
	}
	return false;
}

static bool intel_pt_sampling_mode(struct intel_pt *pt)
{
	struct evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if ((evsel->core.attr.sample_type & PERF_SAMPLE_AUX) &&
		    evsel->core.attr.aux_sample_size)
			return true;
	}
	return false;
}

static u64 intel_pt_ctl(struct intel_pt *pt)
{
	struct evsel *evsel;
	u64 config;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config))
			return config;
	}
	return 0;
}

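/*
 * Convert nanoseconds to TSC ticks: the inverse of the perf conversion
 * time = (tsc * time_mult) >> time_shift, i.e.
 *
 *	ticks = (ns << time_shift) / time_mult
 *
 * computed as quotient and remainder separately so that the left shift
 * cannot overflow 64 bits.
 */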
static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns)
{
	u64 quot, rem;

	quot = ns / pt->tc.time_mult;
	rem  = ns % pt->tc.time_mult;
	return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) /
	       pt->tc.time_mult;
}

static struct ip_callchain *intel_pt_alloc_chain(struct intel_pt *pt)
{
	size_t sz = sizeof(struct ip_callchain);

	/* Add 1 to callchain_sz for callchain context */
	sz += (pt->synth_opts.callchain_sz + 1) * sizeof(u64);
	return zalloc(sz);
}

static int intel_pt_callchain_init(struct intel_pt *pt)
{
	struct evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CALLCHAIN))
			evsel->synth_sample_type |= PERF_SAMPLE_CALLCHAIN;
	}

	pt->chain = intel_pt_alloc_chain(pt);
	if (!pt->chain)
		return -ENOMEM;

	return 0;
}

static void intel_pt_add_callchain(struct intel_pt *pt,
				   struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(pt->machine,
							sample->pid,
							sample->tid);

	thread_stack__sample_late(thread, sample->cpu, pt->chain,
				  pt->synth_opts.callchain_sz + 1, sample->ip,
				  pt->kernel_start);

	sample->callchain = pt->chain;
}

static struct branch_stack *intel_pt_alloc_br_stack(unsigned int entry_cnt)
{
	size_t sz = sizeof(struct branch_stack);

	sz += entry_cnt * sizeof(struct branch_entry);
	return zalloc(sz);
}

static int intel_pt_br_stack_init(struct intel_pt *pt)
{
	struct evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK))
			evsel->synth_sample_type |= PERF_SAMPLE_BRANCH_STACK;
	}

	pt->br_stack = intel_pt_alloc_br_stack(pt->br_stack_sz);
	if (!pt->br_stack)
		return -ENOMEM;

	return 0;
}

static void intel_pt_add_br_stack(struct intel_pt *pt,
				  struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(pt->machine,
							sample->pid,
							sample->tid);

	thread_stack__br_sample_late(thread, sample->cpu, pt->br_stack,
				     pt->br_stack_sz, sample->ip,
				     pt->kernel_start);

	sample->branch_stack = pt->br_stack;
}

/* INTEL_PT_LBR_0, INTEL_PT_LBR_1 and INTEL_PT_LBR_2 */
#define LBRS_MAX (INTEL_PT_BLK_ITEM_ID_CNT * 3U)

static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
						   unsigned int queue_nr)
{
	struct intel_pt_params params = { .get_trace = 0, };
	struct perf_env *env = pt->machine->env;
	struct intel_pt_queue *ptq;

	ptq = zalloc(sizeof(struct intel_pt_queue));
	if (!ptq)
		return NULL;

	if (pt->synth_opts.callchain) {
		ptq->chain = intel_pt_alloc_chain(pt);
		if (!ptq->chain)
			goto out_free;
	}

	if (pt->synth_opts.last_branch || pt->synth_opts.other_events) {
		unsigned int entry_cnt = max(LBRS_MAX, pt->br_stack_sz);

		ptq->last_branch = intel_pt_alloc_br_stack(entry_cnt);
		if (!ptq->last_branch)
			goto out_free;
	}

	ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
	if (!ptq->event_buf)
		goto out_free;

	ptq->pt = pt;
	ptq->queue_nr = queue_nr;
	ptq->exclude_kernel = intel_pt_exclude_kernel(pt);
	ptq->pid = -1;
	ptq->tid = -1;
	ptq->cpu = -1;
	ptq->next_tid = -1;

	params.get_trace = intel_pt_get_trace;
	params.walk_insn = intel_pt_walk_next_insn;
	params.lookahead = intel_pt_lookahead;
	params.findnew_vmcs_info = intel_pt_findnew_vmcs_info;
	params.data = ptq;
	params.return_compression = intel_pt_return_compression(pt);
	params.branch_enable = intel_pt_branch_enable(pt);
	params.ctl = intel_pt_ctl(pt);
	params.max_non_turbo_ratio = pt->max_non_turbo_ratio;
	params.mtc_period = intel_pt_mtc_period(pt);
	params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
	params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d;
	params.quick = pt->synth_opts.quick;
	params.vm_time_correlation = pt->synth_opts.vm_time_correlation;
	params.vm_tm_corr_dry_run = pt->synth_opts.vm_tm_corr_dry_run;
	params.first_timestamp = pt->first_timestamp;
	params.max_loops = pt->max_loops;

	/* Cannot walk code without TNT, so force 'quick' mode */
	if (params.branch_enable && intel_pt_disabled_tnt(pt) && !params.quick)
		params.quick = 1;

	if (pt->filts.cnt > 0)
		params.pgd_ip = intel_pt_pgd_ip;

	if (pt->synth_opts.instructions) {
		if (pt->synth_opts.period) {
			switch (pt->synth_opts.period_type) {
			case PERF_ITRACE_PERIOD_INSTRUCTIONS:
				params.period_type =
						INTEL_PT_PERIOD_INSTRUCTIONS;
				params.period = pt->synth_opts.period;
				break;
			case PERF_ITRACE_PERIOD_TICKS:
				params.period_type = INTEL_PT_PERIOD_TICKS;
				params.period = pt->synth_opts.period;
				break;
			case PERF_ITRACE_PERIOD_NANOSECS:
				params.period_type = INTEL_PT_PERIOD_TICKS;
				params.period = intel_pt_ns_to_ticks(pt,
							pt->synth_opts.period);
				break;
			default:
				break;
			}
		}

		if (!params.period) {
			params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS;
			params.period = 1;
		}
	}

	if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18))
		params.flags |= INTEL_PT_FUP_WITH_NLIP;

	ptq->decoder = intel_pt_decoder_new(&params);
	if (!ptq->decoder)
		goto out_free;

	return ptq;

out_free:
	zfree(&ptq->event_buf);
	zfree(&ptq->last_branch);
	zfree(&ptq->chain);
	free(ptq);
	return NULL;
}

static void intel_pt_free_queue(void *priv)
{
	struct intel_pt_queue *ptq = priv;

	if (!ptq)
		return;
	thread__zput(ptq->thread);
	thread__zput(ptq->unknown_guest_thread);
	intel_pt_decoder_free(ptq->decoder);
	zfree(&ptq->event_buf);
	zfree(&ptq->last_branch);
	zfree(&ptq->chain);
	free(ptq);
}

static void intel_pt_first_timestamp(struct intel_pt *pt, u64 timestamp)
{
	unsigned int i;

	pt->first_timestamp = timestamp;

	for (i = 0; i < pt->queues.nr_queues; i++) {
		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
		struct intel_pt_queue *ptq = queue->priv;

		if (ptq && ptq->decoder)
			intel_pt_set_first_timestamp(ptq->decoder, timestamp);
	}
}

static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
				     struct auxtrace_queue *queue)
{
	struct intel_pt_queue *ptq = queue->priv;

	if (queue->tid == -1 || pt->have_sched_switch) {
		ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
		if (ptq->tid == -1)
			ptq->pid = -1;
		thread__zput(ptq->thread);
	}

	if (!ptq->thread && ptq->tid != -1)
		ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);

	if (ptq->thread) {
		ptq->pid = ptq->thread->pid_;
		if (queue->cpu == -1)
			ptq->cpu = ptq->thread->cpu;
	}
}

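/*
 * Translate the decoder state for the current branch into PERF_IP_FLAG_*
 * sample flags, and latch the instruction bytes used when synthesizing
 * samples for it.
 */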
static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;

	ptq->insn_len = 0;
	if (ptq->state->flags & INTEL_PT_ABORT_TX) {
		ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
	} else if (ptq->state->flags & INTEL_PT_ASYNC) {
		if (!ptq->state->to_ip)
			ptq->flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_END;
		else if (ptq->state->from_nr && !ptq->state->to_nr)
			ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
				     PERF_IP_FLAG_VMEXIT;
		else
			ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
				     PERF_IP_FLAG_ASYNC |
				     PERF_IP_FLAG_INTERRUPT;
	} else {
		if (ptq->state->from_ip)
			ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
		else
			ptq->flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_BEGIN;
		if (ptq->state->flags & INTEL_PT_IN_TX)
			ptq->flags |= PERF_IP_FLAG_IN_TX;
		ptq->insn_len = ptq->state->insn_len;
		memcpy(ptq->insn, ptq->state->insn, INTEL_PT_INSN_BUF_SZ);
	}

	if (ptq->state->type & INTEL_PT_TRACE_BEGIN)
		ptq->flags |= PERF_IP_FLAG_TRACE_BEGIN;
	if (ptq->state->type & INTEL_PT_TRACE_END)
		ptq->flags |= PERF_IP_FLAG_TRACE_END;

	if (pt->cap_event_trace) {
		if (ptq->state->type & INTEL_PT_IFLAG_CHG) {
			if (!ptq->state->from_iflag)
				ptq->flags |= PERF_IP_FLAG_INTR_DISABLE;
			if (ptq->state->from_iflag != ptq->state->to_iflag)
				ptq->flags |= PERF_IP_FLAG_INTR_TOGGLE;
		} else if (!ptq->state->to_iflag) {
			ptq->flags |= PERF_IP_FLAG_INTR_DISABLE;
		}
	}
}

static void intel_pt_setup_time_range(struct intel_pt *pt,
				      struct intel_pt_queue *ptq)
{
	if (!pt->range_cnt)
		return;

	ptq->sel_timestamp = pt->time_ranges[0].start;
	ptq->sel_idx = 0;

	if (ptq->sel_timestamp) {
		ptq->sel_start = true;
	} else {
		ptq->sel_timestamp = pt->time_ranges[0].end;
		ptq->sel_start = false;
	}
}

static int intel_pt_setup_queue(struct intel_pt *pt,
				struct auxtrace_queue *queue,
				unsigned int queue_nr)
{
	struct intel_pt_queue *ptq = queue->priv;

	if (list_empty(&queue->head))
		return 0;

	if (!ptq) {
		ptq = intel_pt_alloc_queue(pt, queue_nr);
		if (!ptq)
			return -ENOMEM;
		queue->priv = ptq;

		if (queue->cpu != -1)
			ptq->cpu = queue->cpu;
		ptq->tid = queue->tid;

		ptq->cbr_seen = UINT_MAX;

		if (pt->sampling_mode && !pt->snapshot_mode &&
		    pt->timeless_decoding)
			ptq->step_through_buffers = true;

		ptq->sync_switch = pt->sync_switch;

		intel_pt_setup_time_range(pt, ptq);
	}

	if (!ptq->on_heap &&
	    (!ptq->sync_switch ||
	     ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
		const struct intel_pt_state *state;
		int ret;

		if (pt->timeless_decoding)
			return 0;

		intel_pt_log("queue %u getting timestamp\n", queue_nr);
		intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
			     queue_nr, ptq->cpu, ptq->pid, ptq->tid);

		if (ptq->sel_start && ptq->sel_timestamp) {
			ret = intel_pt_fast_forward(ptq->decoder,
						    ptq->sel_timestamp);
			if (ret)
				return ret;
		}

		while (1) {
			state = intel_pt_decode(ptq->decoder);
			if (state->err) {
				if (state->err == INTEL_PT_ERR_NODATA) {
					intel_pt_log("queue %u has no timestamp\n",
						     queue_nr);
					return 0;
				}
				continue;
			}
			if (state->timestamp)
				break;
		}

		ptq->timestamp = state->timestamp;
		intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n",
			     queue_nr, ptq->timestamp);
		ptq->state = state;
		ptq->have_sample = true;
		if (ptq->sel_start && ptq->sel_timestamp &&
		    ptq->timestamp < ptq->sel_timestamp)
			ptq->have_sample = false;
		intel_pt_sample_flags(ptq);
		ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp);
		if (ret)
			return ret;
		ptq->on_heap = true;
	}

	return 0;
}

static int intel_pt_setup_queues(struct intel_pt *pt)
{
	unsigned int i;
	int ret;

	for (i = 0; i < pt->queues.nr_queues; i++) {
		ret = intel_pt_setup_queue(pt, &pt->queues.queue_array[i], i);
		if (ret)
			return ret;
	}
	return 0;
}

static inline bool intel_pt_skip_event(struct intel_pt *pt)
{
	return pt->synth_opts.initial_skip &&
	       pt->num_events++ < pt->synth_opts.initial_skip;
}

/*
 * Cannot count CBR as skipped because it won't go away until cbr == cbr_seen.
 * Also ensure CBR is first non-skipped event by allowing for 4 more samples
 * from this decoder state.
 */
static inline bool intel_pt_skip_cbr_event(struct intel_pt *pt)
{
	return pt->synth_opts.initial_skip &&
	       pt->num_events + 4 < pt->synth_opts.initial_skip;
}

static void intel_pt_prep_a_sample(struct intel_pt_queue *ptq,
				   union perf_event *event,
				   struct perf_sample *sample)
{
	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.size = sizeof(struct perf_event_header);

	sample->pid = ptq->pid;
	sample->tid = ptq->tid;
	sample->cpu = ptq->cpu;
	sample->insn_len = ptq->insn_len;
	memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
}

static void intel_pt_prep_b_sample(struct intel_pt *pt,
				   struct intel_pt_queue *ptq,
				   union perf_event *event,
				   struct perf_sample *sample)
{
	intel_pt_prep_a_sample(ptq, event, sample);

	if (!pt->timeless_decoding)
		sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc);

	sample->ip = ptq->state->from_ip;
	sample->addr = ptq->state->to_ip;
	sample->cpumode = intel_pt_cpumode(ptq, sample->ip, sample->addr);
	sample->period = 1;
	sample->flags = ptq->flags;

	event->sample.header.misc = sample->cpumode;
}

static int intel_pt_inject_event(union perf_event *event,
				 struct perf_sample *sample, u64 type)
{
	event->header.size = perf_event__sample_event_size(sample, type, 0);
	return perf_event__synthesize_sample(event, type, 0, sample);
}

static inline int intel_pt_opt_inject(struct intel_pt *pt,
				      union perf_event *event,
				      struct perf_sample *sample, u64 type)
{
	if (!pt->synth_opts.inject)
		return 0;

	return intel_pt_inject_event(event, sample, type);
}

static int intel_pt_deliver_synth_event(struct intel_pt *pt,
					union perf_event *event,
					struct perf_sample *sample, u64 type)
{
	int ret;

	ret = intel_pt_opt_inject(pt, event, sample, type);
	if (ret)
		return ret;

	ret = perf_session__deliver_synth_event(pt->session, event, sample);
	if (ret)
		pr_err("Intel PT: failed to deliver event, error %d\n", ret);

	return ret;
}

static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct dummy_branch_stack {
		u64 nr;
		u64 hw_idx;
		struct branch_entry entries;
	} dummy_bs;

	if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
		return 0;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_b_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->branches_id;
	sample.stream_id = ptq->pt->branches_id;

	/*
	 * perf report cannot handle events without a branch stack when using
	 * SORT_MODE__BRANCH so make a dummy one.
	 */
	if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) {
		dummy_bs = (struct dummy_branch_stack){
			.nr = 1,
			.hw_idx = -1ULL,
			.entries = {
				.from = sample.ip,
				.to = sample.addr,
			},
		};
		sample.branch_stack = (struct branch_stack *)&dummy_bs;
	}

	if (ptq->sample_ipc)
		sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt;
	if (sample.cyc_cnt) {
		sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_br_insn_cnt;
		ptq->last_br_insn_cnt = ptq->ipc_insn_cnt;
		ptq->last_br_cyc_cnt = ptq->ipc_cyc_cnt;
	}

	return intel_pt_deliver_synth_event(pt, event, &sample,
					    pt->branches_sample_type);
}

static void intel_pt_prep_sample(struct intel_pt *pt,
				 struct intel_pt_queue *ptq,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	intel_pt_prep_b_sample(pt, ptq, event, sample);

	if (pt->synth_opts.callchain) {
		thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
				     pt->synth_opts.callchain_sz + 1,
				     sample->ip, pt->kernel_start);
		sample->callchain = ptq->chain;
	}

	if (pt->synth_opts.last_branch) {
		thread_stack__br_sample(ptq->thread, ptq->cpu, ptq->last_branch,
					pt->br_stack_sz);
		sample->branch_stack = ptq->last_branch;
	}
}

static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->instructions_id;
	sample.stream_id = ptq->pt->instructions_id;
	if (pt->synth_opts.quick)
		sample.period = 1;
	else
		sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;

	if (ptq->sample_ipc)
		sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt;
	if (sample.cyc_cnt) {
		sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_in_insn_cnt;
		ptq->last_in_insn_cnt = ptq->ipc_insn_cnt;
		ptq->last_in_cyc_cnt = ptq->ipc_cyc_cnt;
	}

	ptq->last_insn_cnt = ptq->state->tot_insn_cnt;

	return intel_pt_deliver_synth_event(pt, event, &sample,
					    pt->instructions_sample_type);
}

static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->transactions_id;
	sample.stream_id = ptq->pt->transactions_id;

	return intel_pt_deliver_synth_event(pt, event, &sample,
					    pt->transactions_sample_type);
}

static void intel_pt_prep_p_sample(struct intel_pt *pt,
				   struct intel_pt_queue *ptq,
				   union perf_event *event,
				   struct perf_sample *sample)
{
	intel_pt_prep_sample(pt, ptq, event, sample);

	/*
	 * Zero IP is used to mean "trace start" but that is not the case for
	 * power or PTWRITE events with no IP, so clear the flags.
	 */
	if (!sample->ip)
		sample->flags = 0;
}

static int intel_pt_synth_ptwrite_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_ptwrite raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->ptwrites_id;
	sample.stream_id = ptq->pt->ptwrites_id;

	raw.flags = 0;
	raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
	raw.payload = cpu_to_le64(ptq->state->ptw_payload);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, event, &sample,
					    pt->ptwrites_sample_type);
}

static int intel_pt_synth_cbr_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_cbr raw;
	u32 flags;

	if (intel_pt_skip_cbr_event(pt))
		return 0;

	ptq->cbr_seen = ptq->state->cbr;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->cbr_id;
	sample.stream_id = ptq->pt->cbr_id;

	flags = (u16)ptq->state->cbr_payload | (pt->max_non_turbo_ratio << 16);
	raw.flags = cpu_to_le32(flags);
	raw.freq = cpu_to_le32(raw.cbr * pt->cbr2khz);
	raw.reserved3 = 0;

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, event, &sample,
					    pt->pwr_events_sample_type);
}

static int intel_pt_synth_psb_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_psb raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->psb_id;
	sample.stream_id = ptq->pt->psb_id;
	sample.flags = 0;

	raw.reserved = 0;
	raw.offset = ptq->state->psb_offset;

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, event, &sample,
					    pt->pwr_events_sample_type);
}

static int intel_pt_synth_mwait_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_mwait raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->mwait_id;
	sample.stream_id = ptq->pt->mwait_id;

	raw.reserved = 0;
	raw.payload = cpu_to_le64(ptq->state->mwait_payload);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, event, &sample,
					    pt->pwr_events_sample_type);
}

static int intel_pt_synth_pwre_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_pwre raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->pwre_id;
	sample.stream_id = ptq->pt->pwre_id;

	raw.reserved = 0;
	raw.payload = cpu_to_le64(ptq->state->pwre_payload);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, event, &sample,
					    pt->pwr_events_sample_type);
}

static int intel_pt_synth_exstop_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_exstop raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->exstop_id;
	sample.stream_id = ptq->pt->exstop_id;

	raw.flags = 0;
	raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, event, &sample,
					    pt->pwr_events_sample_type);
}

static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_pwrx raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->pwrx_id;
	sample.stream_id = ptq->pt->pwrx_id;

	raw.reserved = 0;
	raw.payload = cpu_to_le64(ptq->state->pwrx_payload);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, event, &sample,
					    pt->pwr_events_sample_type);
}

/*
 * PEBS gp_regs array indexes plus 1 so that 0 means not present. Refer
 * intel_pt_add_gp_regs().
 */
static const int pebs_gp_regs[] = {
	[PERF_REG_X86_FLAGS]	= 1,
	[PERF_REG_X86_IP]	= 2,
	[PERF_REG_X86_AX]	= 3,
	[PERF_REG_X86_CX]	= 4,
	[PERF_REG_X86_DX]	= 5,
	[PERF_REG_X86_BX]	= 6,
	[PERF_REG_X86_SP]	= 7,
	[PERF_REG_X86_BP]	= 8,
	[PERF_REG_X86_SI]	= 9,
	[PERF_REG_X86_DI]	= 10,
	[PERF_REG_X86_R8]	= 11,
	[PERF_REG_X86_R9]	= 12,
	[PERF_REG_X86_R10]	= 13,
	[PERF_REG_X86_R11]	= 14,
	[PERF_REG_X86_R12]	= 15,
	[PERF_REG_X86_R13]	= 16,
	[PERF_REG_X86_R14]	= 17,
	[PERF_REG_X86_R15]	= 18,
};

static u64 *intel_pt_add_gp_regs(struct regs_dump *intr_regs, u64 *pos,
				 const struct intel_pt_blk_items *items,
				 u64 regs_mask)
{
	const u64 *gp_regs = items->val[INTEL_PT_GP_REGS_POS];
	u32 mask = items->mask[INTEL_PT_GP_REGS_POS];
	u32 bit;
	int i;

	for (i = 0, bit = 1; i < PERF_REG_X86_64_MAX; i++, bit <<= 1) {
		/* Get the PEBS gp_regs array index */
		int n = pebs_gp_regs[i] - 1;

		if (n < 0)
			continue;
		/*
		 * Add only registers that were requested (i.e. 'regs_mask') and
		 * that were provided (i.e. 'mask'), and update the resulting
		 * mask (i.e. 'intr_regs->mask') accordingly.
		 */
		if (mask & 1 << n && regs_mask & bit) {
			intr_regs->mask |= bit;
			*pos++ = gp_regs[n];
		}
	}

	return pos;
}

#ifndef PERF_REG_X86_XMM0
#define PERF_REG_X86_XMM0 32
#endif

static void intel_pt_add_xmm(struct regs_dump *intr_regs, u64 *pos,
			     const struct intel_pt_blk_items *items,
			     u64 regs_mask)
{
	u32 mask = items->has_xmm & (regs_mask >> PERF_REG_X86_XMM0);
	const u64 *xmm = items->xmm;

	/*
	 * If there are any XMM registers, then there should be all of them.
	 * Nevertheless, follow the logic to add only registers that were
	 * requested (i.e. 'regs_mask') and that were provided (i.e. 'mask'),
	 * and update the resulting mask (i.e. 'intr_regs->mask') accordingly.
	 */
	intr_regs->mask |= (u64)mask << PERF_REG_X86_XMM0;

	for (; mask; mask >>= 1, xmm++) {
		if (mask & 1)
			*pos++ = *xmm;
	}
}

#define LBR_INFO_MISPRED	(1ULL << 63)
#define LBR_INFO_IN_TX		(1ULL << 62)
#define LBR_INFO_ABORT		(1ULL << 61)
#define LBR_INFO_CYCLES		0xffff

/* Refer kernel's intel_pmu_store_pebs_lbrs() */
static u64 intel_pt_lbr_flags(u64 info)
{
	union {
		struct branch_flags flags;
		u64 result;
	} u;

	u.result = 0;
	u.flags.mispred = !!(info & LBR_INFO_MISPRED);
	u.flags.predicted = !(info & LBR_INFO_MISPRED);
	u.flags.in_tx = !!(info & LBR_INFO_IN_TX);
	u.flags.abort = !!(info & LBR_INFO_ABORT);
	u.flags.cycles = info & LBR_INFO_CYCLES;

	return u.result;
}

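/*
 * PEBS block items carry LBR entries as groups of three values (from, to,
 * info), so each fully-present group of 3 mask bits below yields one
 * branch stack entry.
 */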
static void intel_pt_add_lbrs(struct branch_stack *br_stack,
			      const struct intel_pt_blk_items *items)
{
	u64 *to;
	int i;

	br_stack->nr = 0;

	to = &br_stack->entries[0].from;

	for (i = INTEL_PT_LBR_0_POS; i <= INTEL_PT_LBR_2_POS; i++) {
		u32 mask = items->mask[i];
		const u64 *from = items->val[i];

		for (; mask; mask >>= 3, from += 3) {
			if ((mask & 7) == 7) {
				*to++ = from[0];
				*to++ = from[1];
				*to++ = intel_pt_lbr_flags(from[2]);
				br_stack->nr += 1;
			}
		}
	}
}

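/*
 * Synthesize one PEBS sample from the decoded block items, filling in only
 * the sample fields that the target event's sample_type requests and that
 * the PEBS record actually provided.
 */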
2036static int intel_pt_do_synth_pebs_sample(struct intel_pt_queue *ptq, struct evsel *evsel, u64 id)
2037{
2038 const struct intel_pt_blk_items *items = &ptq->state->items;
2039 struct perf_sample sample = { .ip = 0, };
2040 union perf_event *event = ptq->event_buf;
2041 struct intel_pt *pt = ptq->pt;
2042 u64 sample_type = evsel->core.attr.sample_type;
2043 u8 cpumode;
2044 u64 regs[8 * sizeof(sample.intr_regs.mask)];
2045
2046 if (intel_pt_skip_event(pt))
2047 return 0;
2048
2049 intel_pt_prep_a_sample(ptq, event, &sample);
2050
2051 sample.id = id;
2052 sample.stream_id = id;
2053
2054 if (!evsel->core.attr.freq)
2055 sample.period = evsel->core.attr.sample_period;
2056
2057 /* No support for non-zero CS base */
2058 if (items->has_ip)
2059 sample.ip = items->ip;
2060 else if (items->has_rip)
2061 sample.ip = items->rip;
2062 else
2063 sample.ip = ptq->state->from_ip;
2064
2065 cpumode = intel_pt_cpumode(ptq, sample.ip, 0);
2066
2067 event->sample.header.misc = cpumode | PERF_RECORD_MISC_EXACT_IP;
2068
2069 sample.cpumode = cpumode;
2070
2071 if (sample_type & PERF_SAMPLE_TIME) {
2072 u64 timestamp = 0;
2073
2074 if (items->has_timestamp)
2075 timestamp = items->timestamp;
2076 else if (!pt->timeless_decoding)
2077 timestamp = ptq->timestamp;
2078 if (timestamp)
2079 sample.time = tsc_to_perf_time(timestamp, &pt->tc);
2080 }
2081
2082 if (sample_type & PERF_SAMPLE_CALLCHAIN &&
2083 pt->synth_opts.callchain) {
2084 thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
2085 pt->synth_opts.callchain_sz, sample.ip,
2086 pt->kernel_start);
2087 sample.callchain = ptq->chain;
2088 }
2089
2090 if (sample_type & PERF_SAMPLE_REGS_INTR &&
2091 (items->mask[INTEL_PT_GP_REGS_POS] ||
2092 items->mask[INTEL_PT_XMM_POS])) {
2093 u64 regs_mask = evsel->core.attr.sample_regs_intr;
2094 u64 *pos;
2095
2096 sample.intr_regs.abi = items->is_32_bit ?
2097 PERF_SAMPLE_REGS_ABI_32 :
2098 PERF_SAMPLE_REGS_ABI_64;
2099 sample.intr_regs.regs = regs;
2100
2101 pos = intel_pt_add_gp_regs(&sample.intr_regs, regs, items, regs_mask);
2102
2103 intel_pt_add_xmm(&sample.intr_regs, pos, items, regs_mask);
2104 }
2105
2106 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
2107 if (items->mask[INTEL_PT_LBR_0_POS] ||
2108 items->mask[INTEL_PT_LBR_1_POS] ||
2109 items->mask[INTEL_PT_LBR_2_POS]) {
2110 intel_pt_add_lbrs(ptq->last_branch, items);
2111 } else if (pt->synth_opts.last_branch) {
2112 thread_stack__br_sample(ptq->thread, ptq->cpu,
2113 ptq->last_branch,
2114 pt->br_stack_sz);
2115 } else {
2116 ptq->last_branch->nr = 0;
2117 }
2118 sample.branch_stack = ptq->last_branch;
2119 }
2120
2121 if (sample_type & PERF_SAMPLE_ADDR && items->has_mem_access_address)
2122 sample.addr = items->mem_access_address;
2123
2124 if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
2125 /*
2126 * Refer to the kernel's setup_pebs_adaptive_sample_data() and
2127 * intel_hsw_weight().
2128 */
2129 if (items->has_mem_access_latency) {
2130 u64 weight = items->mem_access_latency >> 32;
2131
2132 /*
2133 * Starting with Sapphire Rapids (SPR), the mem access latency
2134 * field contains both the cache latency [47:32] and the
2135 * instruction latency [15:0]. The cache latency is the same as
2136 * the mem access latency on previous platforms.
2137 *
2138 * In practice, no memory access takes longer than 4G cycles, so
2139 * latency >> 32 is used to distinguish the two formats of the
2140 * mem access latency field.
2141 */
2142 if (weight > 0) {
2143 sample.weight = weight & 0xffff;
2144 sample.ins_lat = items->mem_access_latency & 0xffff;
2145 } else
2146 sample.weight = items->mem_access_latency;
2147 }
2148 if (!sample.weight && items->has_tsx_aux_info) {
2149 /* Cycles last block */
2150 sample.weight = (u32)items->tsx_aux_info;
2151 }
2152 }
2153
2154 if (sample_type & PERF_SAMPLE_TRANSACTION && items->has_tsx_aux_info) {
2155 u64 ax = items->has_rax ? items->rax : 0;
2156 /* Refer to the kernel's intel_hsw_transaction() */
2157 u64 txn = (u8)(items->tsx_aux_info >> 32);
2158
2159 /* For RTM XABORTs also log the abort code from AX */
2160 if (txn & PERF_TXN_TRANSACTION && ax & 1)
2161 txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
2162 sample.transaction = txn;
2163 }
2164
2165 return intel_pt_deliver_synth_event(pt, event, &sample, sample_type);
2166}
2167
2168static int intel_pt_synth_single_pebs_sample(struct intel_pt_queue *ptq)
2169{
2170 struct intel_pt *pt = ptq->pt;
2171 struct evsel *evsel = pt->pebs_evsel;
2172 u64 id = evsel->core.id[0];
2173
2174 return intel_pt_do_synth_pebs_sample(ptq, evsel, id);
2175}
2176
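/*
 * With PEBS via Intel PT, a record's applicable_counters mask says which
 * hardware counters the record belongs to. Each set bit is mapped to an
 * evsel via ptq->pebs[], which is populated from
 * PERF_RECORD_AUX_OUTPUT_HW_ID events. If that information is missing,
 * fall back to the single configured PEBS event.
 */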
2177static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
2178{
2179 const struct intel_pt_blk_items *items = &ptq->state->items;
2180 struct intel_pt_pebs_event *pe;
2181 struct intel_pt *pt = ptq->pt;
2182 int err = -EINVAL;
2183 int hw_id;
2184
2185 if (!items->has_applicable_counters || !items->applicable_counters) {
2186 if (!pt->single_pebs)
2187 pr_err("PEBS-via-PT record with no applicable_counters\n");
2188 return intel_pt_synth_single_pebs_sample(ptq);
2189 }
2190
2191 for_each_set_bit(hw_id, (unsigned long *)&items->applicable_counters, INTEL_PT_MAX_PEBS) {
2192 pe = &ptq->pebs[hw_id];
2193 if (!pe->evsel) {
2194 if (!pt->single_pebs)
2195 pr_err("PEBS-via-PT record with no matching event, hw_id %d\n",
2196 hw_id);
2197 return intel_pt_synth_single_pebs_sample(ptq);
2198 }
2199 err = intel_pt_do_synth_pebs_sample(ptq, pe->evsel, pe->id);
2200 if (err)
2201 return err;
2202 }
2203
2204 return err;
2205}
2206
2207static int intel_pt_synth_events_sample(struct intel_pt_queue *ptq)
2208{
2209 struct intel_pt *pt = ptq->pt;
2210 union perf_event *event = ptq->event_buf;
2211 struct perf_sample sample = { .ip = 0, };
2212 struct {
2213 struct perf_synth_intel_evt cfe;
2214 struct perf_synth_intel_evd evd[INTEL_PT_MAX_EVDS];
2215 } raw;
2216 int i;
2217
2218 if (intel_pt_skip_event(pt))
2219 return 0;
2220
2221 intel_pt_prep_p_sample(pt, ptq, event, &sample);
2222
2223 sample.id = ptq->pt->evt_id;
2224 sample.stream_id = ptq->pt->evt_id;
2225
2226 raw.cfe.type = ptq->state->cfe_type;
2227 raw.cfe.reserved = 0;
2228 raw.cfe.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
2229 raw.cfe.vector = ptq->state->cfe_vector;
2230 raw.cfe.evd_cnt = ptq->state->evd_cnt;
2231
2232 for (i = 0; i < ptq->state->evd_cnt; i++) {
2233 raw.evd[i].et = 0;
2234 raw.evd[i].evd_type = ptq->state->evd[i].type;
2235 raw.evd[i].payload = ptq->state->evd[i].payload;
2236 }
2237
2238 sample.raw_size = perf_synth__raw_size(raw) +
2239 ptq->state->evd_cnt * sizeof(struct perf_synth_intel_evd);
2240 sample.raw_data = perf_synth__raw_data(&raw);
2241
2242 return intel_pt_deliver_synth_event(pt, event, &sample,
2243 pt->evt_sample_type);
2244}
2245
2246static int intel_pt_synth_iflag_chg_sample(struct intel_pt_queue *ptq)
2247{
2248 struct intel_pt *pt = ptq->pt;
2249 union perf_event *event = ptq->event_buf;
2250 struct perf_sample sample = { .ip = 0, };
2251 struct perf_synth_intel_iflag_chg raw;
2252
2253 if (intel_pt_skip_event(pt))
2254 return 0;
2255
2256 intel_pt_prep_p_sample(pt, ptq, event, &sample);
2257
2258 sample.id = ptq->pt->iflag_chg_id;
2259 sample.stream_id = ptq->pt->iflag_chg_id;
2260
2261 raw.flags = 0;
2262 raw.iflag = ptq->state->to_iflag;
2263
2264 if (ptq->state->type & INTEL_PT_BRANCH) {
2265 raw.via_branch = 1;
2266 raw.branch_ip = ptq->state->to_ip;
2267 } else {
2268 sample.addr = 0;
2269 }
2270 sample.flags = ptq->flags;
2271
2272 sample.raw_size = perf_synth__raw_size(raw);
2273 sample.raw_data = perf_synth__raw_data(&raw);
2274
2275 return intel_pt_deliver_synth_event(pt, event, &sample,
2276 pt->iflag_chg_sample_type);
2277}
2278
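/*
 * Synthesize a decoder error event, unless the user has opted out of this
 * class of error (e.g. overflow, or trace data lost) via the itrace
 * error-minus flags.
 */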
2279static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
2280 pid_t pid, pid_t tid, u64 ip, u64 timestamp)
2281{
2282 union perf_event event;
2283 char msg[MAX_AUXTRACE_ERROR_MSG];
2284 int err;
2285
2286 if (pt->synth_opts.error_minus_flags) {
2287 if (code == INTEL_PT_ERR_OVR &&
2288 pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_OVERFLOW)
2289 return 0;
2290 if (code == INTEL_PT_ERR_LOST &&
2291 pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_DATA_LOST)
2292 return 0;
2293 }
2294
2295 intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);
2296
2297 auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
2298 code, cpu, pid, tid, ip, msg, timestamp);
2299
2300 err = perf_session__deliver_synth_event(pt->session, &event, NULL);
2301 if (err)
2302 pr_err("Intel Processor Trace: failed to deliver error event, error %d\n",
2303 err);
2304
2305 return err;
2306}
2307
2308static int intel_ptq_synth_error(struct intel_pt_queue *ptq,
2309 const struct intel_pt_state *state)
2310{
2311 struct intel_pt *pt = ptq->pt;
2312 u64 tm = ptq->timestamp;
2313
2314 tm = pt->timeless_decoding ? 0 : tsc_to_perf_time(tm, &pt->tc);
2315
2316 return intel_pt_synth_error(pt, state->err, ptq->cpu, ptq->pid,
2317 ptq->tid, state->from_ip, tm);
2318}
2319
2320static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
2321{
2322 struct auxtrace_queue *queue;
2323 pid_t tid = ptq->next_tid;
2324 int err;
2325
2326 if (tid == -1)
2327 return 0;
2328
2329 intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid);
2330
2331 err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid);
2332
2333 queue = &pt->queues.queue_array[ptq->queue_nr];
2334 intel_pt_set_pid_tid_cpu(pt, queue);
2335
2336 ptq->next_tid = -1;
2337
2338 return err;
2339}
2340
2341static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
2342{
2343 struct intel_pt *pt = ptq->pt;
2344
2345 return ip == pt->switch_ip &&
2346 (ptq->flags & PERF_IP_FLAG_BRANCH) &&
2347 !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC |
2348 PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT));
2349}
2350
2351#define INTEL_PT_PWR_EVT (INTEL_PT_MWAIT_OP | INTEL_PT_PWR_ENTRY | \
2352 INTEL_PT_EX_STOP | INTEL_PT_PWR_EXIT)
2353
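/*
 * Synthesize whatever samples are implied by the current decoder state.
 * Returns negative on error, and 1 when decoding of this queue should stop
 * until a context switch event re-adds it to the heap (see
 * intel_pt_sync_switch()).
 */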
2354static int intel_pt_sample(struct intel_pt_queue *ptq)
2355{
2356 const struct intel_pt_state *state = ptq->state;
2357 struct intel_pt *pt = ptq->pt;
2358 int err;
2359
2360 if (!ptq->have_sample)
2361 return 0;
2362
2363 ptq->have_sample = false;
2364
2365 if (pt->synth_opts.approx_ipc) {
2366 ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
2367 ptq->ipc_cyc_cnt = ptq->state->cycles;
2368 ptq->sample_ipc = true;
2369 } else {
2370 ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
2371 ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt;
2372 ptq->sample_ipc = ptq->state->flags & INTEL_PT_SAMPLE_IPC;
2373 }
2374
2375 /*
2376 * Do PEBS first to allow for the possibility that the PEBS timestamp
2377 * precedes the current timestamp.
2378 */
2379 if (pt->sample_pebs && state->type & INTEL_PT_BLK_ITEMS) {
2380 err = intel_pt_synth_pebs_sample(ptq);
2381 if (err)
2382 return err;
2383 }
2384
2385 if (pt->synth_opts.intr_events) {
2386 if (state->type & INTEL_PT_EVT) {
2387 err = intel_pt_synth_events_sample(ptq);
2388 if (err)
2389 return err;
2390 }
2391 if (state->type & INTEL_PT_IFLAG_CHG) {
2392 err = intel_pt_synth_iflag_chg_sample(ptq);
2393 if (err)
2394 return err;
2395 }
2396 }
2397
2398 if (pt->sample_pwr_events) {
2399 if (state->type & INTEL_PT_PSB_EVT) {
2400 err = intel_pt_synth_psb_sample(ptq);
2401 if (err)
2402 return err;
2403 }
2404 if (ptq->state->cbr != ptq->cbr_seen) {
2405 err = intel_pt_synth_cbr_sample(ptq);
2406 if (err)
2407 return err;
2408 }
2409 if (state->type & INTEL_PT_PWR_EVT) {
2410 if (state->type & INTEL_PT_MWAIT_OP) {
2411 err = intel_pt_synth_mwait_sample(ptq);
2412 if (err)
2413 return err;
2414 }
2415 if (state->type & INTEL_PT_PWR_ENTRY) {
2416 err = intel_pt_synth_pwre_sample(ptq);
2417 if (err)
2418 return err;
2419 }
2420 if (state->type & INTEL_PT_EX_STOP) {
2421 err = intel_pt_synth_exstop_sample(ptq);
2422 if (err)
2423 return err;
2424 }
2425 if (state->type & INTEL_PT_PWR_EXIT) {
2426 err = intel_pt_synth_pwrx_sample(ptq);
2427 if (err)
2428 return err;
2429 }
2430 }
2431 }
2432
2433 if (pt->sample_instructions && (state->type & INTEL_PT_INSTRUCTION)) {
2434 err = intel_pt_synth_instruction_sample(ptq);
2435 if (err)
2436 return err;
2437 }
2438
2439 if (pt->sample_transactions && (state->type & INTEL_PT_TRANSACTION)) {
2440 err = intel_pt_synth_transaction_sample(ptq);
2441 if (err)
2442 return err;
2443 }
2444
2445 if (pt->sample_ptwrites && (state->type & INTEL_PT_PTW)) {
2446 err = intel_pt_synth_ptwrite_sample(ptq);
2447 if (err)
2448 return err;
2449 }
2450
2451 if (!(state->type & INTEL_PT_BRANCH))
2452 return 0;
2453
2454 if (pt->use_thread_stack) {
2455 thread_stack__event(ptq->thread, ptq->cpu, ptq->flags,
2456 state->from_ip, state->to_ip, ptq->insn_len,
2457 state->trace_nr, pt->callstack,
2458 pt->br_stack_sz_plus,
2459 pt->mispred_all);
2460 } else {
2461 thread_stack__set_trace_nr(ptq->thread, ptq->cpu, state->trace_nr);
2462 }
2463
2464 if (pt->sample_branches) {
2465 if (state->from_nr != state->to_nr &&
2466 state->from_ip && state->to_ip) {
2467 struct intel_pt_state *st = (struct intel_pt_state *)state;
2468 u64 to_ip = st->to_ip;
2469 u64 from_ip = st->from_ip;
2470
2471 /*
2472 * perf cannot handle having different machines for ip
2473 * and addr, so create 2 branches.
2474 */
2475 st->to_ip = 0;
2476 err = intel_pt_synth_branch_sample(ptq);
2477 if (err)
2478 return err;
2479 st->from_ip = 0;
2480 st->to_ip = to_ip;
2481 err = intel_pt_synth_branch_sample(ptq);
2482 st->from_ip = from_ip;
2483 } else {
2484 err = intel_pt_synth_branch_sample(ptq);
2485 }
2486 if (err)
2487 return err;
2488 }
2489
2490 if (!ptq->sync_switch)
2491 return 0;
2492
2493 if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
2494 switch (ptq->switch_state) {
2495 case INTEL_PT_SS_NOT_TRACING:
2496 case INTEL_PT_SS_UNKNOWN:
2497 case INTEL_PT_SS_EXPECTING_SWITCH_IP:
2498 err = intel_pt_next_tid(pt, ptq);
2499 if (err)
2500 return err;
2501 ptq->switch_state = INTEL_PT_SS_TRACING;
2502 break;
2503 default:
2504 ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT;
2505 return 1;
2506 }
2507 } else if (!state->to_ip) {
2508 ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
2509 } else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) {
2510 ptq->switch_state = INTEL_PT_SS_UNKNOWN;
2511 } else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
2512 state->to_ip == pt->ptss_ip &&
2513 (ptq->flags & PERF_IP_FLAG_CALL)) {
2514 ptq->switch_state = INTEL_PT_SS_TRACING;
2515 }
2516
2517 return 0;
2518}
2519
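/*
 * Find the address of the kernel's __switch_to symbol, which marks a
 * context switch in the trace. Optionally also find the address at which
 * the switch becomes visible to perf: the sched_switch tracepoint probe
 * when sched_switch events are used, otherwise __perf_event_task_sched_out.
 */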
2520static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
2521{
2522 struct machine *machine = pt->machine;
2523 struct map *map;
2524 struct symbol *sym, *start;
2525 u64 ip, switch_ip = 0;
2526 const char *ptss;
2527
2528 if (ptss_ip)
2529 *ptss_ip = 0;
2530
2531 map = machine__kernel_map(machine);
2532 if (!map)
2533 return 0;
2534
2535 if (map__load(map))
2536 return 0;
2537
2538 start = dso__first_symbol(map->dso);
2539
2540 for (sym = start; sym; sym = dso__next_symbol(sym)) {
2541 if (sym->binding == STB_GLOBAL &&
2542 !strcmp(sym->name, "__switch_to")) {
2543 ip = map->unmap_ip(map, sym->start);
2544 if (ip >= map->start && ip < map->end) {
2545 switch_ip = ip;
2546 break;
2547 }
2548 }
2549 }
2550
2551 if (!switch_ip || !ptss_ip)
2552 return 0;
2553
2554 if (pt->have_sched_switch == 1)
2555 ptss = "perf_trace_sched_switch";
2556 else
2557 ptss = "__perf_event_task_sched_out";
2558
2559 for (sym = start; sym; sym = dso__next_symbol(sym)) {
2560 if (!strcmp(sym->name, ptss)) {
2561 ip = map->unmap_ip(map, sym->start);
2562 if (ip >= map->start && ip < map->end) {
2563 *ptss_ip = ip;
2564 break;
2565 }
2566 }
2567 }
2568
2569 return switch_ip;
2570}
2571
2572static void intel_pt_enable_sync_switch(struct intel_pt *pt)
2573{
2574 unsigned int i;
2575
2576 pt->sync_switch = true;
2577
2578 for (i = 0; i < pt->queues.nr_queues; i++) {
2579 struct auxtrace_queue *queue = &pt->queues.queue_array[i];
2580 struct intel_pt_queue *ptq = queue->priv;
2581
2582 if (ptq)
2583 ptq->sync_switch = true;
2584 }
2585}
2586
2587/*
2588 * To filter against time ranges, it is only necessary to look at the next start
2589 * or end time.
2590 */
2591static bool intel_pt_next_time(struct intel_pt_queue *ptq)
2592{
2593 struct intel_pt *pt = ptq->pt;
2594
2595 if (ptq->sel_start) {
2596 /* Next time is an end time */
2597 ptq->sel_start = false;
2598 ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].end;
2599 return true;
2600 } else if (ptq->sel_idx + 1 < pt->range_cnt) {
2601 /* Next time is a start time */
2602 ptq->sel_start = true;
2603 ptq->sel_idx += 1;
2604 ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].start;
2605 return true;
2606 }
2607
2608 /* No next time */
2609 return false;
2610}
2611
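/*
 * Apply the selected time ranges: while before the next start time, fast
 * forward the decoder to it; once past an end time, move on to the next
 * range or, if there is none, return 1 to stop decoding this queue.
 */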
2612static int intel_pt_time_filter(struct intel_pt_queue *ptq, u64 *ff_timestamp)
2613{
2614 int err;
2615
2616 while (1) {
2617 if (ptq->sel_start) {
2618 if (ptq->timestamp >= ptq->sel_timestamp) {
2619 /* After start time, so consider next time */
2620 intel_pt_next_time(ptq);
2621 if (!ptq->sel_timestamp) {
2622 /* No end time */
2623 return 0;
2624 }
2625 /* Check against end time */
2626 continue;
2627 }
2628 /* Before start time, so fast forward */
2629 ptq->have_sample = false;
2630 if (ptq->sel_timestamp > *ff_timestamp) {
2631 if (ptq->sync_switch) {
2632 intel_pt_next_tid(ptq->pt, ptq);
2633 ptq->switch_state = INTEL_PT_SS_UNKNOWN;
2634 }
2635 *ff_timestamp = ptq->sel_timestamp;
2636 err = intel_pt_fast_forward(ptq->decoder,
2637 ptq->sel_timestamp);
2638 if (err)
2639 return err;
2640 }
2641 return 0;
2642 } else if (ptq->timestamp > ptq->sel_timestamp) {
2643 /* After end time, so consider next time */
2644 if (!intel_pt_next_time(ptq)) {
2645 /* No next time range, so stop decoding */
2646 ptq->have_sample = false;
2647 ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
2648 return 1;
2649 }
2650 /* Check against next start time */
2651 continue;
2652 } else {
2653 /* Before end time */
2654 return 0;
2655 }
2656 }
2657}
2658
2659static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
2660{
2661 const struct intel_pt_state *state = ptq->state;
2662 struct intel_pt *pt = ptq->pt;
2663 u64 ff_timestamp = 0;
2664 int err;
2665
2666 if (!pt->kernel_start) {
2667 pt->kernel_start = machine__kernel_start(pt->machine);
2668 if (pt->per_cpu_mmaps &&
2669 (pt->have_sched_switch == 1 || pt->have_sched_switch == 3) &&
2670 !pt->timeless_decoding && intel_pt_tracing_kernel(pt) &&
2671 !pt->sampling_mode && !pt->synth_opts.vm_time_correlation) {
2672 pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip);
2673 if (pt->switch_ip) {
2674 intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
2675 pt->switch_ip, pt->ptss_ip);
2676 intel_pt_enable_sync_switch(pt);
2677 }
2678 }
2679 }
2680
2681 intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
2682 ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
2683 while (1) {
2684 err = intel_pt_sample(ptq);
2685 if (err)
2686 return err;
2687
2688 state = intel_pt_decode(ptq->decoder);
2689 if (state->err) {
2690 if (state->err == INTEL_PT_ERR_NODATA)
2691 return 1;
2692 if (ptq->sync_switch &&
2693 state->from_ip >= pt->kernel_start) {
2694 ptq->sync_switch = false;
2695 intel_pt_next_tid(pt, ptq);
2696 }
2697 ptq->timestamp = state->est_timestamp;
2698 if (pt->synth_opts.errors) {
2699 err = intel_ptq_synth_error(ptq, state);
2700 if (err)
2701 return err;
2702 }
2703 continue;
2704 }
2705
2706 ptq->state = state;
2707 ptq->have_sample = true;
2708 intel_pt_sample_flags(ptq);
2709
2710 /* Use estimated TSC upon return to user space */
2711 if (pt->est_tsc &&
2712 (state->from_ip >= pt->kernel_start || !state->from_ip) &&
2713 state->to_ip && state->to_ip < pt->kernel_start) {
2714 intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
2715 state->timestamp, state->est_timestamp);
2716 ptq->timestamp = state->est_timestamp;
2717 /* Use estimated TSC in unknown switch state */
2718 } else if (ptq->sync_switch &&
2719 ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
2720 intel_pt_is_switch_ip(ptq, state->to_ip) &&
2721 ptq->next_tid == -1) {
2722 intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
2723 state->timestamp, state->est_timestamp);
2724 ptq->timestamp = state->est_timestamp;
2725 } else if (state->timestamp > ptq->timestamp) {
2726 ptq->timestamp = state->timestamp;
2727 }
2728
2729 if (ptq->sel_timestamp) {
2730 err = intel_pt_time_filter(ptq, &ff_timestamp);
2731 if (err)
2732 return err;
2733 }
2734
2735 if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) {
2736 *timestamp = ptq->timestamp;
2737 return 0;
2738 }
2739 }
2740 return 0;
2741}
2742
2743static inline int intel_pt_update_queues(struct intel_pt *pt)
2744{
2745 if (pt->queues.new_data) {
2746 pt->queues.new_data = false;
2747 return intel_pt_setup_queues(pt);
2748 }
2749 return 0;
2750}
2751
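/*
 * Process queues in timestamp order using the auxtrace heap. The queue
 * with the smallest ordinal (timestamp) is popped and decoded up to just
 * past the next queue's ordinal (capped at the requested timestamp), then
 * pushed back, which keeps the synthesized events globally ordered.
 */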
2752static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp)
2753{
2754 unsigned int queue_nr;
2755 u64 ts;
2756 int ret;
2757
2758 while (1) {
2759 struct auxtrace_queue *queue;
2760 struct intel_pt_queue *ptq;
2761
2762 if (!pt->heap.heap_cnt)
2763 return 0;
2764
2765 if (pt->heap.heap_array[0].ordinal >= timestamp)
2766 return 0;
2767
2768 queue_nr = pt->heap.heap_array[0].queue_nr;
2769 queue = &pt->queues.queue_array[queue_nr];
2770 ptq = queue->priv;
2771
2772 intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n",
2773 queue_nr, pt->heap.heap_array[0].ordinal,
2774 timestamp);
2775
2776 auxtrace_heap__pop(&pt->heap);
2777
2778 if (pt->heap.heap_cnt) {
2779 ts = pt->heap.heap_array[0].ordinal + 1;
2780 if (ts > timestamp)
2781 ts = timestamp;
2782 } else {
2783 ts = timestamp;
2784 }
2785
2786 intel_pt_set_pid_tid_cpu(pt, queue);
2787
2788 ret = intel_pt_run_decoder(ptq, &ts);
2789
2790 if (ret < 0) {
2791 auxtrace_heap__add(&pt->heap, queue_nr, ts);
2792 return ret;
2793 }
2794
2795 if (!ret) {
2796 ret = auxtrace_heap__add(&pt->heap, queue_nr, ts);
2797 if (ret < 0)
2798 return ret;
2799 } else {
2800 ptq->on_heap = false;
2801 }
2802 }
2803
2804 return 0;
2805}
2806
2807static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid,
2808 u64 time_)
2809{
2810 struct auxtrace_queues *queues = &pt->queues;
2811 unsigned int i;
2812 u64 ts = 0;
2813
2814 for (i = 0; i < queues->nr_queues; i++) {
2815 struct auxtrace_queue *queue = &pt->queues.queue_array[i];
2816 struct intel_pt_queue *ptq = queue->priv;
2817
2818 if (ptq && (tid == -1 || ptq->tid == tid)) {
2819 ptq->time = time_;
2820 intel_pt_set_pid_tid_cpu(pt, queue);
2821 intel_pt_run_decoder(ptq, &ts);
2822 }
2823 }
2824 return 0;
2825}
2826
2827static void intel_pt_sample_set_pid_tid_cpu(struct intel_pt_queue *ptq,
2828 struct auxtrace_queue *queue,
2829 struct perf_sample *sample)
2830{
2831 struct machine *m = ptq->pt->machine;
2832
2833 ptq->pid = sample->pid;
2834 ptq->tid = sample->tid;
2835 ptq->cpu = queue->cpu;
2836
2837 intel_pt_log("queue %u cpu %d pid %d tid %d\n",
2838 ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
2839
2840 thread__zput(ptq->thread);
2841
2842 if (ptq->tid == -1)
2843 return;
2844
2845 if (ptq->pid == -1) {
2846 ptq->thread = machine__find_thread(m, -1, ptq->tid);
2847 if (ptq->thread)
2848 ptq->pid = ptq->thread->pid_;
2849 return;
2850 }
2851
2852 ptq->thread = machine__findnew_thread(m, ptq->pid, ptq->tid);
2853}
2854
2855static int intel_pt_process_timeless_sample(struct intel_pt *pt,
2856 struct perf_sample *sample)
2857{
2858 struct auxtrace_queue *queue;
2859 struct intel_pt_queue *ptq;
2860 u64 ts = 0;
2861
2862 queue = auxtrace_queues__sample_queue(&pt->queues, sample, pt->session);
2863 if (!queue)
2864 return -EINVAL;
2865
2866 ptq = queue->priv;
2867 if (!ptq)
2868 return 0;
2869
2870 ptq->stop = false;
2871 ptq->time = sample->time;
2872 intel_pt_sample_set_pid_tid_cpu(ptq, queue, sample);
2873 intel_pt_run_decoder(ptq, &ts);
2874 return 0;
2875}
2876
2877static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
2878{
2879 return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
2880 sample->pid, sample->tid, 0, sample->time);
2881}
2882
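/*
 * Map a CPU number to its queue. Queues are usually created per-cpu with
 * matching indices, so try queue_array[cpu] first, then scan downwards,
 * and finally scan the remaining queues.
 */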
2883static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
2884{
2885 unsigned i, j;
2886
2887 if (cpu < 0 || !pt->queues.nr_queues)
2888 return NULL;
2889
2890 if ((unsigned)cpu >= pt->queues.nr_queues)
2891 i = pt->queues.nr_queues - 1;
2892 else
2893 i = cpu;
2894
2895 if (pt->queues.queue_array[i].cpu == cpu)
2896 return pt->queues.queue_array[i].priv;
2897
2898 for (j = 0; i > 0; j++) {
2899 if (pt->queues.queue_array[--i].cpu == cpu)
2900 return pt->queues.queue_array[i].priv;
2901 }
2902
2903 for (; j < pt->queues.nr_queues; j++) {
2904 if (pt->queues.queue_array[j].cpu == cpu)
2905 return pt->queues.queue_array[j].priv;
2906 }
2907
2908 return NULL;
2909}
2910
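/*
 * Handle a context switch when sync_switch is in use. Returns 0 when the
 * tid change is deferred until the switch IP is reached, 1 when the caller
 * should update the current tid immediately, or negative error.
 */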
2911static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
2912 u64 timestamp)
2913{
2914 struct intel_pt_queue *ptq;
2915 int err;
2916
2917 if (!pt->sync_switch)
2918 return 1;
2919
2920 ptq = intel_pt_cpu_to_ptq(pt, cpu);
2921 if (!ptq || !ptq->sync_switch)
2922 return 1;
2923
2924 switch (ptq->switch_state) {
2925 case INTEL_PT_SS_NOT_TRACING:
2926 break;
2927 case INTEL_PT_SS_UNKNOWN:
2928 case INTEL_PT_SS_TRACING:
2929 ptq->next_tid = tid;
2930 ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP;
2931 return 0;
2932 case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
2933 if (!ptq->on_heap) {
2934 ptq->timestamp = perf_time_to_tsc(timestamp,
2935 &pt->tc);
2936 err = auxtrace_heap__add(&pt->heap, ptq->queue_nr,
2937 ptq->timestamp);
2938 if (err)
2939 return err;
2940 ptq->on_heap = true;
2941 }
2942 ptq->switch_state = INTEL_PT_SS_TRACING;
2943 break;
2944 case INTEL_PT_SS_EXPECTING_SWITCH_IP:
2945 intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu);
2946 break;
2947 default:
2948 break;
2949 }
2950
2951 ptq->next_tid = -1;
2952
2953 return 1;
2954}
2955
2956static int intel_pt_process_switch(struct intel_pt *pt,
2957 struct perf_sample *sample)
2958{
2959 pid_t tid;
2960 int cpu, ret;
2961 struct evsel *evsel = evlist__id2evsel(pt->session->evlist, sample->id);
2962
2963 if (evsel != pt->switch_evsel)
2964 return 0;
2965
2966 tid = evsel__intval(evsel, sample, "next_pid");
2967 cpu = sample->cpu;
2968
2969 intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
2970 cpu, tid, sample->time, perf_time_to_tsc(sample->time,
2971 &pt->tc));
2972
2973 ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
2974 if (ret <= 0)
2975 return ret;
2976
2977 return machine__set_current_tid(pt->machine, cpu, -1, tid);
2978}
2979
2980static int intel_pt_context_switch_in(struct intel_pt *pt,
2981 struct perf_sample *sample)
2982{
2983 pid_t pid = sample->pid;
2984 pid_t tid = sample->tid;
2985 int cpu = sample->cpu;
2986
2987 if (pt->sync_switch) {
2988 struct intel_pt_queue *ptq;
2989
2990 ptq = intel_pt_cpu_to_ptq(pt, cpu);
2991 if (ptq && ptq->sync_switch) {
2992 ptq->next_tid = -1;
2993 switch (ptq->switch_state) {
2994 case INTEL_PT_SS_NOT_TRACING:
2995 case INTEL_PT_SS_UNKNOWN:
2996 case INTEL_PT_SS_TRACING:
2997 break;
2998 case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
2999 case INTEL_PT_SS_EXPECTING_SWITCH_IP:
3000 ptq->switch_state = INTEL_PT_SS_TRACING;
3001 break;
3002 default:
3003 break;
3004 }
3005 }
3006 }
3007
3008 /*
3009 * If the current tid has not been updated yet, ensure it is, now
3010 * that a "switch in" event has occurred.
3011 */
3012 if (machine__get_current_tid(pt->machine, cpu) == tid)
3013 return 0;
3014
3015 return machine__set_current_tid(pt->machine, cpu, pid, tid);
3016}
3017
3018static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
3019 struct perf_sample *sample)
3020{
3021 bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
3022 pid_t pid, tid;
3023 int cpu, ret;
3024
3025 cpu = sample->cpu;
3026
3027 if (pt->have_sched_switch == 3) {
3028 if (!out)
3029 return intel_pt_context_switch_in(pt, sample);
3030 if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) {
3031 pr_err("Expecting CPU-wide context switch event\n");
3032 return -EINVAL;
3033 }
3034 pid = event->context_switch.next_prev_pid;
3035 tid = event->context_switch.next_prev_tid;
3036 } else {
3037 if (out)
3038 return 0;
3039 pid = sample->pid;
3040 tid = sample->tid;
3041 }
3042
3043 if (tid == -1)
3044 intel_pt_log("context_switch event has no tid\n");
3045
3046 ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
3047 if (ret <= 0)
3048 return ret;
3049
3050 return machine__set_current_tid(pt->machine, cpu, pid, tid);
3051}
3052
3053static int intel_pt_process_itrace_start(struct intel_pt *pt,
3054 union perf_event *event,
3055 struct perf_sample *sample)
3056{
3057 if (!pt->per_cpu_mmaps)
3058 return 0;
3059
3060 intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
3061 sample->cpu, event->itrace_start.pid,
3062 event->itrace_start.tid, sample->time,
3063 perf_time_to_tsc(sample->time, &pt->tc));
3064
3065 return machine__set_current_tid(pt->machine, sample->cpu,
3066 event->itrace_start.pid,
3067 event->itrace_start.tid);
3068}
3069
3070static int intel_pt_process_aux_output_hw_id(struct intel_pt *pt,
3071 union perf_event *event,
3072 struct perf_sample *sample)
3073{
3074 u64 hw_id = event->aux_output_hw_id.hw_id;
3075 struct auxtrace_queue *queue;
3076 struct intel_pt_queue *ptq;
3077 struct evsel *evsel;
3078
3079 queue = auxtrace_queues__sample_queue(&pt->queues, sample, pt->session);
3080 evsel = evlist__id2evsel_strict(pt->session->evlist, sample->id);
3081 if (!queue || !queue->priv || !evsel || hw_id >= INTEL_PT_MAX_PEBS) {
3082 pr_err("Bad AUX output hardware ID\n");
3083 return -EINVAL;
3084 }
3085
3086 ptq = queue->priv;
3087
3088 ptq->pebs[hw_id].evsel = evsel;
3089 ptq->pebs[hw_id].id = sample->id;
3090
3091 return 0;
3092}
3093
3094static int intel_pt_find_map(struct thread *thread, u8 cpumode, u64 addr,
3095 struct addr_location *al)
3096{
3097 if (!al->map || addr < al->map->start || addr >= al->map->end) {
3098 if (!thread__find_map(thread, cpumode, addr, al))
3099 return -1;
3100 }
3101
3102 return 0;
3103}
3104
3105/* Invalidate all instruction cache entries that overlap the text poke */
3106static int intel_pt_text_poke(struct intel_pt *pt, union perf_event *event)
3107{
3108 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
3109 u64 addr = event->text_poke.addr + event->text_poke.new_len - 1;
3110 /* Assume the text poke begins in a basic block of no more than 4096 bytes */
3111 int cnt = 4096 + event->text_poke.new_len;
3112 struct thread *thread = pt->unknown_thread;
3113 struct addr_location al = { .map = NULL };
3114 struct machine *machine = pt->machine;
3115 struct intel_pt_cache_entry *e;
3116 u64 offset;
3117
3118 if (!event->text_poke.new_len)
3119 return 0;
3120
3121 for (; cnt; cnt--, addr--) {
3122 if (intel_pt_find_map(thread, cpumode, addr, &al)) {
3123 if (addr < event->text_poke.addr)
3124 return 0;
3125 continue;
3126 }
3127
3128 if (!al.map->dso || !al.map->dso->auxtrace_cache)
3129 continue;
3130
3131 offset = al.map->map_ip(al.map, addr);
3132
3133 e = intel_pt_cache_lookup(al.map->dso, machine, offset);
3134 if (!e)
3135 continue;
3136
3137 if (addr + e->byte_cnt + e->length <= event->text_poke.addr) {
3138 /*
3139 * No overlap. Working backwards, there cannot be another
3140 * basic block that overlaps the text poke if there is a
3141 * branch instruction before the text poke address.
3142 */
3143 if (e->branch != INTEL_PT_BR_NO_BRANCH)
3144 return 0;
3145 } else {
3146 intel_pt_cache_invalidate(al.map->dso, machine, offset);
3147 intel_pt_log("Invalidated instruction cache for %s at %#"PRIx64"\n",
3148 al.map->dso->long_name, addr);
3149 }
3150 }
3151
3152 return 0;
3153}
3154
3155static int intel_pt_process_event(struct perf_session *session,
3156 union perf_event *event,
3157 struct perf_sample *sample,
3158 struct perf_tool *tool)
3159{
3160 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3161 auxtrace);
3162 u64 timestamp;
3163 int err = 0;
3164
3165 if (dump_trace)
3166 return 0;
3167
3168 if (!tool->ordered_events) {
3169 pr_err("Intel Processor Trace requires ordered events\n");
3170 return -EINVAL;
3171 }
3172
3173 if (sample->time && sample->time != (u64)-1)
3174 timestamp = perf_time_to_tsc(sample->time, &pt->tc);
3175 else
3176 timestamp = 0;
3177
3178 if (timestamp || pt->timeless_decoding) {
3179 err = intel_pt_update_queues(pt);
3180 if (err)
3181 return err;
3182 }
3183
3184 if (pt->timeless_decoding) {
3185 if (pt->sampling_mode) {
3186 if (sample->aux_sample.size)
3187 err = intel_pt_process_timeless_sample(pt,
3188 sample);
3189 } else if (event->header.type == PERF_RECORD_EXIT) {
3190 err = intel_pt_process_timeless_queues(pt,
3191 event->fork.tid,
3192 sample->time);
3193 }
3194 } else if (timestamp) {
3195 if (!pt->first_timestamp)
3196 intel_pt_first_timestamp(pt, timestamp);
3197 err = intel_pt_process_queues(pt, timestamp);
3198 }
3199 if (err)
3200 return err;
3201
3202 if (event->header.type == PERF_RECORD_SAMPLE) {
3203 if (pt->synth_opts.add_callchain && !sample->callchain)
3204 intel_pt_add_callchain(pt, sample);
3205 if (pt->synth_opts.add_last_branch && !sample->branch_stack)
3206 intel_pt_add_br_stack(pt, sample);
3207 }
3208
3209 if (event->header.type == PERF_RECORD_AUX &&
3210 (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
3211 pt->synth_opts.errors) {
3212 err = intel_pt_lost(pt, sample);
3213 if (err)
3214 return err;
3215 }
3216
3217 if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE)
3218 err = intel_pt_process_switch(pt, sample);
3219 else if (event->header.type == PERF_RECORD_ITRACE_START)
3220 err = intel_pt_process_itrace_start(pt, event, sample);
3221 else if (event->header.type == PERF_RECORD_AUX_OUTPUT_HW_ID)
3222 err = intel_pt_process_aux_output_hw_id(pt, event, sample);
3223 else if (event->header.type == PERF_RECORD_SWITCH ||
3224 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
3225 err = intel_pt_context_switch(pt, event, sample);
3226
3227 if (!err && event->header.type == PERF_RECORD_TEXT_POKE)
3228 err = intel_pt_text_poke(pt, event);
3229
3230 if (intel_pt_enable_logging && intel_pt_log_events(pt, sample->time)) {
3231 intel_pt_log("event %u: cpu %d time %"PRIu64" tsc %#"PRIx64" ",
3232 event->header.type, sample->cpu, sample->time, timestamp);
3233 intel_pt_log_event(event);
3234 }
3235
3236 return err;
3237}
3238
3239static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool)
3240{
3241 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3242 auxtrace);
3243 int ret;
3244
3245 if (dump_trace)
3246 return 0;
3247
3248 if (!tool->ordered_events)
3249 return -EINVAL;
3250
3251 ret = intel_pt_update_queues(pt);
3252 if (ret < 0)
3253 return ret;
3254
3255 if (pt->timeless_decoding)
3256 return intel_pt_process_timeless_queues(pt, -1,
3257 MAX_TIMESTAMP - 1);
3258
3259 return intel_pt_process_queues(pt, MAX_TIMESTAMP);
3260}
3261
3262static void intel_pt_free_events(struct perf_session *session)
3263{
3264 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3265 auxtrace);
3266 struct auxtrace_queues *queues = &pt->queues;
3267 unsigned int i;
3268
3269 for (i = 0; i < queues->nr_queues; i++) {
3270 intel_pt_free_queue(queues->queue_array[i].priv);
3271 queues->queue_array[i].priv = NULL;
3272 }
3273 intel_pt_log_disable();
3274 auxtrace_queues__free(queues);
3275}
3276
3277static void intel_pt_free(struct perf_session *session)
3278{
3279 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3280 auxtrace);
3281
3282 auxtrace_heap__free(&pt->heap);
3283 intel_pt_free_events(session);
3284 session->auxtrace = NULL;
3285 intel_pt_free_vmcs_info(pt);
3286 thread__put(pt->unknown_thread);
3287 addr_filters__exit(&pt->filts);
3288 zfree(&pt->chain);
3289 zfree(&pt->filter);
3290 zfree(&pt->time_ranges);
3291 free(pt);
3292}
3293
3294static bool intel_pt_evsel_is_auxtrace(struct perf_session *session,
3295 struct evsel *evsel)
3296{
3297 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3298 auxtrace);
3299
3300 return evsel->core.attr.type == pt->pmu_type;
3301}
3302
3303static int intel_pt_process_auxtrace_event(struct perf_session *session,
3304 union perf_event *event,
3305 struct perf_tool *tool __maybe_unused)
3306{
3307 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3308 auxtrace);
3309
3310 if (!pt->data_queued) {
3311 struct auxtrace_buffer *buffer;
3312 off_t data_offset;
3313 int fd = perf_data__fd(session->data);
3314 int err;
3315
3316 if (perf_data__is_pipe(session->data)) {
3317 data_offset = 0;
3318 } else {
3319 data_offset = lseek(fd, 0, SEEK_CUR);
3320 if (data_offset == -1)
3321 return -errno;
3322 }
3323
3324 err = auxtrace_queues__add_event(&pt->queues, session, event,
3325 data_offset, &buffer);
3326 if (err)
3327 return err;
3328
3329 /* Dump here, now that we have copied a piped trace out of the pipe */
3330 if (dump_trace) {
3331 if (auxtrace_buffer__get_data(buffer, fd)) {
3332 intel_pt_dump_event(pt, buffer->data,
3333 buffer->size);
3334 auxtrace_buffer__put_data(buffer);
3335 }
3336 }
3337 }
3338
3339 return 0;
3340}
3341
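/*
 * Queue data for later decoding: AUXTRACE events are queued by their file
 * offset, whereas in sampling mode the aux_sample data embedded in perf
 * samples is queued by timestamp.
 */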
3342static int intel_pt_queue_data(struct perf_session *session,
3343 struct perf_sample *sample,
3344 union perf_event *event, u64 data_offset)
3345{
3346 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3347 auxtrace);
3348 u64 timestamp;
3349
3350 if (event) {
3351 return auxtrace_queues__add_event(&pt->queues, session, event,
3352 data_offset, NULL);
3353 }
3354
3355 if (sample->time && sample->time != (u64)-1)
3356 timestamp = perf_time_to_tsc(sample->time, &pt->tc);
3357 else
3358 timestamp = 0;
3359
3360 return auxtrace_queues__add_sample(&pt->queues, session, sample,
3361 data_offset, timestamp);
3362}
3363
3364struct intel_pt_synth {
3365 struct perf_tool dummy_tool;
3366 struct perf_session *session;
3367};
3368
3369static int intel_pt_event_synth(struct perf_tool *tool,
3370 union perf_event *event,
3371 struct perf_sample *sample __maybe_unused,
3372 struct machine *machine __maybe_unused)
3373{
3374 struct intel_pt_synth *intel_pt_synth =
3375 container_of(tool, struct intel_pt_synth, dummy_tool);
3376
3377 return perf_session__deliver_synth_event(intel_pt_synth->session, event,
3378 NULL);
3379}
3380
3381static int intel_pt_synth_event(struct perf_session *session, const char *name,
3382 struct perf_event_attr *attr, u64 id)
3383{
3384 struct intel_pt_synth intel_pt_synth;
3385 int err;
3386
3387 pr_debug("Synthesizing '%s' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
3388 name, id, (u64)attr->sample_type);
3389
3390 memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth));
3391 intel_pt_synth.session = session;
3392
3393 err = perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
3394 &id, intel_pt_event_synth);
3395 if (err)
3396 pr_err("%s: failed to synthesize '%s' event type\n",
3397 __func__, name);
3398
3399 return err;
3400}
3401
3402static void intel_pt_set_event_name(struct evlist *evlist, u64 id,
3403 const char *name)
3404{
3405 struct evsel *evsel;
3406
3407 evlist__for_each_entry(evlist, evsel) {
3408 if (evsel->core.id && evsel->core.id[0] == id) {
3409 if (evsel->name)
3410 zfree(&evsel->name);
3411 evsel->name = strdup(name);
3412 break;
3413 }
3414 }
3415}
3416
3417static struct evsel *intel_pt_evsel(struct intel_pt *pt,
3418 struct evlist *evlist)
3419{
3420 struct evsel *evsel;
3421
3422 evlist__for_each_entry(evlist, evsel) {
3423 if (evsel->core.attr.type == pt->pmu_type && evsel->core.ids)
3424 return evsel;
3425 }
3426
3427 return NULL;
3428}
3429
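/*
 * Set up the events that will be synthesized from the trace. Attribute
 * values are copied from the Intel PT evsel, and new ids are allocated
 * starting at a large offset from an existing id, which makes a clash
 * with ids already in use unlikely.
 */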
3430static int intel_pt_synth_events(struct intel_pt *pt,
3431 struct perf_session *session)
3432{
3433 struct evlist *evlist = session->evlist;
3434 struct evsel *evsel = intel_pt_evsel(pt, evlist);
3435 struct perf_event_attr attr;
3436 u64 id;
3437 int err;
3438
3439 if (!evsel) {
3440 pr_debug("There are no selected events with Intel Processor Trace data\n");
3441 return 0;
3442 }
3443
3444 memset(&attr, 0, sizeof(struct perf_event_attr));
3445 attr.size = sizeof(struct perf_event_attr);
3446 attr.type = PERF_TYPE_HARDWARE;
3447 attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
3448 attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
3449 PERF_SAMPLE_PERIOD;
3450 if (pt->timeless_decoding)
3451 attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
3452 else
3453 attr.sample_type |= PERF_SAMPLE_TIME;
3454 if (!pt->per_cpu_mmaps)
3455 attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
3456 attr.exclude_user = evsel->core.attr.exclude_user;
3457 attr.exclude_kernel = evsel->core.attr.exclude_kernel;
3458 attr.exclude_hv = evsel->core.attr.exclude_hv;
3459 attr.exclude_host = evsel->core.attr.exclude_host;
3460 attr.exclude_guest = evsel->core.attr.exclude_guest;
3461 attr.sample_id_all = evsel->core.attr.sample_id_all;
3462 attr.read_format = evsel->core.attr.read_format;
3463
3464 id = evsel->core.id[0] + 1000000000;
3465 if (!id)
3466 id = 1;
3467
3468 if (pt->synth_opts.branches) {
3469 attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
3470 attr.sample_period = 1;
3471 attr.sample_type |= PERF_SAMPLE_ADDR;
3472 err = intel_pt_synth_event(session, "branches", &attr, id);
3473 if (err)
3474 return err;
3475 pt->sample_branches = true;
3476 pt->branches_sample_type = attr.sample_type;
3477 pt->branches_id = id;
3478 id += 1;
3479 attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
3480 }
3481
3482 if (pt->synth_opts.callchain)
3483 attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
3484 if (pt->synth_opts.last_branch) {
3485 attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
3486 /*
3487 * We don't use the hardware index, but the sample generation
3488 * code uses the new format branch_stack with this field,
3489 * so the event attributes must indicate that it's present.
3490 */
3491 attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
3492 }
3493
3494 if (pt->synth_opts.instructions) {
3495 attr.config = PERF_COUNT_HW_INSTRUCTIONS;
3496 if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
3497 attr.sample_period =
3498 intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
3499 else
3500 attr.sample_period = pt->synth_opts.period;
3501 err = intel_pt_synth_event(session, "instructions", &attr, id);
3502 if (err)
3503 return err;
3504 pt->sample_instructions = true;
3505 pt->instructions_sample_type = attr.sample_type;
3506 pt->instructions_id = id;
3507 id += 1;
3508 }
3509
3510 attr.sample_type &= ~(u64)PERF_SAMPLE_PERIOD;
3511 attr.sample_period = 1;
3512
3513 if (pt->synth_opts.transactions) {
3514 attr.config = PERF_COUNT_HW_INSTRUCTIONS;
3515 err = intel_pt_synth_event(session, "transactions", &attr, id);
3516 if (err)
3517 return err;
3518 pt->sample_transactions = true;
3519 pt->transactions_sample_type = attr.sample_type;
3520 pt->transactions_id = id;
3521 intel_pt_set_event_name(evlist, id, "transactions");
3522 id += 1;
3523 }
3524
3525 attr.type = PERF_TYPE_SYNTH;
3526 attr.sample_type |= PERF_SAMPLE_RAW;
3527
3528 if (pt->synth_opts.ptwrites) {
3529 attr.config = PERF_SYNTH_INTEL_PTWRITE;
3530 err = intel_pt_synth_event(session, "ptwrite", &attr, id);
3531 if (err)
3532 return err;
3533 pt->sample_ptwrites = true;
3534 pt->ptwrites_sample_type = attr.sample_type;
3535 pt->ptwrites_id = id;
3536 intel_pt_set_event_name(evlist, id, "ptwrite");
3537 id += 1;
3538 }
3539
3540 if (pt->synth_opts.pwr_events) {
3541 pt->sample_pwr_events = true;
3542 pt->pwr_events_sample_type = attr.sample_type;
3543
3544 attr.config = PERF_SYNTH_INTEL_CBR;
3545 err = intel_pt_synth_event(session, "cbr", &attr, id);
3546 if (err)
3547 return err;
3548 pt->cbr_id = id;
3549 intel_pt_set_event_name(evlist, id, "cbr");
3550 id += 1;
3551
3552 attr.config = PERF_SYNTH_INTEL_PSB;
3553 err = intel_pt_synth_event(session, "psb", &attr, id);
3554 if (err)
3555 return err;
3556 pt->psb_id = id;
3557 intel_pt_set_event_name(evlist, id, "psb");
3558 id += 1;
3559 }
3560
3561 if (pt->synth_opts.pwr_events && (evsel->core.attr.config & INTEL_PT_CFG_PWR_EVT_EN)) {
3562 attr.config = PERF_SYNTH_INTEL_MWAIT;
3563 err = intel_pt_synth_event(session, "mwait", &attr, id);
3564 if (err)
3565 return err;
3566 pt->mwait_id = id;
3567 intel_pt_set_event_name(evlist, id, "mwait");
3568 id += 1;
3569
3570 attr.config = PERF_SYNTH_INTEL_PWRE;
3571 err = intel_pt_synth_event(session, "pwre", &attr, id);
3572 if (err)
3573 return err;
3574 pt->pwre_id = id;
3575 intel_pt_set_event_name(evlist, id, "pwre");
3576 id += 1;
3577
3578 attr.config = PERF_SYNTH_INTEL_EXSTOP;
3579 err = intel_pt_synth_event(session, "exstop", &attr, id);
3580 if (err)
3581 return err;
3582 pt->exstop_id = id;
3583 intel_pt_set_event_name(evlist, id, "exstop");
3584 id += 1;
3585
3586 attr.config = PERF_SYNTH_INTEL_PWRX;
3587 err = intel_pt_synth_event(session, "pwrx", &attr, id);
3588 if (err)
3589 return err;
3590 pt->pwrx_id = id;
3591 intel_pt_set_event_name(evlist, id, "pwrx");
3592 id += 1;
3593 }
3594
3595 if (pt->synth_opts.intr_events && (evsel->core.attr.config & INTEL_PT_CFG_EVT_EN)) {
3596 attr.config = PERF_SYNTH_INTEL_EVT;
3597 err = intel_pt_synth_event(session, "evt", &attr, id);
3598 if (err)
3599 return err;
3600 pt->evt_sample_type = attr.sample_type;
3601 pt->evt_id = id;
3602 intel_pt_set_event_name(evlist, id, "evt");
3603 id += 1;
3604 }
3605
3606 if (pt->synth_opts.intr_events && pt->cap_event_trace) {
3607 attr.config = PERF_SYNTH_INTEL_IFLAG_CHG;
3608 err = intel_pt_synth_event(session, "iflag", &attr, id);
3609 if (err)
3610 return err;
3611 pt->iflag_chg_sample_type = attr.sample_type;
3612 pt->iflag_chg_id = id;
3613 intel_pt_set_event_name(evlist, id, "iflag");
3614 id += 1;
3615 }
3616
3617 return 0;
3618}
3619
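/*
 * Note which evsel, if there is exactly one, requested PEBS via Intel PT
 * (aux_output). That allows PEBS records lacking applicable_counters
 * information to still be attributed to an event. With more than one such
 * evsel, single_pebs is cleared but PEBS sampling stays enabled.
 */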
3620static void intel_pt_setup_pebs_events(struct intel_pt *pt)
3621{
3622 struct evsel *evsel;
3623
3624 if (!pt->synth_opts.other_events)
3625 return;
3626
3627 evlist__for_each_entry(pt->session->evlist, evsel) {
3628 if (evsel->core.attr.aux_output && evsel->core.id) {
3629 if (pt->single_pebs) {
3630 pt->single_pebs = false;
3631 return;
3632 }
3633 pt->single_pebs = true;
3634 pt->sample_pebs = true;
3635 pt->pebs_evsel = evsel;
3636 }
3637 }
3638}
3639
3640static struct evsel *intel_pt_find_sched_switch(struct evlist *evlist)
3641{
3642 struct evsel *evsel;
3643
3644 evlist__for_each_entry_reverse(evlist, evsel) {
3645 const char *name = evsel__name(evsel);
3646
3647 if (!strcmp(name, "sched:sched_switch"))
3648 return evsel;
3649 }
3650
3651 return NULL;
3652}
3653
3654static bool intel_pt_find_switch(struct evlist *evlist)
3655{
3656 struct evsel *evsel;
3657
3658 evlist__for_each_entry(evlist, evsel) {
3659 if (evsel->core.attr.context_switch)
3660 return true;
3661 }
3662
3663 return false;
3664}
3665
3666static int intel_pt_perf_config(const char *var, const char *value, void *data)
3667{
3668 struct intel_pt *pt = data;
3669
3670 if (!strcmp(var, "intel-pt.mispred-all"))
3671 pt->mispred_all = perf_config_bool(var, value);
3672
3673 if (!strcmp(var, "intel-pt.max-loops"))
3674 perf_config_int(&pt->max_loops, var, value);
3675
3676 return 0;
3677}
3678
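/*
 * Conversions between perf time and TSC are done with a multiply and
 * shift, and so are not exactly invertible. The two helpers below nudge
 * an initial estimate until the round-trip conversion lands on the right
 * side of the requested time, so that the resulting TSC range matches the
 * perf-time range.
 */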
3679/* Find least TSC which converts to ns or later */
3680static u64 intel_pt_tsc_start(u64 ns, struct intel_pt *pt)
3681{
3682 u64 tsc, tm;
3683
3684 tsc = perf_time_to_tsc(ns, &pt->tc);
3685
3686 while (1) {
3687 tm = tsc_to_perf_time(tsc, &pt->tc);
3688 if (tm < ns)
3689 break;
3690 tsc -= 1;
3691 }
3692
3693 while (tm < ns)
3694 tm = tsc_to_perf_time(++tsc, &pt->tc);
3695
3696 return tsc;
3697}
3698
3699/* Find greatest TSC which converts to ns or earlier */
3700static u64 intel_pt_tsc_end(u64 ns, struct intel_pt *pt)
3701{
3702 u64 tsc, tm;
3703
3704 tsc = perf_time_to_tsc(ns, &pt->tc);
3705
3706 while (1) {
3707 tm = tsc_to_perf_time(tsc, &pt->tc);
3708 if (tm > ns)
3709 break;
3710 tsc += 1;
3711 }
3712
3713 while (tm > ns)
3714 tm = tsc_to_perf_time(--tsc, &pt->tc);
3715
3716 return tsc;
3717}
3718
3719static int intel_pt_setup_time_ranges(struct intel_pt *pt,
3720 struct itrace_synth_opts *opts)
3721{
3722 struct perf_time_interval *p = opts->ptime_range;
3723 int n = opts->range_num;
3724 int i;
3725
3726 if (!n || !p || pt->timeless_decoding)
3727 return 0;
3728
3729 pt->time_ranges = calloc(n, sizeof(struct range));
3730 if (!pt->time_ranges)
3731 return -ENOMEM;
3732
3733 pt->range_cnt = n;
3734
3735 intel_pt_log("%s: %u range(s)\n", __func__, n);
3736
3737 for (i = 0; i < n; i++) {
3738 struct range *r = &pt->time_ranges[i];
3739 u64 ts = p[i].start;
3740 u64 te = p[i].end;
3741
3742 /*
3743 * Take care to ensure the TSC range matches the perf-time range
3744 * when converted back to perf-time.
3745 */
3746 r->start = ts ? intel_pt_tsc_start(ts, pt) : 0;
3747 r->end = te ? intel_pt_tsc_end(te, pt) : 0;
3748
3749 intel_pt_log("range %d: perf time interval: %"PRIu64" to %"PRIu64"\n",
3750 i, ts, te);
3751 intel_pt_log("range %d: TSC time interval: %#"PRIx64" to %#"PRIx64"\n",
3752 i, r->start, r->end);
3753 }
3754
3755 return 0;
3756}
3757
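/*
 * Parse one VM Time Correlation argument of the form:
 *     <tsc offset>[:<vmcs>[,<vmcs>]...]
 * Values may be given in any base accepted by strtoull(). A bare offset
 * sets the default TSC offset.
 */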
3758static int intel_pt_parse_vm_tm_corr_arg(struct intel_pt *pt, char **args)
3759{
3760 struct intel_pt_vmcs_info *vmcs_info;
3761 u64 tsc_offset, vmcs;
3762 char *p = *args;
3763
3764 errno = 0;
3765
3766 p = skip_spaces(p);
3767 if (!*p)
3768 return 1;
3769
3770 tsc_offset = strtoull(p, &p, 0);
3771 if (errno)
3772 return -errno;
3773 p = skip_spaces(p);
3774 if (*p != ':') {
3775 pt->dflt_tsc_offset = tsc_offset;
3776 *args = p;
3777 return 0;
3778 }
3779 p += 1;
3780 while (1) {
3781 vmcs = strtoull(p, &p, 0);
3782 if (errno)
3783 return -errno;
3784 if (!vmcs)
3785 return -EINVAL;
3786 vmcs_info = intel_pt_findnew_vmcs(&pt->vmcs_info, vmcs, tsc_offset);
3787 if (!vmcs_info)
3788 return -ENOMEM;
3789 p = skip_spaces(p);
3790 if (*p != ',')
3791 break;
3792 p += 1;
3793 }
3794 *args = p;
3795 return 0;
3796}
3797
3798static int intel_pt_parse_vm_tm_corr_args(struct intel_pt *pt)
3799{
3800 char *args = pt->synth_opts.vm_tm_corr_args;
3801 int ret;
3802
3803 if (!args)
3804 return 0;
3805
3806 do {
3807 ret = intel_pt_parse_vm_tm_corr_arg(pt, &args);
3808 } while (!ret);
3809
3810 if (ret < 0) {
3811 pr_err("Failed to parse VM Time Correlation options\n");
3812 return ret;
3813 }
3814
3815 return 0;
3816}
3817
3818static const char * const intel_pt_info_fmts[] = {
3819 [INTEL_PT_PMU_TYPE] = " PMU Type %"PRId64"\n",
3820 [INTEL_PT_TIME_SHIFT] = " Time Shift %"PRIu64"\n",
3821 [INTEL_PT_TIME_MULT] = " Time Multiplier %"PRIu64"\n",
3822 [INTEL_PT_TIME_ZERO] = " Time Zero %"PRIu64"\n",
3823 [INTEL_PT_CAP_USER_TIME_ZERO] = " Cap Time Zero %"PRId64"\n",
3824 [INTEL_PT_TSC_BIT] = " TSC bit %#"PRIx64"\n",
3825 [INTEL_PT_NORETCOMP_BIT] = " NoRETComp bit %#"PRIx64"\n",
3826 [INTEL_PT_HAVE_SCHED_SWITCH] = " Have sched_switch %"PRId64"\n",
3827 [INTEL_PT_SNAPSHOT_MODE] = " Snapshot mode %"PRId64"\n",
3828 [INTEL_PT_PER_CPU_MMAPS] = " Per-cpu maps %"PRId64"\n",
3829 [INTEL_PT_MTC_BIT] = " MTC bit %#"PRIx64"\n",
3830 [INTEL_PT_TSC_CTC_N] = " TSC:CTC numerator %"PRIu64"\n",
3831 [INTEL_PT_TSC_CTC_D] = " TSC:CTC denominator %"PRIu64"\n",
3832 [INTEL_PT_CYC_BIT] = " CYC bit %#"PRIx64"\n",
3833 [INTEL_PT_MAX_NONTURBO_RATIO] = " Max non-turbo ratio %"PRIu64"\n",
3834 [INTEL_PT_FILTER_STR_LEN] = " Filter string len. %"PRIu64"\n",
3835};
3836
3837static void intel_pt_print_info(__u64 *arr, int start, int finish)
3838{
3839 int i;
3840
3841 if (!dump_trace)
3842 return;
3843
3844 for (i = start; i <= finish; i++)
3845 fprintf(stdout, intel_pt_info_fmts[i], arr[i]);
3846}
3847
3848static void intel_pt_print_info_str(const char *name, const char *str)
3849{
3850 if (!dump_trace)
3851 return;
3852
3853 fprintf(stdout, " %-20s%s\n", name, str ? str : "");
3854}
3855
3856static bool intel_pt_has(struct perf_record_auxtrace_info *auxtrace_info, int pos)
3857{
3858 return auxtrace_info->header.size >=
3859 sizeof(struct perf_record_auxtrace_info) + (sizeof(u64) * (pos + 1));
3860}
3861
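/*
 * The auxtrace info event carries an array of u64 values in priv[].
 * Fields beyond the original minimum set are optional and their presence
 * is determined from the header size (see intel_pt_has() above), followed
 * by a variable-length filter string and, if present, the event trace
 * capability flag.
 */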
3862int intel_pt_process_auxtrace_info(union perf_event *event,
3863 struct perf_session *session)
3864{
3865 struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
3866 size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS;
3867 struct intel_pt *pt;
3868 void *info_end;
3869 __u64 *info;
3870 int err;
3871
3872 if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) +
3873 min_sz)
3874 return -EINVAL;
3875
3876 pt = zalloc(sizeof(struct intel_pt));
3877 if (!pt)
3878 return -ENOMEM;
3879
3880 pt->vmcs_info = RB_ROOT;
3881
3882 addr_filters__init(&pt->filts);
3883
3884 err = perf_config(intel_pt_perf_config, pt);
3885 if (err)
3886 goto err_free;
3887
3888 err = auxtrace_queues__init(&pt->queues);
3889 if (err)
3890 goto err_free;
3891
3892 if (session->itrace_synth_opts->set) {
3893 pt->synth_opts = *session->itrace_synth_opts;
3894 } else {
3895 struct itrace_synth_opts *opts = session->itrace_synth_opts;
3896
3897 itrace_synth_opts__set_default(&pt->synth_opts, opts->default_no_sample);
3898 if (!opts->default_no_sample && !opts->inject) {
3899 pt->synth_opts.branches = false;
3900 pt->synth_opts.callchain = true;
3901 pt->synth_opts.add_callchain = true;
3902 }
3903 pt->synth_opts.thread_stack = opts->thread_stack;
3904 }
3905
3906 if (!(pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_USE_STDOUT))
3907 intel_pt_log_set_name(INTEL_PT_PMU_NAME);
3908
3909 pt->session = session;
3910 pt->machine = &session->machines.host; /* No kvm support */
3911 pt->auxtrace_type = auxtrace_info->type;
3912 pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE];
3913 pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT];
3914 pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT];
3915 pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO];
3916 pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO];
3917 pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT];
3918 pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT];
3919 pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH];
3920 pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE];
3921 pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS];
3922 intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE,
3923 INTEL_PT_PER_CPU_MMAPS);
3924
3925 if (intel_pt_has(auxtrace_info, INTEL_PT_CYC_BIT)) {
3926 pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT];
3927 pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS];
3928 pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N];
3929 pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D];
3930 pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT];
3931 intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT,
3932 INTEL_PT_CYC_BIT);
3933 }
3934
3935 if (intel_pt_has(auxtrace_info, INTEL_PT_MAX_NONTURBO_RATIO)) {
3936 pt->max_non_turbo_ratio =
3937 auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO];
3938 intel_pt_print_info(&auxtrace_info->priv[0],
3939 INTEL_PT_MAX_NONTURBO_RATIO,
3940 INTEL_PT_MAX_NONTURBO_RATIO);
3941 }
3942
3943 info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;
3944 info_end = (void *)auxtrace_info + auxtrace_info->header.size;
3945
3946 if (intel_pt_has(auxtrace_info, INTEL_PT_FILTER_STR_LEN)) {
3947 size_t len;
3948
3949 len = auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN];
3950 intel_pt_print_info(&auxtrace_info->priv[0],
3951 INTEL_PT_FILTER_STR_LEN,
3952 INTEL_PT_FILTER_STR_LEN);
3953 if (len) {
3954 const char *filter = (const char *)info;
3955
3956 len = roundup(len + 1, 8);
3957 info += len >> 3;
3958 if ((void *)info > info_end) {
3959 pr_err("%s: bad filter string length\n", __func__);
3960 err = -EINVAL;
3961 goto err_free_queues;
3962 }
3963 pt->filter = memdup(filter, len);
3964 if (!pt->filter) {
3965 err = -ENOMEM;
3966 goto err_free_queues;
3967 }
3968 if (session->header.needs_swap)
3969 mem_bswap_64(pt->filter, len);
3970 if (pt->filter[len - 1]) {
3971 pr_err("%s: filter string not null terminated\n", __func__);
3972 err = -EINVAL;
3973 goto err_free_queues;
3974 }
3975 err = addr_filters__parse_bare_filter(&pt->filts,
3976 filter);
3977 if (err)
3978 goto err_free_queues;
3979 }
3980 intel_pt_print_info_str("Filter string", pt->filter);
3981 }
3982
3983 if ((void *)info < info_end) {
3984 pt->cap_event_trace = *info++;
3985 if (dump_trace)
3986 fprintf(stdout, " Cap Event Trace %d\n",
3987 pt->cap_event_trace);
3988 }
3989
3990 pt->timeless_decoding = intel_pt_timeless_decoding(pt);
3991 if (pt->timeless_decoding && !pt->tc.time_mult)
3992 pt->tc.time_mult = 1;
3993 pt->have_tsc = intel_pt_have_tsc(pt);
3994 pt->sampling_mode = intel_pt_sampling_mode(pt);
3995 pt->est_tsc = !pt->timeless_decoding;
3996
3997 if (pt->synth_opts.vm_time_correlation) {
3998 if (pt->timeless_decoding) {
3999 pr_err("Intel PT has no time information for VM Time Correlation\n");
4000 err = -EINVAL;
4001 goto err_free_queues;
4002 }
4003 if (session->itrace_synth_opts->ptime_range) {
4004 pr_err("Time ranges cannot be specified with VM Time Correlation\n");
4005 err = -EINVAL;
4006 goto err_free_queues;
4007 }
4008 /* Currently TSC Offset is calculated using MTC packets */
4009 if (!intel_pt_have_mtc(pt)) {
4010 pr_err("MTC packets must have been enabled for VM Time Correlation\n");
4011 err = -EINVAL;
4012 goto err_free_queues;
4013 }
4014 err = intel_pt_parse_vm_tm_corr_args(pt);
4015 if (err)
4016 goto err_free_queues;
4017 }
4018
4019 pt->unknown_thread = thread__new(999999999, 999999999);
4020 if (!pt->unknown_thread) {
4021 err = -ENOMEM;
4022 goto err_free_queues;
4023 }
4024
4025 /*
4026 * Since this thread will not be kept in any rbtree nor in a
4027 * list, initialize its list node so that the current thread
4028 * lifetime assumption is kept at thread__put() and we don't
4029 * segfault at list_del_init().
4030 */
4031 INIT_LIST_HEAD(&pt->unknown_thread->node);
4032
4033 err = thread__set_comm(pt->unknown_thread, "unknown", 0);
4034 if (err)
4035 goto err_delete_thread;
4036 if (thread__init_maps(pt->unknown_thread, pt->machine)) {
4037 err = -ENOMEM;
4038 goto err_delete_thread;
4039 }
4040
4041 pt->auxtrace.process_event = intel_pt_process_event;
4042 pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event;
4043 pt->auxtrace.queue_data = intel_pt_queue_data;
4044 pt->auxtrace.dump_auxtrace_sample = intel_pt_dump_sample;
4045 pt->auxtrace.flush_events = intel_pt_flush;
4046 pt->auxtrace.free_events = intel_pt_free_events;
4047 pt->auxtrace.free = intel_pt_free;
4048 pt->auxtrace.evsel_is_auxtrace = intel_pt_evsel_is_auxtrace;
4049 session->auxtrace = &pt->auxtrace;
4050
4051 if (dump_trace)
4052 return 0;
4053
4054 if (pt->have_sched_switch == 1) {
4055 pt->switch_evsel = intel_pt_find_sched_switch(session->evlist);
4056 if (!pt->switch_evsel) {
4057 pr_err("%s: missing sched_switch event\n", __func__);
4058 err = -EINVAL;
4059 goto err_delete_thread;
4060 }
4061 } else if (pt->have_sched_switch == 2 &&
4062 !intel_pt_find_switch(session->evlist)) {
4063 pr_err("%s: missing context_switch attribute flag\n", __func__);
4064 err = -EINVAL;
4065 goto err_delete_thread;
4066 }
4067
4068 if (pt->synth_opts.log)
4069 intel_pt_log_enable();
4070
4071 /* Maximum non-turbo ratio is TSC freq / 100 MHz */
4072 if (pt->tc.time_mult) {
4073 u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000);
4074
4075 if (!pt->max_non_turbo_ratio)
4076 pt->max_non_turbo_ratio =
4077 (tsc_freq + 50000000) / 100000000;
4078 intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
4079 intel_pt_log("Maximum non-turbo ratio %u\n",
4080 pt->max_non_turbo_ratio);
4081 pt->cbr2khz = tsc_freq / pt->max_non_turbo_ratio / 1000;
4082 }
4083
4084 err = intel_pt_setup_time_ranges(pt, session->itrace_synth_opts);
4085 if (err)
4086 goto err_delete_thread;
4087
4088 if (pt->synth_opts.calls)
4089 pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
4090 PERF_IP_FLAG_TRACE_END;
4091 if (pt->synth_opts.returns)
4092 pt->branches_filter |= PERF_IP_FLAG_RETURN |
4093 PERF_IP_FLAG_TRACE_BEGIN;
4094
4095 if ((pt->synth_opts.callchain || pt->synth_opts.add_callchain) &&
4096 !symbol_conf.use_callchain) {
4097 symbol_conf.use_callchain = true;
4098 if (callchain_register_param(&callchain_param) < 0) {
4099 symbol_conf.use_callchain = false;
4100 pt->synth_opts.callchain = false;
4101 pt->synth_opts.add_callchain = false;
4102 }
4103 }
4104
4105 if (pt->synth_opts.add_callchain) {
4106 err = intel_pt_callchain_init(pt);
4107 if (err)
4108 goto err_delete_thread;
4109 }
4110
4111 if (pt->synth_opts.last_branch || pt->synth_opts.add_last_branch) {
4112 pt->br_stack_sz = pt->synth_opts.last_branch_sz;
4113 pt->br_stack_sz_plus = pt->br_stack_sz;
4114 }
4115
4116 if (pt->synth_opts.add_last_branch) {
4117 err = intel_pt_br_stack_init(pt);
4118 if (err)
4119 goto err_delete_thread;
4120 /*
4121 * Additional branch stack size to cater for tracing from the
4122 * actual sample ip to where the sample time is recorded.
4123 * Measured at about 200 branches, but generously set to 1024.
4124 * If kernel space is not being traced, then add just 1 for the
4125 * branch to kernel space.
4126 */
4127 if (intel_pt_tracing_kernel(pt))
4128 pt->br_stack_sz_plus += 1024;
4129 else
4130 pt->br_stack_sz_plus += 1;
4131 }
4132
4133 pt->use_thread_stack = pt->synth_opts.callchain ||
4134 pt->synth_opts.add_callchain ||
4135 pt->synth_opts.thread_stack ||
4136 pt->synth_opts.last_branch ||
4137 pt->synth_opts.add_last_branch;
4138
4139 pt->callstack = pt->synth_opts.callchain ||
4140 pt->synth_opts.add_callchain ||
4141 pt->synth_opts.thread_stack;
4142
4143 err = intel_pt_synth_events(pt, session);
4144 if (err)
4145 goto err_delete_thread;
4146
4147 intel_pt_setup_pebs_events(pt);
4148
4149 if (pt->sampling_mode || list_empty(&session->auxtrace_index))
4150 err = auxtrace_queue_data(session, true, true);
4151 else
4152 err = auxtrace_queues__process_index(&pt->queues, session);
4153 if (err)
4154 goto err_delete_thread;
4155
4156 if (pt->queues.populated)
4157 pt->data_queued = true;
4158
4159 if (pt->timeless_decoding)
4160 pr_debug2("Intel PT decoding without timestamps\n");
4161
4162 return 0;
4163
4164err_delete_thread:
4165 zfree(&pt->chain);
4166 thread__zput(pt->unknown_thread);
4167err_free_queues:
4168 intel_pt_log_disable();
4169 auxtrace_queues__free(&pt->queues);
4170 session->auxtrace = NULL;
4171err_free:
4172 addr_filters__exit(&pt->filts);
4173 zfree(&pt->filter);
4174 zfree(&pt->time_ranges);
4175 free(pt);
4176 return err;
4177}