Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
intel_pt.c at v4.20-rc1 (2637 lines, 66 kB)

/*
 * intel_pt.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <inttypes.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include "../perf.h"
#include "session.h"
#include "machine.h"
#include "memswap.h"
#include "sort.h"
#include "tool.h"
#include "event.h"
#include "evlist.h"
#include "evsel.h"
#include "map.h"
#include "color.h"
#include "util.h"
#include "thread.h"
#include "thread-stack.h"
#include "symbol.h"
#include "callchain.h"
#include "dso.h"
#include "debug.h"
#include "auxtrace.h"
#include "tsc.h"
#include "intel-pt.h"
#include "config.h"

#include "intel-pt-decoder/intel-pt-log.h"
#include "intel-pt-decoder/intel-pt-decoder.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
#include "intel-pt-decoder/intel-pt-pkt-decoder.h"

#define MAX_TIMESTAMP (~0ULL)

struct intel_pt {
	struct auxtrace auxtrace;
	struct auxtrace_queues queues;
	struct auxtrace_heap heap;
	u32 auxtrace_type;
	struct perf_session *session;
	struct machine *machine;
	struct perf_evsel *switch_evsel;
	struct thread *unknown_thread;
	bool timeless_decoding;
	bool sampling_mode;
	bool snapshot_mode;
	bool per_cpu_mmaps;
	bool have_tsc;
	bool data_queued;
	bool est_tsc;
	bool sync_switch;
	bool mispred_all;
	int have_sched_switch;
	u32 pmu_type;
	u64 kernel_start;
	u64 switch_ip;
	u64 ptss_ip;

	struct perf_tsc_conversion tc;
	bool cap_user_time_zero;

	struct itrace_synth_opts synth_opts;

	bool sample_instructions;
	u64 instructions_sample_type;
	u64 instructions_id;

	bool sample_branches;
	u32 branches_filter;
	u64 branches_sample_type;
	u64 branches_id;

	bool sample_transactions;
	u64 transactions_sample_type;
	u64 transactions_id;

	bool sample_ptwrites;
	u64 ptwrites_sample_type;
	u64 ptwrites_id;

	bool sample_pwr_events;
	u64 pwr_events_sample_type;
	u64 mwait_id;
	u64 pwre_id;
	u64 exstop_id;
	u64 pwrx_id;
	u64 cbr_id;

	u64 tsc_bit;
	u64 mtc_bit;
	u64 mtc_freq_bits;
	u32 tsc_ctc_ratio_n;
	u32 tsc_ctc_ratio_d;
	u64 cyc_bit;
	u64 noretcomp_bit;
	unsigned max_non_turbo_ratio;
	unsigned cbr2khz;

	unsigned long num_events;

	char *filter;
	struct addr_filters filts;
};

enum switch_state {
	INTEL_PT_SS_NOT_TRACING,
	INTEL_PT_SS_UNKNOWN,
	INTEL_PT_SS_TRACING,
	INTEL_PT_SS_EXPECTING_SWITCH_EVENT,
	INTEL_PT_SS_EXPECTING_SWITCH_IP,
};
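
/*
 * Per-queue decode state: there is one intel_pt_queue per auxtrace queue
 * (per CPU with per-cpu mmaps, otherwise per thread), holding the decoder
 * instance and the state carried from one decoded sample to the next.
 */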

struct intel_pt_queue {
	struct intel_pt *pt;
	unsigned int queue_nr;
	struct auxtrace_buffer *buffer;
	struct auxtrace_buffer *old_buffer;
	void *decoder;
	const struct intel_pt_state *state;
	struct ip_callchain *chain;
	struct branch_stack *last_branch;
	struct branch_stack *last_branch_rb;
	size_t last_branch_pos;
	union perf_event *event_buf;
	bool on_heap;
	bool stop;
	bool step_through_buffers;
	bool use_buffer_pid_tid;
	bool sync_switch;
	pid_t pid, tid;
	int cpu;
	int switch_state;
	pid_t next_tid;
	struct thread *thread;
	bool exclude_kernel;
	bool have_sample;
	u64 time;
	u64 timestamp;
	u32 flags;
	u16 insn_len;
	u64 last_insn_cnt;
	char insn[INTEL_PT_INSN_BUF_SZ];
};

static void intel_pt_dump(struct intel_pt *pt __maybe_unused,
			  unsigned char *buf, size_t len)
{
	struct intel_pt_pkt packet;
	size_t pos = 0;
	int ret, pkt_len, i;
	char desc[INTEL_PT_PKT_DESC_MAX];
	const char *color = PERF_COLOR_BLUE;

	color_fprintf(stdout, color,
		      ". ... Intel Processor Trace data: size %zu bytes\n",
		      len);

	while (len) {
		ret = intel_pt_get_packet(buf, len, &packet);
		if (ret > 0)
			pkt_len = ret;
		else
			pkt_len = 1;
		printf(".");
		color_fprintf(stdout, color, "  %08x: ", pos);
		for (i = 0; i < pkt_len; i++)
			color_fprintf(stdout, color, " %02x", buf[i]);
		for (; i < 16; i++)
			color_fprintf(stdout, color, "   ");
		if (ret > 0) {
			ret = intel_pt_pkt_desc(&packet, desc,
						INTEL_PT_PKT_DESC_MAX);
			if (ret > 0)
				color_fprintf(stdout, color, " %s\n", desc);
		} else {
			color_fprintf(stdout, color, " Bad packet!\n");
		}
		pos += pkt_len;
		buf += pkt_len;
		len -= pkt_len;
	}
}

static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
				size_t len)
{
	printf(".\n");
	intel_pt_dump(pt, buf, len);
}

static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
				   struct auxtrace_buffer *b)
{
	bool consecutive = false;
	void *start;

	start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
				      pt->have_tsc, &consecutive);
	if (!start)
		return -EINVAL;
	b->use_size = b->data + b->size - start;
	b->use_data = start;
	if (b->use_size && consecutive)
		b->consecutive = true;
	return 0;
}
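
/*
 * Buffer-fetch callback handed to the decoder: walk the auxtrace queue,
 * map buffer data in on demand, and trim overlapping snapshot/sampling
 * buffers with intel_pt_do_fix_overlap() above.
 */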

/* This function assumes data is processed sequentially only */
static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
{
	struct intel_pt_queue *ptq = data;
	struct auxtrace_buffer *buffer = ptq->buffer;
	struct auxtrace_buffer *old_buffer = ptq->old_buffer;
	struct auxtrace_queue *queue;
	bool might_overlap;

	if (ptq->stop) {
		b->len = 0;
		return 0;
	}

	queue = &ptq->pt->queues.queue_array[ptq->queue_nr];

	buffer = auxtrace_buffer__next(queue, buffer);
	if (!buffer) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		b->len = 0;
		return 0;
	}

	ptq->buffer = buffer;

	if (!buffer->data) {
		int fd = perf_data__fd(ptq->pt->session->data);

		buffer->data = auxtrace_buffer__get_data(buffer, fd);
		if (!buffer->data)
			return -ENOMEM;
	}

	might_overlap = ptq->pt->snapshot_mode || ptq->pt->sampling_mode;
	if (might_overlap && !buffer->consecutive && old_buffer &&
	    intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
		return -ENOMEM;

	if (buffer->use_data) {
		b->len = buffer->use_size;
		b->buf = buffer->use_data;
	} else {
		b->len = buffer->size;
		b->buf = buffer->data;
	}
	b->ref_timestamp = buffer->reference;

	if (!old_buffer || (might_overlap && !buffer->consecutive)) {
		b->consecutive = false;
		b->trace_nr = buffer->buffer_nr + 1;
	} else {
		b->consecutive = true;
	}

	if (ptq->step_through_buffers)
		ptq->stop = true;

	if (b->len) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		ptq->old_buffer = buffer;
	} else {
		auxtrace_buffer__drop_data(buffer);
		return intel_pt_get_trace(b, data);
	}

	return 0;
}
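
/*
 * Per-DSO cache of decoded instruction runs, keyed by file offset.  Each
 * entry summarises a straight run of instructions (count, byte length and
 * the terminating branch) so the instruction walker below can skip
 * re-decoding hot code.
 */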

struct intel_pt_cache_entry {
	struct auxtrace_cache_entry entry;
	u64 insn_cnt;
	u64 byte_cnt;
	enum intel_pt_insn_op op;
	enum intel_pt_insn_branch branch;
	int length;
	int32_t rel;
	char insn[INTEL_PT_INSN_BUF_SZ];
};

static int intel_pt_config_div(const char *var, const char *value, void *data)
{
	int *d = data;
	long val;

	if (!strcmp(var, "intel-pt.cache-divisor")) {
		val = strtol(value, NULL, 0);
		if (val > 0 && val <= INT_MAX)
			*d = val;
	}

	return 0;
}

static int intel_pt_cache_divisor(void)
{
	static int d;

	if (d)
		return d;

	perf_config(intel_pt_config_div, &d);

	if (!d)
		d = 64;

	return d;
}

static unsigned int intel_pt_cache_size(struct dso *dso,
					struct machine *machine)
{
	off_t size;

	size = dso__data_size(dso, machine);
	size /= intel_pt_cache_divisor();
	if (size < 1000)
		return 10;
	if (size > (1 << 21))
		return 21;
	return 32 - __builtin_clz(size);
}

static struct auxtrace_cache *intel_pt_cache(struct dso *dso,
					     struct machine *machine)
{
	struct auxtrace_cache *c;
	unsigned int bits;

	if (dso->auxtrace_cache)
		return dso->auxtrace_cache;

	bits = intel_pt_cache_size(dso, machine);

	/* Ignoring cache creation failure */
	c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200);

	dso->auxtrace_cache = c;

	return c;
}

static int intel_pt_cache_add(struct dso *dso, struct machine *machine,
			      u64 offset, u64 insn_cnt, u64 byte_cnt,
			      struct intel_pt_insn *intel_pt_insn)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);
	struct intel_pt_cache_entry *e;
	int err;

	if (!c)
		return -ENOMEM;

	e = auxtrace_cache__alloc_entry(c);
	if (!e)
		return -ENOMEM;

	e->insn_cnt = insn_cnt;
	e->byte_cnt = byte_cnt;
	e->op = intel_pt_insn->op;
	e->branch = intel_pt_insn->branch;
	e->length = intel_pt_insn->length;
	e->rel = intel_pt_insn->rel;
	memcpy(e->insn, intel_pt_insn->buf, INTEL_PT_INSN_BUF_SZ);

	err = auxtrace_cache__add(c, offset, &e->entry);
	if (err)
		auxtrace_cache__free_entry(c, e);

	return err;
}

static struct intel_pt_cache_entry *
intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);

	if (!c)
		return NULL;

	return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
}

static inline u8 intel_pt_cpumode(struct intel_pt *pt, uint64_t ip)
{
	return ip >= pt->kernel_start ?
	       PERF_RECORD_MISC_KERNEL :
	       PERF_RECORD_MISC_USER;
}

static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
				   uint64_t *insn_cnt_ptr, uint64_t *ip,
				   uint64_t to_ip, uint64_t max_insn_cnt,
				   void *data)
{
	struct intel_pt_queue *ptq = data;
	struct machine *machine = ptq->pt->machine;
	struct thread *thread;
	struct addr_location al;
	unsigned char buf[INTEL_PT_INSN_BUF_SZ];
	ssize_t len;
	int x86_64;
	u8 cpumode;
	u64 offset, start_offset, start_ip;
	u64 insn_cnt = 0;
	bool one_map = true;

	intel_pt_insn->length = 0;

	if (to_ip && *ip == to_ip)
		goto out_no_cache;

	cpumode = intel_pt_cpumode(ptq->pt, *ip);

	thread = ptq->thread;
	if (!thread) {
		if (cpumode != PERF_RECORD_MISC_KERNEL)
			return -EINVAL;
		thread = ptq->pt->unknown_thread;
	}

	while (1) {
		if (!thread__find_map(thread, cpumode, *ip, &al) || !al.map->dso)
			return -EINVAL;

		if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
		    dso__data_status_seen(al.map->dso,
					  DSO_DATA_STATUS_SEEN_ITRACE))
			return -ENOENT;

		offset = al.map->map_ip(al.map, *ip);

		if (!to_ip && one_map) {
			struct intel_pt_cache_entry *e;

			e = intel_pt_cache_lookup(al.map->dso, machine, offset);
			if (e &&
			    (!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) {
				*insn_cnt_ptr = e->insn_cnt;
				*ip += e->byte_cnt;
				intel_pt_insn->op = e->op;
				intel_pt_insn->branch = e->branch;
				intel_pt_insn->length = e->length;
				intel_pt_insn->rel = e->rel;
				memcpy(intel_pt_insn->buf, e->insn,
				       INTEL_PT_INSN_BUF_SZ);
				intel_pt_log_insn_no_data(intel_pt_insn, *ip);
				return 0;
			}
		}

		start_offset = offset;
		start_ip = *ip;

		/* Load maps to ensure dso->is_64_bit has been updated */
		map__load(al.map);

		x86_64 = al.map->dso->is_64_bit;

		while (1) {
			len = dso__data_read_offset(al.map->dso, machine,
						    offset, buf,
						    INTEL_PT_INSN_BUF_SZ);
			if (len <= 0)
				return -EINVAL;

			if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn))
				return -EINVAL;

			intel_pt_log_insn(intel_pt_insn, *ip);

			insn_cnt += 1;

			if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH)
				goto out;

			if (max_insn_cnt && insn_cnt >= max_insn_cnt)
				goto out_no_cache;

			*ip += intel_pt_insn->length;

			if (to_ip && *ip == to_ip)
				goto out_no_cache;

			if (*ip >= al.map->end)
				break;

			offset += intel_pt_insn->length;
		}
		one_map = false;
	}
out:
	*insn_cnt_ptr = insn_cnt;

	if (!one_map)
		goto out_no_cache;

	/*
	 * Didn't lookup in the 'to_ip' case, so do it now to prevent duplicate
	 * entries.
	 */
	if (to_ip) {
		struct intel_pt_cache_entry *e;

		e = intel_pt_cache_lookup(al.map->dso, machine, start_offset);
		if (e)
			return 0;
	}

	/* Ignore cache errors */
	intel_pt_cache_add(al.map->dso, machine, start_offset, insn_cnt,
			   *ip - start_ip, intel_pt_insn);

	return 0;

out_no_cache:
	*insn_cnt_ptr = insn_cnt;
	return 0;
}
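
/*
 * Address filter matching for TIP.PGD (trace stop) packets: returns true
 * if the IP hit a tracestop region, or if filters exist and the IP fell
 * outside all of them.
 */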

static bool intel_pt_match_pgd_ip(struct intel_pt *pt, uint64_t ip,
				  uint64_t offset, const char *filename)
{
	struct addr_filter *filt;
	bool have_filter = false;
	bool hit_tracestop = false;
	bool hit_filter = false;

	list_for_each_entry(filt, &pt->filts.head, list) {
		if (filt->start)
			have_filter = true;

		if ((filename && !filt->filename) ||
		    (!filename && filt->filename) ||
		    (filename && strcmp(filename, filt->filename)))
			continue;

		if (!(offset >= filt->addr && offset < filt->addr + filt->size))
			continue;

		intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s hit filter: %s offset %#"PRIx64" size %#"PRIx64"\n",
			     ip, offset, filename ? filename : "[kernel]",
			     filt->start ? "filter" : "stop",
			     filt->addr, filt->size);

		if (filt->start)
			hit_filter = true;
		else
			hit_tracestop = true;
	}

	if (!hit_tracestop && !hit_filter)
		intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s is not in a filter region\n",
			     ip, offset, filename ? filename : "[kernel]");

	return hit_tracestop || (have_filter && !hit_filter);
}

static int __intel_pt_pgd_ip(uint64_t ip, void *data)
{
	struct intel_pt_queue *ptq = data;
	struct thread *thread;
	struct addr_location al;
	u8 cpumode;
	u64 offset;

	if (ip >= ptq->pt->kernel_start)
		return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);

	cpumode = PERF_RECORD_MISC_USER;

	thread = ptq->thread;
	if (!thread)
		return -EINVAL;

	if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso)
		return -EINVAL;

	offset = al.map->map_ip(al.map, ip);

	return intel_pt_match_pgd_ip(ptq->pt, ip, offset,
				     al.map->dso->long_name);
}

static bool intel_pt_pgd_ip(uint64_t ip, void *data)
{
	return __intel_pt_pgd_ip(ip, data) > 0;
}

static bool intel_pt_get_config(struct intel_pt *pt,
				struct perf_event_attr *attr, u64 *config)
{
	if (attr->type == pt->pmu_type) {
		if (config)
			*config = attr->config;
		return true;
	}

	return false;
}

static bool intel_pt_exclude_kernel(struct intel_pt *pt)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, NULL) &&
		    !evsel->attr.exclude_kernel)
			return false;
	}
	return true;
}

static bool intel_pt_return_compression(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	u64 config;

	if (!pt->noretcomp_bit)
		return true;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config) &&
		    (config & pt->noretcomp_bit))
			return false;
	}
	return true;
}

static bool intel_pt_branch_enable(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	u64 config;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config) &&
		    (config & 1) && !(config & 0x2000))
			return false;
	}
	return true;
}

static unsigned int intel_pt_mtc_period(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	unsigned int shift;
	u64 config;

	if (!pt->mtc_freq_bits)
		return 0;

	for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++)
		config >>= 1;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config))
			return (config & pt->mtc_freq_bits) >> shift;
	}
	return 0;
}

static bool intel_pt_timeless_decoding(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	bool timeless_decoding = true;
	u64 config;

	if (!pt->tsc_bit || !pt->cap_user_time_zero)
		return true;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (!(evsel->attr.sample_type & PERF_SAMPLE_TIME))
			return true;
		if (intel_pt_get_config(pt, &evsel->attr, &config)) {
			if (config & pt->tsc_bit)
				timeless_decoding = false;
			else
				return true;
		}
	}
	return timeless_decoding;
}

static bool intel_pt_tracing_kernel(struct intel_pt *pt)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, NULL) &&
		    !evsel->attr.exclude_kernel)
			return true;
	}
	return false;
}

static bool intel_pt_have_tsc(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	bool have_tsc = false;
	u64 config;

	if (!pt->tsc_bit)
		return false;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config)) {
			if (config & pt->tsc_bit)
				have_tsc = true;
			else
				return false;
		}
	}
	return have_tsc;
}

static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns)
{
	u64 quot, rem;

	quot = ns / pt->tc.time_mult;
	rem = ns % pt->tc.time_mult;
	return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) /
	       pt->tc.time_mult;
}
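
/*
 * Note on the conversion above: with m = tc.time_mult and s = tc.time_shift,
 * ticks = (ns << s) / m; computing quotient and remainder separately keeps
 * the intermediate shift from overflowing 64 bits for large ns.
 */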

static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
						   unsigned int queue_nr)
{
	struct intel_pt_params params = { .get_trace = 0, };
	struct perf_env *env = pt->machine->env;
	struct intel_pt_queue *ptq;

	ptq = zalloc(sizeof(struct intel_pt_queue));
	if (!ptq)
		return NULL;

	if (pt->synth_opts.callchain) {
		size_t sz = sizeof(struct ip_callchain);

		/* Add 1 to callchain_sz for callchain context */
		sz += (pt->synth_opts.callchain_sz + 1) * sizeof(u64);
		ptq->chain = zalloc(sz);
		if (!ptq->chain)
			goto out_free;
	}

	if (pt->synth_opts.last_branch) {
		size_t sz = sizeof(struct branch_stack);

		sz += pt->synth_opts.last_branch_sz *
		      sizeof(struct branch_entry);
		ptq->last_branch = zalloc(sz);
		if (!ptq->last_branch)
			goto out_free;
		ptq->last_branch_rb = zalloc(sz);
		if (!ptq->last_branch_rb)
			goto out_free;
	}

	ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
	if (!ptq->event_buf)
		goto out_free;

	ptq->pt = pt;
	ptq->queue_nr = queue_nr;
	ptq->exclude_kernel = intel_pt_exclude_kernel(pt);
	ptq->pid = -1;
	ptq->tid = -1;
	ptq->cpu = -1;
	ptq->next_tid = -1;

	params.get_trace = intel_pt_get_trace;
	params.walk_insn = intel_pt_walk_next_insn;
	params.data = ptq;
	params.return_compression = intel_pt_return_compression(pt);
	params.branch_enable = intel_pt_branch_enable(pt);
	params.max_non_turbo_ratio = pt->max_non_turbo_ratio;
	params.mtc_period = intel_pt_mtc_period(pt);
	params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
	params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d;

	if (pt->filts.cnt > 0)
		params.pgd_ip = intel_pt_pgd_ip;

	if (pt->synth_opts.instructions) {
		if (pt->synth_opts.period) {
			switch (pt->synth_opts.period_type) {
			case PERF_ITRACE_PERIOD_INSTRUCTIONS:
				params.period_type =
					INTEL_PT_PERIOD_INSTRUCTIONS;
				params.period = pt->synth_opts.period;
				break;
			case PERF_ITRACE_PERIOD_TICKS:
				params.period_type = INTEL_PT_PERIOD_TICKS;
				params.period = pt->synth_opts.period;
				break;
			case PERF_ITRACE_PERIOD_NANOSECS:
				params.period_type = INTEL_PT_PERIOD_TICKS;
				params.period = intel_pt_ns_to_ticks(pt,
							pt->synth_opts.period);
				break;
			default:
				break;
			}
		}

		if (!params.period) {
			params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS;
			params.period = 1;
		}
	}

	if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18))
		params.flags |= INTEL_PT_FUP_WITH_NLIP;

	ptq->decoder = intel_pt_decoder_new(&params);
	if (!ptq->decoder)
		goto out_free;

	return ptq;

out_free:
	zfree(&ptq->event_buf);
	zfree(&ptq->last_branch);
	zfree(&ptq->last_branch_rb);
	zfree(&ptq->chain);
	free(ptq);
	return NULL;
}

static void intel_pt_free_queue(void *priv)
{
	struct intel_pt_queue *ptq = priv;

	if (!ptq)
		return;
	thread__zput(ptq->thread);
	intel_pt_decoder_free(ptq->decoder);
	zfree(&ptq->event_buf);
	zfree(&ptq->last_branch);
	zfree(&ptq->last_branch_rb);
	zfree(&ptq->chain);
	free(ptq);
}

static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
				     struct auxtrace_queue *queue)
{
	struct intel_pt_queue *ptq = queue->priv;

	if (queue->tid == -1 || pt->have_sched_switch) {
		ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
		thread__zput(ptq->thread);
	}

	if (!ptq->thread && ptq->tid != -1)
		ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);

	if (ptq->thread) {
		ptq->pid = ptq->thread->pid_;
		if (queue->cpu == -1)
			ptq->cpu = ptq->thread->cpu;
	}
}
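
/*
 * Map decoder state onto the perf branch flags for the next sample:
 * TSX aborts, asynchronous branches (interrupts) and trace begin/end
 * markers all become PERF_IP_FLAG_* bits.
 */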

static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
{
	if (ptq->state->flags & INTEL_PT_ABORT_TX) {
		ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
	} else if (ptq->state->flags & INTEL_PT_ASYNC) {
		if (ptq->state->to_ip)
			ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
				     PERF_IP_FLAG_ASYNC |
				     PERF_IP_FLAG_INTERRUPT;
		else
			ptq->flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_END;
		ptq->insn_len = 0;
	} else {
		if (ptq->state->from_ip)
			ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
		else
			ptq->flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_BEGIN;
		if (ptq->state->flags & INTEL_PT_IN_TX)
			ptq->flags |= PERF_IP_FLAG_IN_TX;
		ptq->insn_len = ptq->state->insn_len;
		memcpy(ptq->insn, ptq->state->insn, INTEL_PT_INSN_BUF_SZ);
	}

	if (ptq->state->type & INTEL_PT_TRACE_BEGIN)
		ptq->flags |= PERF_IP_FLAG_TRACE_BEGIN;
	if (ptq->state->type & INTEL_PT_TRACE_END)
		ptq->flags |= PERF_IP_FLAG_TRACE_END;
}

static int intel_pt_setup_queue(struct intel_pt *pt,
				struct auxtrace_queue *queue,
				unsigned int queue_nr)
{
	struct intel_pt_queue *ptq = queue->priv;

	if (list_empty(&queue->head))
		return 0;

	if (!ptq) {
		ptq = intel_pt_alloc_queue(pt, queue_nr);
		if (!ptq)
			return -ENOMEM;
		queue->priv = ptq;

		if (queue->cpu != -1)
			ptq->cpu = queue->cpu;
		ptq->tid = queue->tid;

		if (pt->sampling_mode && !pt->snapshot_mode &&
		    pt->timeless_decoding)
			ptq->step_through_buffers = true;

		ptq->sync_switch = pt->sync_switch;
	}

	if (!ptq->on_heap &&
	    (!ptq->sync_switch ||
	     ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
		const struct intel_pt_state *state;
		int ret;

		if (pt->timeless_decoding)
			return 0;

		intel_pt_log("queue %u getting timestamp\n", queue_nr);
		intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
			     queue_nr, ptq->cpu, ptq->pid, ptq->tid);
		while (1) {
			state = intel_pt_decode(ptq->decoder);
			if (state->err) {
				if (state->err == INTEL_PT_ERR_NODATA) {
					intel_pt_log("queue %u has no timestamp\n",
						     queue_nr);
					return 0;
				}
				continue;
			}
			if (state->timestamp)
				break;
		}

		ptq->timestamp = state->timestamp;
		intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n",
			     queue_nr, ptq->timestamp);
		ptq->state = state;
		ptq->have_sample = true;
		intel_pt_sample_flags(ptq);
		ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp);
		if (ret)
			return ret;
		ptq->on_heap = true;
	}

	return 0;
}

static int intel_pt_setup_queues(struct intel_pt *pt)
{
	unsigned int i;
	int ret;

	for (i = 0; i < pt->queues.nr_queues; i++) {
		ret = intel_pt_setup_queue(pt, &pt->queues.queue_array[i], i);
		if (ret)
			return ret;
	}
	return 0;
}
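
/*
 * The last-branch ring buffer is written backwards from the end (see
 * intel_pt_update_last_branch_rb() below), so copying it out for a sample
 * takes the tail first and then wraps around to the start.
 */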

static inline void intel_pt_copy_last_branch_rb(struct intel_pt_queue *ptq)
{
	struct branch_stack *bs_src = ptq->last_branch_rb;
	struct branch_stack *bs_dst = ptq->last_branch;
	size_t nr = 0;

	bs_dst->nr = bs_src->nr;

	if (!bs_src->nr)
		return;

	nr = ptq->pt->synth_opts.last_branch_sz - ptq->last_branch_pos;
	memcpy(&bs_dst->entries[0],
	       &bs_src->entries[ptq->last_branch_pos],
	       sizeof(struct branch_entry) * nr);

	if (bs_src->nr >= ptq->pt->synth_opts.last_branch_sz) {
		memcpy(&bs_dst->entries[nr],
		       &bs_src->entries[0],
		       sizeof(struct branch_entry) * ptq->last_branch_pos);
	}
}

static inline void intel_pt_reset_last_branch_rb(struct intel_pt_queue *ptq)
{
	ptq->last_branch_pos = 0;
	ptq->last_branch_rb->nr = 0;
}

static void intel_pt_update_last_branch_rb(struct intel_pt_queue *ptq)
{
	const struct intel_pt_state *state = ptq->state;
	struct branch_stack *bs = ptq->last_branch_rb;
	struct branch_entry *be;

	if (!ptq->last_branch_pos)
		ptq->last_branch_pos = ptq->pt->synth_opts.last_branch_sz;

	ptq->last_branch_pos -= 1;

	be = &bs->entries[ptq->last_branch_pos];
	be->from = state->from_ip;
	be->to = state->to_ip;
	be->flags.abort = !!(state->flags & INTEL_PT_ABORT_TX);
	be->flags.in_tx = !!(state->flags & INTEL_PT_IN_TX);
	/* No support for mispredict */
	be->flags.mispred = ptq->pt->mispred_all;

	if (bs->nr < ptq->pt->synth_opts.last_branch_sz)
		bs->nr += 1;
}

static inline bool intel_pt_skip_event(struct intel_pt *pt)
{
	return pt->synth_opts.initial_skip &&
	       pt->num_events++ < pt->synth_opts.initial_skip;
}

static void intel_pt_prep_b_sample(struct intel_pt *pt,
				   struct intel_pt_queue *ptq,
				   union perf_event *event,
				   struct perf_sample *sample)
{
	if (!pt->timeless_decoding)
		sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc);

	sample->ip = ptq->state->from_ip;
	sample->cpumode = intel_pt_cpumode(pt, sample->ip);
	sample->pid = ptq->pid;
	sample->tid = ptq->tid;
	sample->addr = ptq->state->to_ip;
	sample->period = 1;
	sample->cpu = ptq->cpu;
	sample->flags = ptq->flags;
	sample->insn_len = ptq->insn_len;
	memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);

	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = sample->cpumode;
	event->sample.header.size = sizeof(struct perf_event_header);
}

static int intel_pt_inject_event(union perf_event *event,
				 struct perf_sample *sample, u64 type)
{
	event->header.size = perf_event__sample_event_size(sample, type, 0);
	return perf_event__synthesize_sample(event, type, 0, sample);
}

static inline int intel_pt_opt_inject(struct intel_pt *pt,
				      union perf_event *event,
				      struct perf_sample *sample, u64 type)
{
	if (!pt->synth_opts.inject)
		return 0;

	return intel_pt_inject_event(event, sample, type);
}

static int intel_pt_deliver_synth_b_event(struct intel_pt *pt,
					  union perf_event *event,
					  struct perf_sample *sample, u64 type)
{
	int ret;

	ret = intel_pt_opt_inject(pt, event, sample, type);
	if (ret)
		return ret;

	ret = perf_session__deliver_synth_event(pt->session, event, sample);
	if (ret)
		pr_err("Intel PT: failed to deliver event, error %d\n", ret);

	return ret;
}

static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct dummy_branch_stack {
		u64 nr;
		struct branch_entry entries;
	} dummy_bs;

	if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
		return 0;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_b_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->branches_id;
	sample.stream_id = ptq->pt->branches_id;

	/*
	 * perf report cannot handle events without a branch stack when using
	 * SORT_MODE__BRANCH so make a dummy one.
	 */
	if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) {
		dummy_bs = (struct dummy_branch_stack){
			.nr = 1,
			.entries = {
				.from = sample.ip,
				.to = sample.addr,
			},
		};
		sample.branch_stack = (struct branch_stack *)&dummy_bs;
	}

	return intel_pt_deliver_synth_b_event(pt, event, &sample,
					      pt->branches_sample_type);
}

static void intel_pt_prep_sample(struct intel_pt *pt,
				 struct intel_pt_queue *ptq,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	intel_pt_prep_b_sample(pt, ptq, event, sample);

	if (pt->synth_opts.callchain) {
		thread_stack__sample(ptq->thread, ptq->chain,
				     pt->synth_opts.callchain_sz + 1,
				     sample->ip, pt->kernel_start);
		sample->callchain = ptq->chain;
	}

	if (pt->synth_opts.last_branch) {
		intel_pt_copy_last_branch_rb(ptq);
		sample->branch_stack = ptq->last_branch;
	}
}

static inline int intel_pt_deliver_synth_event(struct intel_pt *pt,
					       struct intel_pt_queue *ptq,
					       union perf_event *event,
					       struct perf_sample *sample,
					       u64 type)
{
	int ret;

	ret = intel_pt_deliver_synth_b_event(pt, event, sample, type);

	if (pt->synth_opts.last_branch)
		intel_pt_reset_last_branch_rb(ptq);

	return ret;
}

static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->instructions_id;
	sample.stream_id = ptq->pt->instructions_id;
	sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;

	ptq->last_insn_cnt = ptq->state->tot_insn_cnt;

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->instructions_sample_type);
}

static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->transactions_id;
	sample.stream_id = ptq->pt->transactions_id;

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->transactions_sample_type);
}

static void intel_pt_prep_p_sample(struct intel_pt *pt,
				   struct intel_pt_queue *ptq,
				   union perf_event *event,
				   struct perf_sample *sample)
{
	intel_pt_prep_sample(pt, ptq, event, sample);

	/*
	 * Zero IP is used to mean "trace start" but that is not the case for
	 * power or PTWRITE events with no IP, so clear the flags.
	 */
	if (!sample->ip)
		sample->flags = 0;
}

static int intel_pt_synth_ptwrite_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_ptwrite raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->ptwrites_id;
	sample.stream_id = ptq->pt->ptwrites_id;

	raw.flags = 0;
	raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
	raw.payload = cpu_to_le64(ptq->state->ptw_payload);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->ptwrites_sample_type);
}

static int intel_pt_synth_cbr_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_cbr raw;
	u32 flags;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->cbr_id;
	sample.stream_id = ptq->pt->cbr_id;

	flags = (u16)ptq->state->cbr_payload | (pt->max_non_turbo_ratio << 16);
	raw.flags = cpu_to_le32(flags);
	raw.freq = cpu_to_le32(raw.cbr * pt->cbr2khz);
	raw.reserved3 = 0;

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->pwr_events_sample_type);
}

static int intel_pt_synth_mwait_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_mwait raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->mwait_id;
	sample.stream_id = ptq->pt->mwait_id;

	raw.reserved = 0;
	raw.payload = cpu_to_le64(ptq->state->mwait_payload);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->pwr_events_sample_type);
}

static int intel_pt_synth_pwre_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_pwre raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->pwre_id;
	sample.stream_id = ptq->pt->pwre_id;

	raw.reserved = 0;
	raw.payload = cpu_to_le64(ptq->state->pwre_payload);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->pwr_events_sample_type);
}

static int intel_pt_synth_exstop_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_exstop raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->exstop_id;
	sample.stream_id = ptq->pt->exstop_id;

	raw.flags = 0;
	raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->pwr_events_sample_type);
}

static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_pwrx raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->pwrx_id;
	sample.stream_id = ptq->pt->pwrx_id;

	raw.reserved = 0;
	raw.payload = cpu_to_le64(ptq->state->pwrx_payload);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->pwr_events_sample_type);
}

static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
				pid_t pid, pid_t tid, u64 ip)
{
	union perf_event event;
	char msg[MAX_AUXTRACE_ERROR_MSG];
	int err;

	intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);

	auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
			     code, cpu, pid, tid, ip, msg);

	err = perf_session__deliver_synth_event(pt->session, &event, NULL);
	if (err)
		pr_err("Intel Processor Trace: failed to deliver error event, error %d\n",
		       err);

	return err;
}

static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
{
	struct auxtrace_queue *queue;
	pid_t tid = ptq->next_tid;
	int err;

	if (tid == -1)
		return 0;

	intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid);

	err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid);

	queue = &pt->queues.queue_array[ptq->queue_nr];
	intel_pt_set_pid_tid_cpu(pt, queue);

	ptq->next_tid = -1;

	return err;
}

static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
{
	struct intel_pt *pt = ptq->pt;

	return ip == pt->switch_ip &&
	       (ptq->flags & PERF_IP_FLAG_BRANCH) &&
	       !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC |
			       PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT));
}

#define INTEL_PT_PWR_EVT (INTEL_PT_MWAIT_OP | INTEL_PT_PWR_ENTRY | \
			  INTEL_PT_EX_STOP | INTEL_PT_PWR_EXIT | \
			  INTEL_PT_CBR_CHG)

static int intel_pt_sample(struct intel_pt_queue *ptq)
{
	const struct intel_pt_state *state = ptq->state;
	struct intel_pt *pt = ptq->pt;
	int err;

	if (!ptq->have_sample)
		return 0;

	ptq->have_sample = false;

	if (pt->sample_pwr_events && (state->type & INTEL_PT_PWR_EVT)) {
		if (state->type & INTEL_PT_CBR_CHG) {
			err = intel_pt_synth_cbr_sample(ptq);
			if (err)
				return err;
		}
		if (state->type & INTEL_PT_MWAIT_OP) {
			err = intel_pt_synth_mwait_sample(ptq);
			if (err)
				return err;
		}
		if (state->type & INTEL_PT_PWR_ENTRY) {
			err = intel_pt_synth_pwre_sample(ptq);
			if (err)
				return err;
		}
		if (state->type & INTEL_PT_EX_STOP) {
			err = intel_pt_synth_exstop_sample(ptq);
			if (err)
				return err;
		}
		if (state->type & INTEL_PT_PWR_EXIT) {
			err = intel_pt_synth_pwrx_sample(ptq);
			if (err)
				return err;
		}
	}

	if (pt->sample_instructions && (state->type & INTEL_PT_INSTRUCTION)) {
		err = intel_pt_synth_instruction_sample(ptq);
		if (err)
			return err;
	}

	if (pt->sample_transactions && (state->type & INTEL_PT_TRANSACTION)) {
		err = intel_pt_synth_transaction_sample(ptq);
		if (err)
			return err;
	}

	if (pt->sample_ptwrites && (state->type & INTEL_PT_PTW)) {
		err = intel_pt_synth_ptwrite_sample(ptq);
		if (err)
			return err;
	}

	if (!(state->type & INTEL_PT_BRANCH))
		return 0;

	if (pt->synth_opts.callchain || pt->synth_opts.thread_stack)
		thread_stack__event(ptq->thread, ptq->flags, state->from_ip,
				    state->to_ip, ptq->insn_len,
				    state->trace_nr);
	else
		thread_stack__set_trace_nr(ptq->thread, state->trace_nr);

	if (pt->sample_branches) {
		err = intel_pt_synth_branch_sample(ptq);
		if (err)
			return err;
	}

	if (pt->synth_opts.last_branch)
		intel_pt_update_last_branch_rb(ptq);

	if (!ptq->sync_switch)
		return 0;

	if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
		switch (ptq->switch_state) {
		case INTEL_PT_SS_NOT_TRACING:
		case INTEL_PT_SS_UNKNOWN:
		case INTEL_PT_SS_EXPECTING_SWITCH_IP:
			err = intel_pt_next_tid(pt, ptq);
			if (err)
				return err;
			ptq->switch_state = INTEL_PT_SS_TRACING;
			break;
		default:
			ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT;
			return 1;
		}
	} else if (!state->to_ip) {
		ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
	} else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) {
		ptq->switch_state = INTEL_PT_SS_UNKNOWN;
	} else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
		   state->to_ip == pt->ptss_ip &&
		   (ptq->flags & PERF_IP_FLAG_CALL)) {
		ptq->switch_state = INTEL_PT_SS_TRACING;
	}

	return 0;
}
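
/*
 * Find the kernel's context-switch address by symbol: "__switch_to" gives
 * the switch IP, and the sched_switch tracepoint or
 * __perf_event_task_sched_out gives the address (ptss_ip) used to spot
 * switches in the trace.
 */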

static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
{
	struct machine *machine = pt->machine;
	struct map *map;
	struct symbol *sym, *start;
	u64 ip, switch_ip = 0;
	const char *ptss;

	if (ptss_ip)
		*ptss_ip = 0;

	map = machine__kernel_map(machine);
	if (!map)
		return 0;

	if (map__load(map))
		return 0;

	start = dso__first_symbol(map->dso);

	for (sym = start; sym; sym = dso__next_symbol(sym)) {
		if (sym->binding == STB_GLOBAL &&
		    !strcmp(sym->name, "__switch_to")) {
			ip = map->unmap_ip(map, sym->start);
			if (ip >= map->start && ip < map->end) {
				switch_ip = ip;
				break;
			}
		}
	}

	if (!switch_ip || !ptss_ip)
		return 0;

	if (pt->have_sched_switch == 1)
		ptss = "perf_trace_sched_switch";
	else
		ptss = "__perf_event_task_sched_out";

	for (sym = start; sym; sym = dso__next_symbol(sym)) {
		if (!strcmp(sym->name, ptss)) {
			ip = map->unmap_ip(map, sym->start);
			if (ip >= map->start && ip < map->end) {
				*ptss_ip = ip;
				break;
			}
		}
	}

	return switch_ip;
}

static void intel_pt_enable_sync_switch(struct intel_pt *pt)
{
	unsigned int i;

	pt->sync_switch = true;

	for (i = 0; i < pt->queues.nr_queues; i++) {
		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
		struct intel_pt_queue *ptq = queue->priv;

		if (ptq)
			ptq->sync_switch = true;
	}
}

static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
{
	const struct intel_pt_state *state = ptq->state;
	struct intel_pt *pt = ptq->pt;
	int err;

	if (!pt->kernel_start) {
		pt->kernel_start = machine__kernel_start(pt->machine);
		if (pt->per_cpu_mmaps &&
		    (pt->have_sched_switch == 1 || pt->have_sched_switch == 3) &&
		    !pt->timeless_decoding && intel_pt_tracing_kernel(pt) &&
		    !pt->sampling_mode) {
			pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip);
			if (pt->switch_ip) {
				intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
					     pt->switch_ip, pt->ptss_ip);
				intel_pt_enable_sync_switch(pt);
			}
		}
	}

	intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
		     ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
	while (1) {
		err = intel_pt_sample(ptq);
		if (err)
			return err;

		state = intel_pt_decode(ptq->decoder);
		if (state->err) {
			if (state->err == INTEL_PT_ERR_NODATA)
				return 1;
			if (ptq->sync_switch &&
			    state->from_ip >= pt->kernel_start) {
				ptq->sync_switch = false;
				intel_pt_next_tid(pt, ptq);
			}
			if (pt->synth_opts.errors) {
				err = intel_pt_synth_error(pt, state->err,
							   ptq->cpu, ptq->pid,
							   ptq->tid,
							   state->from_ip);
				if (err)
					return err;
			}
			continue;
		}

		ptq->state = state;
		ptq->have_sample = true;
		intel_pt_sample_flags(ptq);

		/* Use estimated TSC upon return to user space */
		if (pt->est_tsc &&
		    (state->from_ip >= pt->kernel_start || !state->from_ip) &&
		    state->to_ip && state->to_ip < pt->kernel_start) {
			intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
				     state->timestamp, state->est_timestamp);
			ptq->timestamp = state->est_timestamp;
		/* Use estimated TSC in unknown switch state */
		} else if (ptq->sync_switch &&
			   ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
			   intel_pt_is_switch_ip(ptq, state->to_ip) &&
			   ptq->next_tid == -1) {
			intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
				     state->timestamp, state->est_timestamp);
			ptq->timestamp = state->est_timestamp;
		} else if (state->timestamp > ptq->timestamp) {
			ptq->timestamp = state->timestamp;
		}

		if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) {
			*timestamp = ptq->timestamp;
			return 0;
		}
	}
	return 0;
}

static inline int intel_pt_update_queues(struct intel_pt *pt)
{
	if (pt->queues.new_data) {
		pt->queues.new_data = false;
		return intel_pt_setup_queues(pt);
	}
	return 0;
}

static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp)
{
	unsigned int queue_nr;
	u64 ts;
	int ret;

	while (1) {
		struct auxtrace_queue *queue;
		struct intel_pt_queue *ptq;

		if (!pt->heap.heap_cnt)
			return 0;

		if (pt->heap.heap_array[0].ordinal >= timestamp)
			return 0;

		queue_nr = pt->heap.heap_array[0].queue_nr;
		queue = &pt->queues.queue_array[queue_nr];
		ptq = queue->priv;

		intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n",
			     queue_nr, pt->heap.heap_array[0].ordinal,
			     timestamp);

		auxtrace_heap__pop(&pt->heap);

		if (pt->heap.heap_cnt) {
			ts = pt->heap.heap_array[0].ordinal + 1;
			if (ts > timestamp)
				ts = timestamp;
		} else {
			ts = timestamp;
		}

		intel_pt_set_pid_tid_cpu(pt, queue);

		ret = intel_pt_run_decoder(ptq, &ts);

		if (ret < 0) {
			auxtrace_heap__add(&pt->heap, queue_nr, ts);
			return ret;
		}

		if (!ret) {
			ret = auxtrace_heap__add(&pt->heap, queue_nr, ts);
			if (ret < 0)
				return ret;
		} else {
			ptq->on_heap = false;
		}
	}

	return 0;
}

static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid,
					    u64 time_)
{
	struct auxtrace_queues *queues = &pt->queues;
	unsigned int i;
	u64 ts = 0;

	for (i = 0; i < queues->nr_queues; i++) {
		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
		struct intel_pt_queue *ptq = queue->priv;

		if (ptq && (tid == -1 || ptq->tid == tid)) {
			ptq->time = time_;
			intel_pt_set_pid_tid_cpu(pt, queue);
			intel_pt_run_decoder(ptq, &ts);
		}
	}
	return 0;
}

static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
{
	return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
				    sample->pid, sample->tid, 0);
}

static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
{
	unsigned i, j;

	if (cpu < 0 || !pt->queues.nr_queues)
		return NULL;

	if ((unsigned)cpu >= pt->queues.nr_queues)
		i = pt->queues.nr_queues - 1;
	else
		i = cpu;

	if (pt->queues.queue_array[i].cpu == cpu)
		return pt->queues.queue_array[i].priv;

	for (j = 0; i > 0; j++) {
		if (pt->queues.queue_array[--i].cpu == cpu)
			return pt->queues.queue_array[i].priv;
	}

	for (; j < pt->queues.nr_queues; j++) {
		if (pt->queues.queue_array[j].cpu == cpu)
			return pt->queues.queue_array[j].priv;
	}

	return NULL;
}
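
/*
 * Reconcile a context-switch event from the perf stream with the decoder's
 * switch-state machine: depending on switch_state, the new tid takes
 * effect immediately (return 1, and the caller sets the current tid) or is
 * deferred until the switch IP is seen in the trace (return 0).
 */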

static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
				u64 timestamp)
{
	struct intel_pt_queue *ptq;
	int err;

	if (!pt->sync_switch)
		return 1;

	ptq = intel_pt_cpu_to_ptq(pt, cpu);
	if (!ptq || !ptq->sync_switch)
		return 1;

	switch (ptq->switch_state) {
	case INTEL_PT_SS_NOT_TRACING:
		ptq->next_tid = -1;
		break;
	case INTEL_PT_SS_UNKNOWN:
	case INTEL_PT_SS_TRACING:
		ptq->next_tid = tid;
		ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP;
		return 0;
	case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
		if (!ptq->on_heap) {
			ptq->timestamp = perf_time_to_tsc(timestamp,
							  &pt->tc);
			err = auxtrace_heap__add(&pt->heap, ptq->queue_nr,
						 ptq->timestamp);
			if (err)
				return err;
			ptq->on_heap = true;
		}
		ptq->switch_state = INTEL_PT_SS_TRACING;
		break;
	case INTEL_PT_SS_EXPECTING_SWITCH_IP:
		ptq->next_tid = tid;
		intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu);
		break;
	default:
		break;
	}

	return 1;
}

static int intel_pt_process_switch(struct intel_pt *pt,
				   struct perf_sample *sample)
{
	struct perf_evsel *evsel;
	pid_t tid;
	int cpu, ret;

	evsel = perf_evlist__id2evsel(pt->session->evlist, sample->id);
	if (evsel != pt->switch_evsel)
		return 0;

	tid = perf_evsel__intval(evsel, sample, "next_pid");
	cpu = sample->cpu;

	intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     cpu, tid, sample->time, perf_time_to_tsc(sample->time,
							      &pt->tc));

	ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
	if (ret <= 0)
		return ret;

	return machine__set_current_tid(pt->machine, cpu, -1, tid);
}
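
/*
 * Two sources of switch information are handled below: with
 * have_sched_switch == 3, CPU-wide PERF_RECORD_SWITCH_CPU_WIDE events
 * supply the next pid/tid on switch-out; otherwise the pid/tid of the
 * switch-in sample is used.
 */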

static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
				   struct perf_sample *sample)
{
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
	pid_t pid, tid;
	int cpu, ret;

	cpu = sample->cpu;

	if (pt->have_sched_switch == 3) {
		if (!out)
			return 0;
		if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) {
			pr_err("Expecting CPU-wide context switch event\n");
			return -EINVAL;
		}
		pid = event->context_switch.next_prev_pid;
		tid = event->context_switch.next_prev_tid;
	} else {
		if (out)
			return 0;
		pid = sample->pid;
		tid = sample->tid;
	}

	if (tid == -1) {
		pr_err("context_switch event has no tid\n");
		return -EINVAL;
	}

	intel_pt_log("context_switch: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     cpu, pid, tid, sample->time, perf_time_to_tsc(sample->time,
								   &pt->tc));

	ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
	if (ret <= 0)
		return ret;

	return machine__set_current_tid(pt->machine, cpu, pid, tid);
}

static int intel_pt_process_itrace_start(struct intel_pt *pt,
					 union perf_event *event,
					 struct perf_sample *sample)
{
	if (!pt->per_cpu_mmaps)
		return 0;

	intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     sample->cpu, event->itrace_start.pid,
		     event->itrace_start.tid, sample->time,
		     perf_time_to_tsc(sample->time, &pt->tc));

	return machine__set_current_tid(pt->machine, sample->cpu,
					event->itrace_start.pid,
					event->itrace_start.tid);
}

static int intel_pt_process_event(struct perf_session *session,
				  union perf_event *event,
				  struct perf_sample *sample,
				  struct perf_tool *tool)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	u64 timestamp;
	int err = 0;

	if (dump_trace)
		return 0;

	if (!tool->ordered_events) {
		pr_err("Intel Processor Trace requires ordered events\n");
		return -EINVAL;
	}

	if (sample->time && sample->time != (u64)-1)
		timestamp = perf_time_to_tsc(sample->time, &pt->tc);
	else
		timestamp = 0;

	if (timestamp || pt->timeless_decoding) {
		err = intel_pt_update_queues(pt);
		if (err)
			return err;
	}

	if (pt->timeless_decoding) {
		if (event->header.type == PERF_RECORD_EXIT) {
			err = intel_pt_process_timeless_queues(pt,
							       event->fork.tid,
							       sample->time);
		}
	} else if (timestamp) {
		err = intel_pt_process_queues(pt, timestamp);
	}
	if (err)
		return err;

	if (event->header.type == PERF_RECORD_AUX &&
	    (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
	    pt->synth_opts.errors) {
		err = intel_pt_lost(pt, sample);
		if (err)
			return err;
	}

	if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE)
		err = intel_pt_process_switch(pt, sample);
	else if (event->header.type == PERF_RECORD_ITRACE_START)
		err = intel_pt_process_itrace_start(pt, event, sample);
	else if (event->header.type == PERF_RECORD_SWITCH ||
		 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
		err = intel_pt_context_switch(pt, event, sample);

	intel_pt_log("event %s (%u): cpu %d time %"PRIu64" tsc %#"PRIx64"\n",
		     perf_event__name(event->header.type), event->header.type,
		     sample->cpu, sample->time, timestamp);

	return err;
}

static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	int ret;

	if (dump_trace)
		return 0;

	if (!tool->ordered_events)
		return -EINVAL;

	ret = intel_pt_update_queues(pt);
	if (ret < 0)
		return ret;

	if (pt->timeless_decoding)
		return intel_pt_process_timeless_queues(pt, -1,
							MAX_TIMESTAMP - 1);

	return intel_pt_process_queues(pt, MAX_TIMESTAMP);
}

static void intel_pt_free_events(struct perf_session *session)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	struct auxtrace_queues *queues = &pt->queues;
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		intel_pt_free_queue(queues->queue_array[i].priv);
		queues->queue_array[i].priv = NULL;
	}
	intel_pt_log_disable();
	auxtrace_queues__free(queues);
}

static void intel_pt_free(struct perf_session *session)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	auxtrace_heap__free(&pt->heap);
	intel_pt_free_events(session);
	session->auxtrace = NULL;
	thread__put(pt->unknown_thread);
	addr_filters__exit(&pt->filts);
	zfree(&pt->filter);
	free(pt);
}

static int intel_pt_process_auxtrace_event(struct perf_session *session,
					   union perf_event *event,
					   struct perf_tool *tool __maybe_unused)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	if (!pt->data_queued) {
		struct auxtrace_buffer *buffer;
		off_t data_offset;
		int fd = perf_data__fd(session->data);
		int err;

		if (perf_data__is_pipe(session->data)) {
			data_offset = 0;
		} else {
			data_offset = lseek(fd, 0, SEEK_CUR);
			if (data_offset == -1)
				return -errno;
		}

		err = auxtrace_queues__add_event(&pt->queues, session, event,
						 data_offset, &buffer);
		if (err)
			return err;

		/* Dump here now we have copied a piped trace out of the pipe */
		if (dump_trace) {
			if (auxtrace_buffer__get_data(buffer, fd)) {
				intel_pt_dump_event(pt, buffer->data,
						    buffer->size);
				auxtrace_buffer__put_data(buffer);
			}
		}
	}

	return 0;
}

struct intel_pt_synth {
	struct perf_tool dummy_tool;
	struct perf_session *session;
};

static int intel_pt_event_synth(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample __maybe_unused,
				struct machine *machine __maybe_unused)
{
	struct intel_pt_synth *intel_pt_synth =
			container_of(tool, struct intel_pt_synth, dummy_tool);

	return perf_session__deliver_synth_event(intel_pt_synth->session, event,
						 NULL);
}

static int intel_pt_synth_event(struct perf_session *session, const char *name,
				struct perf_event_attr *attr, u64 id)
{
	struct intel_pt_synth intel_pt_synth;
	int err;

	pr_debug("Synthesizing '%s' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
		 name, id, (u64)attr->sample_type);

	memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth));
	intel_pt_synth.session = session;

	err = perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
					  &id, intel_pt_event_synth);
	if (err)
		pr_err("%s: failed to synthesize '%s' event type\n",
		       __func__, name);

	return err;
}

static void intel_pt_set_event_name(struct perf_evlist *evlist, u64 id,
				    const char *name)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->id && evsel->id[0] == id) {
			if (evsel->name)
				zfree(&evsel->name);
			evsel->name = strdup(name);
			break;
		}
	}
}

static struct perf_evsel *intel_pt_evsel(struct intel_pt *pt,
					 struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == pt->pmu_type && evsel->ids)
			return evsel;
	}

	return NULL;
}
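
/*
 * Create the synthesized event types ("branches", "instructions",
 * "transactions", "ptwrite" and the power events) according to the itrace
 * options, deriving their sample_type and ids from the Intel PT evsel
 * found above.
 */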
        id = evsel->id[0] + 1000000000;
        if (!id)
                id = 1;

        if (pt->synth_opts.branches) {
                attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
                attr.sample_period = 1;
                attr.sample_type |= PERF_SAMPLE_ADDR;
                err = intel_pt_synth_event(session, "branches", &attr, id);
                if (err)
                        return err;
                pt->sample_branches = true;
                pt->branches_sample_type = attr.sample_type;
                pt->branches_id = id;
                id += 1;
                attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
        }

        if (pt->synth_opts.callchain)
                attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
        if (pt->synth_opts.last_branch)
                attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;

        if (pt->synth_opts.instructions) {
                attr.config = PERF_COUNT_HW_INSTRUCTIONS;
                if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
                        attr.sample_period =
                                intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
                else
                        attr.sample_period = pt->synth_opts.period;
                err = intel_pt_synth_event(session, "instructions", &attr, id);
                if (err)
                        return err;
                pt->sample_instructions = true;
                pt->instructions_sample_type = attr.sample_type;
                pt->instructions_id = id;
                id += 1;
        }

        attr.sample_type &= ~(u64)PERF_SAMPLE_PERIOD;
        attr.sample_period = 1;

        if (pt->synth_opts.transactions) {
                attr.config = PERF_COUNT_HW_INSTRUCTIONS;
                err = intel_pt_synth_event(session, "transactions", &attr, id);
                if (err)
                        return err;
                pt->sample_transactions = true;
                pt->transactions_sample_type = attr.sample_type;
                pt->transactions_id = id;
                intel_pt_set_event_name(evlist, id, "transactions");
                id += 1;
        }

        attr.type = PERF_TYPE_SYNTH;
        attr.sample_type |= PERF_SAMPLE_RAW;

        if (pt->synth_opts.ptwrites) {
                attr.config = PERF_SYNTH_INTEL_PTWRITE;
                err = intel_pt_synth_event(session, "ptwrite", &attr, id);
                if (err)
                        return err;
                pt->sample_ptwrites = true;
                pt->ptwrites_sample_type = attr.sample_type;
                pt->ptwrites_id = id;
                intel_pt_set_event_name(evlist, id, "ptwrite");
                id += 1;
        }

        if (pt->synth_opts.pwr_events) {
                pt->sample_pwr_events = true;
                pt->pwr_events_sample_type = attr.sample_type;

                attr.config = PERF_SYNTH_INTEL_CBR;
                err = intel_pt_synth_event(session, "cbr", &attr, id);
                if (err)
                        return err;
                pt->cbr_id = id;
                intel_pt_set_event_name(evlist, id, "cbr");
                id += 1;
        }

        if (pt->synth_opts.pwr_events && (evsel->attr.config & 0x10)) {
                attr.config = PERF_SYNTH_INTEL_MWAIT;
                err = intel_pt_synth_event(session, "mwait", &attr, id);
                if (err)
                        return err;
                pt->mwait_id = id;
                intel_pt_set_event_name(evlist, id, "mwait");
                id += 1;

                attr.config = PERF_SYNTH_INTEL_PWRE;
                err = intel_pt_synth_event(session, "pwre", &attr, id);
                if (err)
                        return err;
                pt->pwre_id = id;
                intel_pt_set_event_name(evlist, id, "pwre");
                id += 1;

                attr.config = PERF_SYNTH_INTEL_EXSTOP;
                err = intel_pt_synth_event(session, "exstop", &attr, id);
                if (err)
                        return err;
                pt->exstop_id = id;
                intel_pt_set_event_name(evlist, id, "exstop");
                id += 1;

                attr.config = PERF_SYNTH_INTEL_PWRX;
                err = intel_pt_synth_event(session, "pwrx", &attr, id);
                if (err)
                        return err;
                pt->pwrx_id = id;
                intel_pt_set_event_name(evlist, id, "pwrx");
                id += 1;
        }

        return 0;
}

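/*
 * Find the sched:sched_switch tracepoint event, used below (when
 * have_sched_switch == 1) to follow context switches during decoding.
 */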
static struct perf_evsel *intel_pt_find_sched_switch(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;

        evlist__for_each_entry_reverse(evlist, evsel) {
                const char *name = perf_evsel__name(evsel);

                if (!strcmp(name, "sched:sched_switch"))
                        return evsel;
        }

        return NULL;
}

static bool intel_pt_find_switch(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;

        evlist__for_each_entry(evlist, evsel) {
                if (evsel->attr.context_switch)
                        return true;
        }

        return false;
}

static int intel_pt_perf_config(const char *var, const char *value, void *data)
{
        struct intel_pt *pt = data;

        if (!strcmp(var, "intel-pt.mispred-all"))
                pt->mispred_all = perf_config_bool(var, value);

        return 0;
}

static const char * const intel_pt_info_fmts[] = {
        [INTEL_PT_PMU_TYPE]             = "  PMU Type            %"PRId64"\n",
        [INTEL_PT_TIME_SHIFT]           = "  Time Shift          %"PRIu64"\n",
        [INTEL_PT_TIME_MULT]            = "  Time Multiplier     %"PRIu64"\n",
        [INTEL_PT_TIME_ZERO]            = "  Time Zero           %"PRIu64"\n",
        [INTEL_PT_CAP_USER_TIME_ZERO]   = "  Cap Time Zero       %"PRId64"\n",
        [INTEL_PT_TSC_BIT]              = "  TSC bit             %#"PRIx64"\n",
        [INTEL_PT_NORETCOMP_BIT]        = "  NoRETComp bit       %#"PRIx64"\n",
        [INTEL_PT_HAVE_SCHED_SWITCH]    = "  Have sched_switch   %"PRId64"\n",
        [INTEL_PT_SNAPSHOT_MODE]        = "  Snapshot mode       %"PRId64"\n",
        [INTEL_PT_PER_CPU_MMAPS]        = "  Per-cpu maps        %"PRId64"\n",
        [INTEL_PT_MTC_BIT]              = "  MTC bit             %#"PRIx64"\n",
        [INTEL_PT_TSC_CTC_N]            = "  TSC:CTC numerator   %"PRIu64"\n",
        [INTEL_PT_TSC_CTC_D]            = "  TSC:CTC denominator %"PRIu64"\n",
        [INTEL_PT_CYC_BIT]              = "  CYC bit             %#"PRIx64"\n",
        [INTEL_PT_MAX_NONTURBO_RATIO]   = "  Max non-turbo ratio %"PRIu64"\n",
        [INTEL_PT_FILTER_STR_LEN]       = "  Filter string len.  %"PRIu64"\n",
};

static void intel_pt_print_info(u64 *arr, int start, int finish)
{
        int i;

        if (!dump_trace)
                return;

        for (i = start; i <= finish; i++)
                fprintf(stdout, intel_pt_info_fmts[i], arr[i]);
}

static void intel_pt_print_info_str(const char *name, const char *str)
{
        if (!dump_trace)
                return;

        fprintf(stdout, "  %-20s%s\n", name, str ? str : "");
}

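/*
 * priv[] entries beyond INTEL_PT_PER_CPU_MMAPS are optional (older perf.data
 * files may not have them); check the header size to see whether the entry
 * at 'pos' was actually written.
 */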
static bool intel_pt_has(struct auxtrace_info_event *auxtrace_info, int pos)
{
        return auxtrace_info->header.size >=
                sizeof(struct auxtrace_info_event) + (sizeof(u64) * (pos + 1));
}

int intel_pt_process_auxtrace_info(union perf_event *event,
                                   struct perf_session *session)
{
        struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
        size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS;
        struct intel_pt *pt;
        void *info_end;
        u64 *info;
        int err;

        if (auxtrace_info->header.size < sizeof(struct auxtrace_info_event) +
                                         min_sz)
                return -EINVAL;

        pt = zalloc(sizeof(struct intel_pt));
        if (!pt)
                return -ENOMEM;

        addr_filters__init(&pt->filts);

        err = perf_config(intel_pt_perf_config, pt);
        if (err)
                goto err_free;

        err = auxtrace_queues__init(&pt->queues);
        if (err)
                goto err_free;

        intel_pt_log_set_name(INTEL_PT_PMU_NAME);

        pt->session = session;
        pt->machine = &session->machines.host; /* No kvm support */
        pt->auxtrace_type = auxtrace_info->type;
        pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE];
        pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT];
        pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT];
        pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO];
        pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO];
        pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT];
        pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT];
        pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH];
        pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE];
        pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS];
        intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE,
                            INTEL_PT_PER_CPU_MMAPS);

        if (intel_pt_has(auxtrace_info, INTEL_PT_CYC_BIT)) {
                pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT];
                pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS];
                pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N];
                pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D];
                pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT];
                intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT,
                                    INTEL_PT_CYC_BIT);
        }

        if (intel_pt_has(auxtrace_info, INTEL_PT_MAX_NONTURBO_RATIO)) {
                pt->max_non_turbo_ratio =
                        auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO];
                intel_pt_print_info(&auxtrace_info->priv[0],
                                    INTEL_PT_MAX_NONTURBO_RATIO,
                                    INTEL_PT_MAX_NONTURBO_RATIO);
        }

        info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;
        info_end = (void *)info + auxtrace_info->header.size;

        if (intel_pt_has(auxtrace_info, INTEL_PT_FILTER_STR_LEN)) {
                size_t len;

                len = auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN];
                intel_pt_print_info(&auxtrace_info->priv[0],
                                    INTEL_PT_FILTER_STR_LEN,
                                    INTEL_PT_FILTER_STR_LEN);
                if (len) {
                        const char *filter = (const char *)info;

                        len = roundup(len + 1, 8);
                        info += len >> 3;
                        if ((void *)info > info_end) {
                                pr_err("%s: bad filter string length\n", __func__);
                                err = -EINVAL;
                                goto err_free_queues;
                        }
                        pt->filter = memdup(filter, len);
                        if (!pt->filter) {
                                err = -ENOMEM;
                                goto err_free_queues;
                        }
                        if (session->header.needs_swap)
                                mem_bswap_64(pt->filter, len);
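                        /*
                         * The filter string was written padded to a multiple
                         * of 8 bytes (len was rounded up above), so its last
                         * byte must be NUL if it is well formed.
                         */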
                        if (pt->filter[len - 1]) {
                                pr_err("%s: filter string not null terminated\n", __func__);
                                err = -EINVAL;
                                goto err_free_queues;
                        }
                        err = addr_filters__parse_bare_filter(&pt->filts,
                                                              filter);
                        if (err)
                                goto err_free_queues;
                }
                intel_pt_print_info_str("Filter string", pt->filter);
        }

        pt->timeless_decoding = intel_pt_timeless_decoding(pt);
        pt->have_tsc = intel_pt_have_tsc(pt);
        pt->sampling_mode = false;
        pt->est_tsc = !pt->timeless_decoding;

        pt->unknown_thread = thread__new(999999999, 999999999);
        if (!pt->unknown_thread) {
                err = -ENOMEM;
                goto err_free_queues;
        }

        /*
         * Since this thread will not be kept in any rbtree nor in a
         * list, initialize its list node so that at thread__put() the
         * current thread lifetime assumption is kept and we don't segfault
         * at list_del_init().
         */
        INIT_LIST_HEAD(&pt->unknown_thread->node);

        err = thread__set_comm(pt->unknown_thread, "unknown", 0);
        if (err)
                goto err_delete_thread;
        if (thread__init_map_groups(pt->unknown_thread, pt->machine)) {
                err = -ENOMEM;
                goto err_delete_thread;
        }

        pt->auxtrace.process_event = intel_pt_process_event;
        pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event;
        pt->auxtrace.flush_events = intel_pt_flush;
        pt->auxtrace.free_events = intel_pt_free_events;
        pt->auxtrace.free = intel_pt_free;
        session->auxtrace = &pt->auxtrace;

        if (dump_trace)
                return 0;

        if (pt->have_sched_switch == 1) {
                pt->switch_evsel = intel_pt_find_sched_switch(session->evlist);
                if (!pt->switch_evsel) {
                        pr_err("%s: missing sched_switch event\n", __func__);
                        err = -EINVAL;
                        goto err_delete_thread;
                }
        } else if (pt->have_sched_switch == 2 &&
                   !intel_pt_find_switch(session->evlist)) {
                pr_err("%s: missing context_switch attribute flag\n", __func__);
                err = -EINVAL;
                goto err_delete_thread;
        }

        if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
                pt->synth_opts = *session->itrace_synth_opts;
        } else {
                itrace_synth_opts__set_default(&pt->synth_opts,
                                session->itrace_synth_opts ?
                                session->itrace_synth_opts->default_no_sample :
                                false);
                if (use_browser != -1) {
                        pt->synth_opts.branches = false;
                        pt->synth_opts.callchain = true;
                }
                if (session->itrace_synth_opts)
                        pt->synth_opts.thread_stack =
                                session->itrace_synth_opts->thread_stack;
        }

        if (pt->synth_opts.log)
                intel_pt_log_enable();

        /* Maximum non-turbo ratio is TSC freq / 100 MHz */
        if (pt->tc.time_mult) {
                u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000);

                if (!pt->max_non_turbo_ratio)
                        pt->max_non_turbo_ratio =
                                        (tsc_freq + 50000000) / 100000000;
                intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
                intel_pt_log("Maximum non-turbo ratio %u\n",
                             pt->max_non_turbo_ratio);
                pt->cbr2khz = tsc_freq / pt->max_non_turbo_ratio / 1000;
        }

        if (pt->synth_opts.calls)
                pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
                                       PERF_IP_FLAG_TRACE_END;
        if (pt->synth_opts.returns)
                pt->branches_filter |= PERF_IP_FLAG_RETURN |
                                       PERF_IP_FLAG_TRACE_BEGIN;

        if (pt->synth_opts.callchain && !symbol_conf.use_callchain) {
                symbol_conf.use_callchain = true;
                if (callchain_register_param(&callchain_param) < 0) {
                        symbol_conf.use_callchain = false;
                        pt->synth_opts.callchain = false;
                }
        }

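        /*
         * Register the synthesized event types with the session before the
         * queued AUX data is indexed, so decoded samples map to valid ids.
         */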
        err = intel_pt_synth_events(pt, session);
        if (err)
                goto err_delete_thread;

        err = auxtrace_queues__process_index(&pt->queues, session);
        if (err)
                goto err_delete_thread;

        if (pt->queues.populated)
                pt->data_queued = true;

        if (pt->timeless_decoding)
                pr_debug2("Intel PT decoding without timestamps\n");

        return 0;

err_delete_thread:
        thread__zput(pt->unknown_thread);
err_free_queues:
        intel_pt_log_disable();
        auxtrace_queues__free(&pt->queues);
        session->auxtrace = NULL;
err_free:
        addr_filters__exit(&pt->filts);
        zfree(&pt->filter);
        free(pt);
        return err;
}