at v4.10-rc4 2852 lines 68 kB view raw
1#include <sys/mman.h> 2#include "sort.h" 3#include "hist.h" 4#include "comm.h" 5#include "symbol.h" 6#include "evsel.h" 7#include "evlist.h" 8#include <traceevent/event-parse.h> 9#include "mem-events.h" 10 11regex_t parent_regex; 12const char default_parent_pattern[] = "^sys_|^do_page_fault"; 13const char *parent_pattern = default_parent_pattern; 14const char *default_sort_order = "comm,dso,symbol"; 15const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles"; 16const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked"; 17const char default_top_sort_order[] = "dso,symbol"; 18const char default_diff_sort_order[] = "dso,symbol"; 19const char default_tracepoint_sort_order[] = "trace"; 20const char *sort_order; 21const char *field_order; 22regex_t ignore_callees_regex; 23int have_ignore_callees = 0; 24enum sort_mode sort__mode = SORT_MODE__NORMAL; 25 26/* 27 * Replaces all occurrences of a char used with the: 28 * 29 * -t, --field-separator 30 * 31 * option, that uses a special separator character and don't pad with spaces, 32 * replacing all occurances of this separator in symbol names (and other 33 * output) with a '.' character, that thus it's the only non valid separator. 34*/ 35static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...) 36{ 37 int n; 38 va_list ap; 39 40 va_start(ap, fmt); 41 n = vsnprintf(bf, size, fmt, ap); 42 if (symbol_conf.field_sep && n > 0) { 43 char *sep = bf; 44 45 while (1) { 46 sep = strchr(sep, *symbol_conf.field_sep); 47 if (sep == NULL) 48 break; 49 *sep = '.'; 50 } 51 } 52 va_end(ap); 53 54 if (n >= (int)size) 55 return size - 1; 56 return n; 57} 58 59static int64_t cmp_null(const void *l, const void *r) 60{ 61 if (!l && !r) 62 return 0; 63 else if (!l) 64 return -1; 65 else 66 return 1; 67} 68 69/* --sort pid */ 70 71static int64_t 72sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) 73{ 74 return right->thread->tid - left->thread->tid; 75} 76 77static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf, 78 size_t size, unsigned int width) 79{ 80 const char *comm = thread__comm_str(he->thread); 81 82 width = max(7U, width) - 8; 83 return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid, 84 width, width, comm ?: ""); 85} 86 87static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg) 88{ 89 const struct thread *th = arg; 90 91 if (type != HIST_FILTER__THREAD) 92 return -1; 93 94 return th && he->thread != th; 95} 96 97struct sort_entry sort_thread = { 98 .se_header = " Pid:Command", 99 .se_cmp = sort__thread_cmp, 100 .se_snprintf = hist_entry__thread_snprintf, 101 .se_filter = hist_entry__thread_filter, 102 .se_width_idx = HISTC_THREAD, 103}; 104 105/* --sort comm */ 106 107static int64_t 108sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) 109{ 110 /* Compare the addr that should be unique among comm */ 111 return strcmp(comm__str(right->comm), comm__str(left->comm)); 112} 113 114static int64_t 115sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) 116{ 117 /* Compare the addr that should be unique among comm */ 118 return strcmp(comm__str(right->comm), comm__str(left->comm)); 119} 120 121static int64_t 122sort__comm_sort(struct hist_entry *left, struct hist_entry *right) 123{ 124 return strcmp(comm__str(right->comm), comm__str(left->comm)); 125} 126 127static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf, 128 size_t size, unsigned int width) 129{ 130 return 
repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm)); 131} 132 133struct sort_entry sort_comm = { 134 .se_header = "Command", 135 .se_cmp = sort__comm_cmp, 136 .se_collapse = sort__comm_collapse, 137 .se_sort = sort__comm_sort, 138 .se_snprintf = hist_entry__comm_snprintf, 139 .se_filter = hist_entry__thread_filter, 140 .se_width_idx = HISTC_COMM, 141}; 142 143/* --sort dso */ 144 145static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r) 146{ 147 struct dso *dso_l = map_l ? map_l->dso : NULL; 148 struct dso *dso_r = map_r ? map_r->dso : NULL; 149 const char *dso_name_l, *dso_name_r; 150 151 if (!dso_l || !dso_r) 152 return cmp_null(dso_r, dso_l); 153 154 if (verbose) { 155 dso_name_l = dso_l->long_name; 156 dso_name_r = dso_r->long_name; 157 } else { 158 dso_name_l = dso_l->short_name; 159 dso_name_r = dso_r->short_name; 160 } 161 162 return strcmp(dso_name_l, dso_name_r); 163} 164 165static int64_t 166sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) 167{ 168 return _sort__dso_cmp(right->ms.map, left->ms.map); 169} 170 171static int _hist_entry__dso_snprintf(struct map *map, char *bf, 172 size_t size, unsigned int width) 173{ 174 if (map && map->dso) { 175 const char *dso_name = !verbose ? map->dso->short_name : 176 map->dso->long_name; 177 return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name); 178 } 179 180 return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]"); 181} 182 183static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf, 184 size_t size, unsigned int width) 185{ 186 return _hist_entry__dso_snprintf(he->ms.map, bf, size, width); 187} 188 189static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg) 190{ 191 const struct dso *dso = arg; 192 193 if (type != HIST_FILTER__DSO) 194 return -1; 195 196 return dso && (!he->ms.map || he->ms.map->dso != dso); 197} 198 199struct sort_entry sort_dso = { 200 .se_header = "Shared Object", 201 .se_cmp = sort__dso_cmp, 202 .se_snprintf = hist_entry__dso_snprintf, 203 .se_filter = hist_entry__dso_filter, 204 .se_width_idx = HISTC_DSO, 205}; 206 207/* --sort symbol */ 208 209static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip) 210{ 211 return (int64_t)(right_ip - left_ip); 212} 213 214static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r) 215{ 216 if (!sym_l || !sym_r) 217 return cmp_null(sym_l, sym_r); 218 219 if (sym_l == sym_r) 220 return 0; 221 222 if (sym_l->start != sym_r->start) 223 return (int64_t)(sym_r->start - sym_l->start); 224 225 return (int64_t)(sym_r->end - sym_l->end); 226} 227 228static int64_t 229sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) 230{ 231 int64_t ret; 232 233 if (!left->ms.sym && !right->ms.sym) 234 return _sort__addr_cmp(left->ip, right->ip); 235 236 /* 237 * comparing symbol address alone is not enough since it's a 238 * relative address within a dso. 
239 */ 240 if (!hists__has(left->hists, dso) || hists__has(right->hists, dso)) { 241 ret = sort__dso_cmp(left, right); 242 if (ret != 0) 243 return ret; 244 } 245 246 return _sort__sym_cmp(left->ms.sym, right->ms.sym); 247} 248 249static int64_t 250sort__sym_sort(struct hist_entry *left, struct hist_entry *right) 251{ 252 if (!left->ms.sym || !right->ms.sym) 253 return cmp_null(left->ms.sym, right->ms.sym); 254 255 return strcmp(right->ms.sym->name, left->ms.sym->name); 256} 257 258static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym, 259 u64 ip, char level, char *bf, size_t size, 260 unsigned int width) 261{ 262 size_t ret = 0; 263 264 if (verbose) { 265 char o = map ? dso__symtab_origin(map->dso) : '!'; 266 ret += repsep_snprintf(bf, size, "%-#*llx %c ", 267 BITS_PER_LONG / 4 + 2, ip, o); 268 } 269 270 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level); 271 if (sym && map) { 272 if (map->type == MAP__VARIABLE) { 273 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name); 274 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", 275 ip - map->unmap_ip(map, sym->start)); 276 } else { 277 ret += repsep_snprintf(bf + ret, size - ret, "%.*s", 278 width - ret, 279 sym->name); 280 } 281 } else { 282 size_t len = BITS_PER_LONG / 4; 283 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", 284 len, ip); 285 } 286 287 return ret; 288} 289 290static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, 291 size_t size, unsigned int width) 292{ 293 return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip, 294 he->level, bf, size, width); 295} 296 297static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg) 298{ 299 const char *sym = arg; 300 301 if (type != HIST_FILTER__SYMBOL) 302 return -1; 303 304 return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym)); 305} 306 307struct sort_entry sort_sym = { 308 .se_header = "Symbol", 309 .se_cmp = sort__sym_cmp, 310 .se_sort = sort__sym_sort, 311 .se_snprintf = hist_entry__sym_snprintf, 312 .se_filter = hist_entry__sym_filter, 313 .se_width_idx = HISTC_SYMBOL, 314}; 315 316/* --sort srcline */ 317 318char *hist_entry__get_srcline(struct hist_entry *he) 319{ 320 struct map *map = he->ms.map; 321 322 if (!map) 323 return SRCLINE_UNKNOWN; 324 325 return get_srcline(map->dso, map__rip_2objdump(map, he->ip), 326 he->ms.sym, true); 327} 328 329static int64_t 330sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right) 331{ 332 if (!left->srcline) 333 left->srcline = hist_entry__get_srcline(left); 334 if (!right->srcline) 335 right->srcline = hist_entry__get_srcline(right); 336 337 return strcmp(right->srcline, left->srcline); 338} 339 340static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf, 341 size_t size, unsigned int width) 342{ 343 if (!he->srcline) 344 he->srcline = hist_entry__get_srcline(he); 345 346 return repsep_snprintf(bf, size, "%-.*s", width, he->srcline); 347} 348 349struct sort_entry sort_srcline = { 350 .se_header = "Source:Line", 351 .se_cmp = sort__srcline_cmp, 352 .se_snprintf = hist_entry__srcline_snprintf, 353 .se_width_idx = HISTC_SRCLINE, 354}; 355 356/* --sort srcline_from */ 357 358static int64_t 359sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right) 360{ 361 if (!left->branch_info->srcline_from) { 362 struct map *map = left->branch_info->from.map; 363 if (!map) 364 left->branch_info->srcline_from = SRCLINE_UNKNOWN; 365 else 366 left->branch_info->srcline_from = get_srcline(map->dso, 367 
map__rip_2objdump(map, 368 left->branch_info->from.al_addr), 369 left->branch_info->from.sym, true); 370 } 371 if (!right->branch_info->srcline_from) { 372 struct map *map = right->branch_info->from.map; 373 if (!map) 374 right->branch_info->srcline_from = SRCLINE_UNKNOWN; 375 else 376 right->branch_info->srcline_from = get_srcline(map->dso, 377 map__rip_2objdump(map, 378 right->branch_info->from.al_addr), 379 right->branch_info->from.sym, true); 380 } 381 return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from); 382} 383 384static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf, 385 size_t size, unsigned int width) 386{ 387 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from); 388} 389 390struct sort_entry sort_srcline_from = { 391 .se_header = "From Source:Line", 392 .se_cmp = sort__srcline_from_cmp, 393 .se_snprintf = hist_entry__srcline_from_snprintf, 394 .se_width_idx = HISTC_SRCLINE_FROM, 395}; 396 397/* --sort srcline_to */ 398 399static int64_t 400sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right) 401{ 402 if (!left->branch_info->srcline_to) { 403 struct map *map = left->branch_info->to.map; 404 if (!map) 405 left->branch_info->srcline_to = SRCLINE_UNKNOWN; 406 else 407 left->branch_info->srcline_to = get_srcline(map->dso, 408 map__rip_2objdump(map, 409 left->branch_info->to.al_addr), 410 left->branch_info->from.sym, true); 411 } 412 if (!right->branch_info->srcline_to) { 413 struct map *map = right->branch_info->to.map; 414 if (!map) 415 right->branch_info->srcline_to = SRCLINE_UNKNOWN; 416 else 417 right->branch_info->srcline_to = get_srcline(map->dso, 418 map__rip_2objdump(map, 419 right->branch_info->to.al_addr), 420 right->branch_info->to.sym, true); 421 } 422 return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to); 423} 424 425static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf, 426 size_t size, unsigned int width) 427{ 428 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to); 429} 430 431struct sort_entry sort_srcline_to = { 432 .se_header = "To Source:Line", 433 .se_cmp = sort__srcline_to_cmp, 434 .se_snprintf = hist_entry__srcline_to_snprintf, 435 .se_width_idx = HISTC_SRCLINE_TO, 436}; 437 438/* --sort srcfile */ 439 440static char no_srcfile[1]; 441 442static char *hist_entry__get_srcfile(struct hist_entry *e) 443{ 444 char *sf, *p; 445 struct map *map = e->ms.map; 446 447 if (!map) 448 return no_srcfile; 449 450 sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip), 451 e->ms.sym, false, true); 452 if (!strcmp(sf, SRCLINE_UNKNOWN)) 453 return no_srcfile; 454 p = strchr(sf, ':'); 455 if (p && *sf) { 456 *p = 0; 457 return sf; 458 } 459 free(sf); 460 return no_srcfile; 461} 462 463static int64_t 464sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right) 465{ 466 if (!left->srcfile) 467 left->srcfile = hist_entry__get_srcfile(left); 468 if (!right->srcfile) 469 right->srcfile = hist_entry__get_srcfile(right); 470 471 return strcmp(right->srcfile, left->srcfile); 472} 473 474static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf, 475 size_t size, unsigned int width) 476{ 477 if (!he->srcfile) 478 he->srcfile = hist_entry__get_srcfile(he); 479 480 return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile); 481} 482 483struct sort_entry sort_srcfile = { 484 .se_header = "Source File", 485 .se_cmp = sort__srcfile_cmp, 486 .se_snprintf = 
hist_entry__srcfile_snprintf, 487 .se_width_idx = HISTC_SRCFILE, 488}; 489 490/* --sort parent */ 491 492static int64_t 493sort__parent_cmp(struct hist_entry *left, struct hist_entry *right) 494{ 495 struct symbol *sym_l = left->parent; 496 struct symbol *sym_r = right->parent; 497 498 if (!sym_l || !sym_r) 499 return cmp_null(sym_l, sym_r); 500 501 return strcmp(sym_r->name, sym_l->name); 502} 503 504static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf, 505 size_t size, unsigned int width) 506{ 507 return repsep_snprintf(bf, size, "%-*.*s", width, width, 508 he->parent ? he->parent->name : "[other]"); 509} 510 511struct sort_entry sort_parent = { 512 .se_header = "Parent symbol", 513 .se_cmp = sort__parent_cmp, 514 .se_snprintf = hist_entry__parent_snprintf, 515 .se_width_idx = HISTC_PARENT, 516}; 517 518/* --sort cpu */ 519 520static int64_t 521sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right) 522{ 523 return right->cpu - left->cpu; 524} 525 526static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf, 527 size_t size, unsigned int width) 528{ 529 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu); 530} 531 532struct sort_entry sort_cpu = { 533 .se_header = "CPU", 534 .se_cmp = sort__cpu_cmp, 535 .se_snprintf = hist_entry__cpu_snprintf, 536 .se_width_idx = HISTC_CPU, 537}; 538 539/* --sort socket */ 540 541static int64_t 542sort__socket_cmp(struct hist_entry *left, struct hist_entry *right) 543{ 544 return right->socket - left->socket; 545} 546 547static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf, 548 size_t size, unsigned int width) 549{ 550 return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket); 551} 552 553static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg) 554{ 555 int sk = *(const int *)arg; 556 557 if (type != HIST_FILTER__SOCKET) 558 return -1; 559 560 return sk >= 0 && he->socket != sk; 561} 562 563struct sort_entry sort_socket = { 564 .se_header = "Socket", 565 .se_cmp = sort__socket_cmp, 566 .se_snprintf = hist_entry__socket_snprintf, 567 .se_filter = hist_entry__socket_filter, 568 .se_width_idx = HISTC_SOCKET, 569}; 570 571/* --sort trace */ 572 573static char *get_trace_output(struct hist_entry *he) 574{ 575 struct trace_seq seq; 576 struct perf_evsel *evsel; 577 struct pevent_record rec = { 578 .data = he->raw_data, 579 .size = he->raw_size, 580 }; 581 582 evsel = hists_to_evsel(he->hists); 583 584 trace_seq_init(&seq); 585 if (symbol_conf.raw_trace) { 586 pevent_print_fields(&seq, he->raw_data, he->raw_size, 587 evsel->tp_format); 588 } else { 589 pevent_event_info(&seq, evsel->tp_format, &rec); 590 } 591 /* 592 * Trim the buffer, it starts at 4KB and we're not going to 593 * add anything more to this buffer. 
594 */ 595 return realloc(seq.buffer, seq.len + 1); 596} 597 598static int64_t 599sort__trace_cmp(struct hist_entry *left, struct hist_entry *right) 600{ 601 struct perf_evsel *evsel; 602 603 evsel = hists_to_evsel(left->hists); 604 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) 605 return 0; 606 607 if (left->trace_output == NULL) 608 left->trace_output = get_trace_output(left); 609 if (right->trace_output == NULL) 610 right->trace_output = get_trace_output(right); 611 612 return strcmp(right->trace_output, left->trace_output); 613} 614 615static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf, 616 size_t size, unsigned int width) 617{ 618 struct perf_evsel *evsel; 619 620 evsel = hists_to_evsel(he->hists); 621 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) 622 return scnprintf(bf, size, "%-.*s", width, "N/A"); 623 624 if (he->trace_output == NULL) 625 he->trace_output = get_trace_output(he); 626 return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output); 627} 628 629struct sort_entry sort_trace = { 630 .se_header = "Trace output", 631 .se_cmp = sort__trace_cmp, 632 .se_snprintf = hist_entry__trace_snprintf, 633 .se_width_idx = HISTC_TRACE, 634}; 635 636/* sort keys for branch stacks */ 637 638static int64_t 639sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right) 640{ 641 if (!left->branch_info || !right->branch_info) 642 return cmp_null(left->branch_info, right->branch_info); 643 644 return _sort__dso_cmp(left->branch_info->from.map, 645 right->branch_info->from.map); 646} 647 648static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf, 649 size_t size, unsigned int width) 650{ 651 if (he->branch_info) 652 return _hist_entry__dso_snprintf(he->branch_info->from.map, 653 bf, size, width); 654 else 655 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 656} 657 658static int hist_entry__dso_from_filter(struct hist_entry *he, int type, 659 const void *arg) 660{ 661 const struct dso *dso = arg; 662 663 if (type != HIST_FILTER__DSO) 664 return -1; 665 666 return dso && (!he->branch_info || !he->branch_info->from.map || 667 he->branch_info->from.map->dso != dso); 668} 669 670static int64_t 671sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right) 672{ 673 if (!left->branch_info || !right->branch_info) 674 return cmp_null(left->branch_info, right->branch_info); 675 676 return _sort__dso_cmp(left->branch_info->to.map, 677 right->branch_info->to.map); 678} 679 680static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf, 681 size_t size, unsigned int width) 682{ 683 if (he->branch_info) 684 return _hist_entry__dso_snprintf(he->branch_info->to.map, 685 bf, size, width); 686 else 687 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 688} 689 690static int hist_entry__dso_to_filter(struct hist_entry *he, int type, 691 const void *arg) 692{ 693 const struct dso *dso = arg; 694 695 if (type != HIST_FILTER__DSO) 696 return -1; 697 698 return dso && (!he->branch_info || !he->branch_info->to.map || 699 he->branch_info->to.map->dso != dso); 700} 701 702static int64_t 703sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right) 704{ 705 struct addr_map_symbol *from_l = &left->branch_info->from; 706 struct addr_map_symbol *from_r = &right->branch_info->from; 707 708 if (!left->branch_info || !right->branch_info) 709 return cmp_null(left->branch_info, right->branch_info); 710 711 from_l = &left->branch_info->from; 712 from_r = &right->branch_info->from; 713 714 if (!from_l->sym && 
!from_r->sym) 715 return _sort__addr_cmp(from_l->addr, from_r->addr); 716 717 return _sort__sym_cmp(from_l->sym, from_r->sym); 718} 719 720static int64_t 721sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right) 722{ 723 struct addr_map_symbol *to_l, *to_r; 724 725 if (!left->branch_info || !right->branch_info) 726 return cmp_null(left->branch_info, right->branch_info); 727 728 to_l = &left->branch_info->to; 729 to_r = &right->branch_info->to; 730 731 if (!to_l->sym && !to_r->sym) 732 return _sort__addr_cmp(to_l->addr, to_r->addr); 733 734 return _sort__sym_cmp(to_l->sym, to_r->sym); 735} 736 737static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf, 738 size_t size, unsigned int width) 739{ 740 if (he->branch_info) { 741 struct addr_map_symbol *from = &he->branch_info->from; 742 743 return _hist_entry__sym_snprintf(from->map, from->sym, from->addr, 744 he->level, bf, size, width); 745 } 746 747 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 748} 749 750static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf, 751 size_t size, unsigned int width) 752{ 753 if (he->branch_info) { 754 struct addr_map_symbol *to = &he->branch_info->to; 755 756 return _hist_entry__sym_snprintf(to->map, to->sym, to->addr, 757 he->level, bf, size, width); 758 } 759 760 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 761} 762 763static int hist_entry__sym_from_filter(struct hist_entry *he, int type, 764 const void *arg) 765{ 766 const char *sym = arg; 767 768 if (type != HIST_FILTER__SYMBOL) 769 return -1; 770 771 return sym && !(he->branch_info && he->branch_info->from.sym && 772 strstr(he->branch_info->from.sym->name, sym)); 773} 774 775static int hist_entry__sym_to_filter(struct hist_entry *he, int type, 776 const void *arg) 777{ 778 const char *sym = arg; 779 780 if (type != HIST_FILTER__SYMBOL) 781 return -1; 782 783 return sym && !(he->branch_info && he->branch_info->to.sym && 784 strstr(he->branch_info->to.sym->name, sym)); 785} 786 787struct sort_entry sort_dso_from = { 788 .se_header = "Source Shared Object", 789 .se_cmp = sort__dso_from_cmp, 790 .se_snprintf = hist_entry__dso_from_snprintf, 791 .se_filter = hist_entry__dso_from_filter, 792 .se_width_idx = HISTC_DSO_FROM, 793}; 794 795struct sort_entry sort_dso_to = { 796 .se_header = "Target Shared Object", 797 .se_cmp = sort__dso_to_cmp, 798 .se_snprintf = hist_entry__dso_to_snprintf, 799 .se_filter = hist_entry__dso_to_filter, 800 .se_width_idx = HISTC_DSO_TO, 801}; 802 803struct sort_entry sort_sym_from = { 804 .se_header = "Source Symbol", 805 .se_cmp = sort__sym_from_cmp, 806 .se_snprintf = hist_entry__sym_from_snprintf, 807 .se_filter = hist_entry__sym_from_filter, 808 .se_width_idx = HISTC_SYMBOL_FROM, 809}; 810 811struct sort_entry sort_sym_to = { 812 .se_header = "Target Symbol", 813 .se_cmp = sort__sym_to_cmp, 814 .se_snprintf = hist_entry__sym_to_snprintf, 815 .se_filter = hist_entry__sym_to_filter, 816 .se_width_idx = HISTC_SYMBOL_TO, 817}; 818 819static int64_t 820sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right) 821{ 822 unsigned char mp, p; 823 824 if (!left->branch_info || !right->branch_info) 825 return cmp_null(left->branch_info, right->branch_info); 826 827 mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred; 828 p = left->branch_info->flags.predicted != right->branch_info->flags.predicted; 829 return mp || p; 830} 831 832static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf, 833 size_t size, 
unsigned int width){ 834 static const char *out = "N/A"; 835 836 if (he->branch_info) { 837 if (he->branch_info->flags.predicted) 838 out = "N"; 839 else if (he->branch_info->flags.mispred) 840 out = "Y"; 841 } 842 843 return repsep_snprintf(bf, size, "%-*.*s", width, width, out); 844} 845 846static int64_t 847sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right) 848{ 849 return left->branch_info->flags.cycles - 850 right->branch_info->flags.cycles; 851} 852 853static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf, 854 size_t size, unsigned int width) 855{ 856 if (he->branch_info->flags.cycles == 0) 857 return repsep_snprintf(bf, size, "%-*s", width, "-"); 858 return repsep_snprintf(bf, size, "%-*hd", width, 859 he->branch_info->flags.cycles); 860} 861 862struct sort_entry sort_cycles = { 863 .se_header = "Basic Block Cycles", 864 .se_cmp = sort__cycles_cmp, 865 .se_snprintf = hist_entry__cycles_snprintf, 866 .se_width_idx = HISTC_CYCLES, 867}; 868 869/* --sort daddr_sym */ 870int64_t 871sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right) 872{ 873 uint64_t l = 0, r = 0; 874 875 if (left->mem_info) 876 l = left->mem_info->daddr.addr; 877 if (right->mem_info) 878 r = right->mem_info->daddr.addr; 879 880 return (int64_t)(r - l); 881} 882 883static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf, 884 size_t size, unsigned int width) 885{ 886 uint64_t addr = 0; 887 struct map *map = NULL; 888 struct symbol *sym = NULL; 889 890 if (he->mem_info) { 891 addr = he->mem_info->daddr.addr; 892 map = he->mem_info->daddr.map; 893 sym = he->mem_info->daddr.sym; 894 } 895 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size, 896 width); 897} 898 899int64_t 900sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right) 901{ 902 uint64_t l = 0, r = 0; 903 904 if (left->mem_info) 905 l = left->mem_info->iaddr.addr; 906 if (right->mem_info) 907 r = right->mem_info->iaddr.addr; 908 909 return (int64_t)(r - l); 910} 911 912static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf, 913 size_t size, unsigned int width) 914{ 915 uint64_t addr = 0; 916 struct map *map = NULL; 917 struct symbol *sym = NULL; 918 919 if (he->mem_info) { 920 addr = he->mem_info->iaddr.addr; 921 map = he->mem_info->iaddr.map; 922 sym = he->mem_info->iaddr.sym; 923 } 924 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size, 925 width); 926} 927 928static int64_t 929sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right) 930{ 931 struct map *map_l = NULL; 932 struct map *map_r = NULL; 933 934 if (left->mem_info) 935 map_l = left->mem_info->daddr.map; 936 if (right->mem_info) 937 map_r = right->mem_info->daddr.map; 938 939 return _sort__dso_cmp(map_l, map_r); 940} 941 942static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf, 943 size_t size, unsigned int width) 944{ 945 struct map *map = NULL; 946 947 if (he->mem_info) 948 map = he->mem_info->daddr.map; 949 950 return _hist_entry__dso_snprintf(map, bf, size, width); 951} 952 953static int64_t 954sort__locked_cmp(struct hist_entry *left, struct hist_entry *right) 955{ 956 union perf_mem_data_src data_src_l; 957 union perf_mem_data_src data_src_r; 958 959 if (left->mem_info) 960 data_src_l = left->mem_info->data_src; 961 else 962 data_src_l.mem_lock = PERF_MEM_LOCK_NA; 963 964 if (right->mem_info) 965 data_src_r = right->mem_info->data_src; 966 else 967 data_src_r.mem_lock = PERF_MEM_LOCK_NA; 968 969 return (int64_t)(data_src_r.mem_lock - 
data_src_l.mem_lock); 970} 971 972static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf, 973 size_t size, unsigned int width) 974{ 975 char out[10]; 976 977 perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info); 978 return repsep_snprintf(bf, size, "%.*s", width, out); 979} 980 981static int64_t 982sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right) 983{ 984 union perf_mem_data_src data_src_l; 985 union perf_mem_data_src data_src_r; 986 987 if (left->mem_info) 988 data_src_l = left->mem_info->data_src; 989 else 990 data_src_l.mem_dtlb = PERF_MEM_TLB_NA; 991 992 if (right->mem_info) 993 data_src_r = right->mem_info->data_src; 994 else 995 data_src_r.mem_dtlb = PERF_MEM_TLB_NA; 996 997 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb); 998} 999 1000static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf, 1001 size_t size, unsigned int width) 1002{ 1003 char out[64]; 1004 1005 perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info); 1006 return repsep_snprintf(bf, size, "%-*s", width, out); 1007} 1008 1009static int64_t 1010sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right) 1011{ 1012 union perf_mem_data_src data_src_l; 1013 union perf_mem_data_src data_src_r; 1014 1015 if (left->mem_info) 1016 data_src_l = left->mem_info->data_src; 1017 else 1018 data_src_l.mem_lvl = PERF_MEM_LVL_NA; 1019 1020 if (right->mem_info) 1021 data_src_r = right->mem_info->data_src; 1022 else 1023 data_src_r.mem_lvl = PERF_MEM_LVL_NA; 1024 1025 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl); 1026} 1027 1028static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf, 1029 size_t size, unsigned int width) 1030{ 1031 char out[64]; 1032 1033 perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info); 1034 return repsep_snprintf(bf, size, "%-*s", width, out); 1035} 1036 1037static int64_t 1038sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right) 1039{ 1040 union perf_mem_data_src data_src_l; 1041 union perf_mem_data_src data_src_r; 1042 1043 if (left->mem_info) 1044 data_src_l = left->mem_info->data_src; 1045 else 1046 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA; 1047 1048 if (right->mem_info) 1049 data_src_r = right->mem_info->data_src; 1050 else 1051 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA; 1052 1053 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop); 1054} 1055 1056static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf, 1057 size_t size, unsigned int width) 1058{ 1059 char out[64]; 1060 1061 perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info); 1062 return repsep_snprintf(bf, size, "%-*s", width, out); 1063} 1064 1065int64_t 1066sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right) 1067{ 1068 u64 l, r; 1069 struct map *l_map, *r_map; 1070 1071 if (!left->mem_info) return -1; 1072 if (!right->mem_info) return 1; 1073 1074 /* group event types together */ 1075 if (left->cpumode > right->cpumode) return -1; 1076 if (left->cpumode < right->cpumode) return 1; 1077 1078 l_map = left->mem_info->daddr.map; 1079 r_map = right->mem_info->daddr.map; 1080 1081 /* if both are NULL, jump to sort on al_addr instead */ 1082 if (!l_map && !r_map) 1083 goto addr; 1084 1085 if (!l_map) return -1; 1086 if (!r_map) return 1; 1087 1088 if (l_map->maj > r_map->maj) return -1; 1089 if (l_map->maj < r_map->maj) return 1; 1090 1091 if (l_map->min > r_map->min) return -1; 1092 if (l_map->min < r_map->min) return 1; 1093 1094 if (l_map->ino > r_map->ino) return -1; 1095 if (l_map->ino < r_map->ino) return 
1; 1096 1097 if (l_map->ino_generation > r_map->ino_generation) return -1; 1098 if (l_map->ino_generation < r_map->ino_generation) return 1; 1099 1100 /* 1101 * Addresses with no major/minor numbers are assumed to be 1102 * anonymous in userspace. Sort those on pid then address. 1103 * 1104 * The kernel and non-zero major/minor mapped areas are 1105 * assumed to be unity mapped. Sort those on address. 1106 */ 1107 1108 if ((left->cpumode != PERF_RECORD_MISC_KERNEL) && 1109 (!(l_map->flags & MAP_SHARED)) && 1110 !l_map->maj && !l_map->min && !l_map->ino && 1111 !l_map->ino_generation) { 1112 /* userspace anonymous */ 1113 1114 if (left->thread->pid_ > right->thread->pid_) return -1; 1115 if (left->thread->pid_ < right->thread->pid_) return 1; 1116 } 1117 1118addr: 1119 /* al_addr does all the right addr - start + offset calculations */ 1120 l = cl_address(left->mem_info->daddr.al_addr); 1121 r = cl_address(right->mem_info->daddr.al_addr); 1122 1123 if (l > r) return -1; 1124 if (l < r) return 1; 1125 1126 return 0; 1127} 1128 1129static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf, 1130 size_t size, unsigned int width) 1131{ 1132 1133 uint64_t addr = 0; 1134 struct map *map = NULL; 1135 struct symbol *sym = NULL; 1136 char level = he->level; 1137 1138 if (he->mem_info) { 1139 addr = cl_address(he->mem_info->daddr.al_addr); 1140 map = he->mem_info->daddr.map; 1141 sym = he->mem_info->daddr.sym; 1142 1143 /* print [s] for shared data mmaps */ 1144 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) && 1145 map && (map->type == MAP__VARIABLE) && 1146 (map->flags & MAP_SHARED) && 1147 (map->maj || map->min || map->ino || 1148 map->ino_generation)) 1149 level = 's'; 1150 else if (!map) 1151 level = 'X'; 1152 } 1153 return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size, 1154 width); 1155} 1156 1157struct sort_entry sort_mispredict = { 1158 .se_header = "Branch Mispredicted", 1159 .se_cmp = sort__mispredict_cmp, 1160 .se_snprintf = hist_entry__mispredict_snprintf, 1161 .se_width_idx = HISTC_MISPREDICT, 1162}; 1163 1164static u64 he_weight(struct hist_entry *he) 1165{ 1166 return he->stat.nr_events ? 
he->stat.weight / he->stat.nr_events : 0; 1167} 1168 1169static int64_t 1170sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right) 1171{ 1172 return he_weight(left) - he_weight(right); 1173} 1174 1175static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf, 1176 size_t size, unsigned int width) 1177{ 1178 return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he)); 1179} 1180 1181struct sort_entry sort_local_weight = { 1182 .se_header = "Local Weight", 1183 .se_cmp = sort__local_weight_cmp, 1184 .se_snprintf = hist_entry__local_weight_snprintf, 1185 .se_width_idx = HISTC_LOCAL_WEIGHT, 1186}; 1187 1188static int64_t 1189sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right) 1190{ 1191 return left->stat.weight - right->stat.weight; 1192} 1193 1194static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf, 1195 size_t size, unsigned int width) 1196{ 1197 return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight); 1198} 1199 1200struct sort_entry sort_global_weight = { 1201 .se_header = "Weight", 1202 .se_cmp = sort__global_weight_cmp, 1203 .se_snprintf = hist_entry__global_weight_snprintf, 1204 .se_width_idx = HISTC_GLOBAL_WEIGHT, 1205}; 1206 1207struct sort_entry sort_mem_daddr_sym = { 1208 .se_header = "Data Symbol", 1209 .se_cmp = sort__daddr_cmp, 1210 .se_snprintf = hist_entry__daddr_snprintf, 1211 .se_width_idx = HISTC_MEM_DADDR_SYMBOL, 1212}; 1213 1214struct sort_entry sort_mem_iaddr_sym = { 1215 .se_header = "Code Symbol", 1216 .se_cmp = sort__iaddr_cmp, 1217 .se_snprintf = hist_entry__iaddr_snprintf, 1218 .se_width_idx = HISTC_MEM_IADDR_SYMBOL, 1219}; 1220 1221struct sort_entry sort_mem_daddr_dso = { 1222 .se_header = "Data Object", 1223 .se_cmp = sort__dso_daddr_cmp, 1224 .se_snprintf = hist_entry__dso_daddr_snprintf, 1225 .se_width_idx = HISTC_MEM_DADDR_DSO, 1226}; 1227 1228struct sort_entry sort_mem_locked = { 1229 .se_header = "Locked", 1230 .se_cmp = sort__locked_cmp, 1231 .se_snprintf = hist_entry__locked_snprintf, 1232 .se_width_idx = HISTC_MEM_LOCKED, 1233}; 1234 1235struct sort_entry sort_mem_tlb = { 1236 .se_header = "TLB access", 1237 .se_cmp = sort__tlb_cmp, 1238 .se_snprintf = hist_entry__tlb_snprintf, 1239 .se_width_idx = HISTC_MEM_TLB, 1240}; 1241 1242struct sort_entry sort_mem_lvl = { 1243 .se_header = "Memory access", 1244 .se_cmp = sort__lvl_cmp, 1245 .se_snprintf = hist_entry__lvl_snprintf, 1246 .se_width_idx = HISTC_MEM_LVL, 1247}; 1248 1249struct sort_entry sort_mem_snoop = { 1250 .se_header = "Snoop", 1251 .se_cmp = sort__snoop_cmp, 1252 .se_snprintf = hist_entry__snoop_snprintf, 1253 .se_width_idx = HISTC_MEM_SNOOP, 1254}; 1255 1256struct sort_entry sort_mem_dcacheline = { 1257 .se_header = "Data Cacheline", 1258 .se_cmp = sort__dcacheline_cmp, 1259 .se_snprintf = hist_entry__dcacheline_snprintf, 1260 .se_width_idx = HISTC_MEM_DCACHELINE, 1261}; 1262 1263static int64_t 1264sort__abort_cmp(struct hist_entry *left, struct hist_entry *right) 1265{ 1266 if (!left->branch_info || !right->branch_info) 1267 return cmp_null(left->branch_info, right->branch_info); 1268 1269 return left->branch_info->flags.abort != 1270 right->branch_info->flags.abort; 1271} 1272 1273static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf, 1274 size_t size, unsigned int width) 1275{ 1276 static const char *out = "N/A"; 1277 1278 if (he->branch_info) { 1279 if (he->branch_info->flags.abort) 1280 out = "A"; 1281 else 1282 out = "."; 1283 } 1284 1285 return repsep_snprintf(bf, size, 
"%-*s", width, out); 1286} 1287 1288struct sort_entry sort_abort = { 1289 .se_header = "Transaction abort", 1290 .se_cmp = sort__abort_cmp, 1291 .se_snprintf = hist_entry__abort_snprintf, 1292 .se_width_idx = HISTC_ABORT, 1293}; 1294 1295static int64_t 1296sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right) 1297{ 1298 if (!left->branch_info || !right->branch_info) 1299 return cmp_null(left->branch_info, right->branch_info); 1300 1301 return left->branch_info->flags.in_tx != 1302 right->branch_info->flags.in_tx; 1303} 1304 1305static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf, 1306 size_t size, unsigned int width) 1307{ 1308 static const char *out = "N/A"; 1309 1310 if (he->branch_info) { 1311 if (he->branch_info->flags.in_tx) 1312 out = "T"; 1313 else 1314 out = "."; 1315 } 1316 1317 return repsep_snprintf(bf, size, "%-*s", width, out); 1318} 1319 1320struct sort_entry sort_in_tx = { 1321 .se_header = "Branch in transaction", 1322 .se_cmp = sort__in_tx_cmp, 1323 .se_snprintf = hist_entry__in_tx_snprintf, 1324 .se_width_idx = HISTC_IN_TX, 1325}; 1326 1327static int64_t 1328sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right) 1329{ 1330 return left->transaction - right->transaction; 1331} 1332 1333static inline char *add_str(char *p, const char *str) 1334{ 1335 strcpy(p, str); 1336 return p + strlen(str); 1337} 1338 1339static struct txbit { 1340 unsigned flag; 1341 const char *name; 1342 int skip_for_len; 1343} txbits[] = { 1344 { PERF_TXN_ELISION, "EL ", 0 }, 1345 { PERF_TXN_TRANSACTION, "TX ", 1 }, 1346 { PERF_TXN_SYNC, "SYNC ", 1 }, 1347 { PERF_TXN_ASYNC, "ASYNC ", 0 }, 1348 { PERF_TXN_RETRY, "RETRY ", 0 }, 1349 { PERF_TXN_CONFLICT, "CON ", 0 }, 1350 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 }, 1351 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 }, 1352 { 0, NULL, 0 } 1353}; 1354 1355int hist_entry__transaction_len(void) 1356{ 1357 int i; 1358 int len = 0; 1359 1360 for (i = 0; txbits[i].name; i++) { 1361 if (!txbits[i].skip_for_len) 1362 len += strlen(txbits[i].name); 1363 } 1364 len += 4; /* :XX<space> */ 1365 return len; 1366} 1367 1368static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf, 1369 size_t size, unsigned int width) 1370{ 1371 u64 t = he->transaction; 1372 char buf[128]; 1373 char *p = buf; 1374 int i; 1375 1376 buf[0] = 0; 1377 for (i = 0; txbits[i].name; i++) 1378 if (txbits[i].flag & t) 1379 p = add_str(p, txbits[i].name); 1380 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC))) 1381 p = add_str(p, "NEITHER "); 1382 if (t & PERF_TXN_ABORT_MASK) { 1383 sprintf(p, ":%" PRIx64, 1384 (t & PERF_TXN_ABORT_MASK) >> 1385 PERF_TXN_ABORT_SHIFT); 1386 p += strlen(p); 1387 } 1388 1389 return repsep_snprintf(bf, size, "%-*s", width, buf); 1390} 1391 1392struct sort_entry sort_transaction = { 1393 .se_header = "Transaction ", 1394 .se_cmp = sort__transaction_cmp, 1395 .se_snprintf = hist_entry__transaction_snprintf, 1396 .se_width_idx = HISTC_TRANSACTION, 1397}; 1398 1399struct sort_dimension { 1400 const char *name; 1401 struct sort_entry *entry; 1402 int taken; 1403}; 1404 1405#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) } 1406 1407static struct sort_dimension common_sort_dimensions[] = { 1408 DIM(SORT_PID, "pid", sort_thread), 1409 DIM(SORT_COMM, "comm", sort_comm), 1410 DIM(SORT_DSO, "dso", sort_dso), 1411 DIM(SORT_SYM, "symbol", sort_sym), 1412 DIM(SORT_PARENT, "parent", sort_parent), 1413 DIM(SORT_CPU, "cpu", sort_cpu), 1414 DIM(SORT_SOCKET, "socket", sort_socket), 1415 DIM(SORT_SRCLINE, 
"srcline", sort_srcline), 1416 DIM(SORT_SRCFILE, "srcfile", sort_srcfile), 1417 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight), 1418 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight), 1419 DIM(SORT_TRANSACTION, "transaction", sort_transaction), 1420 DIM(SORT_TRACE, "trace", sort_trace), 1421}; 1422 1423#undef DIM 1424 1425#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) } 1426 1427static struct sort_dimension bstack_sort_dimensions[] = { 1428 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from), 1429 DIM(SORT_DSO_TO, "dso_to", sort_dso_to), 1430 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from), 1431 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to), 1432 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict), 1433 DIM(SORT_IN_TX, "in_tx", sort_in_tx), 1434 DIM(SORT_ABORT, "abort", sort_abort), 1435 DIM(SORT_CYCLES, "cycles", sort_cycles), 1436 DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from), 1437 DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to), 1438}; 1439 1440#undef DIM 1441 1442#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) } 1443 1444static struct sort_dimension memory_sort_dimensions[] = { 1445 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym), 1446 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym), 1447 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso), 1448 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked), 1449 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb), 1450 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl), 1451 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop), 1452 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline), 1453}; 1454 1455#undef DIM 1456 1457struct hpp_dimension { 1458 const char *name; 1459 struct perf_hpp_fmt *fmt; 1460 int taken; 1461}; 1462 1463#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], } 1464 1465static struct hpp_dimension hpp_sort_dimensions[] = { 1466 DIM(PERF_HPP__OVERHEAD, "overhead"), 1467 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"), 1468 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"), 1469 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"), 1470 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"), 1471 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"), 1472 DIM(PERF_HPP__SAMPLES, "sample"), 1473 DIM(PERF_HPP__PERIOD, "period"), 1474}; 1475 1476#undef DIM 1477 1478struct hpp_sort_entry { 1479 struct perf_hpp_fmt hpp; 1480 struct sort_entry *se; 1481}; 1482 1483void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists) 1484{ 1485 struct hpp_sort_entry *hse; 1486 1487 if (!perf_hpp__is_sort_entry(fmt)) 1488 return; 1489 1490 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1491 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name)); 1492} 1493 1494static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 1495 struct hists *hists, int line __maybe_unused, 1496 int *span __maybe_unused) 1497{ 1498 struct hpp_sort_entry *hse; 1499 size_t len = fmt->user_len; 1500 1501 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1502 1503 if (!len) 1504 len = hists__col_len(hists, hse->se->se_width_idx); 1505 1506 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name); 1507} 1508 1509static int __sort__hpp_width(struct perf_hpp_fmt *fmt, 1510 struct perf_hpp *hpp __maybe_unused, 1511 struct hists *hists) 1512{ 1513 struct hpp_sort_entry *hse; 1514 size_t len = fmt->user_len; 1515 1516 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1517 1518 if (!len) 1519 len = 
hists__col_len(hists, hse->se->se_width_idx); 1520 1521 return len; 1522} 1523 1524static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 1525 struct hist_entry *he) 1526{ 1527 struct hpp_sort_entry *hse; 1528 size_t len = fmt->user_len; 1529 1530 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1531 1532 if (!len) 1533 len = hists__col_len(he->hists, hse->se->se_width_idx); 1534 1535 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len); 1536} 1537 1538static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt, 1539 struct hist_entry *a, struct hist_entry *b) 1540{ 1541 struct hpp_sort_entry *hse; 1542 1543 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1544 return hse->se->se_cmp(a, b); 1545} 1546 1547static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt, 1548 struct hist_entry *a, struct hist_entry *b) 1549{ 1550 struct hpp_sort_entry *hse; 1551 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *); 1552 1553 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1554 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp; 1555 return collapse_fn(a, b); 1556} 1557 1558static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt, 1559 struct hist_entry *a, struct hist_entry *b) 1560{ 1561 struct hpp_sort_entry *hse; 1562 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *); 1563 1564 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1565 sort_fn = hse->se->se_sort ?: hse->se->se_cmp; 1566 return sort_fn(a, b); 1567} 1568 1569bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format) 1570{ 1571 return format->header == __sort__hpp_header; 1572} 1573 1574#define MK_SORT_ENTRY_CHK(key) \ 1575bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \ 1576{ \ 1577 struct hpp_sort_entry *hse; \ 1578 \ 1579 if (!perf_hpp__is_sort_entry(fmt)) \ 1580 return false; \ 1581 \ 1582 hse = container_of(fmt, struct hpp_sort_entry, hpp); \ 1583 return hse->se == &sort_ ## key ; \ 1584} 1585 1586MK_SORT_ENTRY_CHK(trace) 1587MK_SORT_ENTRY_CHK(srcline) 1588MK_SORT_ENTRY_CHK(srcfile) 1589MK_SORT_ENTRY_CHK(thread) 1590MK_SORT_ENTRY_CHK(comm) 1591MK_SORT_ENTRY_CHK(dso) 1592MK_SORT_ENTRY_CHK(sym) 1593 1594 1595static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) 1596{ 1597 struct hpp_sort_entry *hse_a; 1598 struct hpp_sort_entry *hse_b; 1599 1600 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b)) 1601 return false; 1602 1603 hse_a = container_of(a, struct hpp_sort_entry, hpp); 1604 hse_b = container_of(b, struct hpp_sort_entry, hpp); 1605 1606 return hse_a->se == hse_b->se; 1607} 1608 1609static void hse_free(struct perf_hpp_fmt *fmt) 1610{ 1611 struct hpp_sort_entry *hse; 1612 1613 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1614 free(hse); 1615} 1616 1617static struct hpp_sort_entry * 1618__sort_dimension__alloc_hpp(struct sort_dimension *sd, int level) 1619{ 1620 struct hpp_sort_entry *hse; 1621 1622 hse = malloc(sizeof(*hse)); 1623 if (hse == NULL) { 1624 pr_err("Memory allocation failed\n"); 1625 return NULL; 1626 } 1627 1628 hse->se = sd->entry; 1629 hse->hpp.name = sd->entry->se_header; 1630 hse->hpp.header = __sort__hpp_header; 1631 hse->hpp.width = __sort__hpp_width; 1632 hse->hpp.entry = __sort__hpp_entry; 1633 hse->hpp.color = NULL; 1634 1635 hse->hpp.cmp = __sort__hpp_cmp; 1636 hse->hpp.collapse = __sort__hpp_collapse; 1637 hse->hpp.sort = __sort__hpp_sort; 1638 hse->hpp.equal = __sort__hpp_equal; 1639 hse->hpp.free = hse_free; 1640 1641 INIT_LIST_HEAD(&hse->hpp.list); 1642 
INIT_LIST_HEAD(&hse->hpp.sort_list); 1643 hse->hpp.elide = false; 1644 hse->hpp.len = 0; 1645 hse->hpp.user_len = 0; 1646 hse->hpp.level = level; 1647 1648 return hse; 1649} 1650 1651static void hpp_free(struct perf_hpp_fmt *fmt) 1652{ 1653 free(fmt); 1654} 1655 1656static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd, 1657 int level) 1658{ 1659 struct perf_hpp_fmt *fmt; 1660 1661 fmt = memdup(hd->fmt, sizeof(*fmt)); 1662 if (fmt) { 1663 INIT_LIST_HEAD(&fmt->list); 1664 INIT_LIST_HEAD(&fmt->sort_list); 1665 fmt->free = hpp_free; 1666 fmt->level = level; 1667 } 1668 1669 return fmt; 1670} 1671 1672int hist_entry__filter(struct hist_entry *he, int type, const void *arg) 1673{ 1674 struct perf_hpp_fmt *fmt; 1675 struct hpp_sort_entry *hse; 1676 int ret = -1; 1677 int r; 1678 1679 perf_hpp_list__for_each_format(he->hpp_list, fmt) { 1680 if (!perf_hpp__is_sort_entry(fmt)) 1681 continue; 1682 1683 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1684 if (hse->se->se_filter == NULL) 1685 continue; 1686 1687 /* 1688 * hist entry is filtered if any of sort key in the hpp list 1689 * is applied. But it should skip non-matched filter types. 1690 */ 1691 r = hse->se->se_filter(he, type, arg); 1692 if (r >= 0) { 1693 if (ret < 0) 1694 ret = 0; 1695 ret |= r; 1696 } 1697 } 1698 1699 return ret; 1700} 1701 1702static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd, 1703 struct perf_hpp_list *list, 1704 int level) 1705{ 1706 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level); 1707 1708 if (hse == NULL) 1709 return -1; 1710 1711 perf_hpp_list__register_sort_field(list, &hse->hpp); 1712 return 0; 1713} 1714 1715static int __sort_dimension__add_hpp_output(struct sort_dimension *sd, 1716 struct perf_hpp_list *list) 1717{ 1718 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0); 1719 1720 if (hse == NULL) 1721 return -1; 1722 1723 perf_hpp_list__column_register(list, &hse->hpp); 1724 return 0; 1725} 1726 1727struct hpp_dynamic_entry { 1728 struct perf_hpp_fmt hpp; 1729 struct perf_evsel *evsel; 1730 struct format_field *field; 1731 unsigned dynamic_len; 1732 bool raw_trace; 1733}; 1734 1735static int hde_width(struct hpp_dynamic_entry *hde) 1736{ 1737 if (!hde->hpp.len) { 1738 int len = hde->dynamic_len; 1739 int namelen = strlen(hde->field->name); 1740 int fieldlen = hde->field->size; 1741 1742 if (namelen > len) 1743 len = namelen; 1744 1745 if (!(hde->field->flags & FIELD_IS_STRING)) { 1746 /* length for print hex numbers */ 1747 fieldlen = hde->field->size * 2 + 2; 1748 } 1749 if (fieldlen > len) 1750 len = fieldlen; 1751 1752 hde->hpp.len = len; 1753 } 1754 return hde->hpp.len; 1755} 1756 1757static void update_dynamic_len(struct hpp_dynamic_entry *hde, 1758 struct hist_entry *he) 1759{ 1760 char *str, *pos; 1761 struct format_field *field = hde->field; 1762 size_t namelen; 1763 bool last = false; 1764 1765 if (hde->raw_trace) 1766 return; 1767 1768 /* parse pretty print result and update max length */ 1769 if (!he->trace_output) 1770 he->trace_output = get_trace_output(he); 1771 1772 namelen = strlen(field->name); 1773 str = he->trace_output; 1774 1775 while (str) { 1776 pos = strchr(str, ' '); 1777 if (pos == NULL) { 1778 last = true; 1779 pos = str + strlen(str); 1780 } 1781 1782 if (!strncmp(str, field->name, namelen)) { 1783 size_t len; 1784 1785 str += namelen + 1; 1786 len = pos - str; 1787 1788 if (len > hde->dynamic_len) 1789 hde->dynamic_len = len; 1790 break; 1791 } 1792 1793 if (last) 1794 str = NULL; 1795 else 1796 str = 
pos + 1; 1797 } 1798} 1799 1800static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 1801 struct hists *hists __maybe_unused, 1802 int line __maybe_unused, 1803 int *span __maybe_unused) 1804{ 1805 struct hpp_dynamic_entry *hde; 1806 size_t len = fmt->user_len; 1807 1808 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 1809 1810 if (!len) 1811 len = hde_width(hde); 1812 1813 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name); 1814} 1815 1816static int __sort__hde_width(struct perf_hpp_fmt *fmt, 1817 struct perf_hpp *hpp __maybe_unused, 1818 struct hists *hists __maybe_unused) 1819{ 1820 struct hpp_dynamic_entry *hde; 1821 size_t len = fmt->user_len; 1822 1823 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 1824 1825 if (!len) 1826 len = hde_width(hde); 1827 1828 return len; 1829} 1830 1831bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists) 1832{ 1833 struct hpp_dynamic_entry *hde; 1834 1835 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 1836 1837 return hists_to_evsel(hists) == hde->evsel; 1838} 1839 1840static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 1841 struct hist_entry *he) 1842{ 1843 struct hpp_dynamic_entry *hde; 1844 size_t len = fmt->user_len; 1845 char *str, *pos; 1846 struct format_field *field; 1847 size_t namelen; 1848 bool last = false; 1849 int ret; 1850 1851 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 1852 1853 if (!len) 1854 len = hde_width(hde); 1855 1856 if (hde->raw_trace) 1857 goto raw_field; 1858 1859 if (!he->trace_output) 1860 he->trace_output = get_trace_output(he); 1861 1862 field = hde->field; 1863 namelen = strlen(field->name); 1864 str = he->trace_output; 1865 1866 while (str) { 1867 pos = strchr(str, ' '); 1868 if (pos == NULL) { 1869 last = true; 1870 pos = str + strlen(str); 1871 } 1872 1873 if (!strncmp(str, field->name, namelen)) { 1874 str += namelen + 1; 1875 str = strndup(str, pos - str); 1876 1877 if (str == NULL) 1878 return scnprintf(hpp->buf, hpp->size, 1879 "%*.*s", len, len, "ERROR"); 1880 break; 1881 } 1882 1883 if (last) 1884 str = NULL; 1885 else 1886 str = pos + 1; 1887 } 1888 1889 if (str == NULL) { 1890 struct trace_seq seq; 1891raw_field: 1892 trace_seq_init(&seq); 1893 pevent_print_field(&seq, he->raw_data, hde->field); 1894 str = seq.buffer; 1895 } 1896 1897 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str); 1898 free(str); 1899 return ret; 1900} 1901 1902static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt, 1903 struct hist_entry *a, struct hist_entry *b) 1904{ 1905 struct hpp_dynamic_entry *hde; 1906 struct format_field *field; 1907 unsigned offset, size; 1908 1909 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 1910 1911 if (b == NULL) { 1912 update_dynamic_len(hde, a); 1913 return 0; 1914 } 1915 1916 field = hde->field; 1917 if (field->flags & FIELD_IS_DYNAMIC) { 1918 unsigned long long dyn; 1919 1920 pevent_read_number_field(field, a->raw_data, &dyn); 1921 offset = dyn & 0xffff; 1922 size = (dyn >> 16) & 0xffff; 1923 1924 /* record max width for output */ 1925 if (size > hde->dynamic_len) 1926 hde->dynamic_len = size; 1927 } else { 1928 offset = field->offset; 1929 size = field->size; 1930 } 1931 1932 return memcmp(a->raw_data + offset, b->raw_data + offset, size); 1933} 1934 1935bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt) 1936{ 1937 return fmt->cmp == __sort__hde_cmp; 1938} 1939 1940static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt 
*b) 1941{ 1942 struct hpp_dynamic_entry *hde_a; 1943 struct hpp_dynamic_entry *hde_b; 1944 1945 if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b)) 1946 return false; 1947 1948 hde_a = container_of(a, struct hpp_dynamic_entry, hpp); 1949 hde_b = container_of(b, struct hpp_dynamic_entry, hpp); 1950 1951 return hde_a->field == hde_b->field; 1952} 1953 1954static void hde_free(struct perf_hpp_fmt *fmt) 1955{ 1956 struct hpp_dynamic_entry *hde; 1957 1958 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 1959 free(hde); 1960} 1961 1962static struct hpp_dynamic_entry * 1963__alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field, 1964 int level) 1965{ 1966 struct hpp_dynamic_entry *hde; 1967 1968 hde = malloc(sizeof(*hde)); 1969 if (hde == NULL) { 1970 pr_debug("Memory allocation failed\n"); 1971 return NULL; 1972 } 1973 1974 hde->evsel = evsel; 1975 hde->field = field; 1976 hde->dynamic_len = 0; 1977 1978 hde->hpp.name = field->name; 1979 hde->hpp.header = __sort__hde_header; 1980 hde->hpp.width = __sort__hde_width; 1981 hde->hpp.entry = __sort__hde_entry; 1982 hde->hpp.color = NULL; 1983 1984 hde->hpp.cmp = __sort__hde_cmp; 1985 hde->hpp.collapse = __sort__hde_cmp; 1986 hde->hpp.sort = __sort__hde_cmp; 1987 hde->hpp.equal = __sort__hde_equal; 1988 hde->hpp.free = hde_free; 1989 1990 INIT_LIST_HEAD(&hde->hpp.list); 1991 INIT_LIST_HEAD(&hde->hpp.sort_list); 1992 hde->hpp.elide = false; 1993 hde->hpp.len = 0; 1994 hde->hpp.user_len = 0; 1995 hde->hpp.level = level; 1996 1997 return hde; 1998} 1999 2000struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt) 2001{ 2002 struct perf_hpp_fmt *new_fmt = NULL; 2003 2004 if (perf_hpp__is_sort_entry(fmt)) { 2005 struct hpp_sort_entry *hse, *new_hse; 2006 2007 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2008 new_hse = memdup(hse, sizeof(*hse)); 2009 if (new_hse) 2010 new_fmt = &new_hse->hpp; 2011 } else if (perf_hpp__is_dynamic_entry(fmt)) { 2012 struct hpp_dynamic_entry *hde, *new_hde; 2013 2014 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2015 new_hde = memdup(hde, sizeof(*hde)); 2016 if (new_hde) 2017 new_fmt = &new_hde->hpp; 2018 } else { 2019 new_fmt = memdup(fmt, sizeof(*fmt)); 2020 } 2021 2022 INIT_LIST_HEAD(&new_fmt->list); 2023 INIT_LIST_HEAD(&new_fmt->sort_list); 2024 2025 return new_fmt; 2026} 2027 2028static int parse_field_name(char *str, char **event, char **field, char **opt) 2029{ 2030 char *event_name, *field_name, *opt_name; 2031 2032 event_name = str; 2033 field_name = strchr(str, '.'); 2034 2035 if (field_name) { 2036 *field_name++ = '\0'; 2037 } else { 2038 event_name = NULL; 2039 field_name = str; 2040 } 2041 2042 opt_name = strchr(field_name, '/'); 2043 if (opt_name) 2044 *opt_name++ = '\0'; 2045 2046 *event = event_name; 2047 *field = field_name; 2048 *opt = opt_name; 2049 2050 return 0; 2051} 2052 2053/* find match evsel using a given event name. The event name can be: 2054 * 1. '%' + event index (e.g. '%1' for first event) 2055 * 2. full event name (e.g. sched:sched_switch) 2056 * 3. 

/*
 * Find a matching evsel using a given event name. The event name can be:
 * 1. '%' + event index (e.g. '%1' for first event)
 * 2. full event name (e.g. sched:sched_switch)
 * 3. partial event name (should not contain ':')
 */
static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
{
	struct perf_evsel *evsel = NULL;
	struct perf_evsel *pos;
	bool full_name;

	/* case 1 */
	if (event_name[0] == '%') {
		int nr = strtol(event_name+1, NULL, 0);

		if (nr > evlist->nr_entries)
			return NULL;

		evsel = perf_evlist__first(evlist);
		while (--nr > 0)
			evsel = perf_evsel__next(evsel);

		return evsel;
	}

	full_name = !!strchr(event_name, ':');
	evlist__for_each_entry(evlist, pos) {
		/* case 2 */
		if (full_name && !strcmp(pos->name, event_name))
			return pos;
		/* case 3 */
		if (!full_name && strstr(pos->name, event_name)) {
			if (evsel) {
				pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
					 event_name, evsel->name, pos->name);
				return NULL;
			}
			evsel = pos;
		}
	}

	return evsel;
}

static int __dynamic_dimension__add(struct perf_evsel *evsel,
				    struct format_field *field,
				    bool raw_trace, int level)
{
	struct hpp_dynamic_entry *hde;

	hde = __alloc_dynamic_entry(evsel, field, level);
	if (hde == NULL)
		return -ENOMEM;

	hde->raw_trace = raw_trace;

	perf_hpp__register_sort_field(&hde->hpp);
	return 0;
}

static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace, int level)
{
	int ret;
	struct format_field *field;

	field = evsel->tp_format->format.fields;
	while (field) {
		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			return ret;

		field = field->next;
	}
	return 0;
}

static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace,
				  int level)
{
	int ret;
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		ret = add_evsel_fields(evsel, raw_trace, level);
		if (ret < 0)
			return ret;
	}
	return 0;
}

static int add_all_matching_fields(struct perf_evlist *evlist,
				   char *field_name, bool raw_trace, int level)
{
	int ret = -ESRCH;
	struct perf_evsel *evsel;
	struct format_field *field;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		field = pevent_find_any_field(evsel->tp_format, field_name);
		if (field == NULL)
			continue;

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			break;
	}
	return ret;
}
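
/*
 * For illustration, add_dynamic_entry() below handles --sort tokens of the
 * following forms (this list is a summary of the cases in the code):
 *
 *	"trace_fields"		- every field of every tracepoint event
 *	"<field>"		- the field in each event that has it
 *	"<event>.<field>"	- one field of one event
 *	"<event>.*"		- every field of one event
 *
 * and any of them may carry a "/raw" suffix to print the field unformatted.
 */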

static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok,
			     int level)
{
	char *str, *event_name, *field_name, *opt_name;
	struct perf_evsel *evsel;
	struct format_field *field;
	bool raw_trace = symbol_conf.raw_trace;
	int ret = 0;

	if (evlist == NULL)
		return -ENOENT;

	str = strdup(tok);
	if (str == NULL)
		return -ENOMEM;

	if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
		ret = -EINVAL;
		goto out;
	}

	if (opt_name) {
		if (strcmp(opt_name, "raw")) {
			pr_debug("unsupported field option %s\n", opt_name);
			ret = -EINVAL;
			goto out;
		}
		raw_trace = true;
	}

	if (!strcmp(field_name, "trace_fields")) {
		ret = add_all_dynamic_fields(evlist, raw_trace, level);
		goto out;
	}

	if (event_name == NULL) {
		ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
		goto out;
	}

	evsel = find_evsel(evlist, event_name);
	if (evsel == NULL) {
		pr_debug("Cannot find event: %s\n", event_name);
		ret = -ENOENT;
		goto out;
	}

	if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
		pr_debug("%s is not a tracepoint event\n", event_name);
		ret = -EINVAL;
		goto out;
	}

	if (!strcmp(field_name, "*")) {
		ret = add_evsel_fields(evsel, raw_trace, level);
	} else {
		field = pevent_find_any_field(evsel->tp_format, field_name);
		if (field == NULL) {
			pr_debug("Cannot find event field for %s.%s\n",
				 event_name, field_name);
			ret = -ENOENT;
			goto out;
		}

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
	}

out:
	free(str);
	return ret;
}

static int __sort_dimension__add(struct sort_dimension *sd,
				 struct perf_hpp_list *list,
				 int level)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
		return -1;

	if (sd->entry->se_collapse)
		list->need_collapse = 1;

	sd->taken = 1;

	return 0;
}

static int __hpp_dimension__add(struct hpp_dimension *hd,
				struct perf_hpp_list *list,
				int level)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, level);
	if (!fmt)
		return -1;

	hd->taken = 1;
	perf_hpp_list__register_sort_field(list, fmt);
	return 0;
}

static int __sort_dimension__add_output(struct perf_hpp_list *list,
					struct sort_dimension *sd)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_output(sd, list) < 0)
		return -1;

	sd->taken = 1;
	return 0;
}

static int __hpp_dimension__add_output(struct perf_hpp_list *list,
				       struct hpp_dimension *hd)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, 0);
	if (!fmt)
		return -1;

	hd->taken = 1;
	perf_hpp_list__column_register(list, fmt);
	return 0;
}

int hpp_dimension__add_output(unsigned col)
{
	BUG_ON(col >= PERF_HPP__MAX_INDEX);
	return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
}
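
/*
 * Roughly, sort_dimension__add() below resolves a --sort token by trying, in
 * order: the common sort dimensions, the hpp (overhead/period style) columns,
 * the branch-stack dimensions, the memory dimensions and, last of all, a
 * dynamic tracepoint field.  Matching is by prefix (strncasecmp), so e.g. an
 * abbreviated "sym" resolves to the common "symbol" key before any
 * tracepoint field of a similar name is considered.
 */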

int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
			struct perf_evlist *evlist,
			int level)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sd->entry == &sort_parent) {
			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
			if (ret) {
				char err[BUFSIZ];

				regerror(ret, &parent_regex, err, sizeof(err));
				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
				return -EINVAL;
			}
			list->parent = 1;
		} else if (sd->entry == &sort_sym) {
			list->sym = 1;
			/*
			 * perf diff displays the performance difference among
			 * two or more perf.data files. Those files could come
			 * from different binaries, so we should not compare
			 * their ips, but the symbol names.
			 */
			if (sort__mode == SORT_MODE__DIFF)
				sd->entry->se_collapse = sort__sym_sort;

		} else if (sd->entry == &sort_dso) {
			list->dso = 1;
		} else if (sd->entry == &sort_socket) {
			list->socket = 1;
		} else if (sd->entry == &sort_thread) {
			list->thread = 1;
		} else if (sd->entry == &sort_comm) {
			list->comm = 1;
		}

		return __sort_dimension__add(sd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add(hd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__BRANCH)
			return -EINVAL;

		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__MEMORY)
			return -EINVAL;

		if (sd->entry == &sort_mem_dcacheline && cacheline_size == 0)
			return -EINVAL;

		if (sd->entry == &sort_mem_daddr_sym)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	if (!add_dynamic_entry(evlist, tok, level))
		return 0;

	return -ESRCH;
}

static int setup_sort_list(struct perf_hpp_list *list, char *str,
			   struct perf_evlist *evlist)
{
	char *tmp, *tok;
	int ret = 0;
	int level = 0;
	int next_level = 1;
	bool in_group = false;

	do {
		tok = str;
		tmp = strpbrk(str, "{}, ");
		if (tmp) {
			if (in_group)
				next_level = level;
			else
				next_level = level + 1;

			if (*tmp == '{')
				in_group = true;
			else if (*tmp == '}')
				in_group = false;

			*tmp = '\0';
			str = tmp + 1;
		}

		if (*tok) {
			ret = sort_dimension__add(list, tok, evlist, level);
			if (ret == -EINVAL) {
				if (!cacheline_size && !strncasecmp(tok, "dcacheline", strlen(tok)))
					error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
				else
					error("Invalid --sort key: `%s'", tok);
				break;
			} else if (ret == -ESRCH) {
				error("Unknown --sort key: `%s'", tok);
				break;
			}
		}

		level = next_level;
	} while (tmp);

	return ret;
}
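
/*
 * A sketch of how setup_sort_list() above assigns levels: each ','/' '
 * separated key normally starts a new level, while keys wrapped in '{}'
 * share one level, e.g.
 *
 *	--sort 'comm,{dso,sym}'
 *
 * puts "dso" and "sym" on the same level, after "comm", which is how
 * grouped keys end up on a single row of --hierarchy output.
 */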

static const char *get_default_sort_order(struct perf_evlist *evlist)
{
	const char *default_sort_orders[] = {
		default_sort_order,
		default_branch_sort_order,
		default_mem_sort_order,
		default_top_sort_order,
		default_diff_sort_order,
		default_tracepoint_sort_order,
	};
	bool use_trace = true;
	struct perf_evsel *evsel;

	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));

	if (evlist == NULL)
		goto out_no_evlist;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
			use_trace = false;
			break;
		}
	}

	if (use_trace) {
		sort__mode = SORT_MODE__TRACEPOINT;
		if (symbol_conf.raw_trace)
			return "trace_fields";
	}
out_no_evlist:
	return default_sort_orders[sort__mode];
}

static int setup_sort_order(struct perf_evlist *evlist)
{
	char *new_sort_order;

	/*
	 * Append '+'-prefixed sort order to the default sort
	 * order string.
	 */
	if (!sort_order || is_strict_order(sort_order))
		return 0;

	if (sort_order[1] == '\0') {
		error("Invalid --sort key: `+'");
		return -EINVAL;
	}

	/*
	 * We allocate a new sort_order string, but we never free it,
	 * because it is referenced throughout the rest of the code.
	 */
	if (asprintf(&new_sort_order, "%s,%s",
		     get_default_sort_order(evlist), sort_order + 1) < 0) {
		error("Not enough memory to set up --sort");
		return -ENOMEM;
	}

	sort_order = new_sort_order;
	return 0;
}

/*
 * Adds a 'pre,' prefix to 'str' if 'pre' is
 * not already part of 'str'.
 */
static char *prefix_if_not_in(const char *pre, char *str)
{
	char *n;

	if (!str || strstr(str, pre))
		return str;

	if (asprintf(&n, "%s,%s", pre, str) < 0)
		return NULL;

	free(str);
	return n;
}

static char *setup_overhead(char *keys)
{
	if (sort__mode == SORT_MODE__DIFF)
		return keys;

	keys = prefix_if_not_in("overhead", keys);

	if (symbol_conf.cumulate_callchain)
		keys = prefix_if_not_in("overhead_children", keys);

	return keys;
}
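
/*
 * For illustration, setup_overhead() above makes sure the overhead column(s)
 * lead the sort key string, e.g.
 *
 *	"comm,dso"           ->  "overhead,comm,dso"
 *
 * and additionally, when symbol_conf.cumulate_callchain is set (--children),
 *
 *	"overhead,comm,dso"  ->  "overhead_children,overhead,comm,dso"
 */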

static int __setup_sorting(struct perf_evlist *evlist)
{
	char *str;
	const char *sort_keys;
	int ret = 0;

	ret = setup_sort_order(evlist);
	if (ret)
		return ret;

	sort_keys = sort_order;
	if (sort_keys == NULL) {
		if (is_strict_order(field_order)) {
			/*
			 * If user specified field order but no sort order,
			 * we'll honor it and not add default sort orders.
			 */
			return 0;
		}

		sort_keys = get_default_sort_order(evlist);
	}

	str = strdup(sort_keys);
	if (str == NULL) {
		error("Not enough memory to setup sort keys");
		return -ENOMEM;
	}

	/*
	 * Prepend overhead fields for backward compatibility.
	 */
	if (!is_strict_order(field_order)) {
		str = setup_overhead(str);
		if (str == NULL) {
			error("Not enough memory to setup overhead keys");
			return -ENOMEM;
		}
	}

	ret = setup_sort_list(&perf_hpp_list, str, evlist);

	free(str);
	return ret;
}

void perf_hpp__set_elide(int idx, bool elide)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_width_idx == idx) {
			fmt->elide = elide;
			break;
		}
	}
}

static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
{
	if (list && strlist__nr_entries(list) == 1) {
		if (fp != NULL)
			fprintf(fp, "# %s: %s\n", list_name,
				strlist__entry(list, 0)->s);
		return true;
	}
	return false;
}

static bool get_elide(int idx, FILE *output)
{
	switch (idx) {
	case HISTC_SYMBOL:
		return __get_elide(symbol_conf.sym_list, "symbol", output);
	case HISTC_DSO:
		return __get_elide(symbol_conf.dso_list, "dso", output);
	case HISTC_COMM:
		return __get_elide(symbol_conf.comm_list, "comm", output);
	default:
		break;
	}

	if (sort__mode != SORT_MODE__BRANCH)
		return false;

	switch (idx) {
	case HISTC_SYMBOL_FROM:
		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
	case HISTC_SYMBOL_TO:
		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
	case HISTC_DSO_FROM:
		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
	case HISTC_DSO_TO:
		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
	default:
		break;
	}

	return false;
}
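
/*
 * For illustration: a column is elided when its filter list has exactly one
 * entry, since it would then show the same value on every line.  E.g. with
 * something like "--dsos libc-2.24.so" the dso column is reported as
 * elidable and a "# dso: libc-2.24.so" line is emitted instead.
 */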

void sort__setup_elide(FILE *output)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		fmt->elide = get_elide(hse->se->se_width_idx, output);
	}

	/*
	 * It makes no sense to elide all of the sort entries.
	 * Just revert them so they show up again.
	 */
	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		if (!fmt->elide)
			return;
	}

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		fmt->elide = false;
	}
}

int output_field_add(struct perf_hpp_list *list, char *tok)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add_output(list, hd);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	return -ESRCH;
}

static int setup_output_list(struct perf_hpp_list *list, char *str)
{
	char *tmp, *tok;
	int ret = 0;

	for (tok = strtok_r(str, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
		ret = output_field_add(list, tok);
		if (ret == -EINVAL) {
			error("Invalid --fields key: `%s'", tok);
			break;
		} else if (ret == -ESRCH) {
			error("Unknown --fields key: `%s'", tok);
			break;
		}
	}

	return ret;
}

void reset_dimensions(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
		common_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
		hpp_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
		bstack_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
		memory_sort_dimensions[i].taken = 0;
}

bool is_strict_order(const char *order)
{
	return order && (*order != '+');
}

static int __setup_output_field(void)
{
	char *str, *strp;
	int ret = -EINVAL;

	if (field_order == NULL)
		return 0;

	strp = str = strdup(field_order);
	if (str == NULL) {
		error("Not enough memory to setup output fields");
		return -ENOMEM;
	}

	if (!is_strict_order(field_order))
		strp++;

	if (!strlen(strp)) {
		error("Invalid --fields key: `+'");
		goto out;
	}

	ret = setup_output_list(&perf_hpp_list, strp);

out:
	free(str);
	return ret;
}
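
/*
 * Note (for illustration): output_field_add() above walks the same static
 * dimension tables as sort_dimension__add() but never calls
 * add_dynamic_entry(), so a tracepoint field can be a --sort key but not a
 * standalone --fields key; dynamic columns reach the output list only when
 * the sort keys are copied over in setup_sorting() below.
 */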

int setup_sorting(struct perf_evlist *evlist)
{
	int err;

	err = __setup_sorting(evlist);
	if (err < 0)
		return err;

	if (parent_pattern != default_parent_pattern) {
		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
		if (err < 0)
			return err;
	}

	reset_dimensions();

	/*
	 * perf diff doesn't use default hpp output fields.
	 */
	if (sort__mode != SORT_MODE__DIFF)
		perf_hpp__init();

	err = __setup_output_field();
	if (err < 0)
		return err;

	/* copy sort keys to output fields */
	perf_hpp__setup_output_field(&perf_hpp_list);
	/* and then copy output fields to sort keys */
	perf_hpp__append_sort_keys(&perf_hpp_list);

	/* setup hists-specific output fields */
	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
		return -1;

	return 0;
}

void reset_output_field(void)
{
	perf_hpp_list.need_collapse = 0;
	perf_hpp_list.parent = 0;
	perf_hpp_list.sym = 0;
	perf_hpp_list.dso = 0;

	field_order = NULL;
	sort_order = NULL;

	reset_dimensions();
	perf_hpp__reset_output_field(&perf_hpp_list);
}
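
/*
 * Usage sketch (illustrative, not lifted from an actual caller): report-style
 * tools are expected to set the option globals and then run the setup here:
 *
 *	sort_order  = "comm,dso,sym";	// or NULL for the default order
 *	field_order = NULL;
 *	if (setup_sorting(session->evlist) < 0)	// 'session' is hypothetical
 *		return -1;
 *	// ... collect samples, build and display hists ...
 *	reset_output_field();	// e.g. between repeated setups, as in tests
 */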