#include <sys/mman.h>
#include "sort.h"
#include "hist.h"
#include "comm.h"
#include "symbol.h"
#include "evsel.h"
#include "evlist.h"
#include <traceevent/event-parse.h>
#include "mem-events.h"

regex_t parent_regex;
const char default_parent_pattern[] = "^sys_|^do_page_fault";
const char *parent_pattern = default_parent_pattern;
const char default_sort_order[] = "comm,dso,symbol";
const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
const char default_top_sort_order[] = "dso,symbol";
const char default_diff_sort_order[] = "dso,symbol";
const char default_tracepoint_sort_order[] = "trace";
const char *sort_order;
const char *field_order;
regex_t ignore_callees_regex;
int have_ignore_callees = 0;
enum sort_mode sort__mode = SORT_MODE__NORMAL;

/*
 * Replaces all occurrences of the character used with the:
 *
 *   -t, --field-separator
 *
 * option, which uses a special separator character and doesn't pad with
 * spaces, replacing all occurrences of this separator in symbol names (and
 * other output) with a '.' character, so that it is the only invalid
 * separator.
 */
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
{
	int n;
	va_list ap;

	va_start(ap, fmt);
	n = vsnprintf(bf, size, fmt, ap);
	if (symbol_conf.field_sep && n > 0) {
		char *sep = bf;

		while (1) {
			sep = strchr(sep, *symbol_conf.field_sep);
			if (sep == NULL)
				break;
			*sep = '.';
		}
	}
	va_end(ap);

	if (n >= (int)size)
		return size - 1;
	return n;
}

static int64_t cmp_null(const void *l, const void *r)
{
	if (!l && !r)
		return 0;
	else if (!l)
		return -1;
	else
		return 1;
}

/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->tid - left->thread->tid;
}

static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	const char *comm = thread__comm_str(he->thread);

	width = max(7U, width) - 8;
	return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid,
			       width, width, comm ?: "");
}

static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct thread *th = arg;

	if (type != HIST_FILTER__THREAD)
		return -1;

	return th && he->thread != th;
}

struct sort_entry sort_thread = {
	.se_header = " Pid:Command",
	.se_cmp = sort__thread_cmp,
	.se_snprintf = hist_entry__thread_snprintf,
	.se_filter = hist_entry__thread_filter,
	.se_width_idx = HISTC_THREAD,
};

/* --sort comm */

static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	/* Compare the addr that should be unique among comm */
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	/* Compare the addr that should be unique among comm */
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
}

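/*
 * Note: for each sort_entry, ->se_cmp is used when a new hist entry is
 * inserted, ->se_collapse when entries are merged during the collapse
 * resort and ->se_sort when the final output is ordered; the latter two
 * fall back to ->se_cmp when NULL (see __sort__hpp_collapse() and
 * __sort__hpp_sort() below).
 */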
struct sort_entry sort_comm = {
	.se_header = "Command",
	.se_cmp = sort__comm_cmp,
	.se_collapse = sort__comm_collapse,
	.se_sort = sort__comm_sort,
	.se_snprintf = hist_entry__comm_snprintf,
	.se_filter = hist_entry__thread_filter,
	.se_width_idx = HISTC_COMM,
};

/* --sort dso */

static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
{
	struct dso *dso_l = map_l ? map_l->dso : NULL;
	struct dso *dso_r = map_r ? map_r->dso : NULL;
	const char *dso_name_l, *dso_name_r;

	if (!dso_l || !dso_r)
		return cmp_null(dso_r, dso_l);

	if (verbose) {
		dso_name_l = dso_l->long_name;
		dso_name_r = dso_r->long_name;
	} else {
		dso_name_l = dso_l->short_name;
		dso_name_r = dso_r->short_name;
	}

	return strcmp(dso_name_l, dso_name_r);
}

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_snprintf(struct map *map, char *bf,
				     size_t size, unsigned int width)
{
	if (map && map->dso) {
		const char *dso_name = !verbose ? map->dso->short_name :
			map->dso->long_name;
		return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
}

static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
}

static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->ms.map || he->ms.map->dso != dso);
}

struct sort_entry sort_dso = {
	.se_header = "Shared Object",
	.se_cmp = sort__dso_cmp,
	.se_snprintf = hist_entry__dso_snprintf,
	.se_filter = hist_entry__dso_filter,
	.se_width_idx = HISTC_DSO,
};

/* --sort symbol */

static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
{
	return (int64_t)(right_ip - left_ip);
}

static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	if (sym_l == sym_r)
		return 0;

	if (sym_l->start != sym_r->start)
		return (int64_t)(sym_r->start - sym_l->start);

	return (int64_t)(sym_r->end - sym_l->end);
}

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	if (!left->ms.sym && !right->ms.sym)
		return _sort__addr_cmp(left->ip, right->ip);

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	if (!hists__has(left->hists, dso) || !hists__has(right->hists, dso)) {
		ret = sort__dso_cmp(left, right);
		if (ret != 0)
			return ret;
	}

	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
}

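/*
 * Name-based comparison, installed as ->se_collapse for the symbol key in
 * 'perf diff' mode (see sort_dimension__add()): the compared perf.data
 * files may come from different binaries, so addresses are not comparable
 * but symbol names are.
 */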
static int64_t
sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->ms.sym || !right->ms.sym)
		return cmp_null(left->ms.sym, right->ms.sym);

	return strcmp(right->ms.sym->name, left->ms.sym->name);
}

static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
				     u64 ip, char level, char *bf, size_t size,
				     unsigned int width)
{
	size_t ret = 0;

	if (verbose) {
		char o = map ? dso__symtab_origin(map->dso) : '!';
		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
				       BITS_PER_LONG / 4 + 2, ip, o);
	}

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (map->type == MAP__VARIABLE) {
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					       ip - map->unmap_ip(map, sym->start));
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
					       width - ret,
					       sym->name);
		}
	} else {
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
	}

	return ret;
}

static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
					 he->level, bf, size, width);
}

static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
}

struct sort_entry sort_sym = {
	.se_header = "Symbol",
	.se_cmp = sort__sym_cmp,
	.se_sort = sort__sym_sort,
	.se_snprintf = hist_entry__sym_snprintf,
	.se_filter = hist_entry__sym_filter,
	.se_width_idx = HISTC_SYMBOL,
};

/* --sort srcline */

static char *hist_entry__get_srcline(struct hist_entry *he)
{
	struct map *map = he->ms.map;

	if (!map)
		return SRCLINE_UNKNOWN;

	return get_srcline(map->dso, map__rip_2objdump(map, he->ip),
			   he->ms.sym, true);
}

static int64_t
sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcline)
		left->srcline = hist_entry__get_srcline(left);
	if (!right->srcline)
		right->srcline = hist_entry__get_srcline(right);

	return strcmp(right->srcline, left->srcline);
}

static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (!he->srcline)
		he->srcline = hist_entry__get_srcline(he);

	return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
}

struct sort_entry sort_srcline = {
	.se_header = "Source:Line",
	.se_cmp = sort__srcline_cmp,
	.se_snprintf = hist_entry__srcline_snprintf,
	.se_width_idx = HISTC_SRCLINE,
};

/* --sort srcline_from */

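/*
 * The from/to source lines are resolved lazily on first comparison and
 * cached in the entry's branch_info; when no map is available they fall
 * back to SRCLINE_UNKNOWN.
 */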
static int64_t
sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_from) {
		struct map *map = left->branch_info->from.map;
		if (!map)
			left->branch_info->srcline_from = SRCLINE_UNKNOWN;
		else
			left->branch_info->srcline_from = get_srcline(map->dso,
					map__rip_2objdump(map,
						left->branch_info->from.al_addr),
					left->branch_info->from.sym, true);
	}
	if (!right->branch_info->srcline_from) {
		struct map *map = right->branch_info->from.map;
		if (!map)
			right->branch_info->srcline_from = SRCLINE_UNKNOWN;
		else
			right->branch_info->srcline_from = get_srcline(map->dso,
					map__rip_2objdump(map,
						right->branch_info->from.al_addr),
					right->branch_info->from.sym, true);
	}
	return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
}

static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
}

struct sort_entry sort_srcline_from = {
	.se_header = "From Source:Line",
	.se_cmp = sort__srcline_from_cmp,
	.se_snprintf = hist_entry__srcline_from_snprintf,
	.se_width_idx = HISTC_SRCLINE_FROM,
};

/* --sort srcline_to */

static int64_t
sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_to) {
		struct map *map = left->branch_info->to.map;
		if (!map)
			left->branch_info->srcline_to = SRCLINE_UNKNOWN;
		else
			left->branch_info->srcline_to = get_srcline(map->dso,
					map__rip_2objdump(map,
						left->branch_info->to.al_addr),
					left->branch_info->from.sym, true);
	}
	if (!right->branch_info->srcline_to) {
		struct map *map = right->branch_info->to.map;
		if (!map)
			right->branch_info->srcline_to = SRCLINE_UNKNOWN;
		else
			right->branch_info->srcline_to = get_srcline(map->dso,
					map__rip_2objdump(map,
						right->branch_info->to.al_addr),
					right->branch_info->to.sym, true);
	}
	return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
}

static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
}

struct sort_entry sort_srcline_to = {
	.se_header = "To Source:Line",
	.se_cmp = sort__srcline_to_cmp,
	.se_snprintf = hist_entry__srcline_to_snprintf,
	.se_width_idx = HISTC_SRCLINE_TO,
};

/* --sort srcfile */

static char no_srcfile[1];

static char *hist_entry__get_srcfile(struct hist_entry *e)
{
	char *sf, *p;
	struct map *map = e->ms.map;

	if (!map)
		return no_srcfile;

	sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
			   e->ms.sym, false, true);
	if (!strcmp(sf, SRCLINE_UNKNOWN))
		return no_srcfile;
	p = strchr(sf, ':');
	if (p && *sf) {
		*p = 0;
		return sf;
	}
	free(sf);
	return no_srcfile;
}

static int64_t
sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcfile)
		left->srcfile = hist_entry__get_srcfile(left);
	if (!right->srcfile)
		right->srcfile = hist_entry__get_srcfile(right);

	return strcmp(right->srcfile, left->srcfile);
}

static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (!he->srcfile)
		he->srcfile = hist_entry__get_srcfile(he);

	return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
}

struct sort_entry sort_srcfile = {
	.se_header = "Source File",
	.se_cmp = sort__srcfile_cmp,
	.se_snprintf = hist_entry__srcfile_snprintf,
	.se_width_idx = HISTC_SRCFILE,
};

/* --sort parent */

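/*
 * The parent key groups samples by a caller that matches the --parent
 * regex (parent_pattern, "^sys_|^do_page_fault" by default, compiled in
 * sort_dimension__add()); entries without a resolved parent are printed
 * as "[other]".
 */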
static int64_t
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct symbol *sym_l = left->parent;
	struct symbol *sym_r = right->parent;

	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	return strcmp(sym_r->name, sym_l->name);
}

static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width,
			       he->parent ? he->parent->name : "[other]");
}

struct sort_entry sort_parent = {
	.se_header = "Parent symbol",
	.se_cmp = sort__parent_cmp,
	.se_snprintf = hist_entry__parent_snprintf,
	.se_width_idx = HISTC_PARENT,
};

/* --sort cpu */

static int64_t
sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->cpu - left->cpu;
}

static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
}

struct sort_entry sort_cpu = {
	.se_header = "CPU",
	.se_cmp = sort__cpu_cmp,
	.se_snprintf = hist_entry__cpu_snprintf,
	.se_width_idx = HISTC_CPU,
};

/* --sort socket */

static int64_t
sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->socket - left->socket;
}

static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
}

static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
{
	int sk = *(const int *)arg;

	if (type != HIST_FILTER__SOCKET)
		return -1;

	return sk >= 0 && he->socket != sk;
}

struct sort_entry sort_socket = {
	.se_header = "Socket",
	.se_cmp = sort__socket_cmp,
	.se_snprintf = hist_entry__socket_snprintf,
	.se_filter = hist_entry__socket_filter,
	.se_width_idx = HISTC_SOCKET,
};

/* --sort trace */

static char *get_trace_output(struct hist_entry *he)
{
	struct trace_seq seq;
	struct perf_evsel *evsel;
	struct pevent_record rec = {
		.data = he->raw_data,
		.size = he->raw_size,
	};

	evsel = hists_to_evsel(he->hists);

	trace_seq_init(&seq);
	if (symbol_conf.raw_trace) {
		pevent_print_fields(&seq, he->raw_data, he->raw_size,
				    evsel->tp_format);
	} else {
		pevent_event_info(&seq, evsel->tp_format, &rec);
	}
	return seq.buffer;
}

static int64_t
sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_evsel *evsel;

	evsel = hists_to_evsel(left->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		return 0;

	if (left->trace_output == NULL)
		left->trace_output = get_trace_output(left);
	if (right->trace_output == NULL)
		right->trace_output = get_trace_output(right);

	return strcmp(right->trace_output, left->trace_output);
}

static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	struct perf_evsel *evsel;

	evsel = hists_to_evsel(he->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		return scnprintf(bf, size, "%-.*s", width, "N/A");

	if (he->trace_output == NULL)
		he->trace_output = get_trace_output(he);
	return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
}

struct sort_entry sort_trace = {
	.se_header = "Trace output",
	.se_cmp = sort__trace_cmp,
	.se_snprintf = hist_entry__trace_snprintf,
	.se_width_idx = HISTC_TRACE,
};

/* sort keys for branch stacks */

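/*
 * All branch-stack keys below operate on he->branch_info; entries that
 * were not sampled with a branch stack compare via cmp_null() and are
 * printed as "N/A".
 */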
static int64_t
sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->from.map,
			      right->branch_info->from.map);
}

static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->from.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->from.map ||
		       he->branch_info->from.map->dso != dso);
}

static int64_t
sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->to.map,
			      right->branch_info->to.map);
}

static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->to.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->to.map ||
		       he->branch_info->to.map->dso != dso);
}

static int64_t
sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *from_l = &left->branch_info->from;
	struct addr_map_symbol *from_r = &right->branch_info->from;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;

	if (!from_l->sym && !from_r->sym)
		return _sort__addr_cmp(from_l->addr, from_r->addr);

	return _sort__sym_cmp(from_l->sym, from_r->sym);
}

static int64_t
sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *to_l, *to_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	to_l = &left->branch_info->to;
	to_r = &right->branch_info->to;

	if (!to_l->sym && !to_r->sym)
		return _sort__addr_cmp(to_l->addr, to_r->addr);

	return _sort__sym_cmp(to_l->sym, to_r->sym);
}

static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;

		return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
						 he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;

		return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
						 he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->from.sym &&
			strstr(he->branch_info->from.sym->name, sym));
}

static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->to.sym &&
			strstr(he->branch_info->to.sym->name, sym));
}

struct sort_entry sort_dso_from = {
	.se_header = "Source Shared Object",
	.se_cmp = sort__dso_from_cmp,
	.se_snprintf = hist_entry__dso_from_snprintf,
	.se_filter = hist_entry__dso_from_filter,
	.se_width_idx = HISTC_DSO_FROM,
};

struct sort_entry sort_dso_to = {
	.se_header = "Target Shared Object",
	.se_cmp = sort__dso_to_cmp,
	.se_snprintf = hist_entry__dso_to_snprintf,
	.se_filter = hist_entry__dso_to_filter,
	.se_width_idx = HISTC_DSO_TO,
};

struct sort_entry sort_sym_from = {
	.se_header = "Source Symbol",
	.se_cmp = sort__sym_from_cmp,
	.se_snprintf = hist_entry__sym_from_snprintf,
	.se_filter = hist_entry__sym_from_filter,
	.se_width_idx = HISTC_SYMBOL_FROM,
};

struct sort_entry sort_sym_to = {
	.se_header = "Target Symbol",
	.se_cmp = sort__sym_to_cmp,
	.se_snprintf = hist_entry__sym_to_snprintf,
	.se_filter = hist_entry__sym_to_filter,
	.se_width_idx = HISTC_SYMBOL_TO,
};

static int64_t
sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
{
	unsigned char mp, p;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
	p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
	return mp || p;
}

static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.predicted)
			out = "N";
		else if (he->branch_info->flags.mispred)
			out = "Y";
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
}

static int64_t
sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->branch_info->flags.cycles -
		right->branch_info->flags.cycles;
}

static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info->flags.cycles == 0)
		return repsep_snprintf(bf, size, "%-*s", width, "-");
	return repsep_snprintf(bf, size, "%-*hd", width,
			       he->branch_info->flags.cycles);
}

struct sort_entry sort_cycles = {
	.se_header = "Basic Block Cycles",
	.se_cmp = sort__cycles_cmp,
	.se_snprintf = hist_entry__cycles_snprintf,
	.se_width_idx = HISTC_CYCLES,
};

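/*
 * The memory-mode keys below compare fields of he->mem_info (data and
 * instruction addresses, the target map and the perf_mem_data_src bits);
 * entries without mem_info are treated as PERF_MEM_*_NA by the data_src
 * based comparisons.
 */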
/* --sort daddr_sym */
static int64_t
sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->daddr.addr;
	if (right->mem_info)
		r = right->mem_info->daddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;

	if (he->mem_info) {
		addr = he->mem_info->daddr.addr;
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;
	}
	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
					 width);
}

static int64_t
sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->iaddr.addr;
	if (right->mem_info)
		r = right->mem_info->iaddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;

	if (he->mem_info) {
		addr = he->mem_info->iaddr.addr;
		map = he->mem_info->iaddr.map;
		sym = he->mem_info->iaddr.sym;
	}
	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
					 width);
}

static int64_t
sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct map *map_l = NULL;
	struct map *map_r = NULL;

	if (left->mem_info)
		map_l = left->mem_info->daddr.map;
	if (right->mem_info)
		map_r = right->mem_info->daddr.map;

	return _sort__dso_cmp(map_l, map_r);
}

static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{
	struct map *map = NULL;

	if (he->mem_info)
		map = he->mem_info->daddr.map;

	return _hist_entry__dso_snprintf(map, bf, size, width);
}

static int64_t
sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lock = PERF_MEM_LOCK_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lock = PERF_MEM_LOCK_NA;

	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
}

static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	char out[10];

	perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%.*s", width, out);
}

static int64_t
sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;

	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
}

static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lvl = PERF_MEM_LVL_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lvl = PERF_MEM_LVL_NA;

	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
}

static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;

	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
}

static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	char out[64];

	perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 l, r;
	struct map *l_map, *r_map;

	if (!left->mem_info)  return -1;
	if (!right->mem_info) return 1;

	/* group event types together */
	if (left->cpumode > right->cpumode) return -1;
	if (left->cpumode < right->cpumode) return 1;

	l_map = left->mem_info->daddr.map;
	r_map = right->mem_info->daddr.map;

	/* if both are NULL, jump to sort on al_addr instead */
	if (!l_map && !r_map)
		goto addr;

	if (!l_map) return -1;
	if (!r_map) return 1;

	if (l_map->maj > r_map->maj) return -1;
	if (l_map->maj < r_map->maj) return 1;

	if (l_map->min > r_map->min) return -1;
	if (l_map->min < r_map->min) return 1;

	if (l_map->ino > r_map->ino) return -1;
	if (l_map->ino < r_map->ino) return 1;

	if (l_map->ino_generation > r_map->ino_generation) return -1;
	if (l_map->ino_generation < r_map->ino_generation) return 1;

	/*
	 * Addresses with no major/minor numbers are assumed to be
	 * anonymous in userspace.  Sort those on pid then address.
	 *
	 * The kernel and non-zero major/minor mapped areas are
	 * assumed to be unity mapped.  Sort those on address.
	 */

	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
	    (!(l_map->flags & MAP_SHARED)) &&
	    !l_map->maj && !l_map->min && !l_map->ino &&
	    !l_map->ino_generation) {
		/* userspace anonymous */

		if (left->thread->pid_ > right->thread->pid_) return -1;
		if (left->thread->pid_ < right->thread->pid_) return 1;
	}

addr:
	/* al_addr does all the right addr - start + offset calculations */
	l = cl_address(left->mem_info->daddr.al_addr);
	r = cl_address(right->mem_info->daddr.al_addr);

	if (l > r) return -1;
	if (l < r) return 1;

	return 0;
}

static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{

	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;
	char level = he->level;

	if (he->mem_info) {
		addr = cl_address(he->mem_info->daddr.al_addr);
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;

		/* print [s] for shared data mmaps */
		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
		    map && (map->type == MAP__VARIABLE) &&
		    (map->flags & MAP_SHARED) &&
		    (map->maj || map->min || map->ino ||
		     map->ino_generation))
			level = 's';
		else if (!map)
			level = 'X';
	}
	return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
					 width);
}

struct sort_entry sort_mispredict = {
	.se_header = "Branch Mispredicted",
	.se_cmp = sort__mispredict_cmp,
	.se_snprintf = hist_entry__mispredict_snprintf,
	.se_width_idx = HISTC_MISPREDICT,
};

static u64 he_weight(struct hist_entry *he)
{
	return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
}

static int64_t
sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return he_weight(left) - he_weight(right);
}

static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
}

struct sort_entry sort_local_weight = {
	.se_header = "Local Weight",
	.se_cmp = sort__local_weight_cmp,
	.se_snprintf = hist_entry__local_weight_snprintf,
	.se_width_idx = HISTC_LOCAL_WEIGHT,
};

static int64_t
sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->stat.weight - right->stat.weight;
}

static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
					      size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
}

struct sort_entry sort_global_weight = {
	.se_header = "Weight",
	.se_cmp = sort__global_weight_cmp,
	.se_snprintf = hist_entry__global_weight_snprintf,
	.se_width_idx = HISTC_GLOBAL_WEIGHT,
};

struct sort_entry sort_mem_daddr_sym = {
	.se_header = "Data Symbol",
	.se_cmp = sort__daddr_cmp,
	.se_snprintf = hist_entry__daddr_snprintf,
	.se_width_idx = HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_iaddr_sym = {
	.se_header = "Code Symbol",
	.se_cmp = sort__iaddr_cmp,
	.se_snprintf = hist_entry__iaddr_snprintf,
	.se_width_idx = HISTC_MEM_IADDR_SYMBOL,
};

struct sort_entry sort_mem_daddr_dso = {
	.se_header = "Data Object",
	.se_cmp = sort__dso_daddr_cmp,
	.se_snprintf = hist_entry__dso_daddr_snprintf,
	.se_width_idx = HISTC_MEM_DADDR_DSO,
};

struct sort_entry sort_mem_locked = {
	.se_header = "Locked",
	.se_cmp = sort__locked_cmp,
	.se_snprintf = hist_entry__locked_snprintf,
	.se_width_idx = HISTC_MEM_LOCKED,
};

struct sort_entry sort_mem_tlb = {
	.se_header = "TLB access",
	.se_cmp = sort__tlb_cmp,
	.se_snprintf = hist_entry__tlb_snprintf,
	.se_width_idx = HISTC_MEM_TLB,
};

struct sort_entry sort_mem_lvl = {
	.se_header = "Memory access",
	.se_cmp = sort__lvl_cmp,
	.se_snprintf = hist_entry__lvl_snprintf,
	.se_width_idx = HISTC_MEM_LVL,
};

struct sort_entry sort_mem_snoop = {
	.se_header = "Snoop",
	.se_cmp = sort__snoop_cmp,
	.se_snprintf = hist_entry__snoop_snprintf,
	.se_width_idx = HISTC_MEM_SNOOP,
};

struct sort_entry sort_mem_dcacheline = {
	.se_header = "Data Cacheline",
	.se_cmp = sort__dcacheline_cmp,
	.se_snprintf = hist_entry__dcacheline_snprintf,
	.se_width_idx = HISTC_MEM_DCACHELINE,
};

static int64_t
sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.abort !=
		right->branch_info->flags.abort;
}

static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.abort)
			out = "A";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_abort = {
	.se_header = "Transaction abort",
	.se_cmp = sort__abort_cmp,
	.se_snprintf = hist_entry__abort_snprintf,
	.se_width_idx = HISTC_ABORT,
};

static int64_t
sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.in_tx !=
		right->branch_info->flags.in_tx;
}

static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.in_tx)
			out = "T";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_in_tx = {
	.se_header = "Branch in transaction",
	.se_cmp = sort__in_tx_cmp,
	.se_snprintf = hist_entry__in_tx_snprintf,
	.se_width_idx = HISTC_IN_TX,
};

static int64_t
sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->transaction - right->transaction;
}

static inline char *add_str(char *p, const char *str)
{
	strcpy(p, str);
	return p + strlen(str);
}

static struct txbit {
	unsigned flag;
	const char *name;
	int skip_for_len;
} txbits[] = {
	{ PERF_TXN_ELISION, "EL ", 0 },
	{ PERF_TXN_TRANSACTION, "TX ", 1 },
	{ PERF_TXN_SYNC, "SYNC ", 1 },
	{ PERF_TXN_ASYNC, "ASYNC ", 0 },
	{ PERF_TXN_RETRY, "RETRY ", 0 },
	{ PERF_TXN_CONFLICT, "CON ", 0 },
	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
	{ PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
	{ 0, NULL, 0 }
};

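/*
 * Column width for the transaction key: the lengths of the flag names not
 * marked skip_for_len above (the marked ones are assumed not to widen the
 * worst case) plus ":XX " for a hex abort code.
 */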
int hist_entry__transaction_len(void)
{
	int i;
	int len = 0;

	for (i = 0; txbits[i].name; i++) {
		if (!txbits[i].skip_for_len)
			len += strlen(txbits[i].name);
	}
	len += 4; /* :XX<space> */
	return len;
}

static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
					    size_t size, unsigned int width)
{
	u64 t = he->transaction;
	char buf[128];
	char *p = buf;
	int i;

	buf[0] = 0;
	for (i = 0; txbits[i].name; i++)
		if (txbits[i].flag & t)
			p = add_str(p, txbits[i].name);
	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
		p = add_str(p, "NEITHER ");
	if (t & PERF_TXN_ABORT_MASK) {
		sprintf(p, ":%" PRIx64,
			(t & PERF_TXN_ABORT_MASK) >>
			PERF_TXN_ABORT_SHIFT);
		p += strlen(p);
	}

	return repsep_snprintf(bf, size, "%-*s", width, buf);
}

struct sort_entry sort_transaction = {
	.se_header = "Transaction ",
	.se_cmp = sort__transaction_cmp,
	.se_snprintf = hist_entry__transaction_snprintf,
	.se_width_idx = HISTC_TRANSACTION,
};

struct sort_dimension {
	const char *name;
	struct sort_entry *entry;
	int taken;
};

#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }

static struct sort_dimension common_sort_dimensions[] = {
	DIM(SORT_PID, "pid", sort_thread),
	DIM(SORT_COMM, "comm", sort_comm),
	DIM(SORT_DSO, "dso", sort_dso),
	DIM(SORT_SYM, "symbol", sort_sym),
	DIM(SORT_PARENT, "parent", sort_parent),
	DIM(SORT_CPU, "cpu", sort_cpu),
	DIM(SORT_SOCKET, "socket", sort_socket),
	DIM(SORT_SRCLINE, "srcline", sort_srcline),
	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
	DIM(SORT_TRACE, "trace", sort_trace),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }

static struct sort_dimension bstack_sort_dimensions[] = {
	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
	DIM(SORT_ABORT, "abort", sort_abort),
	DIM(SORT_CYCLES, "cycles", sort_cycles),
	DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
	DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }

static struct sort_dimension memory_sort_dimensions[] = {
	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
};

#undef DIM

struct hpp_dimension {
	const char *name;
	struct perf_hpp_fmt *fmt;
	int taken;
};

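/*
 * Unlike the tables above, these columns are not backed by a sort_entry:
 * they map names like "overhead" and "period" onto the generic
 * perf_hpp__format[] formatters so that they can also be used in
 * --sort/--fields strings.
 */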
#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }

static struct hpp_dimension hpp_sort_dimensions[] = {
	DIM(PERF_HPP__OVERHEAD, "overhead"),
	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
	DIM(PERF_HPP__SAMPLES, "sample"),
	DIM(PERF_HPP__PERIOD, "period"),
};

#undef DIM

struct hpp_sort_entry {
	struct perf_hpp_fmt hpp;
	struct sort_entry *se;
};

void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_sort_entry *hse;

	if (!perf_hpp__is_sort_entry(fmt))
		return;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
}

static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
}

static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return len;
}

static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(he->hists, hse->se->se_width_idx);

	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
}

static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	return hse->se->se_cmp(a, b);
}

static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
				    struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
	return collapse_fn(a, b);
}

static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
				struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
	return sort_fn(a, b);
}

bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
{
	return format->header == __sort__hpp_header;
}

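/*
 * A format is recognized as wrapping a sort_entry purely by its header
 * callback (see perf_hpp__is_sort_entry() above); MK_SORT_ENTRY_CHK()
 * then generates per-key checks such as perf_hpp__is_sym_entry() on top
 * of that.
 */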
#define MK_SORT_ENTRY_CHK(key)					\
bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt)	\
{								\
	struct hpp_sort_entry *hse;				\
								\
	if (!perf_hpp__is_sort_entry(fmt))			\
		return false;					\
								\
	hse = container_of(fmt, struct hpp_sort_entry, hpp);	\
	return hse->se == &sort_ ## key ;			\
}

MK_SORT_ENTRY_CHK(trace)
MK_SORT_ENTRY_CHK(srcline)
MK_SORT_ENTRY_CHK(srcfile)
MK_SORT_ENTRY_CHK(thread)
MK_SORT_ENTRY_CHK(comm)
MK_SORT_ENTRY_CHK(dso)
MK_SORT_ENTRY_CHK(sym)


static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_sort_entry *hse_a;
	struct hpp_sort_entry *hse_b;

	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
		return false;

	hse_a = container_of(a, struct hpp_sort_entry, hpp);
	hse_b = container_of(b, struct hpp_sort_entry, hpp);

	return hse_a->se == hse_b->se;
}

static void hse_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	free(hse);
}

static struct hpp_sort_entry *
__sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
{
	struct hpp_sort_entry *hse;

	hse = malloc(sizeof(*hse));
	if (hse == NULL) {
		pr_err("Memory allocation failed\n");
		return NULL;
	}

	hse->se = sd->entry;
	hse->hpp.name = sd->entry->se_header;
	hse->hpp.header = __sort__hpp_header;
	hse->hpp.width = __sort__hpp_width;
	hse->hpp.entry = __sort__hpp_entry;
	hse->hpp.color = NULL;

	hse->hpp.cmp = __sort__hpp_cmp;
	hse->hpp.collapse = __sort__hpp_collapse;
	hse->hpp.sort = __sort__hpp_sort;
	hse->hpp.equal = __sort__hpp_equal;
	hse->hpp.free = hse_free;

	INIT_LIST_HEAD(&hse->hpp.list);
	INIT_LIST_HEAD(&hse->hpp.sort_list);
	hse->hpp.elide = false;
	hse->hpp.len = 0;
	hse->hpp.user_len = 0;
	hse->hpp.level = level;

	return hse;
}

static void hpp_free(struct perf_hpp_fmt *fmt)
{
	free(fmt);
}

static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
						       int level)
{
	struct perf_hpp_fmt *fmt;

	fmt = memdup(hd->fmt, sizeof(*fmt));
	if (fmt) {
		INIT_LIST_HEAD(&fmt->list);
		INIT_LIST_HEAD(&fmt->sort_list);
		fmt->free = hpp_free;
		fmt->level = level;
	}

	return fmt;
}

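/*
 * Run all applicable ->se_filter callbacks for the given filter type.
 * Returns -1 when no sort key in the hpp list handles this filter type,
 * 0 when the entry passes every applicable filter, and > 0 when at least
 * one filter rejects it.
 */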
int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;
	int ret = -1;
	int r;

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_filter == NULL)
			continue;

		/*
		 * hist entry is filtered if any of sort key in the hpp list
		 * is applied.  But it should skip non-matched filter types.
		 */
		r = hse->se->se_filter(he, type, arg);
		if (r >= 0) {
			if (ret < 0)
				ret = 0;
			ret |= r;
		}
	}

	return ret;
}

static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
					  struct perf_hpp_list *list,
					  int level)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);

	if (hse == NULL)
		return -1;

	perf_hpp_list__register_sort_field(list, &hse->hpp);
	return 0;
}

static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
					    struct perf_hpp_list *list)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);

	if (hse == NULL)
		return -1;

	perf_hpp_list__column_register(list, &hse->hpp);
	return 0;
}

struct hpp_dynamic_entry {
	struct perf_hpp_fmt hpp;
	struct perf_evsel *evsel;
	struct format_field *field;
	unsigned dynamic_len;
	bool raw_trace;
};

static int hde_width(struct hpp_dynamic_entry *hde)
{
	if (!hde->hpp.len) {
		int len = hde->dynamic_len;
		int namelen = strlen(hde->field->name);
		int fieldlen = hde->field->size;

		if (namelen > len)
			len = namelen;

		if (!(hde->field->flags & FIELD_IS_STRING)) {
			/* length for print hex numbers */
			fieldlen = hde->field->size * 2 + 2;
		}
		if (fieldlen > len)
			len = fieldlen;

		hde->hpp.len = len;
	}
	return hde->hpp.len;
}

static void update_dynamic_len(struct hpp_dynamic_entry *hde,
			       struct hist_entry *he)
{
	char *str, *pos;
	struct format_field *field = hde->field;
	size_t namelen;
	bool last = false;

	if (hde->raw_trace)
		return;

	/* parse pretty print result and update max length */
	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			size_t len;

			str += namelen + 1;
			len = pos - str;

			if (len > hde->dynamic_len)
				hde->dynamic_len = len;
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}
}

static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
}

static int __sort__hde_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return len;
}

bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	return hists_to_evsel(hists) == hde->evsel;
}

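/*
 * Scan the space-separated pretty-printed trace output for a token that
 * starts with this field's name and print what follows it; fall back to
 * printing the raw field via pevent_print_field() when the token is not
 * found or when the /raw option was given.
 */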
static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;
	char *str, *pos;
	struct format_field *field;
	size_t namelen;
	bool last = false;
	int ret;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	if (hde->raw_trace)
		goto raw_field;

	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	field = hde->field;
	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			str += namelen + 1;
			str = strndup(str, pos - str);

			if (str == NULL)
				return scnprintf(hpp->buf, hpp->size,
						 "%*.*s", len, len, "ERROR");
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}

	if (str == NULL) {
		struct trace_seq seq;
raw_field:
		trace_seq_init(&seq);
		pevent_print_field(&seq, he->raw_data, hde->field);
		str = seq.buffer;
	}

	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
	free(str);
	return ret;
}

static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_dynamic_entry *hde;
	struct format_field *field;
	unsigned offset, size;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (b == NULL) {
		update_dynamic_len(hde, a);
		return 0;
	}

	field = hde->field;
	if (field->flags & FIELD_IS_DYNAMIC) {
		unsigned long long dyn;

		pevent_read_number_field(field, a->raw_data, &dyn);
		offset = dyn & 0xffff;
		size = (dyn >> 16) & 0xffff;

		/* record max width for output */
		if (size > hde->dynamic_len)
			hde->dynamic_len = size;
	} else {
		offset = field->offset;
		size = field->size;
	}

	return memcmp(a->raw_data + offset, b->raw_data + offset, size);
}

bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
{
	return fmt->cmp == __sort__hde_cmp;
}

static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_dynamic_entry *hde_a;
	struct hpp_dynamic_entry *hde_b;

	if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
		return false;

	hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
	hde_b = container_of(b, struct hpp_dynamic_entry, hpp);

	return hde_a->field == hde_b->field;
}

static void hde_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
	free(hde);
}

static struct hpp_dynamic_entry *
__alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field,
		      int level)
{
	struct hpp_dynamic_entry *hde;

	hde = malloc(sizeof(*hde));
	if (hde == NULL) {
		pr_debug("Memory allocation failed\n");
		return NULL;
	}

	hde->evsel = evsel;
	hde->field = field;
	hde->dynamic_len = 0;

	hde->hpp.name = field->name;
	hde->hpp.header = __sort__hde_header;
	hde->hpp.width = __sort__hde_width;
	hde->hpp.entry = __sort__hde_entry;
	hde->hpp.color = NULL;

	hde->hpp.cmp = __sort__hde_cmp;
	hde->hpp.collapse = __sort__hde_cmp;
	hde->hpp.sort = __sort__hde_cmp;
	hde->hpp.equal = __sort__hde_equal;
	hde->hpp.free = hde_free;

	INIT_LIST_HEAD(&hde->hpp.list);
	INIT_LIST_HEAD(&hde->hpp.sort_list);
	hde->hpp.elide = false;
	hde->hpp.len = 0;
	hde->hpp.user_len = 0;
	hde->hpp.level = level;

	return hde;
}

struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_fmt *new_fmt = NULL;

	if (perf_hpp__is_sort_entry(fmt)) {
		struct hpp_sort_entry *hse, *new_hse;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		new_hse = memdup(hse, sizeof(*hse));
		if (new_hse)
			new_fmt = &new_hse->hpp;
	} else if (perf_hpp__is_dynamic_entry(fmt)) {
		struct hpp_dynamic_entry *hde, *new_hde;

		hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
		new_hde = memdup(hde, sizeof(*hde));
		if (new_hde)
			new_fmt = &new_hde->hpp;
	} else {
		new_fmt = memdup(fmt, sizeof(*fmt));
	}

	INIT_LIST_HEAD(&new_fmt->list);
	INIT_LIST_HEAD(&new_fmt->sort_list);

	return new_fmt;
}

static int parse_field_name(char *str, char **event, char **field, char **opt)
{
	char *event_name, *field_name, *opt_name;

	event_name = str;
	field_name = strchr(str, '.');

	if (field_name) {
		*field_name++ = '\0';
	} else {
		event_name = NULL;
		field_name = str;
	}

	opt_name = strchr(field_name, '/');
	if (opt_name)
		*opt_name++ = '\0';

	*event = event_name;
	*field = field_name;
	*opt = opt_name;

	return 0;
}
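/*
 * For example (illustrative input), "sched:sched_switch.next_pid/raw" is
 * split into event "sched:sched_switch", field "next_pid" and option
 * "raw", while a bare "next_pid" leaves the event NULL so that every
 * tracepoint in the session is searched for that field.
 */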

/* find match evsel using a given event name.  The event name can be:
 *   1. '%' + event index (e.g. '%1' for first event)
 *   2. full event name (e.g. sched:sched_switch)
 *   3. partial event name (should not contain ':')
 */
static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
{
	struct perf_evsel *evsel = NULL;
	struct perf_evsel *pos;
	bool full_name;

	/* case 1 */
	if (event_name[0] == '%') {
		int nr = strtol(event_name+1, NULL, 0);

		if (nr > evlist->nr_entries)
			return NULL;

		evsel = perf_evlist__first(evlist);
		while (--nr > 0)
			evsel = perf_evsel__next(evsel);

		return evsel;
	}

	full_name = !!strchr(event_name, ':');
	evlist__for_each_entry(evlist, pos) {
		/* case 2 */
		if (full_name && !strcmp(pos->name, event_name))
			return pos;
		/* case 3 */
		if (!full_name && strstr(pos->name, event_name)) {
			if (evsel) {
				pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
					 event_name, evsel->name, pos->name);
				return NULL;
			}
			evsel = pos;
		}
	}

	return evsel;
}

static int __dynamic_dimension__add(struct perf_evsel *evsel,
				    struct format_field *field,
				    bool raw_trace, int level)
{
	struct hpp_dynamic_entry *hde;

	hde = __alloc_dynamic_entry(evsel, field, level);
	if (hde == NULL)
		return -ENOMEM;

	hde->raw_trace = raw_trace;

	perf_hpp__register_sort_field(&hde->hpp);
	return 0;
}

static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace, int level)
{
	int ret;
	struct format_field *field;

	field = evsel->tp_format->format.fields;
	while (field) {
		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			return ret;

		field = field->next;
	}
	return 0;
}

static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace,
				  int level)
{
	int ret;
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		ret = add_evsel_fields(evsel, raw_trace, level);
		if (ret < 0)
			return ret;
	}
	return 0;
}

static int add_all_matching_fields(struct perf_evlist *evlist,
				   char *field_name, bool raw_trace, int level)
{
	int ret = -ESRCH;
	struct perf_evsel *evsel;
	struct format_field *field;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		field = pevent_find_any_field(evsel->tp_format, field_name);
		if (field == NULL)
			continue;

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			break;
	}
	return ret;
}

static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok,
			     int level)
{
	char *str, *event_name, *field_name, *opt_name;
	struct perf_evsel *evsel;
	struct format_field *field;
	bool raw_trace = symbol_conf.raw_trace;
	int ret = 0;

	if (evlist == NULL)
		return -ENOENT;

	str = strdup(tok);
	if (str == NULL)
		return -ENOMEM;

	if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
		ret = -EINVAL;
		goto out;
	}

	if (opt_name) {
		if (strcmp(opt_name, "raw")) {
			pr_debug("unsupported field option %s\n", opt_name);
			ret = -EINVAL;
			goto out;
		}
		raw_trace = true;
	}

	if (!strcmp(field_name, "trace_fields")) {
		ret = add_all_dynamic_fields(evlist, raw_trace, level);
static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok,
			     int level)
{
	char *str, *event_name, *field_name, *opt_name;
	struct perf_evsel *evsel;
	struct format_field *field;
	bool raw_trace = symbol_conf.raw_trace;
	int ret = 0;

	if (evlist == NULL)
		return -ENOENT;

	str = strdup(tok);
	if (str == NULL)
		return -ENOMEM;

	if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
		ret = -EINVAL;
		goto out;
	}

	if (opt_name) {
		if (strcmp(opt_name, "raw")) {
			pr_debug("unsupported field option %s\n", opt_name);
			ret = -EINVAL;
			goto out;
		}
		raw_trace = true;
	}

	if (!strcmp(field_name, "trace_fields")) {
		ret = add_all_dynamic_fields(evlist, raw_trace, level);
		goto out;
	}

	if (event_name == NULL) {
		ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
		goto out;
	}

	evsel = find_evsel(evlist, event_name);
	if (evsel == NULL) {
		pr_debug("Cannot find event: %s\n", event_name);
		ret = -ENOENT;
		goto out;
	}

	if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
		pr_debug("%s is not a tracepoint event\n", event_name);
		ret = -EINVAL;
		goto out;
	}

	if (!strcmp(field_name, "*")) {
		ret = add_evsel_fields(evsel, raw_trace, level);
	} else {
		field = pevent_find_any_field(evsel->tp_format, field_name);
		if (field == NULL) {
			pr_debug("Cannot find event field for %s.%s\n",
				 event_name, field_name);
			/* go through 'out' so that 'str' gets freed */
			ret = -ENOENT;
			goto out;
		}

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
	}

out:
	free(str);
	return ret;
}

static int __sort_dimension__add(struct sort_dimension *sd,
				 struct perf_hpp_list *list,
				 int level)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
		return -1;

	if (sd->entry->se_collapse)
		list->need_collapse = 1;

	sd->taken = 1;

	return 0;
}

static int __hpp_dimension__add(struct hpp_dimension *hd,
				struct perf_hpp_list *list,
				int level)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, level);
	if (!fmt)
		return -1;

	hd->taken = 1;
	perf_hpp_list__register_sort_field(list, fmt);
	return 0;
}

static int __sort_dimension__add_output(struct perf_hpp_list *list,
					struct sort_dimension *sd)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_output(sd, list) < 0)
		return -1;

	sd->taken = 1;
	return 0;
}

static int __hpp_dimension__add_output(struct perf_hpp_list *list,
				       struct hpp_dimension *hd)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, 0);
	if (!fmt)
		return -1;

	hd->taken = 1;
	perf_hpp_list__column_register(list, fmt);
	return 0;
}

int hpp_dimension__add_output(unsigned col)
{
	BUG_ON(col >= PERF_HPP__MAX_INDEX);
	return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
}
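/*
 * sort_dimension__add() resolves a single --sort token: it tries, in order,
 * the common, hpp, branch-stack and memory dimension tables (matching by
 * case-insensitive prefix) and finally falls back to a dynamic tracepoint
 * field via add_dynamic_entry().  The --sort string itself is split up by
 * setup_sort_list() below, where keys wrapped in '{}' are kept on the same
 * hierarchy level, e.g. (illustrative) --sort '{comm,dso},sym'.
 */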
static int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
			       struct perf_evlist *evlist,
			       int level)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sd->entry == &sort_parent) {
			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
			if (ret) {
				char err[BUFSIZ];

				regerror(ret, &parent_regex, err, sizeof(err));
				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
				return -EINVAL;
			}
			list->parent = 1;
		} else if (sd->entry == &sort_sym) {
			list->sym = 1;
			/*
			 * perf diff displays the performance difference among
			 * two or more perf.data files.  Those files could come
			 * from different binaries, so we should not compare
			 * their ips, but the names of their symbols.
			 */
			if (sort__mode == SORT_MODE__DIFF)
				sd->entry->se_collapse = sort__sym_sort;

		} else if (sd->entry == &sort_dso) {
			list->dso = 1;
		} else if (sd->entry == &sort_socket) {
			list->socket = 1;
		} else if (sd->entry == &sort_thread) {
			list->thread = 1;
		} else if (sd->entry == &sort_comm) {
			list->comm = 1;
		}

		return __sort_dimension__add(sd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add(hd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__BRANCH)
			return -EINVAL;

		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__MEMORY)
			return -EINVAL;

		if (sd->entry == &sort_mem_dcacheline && cacheline_size == 0)
			return -EINVAL;

		if (sd->entry == &sort_mem_daddr_sym)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	if (!add_dynamic_entry(evlist, tok, level))
		return 0;

	return -ESRCH;
}

static int setup_sort_list(struct perf_hpp_list *list, char *str,
			   struct perf_evlist *evlist)
{
	char *tmp, *tok;
	int ret = 0;
	int level = 0;
	int next_level = 1;
	bool in_group = false;

	do {
		tok = str;
		tmp = strpbrk(str, "{}, ");
		if (tmp) {
			if (in_group)
				next_level = level;
			else
				next_level = level + 1;

			if (*tmp == '{')
				in_group = true;
			else if (*tmp == '}')
				in_group = false;

			*tmp = '\0';
			str = tmp + 1;
		}

		if (*tok) {
			ret = sort_dimension__add(list, tok, evlist, level);
			if (ret == -EINVAL) {
				if (!cacheline_size && !strncasecmp(tok, "dcacheline", strlen(tok)))
					error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
				else
					error("Invalid --sort key: `%s'", tok);
				break;
			} else if (ret == -ESRCH) {
				error("Unknown --sort key: `%s'", tok);
				break;
			}
		}

		level = next_level;
	} while (tmp);

	return ret;
}
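/*
 * The table below is indexed by sort__mode, so its entries must stay in the
 * same order as the sort_mode enumeration.  When every event in the evlist
 * is a tracepoint, the mode is switched to SORT_MODE__TRACEPOINT so that the
 * trace output becomes the default sort key.
 */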
static const char *get_default_sort_order(struct perf_evlist *evlist)
{
	const char *default_sort_orders[] = {
		default_sort_order,
		default_branch_sort_order,
		default_mem_sort_order,
		default_top_sort_order,
		default_diff_sort_order,
		default_tracepoint_sort_order,
	};
	bool use_trace = true;
	struct perf_evsel *evsel;

	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));

	if (evlist == NULL)
		goto out_no_evlist;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
			use_trace = false;
			break;
		}
	}

	if (use_trace) {
		sort__mode = SORT_MODE__TRACEPOINT;
		if (symbol_conf.raw_trace)
			return "trace_fields";
	}
out_no_evlist:
	return default_sort_orders[sort__mode];
}

static int setup_sort_order(struct perf_evlist *evlist)
{
	char *new_sort_order;

	/*
	 * Append a '+'-prefixed sort order to the default sort
	 * order string.
	 */
	if (!sort_order || is_strict_order(sort_order))
		return 0;

	if (sort_order[1] == '\0') {
		error("Invalid --sort key: `+'");
		return -EINVAL;
	}

	/*
	 * We allocate a new sort_order string, but we never free it,
	 * because it's checked throughout the rest of the code.
	 */
	if (asprintf(&new_sort_order, "%s,%s",
		     get_default_sort_order(evlist), sort_order + 1) < 0) {
		error("Not enough memory to set up --sort");
		return -ENOMEM;
	}

	sort_order = new_sort_order;
	return 0;
}

/*
 * Adds the 'pre,' prefix to 'str' if 'pre' is
 * not already part of 'str'.
 */
static char *prefix_if_not_in(const char *pre, char *str)
{
	char *n;

	if (!str || strstr(str, pre))
		return str;

	if (asprintf(&n, "%s,%s", pre, str) < 0)
		return NULL;

	free(str);
	return n;
}

static char *setup_overhead(char *keys)
{
	if (sort__mode == SORT_MODE__DIFF)
		return keys;

	keys = prefix_if_not_in("overhead", keys);

	if (symbol_conf.cumulate_callchain)
		keys = prefix_if_not_in("overhead_children", keys);

	return keys;
}
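/*
 * Rough sketch of how the final sort key string is built by
 * __setup_sorting() below, assuming the normal-mode defaults
 * "comm,dso,symbol" and no --fields option (illustrative only):
 *
 *   (no --sort)      ->  "overhead,comm,dso,symbol"
 *   --sort period    ->  "overhead,period"
 *   --sort +period   ->  "overhead,comm,dso,symbol,period"
 *
 * The "overhead" key (and "overhead_children" with cumulative callchains)
 * is prepended by setup_overhead(), except in diff mode or when a strict
 * --fields order was given.
 */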
static int __setup_sorting(struct perf_evlist *evlist)
{
	char *str;
	const char *sort_keys;
	int ret = 0;

	ret = setup_sort_order(evlist);
	if (ret)
		return ret;

	sort_keys = sort_order;
	if (sort_keys == NULL) {
		if (is_strict_order(field_order)) {
			/*
			 * If the user specified a field order but no sort
			 * order, honor it and don't add the default sort keys.
			 */
			return 0;
		}

		sort_keys = get_default_sort_order(evlist);
	}

	str = strdup(sort_keys);
	if (str == NULL) {
		error("Not enough memory to setup sort keys");
		return -ENOMEM;
	}

	/*
	 * Prepend overhead fields for backward compatibility.
	 */
	if (!is_strict_order(field_order)) {
		str = setup_overhead(str);
		if (str == NULL) {
			error("Not enough memory to setup overhead keys");
			return -ENOMEM;
		}
	}

	ret = setup_sort_list(&perf_hpp_list, str, evlist);

	free(str);
	return ret;
}

void perf_hpp__set_elide(int idx, bool elide)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_width_idx == idx) {
			fmt->elide = elide;
			break;
		}
	}
}

static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
{
	if (list && strlist__nr_entries(list) == 1) {
		if (fp != NULL)
			fprintf(fp, "# %s: %s\n", list_name,
				strlist__entry(list, 0)->s);
		return true;
	}
	return false;
}

static bool get_elide(int idx, FILE *output)
{
	switch (idx) {
	case HISTC_SYMBOL:
		return __get_elide(symbol_conf.sym_list, "symbol", output);
	case HISTC_DSO:
		return __get_elide(symbol_conf.dso_list, "dso", output);
	case HISTC_COMM:
		return __get_elide(symbol_conf.comm_list, "comm", output);
	default:
		break;
	}

	if (sort__mode != SORT_MODE__BRANCH)
		return false;

	switch (idx) {
	case HISTC_SYMBOL_FROM:
		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
	case HISTC_SYMBOL_TO:
		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
	case HISTC_DSO_FROM:
		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
	case HISTC_DSO_TO:
		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
	default:
		break;
	}

	return false;
}
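/*
 * A column is elided when the corresponding filter list pins it to a single
 * value, since every row would then show the same string.  For example
 * (illustrative), "perf report --dsos libfoo.so" elides the shared object
 * column and reports the chosen value in a header comment instead.
 */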
void sort__setup_elide(FILE *output)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		fmt->elide = get_elide(hse->se->se_width_idx, output);
	}

	/*
	 * It makes no sense to elide all of the sort entries.
	 * Just revert them so they show up again.
	 */
	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		if (!fmt->elide)
			return;
	}

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		fmt->elide = false;
	}
}

static int output_field_add(struct perf_hpp_list *list, char *tok)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add_output(list, hd);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	return -ESRCH;
}

static int setup_output_list(struct perf_hpp_list *list, char *str)
{
	char *tmp, *tok;
	int ret = 0;

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		ret = output_field_add(list, tok);
		if (ret == -EINVAL) {
			error("Invalid --fields key: `%s'", tok);
			break;
		} else if (ret == -ESRCH) {
			error("Unknown --fields key: `%s'", tok);
			break;
		}
	}

	return ret;
}

static void reset_dimensions(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
		common_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
		hpp_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
		bstack_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
		memory_sort_dimensions[i].taken = 0;
}

bool is_strict_order(const char *order)
{
	return order && (*order != '+');
}

static int __setup_output_field(void)
{
	char *str, *strp;
	int ret = -EINVAL;

	if (field_order == NULL)
		return 0;

	strp = str = strdup(field_order);
	if (str == NULL) {
		error("Not enough memory to setup output fields");
		return -ENOMEM;
	}

	if (!is_strict_order(field_order))
		strp++;

	if (!strlen(strp)) {
		error("Invalid --fields key: `+'");
		goto out;
	}

	ret = setup_output_list(&perf_hpp_list, strp);

out:
	free(str);
	return ret;
}
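/*
 * Main entry point: resolve the sort keys, optionally add the "parent" key,
 * set up the output fields, and then mirror sort keys into output columns
 * and vice versa so that both lists end up consistent.
 */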
int setup_sorting(struct perf_evlist *evlist)
{
	int err;

	err = __setup_sorting(evlist);
	if (err < 0)
		return err;

	if (parent_pattern != default_parent_pattern) {
		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
		if (err < 0)
			return err;
	}

	reset_dimensions();

	/*
	 * perf diff doesn't use default hpp output fields.
	 */
	if (sort__mode != SORT_MODE__DIFF)
		perf_hpp__init();

	err = __setup_output_field();
	if (err < 0)
		return err;

	/* copy sort keys to output fields */
	perf_hpp__setup_output_field(&perf_hpp_list);
	/* and then copy output fields to sort keys */
	perf_hpp__append_sort_keys(&perf_hpp_list);

	/* setup hists-specific output fields */
	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
		return -1;

	return 0;
}

void reset_output_field(void)
{
	perf_hpp_list.need_collapse = 0;
	perf_hpp_list.parent = 0;
	perf_hpp_list.sym = 0;
	perf_hpp_list.dso = 0;

	field_order = NULL;
	sort_order = NULL;

	reset_dimensions();
	perf_hpp__reset_output_field(&perf_hpp_list);
}