// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include <sys/mman.h>
#include "sort.h"
#include "hist.h"
#include "comm.h"
#include "symbol.h"
#include "thread.h"
#include "evsel.h"
#include "evlist.h"
#include "strlist.h"
#include <traceevent/event-parse.h>
#include "mem-events.h"
#include <linux/kernel.h>

regex_t		parent_regex;
const char	default_parent_pattern[] = "^sys_|^do_page_fault";
const char	*parent_pattern = default_parent_pattern;
const char	*default_sort_order = "comm,dso,symbol";
const char	default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char	default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
const char	default_top_sort_order[] = "dso,symbol";
const char	default_diff_sort_order[] = "dso,symbol";
const char	default_tracepoint_sort_order[] = "trace";
const char	*sort_order;
const char	*field_order;
regex_t		ignore_callees_regex;
int		have_ignore_callees = 0;
enum sort_mode	sort__mode = SORT_MODE__NORMAL;

/*
 * Replaces all occurrences of the character used with the:
 *
 *  -t, --field-separator
 *
 * option.  That option uses a special separator character and doesn't pad
 * with spaces, so every occurrence of this separator in symbol names (and
 * other output) is replaced with a '.' character, making it the only
 * invalid separator.
 */
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
{
	int n;
	va_list ap;

	va_start(ap, fmt);
	n = vsnprintf(bf, size, fmt, ap);
	if (symbol_conf.field_sep && n > 0) {
		char *sep = bf;

		while (1) {
			sep = strchr(sep, *symbol_conf.field_sep);
			if (sep == NULL)
				break;
			*sep = '.';
		}
	}
	va_end(ap);

	if (n >= (int)size)
		return size - 1;
	return n;
}

static int64_t cmp_null(const void *l, const void *r)
{
	if (!l && !r)
		return 0;
	else if (!l)
		return -1;
	else
		return 1;
}

/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->tid - left->thread->tid;
}

static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	const char *comm = thread__comm_str(he->thread);

	width = max(7U, width) - 8;
	return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid,
			       width, width, comm ?: "");
}

static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct thread *th = arg;

	if (type != HIST_FILTER__THREAD)
		return -1;

	return th && he->thread != th;
}

struct sort_entry sort_thread = {
	.se_header	= " Pid:Command",
	.se_cmp		= sort__thread_cmp,
	.se_snprintf	= hist_entry__thread_snprintf,
	.se_filter	= hist_entry__thread_filter,
	.se_width_idx	= HISTC_THREAD,
};
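
/*
 * Illustrative sketch (not part of the original file): every sort key below
 * follows the pattern of sort_thread above -- an ->se_cmp() used for
 * ordering/collapsing, an ->se_snprintf() that formats one column cell, and
 * optionally an ->se_filter().  A hypothetical new key would be wired up as:
 *
 *	static int64_t sort__foo_cmp(struct hist_entry *l, struct hist_entry *r)
 *	{
 *		return r->foo - l->foo;			// hypothetical member
 *	}
 *
 *	struct sort_entry sort_foo = {
 *		.se_header	= "Foo",
 *		.se_cmp		= sort__foo_cmp,
 *		.se_snprintf	= hist_entry__foo_snprintf,	// hypothetical
 *		.se_width_idx	= HISTC_FOO,			// hypothetical
 *	};
 */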

/* --sort comm */

/*
 * We can't use pointer comparison in functions below,
 * because it gives different results based on pointer
 * values, which could break some sorting assumptions.
 */
static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
}

struct sort_entry sort_comm = {
	.se_header	= "Command",
	.se_cmp		= sort__comm_cmp,
	.se_collapse	= sort__comm_collapse,
	.se_sort	= sort__comm_sort,
	.se_snprintf	= hist_entry__comm_snprintf,
	.se_filter	= hist_entry__thread_filter,
	.se_width_idx	= HISTC_COMM,
};
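
/*
 * Illustrative note (not part of the original file): comparing the comm
 * pointers themselves would make the order depend on allocation addresses,
 * which vary from run to run, so the functions above compare the resolved
 * strings instead, e.g.:
 *
 *	// stable, reproducible ordering regardless of where comms were allocated
 *	int64_t cmp = strcmp(comm__str(right->comm), comm__str(left->comm));
 */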

/* --sort dso */

static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
{
	struct dso *dso_l = map_l ? map_l->dso : NULL;
	struct dso *dso_r = map_r ? map_r->dso : NULL;
	const char *dso_name_l, *dso_name_r;

	if (!dso_l || !dso_r)
		return cmp_null(dso_r, dso_l);

	if (verbose > 0) {
		dso_name_l = dso_l->long_name;
		dso_name_r = dso_r->long_name;
	} else {
		dso_name_l = dso_l->short_name;
		dso_name_r = dso_r->short_name;
	}

	return strcmp(dso_name_l, dso_name_r);
}

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_snprintf(struct map *map, char *bf,
				     size_t size, unsigned int width)
{
	if (map && map->dso) {
		const char *dso_name = verbose > 0 ? map->dso->long_name :
			map->dso->short_name;
		return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
}

static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
}

static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->ms.map || he->ms.map->dso != dso);
}

struct sort_entry sort_dso = {
	.se_header	= "Shared Object",
	.se_cmp		= sort__dso_cmp,
	.se_snprintf	= hist_entry__dso_snprintf,
	.se_filter	= hist_entry__dso_filter,
	.se_width_idx	= HISTC_DSO,
};

/* --sort symbol */

static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
{
	return (int64_t)(right_ip - left_ip);
}

static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	if (sym_l == sym_r)
		return 0;

	if (sym_l->inlined || sym_r->inlined)
		return strcmp(sym_l->name, sym_r->name);

	if (sym_l->start != sym_r->start)
		return (int64_t)(sym_r->start - sym_l->start);

	return (int64_t)(sym_r->end - sym_l->end);
}

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	if (!left->ms.sym && !right->ms.sym)
		return _sort__addr_cmp(left->ip, right->ip);

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	if (!hists__has(left->hists, dso) || !hists__has(right->hists, dso)) {
		ret = sort__dso_cmp(left, right);
		if (ret != 0)
			return ret;
	}

	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
}

static int64_t
sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->ms.sym || !right->ms.sym)
		return cmp_null(left->ms.sym, right->ms.sym);

	return strcmp(right->ms.sym->name, left->ms.sym->name);
}
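
/*
 * Illustrative example (not part of the original file): symbol start/end
 * addresses are relative to their DSO, so on their own they only order
 * entries within one DSO -- a symbol at offset 0x1000 in libc and one at
 * offset 0x1000 in another library would otherwise compare equal.  That is
 * why sort__sym_cmp() above first falls back to sort__dso_cmp() when the
 * dso key is not part of the sort order:
 *
 *	ret = sort__dso_cmp(left, right);	// group by DSO first
 *	if (ret != 0)
 *		return ret;
 *	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
 */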

static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
				     u64 ip, char level, char *bf, size_t size,
				     unsigned int width)
{
	size_t ret = 0;

	if (verbose > 0) {
		char o = map ? dso__symtab_origin(map->dso) : '!';
		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
				       BITS_PER_LONG / 4 + 2, ip, o);
	}

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (map->type == MAP__VARIABLE) {
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					ip - map->unmap_ip(map, sym->start));
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
					       width - ret,
					       sym->name);
			if (sym->inlined)
				ret += repsep_snprintf(bf + ret, size - ret,
						       " (inlined)");
		}
	} else {
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
	}

	return ret;
}

static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
					 he->level, bf, size, width);
}

static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
}

struct sort_entry sort_sym = {
	.se_header	= "Symbol",
	.se_cmp		= sort__sym_cmp,
	.se_sort	= sort__sym_sort,
	.se_snprintf	= hist_entry__sym_snprintf,
	.se_filter	= hist_entry__sym_filter,
	.se_width_idx	= HISTC_SYMBOL,
};

/* --sort srcline */

char *hist_entry__get_srcline(struct hist_entry *he)
{
	struct map *map = he->ms.map;

	if (!map)
		return SRCLINE_UNKNOWN;

	return get_srcline(map->dso, map__rip_2objdump(map, he->ip),
			   he->ms.sym, true, true, he->ip);
}

static int64_t
sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcline)
		left->srcline = hist_entry__get_srcline(left);
	if (!right->srcline)
		right->srcline = hist_entry__get_srcline(right);

	return strcmp(right->srcline, left->srcline);
}

static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (!he->srcline)
		he->srcline = hist_entry__get_srcline(he);

	return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
}

struct sort_entry sort_srcline = {
	.se_header	= "Source:Line",
	.se_cmp		= sort__srcline_cmp,
	.se_snprintf	= hist_entry__srcline_snprintf,
	.se_width_idx	= HISTC_SRCLINE,
};
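
/*
 * Illustrative note (not part of the original file): get_srcline() resolves
 * an objdump-style address to a "file.c:line" string (or SRCLINE_UNKNOWN),
 * so sorting on it groups samples that hit the same source line, e.g.:
 *
 *	char *s = hist_entry__get_srcline(he);
 *	// s could look like "kernel/sched/core.c:3520" (example value only)
 */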

/* --sort srcline_from */

static int64_t
sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_from) {
		struct map *map = left->branch_info->from.map;
		if (!map)
			left->branch_info->srcline_from = SRCLINE_UNKNOWN;
		else
			left->branch_info->srcline_from = get_srcline(map->dso,
					map__rip_2objdump(map,
						left->branch_info->from.al_addr),
					left->branch_info->from.sym,
					true, true,
					left->branch_info->from.al_addr);
	}
	if (!right->branch_info->srcline_from) {
		struct map *map = right->branch_info->from.map;
		if (!map)
			right->branch_info->srcline_from = SRCLINE_UNKNOWN;
		else
			right->branch_info->srcline_from = get_srcline(map->dso,
					map__rip_2objdump(map,
						right->branch_info->from.al_addr),
					right->branch_info->from.sym,
					true, true,
					right->branch_info->from.al_addr);
	}
	return strcmp(right->branch_info->srcline_from,
		      left->branch_info->srcline_from);
}

static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
}

struct sort_entry sort_srcline_from = {
	.se_header	= "From Source:Line",
	.se_cmp		= sort__srcline_from_cmp,
	.se_snprintf	= hist_entry__srcline_from_snprintf,
	.se_width_idx	= HISTC_SRCLINE_FROM,
};

/* --sort srcline_to */

static int64_t
sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_to) {
		struct map *map = left->branch_info->to.map;
		if (!map)
			left->branch_info->srcline_to = SRCLINE_UNKNOWN;
		else
			left->branch_info->srcline_to = get_srcline(map->dso,
					map__rip_2objdump(map,
						left->branch_info->to.al_addr),
					left->branch_info->from.sym,
					true, true,
					left->branch_info->to.al_addr);
	}
	if (!right->branch_info->srcline_to) {
		struct map *map = right->branch_info->to.map;
		if (!map)
			right->branch_info->srcline_to = SRCLINE_UNKNOWN;
		else
			right->branch_info->srcline_to = get_srcline(map->dso,
					map__rip_2objdump(map,
						right->branch_info->to.al_addr),
					right->branch_info->to.sym,
					true, true,
					right->branch_info->to.al_addr);
	}
	return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
}

static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
}

struct sort_entry sort_srcline_to = {
	.se_header	= "To Source:Line",
	.se_cmp		= sort__srcline_to_cmp,
	.se_snprintf	= hist_entry__srcline_to_snprintf,
	.se_width_idx	= HISTC_SRCLINE_TO,
};

/* --sort srcfile */

static char no_srcfile[1];

static char *hist_entry__get_srcfile(struct hist_entry *e)
{
	char *sf, *p;
	struct map *map = e->ms.map;

	if (!map)
		return no_srcfile;

	sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
			 e->ms.sym, false, true, true, e->ip);
	if (!strcmp(sf, SRCLINE_UNKNOWN))
		return no_srcfile;
	p = strchr(sf, ':');
	if (p && *sf) {
		*p = 0;
		return sf;
	}
	free(sf);
	return no_srcfile;
}

static int64_t
sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcfile)
		left->srcfile = hist_entry__get_srcfile(left);
	if (!right->srcfile)
		right->srcfile = hist_entry__get_srcfile(right);

	return strcmp(right->srcfile, left->srcfile);
}

static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (!he->srcfile)
		he->srcfile = hist_entry__get_srcfile(he);

	return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
}

struct sort_entry sort_srcfile = {
	.se_header	= "Source File",
	.se_cmp		= sort__srcfile_cmp,
	.se_snprintf	= hist_entry__srcfile_snprintf,
	.se_width_idx	= HISTC_SRCFILE,
};

/* --sort parent */

static int64_t
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct symbol *sym_l = left->parent;
	struct symbol *sym_r = right->parent;

	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	return strcmp(sym_r->name, sym_l->name);
}

static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width,
			      he->parent ? he->parent->name : "[other]");
}

struct sort_entry sort_parent = {
	.se_header	= "Parent symbol",
	.se_cmp		= sort__parent_cmp,
	.se_snprintf	= hist_entry__parent_snprintf,
	.se_width_idx	= HISTC_PARENT,
};

/* --sort cpu */

static int64_t
sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->cpu - left->cpu;
}

static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
}

struct sort_entry sort_cpu = {
	.se_header	= "CPU",
	.se_cmp		= sort__cpu_cmp,
	.se_snprintf	= hist_entry__cpu_snprintf,
	.se_width_idx	= HISTC_CPU,
};

/* --sort cgroup_id */

static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
{
	return (int64_t)(right_dev - left_dev);
}

static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
{
	return (int64_t)(right_ino - left_ino);
}

static int64_t
sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
	if (ret != 0)
		return ret;

	return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
				       left->cgroup_id.ino);
}

static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
					  char *bf, size_t size,
					  unsigned int width __maybe_unused)
{
	return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
			       he->cgroup_id.ino);
}

struct sort_entry sort_cgroup_id = {
	.se_header	= "cgroup id (dev/inode)",
	.se_cmp		= sort__cgroup_id_cmp,
	.se_snprintf	= hist_entry__cgroup_id_snprintf,
	.se_width_idx	= HISTC_CGROUP_ID,
};

/* --sort socket */

static int64_t
sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->socket - left->socket;
}

static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width - 3, he->socket);
}

static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
{
	int sk = *(const int *)arg;

	if (type != HIST_FILTER__SOCKET)
		return -1;

	return sk >= 0 && he->socket != sk;
}

struct sort_entry sort_socket = {
	.se_header	= "Socket",
	.se_cmp		= sort__socket_cmp,
	.se_snprintf	= hist_entry__socket_snprintf,
	.se_filter	= hist_entry__socket_filter,
	.se_width_idx	= HISTC_SOCKET,
};

/* --sort trace */

static char *get_trace_output(struct hist_entry *he)
{
	struct trace_seq seq;
	struct perf_evsel *evsel;
	struct pevent_record rec = {
		.data = he->raw_data,
		.size = he->raw_size,
	};

	evsel = hists_to_evsel(he->hists);

	trace_seq_init(&seq);
	if (symbol_conf.raw_trace) {
		pevent_print_fields(&seq, he->raw_data, he->raw_size,
				    evsel->tp_format);
	} else {
		pevent_event_info(&seq, evsel->tp_format, &rec);
	}
	/*
	 * Trim the buffer, it starts at 4KB and we're not going to
	 * add anything more to this buffer.
	 */
	return realloc(seq.buffer, seq.len + 1);
}
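
/*
 * Illustrative example (not part of the original file): for a tracepoint
 * event the buffer returned by get_trace_output() above is either the raw
 * field dump (--raw-trace) or the libtraceevent pretty-printed form, roughly:
 *
 *	raw:	"prev_comm=swapper/0 prev_pid=0 prev_prio=120 ..."
 *	pretty:	"swapper/0:0 [120] R ==> foo:1234 [120]"
 *
 * (the field names shown are from sched:sched_switch and only an example)
 */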

static int64_t
sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_evsel *evsel;

	evsel = hists_to_evsel(left->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		return 0;

	if (left->trace_output == NULL)
		left->trace_output = get_trace_output(left);
	if (right->trace_output == NULL)
		right->trace_output = get_trace_output(right);

	return strcmp(right->trace_output, left->trace_output);
}

static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	struct perf_evsel *evsel;

	evsel = hists_to_evsel(he->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		return scnprintf(bf, size, "%-.*s", width, "N/A");

	if (he->trace_output == NULL)
		he->trace_output = get_trace_output(he);
	return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
}

struct sort_entry sort_trace = {
	.se_header	= "Trace output",
	.se_cmp		= sort__trace_cmp,
	.se_snprintf	= hist_entry__trace_snprintf,
	.se_width_idx	= HISTC_TRACE,
};

/* sort keys for branch stacks */

static int64_t
sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->from.map,
			      right->branch_info->from.map);
}

static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->from.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->from.map ||
		       he->branch_info->from.map->dso != dso);
}

static int64_t
sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->to.map,
			      right->branch_info->to.map);
}

static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->to.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->to.map ||
		       he->branch_info->to.map->dso != dso);
}

static int64_t
sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *from_l = &left->branch_info->from;
	struct addr_map_symbol *from_r = &right->branch_info->from;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;

	if (!from_l->sym && !from_r->sym)
		return _sort__addr_cmp(from_l->addr, from_r->addr);

	return _sort__sym_cmp(from_l->sym, from_r->sym);
}

static int64_t
sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *to_l, *to_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	to_l = &left->branch_info->to;
	to_r = &right->branch_info->to;

	if (!to_l->sym && !to_r->sym)
		return _sort__addr_cmp(to_l->addr, to_r->addr);

	return _sort__sym_cmp(to_l->sym, to_r->sym);
}

static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;

		return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
						 he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;

		return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
						 he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->from.sym &&
			strstr(he->branch_info->from.sym->name, sym));
}

static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->to.sym &&
			strstr(he->branch_info->to.sym->name, sym));
}

struct sort_entry sort_dso_from = {
	.se_header	= "Source Shared Object",
	.se_cmp		= sort__dso_from_cmp,
	.se_snprintf	= hist_entry__dso_from_snprintf,
	.se_filter	= hist_entry__dso_from_filter,
	.se_width_idx	= HISTC_DSO_FROM,
};

struct sort_entry sort_dso_to = {
	.se_header	= "Target Shared Object",
	.se_cmp		= sort__dso_to_cmp,
	.se_snprintf	= hist_entry__dso_to_snprintf,
	.se_filter	= hist_entry__dso_to_filter,
	.se_width_idx	= HISTC_DSO_TO,
};

struct sort_entry sort_sym_from = {
	.se_header	= "Source Symbol",
	.se_cmp		= sort__sym_from_cmp,
	.se_snprintf	= hist_entry__sym_from_snprintf,
	.se_filter	= hist_entry__sym_from_filter,
	.se_width_idx	= HISTC_SYMBOL_FROM,
};

struct sort_entry sort_sym_to = {
	.se_header	= "Target Symbol",
	.se_cmp		= sort__sym_to_cmp,
	.se_snprintf	= hist_entry__sym_to_snprintf,
	.se_filter	= hist_entry__sym_to_filter,
	.se_width_idx	= HISTC_SYMBOL_TO,
};

static int64_t
sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
{
	unsigned char mp, p;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
	p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
	return mp || p;
}

static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.predicted)
			out = "N";
		else if (he->branch_info->flags.mispred)
			out = "Y";
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
}

static int64_t
sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.cycles -
		right->branch_info->flags.cycles;
}

static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (!he->branch_info)
		return scnprintf(bf, size, "%-.*s", width, "N/A");
	if (he->branch_info->flags.cycles == 0)
		return repsep_snprintf(bf, size, "%-*s", width, "-");
	return repsep_snprintf(bf, size, "%-*hd", width,
			       he->branch_info->flags.cycles);
}

struct sort_entry sort_cycles = {
	.se_header	= "Basic Block Cycles",
	.se_cmp		= sort__cycles_cmp,
	.se_snprintf	= hist_entry__cycles_snprintf,
	.se_width_idx	= HISTC_CYCLES,
};

/* --sort daddr_sym */
int64_t
sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->daddr.addr;
	if (right->mem_info)
		r = right->mem_info->daddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;

	if (he->mem_info) {
		addr = he->mem_info->daddr.addr;
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;
	}
	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
					 width);
}

int64_t
sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->iaddr.addr;
	if (right->mem_info)
		r = right->mem_info->iaddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;

	if (he->mem_info) {
		addr = he->mem_info->iaddr.addr;
		map = he->mem_info->iaddr.map;
		sym = he->mem_info->iaddr.sym;
	}
	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
					 width);
}

static int64_t
sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct map *map_l = NULL;
	struct map *map_r = NULL;

	if (left->mem_info)
		map_l = left->mem_info->daddr.map;
	if (right->mem_info)
		map_r = right->mem_info->daddr.map;

	return _sort__dso_cmp(map_l, map_r);
}

static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{
	struct map *map = NULL;

	if (he->mem_info)
		map = he->mem_info->daddr.map;

	return _hist_entry__dso_snprintf(map, bf, size, width);
}
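
/*
 * Illustrative sketch (not part of the original file): the mem_* comparisons
 * below all follow the same pattern -- copy the perf_mem_data_src union (or
 * substitute the matching *_NA value when mem_info is missing) and compare a
 * single bitfield of it, e.g. for the lock attribute:
 *
 *	union perf_mem_data_src l = { .mem_lock = PERF_MEM_LOCK_NA };
 *
 *	if (left->mem_info)
 *		l = left->mem_info->data_src;
 *	// ... same for the right side, then compare r.mem_lock - l.mem_lock
 */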

static int64_t
sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lock = PERF_MEM_LOCK_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lock = PERF_MEM_LOCK_NA;

	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
}

static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	char out[10];

	perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%.*s", width, out);
}

static int64_t
sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;

	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
}

static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lvl = PERF_MEM_LVL_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lvl = PERF_MEM_LVL_NA;

	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
}

static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;

	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
}

static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	char out[64];

	perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

int64_t
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 l, r;
	struct map *l_map, *r_map;

	if (!left->mem_info)  return -1;
	if (!right->mem_info) return 1;

	/* group event types together */
	if (left->cpumode > right->cpumode) return -1;
	if (left->cpumode < right->cpumode) return 1;

	l_map = left->mem_info->daddr.map;
	r_map = right->mem_info->daddr.map;

	/* if both are NULL, jump to sort on al_addr instead */
	if (!l_map && !r_map)
		goto addr;

	if (!l_map) return -1;
	if (!r_map) return 1;

	if (l_map->maj > r_map->maj) return -1;
	if (l_map->maj < r_map->maj) return 1;

	if (l_map->min > r_map->min) return -1;
	if (l_map->min < r_map->min) return 1;

	if (l_map->ino > r_map->ino) return -1;
	if (l_map->ino < r_map->ino) return 1;

	if (l_map->ino_generation > r_map->ino_generation) return -1;
	if (l_map->ino_generation < r_map->ino_generation) return 1;

	/*
	 * Addresses with no major/minor numbers are assumed to be
	 * anonymous in userspace.  Sort those on pid then address.
	 *
	 * The kernel and non-zero major/minor mapped areas are
	 * assumed to be unity mapped.  Sort those on address.
	 */

	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
	    (!(l_map->flags & MAP_SHARED)) &&
	    !l_map->maj && !l_map->min && !l_map->ino &&
	    !l_map->ino_generation) {
		/* userspace anonymous */

		if (left->thread->pid_ > right->thread->pid_) return -1;
		if (left->thread->pid_ < right->thread->pid_) return 1;
	}

addr:
	/* al_addr does all the right addr - start + offset calculations */
	l = cl_address(left->mem_info->daddr.al_addr);
	r = cl_address(right->mem_info->daddr.al_addr);

	if (l > r) return -1;
	if (l < r) return 1;

	return 0;
}

static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{

	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;
	char level = he->level;

	if (he->mem_info) {
		addr = cl_address(he->mem_info->daddr.al_addr);
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;

		/* print [s] for shared data mmaps */
		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
		    map && (map->type == MAP__VARIABLE) &&
		    (map->flags & MAP_SHARED) &&
		    (map->maj || map->min || map->ino ||
		     map->ino_generation))
			level = 's';
		else if (!map)
			level = 'X';
	}
	return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
					 width);
}
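
/*
 * Illustrative note (not part of the original file): cl_address() masks off
 * the low bits of the data address so that all accesses falling into the
 * same cache line collapse into one entry, e.g. with 64-byte lines:
 *
 *	cl_address(0x1000) == cl_address(0x103f)	// same line
 *	cl_address(0x1000) != cl_address(0x1040)	// next line
 *
 * The actual line size comes from the running system rather than being
 * hard-coded here.
 */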

struct sort_entry sort_mispredict = {
	.se_header	= "Branch Mispredicted",
	.se_cmp		= sort__mispredict_cmp,
	.se_snprintf	= hist_entry__mispredict_snprintf,
	.se_width_idx	= HISTC_MISPREDICT,
};

static u64 he_weight(struct hist_entry *he)
{
	return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
}

static int64_t
sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return he_weight(left) - he_weight(right);
}

static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
}

struct sort_entry sort_local_weight = {
	.se_header	= "Local Weight",
	.se_cmp		= sort__local_weight_cmp,
	.se_snprintf	= hist_entry__local_weight_snprintf,
	.se_width_idx	= HISTC_LOCAL_WEIGHT,
};

static int64_t
sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->stat.weight - right->stat.weight;
}

static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
					      size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
}

struct sort_entry sort_global_weight = {
	.se_header	= "Weight",
	.se_cmp		= sort__global_weight_cmp,
	.se_snprintf	= hist_entry__global_weight_snprintf,
	.se_width_idx	= HISTC_GLOBAL_WEIGHT,
};

struct sort_entry sort_mem_daddr_sym = {
	.se_header	= "Data Symbol",
	.se_cmp		= sort__daddr_cmp,
	.se_snprintf	= hist_entry__daddr_snprintf,
	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_iaddr_sym = {
	.se_header	= "Code Symbol",
	.se_cmp		= sort__iaddr_cmp,
	.se_snprintf	= hist_entry__iaddr_snprintf,
	.se_width_idx	= HISTC_MEM_IADDR_SYMBOL,
};

struct sort_entry sort_mem_daddr_dso = {
	.se_header	= "Data Object",
	.se_cmp		= sort__dso_daddr_cmp,
	.se_snprintf	= hist_entry__dso_daddr_snprintf,
	.se_width_idx	= HISTC_MEM_DADDR_DSO,
};

struct sort_entry sort_mem_locked = {
	.se_header	= "Locked",
	.se_cmp		= sort__locked_cmp,
	.se_snprintf	= hist_entry__locked_snprintf,
	.se_width_idx	= HISTC_MEM_LOCKED,
};

struct sort_entry sort_mem_tlb = {
	.se_header	= "TLB access",
	.se_cmp		= sort__tlb_cmp,
	.se_snprintf	= hist_entry__tlb_snprintf,
	.se_width_idx	= HISTC_MEM_TLB,
};

struct sort_entry sort_mem_lvl = {
	.se_header	= "Memory access",
	.se_cmp		= sort__lvl_cmp,
	.se_snprintf	= hist_entry__lvl_snprintf,
	.se_width_idx	= HISTC_MEM_LVL,
};

struct sort_entry sort_mem_snoop = {
	.se_header	= "Snoop",
	.se_cmp		= sort__snoop_cmp,
	.se_snprintf	= hist_entry__snoop_snprintf,
	.se_width_idx	= HISTC_MEM_SNOOP,
};

struct sort_entry sort_mem_dcacheline = {
	.se_header	= "Data Cacheline",
	.se_cmp		= sort__dcacheline_cmp,
	.se_snprintf	= hist_entry__dcacheline_snprintf,
	.se_width_idx	= HISTC_MEM_DCACHELINE,
};

static int64_t
sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->daddr.phys_addr;
	if (right->mem_info)
		r = right->mem_info->daddr.phys_addr;

	return (int64_t)(r - l);
}

static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	uint64_t addr = 0;
	size_t ret = 0;
	size_t len = BITS_PER_LONG / 4;

	addr = he->mem_info->daddr.phys_addr;

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);

	ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);

	ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");

	if (ret > width)
		bf[width] = '\0';

	return width;
}

struct sort_entry sort_mem_phys_daddr = {
	.se_header	= "Data Physical Address",
	.se_cmp		= sort__phys_daddr_cmp,
	.se_snprintf	= hist_entry__phys_daddr_snprintf,
	.se_width_idx	= HISTC_MEM_PHYS_DADDR,
};

static int64_t
sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.abort !=
		right->branch_info->flags.abort;
}

static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.abort)
			out = "A";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_abort = {
	.se_header	= "Transaction abort",
	.se_cmp		= sort__abort_cmp,
	.se_snprintf	= hist_entry__abort_snprintf,
	.se_width_idx	= HISTC_ABORT,
};

static int64_t
sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.in_tx !=
		right->branch_info->flags.in_tx;
}

static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.in_tx)
			out = "T";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_in_tx = {
	.se_header	= "Branch in transaction",
	.se_cmp		= sort__in_tx_cmp,
	.se_snprintf	= hist_entry__in_tx_snprintf,
	.se_width_idx	= HISTC_IN_TX,
};

static int64_t
sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->transaction - right->transaction;
}

static inline char *add_str(char *p, const char *str)
{
	strcpy(p, str);
	return p + strlen(str);
}

static struct txbit {
	unsigned flag;
	const char *name;
	int skip_for_len;
} txbits[] = {
	{ PERF_TXN_ELISION,        "EL ",        0 },
	{ PERF_TXN_TRANSACTION,    "TX ",        1 },
	{ PERF_TXN_SYNC,           "SYNC ",      1 },
	{ PERF_TXN_ASYNC,          "ASYNC ",     0 },
	{ PERF_TXN_RETRY,          "RETRY ",     0 },
	{ PERF_TXN_CONFLICT,       "CON ",       0 },
	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
	{ PERF_TXN_CAPACITY_READ,  "CAP-READ ",  0 },
	{ 0, NULL, 0 }
};

int hist_entry__transaction_len(void)
{
	int i;
	int len = 0;

	for (i = 0; txbits[i].name; i++) {
		if (!txbits[i].skip_for_len)
			len += strlen(txbits[i].name);
	}
	len += 4; /* :XX<space> */
	return len;
}
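
/*
 * Illustrative example (not part of the original file): the transaction word
 * is a set of PERF_TXN_* flag bits plus an abort code stored under
 * PERF_TXN_ABORT_MASK.  A value with PERF_TXN_TRANSACTION | PERF_TXN_SYNC set
 * and abort code 1 would be rendered by the snprintf helper below roughly as:
 *
 *	"TX SYNC :1"
 *
 * hist_entry__transaction_len() above sizes the column using only the names
 * not marked skip_for_len, plus 4 characters for the ":XX " abort code.
 */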

static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
					    size_t size, unsigned int width)
{
	u64 t = he->transaction;
	char buf[128];
	char *p = buf;
	int i;

	buf[0] = 0;
	for (i = 0; txbits[i].name; i++)
		if (txbits[i].flag & t)
			p = add_str(p, txbits[i].name);
	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
		p = add_str(p, "NEITHER ");
	if (t & PERF_TXN_ABORT_MASK) {
		sprintf(p, ":%" PRIx64,
			(t & PERF_TXN_ABORT_MASK) >>
			PERF_TXN_ABORT_SHIFT);
		p += strlen(p);
	}

	return repsep_snprintf(bf, size, "%-*s", width, buf);
}

struct sort_entry sort_transaction = {
	.se_header	= "Transaction ",
	.se_cmp		= sort__transaction_cmp,
	.se_snprintf	= hist_entry__transaction_snprintf,
	.se_width_idx	= HISTC_TRANSACTION,
};

/* --sort symbol_size */

static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
	int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;

	return size_l < size_r ? -1 :
		size_l == size_r ? 0 : 1;
}

static int64_t
sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
}

static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
					  size_t bf_size, unsigned int width)
{
	if (sym)
		return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));

	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
}

static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
}

struct sort_entry sort_sym_size = {
	.se_header	= "Symbol size",
	.se_cmp		= sort__sym_size_cmp,
	.se_snprintf	= hist_entry__sym_size_snprintf,
	.se_width_idx	= HISTC_SYM_SIZE,
};

/* --sort dso_size */

static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
{
	int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
	int64_t size_r = map_r != NULL ? map__size(map_r) : 0;

	return size_l < size_r ? -1 :
		size_l == size_r ? 0 : 1;
}

static int64_t
sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_size_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
					  size_t bf_size, unsigned int width)
{
	if (map && map->dso)
		return repsep_snprintf(bf, bf_size, "%*d", width,
				       map__size(map));

	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
}

static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
}

struct sort_entry sort_dso_size = {
	.se_header	= "DSO size",
	.se_cmp		= sort__dso_size_cmp,
	.se_snprintf	= hist_entry__dso_size_snprintf,
	.se_width_idx	= HISTC_DSO_SIZE,
};


struct sort_dimension {
	const char		*name;
	struct sort_entry	*entry;
	int			taken;
};

#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }

static struct sort_dimension common_sort_dimensions[] = {
	DIM(SORT_PID, "pid", sort_thread),
	DIM(SORT_COMM, "comm", sort_comm),
	DIM(SORT_DSO, "dso", sort_dso),
	DIM(SORT_SYM, "symbol", sort_sym),
	DIM(SORT_PARENT, "parent", sort_parent),
	DIM(SORT_CPU, "cpu", sort_cpu),
	DIM(SORT_SOCKET, "socket", sort_socket),
	DIM(SORT_SRCLINE, "srcline", sort_srcline),
	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
	DIM(SORT_TRACE, "trace", sort_trace),
	DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
	DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
	DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }

static struct sort_dimension bstack_sort_dimensions[] = {
	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
	DIM(SORT_ABORT, "abort", sort_abort),
	DIM(SORT_CYCLES, "cycles", sort_cycles),
	DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
	DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }

static struct sort_dimension memory_sort_dimensions[] = {
	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
	DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
};

#undef DIM

struct hpp_dimension {
	const char		*name;
	struct perf_hpp_fmt	*fmt;
	int			taken;
};

#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
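
/*
 * Illustrative note (not part of the original file): this DIM() variant
 * indexes perf_hpp__format[] directly, so for example
 *
 *	DIM(PERF_HPP__OVERHEAD, "overhead")
 *
 * expands to
 *
 *	{ .name = "overhead", .fmt = &perf_hpp__format[PERF_HPP__OVERHEAD], }
 *
 * mirroring how the sort-key tables above map option names to sort_entry.
 */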

static struct hpp_dimension hpp_sort_dimensions[] = {
	DIM(PERF_HPP__OVERHEAD, "overhead"),
	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
	DIM(PERF_HPP__SAMPLES, "sample"),
	DIM(PERF_HPP__PERIOD, "period"),
};

#undef DIM

struct hpp_sort_entry {
	struct perf_hpp_fmt hpp;
	struct sort_entry *se;
};

void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_sort_entry *hse;

	if (!perf_hpp__is_sort_entry(fmt))
		return;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
}

static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists, int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
}

static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return len;
}

static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(he->hists, hse->se->se_width_idx);

	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
}

static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	return hse->se->se_cmp(a, b);
}

static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
				    struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
	return collapse_fn(a, b);
}

static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
				struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
	return sort_fn(a, b);
}

bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
{
	return format->header == __sort__hpp_header;
}

#define MK_SORT_ENTRY_CHK(key)					\
bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt)	\
{								\
	struct hpp_sort_entry *hse;				\
								\
	if (!perf_hpp__is_sort_entry(fmt))			\
		return false;					\
								\
	hse = container_of(fmt, struct hpp_sort_entry, hpp);	\
	return hse->se == &sort_ ## key ;			\
}

MK_SORT_ENTRY_CHK(trace)
MK_SORT_ENTRY_CHK(srcline)
MK_SORT_ENTRY_CHK(srcfile)
MK_SORT_ENTRY_CHK(thread)
MK_SORT_ENTRY_CHK(comm)
MK_SORT_ENTRY_CHK(dso)
MK_SORT_ENTRY_CHK(sym)


static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_sort_entry *hse_a;
	struct hpp_sort_entry *hse_b;

	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
		return false;

	hse_a = container_of(a, struct hpp_sort_entry, hpp);
	hse_b = container_of(b, struct hpp_sort_entry, hpp);

	return hse_a->se == hse_b->se;
}

static void hse_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	free(hse);
}

static struct hpp_sort_entry *
__sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
{
	struct hpp_sort_entry *hse;

	hse = malloc(sizeof(*hse));
	if (hse == NULL) {
		pr_err("Memory allocation failed\n");
		return NULL;
	}

	hse->se = sd->entry;
	hse->hpp.name = sd->entry->se_header;
	hse->hpp.header = __sort__hpp_header;
	hse->hpp.width = __sort__hpp_width;
	hse->hpp.entry = __sort__hpp_entry;
	hse->hpp.color = NULL;

	hse->hpp.cmp = __sort__hpp_cmp;
	hse->hpp.collapse = __sort__hpp_collapse;
	hse->hpp.sort = __sort__hpp_sort;
	hse->hpp.equal = __sort__hpp_equal;
	hse->hpp.free = hse_free;

	INIT_LIST_HEAD(&hse->hpp.list);
	INIT_LIST_HEAD(&hse->hpp.sort_list);
	hse->hpp.elide = false;
	hse->hpp.len = 0;
	hse->hpp.user_len = 0;
	hse->hpp.level = level;

	return hse;
}

static void hpp_free(struct perf_hpp_fmt *fmt)
{
	free(fmt);
}

static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
						       int level)
{
	struct perf_hpp_fmt *fmt;

	fmt = memdup(hd->fmt, sizeof(*fmt));
	if (fmt) {
		INIT_LIST_HEAD(&fmt->list);
		INIT_LIST_HEAD(&fmt->sort_list);
		fmt->free = hpp_free;
		fmt->level = level;
	}

	return fmt;
}

int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;
	int ret = -1;
	int r;

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_filter == NULL)
			continue;

		/*
		 * A hist entry is filtered if any of the sort keys in the
		 * hpp list applies.  But non-matched filter types should
		 * be skipped.
		 */
		r = hse->se->se_filter(he, type, arg);
		if (r >= 0) {
			if (ret < 0)
				ret = 0;
			ret |= r;
		}
	}

	return ret;
}

static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
					  struct perf_hpp_list *list,
					  int level)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);

	if (hse == NULL)
		return -1;

	perf_hpp_list__register_sort_field(list, &hse->hpp);
	return 0;
}

static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
					    struct perf_hpp_list *list)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);

	if (hse == NULL)
		return -1;

	perf_hpp_list__column_register(list, &hse->hpp);
	return 0;
}

struct hpp_dynamic_entry {
	struct perf_hpp_fmt hpp;
	struct perf_evsel *evsel;
	struct format_field *field;
	unsigned dynamic_len;
	bool raw_trace;
};

static int hde_width(struct hpp_dynamic_entry *hde)
{
	if (!hde->hpp.len) {
		int len = hde->dynamic_len;
		int namelen = strlen(hde->field->name);
		int fieldlen = hde->field->size;

		if (namelen > len)
			len = namelen;

		if (!(hde->field->flags & FIELD_IS_STRING)) {
			/* length for print hex numbers */
			fieldlen = hde->field->size * 2 + 2;
		}
		if (fieldlen > len)
			len = fieldlen;

		hde->hpp.len = len;
	}
	return hde->hpp.len;
}

static void update_dynamic_len(struct hpp_dynamic_entry *hde,
			       struct hist_entry *he)
{
	char *str, *pos;
	struct format_field *field = hde->field;
	size_t namelen;
	bool last = false;

	if (hde->raw_trace)
		return;

	/* parse pretty print result and update max length */
	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			size_t len;

			str += namelen + 1;
			len = pos - str;

			if (len > hde->dynamic_len)
				hde->dynamic_len = len;
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}
}

static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists __maybe_unused,
			      int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
}

static int __sort__hde_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return len;
}

bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	return hists_to_evsel(hists) == hde->evsel;
}
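
/*
 * Illustrative example (not part of the original file): update_dynamic_len()
 * above and __sort__hde_entry() below both scan the pretty-printed trace
 * output for space-separated "<field-name>=<value>" tokens.  For a
 * (hypothetical) output such as
 *
 *	"prev_comm=swapper/0 prev_pid=0 next_pid=1234"
 *
 * and the field "prev_pid", the text after the name plus one separator
 * character is "0", so the recorded dynamic length would be 1.
 */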
static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;
	char *str, *pos;
	struct format_field *field;
	size_t namelen;
	bool last = false;
	int ret;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	if (hde->raw_trace)
		goto raw_field;

	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	field = hde->field;
	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			str += namelen + 1;
			str = strndup(str, pos - str);

			if (str == NULL)
				return scnprintf(hpp->buf, hpp->size,
						 "%*.*s", len, len, "ERROR");
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}

	if (str == NULL) {
		struct trace_seq seq;
raw_field:
		trace_seq_init(&seq);
		pevent_print_field(&seq, he->raw_data, hde->field);
		str = seq.buffer;
	}

	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
	free(str);
	return ret;
}

static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_dynamic_entry *hde;
	struct format_field *field;
	unsigned offset, size;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (b == NULL) {
		update_dynamic_len(hde, a);
		return 0;
	}

	field = hde->field;
	if (field->flags & FIELD_IS_DYNAMIC) {
		unsigned long long dyn;

		pevent_read_number_field(field, a->raw_data, &dyn);
		offset = dyn & 0xffff;
		size = (dyn >> 16) & 0xffff;

		/* record max width for output */
		if (size > hde->dynamic_len)
			hde->dynamic_len = size;
	} else {
		offset = field->offset;
		size = field->size;
	}

	return memcmp(a->raw_data + offset, b->raw_data + offset, size);
}

bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
{
	return fmt->cmp == __sort__hde_cmp;
}

static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_dynamic_entry *hde_a;
	struct hpp_dynamic_entry *hde_b;

	if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
		return false;

	hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
	hde_b = container_of(b, struct hpp_dynamic_entry, hpp);

	return hde_a->field == hde_b->field;
}

static void hde_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
	free(hde);
}
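/*
 * Allocate a perf_hpp_fmt for a single tracepoint field: the __sort__hde_*
 * callbacks wired up below make the field act as both an output column and
 * a sort key, comparing entries by the field's raw data.
 */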
static struct hpp_dynamic_entry *
__alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field,
		      int level)
{
	struct hpp_dynamic_entry *hde;

	hde = malloc(sizeof(*hde));
	if (hde == NULL) {
		pr_debug("Memory allocation failed\n");
		return NULL;
	}

	hde->evsel = evsel;
	hde->field = field;
	hde->dynamic_len = 0;

	hde->hpp.name = field->name;
	hde->hpp.header = __sort__hde_header;
	hde->hpp.width = __sort__hde_width;
	hde->hpp.entry = __sort__hde_entry;
	hde->hpp.color = NULL;

	hde->hpp.cmp = __sort__hde_cmp;
	hde->hpp.collapse = __sort__hde_cmp;
	hde->hpp.sort = __sort__hde_cmp;
	hde->hpp.equal = __sort__hde_equal;
	hde->hpp.free = hde_free;

	INIT_LIST_HEAD(&hde->hpp.list);
	INIT_LIST_HEAD(&hde->hpp.sort_list);
	hde->hpp.elide = false;
	hde->hpp.len = 0;
	hde->hpp.user_len = 0;
	hde->hpp.level = level;

	return hde;
}

struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_fmt *new_fmt = NULL;

	if (perf_hpp__is_sort_entry(fmt)) {
		struct hpp_sort_entry *hse, *new_hse;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		new_hse = memdup(hse, sizeof(*hse));
		if (new_hse)
			new_fmt = &new_hse->hpp;
	} else if (perf_hpp__is_dynamic_entry(fmt)) {
		struct hpp_dynamic_entry *hde, *new_hde;

		hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
		new_hde = memdup(hde, sizeof(*hde));
		if (new_hde)
			new_fmt = &new_hde->hpp;
	} else {
		new_fmt = memdup(fmt, sizeof(*fmt));
	}

	INIT_LIST_HEAD(&new_fmt->list);
	INIT_LIST_HEAD(&new_fmt->sort_list);

	return new_fmt;
}
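/*
 * Split a --sort token of the form "[<event>.]<field>[/<option>]" in place.
 * For example, "sched:sched_switch.prev_comm/raw" yields
 * event = "sched:sched_switch", field = "prev_comm", opt = "raw";
 * a bare "prev_comm" yields a NULL event name.
 */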
static int parse_field_name(char *str, char **event, char **field, char **opt)
{
	char *event_name, *field_name, *opt_name;

	event_name = str;
	field_name = strchr(str, '.');

	if (field_name) {
		*field_name++ = '\0';
	} else {
		event_name = NULL;
		field_name = str;
	}

	opt_name = strchr(field_name, '/');
	if (opt_name)
		*opt_name++ = '\0';

	*event = event_name;
	*field = field_name;
	*opt = opt_name;

	return 0;
}

/* find matching evsel using a given event name.  The event name can be:
 *   1. '%' + event index (e.g. '%1' for first event)
 *   2. full event name (e.g. sched:sched_switch)
 *   3. partial event name (should not contain ':')
 */
static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
{
	struct perf_evsel *evsel = NULL;
	struct perf_evsel *pos;
	bool full_name;

	/* case 1 */
	if (event_name[0] == '%') {
		int nr = strtol(event_name+1, NULL, 0);

		if (nr > evlist->nr_entries)
			return NULL;

		evsel = perf_evlist__first(evlist);
		while (--nr > 0)
			evsel = perf_evsel__next(evsel);

		return evsel;
	}

	full_name = !!strchr(event_name, ':');
	evlist__for_each_entry(evlist, pos) {
		/* case 2 */
		if (full_name && !strcmp(pos->name, event_name))
			return pos;
		/* case 3 */
		if (!full_name && strstr(pos->name, event_name)) {
			if (evsel) {
				pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
					 event_name, evsel->name, pos->name);
				return NULL;
			}
			evsel = pos;
		}
	}

	return evsel;
}

static int __dynamic_dimension__add(struct perf_evsel *evsel,
				    struct format_field *field,
				    bool raw_trace, int level)
{
	struct hpp_dynamic_entry *hde;

	hde = __alloc_dynamic_entry(evsel, field, level);
	if (hde == NULL)
		return -ENOMEM;

	hde->raw_trace = raw_trace;

	perf_hpp__register_sort_field(&hde->hpp);
	return 0;
}

static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace, int level)
{
	int ret;
	struct format_field *field;

	field = evsel->tp_format->format.fields;
	while (field) {
		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			return ret;

		field = field->next;
	}
	return 0;
}

static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace,
				  int level)
{
	int ret;
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		ret = add_evsel_fields(evsel, raw_trace, level);
		if (ret < 0)
			return ret;
	}
	return 0;
}

static int add_all_matching_fields(struct perf_evlist *evlist,
				   char *field_name, bool raw_trace, int level)
{
	int ret = -ESRCH;
	struct perf_evsel *evsel;
	struct format_field *field;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		field = pevent_find_any_field(evsel->tp_format, field_name);
		if (field == NULL)
			continue;

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			break;
	}
	return ret;
}
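/*
 * Add sort keys for raw tracepoint fields.  The accepted token forms are
 * "trace_fields" (every field of every tracepoint event), "<field>" (the
 * field in any tracepoint event that has it), "<event>.<field>" and
 * "<event>.*", each optionally suffixed with "/raw" to disable pretty
 * printing of the field value.
 */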
static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok,
			     int level)
{
	char *str, *event_name, *field_name, *opt_name;
	struct perf_evsel *evsel;
	struct format_field *field;
	bool raw_trace = symbol_conf.raw_trace;
	int ret = 0;

	if (evlist == NULL)
		return -ENOENT;

	str = strdup(tok);
	if (str == NULL)
		return -ENOMEM;

	if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
		ret = -EINVAL;
		goto out;
	}

	if (opt_name) {
		if (strcmp(opt_name, "raw")) {
			pr_debug("unsupported field option %s\n", opt_name);
			ret = -EINVAL;
			goto out;
		}
		raw_trace = true;
	}

	if (!strcmp(field_name, "trace_fields")) {
		ret = add_all_dynamic_fields(evlist, raw_trace, level);
		goto out;
	}

	if (event_name == NULL) {
		ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
		goto out;
	}

	evsel = find_evsel(evlist, event_name);
	if (evsel == NULL) {
		pr_debug("Cannot find event: %s\n", event_name);
		ret = -ENOENT;
		goto out;
	}

	if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
		pr_debug("%s is not a tracepoint event\n", event_name);
		ret = -EINVAL;
		goto out;
	}

	if (!strcmp(field_name, "*")) {
		ret = add_evsel_fields(evsel, raw_trace, level);
	} else {
		field = pevent_find_any_field(evsel->tp_format, field_name);
		if (field == NULL) {
			pr_debug("Cannot find event field for %s.%s\n",
				 event_name, field_name);
			ret = -ENOENT;
			goto out;
		}

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
	}

out:
	free(str);
	return ret;
}

static int __sort_dimension__add(struct sort_dimension *sd,
				 struct perf_hpp_list *list,
				 int level)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
		return -1;

	if (sd->entry->se_collapse)
		list->need_collapse = 1;

	sd->taken = 1;

	return 0;
}

static int __hpp_dimension__add(struct hpp_dimension *hd,
				struct perf_hpp_list *list,
				int level)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, level);
	if (!fmt)
		return -1;

	hd->taken = 1;
	perf_hpp_list__register_sort_field(list, fmt);
	return 0;
}

static int __sort_dimension__add_output(struct perf_hpp_list *list,
					struct sort_dimension *sd)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_output(sd, list) < 0)
		return -1;

	sd->taken = 1;
	return 0;
}

static int __hpp_dimension__add_output(struct perf_hpp_list *list,
				       struct hpp_dimension *hd)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, 0);
	if (!fmt)
		return -1;

	hd->taken = 1;
	perf_hpp_list__column_register(list, fmt);
	return 0;
}

int hpp_dimension__add_output(unsigned col)
{
	BUG_ON(col >= PERF_HPP__MAX_INDEX);
	return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
}
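/*
 * Map a single --sort token onto a dimension: common keys first, then the
 * generic hpp columns, then branch-stack and memory keys (only valid in
 * their respective sort modes), and finally dynamic tracepoint fields.
 * Returns -ESRCH for an unknown key and -EINVAL for a key that cannot be
 * used in the current mode.  'level' is the level assigned by
 * setup_sort_list() below; keys grouped with '{}' in the sort string share
 * a level, which the hierarchy output mode uses.
 */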
int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
			struct perf_evlist *evlist,
			int level)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sd->entry == &sort_parent) {
			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
			if (ret) {
				char err[BUFSIZ];

				regerror(ret, &parent_regex, err, sizeof(err));
				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
				return -EINVAL;
			}
			list->parent = 1;
		} else if (sd->entry == &sort_sym) {
			list->sym = 1;
			/*
			 * perf diff displays the performance difference amongst
			 * two or more perf.data files. Those files could come
			 * from different binaries. So we should not compare
			 * their ips, but the name of symbol.
			 */
			if (sort__mode == SORT_MODE__DIFF)
				sd->entry->se_collapse = sort__sym_sort;

		} else if (sd->entry == &sort_dso) {
			list->dso = 1;
		} else if (sd->entry == &sort_socket) {
			list->socket = 1;
		} else if (sd->entry == &sort_thread) {
			list->thread = 1;
		} else if (sd->entry == &sort_comm) {
			list->comm = 1;
		}

		return __sort_dimension__add(sd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add(hd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__BRANCH)
			return -EINVAL;

		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__MEMORY)
			return -EINVAL;

		if (sd->entry == &sort_mem_dcacheline && cacheline_size == 0)
			return -EINVAL;

		if (sd->entry == &sort_mem_daddr_sym)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	if (!add_dynamic_entry(evlist, tok, level))
		return 0;

	return -ESRCH;
}

static int setup_sort_list(struct perf_hpp_list *list, char *str,
			   struct perf_evlist *evlist)
{
	char *tmp, *tok;
	int ret = 0;
	int level = 0;
	int next_level = 1;
	bool in_group = false;

	do {
		tok = str;
		tmp = strpbrk(str, "{}, ");
		if (tmp) {
			if (in_group)
				next_level = level;
			else
				next_level = level + 1;

			if (*tmp == '{')
				in_group = true;
			else if (*tmp == '}')
				in_group = false;

			*tmp = '\0';
			str = tmp + 1;
		}

		if (*tok) {
			ret = sort_dimension__add(list, tok, evlist, level);
			if (ret == -EINVAL) {
				if (!cacheline_size && !strncasecmp(tok, "dcacheline", strlen(tok)))
					pr_err("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
				else
					pr_err("Invalid --sort key: `%s'", tok);
				break;
			} else if (ret == -ESRCH) {
				pr_err("Unknown --sort key: `%s'", tok);
				break;
			}
		}

		level = next_level;
	} while (tmp);

	return ret;
}
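/*
 * Pick the default sort order for the current sort mode.  If the event list
 * contains only tracepoint events, switch to SORT_MODE__TRACEPOINT first
 * (returning "trace_fields" directly when raw trace output is requested).
 */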
static const char *get_default_sort_order(struct perf_evlist *evlist)
{
	const char *default_sort_orders[] = {
		default_sort_order,
		default_branch_sort_order,
		default_mem_sort_order,
		default_top_sort_order,
		default_diff_sort_order,
		default_tracepoint_sort_order,
	};
	bool use_trace = true;
	struct perf_evsel *evsel;

	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));

	if (evlist == NULL || perf_evlist__empty(evlist))
		goto out_no_evlist;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
			use_trace = false;
			break;
		}
	}

	if (use_trace) {
		sort__mode = SORT_MODE__TRACEPOINT;
		if (symbol_conf.raw_trace)
			return "trace_fields";
	}
out_no_evlist:
	return default_sort_orders[sort__mode];
}

static int setup_sort_order(struct perf_evlist *evlist)
{
	char *new_sort_order;

	/*
	 * Append '+'-prefixed sort order to the default sort
	 * order string.
	 */
	if (!sort_order || is_strict_order(sort_order))
		return 0;

	if (sort_order[1] == '\0') {
		pr_err("Invalid --sort key: `+'");
		return -EINVAL;
	}

	/*
	 * We allocate new sort_order string, but we never free it,
	 * because it's used throughout the rest of the code.
	 */
	if (asprintf(&new_sort_order, "%s,%s",
		     get_default_sort_order(evlist), sort_order + 1) < 0) {
		pr_err("Not enough memory to set up --sort");
		return -ENOMEM;
	}

	sort_order = new_sort_order;
	return 0;
}

/*
 * Adds 'pre,' prefix into 'str' if 'pre' is
 * not already part of 'str'.
 */
static char *prefix_if_not_in(const char *pre, char *str)
{
	char *n;

	if (!str || strstr(str, pre))
		return str;

	if (asprintf(&n, "%s,%s", pre, str) < 0)
		return NULL;

	free(str);
	return n;
}

static char *setup_overhead(char *keys)
{
	if (sort__mode == SORT_MODE__DIFF)
		return keys;

	keys = prefix_if_not_in("overhead", keys);

	if (symbol_conf.cumulate_callchain)
		keys = prefix_if_not_in("overhead_children", keys);

	return keys;
}
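/*
 * Resolve the final sort key string: honor an explicit --sort value (with a
 * leading '+' appending to the mode's defaults), otherwise fall back to the
 * default order, and prepend the overhead keys for backward compatibility
 * unless a strict --fields order was given.  For example, a plain "comm,dso"
 * sort key string becomes "overhead,comm,dso" before it is parsed.
 */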
static int __setup_sorting(struct perf_evlist *evlist)
{
	char *str;
	const char *sort_keys;
	int ret = 0;

	ret = setup_sort_order(evlist);
	if (ret)
		return ret;

	sort_keys = sort_order;
	if (sort_keys == NULL) {
		if (is_strict_order(field_order)) {
			/*
			 * If user specified field order but no sort order,
			 * we'll honor it and not add default sort orders.
			 */
			return 0;
		}

		sort_keys = get_default_sort_order(evlist);
	}

	str = strdup(sort_keys);
	if (str == NULL) {
		pr_err("Not enough memory to setup sort keys");
		return -ENOMEM;
	}

	/*
	 * Prepend overhead fields for backward compatibility.
	 */
	if (!is_strict_order(field_order)) {
		str = setup_overhead(str);
		if (str == NULL) {
			pr_err("Not enough memory to setup overhead keys");
			return -ENOMEM;
		}
	}

	ret = setup_sort_list(&perf_hpp_list, str, evlist);

	free(str);
	return ret;
}

void perf_hpp__set_elide(int idx, bool elide)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_width_idx == idx) {
			fmt->elide = elide;
			break;
		}
	}
}

static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
{
	if (list && strlist__nr_entries(list) == 1) {
		if (fp != NULL)
			fprintf(fp, "# %s: %s\n", list_name,
				strlist__entry(list, 0)->s);
		return true;
	}
	return false;
}

static bool get_elide(int idx, FILE *output)
{
	switch (idx) {
	case HISTC_SYMBOL:
		return __get_elide(symbol_conf.sym_list, "symbol", output);
	case HISTC_DSO:
		return __get_elide(symbol_conf.dso_list, "dso", output);
	case HISTC_COMM:
		return __get_elide(symbol_conf.comm_list, "comm", output);
	default:
		break;
	}

	if (sort__mode != SORT_MODE__BRANCH)
		return false;

	switch (idx) {
	case HISTC_SYMBOL_FROM:
		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
	case HISTC_SYMBOL_TO:
		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
	case HISTC_DSO_FROM:
		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
	case HISTC_DSO_TO:
		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
	default:
		break;
	}

	return false;
}
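/*
 * Elide a sort column when its filter list has exactly one entry (for
 * example a report restricted to a single dso): every row would print the
 * same value, so the column carries no information.  If that would elide
 * every sort column, undo it so something is still shown.
 */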
void sort__setup_elide(FILE *output)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		fmt->elide = get_elide(hse->se->se_width_idx, output);
	}

	/*
	 * It makes no sense to elide all of the sort entries.
	 * Just revert them to show up again.
	 */
	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		if (!fmt->elide)
			return;
	}

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		fmt->elide = false;
	}
}

int output_field_add(struct perf_hpp_list *list, char *tok)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add_output(list, hd);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	return -ESRCH;
}

static int setup_output_list(struct perf_hpp_list *list, char *str)
{
	char *tmp, *tok;
	int ret = 0;

	for (tok = strtok_r(str, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
		ret = output_field_add(list, tok);
		if (ret == -EINVAL) {
			ui__error("Invalid --fields key: `%s'", tok);
			break;
		} else if (ret == -ESRCH) {
			ui__error("Unknown --fields key: `%s'", tok);
			break;
		}
	}

	return ret;
}

void reset_dimensions(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
		common_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
		hpp_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
		bstack_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
		memory_sort_dimensions[i].taken = 0;
}

bool is_strict_order(const char *order)
{
	return order && (*order != '+');
}

static int __setup_output_field(void)
{
	char *str, *strp;
	int ret = -EINVAL;

	if (field_order == NULL)
		return 0;

	strp = str = strdup(field_order);
	if (str == NULL) {
		pr_err("Not enough memory to setup output fields");
		return -ENOMEM;
	}

	if (!is_strict_order(field_order))
		strp++;

	if (!strlen(strp)) {
		pr_err("Invalid --fields key: `+'");
		goto out;
	}

	ret = setup_output_list(&perf_hpp_list, strp);

out:
	free(str);
	return ret;
}
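/*
 * Top-level setup: build the sort keys (plus the special "parent" key when
 * a non-default parent pattern is given), then the output fields, and
 * finally link the two lists so every sort key is also printed and every
 * output field can be sorted on.
 */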
int setup_sorting(struct perf_evlist *evlist)
{
	int err;

	err = __setup_sorting(evlist);
	if (err < 0)
		return err;

	if (parent_pattern != default_parent_pattern) {
		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
		if (err < 0)
			return err;
	}

	reset_dimensions();

	/*
	 * perf diff doesn't use default hpp output fields.
	 */
	if (sort__mode != SORT_MODE__DIFF)
		perf_hpp__init();

	err = __setup_output_field();
	if (err < 0)
		return err;

	/* copy sort keys to output fields */
	perf_hpp__setup_output_field(&perf_hpp_list);
	/* and then copy output fields to sort keys */
	perf_hpp__append_sort_keys(&perf_hpp_list);

	/* setup hists-specific output fields */
	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
		return -1;

	return 0;
}

void reset_output_field(void)
{
	perf_hpp_list.need_collapse = 0;
	perf_hpp_list.parent = 0;
	perf_hpp_list.sym = 0;
	perf_hpp_list.dso = 0;

	field_order = NULL;
	sort_order = NULL;

	reset_dimensions();
	perf_hpp__reset_output_field(&perf_hpp_list);
}