// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include <stdlib.h>
#include <linux/mman.h>
#include <linux/time64.h>
#include "debug.h"
#include "dso.h"
#include "sort.h"
#include "hist.h"
#include "cacheline.h"
#include "comm.h"
#include "map.h"
#include "maps.h"
#include "symbol.h"
#include "map_symbol.h"
#include "branch.h"
#include "thread.h"
#include "evsel.h"
#include "evlist.h"
#include "srcline.h"
#include "strlist.h"
#include "strbuf.h"
#include "mem-events.h"
#include "mem-info.h"
#include "annotate.h"
#include "annotate-data.h"
#include "event.h"
#include "time-utils.h"
#include "cgroup.h"
#include "machine.h"
#include "trace-event.h"
#include <linux/kernel.h>
#include <linux/string.h>

#ifdef HAVE_LIBTRACEEVENT
#include <event-parse.h>
#endif

regex_t parent_regex;
const char default_parent_pattern[] = "^sys_|^do_page_fault";
const char *parent_pattern = default_parent_pattern;
const char *default_sort_order = "comm,dso,symbol";
const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat,local_p_stage_cyc";
const char default_top_sort_order[] = "dso,symbol";
const char default_diff_sort_order[] = "dso,symbol";
const char default_tracepoint_sort_order[] = "trace";
const char *sort_order;
const char *field_order;
regex_t ignore_callees_regex;
int have_ignore_callees = 0;
enum sort_mode sort__mode = SORT_MODE__NORMAL;
static const char *const dynamic_headers[] = {"local_ins_lat", "ins_lat", "local_p_stage_cyc", "p_stage_cyc"};
static const char *const arch_specific_sort_keys[] = {"local_p_stage_cyc", "p_stage_cyc"};

/*
 * Some architectures have an Adjacent Cacheline Prefetch feature, which
 * makes them behave as if the cacheline size were doubled. Enable this
 * flag to check things at double-cacheline granularity.
 */
bool chk_double_cl;
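/*
 * Illustrative sketch (not part of the original source), assuming 64-byte
 * cachelines: addresses 0x1000 and 0x1040 normally fall into different
 * cachelines, but with chk_double_cl set they are grouped into the same
 * 128-byte "double" line when cl_address() masks them down for the
 * dcacheline sort key:
 *
 *	cl_address(0x1000, false) == 0x1000, cl_address(0x1040, false) == 0x1040
 *	cl_address(0x1000, true)  == 0x1000, cl_address(0x1040, true)  == 0x1000
 */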

/*
 * Replaces all occurrences of the character selected with the:
 *
 * -t, --field-separator
 *
 * option, which uses a special separator character and doesn't pad with
 * spaces. Every occurrence of that separator in symbol names (and other
 * output) is replaced with a '.' character, so that the separator remains
 * the only valid field delimiter.
 */
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
{
	int n;
	va_list ap;

	va_start(ap, fmt);
	n = vsnprintf(bf, size, fmt, ap);
	if (symbol_conf.field_sep && n > 0) {
		char *sep = bf;

		while (1) {
			sep = strchr(sep, *symbol_conf.field_sep);
			if (sep == NULL)
				break;
			*sep = '.';
		}
	}
	va_end(ap);

	if (n >= (int)size)
		return size - 1;
	return n;
}
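/*
 * Illustrative sketch (not part of the original source), assuming the user
 * passed -t ',' so that symbol_conf.field_sep points at ",": any ',' that
 * repsep_snprintf() writes is turned into '.', keeping the separator
 * unambiguous in machine-readable output:
 *
 *	char buf[32];
 *
 *	repsep_snprintf(buf, sizeof(buf), "%s", "operator(),(int,int)");
 *	// buf now holds "operator().(int.int)"
 */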
97
98static int64_t cmp_null(const void *l, const void *r)
99{
100 if (!l && !r)
101 return 0;
102 else if (!l)
103 return -1;
104 else
105 return 1;
106}
107
108/* --sort pid */
109
110static int64_t
111sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
112{
113 return thread__tid(right->thread) - thread__tid(left->thread);
114}
115
116static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
117 size_t size, unsigned int width)
118{
119 const char *comm = thread__comm_str(he->thread);
120
121 width = max(7U, width) - 8;
122 return repsep_snprintf(bf, size, "%7d:%-*.*s", thread__tid(he->thread),
123 width, width, comm ?: "");
124}
125
126static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
127{
128 const struct thread *th = arg;
129
130 if (type != HIST_FILTER__THREAD)
131 return -1;
132
133 return th && !RC_CHK_EQUAL(he->thread, th);
134}
135
136struct sort_entry sort_thread = {
137 .se_header = " Pid:Command",
138 .se_cmp = sort__thread_cmp,
139 .se_snprintf = hist_entry__thread_snprintf,
140 .se_filter = hist_entry__thread_filter,
141 .se_width_idx = HISTC_THREAD,
142};
143
144/* --sort tgid */
145
146static int64_t
147sort__tgid_cmp(struct hist_entry *left, struct hist_entry *right)
148{
149 return thread__pid(right->thread) - thread__pid(left->thread);
150}
151
152static int hist_entry__tgid_snprintf(struct hist_entry *he, char *bf,
153 size_t size, unsigned int width)
154{
155 int tgid = thread__pid(he->thread);
156 const char *comm = NULL;
157
158 /* display comm of the thread-group leader */
159 if (thread__pid(he->thread) == thread__tid(he->thread)) {
160 comm = thread__comm_str(he->thread);
161 } else {
162 struct maps *maps = thread__maps(he->thread);
163 struct thread *leader = machine__find_thread(maps__machine(maps),
164 tgid, tgid);
165 if (leader) {
166 comm = thread__comm_str(leader);
167 thread__put(leader);
168 }
169 }
170 width = max(7U, width) - 8;
171 return repsep_snprintf(bf, size, "%7d:%-*.*s", tgid, width, width, comm ?: "");
172}
173
174struct sort_entry sort_tgid = {
175 .se_header = " Tgid:Command",
176 .se_cmp = sort__tgid_cmp,
177 .se_snprintf = hist_entry__tgid_snprintf,
178 .se_width_idx = HISTC_TGID,
179};
180
181/* --sort simd */
182
183static int64_t
184sort__simd_cmp(struct hist_entry *left, struct hist_entry *right)
185{
186 if (left->simd_flags.arch != right->simd_flags.arch)
187 return (int64_t) left->simd_flags.arch - right->simd_flags.arch;
188
189 return (int64_t) left->simd_flags.pred - right->simd_flags.pred;
190}
191
192static const char *hist_entry__get_simd_name(struct simd_flags *simd_flags)
193{
194 u64 arch = simd_flags->arch;
195
196 if (arch & SIMD_OP_FLAGS_ARCH_SVE)
197 return "SVE";
198 else
199 return "n/a";
200}
201
202static int hist_entry__simd_snprintf(struct hist_entry *he, char *bf,
203 size_t size, unsigned int width __maybe_unused)
204{
205 const char *name;
206
207 if (!he->simd_flags.arch)
208 return repsep_snprintf(bf, size, "");
209
210 name = hist_entry__get_simd_name(&he->simd_flags);
211
212 if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_EMPTY)
213 return repsep_snprintf(bf, size, "[e] %s", name);
214 else if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_PARTIAL)
215 return repsep_snprintf(bf, size, "[p] %s", name);
216
217 return repsep_snprintf(bf, size, "[.] %s", name);
218}
219
220struct sort_entry sort_simd = {
221 .se_header = "Simd ",
222 .se_cmp = sort__simd_cmp,
223 .se_snprintf = hist_entry__simd_snprintf,
224 .se_width_idx = HISTC_SIMD,
225};
226
227/* --sort comm */
228
229/*
230 * We can't use pointer comparison in functions below,
231 * because it gives different results based on pointer
232 * values, which could break some sorting assumptions.
233 */
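/*
 * For example (illustrative, not in the original source): two hist entries
 * can reference distinct struct comm instances that both resolve to the
 * string "bash". Comparing the comm pointers would order them by allocation
 * address, whereas strcmp(comm__str(right->comm), comm__str(left->comm))
 * correctly treats them as equal.
 */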
234static int64_t
235sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
236{
237 return strcmp(comm__str(right->comm), comm__str(left->comm));
238}
239
240static int64_t
241sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
242{
243 return strcmp(comm__str(right->comm), comm__str(left->comm));
244}
245
246static int64_t
247sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
248{
249 return strcmp(comm__str(right->comm), comm__str(left->comm));
250}
251
252static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
253 size_t size, unsigned int width)
254{
255 return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
256}
257
258struct sort_entry sort_comm = {
259 .se_header = "Command",
260 .se_cmp = sort__comm_cmp,
261 .se_collapse = sort__comm_collapse,
262 .se_sort = sort__comm_sort,
263 .se_snprintf = hist_entry__comm_snprintf,
264 .se_filter = hist_entry__thread_filter,
265 .se_width_idx = HISTC_COMM,
266};
267
268/* --sort dso */
269
270static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
271{
272 struct dso *dso_l = map_l ? map__dso(map_l) : NULL;
273 struct dso *dso_r = map_r ? map__dso(map_r) : NULL;
274 const char *dso_name_l, *dso_name_r;
275
276 if (!dso_l || !dso_r)
277 return cmp_null(dso_r, dso_l);
278
279 if (verbose > 0) {
280 dso_name_l = dso__long_name(dso_l);
281 dso_name_r = dso__long_name(dso_r);
282 } else {
283 dso_name_l = dso__short_name(dso_l);
284 dso_name_r = dso__short_name(dso_r);
285 }
286
287 return strcmp(dso_name_l, dso_name_r);
288}
289
290static int64_t
291sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
292{
293 return _sort__dso_cmp(right->ms.map, left->ms.map);
294}
295
296static int _hist_entry__dso_snprintf(struct map *map, char *bf,
297 size_t size, unsigned int width)
298{
299 const struct dso *dso = map ? map__dso(map) : NULL;
300 const char *dso_name = "[unknown]";
301
302 if (dso)
303 dso_name = verbose > 0 ? dso__long_name(dso) : dso__short_name(dso);
304
305 return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
306}
307
308static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
309 size_t size, unsigned int width)
310{
311 return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
312}
313
314static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
315{
316 const struct dso *dso = arg;
317
318 if (type != HIST_FILTER__DSO)
319 return -1;
320
321 return dso && (!he->ms.map || map__dso(he->ms.map) != dso);
322}
323
324struct sort_entry sort_dso = {
325 .se_header = "Shared Object",
326 .se_cmp = sort__dso_cmp,
327 .se_snprintf = hist_entry__dso_snprintf,
328 .se_filter = hist_entry__dso_filter,
329 .se_width_idx = HISTC_DSO,
330};
331
332/* --sort symbol */
333
334static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
335{
336 return (int64_t)(right_ip - left_ip);
337}
338
339int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
340{
341 if (!sym_l || !sym_r)
342 return cmp_null(sym_l, sym_r);
343
344 if (sym_l == sym_r)
345 return 0;
346
347 if (sym_l->inlined || sym_r->inlined) {
348 int ret = strcmp(sym_l->name, sym_r->name);
349
350 if (ret)
351 return ret;
352 if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
353 return 0;
354 }
355
356 if (sym_l->start != sym_r->start)
357 return (int64_t)(sym_r->start - sym_l->start);
358
359 return (int64_t)(sym_r->end - sym_l->end);
360}
361
362static int64_t
363sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
364{
365 int64_t ret;
366
367 if (!left->ms.sym && !right->ms.sym)
368 return _sort__addr_cmp(left->ip, right->ip);
369
370 /*
371 * comparing symbol address alone is not enough since it's a
372 * relative address within a dso.
373 */
374 if (!hists__has(left->hists, dso)) {
375 ret = sort__dso_cmp(left, right);
376 if (ret != 0)
377 return ret;
378 }
379
380 return _sort__sym_cmp(left->ms.sym, right->ms.sym);
381}
382
383static int64_t
384sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
385{
386 if (!left->ms.sym || !right->ms.sym)
387 return cmp_null(left->ms.sym, right->ms.sym);
388
389 return strcmp(right->ms.sym->name, left->ms.sym->name);
390}
391
392static int _hist_entry__sym_snprintf(struct map_symbol *ms,
393 u64 ip, char level, char *bf, size_t size,
394 unsigned int width)
395{
396 struct symbol *sym = ms->sym;
397 struct map *map = ms->map;
398 size_t ret = 0;
399
400 if (verbose > 0) {
401 struct dso *dso = map ? map__dso(map) : NULL;
402 char o = dso ? dso__symtab_origin(dso) : '!';
403 u64 rip = ip;
404
405 if (dso && dso__kernel(dso) && dso__adjust_symbols(dso))
406 rip = map__unmap_ip(map, ip);
407
408 ret += repsep_snprintf(bf, size, "%-#*llx %c ",
409 BITS_PER_LONG / 4 + 2, rip, o);
410 }
411
412 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
413 if (sym && map) {
414 if (sym->type == STT_OBJECT) {
415 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
416 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
417 ip - map__unmap_ip(map, sym->start));
418 } else {
419 ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
420 width - ret,
421 sym->name);
422 if (sym->inlined)
423 ret += repsep_snprintf(bf + ret, size - ret,
424 " (inlined)");
425 }
426 } else {
427 size_t len = BITS_PER_LONG / 4;
428 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
429 len, ip);
430 }
431
432 return ret;
433}
434
435int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
436{
437 return _hist_entry__sym_snprintf(&he->ms, he->ip,
438 he->level, bf, size, width);
439}
440
441static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
442{
443 const char *sym = arg;
444
445 if (type != HIST_FILTER__SYMBOL)
446 return -1;
447
448 return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
449}
450
451struct sort_entry sort_sym = {
452 .se_header = "Symbol",
453 .se_cmp = sort__sym_cmp,
454 .se_sort = sort__sym_sort,
455 .se_snprintf = hist_entry__sym_snprintf,
456 .se_filter = hist_entry__sym_filter,
457 .se_width_idx = HISTC_SYMBOL,
458};
459
460/* --sort symoff */
461
462static int64_t
463sort__symoff_cmp(struct hist_entry *left, struct hist_entry *right)
464{
465 int64_t ret;
466
467 ret = sort__sym_cmp(left, right);
468 if (ret)
469 return ret;
470
471 return left->ip - right->ip;
472}
473
474static int64_t
475sort__symoff_sort(struct hist_entry *left, struct hist_entry *right)
476{
477 int64_t ret;
478
479 ret = sort__sym_sort(left, right);
480 if (ret)
481 return ret;
482
483 return left->ip - right->ip;
484}
485
486static int
487hist_entry__symoff_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
488{
489 struct symbol *sym = he->ms.sym;
490
491 if (sym == NULL)
492 return repsep_snprintf(bf, size, "[%c] %-#.*llx", he->level, width - 4, he->ip);
493
494 return repsep_snprintf(bf, size, "[%c] %s+0x%llx", he->level, sym->name, he->ip - sym->start);
495}
496
497struct sort_entry sort_sym_offset = {
498 .se_header = "Symbol Offset",
499 .se_cmp = sort__symoff_cmp,
500 .se_sort = sort__symoff_sort,
501 .se_snprintf = hist_entry__symoff_snprintf,
502 .se_filter = hist_entry__sym_filter,
503 .se_width_idx = HISTC_SYMBOL_OFFSET,
504};
505
506/* --sort srcline */
507
508char *hist_entry__srcline(struct hist_entry *he)
509{
510 return map__srcline(he->ms.map, he->ip, he->ms.sym);
511}
512
513static int64_t
514sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
515{
516 int64_t ret;
517
518 ret = _sort__addr_cmp(left->ip, right->ip);
519 if (ret)
520 return ret;
521
522 return sort__dso_cmp(left, right);
523}
524
525static int64_t
526sort__srcline_collapse(struct hist_entry *left, struct hist_entry *right)
527{
528 if (!left->srcline)
529 left->srcline = hist_entry__srcline(left);
530 if (!right->srcline)
531 right->srcline = hist_entry__srcline(right);
532
533 return strcmp(right->srcline, left->srcline);
534}
535
536static int64_t
537sort__srcline_sort(struct hist_entry *left, struct hist_entry *right)
538{
539 return sort__srcline_collapse(left, right);
540}
541
542static void
543sort__srcline_init(struct hist_entry *he)
544{
545 if (!he->srcline)
546 he->srcline = hist_entry__srcline(he);
547}
548
549static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
550 size_t size, unsigned int width)
551{
552 return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
553}
554
555struct sort_entry sort_srcline = {
556 .se_header = "Source:Line",
557 .se_cmp = sort__srcline_cmp,
558 .se_collapse = sort__srcline_collapse,
559 .se_sort = sort__srcline_sort,
560 .se_init = sort__srcline_init,
561 .se_snprintf = hist_entry__srcline_snprintf,
562 .se_width_idx = HISTC_SRCLINE,
563};
564
565/* --sort srcline_from */
566
567static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
568{
569 return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym);
570}
571
572static int64_t
573sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
574{
575 return left->branch_info->from.addr - right->branch_info->from.addr;
576}
577
578static int64_t
579sort__srcline_from_collapse(struct hist_entry *left, struct hist_entry *right)
580{
581 if (!left->branch_info->srcline_from)
582 left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);
583
584 if (!right->branch_info->srcline_from)
585 right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);
586
587 return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
588}
589
590static int64_t
591sort__srcline_from_sort(struct hist_entry *left, struct hist_entry *right)
592{
593 return sort__srcline_from_collapse(left, right);
594}
595
596static void sort__srcline_from_init(struct hist_entry *he)
597{
598 if (!he->branch_info->srcline_from)
599 he->branch_info->srcline_from = addr_map_symbol__srcline(&he->branch_info->from);
600}
601
602static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
603 size_t size, unsigned int width)
604{
605 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
606}
607
608struct sort_entry sort_srcline_from = {
609 .se_header = "From Source:Line",
610 .se_cmp = sort__srcline_from_cmp,
611 .se_collapse = sort__srcline_from_collapse,
612 .se_sort = sort__srcline_from_sort,
613 .se_init = sort__srcline_from_init,
614 .se_snprintf = hist_entry__srcline_from_snprintf,
615 .se_width_idx = HISTC_SRCLINE_FROM,
616};
617
618/* --sort srcline_to */
619
620static int64_t
621sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
622{
623 return left->branch_info->to.addr - right->branch_info->to.addr;
624}
625
626static int64_t
627sort__srcline_to_collapse(struct hist_entry *left, struct hist_entry *right)
628{
629 if (!left->branch_info->srcline_to)
630 left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);
631
632 if (!right->branch_info->srcline_to)
633 right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);
634
635 return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
636}
637
638static int64_t
639sort__srcline_to_sort(struct hist_entry *left, struct hist_entry *right)
640{
641 return sort__srcline_to_collapse(left, right);
642}
643
644static void sort__srcline_to_init(struct hist_entry *he)
645{
646 if (!he->branch_info->srcline_to)
647 he->branch_info->srcline_to = addr_map_symbol__srcline(&he->branch_info->to);
648}
649
650static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
651 size_t size, unsigned int width)
652{
653 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
654}
655
656struct sort_entry sort_srcline_to = {
657 .se_header = "To Source:Line",
658 .se_cmp = sort__srcline_to_cmp,
659 .se_collapse = sort__srcline_to_collapse,
660 .se_sort = sort__srcline_to_sort,
661 .se_init = sort__srcline_to_init,
662 .se_snprintf = hist_entry__srcline_to_snprintf,
663 .se_width_idx = HISTC_SRCLINE_TO,
664};
665
666static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
667 size_t size, unsigned int width)
668{
669
670 struct symbol *sym = he->ms.sym;
671 struct annotated_branch *branch;
672 double ipc = 0.0, coverage = 0.0;
673 char tmp[64];
674
675 if (!sym)
676 return repsep_snprintf(bf, size, "%-*s", width, "-");
677
678 branch = symbol__annotation(sym)->branch;
679
680 if (branch && branch->hit_cycles)
681 ipc = branch->hit_insn / ((double)branch->hit_cycles);
682
683 if (branch && branch->total_insn) {
684 coverage = branch->cover_insn * 100.0 /
685 ((double)branch->total_insn);
686 }
687
688 snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
689 return repsep_snprintf(bf, size, "%-*s", width, tmp);
690}
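/*
 * Worked example (illustrative, not in the original source): if the symbol's
 * annotated branch data has hit_insn = 1200, hit_cycles = 800,
 * cover_insn = 60 and total_insn = 80, then ipc = 1200 / 800 = 1.50 and
 * coverage = 60 * 100 / 80 = 75.0, so the column reads "1.50  [ 75.0%]".
 */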
691
692struct sort_entry sort_sym_ipc = {
693 .se_header = "IPC [IPC Coverage]",
694 .se_cmp = sort__sym_cmp,
695 .se_snprintf = hist_entry__sym_ipc_snprintf,
696 .se_width_idx = HISTC_SYMBOL_IPC,
697};
698
699static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
700 __maybe_unused,
701 char *bf, size_t size,
702 unsigned int width)
703{
704 char tmp[64];
705
706 snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
707 return repsep_snprintf(bf, size, "%-*s", width, tmp);
708}
709
710struct sort_entry sort_sym_ipc_null = {
711 .se_header = "IPC [IPC Coverage]",
712 .se_cmp = sort__sym_cmp,
713 .se_snprintf = hist_entry__sym_ipc_null_snprintf,
714 .se_width_idx = HISTC_SYMBOL_IPC,
715};
716
717/* --sort callchain_branch_predicted */
718
719static int64_t
720sort__callchain_branch_predicted_cmp(struct hist_entry *left __maybe_unused,
721 struct hist_entry *right __maybe_unused)
722{
723 return 0;
724}
725
726static int hist_entry__callchain_branch_predicted_snprintf(
727 struct hist_entry *he, char *bf, size_t size, unsigned int width)
728{
729 u64 branch_count, predicted_count;
730 double percent = 0.0;
731 char str[32];
732
733 callchain_branch_counts(he->callchain, &branch_count,
734 &predicted_count, NULL, NULL);
735
736 if (branch_count)
737 percent = predicted_count * 100.0 / branch_count;
738
739 snprintf(str, sizeof(str), "%.1f%%", percent);
740 return repsep_snprintf(bf, size, "%-*.*s", width, width, str);
741}
742
743struct sort_entry sort_callchain_branch_predicted = {
744 .se_header = "Predicted",
745 .se_cmp = sort__callchain_branch_predicted_cmp,
746 .se_snprintf = hist_entry__callchain_branch_predicted_snprintf,
747 .se_width_idx = HISTC_CALLCHAIN_BRANCH_PREDICTED,
748};
749
750/* --sort callchain_branch_abort */
751
752static int64_t
753sort__callchain_branch_abort_cmp(struct hist_entry *left __maybe_unused,
754 struct hist_entry *right __maybe_unused)
755{
756 return 0;
757}
758
759static int hist_entry__callchain_branch_abort_snprintf(struct hist_entry *he,
760 char *bf, size_t size,
761 unsigned int width)
762{
763 u64 branch_count, abort_count;
764 char str[32];
765
766 callchain_branch_counts(he->callchain, &branch_count,
767 NULL, &abort_count, NULL);
768
769 snprintf(str, sizeof(str), "%" PRId64, abort_count);
770 return repsep_snprintf(bf, size, "%-*.*s", width, width, str);
771}
772
773struct sort_entry sort_callchain_branch_abort = {
774 .se_header = "Abort",
775 .se_cmp = sort__callchain_branch_abort_cmp,
776 .se_snprintf = hist_entry__callchain_branch_abort_snprintf,
777 .se_width_idx = HISTC_CALLCHAIN_BRANCH_ABORT,
778};
779
780/* --sort callchain_branch_cycles */
781
782static int64_t
783sort__callchain_branch_cycles_cmp(struct hist_entry *left __maybe_unused,
784 struct hist_entry *right __maybe_unused)
785{
786 return 0;
787}
788
789static int hist_entry__callchain_branch_cycles_snprintf(struct hist_entry *he,
790 char *bf, size_t size,
791 unsigned int width)
792{
793 u64 branch_count, cycles_count, cycles = 0;
794 char str[32];
795
796 callchain_branch_counts(he->callchain, &branch_count,
797 NULL, NULL, &cycles_count);
798
799 if (branch_count)
800 cycles = cycles_count / branch_count;
801
802 snprintf(str, sizeof(str), "%" PRId64 "", cycles);
803 return repsep_snprintf(bf, size, "%-*.*s", width, width, str);
804}
805
806struct sort_entry sort_callchain_branch_cycles = {
807 .se_header = "Cycles",
808 .se_cmp = sort__callchain_branch_cycles_cmp,
809 .se_snprintf = hist_entry__callchain_branch_cycles_snprintf,
810 .se_width_idx = HISTC_CALLCHAIN_BRANCH_CYCLES,
811};
812
813/* --sort srcfile */
814
815static char no_srcfile[1];
816
817static char *hist_entry__get_srcfile(struct hist_entry *e)
818{
819 char *sf, *p;
820 struct map *map = e->ms.map;
821
822 if (!map)
823 return no_srcfile;
824
825 sf = __get_srcline(map__dso(map), map__rip_2objdump(map, e->ip),
826 e->ms.sym, false, true, true, e->ip);
827 if (sf == SRCLINE_UNKNOWN)
828 return no_srcfile;
829 p = strchr(sf, ':');
830 if (p && *sf) {
831 *p = 0;
832 return sf;
833 }
834 free(sf);
835 return no_srcfile;
836}
837
838static int64_t
839sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
840{
841 return sort__srcline_cmp(left, right);
842}
843
844static int64_t
845sort__srcfile_collapse(struct hist_entry *left, struct hist_entry *right)
846{
847 if (!left->srcfile)
848 left->srcfile = hist_entry__get_srcfile(left);
849 if (!right->srcfile)
850 right->srcfile = hist_entry__get_srcfile(right);
851
852 return strcmp(right->srcfile, left->srcfile);
853}
854
855static int64_t
856sort__srcfile_sort(struct hist_entry *left, struct hist_entry *right)
857{
858 return sort__srcfile_collapse(left, right);
859}
860
861static void sort__srcfile_init(struct hist_entry *he)
862{
863 if (!he->srcfile)
864 he->srcfile = hist_entry__get_srcfile(he);
865}
866
867static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
868 size_t size, unsigned int width)
869{
870 return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
871}
872
873struct sort_entry sort_srcfile = {
874 .se_header = "Source File",
875 .se_cmp = sort__srcfile_cmp,
876 .se_collapse = sort__srcfile_collapse,
877 .se_sort = sort__srcfile_sort,
878 .se_init = sort__srcfile_init,
879 .se_snprintf = hist_entry__srcfile_snprintf,
880 .se_width_idx = HISTC_SRCFILE,
881};
882
883/* --sort parent */
884
885static int64_t
886sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
887{
888 struct symbol *sym_l = left->parent;
889 struct symbol *sym_r = right->parent;
890
891 if (!sym_l || !sym_r)
892 return cmp_null(sym_l, sym_r);
893
894 return strcmp(sym_r->name, sym_l->name);
895}
896
897static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
898 size_t size, unsigned int width)
899{
900 return repsep_snprintf(bf, size, "%-*.*s", width, width,
901 he->parent ? he->parent->name : "[other]");
902}
903
904struct sort_entry sort_parent = {
905 .se_header = "Parent symbol",
906 .se_cmp = sort__parent_cmp,
907 .se_snprintf = hist_entry__parent_snprintf,
908 .se_width_idx = HISTC_PARENT,
909};
910
911/* --sort cpu */
912
913static int64_t
914sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
915{
916 return right->cpu - left->cpu;
917}
918
919static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
920 size_t size, unsigned int width)
921{
922 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
923}
924
925struct sort_entry sort_cpu = {
926 .se_header = "CPU",
927 .se_cmp = sort__cpu_cmp,
928 .se_snprintf = hist_entry__cpu_snprintf,
929 .se_width_idx = HISTC_CPU,
930};
931
932/* --sort parallelism */
933
934static int64_t
935sort__parallelism_cmp(struct hist_entry *left, struct hist_entry *right)
936{
937 return right->parallelism - left->parallelism;
938}
939
940static int hist_entry__parallelism_filter(struct hist_entry *he, int type, const void *arg)
941{
942 const unsigned long *parallelism_filter = arg;
943
944 if (type != HIST_FILTER__PARALLELISM)
945 return -1;
946
947 return test_bit(he->parallelism, parallelism_filter);
948}
949
950static int hist_entry__parallelism_snprintf(struct hist_entry *he, char *bf,
951 size_t size, unsigned int width)
952{
953 return repsep_snprintf(bf, size, "%*d", width, he->parallelism);
954}
955
956struct sort_entry sort_parallelism = {
957 .se_header = "Parallelism",
958 .se_cmp = sort__parallelism_cmp,
959 .se_filter = hist_entry__parallelism_filter,
960 .se_snprintf = hist_entry__parallelism_snprintf,
961 .se_width_idx = HISTC_PARALLELISM,
962};
963
964/* --sort cgroup_id */
965
966static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
967{
968 return (int64_t)(right_dev - left_dev);
969}
970
971static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
972{
973 return (int64_t)(right_ino - left_ino);
974}
975
976static int64_t
977sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
978{
979 int64_t ret;
980
981 ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
982 if (ret != 0)
983 return ret;
984
985 return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
986 left->cgroup_id.ino);
987}
988
989static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
990 char *bf, size_t size,
991 unsigned int width __maybe_unused)
992{
993 return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
994 he->cgroup_id.ino);
995}
996
997struct sort_entry sort_cgroup_id = {
998 .se_header = "cgroup id (dev/inode)",
999 .se_cmp = sort__cgroup_id_cmp,
1000 .se_snprintf = hist_entry__cgroup_id_snprintf,
1001 .se_width_idx = HISTC_CGROUP_ID,
1002};
1003
1004/* --sort cgroup */
1005
1006static int64_t
1007sort__cgroup_cmp(struct hist_entry *left, struct hist_entry *right)
1008{
1009 return right->cgroup - left->cgroup;
1010}
1011
1012static int hist_entry__cgroup_snprintf(struct hist_entry *he,
1013 char *bf, size_t size,
1014 unsigned int width __maybe_unused)
1015{
1016 const char *cgrp_name = "N/A";
1017
1018 if (he->cgroup) {
1019 struct cgroup *cgrp = cgroup__find(maps__machine(he->ms.maps)->env,
1020 he->cgroup);
1021 if (cgrp != NULL)
1022 cgrp_name = cgrp->name;
1023 else
1024 cgrp_name = "unknown";
1025 }
1026
1027 return repsep_snprintf(bf, size, "%s", cgrp_name);
1028}
1029
1030struct sort_entry sort_cgroup = {
1031 .se_header = "Cgroup",
1032 .se_cmp = sort__cgroup_cmp,
1033 .se_snprintf = hist_entry__cgroup_snprintf,
1034 .se_width_idx = HISTC_CGROUP,
1035};
1036
1037/* --sort socket */
1038
1039static int64_t
1040sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
1041{
1042 return right->socket - left->socket;
1043}
1044
1045static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
1046 size_t size, unsigned int width)
1047{
1048 return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
1049}
1050
1051static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
1052{
1053 int sk = *(const int *)arg;
1054
1055 if (type != HIST_FILTER__SOCKET)
1056 return -1;
1057
1058 return sk >= 0 && he->socket != sk;
1059}
1060
1061struct sort_entry sort_socket = {
1062 .se_header = "Socket",
1063 .se_cmp = sort__socket_cmp,
1064 .se_snprintf = hist_entry__socket_snprintf,
1065 .se_filter = hist_entry__socket_filter,
1066 .se_width_idx = HISTC_SOCKET,
1067};
1068
1069/* --sort time */
1070
1071static int64_t
1072sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
1073{
1074 return right->time - left->time;
1075}
1076
1077static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
1078 size_t size, unsigned int width)
1079{
1080 char he_time[32];
1081
1082 if (symbol_conf.nanosecs)
1083 timestamp__scnprintf_nsec(he->time, he_time,
1084 sizeof(he_time));
1085 else
1086 timestamp__scnprintf_usec(he->time, he_time,
1087 sizeof(he_time));
1088
1089 return repsep_snprintf(bf, size, "%-.*s", width, he_time);
1090}
1091
1092struct sort_entry sort_time = {
1093 .se_header = "Time",
1094 .se_cmp = sort__time_cmp,
1095 .se_snprintf = hist_entry__time_snprintf,
1096 .se_width_idx = HISTC_TIME,
1097};
1098
1099/* --sort trace */
1100
1101#ifdef HAVE_LIBTRACEEVENT
1102static char *get_trace_output(struct hist_entry *he)
1103{
1104 struct trace_seq seq;
1105 struct evsel *evsel;
1106 struct tep_record rec = {
1107 .data = he->raw_data,
1108 .size = he->raw_size,
1109 };
1110 struct tep_event *tp_format;
1111
1112 evsel = hists_to_evsel(he->hists);
1113
1114 trace_seq_init(&seq);
1115 tp_format = evsel__tp_format(evsel);
1116 if (tp_format) {
1117 if (symbol_conf.raw_trace)
1118 tep_print_fields(&seq, he->raw_data, he->raw_size, tp_format);
1119 else
1120 tep_print_event(tp_format->tep, &seq, &rec, "%s", TEP_PRINT_INFO);
1121 }
1122
	/*
	 * Trim the buffer: it starts at 4KB and we're not going to
	 * add anything more to it.
	 */
1127 return realloc(seq.buffer, seq.len + 1);
1128}
1129
1130static int64_t
1131sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
1132{
1133 struct evsel *evsel;
1134
1135 evsel = hists_to_evsel(left->hists);
1136 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
1137 return 0;
1138
1139 if (left->trace_output == NULL)
1140 left->trace_output = get_trace_output(left);
1141 if (right->trace_output == NULL)
1142 right->trace_output = get_trace_output(right);
1143
1144 return strcmp(right->trace_output, left->trace_output);
1145}
1146
1147static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
1148 size_t size, unsigned int width)
1149{
1150 struct evsel *evsel;
1151
1152 evsel = hists_to_evsel(he->hists);
1153 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
1154 return scnprintf(bf, size, "%-.*s", width, "N/A");
1155
1156 if (he->trace_output == NULL)
1157 he->trace_output = get_trace_output(he);
1158 return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
1159}
1160
1161struct sort_entry sort_trace = {
1162 .se_header = "Trace output",
1163 .se_cmp = sort__trace_cmp,
1164 .se_snprintf = hist_entry__trace_snprintf,
1165 .se_width_idx = HISTC_TRACE,
1166};
1167#endif /* HAVE_LIBTRACEEVENT */
1168
1169/* sort keys for branch stacks */
1170
1171static int64_t
1172sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
1173{
1174 if (!left->branch_info || !right->branch_info)
1175 return cmp_null(left->branch_info, right->branch_info);
1176
1177 return _sort__dso_cmp(left->branch_info->from.ms.map,
1178 right->branch_info->from.ms.map);
1179}
1180
1181static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
1182 size_t size, unsigned int width)
1183{
1184 if (he->branch_info)
1185 return _hist_entry__dso_snprintf(he->branch_info->from.ms.map,
1186 bf, size, width);
1187 else
1188 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1189}
1190
1191static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
1192 const void *arg)
1193{
1194 const struct dso *dso = arg;
1195
1196 if (type != HIST_FILTER__DSO)
1197 return -1;
1198
1199 return dso && (!he->branch_info || !he->branch_info->from.ms.map ||
1200 map__dso(he->branch_info->from.ms.map) != dso);
1201}
1202
1203static int64_t
1204sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
1205{
1206 if (!left->branch_info || !right->branch_info)
1207 return cmp_null(left->branch_info, right->branch_info);
1208
1209 return _sort__dso_cmp(left->branch_info->to.ms.map,
1210 right->branch_info->to.ms.map);
1211}
1212
1213static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
1214 size_t size, unsigned int width)
1215{
1216 if (he->branch_info)
1217 return _hist_entry__dso_snprintf(he->branch_info->to.ms.map,
1218 bf, size, width);
1219 else
1220 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1221}
1222
1223static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
1224 const void *arg)
1225{
1226 const struct dso *dso = arg;
1227
1228 if (type != HIST_FILTER__DSO)
1229 return -1;
1230
1231 return dso && (!he->branch_info || !he->branch_info->to.ms.map ||
1232 map__dso(he->branch_info->to.ms.map) != dso);
1233}
1234
1235static int64_t
1236sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
1237{
1238 struct addr_map_symbol *from_l, *from_r;
1239
1240 if (!left->branch_info || !right->branch_info)
1241 return cmp_null(left->branch_info, right->branch_info);
1242
1243 from_l = &left->branch_info->from;
1244 from_r = &right->branch_info->from;
1245
1246 if (!from_l->ms.sym && !from_r->ms.sym)
1247 return _sort__addr_cmp(from_l->addr, from_r->addr);
1248
1249 return _sort__sym_cmp(from_l->ms.sym, from_r->ms.sym);
1250}
1251
1252static int64_t
1253sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
1254{
1255 struct addr_map_symbol *to_l, *to_r;
1256
1257 if (!left->branch_info || !right->branch_info)
1258 return cmp_null(left->branch_info, right->branch_info);
1259
1260 to_l = &left->branch_info->to;
1261 to_r = &right->branch_info->to;
1262
1263 if (!to_l->ms.sym && !to_r->ms.sym)
1264 return _sort__addr_cmp(to_l->addr, to_r->addr);
1265
1266 return _sort__sym_cmp(to_l->ms.sym, to_r->ms.sym);
1267}
1268
1269static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
1270 size_t size, unsigned int width)
1271{
1272 if (he->branch_info) {
1273 struct addr_map_symbol *from = &he->branch_info->from;
1274
1275 return _hist_entry__sym_snprintf(&from->ms, from->al_addr,
1276 from->al_level, bf, size, width);
1277 }
1278
1279 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1280}
1281
1282static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
1283 size_t size, unsigned int width)
1284{
1285 if (he->branch_info) {
1286 struct addr_map_symbol *to = &he->branch_info->to;
1287
1288 return _hist_entry__sym_snprintf(&to->ms, to->al_addr,
1289 to->al_level, bf, size, width);
1290 }
1291
1292 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1293}
1294
1295static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
1296 const void *arg)
1297{
1298 const char *sym = arg;
1299
1300 if (type != HIST_FILTER__SYMBOL)
1301 return -1;
1302
1303 return sym && !(he->branch_info && he->branch_info->from.ms.sym &&
1304 strstr(he->branch_info->from.ms.sym->name, sym));
1305}
1306
1307static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
1308 const void *arg)
1309{
1310 const char *sym = arg;
1311
1312 if (type != HIST_FILTER__SYMBOL)
1313 return -1;
1314
1315 return sym && !(he->branch_info && he->branch_info->to.ms.sym &&
1316 strstr(he->branch_info->to.ms.sym->name, sym));
1317}
1318
1319struct sort_entry sort_dso_from = {
1320 .se_header = "Source Shared Object",
1321 .se_cmp = sort__dso_from_cmp,
1322 .se_snprintf = hist_entry__dso_from_snprintf,
1323 .se_filter = hist_entry__dso_from_filter,
1324 .se_width_idx = HISTC_DSO_FROM,
1325};
1326
1327struct sort_entry sort_dso_to = {
1328 .se_header = "Target Shared Object",
1329 .se_cmp = sort__dso_to_cmp,
1330 .se_snprintf = hist_entry__dso_to_snprintf,
1331 .se_filter = hist_entry__dso_to_filter,
1332 .se_width_idx = HISTC_DSO_TO,
1333};
1334
1335struct sort_entry sort_sym_from = {
1336 .se_header = "Source Symbol",
1337 .se_cmp = sort__sym_from_cmp,
1338 .se_snprintf = hist_entry__sym_from_snprintf,
1339 .se_filter = hist_entry__sym_from_filter,
1340 .se_width_idx = HISTC_SYMBOL_FROM,
1341};
1342
1343struct sort_entry sort_sym_to = {
1344 .se_header = "Target Symbol",
1345 .se_cmp = sort__sym_to_cmp,
1346 .se_snprintf = hist_entry__sym_to_snprintf,
1347 .se_filter = hist_entry__sym_to_filter,
1348 .se_width_idx = HISTC_SYMBOL_TO,
1349};
1350
1351static int _hist_entry__addr_snprintf(struct map_symbol *ms,
1352 u64 ip, char level, char *bf, size_t size,
1353 unsigned int width)
1354{
1355 struct symbol *sym = ms->sym;
1356 struct map *map = ms->map;
1357 size_t ret = 0, offs;
1358
1359 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
1360 if (sym && map) {
1361 if (sym->type == STT_OBJECT) {
1362 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
1363 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
1364 ip - map__unmap_ip(map, sym->start));
1365 } else {
1366 ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
1367 width - ret,
1368 sym->name);
1369 offs = ip - sym->start;
1370 if (offs)
1371 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", offs);
1372 }
1373 } else {
1374 size_t len = BITS_PER_LONG / 4;
1375 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
1376 len, ip);
1377 }
1378
1379 return ret;
1380}
1381
1382static int hist_entry__addr_from_snprintf(struct hist_entry *he, char *bf,
1383 size_t size, unsigned int width)
1384{
1385 if (he->branch_info) {
1386 struct addr_map_symbol *from = &he->branch_info->from;
1387
1388 return _hist_entry__addr_snprintf(&from->ms, from->al_addr,
1389 he->level, bf, size, width);
1390 }
1391
1392 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1393}
1394
1395static int hist_entry__addr_to_snprintf(struct hist_entry *he, char *bf,
1396 size_t size, unsigned int width)
1397{
1398 if (he->branch_info) {
1399 struct addr_map_symbol *to = &he->branch_info->to;
1400
1401 return _hist_entry__addr_snprintf(&to->ms, to->al_addr,
1402 he->level, bf, size, width);
1403 }
1404
1405 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1406}
1407
1408static int64_t
1409sort__addr_from_cmp(struct hist_entry *left, struct hist_entry *right)
1410{
1411 struct addr_map_symbol *from_l;
1412 struct addr_map_symbol *from_r;
1413 int64_t ret;
1414
1415 if (!left->branch_info || !right->branch_info)
1416 return cmp_null(left->branch_info, right->branch_info);
1417
1418 from_l = &left->branch_info->from;
1419 from_r = &right->branch_info->from;
1420
1421 /*
1422 * comparing symbol address alone is not enough since it's a
1423 * relative address within a dso.
1424 */
1425 ret = _sort__dso_cmp(from_l->ms.map, from_r->ms.map);
1426 if (ret != 0)
1427 return ret;
1428
1429 return _sort__addr_cmp(from_l->addr, from_r->addr);
1430}
1431
1432static int64_t
1433sort__addr_to_cmp(struct hist_entry *left, struct hist_entry *right)
1434{
1435 struct addr_map_symbol *to_l;
1436 struct addr_map_symbol *to_r;
1437 int64_t ret;
1438
1439 if (!left->branch_info || !right->branch_info)
1440 return cmp_null(left->branch_info, right->branch_info);
1441
1442 to_l = &left->branch_info->to;
1443 to_r = &right->branch_info->to;
1444
1445 /*
1446 * comparing symbol address alone is not enough since it's a
1447 * relative address within a dso.
1448 */
1449 ret = _sort__dso_cmp(to_l->ms.map, to_r->ms.map);
1450 if (ret != 0)
1451 return ret;
1452
1453 return _sort__addr_cmp(to_l->addr, to_r->addr);
1454}
1455
1456struct sort_entry sort_addr_from = {
1457 .se_header = "Source Address",
1458 .se_cmp = sort__addr_from_cmp,
1459 .se_snprintf = hist_entry__addr_from_snprintf,
1460 .se_filter = hist_entry__sym_from_filter, /* shared with sym_from */
1461 .se_width_idx = HISTC_ADDR_FROM,
1462};
1463
1464struct sort_entry sort_addr_to = {
1465 .se_header = "Target Address",
1466 .se_cmp = sort__addr_to_cmp,
1467 .se_snprintf = hist_entry__addr_to_snprintf,
1468 .se_filter = hist_entry__sym_to_filter, /* shared with sym_to */
1469 .se_width_idx = HISTC_ADDR_TO,
1470};
1471
1472
1473static int64_t
1474sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
1475{
1476 unsigned char mp, p;
1477
1478 if (!left->branch_info || !right->branch_info)
1479 return cmp_null(left->branch_info, right->branch_info);
1480
1481 mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
1482 p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
1483 return mp || p;
1484}
1485
static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
1488 static const char *out = "N/A";
1489
1490 if (he->branch_info) {
1491 if (he->branch_info->flags.predicted)
1492 out = "N";
1493 else if (he->branch_info->flags.mispred)
1494 out = "Y";
1495 }
1496
1497 return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
1498}
1499
1500static int64_t
1501sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
1502{
1503 if (!left->branch_info || !right->branch_info)
1504 return cmp_null(left->branch_info, right->branch_info);
1505
1506 return left->branch_info->flags.cycles -
1507 right->branch_info->flags.cycles;
1508}
1509
1510static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
1511 size_t size, unsigned int width)
1512{
1513 if (!he->branch_info)
1514 return scnprintf(bf, size, "%-.*s", width, "N/A");
1515 if (he->branch_info->flags.cycles == 0)
1516 return repsep_snprintf(bf, size, "%-*s", width, "-");
1517 return repsep_snprintf(bf, size, "%-*hd", width,
1518 he->branch_info->flags.cycles);
1519}
1520
1521struct sort_entry sort_cycles = {
1522 .se_header = "Basic Block Cycles",
1523 .se_cmp = sort__cycles_cmp,
1524 .se_snprintf = hist_entry__cycles_snprintf,
1525 .se_width_idx = HISTC_CYCLES,
1526};
1527
1528/* --sort daddr_sym */
1529int64_t
1530sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1531{
1532 uint64_t l = 0, r = 0;
1533
1534 if (left->mem_info)
1535 l = mem_info__daddr(left->mem_info)->addr;
1536 if (right->mem_info)
1537 r = mem_info__daddr(right->mem_info)->addr;
1538
1539 return (int64_t)(r - l);
1540}
1541
1542static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
1543 size_t size, unsigned int width)
1544{
1545 uint64_t addr = 0;
1546 struct map_symbol *ms = NULL;
1547
1548 if (he->mem_info) {
1549 addr = mem_info__daddr(he->mem_info)->addr;
1550 ms = &mem_info__daddr(he->mem_info)->ms;
1551 }
1552 return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
1553}
1554
1555int64_t
1556sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
1557{
1558 uint64_t l = 0, r = 0;
1559
1560 if (left->mem_info)
1561 l = mem_info__iaddr(left->mem_info)->addr;
1562 if (right->mem_info)
1563 r = mem_info__iaddr(right->mem_info)->addr;
1564
1565 return (int64_t)(r - l);
1566}
1567
1568static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
1569 size_t size, unsigned int width)
1570{
1571 uint64_t addr = 0;
1572 struct map_symbol *ms = NULL;
1573
1574 if (he->mem_info) {
1575 addr = mem_info__iaddr(he->mem_info)->addr;
1576 ms = &mem_info__iaddr(he->mem_info)->ms;
1577 }
1578 return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
1579}
1580
1581static int64_t
1582sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1583{
1584 struct map *map_l = NULL;
1585 struct map *map_r = NULL;
1586
1587 if (left->mem_info)
1588 map_l = mem_info__daddr(left->mem_info)->ms.map;
1589 if (right->mem_info)
1590 map_r = mem_info__daddr(right->mem_info)->ms.map;
1591
1592 return _sort__dso_cmp(map_l, map_r);
1593}
1594
1595static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
1596 size_t size, unsigned int width)
1597{
1598 struct map *map = NULL;
1599
1600 if (he->mem_info)
1601 map = mem_info__daddr(he->mem_info)->ms.map;
1602
1603 return _hist_entry__dso_snprintf(map, bf, size, width);
1604}
1605
1606static int64_t
1607sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
1608{
1609 union perf_mem_data_src data_src_l;
1610 union perf_mem_data_src data_src_r;
1611
1612 if (left->mem_info)
1613 data_src_l = *mem_info__data_src(left->mem_info);
1614 else
1615 data_src_l.mem_lock = PERF_MEM_LOCK_NA;
1616
1617 if (right->mem_info)
1618 data_src_r = *mem_info__data_src(right->mem_info);
1619 else
1620 data_src_r.mem_lock = PERF_MEM_LOCK_NA;
1621
1622 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
1623}
1624
1625static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
1626 size_t size, unsigned int width)
1627{
1628 char out[10];
1629
1630 perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
1631 return repsep_snprintf(bf, size, "%.*s", width, out);
1632}
1633
1634static int64_t
1635sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
1636{
1637 union perf_mem_data_src data_src_l;
1638 union perf_mem_data_src data_src_r;
1639
1640 if (left->mem_info)
1641 data_src_l = *mem_info__data_src(left->mem_info);
1642 else
1643 data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
1644
1645 if (right->mem_info)
1646 data_src_r = *mem_info__data_src(right->mem_info);
1647 else
1648 data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
1649
1650 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
1651}
1652
1653static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
1654 size_t size, unsigned int width)
1655{
1656 char out[64];
1657
1658 perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
1659 return repsep_snprintf(bf, size, "%-*s", width, out);
1660}
1661
1662static int64_t
1663sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
1664{
1665 union perf_mem_data_src data_src_l;
1666 union perf_mem_data_src data_src_r;
1667
1668 if (left->mem_info)
1669 data_src_l = *mem_info__data_src(left->mem_info);
1670 else
1671 data_src_l.mem_lvl = PERF_MEM_LVL_NA;
1672
1673 if (right->mem_info)
1674 data_src_r = *mem_info__data_src(right->mem_info);
1675 else
1676 data_src_r.mem_lvl = PERF_MEM_LVL_NA;
1677
1678 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
1679}
1680
1681static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
1682 size_t size, unsigned int width)
1683{
1684 char out[64];
1685
1686 perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
1687 return repsep_snprintf(bf, size, "%-*s", width, out);
1688}
1689
1690static int64_t
1691sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
1692{
1693 union perf_mem_data_src data_src_l;
1694 union perf_mem_data_src data_src_r;
1695
1696 if (left->mem_info)
1697 data_src_l = *mem_info__data_src(left->mem_info);
1698 else
1699 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
1700
1701 if (right->mem_info)
1702 data_src_r = *mem_info__data_src(right->mem_info);
1703 else
1704 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
1705
1706 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
1707}
1708
1709static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
1710 size_t size, unsigned int width)
1711{
1712 char out[64];
1713
1714 perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
1715 return repsep_snprintf(bf, size, "%-*s", width, out);
1716}
1717
1718int64_t
1719sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
1720{
1721 u64 l, r;
1722 struct map *l_map, *r_map;
1723 struct dso *l_dso, *r_dso;
1724 int rc;
1725
1726 if (!left->mem_info) return -1;
1727 if (!right->mem_info) return 1;
1728
1729 /* group event types together */
1730 if (left->cpumode > right->cpumode) return -1;
1731 if (left->cpumode < right->cpumode) return 1;
1732
1733 l_map = mem_info__daddr(left->mem_info)->ms.map;
1734 r_map = mem_info__daddr(right->mem_info)->ms.map;
1735
1736 /* if both are NULL, jump to sort on al_addr instead */
1737 if (!l_map && !r_map)
1738 goto addr;
1739
1740 if (!l_map) return -1;
1741 if (!r_map) return 1;
1742
1743 l_dso = map__dso(l_map);
1744 r_dso = map__dso(r_map);
1745 rc = dso__cmp_id(l_dso, r_dso);
1746 if (rc)
1747 return rc;
1748 /*
1749 * Addresses with no major/minor numbers are assumed to be
1750 * anonymous in userspace. Sort those on pid then address.
1751 *
1752 * The kernel and non-zero major/minor mapped areas are
1753 * assumed to be unity mapped. Sort those on address.
1754 */
1755
1756 if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
1757 (!(map__flags(l_map) & MAP_SHARED)) && !dso__id(l_dso)->maj && !dso__id(l_dso)->min &&
1758 !dso__id(l_dso)->ino && !dso__id(l_dso)->ino_generation) {
1759 /* userspace anonymous */
1760
1761 if (thread__pid(left->thread) > thread__pid(right->thread))
1762 return -1;
1763 if (thread__pid(left->thread) < thread__pid(right->thread))
1764 return 1;
1765 }
1766
1767addr:
1768 /* al_addr does all the right addr - start + offset calculations */
1769 l = cl_address(mem_info__daddr(left->mem_info)->al_addr, chk_double_cl);
1770 r = cl_address(mem_info__daddr(right->mem_info)->al_addr, chk_double_cl);
1771
1772 if (l > r) return -1;
1773 if (l < r) return 1;
1774
1775 return 0;
1776}
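/*
 * Illustrative sketch (not part of the original source), assuming 64-byte
 * cachelines and chk_double_cl == false: two loads from the same anonymous
 * mapping of one process at al_addr 0x7f2a00001008 and 0x7f2a00001038 both
 * map to cl_address() == 0x7f2a00001000, so they compare equal here and
 * collapse into a single "Data Cacheline" entry, while a load at
 * 0x7f2a00001048 falls into the next line, 0x7f2a00001040.
 */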
1777
1778static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
1779 size_t size, unsigned int width)
1780{
1781
1782 uint64_t addr = 0;
1783 struct map_symbol *ms = NULL;
1784 char level = he->level;
1785
1786 if (he->mem_info) {
1787 struct map *map = mem_info__daddr(he->mem_info)->ms.map;
1788 struct dso *dso = map ? map__dso(map) : NULL;
1789
1790 addr = cl_address(mem_info__daddr(he->mem_info)->al_addr, chk_double_cl);
1791 ms = &mem_info__daddr(he->mem_info)->ms;
1792
1793 /* print [s] for shared data mmaps */
1794 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
1795 map && !(map__prot(map) & PROT_EXEC) &&
1796 (map__flags(map) & MAP_SHARED) &&
1797 (dso__id(dso)->maj || dso__id(dso)->min || dso__id(dso)->ino ||
1798 dso__id(dso)->ino_generation))
1799 level = 's';
1800 else if (!map)
1801 level = 'X';
1802 }
1803 return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width);
1804}
1805
1806struct sort_entry sort_mispredict = {
1807 .se_header = "Branch Mispredicted",
1808 .se_cmp = sort__mispredict_cmp,
1809 .se_snprintf = hist_entry__mispredict_snprintf,
1810 .se_width_idx = HISTC_MISPREDICT,
1811};
1812
1813static int64_t
1814sort__weight_cmp(struct hist_entry *left, struct hist_entry *right)
1815{
1816 return left->weight - right->weight;
1817}
1818
1819static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1820 size_t size, unsigned int width)
1821{
1822 return repsep_snprintf(bf, size, "%-*llu", width, he->weight);
1823}
1824
1825struct sort_entry sort_local_weight = {
1826 .se_header = "Local Weight",
1827 .se_cmp = sort__weight_cmp,
1828 .se_snprintf = hist_entry__local_weight_snprintf,
1829 .se_width_idx = HISTC_LOCAL_WEIGHT,
1830};
1831
1832static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1833 size_t size, unsigned int width)
1834{
1835 return repsep_snprintf(bf, size, "%-*llu", width,
1836 he->weight * he->stat.nr_events);
1837}
1838
1839struct sort_entry sort_global_weight = {
1840 .se_header = "Weight",
1841 .se_cmp = sort__weight_cmp,
1842 .se_snprintf = hist_entry__global_weight_snprintf,
1843 .se_width_idx = HISTC_GLOBAL_WEIGHT,
1844};
1845
1846static int64_t
1847sort__ins_lat_cmp(struct hist_entry *left, struct hist_entry *right)
1848{
1849 return left->ins_lat - right->ins_lat;
1850}
1851
1852static int hist_entry__local_ins_lat_snprintf(struct hist_entry *he, char *bf,
1853 size_t size, unsigned int width)
1854{
1855 return repsep_snprintf(bf, size, "%-*u", width, he->ins_lat);
1856}
1857
1858struct sort_entry sort_local_ins_lat = {
1859 .se_header = "Local INSTR Latency",
1860 .se_cmp = sort__ins_lat_cmp,
1861 .se_snprintf = hist_entry__local_ins_lat_snprintf,
1862 .se_width_idx = HISTC_LOCAL_INS_LAT,
1863};
1864
1865static int hist_entry__global_ins_lat_snprintf(struct hist_entry *he, char *bf,
1866 size_t size, unsigned int width)
1867{
1868 return repsep_snprintf(bf, size, "%-*u", width,
1869 he->ins_lat * he->stat.nr_events);
1870}
1871
1872struct sort_entry sort_global_ins_lat = {
1873 .se_header = "INSTR Latency",
1874 .se_cmp = sort__ins_lat_cmp,
1875 .se_snprintf = hist_entry__global_ins_lat_snprintf,
1876 .se_width_idx = HISTC_GLOBAL_INS_LAT,
1877};
1878
1879static int64_t
1880sort__p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right)
1881{
1882 return left->p_stage_cyc - right->p_stage_cyc;
1883}
1884
1885static int hist_entry__global_p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
1886 size_t size, unsigned int width)
1887{
1888 return repsep_snprintf(bf, size, "%-*u", width,
1889 he->p_stage_cyc * he->stat.nr_events);
1890}
1891
1892
1893static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
1894 size_t size, unsigned int width)
1895{
1896 return repsep_snprintf(bf, size, "%-*u", width, he->p_stage_cyc);
1897}
1898
1899struct sort_entry sort_local_p_stage_cyc = {
1900 .se_header = "Local Pipeline Stage Cycle",
1901 .se_cmp = sort__p_stage_cyc_cmp,
1902 .se_snprintf = hist_entry__p_stage_cyc_snprintf,
1903 .se_width_idx = HISTC_LOCAL_P_STAGE_CYC,
1904};
1905
1906struct sort_entry sort_global_p_stage_cyc = {
1907 .se_header = "Pipeline Stage Cycle",
1908 .se_cmp = sort__p_stage_cyc_cmp,
1909 .se_snprintf = hist_entry__global_p_stage_cyc_snprintf,
1910 .se_width_idx = HISTC_GLOBAL_P_STAGE_CYC,
1911};
1912
1913struct sort_entry sort_mem_daddr_sym = {
1914 .se_header = "Data Symbol",
1915 .se_cmp = sort__daddr_cmp,
1916 .se_snprintf = hist_entry__daddr_snprintf,
1917 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
1918};
1919
1920struct sort_entry sort_mem_iaddr_sym = {
1921 .se_header = "Code Symbol",
1922 .se_cmp = sort__iaddr_cmp,
1923 .se_snprintf = hist_entry__iaddr_snprintf,
1924 .se_width_idx = HISTC_MEM_IADDR_SYMBOL,
1925};
1926
1927struct sort_entry sort_mem_daddr_dso = {
1928 .se_header = "Data Object",
1929 .se_cmp = sort__dso_daddr_cmp,
1930 .se_snprintf = hist_entry__dso_daddr_snprintf,
1931 .se_width_idx = HISTC_MEM_DADDR_DSO,
1932};
1933
1934struct sort_entry sort_mem_locked = {
1935 .se_header = "Locked",
1936 .se_cmp = sort__locked_cmp,
1937 .se_snprintf = hist_entry__locked_snprintf,
1938 .se_width_idx = HISTC_MEM_LOCKED,
1939};
1940
1941struct sort_entry sort_mem_tlb = {
1942 .se_header = "TLB access",
1943 .se_cmp = sort__tlb_cmp,
1944 .se_snprintf = hist_entry__tlb_snprintf,
1945 .se_width_idx = HISTC_MEM_TLB,
1946};
1947
1948struct sort_entry sort_mem_lvl = {
1949 .se_header = "Memory access",
1950 .se_cmp = sort__lvl_cmp,
1951 .se_snprintf = hist_entry__lvl_snprintf,
1952 .se_width_idx = HISTC_MEM_LVL,
1953};
1954
1955struct sort_entry sort_mem_snoop = {
1956 .se_header = "Snoop",
1957 .se_cmp = sort__snoop_cmp,
1958 .se_snprintf = hist_entry__snoop_snprintf,
1959 .se_width_idx = HISTC_MEM_SNOOP,
1960};
1961
1962struct sort_entry sort_mem_dcacheline = {
1963 .se_header = "Data Cacheline",
1964 .se_cmp = sort__dcacheline_cmp,
1965 .se_snprintf = hist_entry__dcacheline_snprintf,
1966 .se_width_idx = HISTC_MEM_DCACHELINE,
1967};
1968
1969static int64_t
1970sort__blocked_cmp(struct hist_entry *left, struct hist_entry *right)
1971{
1972 union perf_mem_data_src data_src_l;
1973 union perf_mem_data_src data_src_r;
1974
1975 if (left->mem_info)
1976 data_src_l = *mem_info__data_src(left->mem_info);
1977 else
1978 data_src_l.mem_blk = PERF_MEM_BLK_NA;
1979
1980 if (right->mem_info)
1981 data_src_r = *mem_info__data_src(right->mem_info);
1982 else
1983 data_src_r.mem_blk = PERF_MEM_BLK_NA;
1984
1985 return (int64_t)(data_src_r.mem_blk - data_src_l.mem_blk);
1986}
1987
1988static int hist_entry__blocked_snprintf(struct hist_entry *he, char *bf,
1989 size_t size, unsigned int width)
1990{
1991 char out[16];
1992
1993 perf_mem__blk_scnprintf(out, sizeof(out), he->mem_info);
1994 return repsep_snprintf(bf, size, "%.*s", width, out);
1995}
1996
1997struct sort_entry sort_mem_blocked = {
1998 .se_header = "Blocked",
1999 .se_cmp = sort__blocked_cmp,
2000 .se_snprintf = hist_entry__blocked_snprintf,
2001 .se_width_idx = HISTC_MEM_BLOCKED,
2002};
2003
2004static int64_t
2005sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
2006{
2007 uint64_t l = 0, r = 0;
2008
2009 if (left->mem_info)
2010 l = mem_info__daddr(left->mem_info)->phys_addr;
2011 if (right->mem_info)
2012 r = mem_info__daddr(right->mem_info)->phys_addr;
2013
2014 return (int64_t)(r - l);
2015}
2016
2017static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
2018 size_t size, unsigned int width)
2019{
2020 uint64_t addr = 0;
2021 size_t ret = 0;
2022 size_t len = BITS_PER_LONG / 4;
2023
2024 addr = mem_info__daddr(he->mem_info)->phys_addr;
2025
2026 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);
2027
2028 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);
2029
2030 ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");
2031
2032 if (ret > width)
2033 bf[width] = '\0';
2034
2035 return width;
2036}
2037
2038struct sort_entry sort_mem_phys_daddr = {
2039 .se_header = "Data Physical Address",
2040 .se_cmp = sort__phys_daddr_cmp,
2041 .se_snprintf = hist_entry__phys_daddr_snprintf,
2042 .se_width_idx = HISTC_MEM_PHYS_DADDR,
2043};
2044
2045static int64_t
2046sort__data_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
2047{
2048 uint64_t l = 0, r = 0;
2049
2050 if (left->mem_info)
2051 l = mem_info__daddr(left->mem_info)->data_page_size;
2052 if (right->mem_info)
2053 r = mem_info__daddr(right->mem_info)->data_page_size;
2054
2055 return (int64_t)(r - l);
2056}
2057
2058static int hist_entry__data_page_size_snprintf(struct hist_entry *he, char *bf,
2059 size_t size, unsigned int width)
2060{
2061 char str[PAGE_SIZE_NAME_LEN];
2062
2063 return repsep_snprintf(bf, size, "%-*s", width,
2064 get_page_size_name(mem_info__daddr(he->mem_info)->data_page_size, str));
2065}
2066
2067struct sort_entry sort_mem_data_page_size = {
2068 .se_header = "Data Page Size",
2069 .se_cmp = sort__data_page_size_cmp,
2070 .se_snprintf = hist_entry__data_page_size_snprintf,
2071 .se_width_idx = HISTC_MEM_DATA_PAGE_SIZE,
2072};
2073
2074static int64_t
2075sort__code_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
2076{
2077 uint64_t l = left->code_page_size;
2078 uint64_t r = right->code_page_size;
2079
2080 return (int64_t)(r - l);
2081}
2082
2083static int hist_entry__code_page_size_snprintf(struct hist_entry *he, char *bf,
2084 size_t size, unsigned int width)
2085{
2086 char str[PAGE_SIZE_NAME_LEN];
2087
2088 return repsep_snprintf(bf, size, "%-*s", width,
2089 get_page_size_name(he->code_page_size, str));
2090}
2091
2092struct sort_entry sort_code_page_size = {
2093 .se_header = "Code Page Size",
2094 .se_cmp = sort__code_page_size_cmp,
2095 .se_snprintf = hist_entry__code_page_size_snprintf,
2096 .se_width_idx = HISTC_CODE_PAGE_SIZE,
2097};
2098
2099static int64_t
2100sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
2101{
2102 if (!left->branch_info || !right->branch_info)
2103 return cmp_null(left->branch_info, right->branch_info);
2104
2105 return left->branch_info->flags.abort !=
2106 right->branch_info->flags.abort;
2107}
2108
2109static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
2110 size_t size, unsigned int width)
2111{
2112	const char *out = "N/A";
2113
2114 if (he->branch_info) {
2115 if (he->branch_info->flags.abort)
2116 out = "A";
2117 else
2118 out = ".";
2119 }
2120
2121 return repsep_snprintf(bf, size, "%-*s", width, out);
2122}
2123
2124struct sort_entry sort_abort = {
2125 .se_header = "Transaction abort",
2126 .se_cmp = sort__abort_cmp,
2127 .se_snprintf = hist_entry__abort_snprintf,
2128 .se_width_idx = HISTC_ABORT,
2129};
2130
2131static int64_t
2132sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
2133{
2134 if (!left->branch_info || !right->branch_info)
2135 return cmp_null(left->branch_info, right->branch_info);
2136
2137 return left->branch_info->flags.in_tx !=
2138 right->branch_info->flags.in_tx;
2139}
2140
2141static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
2142 size_t size, unsigned int width)
2143{
2144	const char *out = "N/A";
2145
2146 if (he->branch_info) {
2147 if (he->branch_info->flags.in_tx)
2148 out = "T";
2149 else
2150 out = ".";
2151 }
2152
2153 return repsep_snprintf(bf, size, "%-*s", width, out);
2154}
2155
2156struct sort_entry sort_in_tx = {
2157 .se_header = "Branch in transaction",
2158 .se_cmp = sort__in_tx_cmp,
2159 .se_snprintf = hist_entry__in_tx_snprintf,
2160 .se_width_idx = HISTC_IN_TX,
2161};
2162
2163static int64_t
2164sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
2165{
2166 return left->transaction - right->transaction;
2167}
2168
2169static inline char *add_str(char *p, const char *str)
2170{
2171 strcpy(p, str);
2172 return p + strlen(str);
2173}
2174
2175static struct txbit {
2176 unsigned flag;
2177 const char *name;
2178 int skip_for_len;
2179} txbits[] = {
2180 { PERF_TXN_ELISION, "EL ", 0 },
2181 { PERF_TXN_TRANSACTION, "TX ", 1 },
2182 { PERF_TXN_SYNC, "SYNC ", 1 },
2183 { PERF_TXN_ASYNC, "ASYNC ", 0 },
2184 { PERF_TXN_RETRY, "RETRY ", 0 },
2185 { PERF_TXN_CONFLICT, "CON ", 0 },
2186 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
2187 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
2188 { 0, NULL, 0 }
2189};
2190
2191int hist_entry__transaction_len(void)
2192{
2193 int i;
2194 int len = 0;
2195
2196 for (i = 0; txbits[i].name; i++) {
2197 if (!txbits[i].skip_for_len)
2198 len += strlen(txbits[i].name);
2199 }
2200 len += 4; /* :XX<space> */
2201 return len;
2202}
2203
2204static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
2205 size_t size, unsigned int width)
2206{
2207 u64 t = he->transaction;
2208 char buf[128];
2209 char *p = buf;
2210 int i;
2211
2212 buf[0] = 0;
2213 for (i = 0; txbits[i].name; i++)
2214 if (txbits[i].flag & t)
2215 p = add_str(p, txbits[i].name);
2216 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
2217 p = add_str(p, "NEITHER ");
2218 if (t & PERF_TXN_ABORT_MASK) {
2219 sprintf(p, ":%" PRIx64,
2220 (t & PERF_TXN_ABORT_MASK) >>
2221 PERF_TXN_ABORT_SHIFT);
2222 p += strlen(p);
2223 }
2224
2225 return repsep_snprintf(bf, size, "%-*s", width, buf);
2226}
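/*
 * A minimal illustration of the rendering above: a sample whose transaction
 * word has PERF_TXN_TRANSACTION and PERF_TXN_SYNC set and an abort code of
 * 0x3 would be printed as "TX SYNC :3".
 */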
2227
2228struct sort_entry sort_transaction = {
2229 .se_header = "Transaction ",
2230 .se_cmp = sort__transaction_cmp,
2231 .se_snprintf = hist_entry__transaction_snprintf,
2232 .se_width_idx = HISTC_TRANSACTION,
2233};
2234
2235/* --sort symbol_size */
2236
2237static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
2238{
2239 int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
2240 int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;
2241
2242 return size_l < size_r ? -1 :
2243 size_l == size_r ? 0 : 1;
2244}
2245
2246static int64_t
2247sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
2248{
2249 return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
2250}
2251
2252static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
2253 size_t bf_size, unsigned int width)
2254{
2255 if (sym)
2256 return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));
2257
2258 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
2259}
2260
2261static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
2262 size_t size, unsigned int width)
2263{
2264 return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
2265}
2266
2267struct sort_entry sort_sym_size = {
2268 .se_header = "Symbol size",
2269 .se_cmp = sort__sym_size_cmp,
2270 .se_snprintf = hist_entry__sym_size_snprintf,
2271 .se_width_idx = HISTC_SYM_SIZE,
2272};
2273
2274/* --sort dso_size */
2275
2276static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
2277{
2278 int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
2279 int64_t size_r = map_r != NULL ? map__size(map_r) : 0;
2280
2281 return size_l < size_r ? -1 :
2282 size_l == size_r ? 0 : 1;
2283}
2284
2285static int64_t
2286sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
2287{
2288 return _sort__dso_size_cmp(right->ms.map, left->ms.map);
2289}
2290
2291static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
2292 size_t bf_size, unsigned int width)
2293{
2294 if (map && map__dso(map))
2295 return repsep_snprintf(bf, bf_size, "%*d", width, map__size(map));
2296
2297 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
2298}
2299
2300static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
2301 size_t size, unsigned int width)
2302{
2303 return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
2304}
2305
2306struct sort_entry sort_dso_size = {
2307 .se_header = "DSO size",
2308 .se_cmp = sort__dso_size_cmp,
2309 .se_snprintf = hist_entry__dso_size_snprintf,
2310 .se_width_idx = HISTC_DSO_SIZE,
2311};
2312
2313/* --sort addr */
2314
2315static int64_t
2316sort__addr_cmp(struct hist_entry *left, struct hist_entry *right)
2317{
2318 u64 left_ip = left->ip;
2319 u64 right_ip = right->ip;
2320 struct map *left_map = left->ms.map;
2321 struct map *right_map = right->ms.map;
2322
2323 if (left_map)
2324 left_ip = map__unmap_ip(left_map, left_ip);
2325 if (right_map)
2326 right_ip = map__unmap_ip(right_map, right_ip);
2327
2328 return _sort__addr_cmp(left_ip, right_ip);
2329}
2330
2331static int hist_entry__addr_snprintf(struct hist_entry *he, char *bf,
2332 size_t size, unsigned int width)
2333{
2334 u64 ip = he->ip;
2335 struct map *map = he->ms.map;
2336
2337 if (map)
2338 ip = map__unmap_ip(map, ip);
2339
2340 return repsep_snprintf(bf, size, "%-#*llx", width, ip);
2341}
2342
2343struct sort_entry sort_addr = {
2344 .se_header = "Address",
2345 .se_cmp = sort__addr_cmp,
2346 .se_snprintf = hist_entry__addr_snprintf,
2347 .se_width_idx = HISTC_ADDR,
2348};
2349
2350/* --sort type */
2351
2352struct annotated_data_type unknown_type = {
2353 .self = {
2354 .type_name = (char *)"(unknown)",
2355 .children = LIST_HEAD_INIT(unknown_type.self.children),
2356 },
2357};
2358
2359static int64_t
2360sort__type_cmp(struct hist_entry *left, struct hist_entry *right)
2361{
2362 return sort__addr_cmp(left, right);
2363}
2364
2365static void sort__type_init(struct hist_entry *he)
2366{
2367 if (he->mem_type)
2368 return;
2369
2370 he->mem_type = hist_entry__get_data_type(he);
2371 if (he->mem_type == NULL) {
2372 he->mem_type = &unknown_type;
2373 he->mem_type_off = 0;
2374 }
2375}
2376
2377static int64_t
2378sort__type_collapse(struct hist_entry *left, struct hist_entry *right)
2379{
2380 struct annotated_data_type *left_type = left->mem_type;
2381 struct annotated_data_type *right_type = right->mem_type;
2382
2383 if (!left_type) {
2384 sort__type_init(left);
2385 left_type = left->mem_type;
2386 }
2387
2388 if (!right_type) {
2389 sort__type_init(right);
2390 right_type = right->mem_type;
2391 }
2392
2393 return strcmp(left_type->self.type_name, right_type->self.type_name);
2394}
2395
2396static int64_t
2397sort__type_sort(struct hist_entry *left, struct hist_entry *right)
2398{
2399 return sort__type_collapse(left, right);
2400}
2401
2402static int hist_entry__type_snprintf(struct hist_entry *he, char *bf,
2403 size_t size, unsigned int width)
2404{
2405 return repsep_snprintf(bf, size, "%-*s", width, he->mem_type->self.type_name);
2406}
2407
2408struct sort_entry sort_type = {
2409 .se_header = "Data Type",
2410 .se_cmp = sort__type_cmp,
2411 .se_collapse = sort__type_collapse,
2412 .se_sort = sort__type_sort,
2413 .se_init = sort__type_init,
2414 .se_snprintf = hist_entry__type_snprintf,
2415 .se_width_idx = HISTC_TYPE,
2416};
2417
2418/* --sort typeoff */
2419
2420static int64_t
2421sort__typeoff_sort(struct hist_entry *left, struct hist_entry *right)
2422{
2423 struct annotated_data_type *left_type = left->mem_type;
2424 struct annotated_data_type *right_type = right->mem_type;
2425 int64_t ret;
2426
2427 if (!left_type) {
2428 sort__type_init(left);
2429 left_type = left->mem_type;
2430 }
2431
2432 if (!right_type) {
2433 sort__type_init(right);
2434 right_type = right->mem_type;
2435 }
2436
2437 ret = strcmp(left_type->self.type_name, right_type->self.type_name);
2438 if (ret)
2439 return ret;
2440 return left->mem_type_off - right->mem_type_off;
2441}
2442
2443static int hist_entry__typeoff_snprintf(struct hist_entry *he, char *bf,
2444 size_t size, unsigned int width __maybe_unused)
2445{
2446 struct annotated_data_type *he_type = he->mem_type;
2447 char buf[4096];
2448
2449 if (he_type == &unknown_type || he_type == &stackop_type ||
2450 he_type == &canary_type)
2451 return repsep_snprintf(bf, size, "%s", he_type->self.type_name);
2452
2453 if (!annotated_data_type__get_member_name(he_type, buf, sizeof(buf),
2454 he->mem_type_off))
2455 scnprintf(buf, sizeof(buf), "no field");
2456
2457 return repsep_snprintf(bf, size, "%s +%#x (%s)", he_type->self.type_name,
2458 he->mem_type_off, buf);
2459}
2460
2461struct sort_entry sort_type_offset = {
2462 .se_header = "Data Type Offset",
2463 .se_cmp = sort__type_cmp,
2464 .se_collapse = sort__typeoff_sort,
2465 .se_sort = sort__typeoff_sort,
2466 .se_init = sort__type_init,
2467 .se_snprintf = hist_entry__typeoff_snprintf,
2468 .se_width_idx = HISTC_TYPE_OFFSET,
2469};
2470
2471/* --sort typecln */
2472
2473/* TODO: use actual value in the system */
2474#define TYPE_CACHELINE_SIZE 64
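/*
 * Illustration: with the 64-byte assumption above, an access at offset 0x88
 * within a data type is attributed to cache line 2 (0x88 / 64).
 */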
2475
2476static int64_t
2477sort__typecln_sort(struct hist_entry *left, struct hist_entry *right)
2478{
2479 struct annotated_data_type *left_type = left->mem_type;
2480 struct annotated_data_type *right_type = right->mem_type;
2481 int64_t left_cln, right_cln;
2482 int64_t ret;
2483
2484 if (!left_type) {
2485 sort__type_init(left);
2486 left_type = left->mem_type;
2487 }
2488
2489 if (!right_type) {
2490 sort__type_init(right);
2491 right_type = right->mem_type;
2492 }
2493
2494 ret = strcmp(left_type->self.type_name, right_type->self.type_name);
2495 if (ret)
2496 return ret;
2497
2498 left_cln = left->mem_type_off / TYPE_CACHELINE_SIZE;
2499 right_cln = right->mem_type_off / TYPE_CACHELINE_SIZE;
2500 return left_cln - right_cln;
2501}
2502
2503static int hist_entry__typecln_snprintf(struct hist_entry *he, char *bf,
2504 size_t size, unsigned int width __maybe_unused)
2505{
2506 struct annotated_data_type *he_type = he->mem_type;
2507
2508 return repsep_snprintf(bf, size, "%s: cache-line %d", he_type->self.type_name,
2509 he->mem_type_off / TYPE_CACHELINE_SIZE);
2510}
2511
2512struct sort_entry sort_type_cacheline = {
2513 .se_header = "Data Type Cacheline",
2514 .se_cmp = sort__type_cmp,
2515 .se_collapse = sort__typecln_sort,
2516 .se_sort = sort__typecln_sort,
2517 .se_init = sort__type_init,
2518 .se_snprintf = hist_entry__typecln_snprintf,
2519 .se_width_idx = HISTC_TYPE_CACHELINE,
2520};
2521
2522
2523struct sort_dimension {
2524 const char *name;
2525 struct sort_entry *entry;
2526 int taken;
2527};
2528
2529int __weak arch_support_sort_key(const char *sort_key __maybe_unused)
2530{
2531 return 0;
2532}
2533
2534const char * __weak arch_perf_header_entry(const char *se_header)
2535{
2536 return se_header;
2537}
2538
2539static void sort_dimension_add_dynamic_header(struct sort_dimension *sd)
2540{
2541 sd->entry->se_header = arch_perf_header_entry(sd->entry->se_header);
2542}
2543
2544#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
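/*
 * For example, DIM(SORT_PID, "pid", sort_thread) below expands to
 * [SORT_PID] = { .name = "pid", .entry = &sort_thread }.
 */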
2545
2546static struct sort_dimension common_sort_dimensions[] = {
2547 DIM(SORT_PID, "pid", sort_thread),
2548 DIM(SORT_TGID, "tgid", sort_tgid),
2549 DIM(SORT_COMM, "comm", sort_comm),
2550 DIM(SORT_DSO, "dso", sort_dso),
2551 DIM(SORT_SYM, "symbol", sort_sym),
2552 DIM(SORT_PARENT, "parent", sort_parent),
2553 DIM(SORT_CPU, "cpu", sort_cpu),
2554 DIM(SORT_SOCKET, "socket", sort_socket),
2555 DIM(SORT_SRCLINE, "srcline", sort_srcline),
2556 DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
2557 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
2558 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
2559 DIM(SORT_TRANSACTION, "transaction", sort_transaction),
2560#ifdef HAVE_LIBTRACEEVENT
2561 DIM(SORT_TRACE, "trace", sort_trace),
2562#endif
2563 DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
2564 DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
2565 DIM(SORT_CGROUP, "cgroup", sort_cgroup),
2566 DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
2567 DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
2568 DIM(SORT_TIME, "time", sort_time),
2569 DIM(SORT_CODE_PAGE_SIZE, "code_page_size", sort_code_page_size),
2570 DIM(SORT_LOCAL_INS_LAT, "local_ins_lat", sort_local_ins_lat),
2571 DIM(SORT_GLOBAL_INS_LAT, "ins_lat", sort_global_ins_lat),
2572 DIM(SORT_LOCAL_PIPELINE_STAGE_CYC, "local_p_stage_cyc", sort_local_p_stage_cyc),
2573 DIM(SORT_GLOBAL_PIPELINE_STAGE_CYC, "p_stage_cyc", sort_global_p_stage_cyc),
2574 DIM(SORT_ADDR, "addr", sort_addr),
2575 DIM(SORT_LOCAL_RETIRE_LAT, "local_retire_lat", sort_local_p_stage_cyc),
2576 DIM(SORT_GLOBAL_RETIRE_LAT, "retire_lat", sort_global_p_stage_cyc),
2577 DIM(SORT_SIMD, "simd", sort_simd),
2578 DIM(SORT_ANNOTATE_DATA_TYPE, "type", sort_type),
2579 DIM(SORT_ANNOTATE_DATA_TYPE_OFFSET, "typeoff", sort_type_offset),
2580 DIM(SORT_SYM_OFFSET, "symoff", sort_sym_offset),
2581 DIM(SORT_ANNOTATE_DATA_TYPE_CACHELINE, "typecln", sort_type_cacheline),
2582 DIM(SORT_PARALLELISM, "parallelism", sort_parallelism),
2583};
2584
2585#undef DIM
2586
2587#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
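/*
 * Branch-stack sort keys are enumerated starting at __SORT_BRANCH_STACK,
 * hence the index is taken relative to it so this array starts at 0.
 */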
2588
2589static struct sort_dimension bstack_sort_dimensions[] = {
2590 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
2591 DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
2592 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
2593 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
2594 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
2595 DIM(SORT_IN_TX, "in_tx", sort_in_tx),
2596 DIM(SORT_ABORT, "abort", sort_abort),
2597 DIM(SORT_CYCLES, "cycles", sort_cycles),
2598 DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
2599 DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
2600 DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
2601 DIM(SORT_ADDR_FROM, "addr_from", sort_addr_from),
2602 DIM(SORT_ADDR_TO, "addr_to", sort_addr_to),
2603 DIM(SORT_CALLCHAIN_BRANCH_PREDICTED,
2604 "callchain_branch_predicted",
2605 sort_callchain_branch_predicted),
2606 DIM(SORT_CALLCHAIN_BRANCH_ABORT,
2607 "callchain_branch_abort",
2608 sort_callchain_branch_abort),
2609 DIM(SORT_CALLCHAIN_BRANCH_CYCLES,
2610 "callchain_branch_cycles",
2611 sort_callchain_branch_cycles)
2612};
2613
2614#undef DIM
2615
2616#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
2617
2618static struct sort_dimension memory_sort_dimensions[] = {
2619 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
2620 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
2621 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
2622 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
2623 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
2624 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
2625 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
2626 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
2627 DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
2628 DIM(SORT_MEM_DATA_PAGE_SIZE, "data_page_size", sort_mem_data_page_size),
2629 DIM(SORT_MEM_BLOCKED, "blocked", sort_mem_blocked),
2630};
2631
2632#undef DIM
2633
2634struct hpp_dimension {
2635 const char *name;
2636 struct perf_hpp_fmt *fmt;
2637 int taken;
2638 int was_taken;
2639 int mem_mode;
2640};
2641
2642#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
2643#define DIM_MEM(d, n) { .name = n, .fmt = &perf_hpp__format[d], .mem_mode = 1, }
2644
2645static struct hpp_dimension hpp_sort_dimensions[] = {
2646 DIM(PERF_HPP__OVERHEAD, "overhead"),
2647 DIM(PERF_HPP__LATENCY, "latency"),
2648 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
2649 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
2650 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
2651 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
2652 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
2653 DIM(PERF_HPP__LATENCY_ACC, "latency_children"),
2654 DIM(PERF_HPP__SAMPLES, "sample"),
2655 DIM(PERF_HPP__PERIOD, "period"),
2656 DIM(PERF_HPP__WEIGHT1, "weight1"),
2657 DIM(PERF_HPP__WEIGHT2, "weight2"),
2658 DIM(PERF_HPP__WEIGHT3, "weight3"),
2659 /* aliases for weight_struct */
2660 DIM(PERF_HPP__WEIGHT2, "ins_lat"),
2661 DIM(PERF_HPP__WEIGHT3, "retire_lat"),
2662 DIM(PERF_HPP__WEIGHT3, "p_stage_cyc"),
2663 /* used for output only when SORT_MODE__MEM */
2664 DIM_MEM(PERF_HPP__MEM_STAT_OP, "op"),
2665 DIM_MEM(PERF_HPP__MEM_STAT_CACHE, "cache"),
2666 DIM_MEM(PERF_HPP__MEM_STAT_MEMORY, "memory"),
2667 DIM_MEM(PERF_HPP__MEM_STAT_SNOOP, "snoop"),
2668 DIM_MEM(PERF_HPP__MEM_STAT_DTLB, "dtlb"),
2669};
2670
2671#undef DIM_MEM
2672#undef DIM
2673
2674struct hpp_sort_entry {
2675 struct perf_hpp_fmt hpp;
2676 struct sort_entry *se;
2677};
2678
2679void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
2680{
2681 struct hpp_sort_entry *hse;
2682
2683 if (!perf_hpp__is_sort_entry(fmt))
2684 return;
2685
2686 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2687 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
2688}
2689
2690static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2691 struct hists *hists, int line,
2692 int *span __maybe_unused)
2693{
2694 struct hpp_sort_entry *hse;
2695 size_t len = fmt->user_len;
2696 const char *hdr = "";
2697
2698 if (line == hists->hpp_list->nr_header_lines - 1)
2699 hdr = fmt->name;
2700
2701 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2702
2703 if (!len)
2704 len = hists__col_len(hists, hse->se->se_width_idx);
2705
2706 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, hdr);
2707}
2708
2709static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
2710 struct perf_hpp *hpp __maybe_unused,
2711 struct hists *hists)
2712{
2713 struct hpp_sort_entry *hse;
2714 size_t len = fmt->user_len;
2715
2716 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2717
2718 if (!len)
2719 len = hists__col_len(hists, hse->se->se_width_idx);
2720
2721 return len;
2722}
2723
2724static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2725 struct hist_entry *he)
2726{
2727 struct hpp_sort_entry *hse;
2728 size_t len = fmt->user_len;
2729
2730 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2731
2732 if (!len)
2733 len = hists__col_len(he->hists, hse->se->se_width_idx);
2734
2735 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
2736}
2737
2738static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
2739 struct hist_entry *a, struct hist_entry *b)
2740{
2741 struct hpp_sort_entry *hse;
2742
2743 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2744 return hse->se->se_cmp(a, b);
2745}
2746
2747static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
2748 struct hist_entry *a, struct hist_entry *b)
2749{
2750 struct hpp_sort_entry *hse;
2751 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
2752
2753 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2754 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
2755 return collapse_fn(a, b);
2756}
2757
2758static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
2759 struct hist_entry *a, struct hist_entry *b)
2760{
2761 struct hpp_sort_entry *hse;
2762 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
2763
2764 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2765 sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
2766 return sort_fn(a, b);
2767}
2768
2769bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
2770{
2771 return format->header == __sort__hpp_header;
2772}
2773
2774#define MK_SORT_ENTRY_CHK(key) \
2775bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \
2776{ \
2777 struct hpp_sort_entry *hse; \
2778 \
2779 if (!perf_hpp__is_sort_entry(fmt)) \
2780 return false; \
2781 \
2782 hse = container_of(fmt, struct hpp_sort_entry, hpp); \
2783 return hse->se == &sort_ ## key ; \
2784}
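/*
 * For example, MK_SORT_ENTRY_CHK(comm) below defines perf_hpp__is_comm_entry(),
 * which returns true only when @fmt is a sort entry wrapping &sort_comm.
 */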
2785
2786#ifdef HAVE_LIBTRACEEVENT
2787MK_SORT_ENTRY_CHK(trace)
2788#else
2789bool perf_hpp__is_trace_entry(struct perf_hpp_fmt *fmt __maybe_unused)
2790{
2791 return false;
2792}
2793#endif
2794MK_SORT_ENTRY_CHK(srcline)
2795MK_SORT_ENTRY_CHK(srcfile)
2796MK_SORT_ENTRY_CHK(thread)
2797MK_SORT_ENTRY_CHK(comm)
2798MK_SORT_ENTRY_CHK(dso)
2799MK_SORT_ENTRY_CHK(sym)
2800MK_SORT_ENTRY_CHK(parallelism)
2801
2802
2803static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
2804{
2805 struct hpp_sort_entry *hse_a;
2806 struct hpp_sort_entry *hse_b;
2807
2808 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
2809 return false;
2810
2811 hse_a = container_of(a, struct hpp_sort_entry, hpp);
2812 hse_b = container_of(b, struct hpp_sort_entry, hpp);
2813
2814 return hse_a->se == hse_b->se;
2815}
2816
2817static void hse_free(struct perf_hpp_fmt *fmt)
2818{
2819 struct hpp_sort_entry *hse;
2820
2821 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2822 free(hse);
2823}
2824
2825static void hse_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
2826{
2827 struct hpp_sort_entry *hse;
2828
2829 if (!perf_hpp__is_sort_entry(fmt))
2830 return;
2831
2832 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2833
2834 if (hse->se->se_init)
2835 hse->se->se_init(he);
2836}
2837
2838static struct hpp_sort_entry *
2839__sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
2840{
2841 struct hpp_sort_entry *hse;
2842
2843 hse = malloc(sizeof(*hse));
2844 if (hse == NULL) {
2845 pr_err("Memory allocation failed\n");
2846 return NULL;
2847 }
2848
2849 hse->se = sd->entry;
2850 hse->hpp.name = sd->entry->se_header;
2851 hse->hpp.header = __sort__hpp_header;
2852 hse->hpp.width = __sort__hpp_width;
2853 hse->hpp.entry = __sort__hpp_entry;
2854 hse->hpp.color = NULL;
2855
2856 hse->hpp.cmp = __sort__hpp_cmp;
2857 hse->hpp.collapse = __sort__hpp_collapse;
2858 hse->hpp.sort = __sort__hpp_sort;
2859 hse->hpp.equal = __sort__hpp_equal;
2860 hse->hpp.free = hse_free;
2861 hse->hpp.init = hse_init;
2862
2863 INIT_LIST_HEAD(&hse->hpp.list);
2864 INIT_LIST_HEAD(&hse->hpp.sort_list);
2865 hse->hpp.elide = false;
2866 hse->hpp.len = 0;
2867 hse->hpp.user_len = 0;
2868 hse->hpp.level = level;
2869
2870 return hse;
2871}
2872
2873static void hpp_free(struct perf_hpp_fmt *fmt)
2874{
2875 free(fmt);
2876}
2877
2878static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
2879 int level)
2880{
2881 struct perf_hpp_fmt *fmt;
2882
2883 fmt = memdup(hd->fmt, sizeof(*fmt));
2884 if (fmt) {
2885 INIT_LIST_HEAD(&fmt->list);
2886 INIT_LIST_HEAD(&fmt->sort_list);
2887 fmt->free = hpp_free;
2888 fmt->level = level;
2889 }
2890
2891 return fmt;
2892}
2893
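/*
 * Returns -1 when no sort key in the hpp list provides a filter for @type;
 * otherwise returns the bitwise OR of the individual filter results.
 */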
2894int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
2895{
2896 struct perf_hpp_fmt *fmt;
2897 struct hpp_sort_entry *hse;
2898 int ret = -1;
2899 int r;
2900
2901 perf_hpp_list__for_each_format(he->hpp_list, fmt) {
2902 if (!perf_hpp__is_sort_entry(fmt))
2903 continue;
2904
2905 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2906 if (hse->se->se_filter == NULL)
2907 continue;
2908
2909		/*
2910		 * A hist entry is filtered if any sort key in the hpp list
2911		 * applies a filter to it, but non-matching filter types are skipped.
2912		 */
2913 r = hse->se->se_filter(he, type, arg);
2914 if (r >= 0) {
2915 if (ret < 0)
2916 ret = 0;
2917 ret |= r;
2918 }
2919 }
2920
2921 return ret;
2922}
2923
2924static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
2925 struct perf_hpp_list *list,
2926 int level)
2927{
2928 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
2929
2930 if (hse == NULL)
2931 return -1;
2932
2933 perf_hpp_list__register_sort_field(list, &hse->hpp);
2934 return 0;
2935}
2936
2937static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
2938 struct perf_hpp_list *list,
2939 int level)
2940{
2941 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
2942
2943 if (hse == NULL)
2944 return -1;
2945
2946 perf_hpp_list__column_register(list, &hse->hpp);
2947 return 0;
2948}
2949
2950#ifndef HAVE_LIBTRACEEVENT
2951bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused)
2952{
2953 return false;
2954}
2955bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused,
2956 struct hists *hists __maybe_unused)
2957{
2958 return false;
2959}
2960#else
2961struct hpp_dynamic_entry {
2962 struct perf_hpp_fmt hpp;
2963 struct evsel *evsel;
2964 struct tep_format_field *field;
2965 unsigned dynamic_len;
2966 bool raw_trace;
2967};
2968
2969static int hde_width(struct hpp_dynamic_entry *hde)
2970{
2971 if (!hde->hpp.len) {
2972 int len = hde->dynamic_len;
2973 int namelen = strlen(hde->field->name);
2974 int fieldlen = hde->field->size;
2975
2976 if (namelen > len)
2977 len = namelen;
2978
2979 if (!(hde->field->flags & TEP_FIELD_IS_STRING)) {
2980			/* length needed to print the value as hex: two digits per byte plus "0x" */
2981 fieldlen = hde->field->size * 2 + 2;
2982 }
2983 if (fieldlen > len)
2984 len = fieldlen;
2985
2986 hde->hpp.len = len;
2987 }
2988 return hde->hpp.len;
2989}
2990
2991static void update_dynamic_len(struct hpp_dynamic_entry *hde,
2992 struct hist_entry *he)
2993{
2994 char *str, *pos;
2995 struct tep_format_field *field = hde->field;
2996 size_t namelen;
2997 bool last = false;
2998
2999 if (hde->raw_trace)
3000 return;
3001
3002 /* parse pretty print result and update max length */
3003 if (!he->trace_output)
3004 he->trace_output = get_trace_output(he);
3005
3006 namelen = strlen(field->name);
3007 str = he->trace_output;
3008
3009 while (str) {
3010 pos = strchr(str, ' ');
3011 if (pos == NULL) {
3012 last = true;
3013 pos = str + strlen(str);
3014 }
3015
3016 if (!strncmp(str, field->name, namelen)) {
3017 size_t len;
3018
3019 str += namelen + 1;
3020 len = pos - str;
3021
3022 if (len > hde->dynamic_len)
3023 hde->dynamic_len = len;
3024 break;
3025 }
3026
3027 if (last)
3028 str = NULL;
3029 else
3030 str = pos + 1;
3031 }
3032}
3033
3034static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
3035 struct hists *hists __maybe_unused,
3036 int line __maybe_unused,
3037 int *span __maybe_unused)
3038{
3039 struct hpp_dynamic_entry *hde;
3040 size_t len = fmt->user_len;
3041
3042 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3043
3044 if (!len)
3045 len = hde_width(hde);
3046
3047 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
3048}
3049
3050static int __sort__hde_width(struct perf_hpp_fmt *fmt,
3051 struct perf_hpp *hpp __maybe_unused,
3052 struct hists *hists __maybe_unused)
3053{
3054 struct hpp_dynamic_entry *hde;
3055 size_t len = fmt->user_len;
3056
3057 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3058
3059 if (!len)
3060 len = hde_width(hde);
3061
3062 return len;
3063}
3064
3065bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
3066{
3067 struct hpp_dynamic_entry *hde;
3068
3069 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3070
3071 return hists_to_evsel(hists) == hde->evsel;
3072}
3073
3074static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
3075 struct hist_entry *he)
3076{
3077 struct hpp_dynamic_entry *hde;
3078 size_t len = fmt->user_len;
3079 char *str, *pos;
3080 struct tep_format_field *field;
3081 size_t namelen;
3082 bool last = false;
3083 int ret;
3084
3085 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3086
3087 if (!len)
3088 len = hde_width(hde);
3089
3090 if (hde->raw_trace)
3091 goto raw_field;
3092
3093 if (!he->trace_output)
3094 he->trace_output = get_trace_output(he);
3095
3096 field = hde->field;
3097 namelen = strlen(field->name);
3098 str = he->trace_output;
3099
3100 while (str) {
3101 pos = strchr(str, ' ');
3102 if (pos == NULL) {
3103 last = true;
3104 pos = str + strlen(str);
3105 }
3106
3107 if (!strncmp(str, field->name, namelen)) {
3108 str += namelen + 1;
3109 str = strndup(str, pos - str);
3110
3111 if (str == NULL)
3112 return scnprintf(hpp->buf, hpp->size,
3113 "%*.*s", len, len, "ERROR");
3114 break;
3115 }
3116
3117 if (last)
3118 str = NULL;
3119 else
3120 str = pos + 1;
3121 }
3122
3123 if (str == NULL) {
3124 struct trace_seq seq;
3125raw_field:
3126 trace_seq_init(&seq);
3127 tep_print_field(&seq, he->raw_data, hde->field);
3128 str = seq.buffer;
3129 }
3130
3131 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
3132 free(str);
3133 return ret;
3134}
3135
3136static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
3137 struct hist_entry *a, struct hist_entry *b)
3138{
3139 struct hpp_dynamic_entry *hde;
3140 struct tep_format_field *field;
3141 unsigned offset, size;
3142
3143 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3144
3145 field = hde->field;
3146 if (field->flags & TEP_FIELD_IS_DYNAMIC) {
3147 unsigned long long dyn;
3148
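		/*
		 * Dynamic fields store a 32-bit descriptor in the record:
		 * the payload offset in the lower 16 bits and its size in
		 * the upper 16 bits (decoded below).
		 */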
3149 tep_read_number_field(field, a->raw_data, &dyn);
3150 offset = dyn & 0xffff;
3151 size = (dyn >> 16) & 0xffff;
3152 if (tep_field_is_relative(field->flags))
3153 offset += field->offset + field->size;
3154 /* record max width for output */
3155 if (size > hde->dynamic_len)
3156 hde->dynamic_len = size;
3157 } else {
3158 offset = field->offset;
3159 size = field->size;
3160 }
3161
3162 return memcmp(a->raw_data + offset, b->raw_data + offset, size);
3163}
3164
3165bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
3166{
3167 return fmt->cmp == __sort__hde_cmp;
3168}
3169
3170static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
3171{
3172 struct hpp_dynamic_entry *hde_a;
3173 struct hpp_dynamic_entry *hde_b;
3174
3175 if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
3176 return false;
3177
3178 hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
3179 hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
3180
3181 return hde_a->field == hde_b->field;
3182}
3183
3184static void hde_free(struct perf_hpp_fmt *fmt)
3185{
3186 struct hpp_dynamic_entry *hde;
3187
3188 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3189 free(hde);
3190}
3191
3192static void __sort__hde_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
3193{
3194 struct hpp_dynamic_entry *hde;
3195
3196 if (!perf_hpp__is_dynamic_entry(fmt))
3197 return;
3198
3199 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3200 update_dynamic_len(hde, he);
3201}
3202
3203static struct hpp_dynamic_entry *
3204__alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field,
3205 int level)
3206{
3207 struct hpp_dynamic_entry *hde;
3208
3209 hde = malloc(sizeof(*hde));
3210 if (hde == NULL) {
3211 pr_debug("Memory allocation failed\n");
3212 return NULL;
3213 }
3214
3215 hde->evsel = evsel;
3216 hde->field = field;
3217 hde->dynamic_len = 0;
3218
3219 hde->hpp.name = field->name;
3220 hde->hpp.header = __sort__hde_header;
3221 hde->hpp.width = __sort__hde_width;
3222 hde->hpp.entry = __sort__hde_entry;
3223 hde->hpp.color = NULL;
3224
3225 hde->hpp.init = __sort__hde_init;
3226 hde->hpp.cmp = __sort__hde_cmp;
3227 hde->hpp.collapse = __sort__hde_cmp;
3228 hde->hpp.sort = __sort__hde_cmp;
3229 hde->hpp.equal = __sort__hde_equal;
3230 hde->hpp.free = hde_free;
3231
3232 INIT_LIST_HEAD(&hde->hpp.list);
3233 INIT_LIST_HEAD(&hde->hpp.sort_list);
3234 hde->hpp.elide = false;
3235 hde->hpp.len = 0;
3236 hde->hpp.user_len = 0;
3237 hde->hpp.level = level;
3238
3239 return hde;
3240}
3241#endif /* HAVE_LIBTRACEEVENT */
3242
3243struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
3244{
3245 struct perf_hpp_fmt *new_fmt = NULL;
3246
3247 if (perf_hpp__is_sort_entry(fmt)) {
3248 struct hpp_sort_entry *hse, *new_hse;
3249
3250 hse = container_of(fmt, struct hpp_sort_entry, hpp);
3251 new_hse = memdup(hse, sizeof(*hse));
3252 if (new_hse)
3253 new_fmt = &new_hse->hpp;
3254#ifdef HAVE_LIBTRACEEVENT
3255 } else if (perf_hpp__is_dynamic_entry(fmt)) {
3256 struct hpp_dynamic_entry *hde, *new_hde;
3257
3258 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3259 new_hde = memdup(hde, sizeof(*hde));
3260 if (new_hde)
3261 new_fmt = &new_hde->hpp;
3262#endif
3263 } else {
3264 new_fmt = memdup(fmt, sizeof(*fmt));
3265 }
3266
	if (new_fmt == NULL)
		return NULL;

3267	INIT_LIST_HEAD(&new_fmt->list);
3268 INIT_LIST_HEAD(&new_fmt->sort_list);
3269
3270 return new_fmt;
3271}
3272
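/*
 * A sketch of the accepted syntax, derived from the parsing below and from
 * add_dynamic_entry():
 *
 *   "<event>.<field>[/raw]"  e.g. (illustrative) "sched:sched_switch.prev_comm/raw"
 *   "<field>"                applies to all tracepoint events with that field
 *   "<event>.*"              adds every field of the event
 *   "trace_fields"           adds every field of every tracepoint event
 */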
3273static int parse_field_name(char *str, char **event, char **field, char **opt)
3274{
3275 char *event_name, *field_name, *opt_name;
3276
3277 event_name = str;
3278 field_name = strchr(str, '.');
3279
3280 if (field_name) {
3281 *field_name++ = '\0';
3282 } else {
3283 event_name = NULL;
3284 field_name = str;
3285 }
3286
3287 opt_name = strchr(field_name, '/');
3288 if (opt_name)
3289 *opt_name++ = '\0';
3290
3291 *event = event_name;
3292 *field = field_name;
3293 *opt = opt_name;
3294
3295 return 0;
3296}
3297
3298/* Find the matching evsel for a given event name. The event name can be:
3299 * 1. '%' + event index (e.g. '%1' for the first event)
3300 * 2. a full event name (e.g. sched:sched_switch)
3301 * 3. a partial event name (must not contain ':')
3302 */
3303static struct evsel *find_evsel(struct evlist *evlist, char *event_name)
3304{
3305 struct evsel *evsel = NULL;
3306 struct evsel *pos;
3307 bool full_name;
3308
3309 /* case 1 */
3310 if (event_name[0] == '%') {
3311 int nr = strtol(event_name+1, NULL, 0);
3312
3313 if (nr > evlist->core.nr_entries)
3314 return NULL;
3315
3316 evsel = evlist__first(evlist);
3317 while (--nr > 0)
3318 evsel = evsel__next(evsel);
3319
3320 return evsel;
3321 }
3322
3323 full_name = !!strchr(event_name, ':');
3324 evlist__for_each_entry(evlist, pos) {
3325 /* case 2 */
3326 if (full_name && evsel__name_is(pos, event_name))
3327 return pos;
3328 /* case 3 */
3329 if (!full_name && strstr(pos->name, event_name)) {
3330 if (evsel) {
3331 pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
3332 event_name, evsel->name, pos->name);
3333 return NULL;
3334 }
3335 evsel = pos;
3336 }
3337 }
3338
3339 return evsel;
3340}
3341
3342#ifdef HAVE_LIBTRACEEVENT
3343static int __dynamic_dimension__add(struct evsel *evsel,
3344 struct tep_format_field *field,
3345 bool raw_trace, int level)
3346{
3347 struct hpp_dynamic_entry *hde;
3348
3349 hde = __alloc_dynamic_entry(evsel, field, level);
3350 if (hde == NULL)
3351 return -ENOMEM;
3352
3353 hde->raw_trace = raw_trace;
3354
3355 perf_hpp__register_sort_field(&hde->hpp);
3356 return 0;
3357}
3358
3359static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level)
3360{
3361 int ret;
3362 struct tep_event *tp_format = evsel__tp_format(evsel);
3363 struct tep_format_field *field = tp_format ? tp_format->format.fields : NULL;
3364 while (field) {
3365 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
3366 if (ret < 0)
3367 return ret;
3368
3369 field = field->next;
3370 }
3371 return 0;
3372}
3373
3374static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace,
3375 int level)
3376{
3377 int ret;
3378 struct evsel *evsel;
3379
3380 evlist__for_each_entry(evlist, evsel) {
3381 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
3382 continue;
3383
3384 ret = add_evsel_fields(evsel, raw_trace, level);
3385 if (ret < 0)
3386 return ret;
3387 }
3388 return 0;
3389}
3390
3391static int add_all_matching_fields(struct evlist *evlist,
3392 char *field_name, bool raw_trace, int level)
3393{
3394 int ret = -ESRCH;
3395 struct evsel *evsel;
3396
3397 evlist__for_each_entry(evlist, evsel) {
3398 struct tep_event *tp_format;
3399 struct tep_format_field *field;
3400
3401 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
3402 continue;
3403
3404 tp_format = evsel__tp_format(evsel);
3405 if (tp_format == NULL)
3406 continue;
3407
3408 field = tep_find_any_field(tp_format, field_name);
3409 if (field == NULL)
3410 continue;
3411
3412 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
3413 if (ret < 0)
3414 break;
3415 }
3416 return ret;
3417}
3418#endif /* HAVE_LIBTRACEEVENT */
3419
3420static int add_dynamic_entry(struct evlist *evlist, const char *tok,
3421 int level)
3422{
3423 char *str, *event_name, *field_name, *opt_name;
3424 struct evsel *evsel;
3425 bool raw_trace = symbol_conf.raw_trace;
3426 int ret = 0;
3427
3428 if (evlist == NULL)
3429 return -ENOENT;
3430
3431 str = strdup(tok);
3432 if (str == NULL)
3433 return -ENOMEM;
3434
3435 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
3436 ret = -EINVAL;
3437 goto out;
3438 }
3439
3440 if (opt_name) {
3441 if (strcmp(opt_name, "raw")) {
3442 pr_debug("unsupported field option %s\n", opt_name);
3443 ret = -EINVAL;
3444 goto out;
3445 }
3446 raw_trace = true;
3447 }
3448
3449#ifdef HAVE_LIBTRACEEVENT
3450 if (!strcmp(field_name, "trace_fields")) {
3451 ret = add_all_dynamic_fields(evlist, raw_trace, level);
3452 goto out;
3453 }
3454
3455 if (event_name == NULL) {
3456 ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
3457 goto out;
3458 }
3459#else
3460 evlist__for_each_entry(evlist, evsel) {
3461 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
3462 pr_err("%s %s", ret ? "," : "This perf binary isn't linked with libtraceevent, can't process", evsel__name(evsel));
3463 ret = -ENOTSUP;
3464 }
3465 }
3466
3467 if (ret) {
3468 pr_err("\n");
3469 goto out;
3470 }
3471#endif
3472
3473 evsel = find_evsel(evlist, event_name);
3474 if (evsel == NULL) {
3475 pr_debug("Cannot find event: %s\n", event_name);
3476 ret = -ENOENT;
3477 goto out;
3478 }
3479
3480 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
3481 pr_debug("%s is not a tracepoint event\n", event_name);
3482 ret = -EINVAL;
3483 goto out;
3484 }
3485
3486#ifdef HAVE_LIBTRACEEVENT
3487 if (!strcmp(field_name, "*")) {
3488 ret = add_evsel_fields(evsel, raw_trace, level);
3489 } else {
3490 struct tep_event *tp_format = evsel__tp_format(evsel);
3491 struct tep_format_field *field =
3492 tp_format ? tep_find_any_field(tp_format, field_name) : NULL;
3493
3494 if (field == NULL) {
3495 pr_debug("Cannot find event field for %s.%s\n",
3496 event_name, field_name);
3497			ret = -ENOENT;
			goto out;
3498 }
3499
3500 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
3501 }
3502#else
3503 (void)level;
3504 (void)raw_trace;
3505#endif /* HAVE_LIBTRACEEVENT */
3506
3507out:
3508 free(str);
3509 return ret;
3510}
3511
3512static int __sort_dimension__add(struct sort_dimension *sd,
3513 struct perf_hpp_list *list,
3514 int level)
3515{
3516 if (sd->taken)
3517 return 0;
3518
3519 if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
3520 return -1;
3521
3522 if (sd->entry->se_collapse)
3523 list->need_collapse = 1;
3524
3525 sd->taken = 1;
3526
3527 return 0;
3528}
3529
3530static int __hpp_dimension__add(struct hpp_dimension *hd,
3531 struct perf_hpp_list *list,
3532 int level)
3533{
3534 struct perf_hpp_fmt *fmt;
3535
3536 if (hd->taken)
3537 return 0;
3538
3539 fmt = __hpp_dimension__alloc_hpp(hd, level);
3540 if (!fmt)
3541 return -1;
3542
3543 hd->taken = 1;
3544 hd->was_taken = 1;
3545 perf_hpp_list__register_sort_field(list, fmt);
3546 return 0;
3547}
3548
3549static int __sort_dimension__add_output(struct perf_hpp_list *list,
3550 struct sort_dimension *sd,
3551 int level)
3552{
3553 if (sd->taken)
3554 return 0;
3555
3556 if (__sort_dimension__add_hpp_output(sd, list, level) < 0)
3557 return -1;
3558
3559 sd->taken = 1;
3560 return 0;
3561}
3562
3563static int __hpp_dimension__add_output(struct perf_hpp_list *list,
3564 struct hpp_dimension *hd,
3565 int level)
3566{
3567 struct perf_hpp_fmt *fmt;
3568
3569 if (hd->taken)
3570 return 0;
3571
3572 fmt = __hpp_dimension__alloc_hpp(hd, level);
3573 if (!fmt)
3574 return -1;
3575
3576 hd->taken = 1;
3577 perf_hpp_list__column_register(list, fmt);
3578 return 0;
3579}
3580
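/*
 * When @implicit is true, the column is only added if the dimension was
 * previously taken as a sort key (tracked via was_taken above).
 */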
3581int hpp_dimension__add_output(unsigned col, bool implicit)
3582{
3583 struct hpp_dimension *hd;
3584
3585 BUG_ON(col >= PERF_HPP__MAX_INDEX);
3586 hd = &hpp_sort_dimensions[col];
3587 if (implicit && !hd->was_taken)
3588 return 0;
3589 return __hpp_dimension__add_output(&perf_hpp_list, hd, /*level=*/0);
3590}
3591
3592int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
3593 struct evlist *evlist,
3594 int level)
3595{
3596 unsigned int i, j;
3597
3598	/*
3599	 * Check whether there are any arch-specific sort dimensions
3600	 * that are not applicable to the current architecture.  If so,
3601	 * skip that sort key since we don't want to display it in the
3602	 * output fields.
3603	 */
3604 for (j = 0; j < ARRAY_SIZE(arch_specific_sort_keys); j++) {
3605 if (!strcmp(arch_specific_sort_keys[j], tok) &&
3606 !arch_support_sort_key(tok)) {
3607 return 0;
3608 }
3609 }
3610
3611 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
3612 struct sort_dimension *sd = &common_sort_dimensions[i];
3613
3614 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3615 continue;
3616
3617 for (j = 0; j < ARRAY_SIZE(dynamic_headers); j++) {
3618 if (sd->name && !strcmp(dynamic_headers[j], sd->name))
3619 sort_dimension_add_dynamic_header(sd);
3620 }
3621
3622 if (sd->entry == &sort_parent && parent_pattern) {
3623 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
3624 if (ret) {
3625 char err[BUFSIZ];
3626
3627 regerror(ret, &parent_regex, err, sizeof(err));
3628 pr_err("Invalid regex: %s\n%s", parent_pattern, err);
3629 return -EINVAL;
3630 }
3631 list->parent = 1;
3632 } else if (sd->entry == &sort_sym) {
3633 list->sym = 1;
3634			/*
3635			 * perf diff displays the performance difference between
3636			 * two or more perf.data files. Those files could come
3637			 * from different binaries, so we should not compare
3638			 * their ips but rather the symbol names.
3639			 */
3640 if (sort__mode == SORT_MODE__DIFF)
3641 sd->entry->se_collapse = sort__sym_sort;
3642
3643 } else if (sd->entry == &sort_dso) {
3644 list->dso = 1;
3645 } else if (sd->entry == &sort_socket) {
3646 list->socket = 1;
3647 } else if (sd->entry == &sort_thread) {
3648 list->thread = 1;
3649 } else if (sd->entry == &sort_comm) {
3650 list->comm = 1;
3651 } else if (sd->entry == &sort_type_offset) {
3652 symbol_conf.annotate_data_member = true;
3653 }
3654
3655 return __sort_dimension__add(sd, list, level);
3656 }
3657
3658 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
3659 struct sort_dimension *sd = &bstack_sort_dimensions[i];
3660
3661 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3662 continue;
3663
3664 if ((sort__mode != SORT_MODE__BRANCH) &&
3665 strncasecmp(tok, "callchain_branch_predicted",
3666 strlen(tok)) &&
3667 strncasecmp(tok, "callchain_branch_abort",
3668 strlen(tok)) &&
3669 strncasecmp(tok, "callchain_branch_cycles",
3670 strlen(tok)))
3671 return -EINVAL;
3672
3673 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
3674 list->sym = 1;
3675
3676 __sort_dimension__add(sd, list, level);
3677 return 0;
3678 }
3679
3680 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
3681 struct sort_dimension *sd = &memory_sort_dimensions[i];
3682
3683 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3684 continue;
3685
3686 if (sort__mode != SORT_MODE__MEMORY)
3687 return -EINVAL;
3688
3689 if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
3690 return -EINVAL;
3691
3692 if (sd->entry == &sort_mem_daddr_sym)
3693 list->sym = 1;
3694
3695 __sort_dimension__add(sd, list, level);
3696 return 0;
3697 }
3698
3699 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
3700 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
3701
3702 if (strncasecmp(tok, hd->name, strlen(tok)))
3703 continue;
3704
3705 return __hpp_dimension__add(hd, list, level);
3706 }
3707
3708 if (!add_dynamic_entry(evlist, tok, level))
3709 return 0;
3710
3711 return -ESRCH;
3712}
3713
3714/* This should be kept in sync with sort_dimension__add() above */
3715static bool is_hpp_sort_key(const char *key)
3716{
3717 unsigned i;
3718
3719 for (i = 0; i < ARRAY_SIZE(arch_specific_sort_keys); i++) {
3720 if (!strcmp(arch_specific_sort_keys[i], key) &&
3721 !arch_support_sort_key(key)) {
3722 return false;
3723 }
3724 }
3725
3726 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
3727 struct sort_dimension *sd = &common_sort_dimensions[i];
3728
3729 if (sd->name && !strncasecmp(key, sd->name, strlen(key)))
3730 return false;
3731 }
3732
3733 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
3734 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
3735
3736 if (!strncasecmp(key, hd->name, strlen(key)))
3737 return true;
3738 }
3739 return false;
3740}
3741
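/*
 * Split the sort key string into tokens and assign each a hierarchy level.
 * Keys wrapped in braces share one level, e.g. (illustrative)
 * --sort '{comm,dso},sym' keeps comm and dso at the same level with sym
 * one level below.
 */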
3742static int setup_sort_list(struct perf_hpp_list *list, char *str,
3743 struct evlist *evlist)
3744{
3745 char *tmp, *tok;
3746 int ret = 0;
3747 int level = 0;
3748 int next_level = 1;
3749 int prev_level = 0;
3750 bool in_group = false;
3751 bool prev_was_hpp = false;
3752
3753 do {
3754 tok = str;
3755 tmp = strpbrk(str, "{}, ");
3756 if (tmp) {
3757 if (in_group)
3758 next_level = level;
3759 else
3760 next_level = level + 1;
3761
3762 if (*tmp == '{')
3763 in_group = true;
3764 else if (*tmp == '}')
3765 in_group = false;
3766
3767 *tmp = '\0';
3768 str = tmp + 1;
3769 }
3770
3771 if (*tok) {
3772 if (is_hpp_sort_key(tok)) {
3773				/* keep output (hpp) sort keys at the same level */
3774 if (prev_was_hpp) {
3775 bool next_same = (level == next_level);
3776
3777 level = prev_level;
3778 next_level = next_same ? level : level+1;
3779 }
3780 prev_was_hpp = true;
3781 } else {
3782 prev_was_hpp = false;
3783 }
3784
3785 ret = sort_dimension__add(list, tok, evlist, level);
3786 if (ret == -EINVAL) {
3787 if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
3788 ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
3789 else
3790 ui__error("Invalid --sort key: `%s'", tok);
3791 break;
3792 } else if (ret == -ESRCH) {
3793 ui__error("Unknown --sort key: `%s'", tok);
3794 break;
3795 }
3796 prev_level = level;
3797 }
3798
3799 level = next_level;
3800 } while (tmp);
3801
3802 return ret;
3803}
3804
3805static const char *get_default_sort_order(struct evlist *evlist)
3806{
3807 const char *default_sort_orders[] = {
3808 default_sort_order,
3809 default_branch_sort_order,
3810 default_mem_sort_order,
3811 default_top_sort_order,
3812 default_diff_sort_order,
3813 default_tracepoint_sort_order,
3814 };
3815 bool use_trace = true;
3816 struct evsel *evsel;
3817
3818 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
3819
3820 if (evlist == NULL || evlist__empty(evlist))
3821 goto out_no_evlist;
3822
3823 evlist__for_each_entry(evlist, evsel) {
3824 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
3825 use_trace = false;
3826 break;
3827 }
3828 }
3829
3830 if (use_trace) {
3831 sort__mode = SORT_MODE__TRACEPOINT;
3832 if (symbol_conf.raw_trace)
3833 return "trace_fields";
3834 }
3835out_no_evlist:
3836 return default_sort_orders[sort__mode];
3837}
3838
3839static int setup_sort_order(struct evlist *evlist)
3840{
3841 char *new_sort_order;
3842
3843 /*
3844 * Append '+'-prefixed sort order to the default sort
3845 * order string.
3846 */
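	/*
	 * For example (illustrative): sort_order = "+cpu" becomes
	 * "<default sort keys>,cpu".
	 */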
3847 if (!sort_order || is_strict_order(sort_order))
3848 return 0;
3849
3850 if (sort_order[1] == '\0') {
3851 ui__error("Invalid --sort key: `+'");
3852 return -EINVAL;
3853 }
3854
3855	/*
3856	 * We allocate a new sort_order string but never free it, because
3857	 * it is referenced throughout the rest of the code.
3858	 */
3859 if (asprintf(&new_sort_order, "%s,%s",
3860 get_default_sort_order(evlist), sort_order + 1) < 0) {
3861 pr_err("Not enough memory to set up --sort");
3862 return -ENOMEM;
3863 }
3864
3865 sort_order = new_sort_order;
3866 return 0;
3867}
3868
3869/*
3870 * Adds the 'pre,' prefix to 'str' if 'pre' is
3871 * not already part of 'str'.
3872 */
3873static char *prefix_if_not_in(const char *pre, char *str)
3874{
3875 char *n;
3876
3877 if (!str || strstr(str, pre))
3878 return str;
3879
3880 if (asprintf(&n, "%s,%s", pre, str) < 0)
3881 n = NULL;
3882
3883 free(str);
3884 return n;
3885}
3886
3887static char *setup_overhead(char *keys)
3888{
3889 if (sort__mode == SORT_MODE__DIFF)
3890 return keys;
3891
3892 if (symbol_conf.prefer_latency) {
3893 keys = prefix_if_not_in("overhead", keys);
3894 keys = prefix_if_not_in("latency", keys);
3895 if (symbol_conf.cumulate_callchain) {
3896 keys = prefix_if_not_in("overhead_children", keys);
3897 keys = prefix_if_not_in("latency_children", keys);
3898 }
3899 } else if (!keys || (!strstr(keys, "overhead") &&
3900 !strstr(keys, "latency"))) {
3901 if (symbol_conf.enable_latency)
3902 keys = prefix_if_not_in("latency", keys);
3903 keys = prefix_if_not_in("overhead", keys);
3904 if (symbol_conf.cumulate_callchain) {
3905 if (symbol_conf.enable_latency)
3906 keys = prefix_if_not_in("latency_children", keys);
3907 keys = prefix_if_not_in("overhead_children", keys);
3908 }
3909 }
3910
3911 return keys;
3912}
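/*
 * A minimal illustration, assuming latency and cumulate_callchain are both
 * disabled: keys "comm,dso" come back as "overhead,comm,dso".
 */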
3913
3914static int __setup_sorting(struct evlist *evlist)
3915{
3916 char *str;
3917 const char *sort_keys;
3918 int ret = 0;
3919
3920 ret = setup_sort_order(evlist);
3921 if (ret)
3922 return ret;
3923
3924 sort_keys = sort_order;
3925 if (sort_keys == NULL) {
3926 if (is_strict_order(field_order)) {
3927 /*
3928 * If user specified field order but no sort order,
3929 * we'll honor it and not add default sort orders.
3930 */
3931 return 0;
3932 }
3933
3934 sort_keys = get_default_sort_order(evlist);
3935 }
3936
3937 str = strdup(sort_keys);
3938 if (str == NULL) {
3939 pr_err("Not enough memory to setup sort keys");
3940 return -ENOMEM;
3941 }
3942
3943 /*
3944 * Prepend overhead fields for backward compatibility.
3945 */
3946 if (!is_strict_order(field_order)) {
3947 str = setup_overhead(str);
3948 if (str == NULL) {
3949 pr_err("Not enough memory to setup overhead keys");
3950 return -ENOMEM;
3951 }
3952 }
3953
3954 ret = setup_sort_list(&perf_hpp_list, str, evlist);
3955
3956 free(str);
3957 return ret;
3958}
3959
3960void perf_hpp__set_elide(int idx, bool elide)
3961{
3962 struct perf_hpp_fmt *fmt;
3963 struct hpp_sort_entry *hse;
3964
3965 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3966 if (!perf_hpp__is_sort_entry(fmt))
3967 continue;
3968
3969 hse = container_of(fmt, struct hpp_sort_entry, hpp);
3970 if (hse->se->se_width_idx == idx) {
3971 fmt->elide = elide;
3972 break;
3973 }
3974 }
3975}
3976
3977static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
3978{
3979 if (list && strlist__nr_entries(list) == 1) {
3980 if (fp != NULL)
3981 fprintf(fp, "# %s: %s\n", list_name,
3982 strlist__entry(list, 0)->s);
3983 return true;
3984 }
3985 return false;
3986}
3987
static bool get_elide(int idx, FILE *output)
{
	switch (idx) {
	case HISTC_SYMBOL:
		return __get_elide(symbol_conf.sym_list, "symbol", output);
	case HISTC_DSO:
		return __get_elide(symbol_conf.dso_list, "dso", output);
	case HISTC_COMM:
		return __get_elide(symbol_conf.comm_list, "comm", output);
	default:
		break;
	}

	if (sort__mode != SORT_MODE__BRANCH)
		return false;

	switch (idx) {
	case HISTC_SYMBOL_FROM:
		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
	case HISTC_SYMBOL_TO:
		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
	case HISTC_DSO_FROM:
		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
	case HISTC_DSO_TO:
		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
	case HISTC_ADDR_FROM:
		return __get_elide(symbol_conf.sym_from_list, "addr_from", output);
	case HISTC_ADDR_TO:
		return __get_elide(symbol_conf.sym_to_list, "addr_to", output);
	default:
		break;
	}

	return false;
}

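/*
 * Compute the elide state of every sort entry from the active filter lists,
 * optionally announcing the elided values on @output.
 */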
void sort__setup_elide(FILE *output)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		fmt->elide = get_elide(hse->se->se_width_idx, output);
	}

	/*
	 * It makes no sense to elide all of the sort entries.
	 * Just revert them so they show up again.
	 */
	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		if (!fmt->elide)
			return;
	}

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		fmt->elide = false;
	}
}

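/*
 * Add a single --fields key to @list.  The hpp (overhead style) dimensions
 * are matched first at the current @level; any other dimension bumps @level
 * so that it starts a new hierarchy level.  Returns -EINVAL for a key that
 * is not valid in the current sort mode and -ESRCH for an unknown key.
 */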
int output_field_add(struct perf_hpp_list *list, const char *tok, int *level)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		if (!strcasecmp(tok, "weight"))
			ui__warning("--fields weight shows the average value unlike in the --sort key.\n");

		if (hd->mem_mode && sort__mode != SORT_MODE__MEMORY)
			continue;

		return __hpp_dimension__add_output(list, hd, *level);
	}

	/*
	 * A non-output field will increase the level so that it can be in a
	 * different hierarchy.
	 */
	(*level)++;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd, *level);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__BRANCH)
			return -EINVAL;

		return __sort_dimension__add_output(list, sd, *level);
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__MEMORY)
			return -EINVAL;

		return __sort_dimension__add_output(list, sd, *level);
	}

	return -ESRCH;
}

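/*
 * Parse a comma (or space) separated --fields string, e.g. "overhead,comm,dso",
 * adding each key in turn and reporting the first invalid or unknown key.
 */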
static int setup_output_list(struct perf_hpp_list *list, char *str)
{
	char *tmp, *tok;
	int ret = 0;
	int level = 0;

	for (tok = strtok_r(str, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
		ret = output_field_add(list, tok, &level);
		if (ret == -EINVAL) {
			ui__error("Invalid --fields key: `%s'", tok);
			break;
		} else if (ret == -ESRCH) {
			ui__error("Unknown --fields key: `%s'", tok);
			break;
		}
	}

	return ret;
}

void reset_dimensions(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
		common_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
		hpp_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
		bstack_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
		memory_sort_dimensions[i].taken = 0;
}

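/*
 * A "strict" order does not start with '+'.  A strict order replaces the
 * default keys entirely, while e.g. "--fields +period" only appends to them.
 */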
bool is_strict_order(const char *order)
{
	return order && (*order != '+');
}

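/*
 * Build the output field list from --fields.  A leading '+' only marks the
 * order as non-strict and is skipped; a lone "+" is rejected.
 */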
static int __setup_output_field(void)
{
	char *str, *strp;
	int ret = -EINVAL;

	if (field_order == NULL)
		return 0;

	strp = str = strdup(field_order);
	if (str == NULL) {
		pr_err("Not enough memory to setup output fields");
		return -ENOMEM;
	}

	if (!is_strict_order(field_order))
		strp++;

	if (!strlen(strp)) {
		ui__error("Invalid --fields key: `+'");
		goto out;
	}

	ret = setup_output_list(&perf_hpp_list, strp);

out:
	free(str);
	return ret;
}

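/*
 * Main entry point for --sort/--fields handling: set up the sort keys, add
 * the implicit "parent" key when a non-default parent pattern is in use,
 * then build the output fields and link sort keys and output fields to each
 * other.
 */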
int setup_sorting(struct evlist *evlist)
{
	int err;

	err = __setup_sorting(evlist);
	if (err < 0)
		return err;

	if (parent_pattern != default_parent_pattern) {
		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
		if (err < 0)
			return err;
	}

	reset_dimensions();

	/*
	 * perf diff doesn't use default hpp output fields.
	 */
	if (sort__mode != SORT_MODE__DIFF)
		perf_hpp__init();

	err = __setup_output_field();
	if (err < 0)
		return err;

	err = perf_hpp__alloc_mem_stats(&perf_hpp_list, evlist);
	if (err < 0)
		return err;

	/* copy sort keys to output fields */
	perf_hpp__setup_output_field(&perf_hpp_list);
	/* and then copy output fields to sort keys */
	perf_hpp__append_sort_keys(&perf_hpp_list);

	/* setup hists-specific output fields */
	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
		return -1;

	return 0;
}

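/*
 * Drop all sort and output field configuration so that setup_sorting() can
 * be run again from a clean state.
 */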
void reset_output_field(void)
{
	perf_hpp_list.need_collapse = 0;
	perf_hpp_list.parent = 0;
	perf_hpp_list.sym = 0;
	perf_hpp_list.dso = 0;

	field_order = NULL;
	sort_order = NULL;

	reset_dimensions();
	perf_hpp__reset_output_field(&perf_hpp_list);
}

#define INDENT (3*8 + 1)

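/*
 * Append one key name to the help string, wrapping to a new line roughly
 * every 75 columns.  INDENT matches the continuation indent used below:
 * three 8-column tabs plus one space.
 */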
static void add_key(struct strbuf *sb, const char *str, int *llen)
{
	if (!str)
		return;

	if (*llen >= 75) {
		strbuf_addstr(sb, "\n\t\t\t ");
		*llen = INDENT;
	}
	strbuf_addf(sb, " %s", str);
	*llen += strlen(str) + 1;
}

static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
			    int *llen)
{
	int i;

	for (i = 0; i < n; i++)
		add_key(sb, s[i].name, llen);
}

static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
				int *llen)
{
	int i;

	for (i = 0; i < n; i++)
		add_key(sb, s[i].name, llen);
}

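/*
 * Build the help text listing every sort key valid for @mode, e.g. for the
 * --sort/--fields option descriptions.  The caller owns the returned string.
 */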
char *sort_help(const char *prefix, enum sort_mode mode)
{
	struct strbuf sb;
	char *s;
	int len = strlen(prefix) + INDENT;

	strbuf_init(&sb, 300);
	strbuf_addstr(&sb, prefix);
	add_hpp_sort_string(&sb, hpp_sort_dimensions,
			    ARRAY_SIZE(hpp_sort_dimensions), &len);
	add_sort_string(&sb, common_sort_dimensions,
			ARRAY_SIZE(common_sort_dimensions), &len);
	if (mode == SORT_MODE__NORMAL || mode == SORT_MODE__BRANCH)
		add_sort_string(&sb, bstack_sort_dimensions,
				ARRAY_SIZE(bstack_sort_dimensions), &len);
	if (mode == SORT_MODE__NORMAL || mode == SORT_MODE__MEMORY)
		add_sort_string(&sb, memory_sort_dimensions,
				ARRAY_SIZE(memory_sort_dimensions), &len);
	s = strbuf_detach(&sb, NULL);
	strbuf_release(&sb);
	return s;
}