Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
1// SPDX-License-Identifier: GPL-2.0
2#include <errno.h>
3#include <inttypes.h>
4#include <regex.h>
5#include <stdlib.h>
6#include <linux/mman.h>
7#include <linux/time64.h>
8#include "debug.h"
9#include "dso.h"
10#include "sort.h"
11#include "hist.h"
12#include "cacheline.h"
13#include "comm.h"
14#include "map.h"
15#include "maps.h"
16#include "symbol.h"
17#include "map_symbol.h"
18#include "branch.h"
19#include "thread.h"
20#include "evsel.h"
21#include "evlist.h"
22#include "srcline.h"
23#include "strlist.h"
24#include "strbuf.h"
25#include <traceevent/event-parse.h>
26#include "mem-events.h"
27#include "annotate.h"
28#include "event.h"
29#include "time-utils.h"
30#include "cgroup.h"
31#include "machine.h"
32#include <linux/kernel.h>
33#include <linux/string.h>
34
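/*
 * Command-line state: parent_pattern backs -p/--parent, sort_order and
 * field_order are filled in from --sort/--fields, and the per-mode
 * default_*_sort_order strings below are used when no explicit order is
 * given (branch, mem, top, diff and tracepoint modes each have their own).
 */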
35regex_t parent_regex;
36const char default_parent_pattern[] = "^sys_|^do_page_fault";
37const char *parent_pattern = default_parent_pattern;
38const char *default_sort_order = "comm,dso,symbol";
39const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
40const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat,p_stage_cyc";
41const char default_top_sort_order[] = "dso,symbol";
42const char default_diff_sort_order[] = "dso,symbol";
43const char default_tracepoint_sort_order[] = "trace";
44const char *sort_order;
45const char *field_order;
46regex_t ignore_callees_regex;
47int have_ignore_callees = 0;
48enum sort_mode sort__mode = SORT_MODE__NORMAL;
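/*
 * Keys in the two arrays below get special treatment: dynamic_headers may
 * have their column header rewritten per architecture through the weak
 * arch_perf_header_entry() hook, and arch_specific_sort_keys are only
 * accepted when arch_support_sort_key() reports support for them.
 */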
49const char *dynamic_headers[] = {"local_ins_lat", "p_stage_cyc"};
50const char *arch_specific_sort_keys[] = {"p_stage_cyc"};
51
/*
 * Format into the buffer like vsnprintf() and then, when the
 *
 * -t, --field-separator
 *
 * option is in use (a single separator character, no space padding),
 * replace every occurrence of that separator in symbol names (and other
 * output) with a '.', so that the separator can only ever mean "next
 * field".  See the illustrative example below this function.
 */
61static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
62{
63 int n;
64 va_list ap;
65
66 va_start(ap, fmt);
67 n = vsnprintf(bf, size, fmt, ap);
68 if (symbol_conf.field_sep && n > 0) {
69 char *sep = bf;
70
71 while (1) {
72 sep = strchr(sep, *symbol_conf.field_sep);
73 if (sep == NULL)
74 break;
75 *sep = '.';
76 }
77 }
78 va_end(ap);
79
80 if (n >= (int)size)
81 return size - 1;
82 return n;
83}
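/*
 * Illustrative example (not in the original source): with `-t ,` a symbol
 * such as "std::map<int, long>::find" is printed as
 * "std::map<int. long>::find", so a ',' in the output always marks a field
 * boundary.
 */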
84
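/*
 * Shared helper for the cmp routines below: 0 when both sides are NULL,
 * otherwise a fixed non-zero ordering, so entries missing the compared data
 * are grouped together instead of being special-cased by every caller.
 */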
85static int64_t cmp_null(const void *l, const void *r)
86{
87 if (!l && !r)
88 return 0;
89 else if (!l)
90 return -1;
91 else
92 return 1;
93}
94
95/* --sort pid */
96
97static int64_t
98sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
99{
100 return right->thread->tid - left->thread->tid;
101}
102
103static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
104 size_t size, unsigned int width)
105{
106 const char *comm = thread__comm_str(he->thread);
107
108 width = max(7U, width) - 8;
109 return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid,
110 width, width, comm ?: "");
111}
112
113static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
114{
115 const struct thread *th = arg;
116
117 if (type != HIST_FILTER__THREAD)
118 return -1;
119
120 return th && he->thread != th;
121}
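/*
 * se_filter callbacks return -1 when the filter type is not theirs, and
 * otherwise non-zero when the entry should be filtered out (here: a thread
 * filter is active and the entry belongs to a different thread).
 */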
122
123struct sort_entry sort_thread = {
124 .se_header = " Pid:Command",
125 .se_cmp = sort__thread_cmp,
126 .se_snprintf = hist_entry__thread_snprintf,
127 .se_filter = hist_entry__thread_filter,
128 .se_width_idx = HISTC_THREAD,
129};
130
131/* --sort comm */
132
133/*
134 * We can't use pointer comparison in functions below,
135 * because it gives different results based on pointer
136 * values, which could break some sorting assumptions.
137 */
138static int64_t
139sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
140{
141 return strcmp(comm__str(right->comm), comm__str(left->comm));
142}
143
144static int64_t
145sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
146{
147 return strcmp(comm__str(right->comm), comm__str(left->comm));
148}
149
150static int64_t
151sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
152{
153 return strcmp(comm__str(right->comm), comm__str(left->comm));
154}
155
156static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
157 size_t size, unsigned int width)
158{
159 return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
160}
161
162struct sort_entry sort_comm = {
163 .se_header = "Command",
164 .se_cmp = sort__comm_cmp,
165 .se_collapse = sort__comm_collapse,
166 .se_sort = sort__comm_sort,
167 .se_snprintf = hist_entry__comm_snprintf,
168 .se_filter = hist_entry__thread_filter,
169 .se_width_idx = HISTC_COMM,
170};
171
172/* --sort dso */
173
174static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
175{
176 struct dso *dso_l = map_l ? map_l->dso : NULL;
177 struct dso *dso_r = map_r ? map_r->dso : NULL;
178 const char *dso_name_l, *dso_name_r;
179
180 if (!dso_l || !dso_r)
181 return cmp_null(dso_r, dso_l);
182
183 if (verbose > 0) {
184 dso_name_l = dso_l->long_name;
185 dso_name_r = dso_r->long_name;
186 } else {
187 dso_name_l = dso_l->short_name;
188 dso_name_r = dso_r->short_name;
189 }
190
191 return strcmp(dso_name_l, dso_name_r);
192}
193
194static int64_t
195sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
196{
197 return _sort__dso_cmp(right->ms.map, left->ms.map);
198}
199
200static int _hist_entry__dso_snprintf(struct map *map, char *bf,
201 size_t size, unsigned int width)
202{
203 if (map && map->dso) {
204 const char *dso_name = verbose > 0 ? map->dso->long_name :
205 map->dso->short_name;
206 return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
207 }
208
209 return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
210}
211
212static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
213 size_t size, unsigned int width)
214{
215 return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
216}
217
218static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
219{
220 const struct dso *dso = arg;
221
222 if (type != HIST_FILTER__DSO)
223 return -1;
224
225 return dso && (!he->ms.map || he->ms.map->dso != dso);
226}
227
228struct sort_entry sort_dso = {
229 .se_header = "Shared Object",
230 .se_cmp = sort__dso_cmp,
231 .se_snprintf = hist_entry__dso_snprintf,
232 .se_filter = hist_entry__dso_filter,
233 .se_width_idx = HISTC_DSO,
234};
235
236/* --sort symbol */
237
238static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
239{
240 return (int64_t)(right_ip - left_ip);
241}
242
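/*
 * Inlined symbols created for different call sites share a name but are
 * distinct objects: compare them by name first and treat overlapping
 * address ranges as equal, only then fall back to start/end addresses.
 */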
243int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
244{
245 if (!sym_l || !sym_r)
246 return cmp_null(sym_l, sym_r);
247
248 if (sym_l == sym_r)
249 return 0;
250
251 if (sym_l->inlined || sym_r->inlined) {
252 int ret = strcmp(sym_l->name, sym_r->name);
253
254 if (ret)
255 return ret;
256 if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
257 return 0;
258 }
259
260 if (sym_l->start != sym_r->start)
261 return (int64_t)(sym_r->start - sym_l->start);
262
263 return (int64_t)(sym_r->end - sym_l->end);
264}
265
266static int64_t
267sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
268{
269 int64_t ret;
270
271 if (!left->ms.sym && !right->ms.sym)
272 return _sort__addr_cmp(left->ip, right->ip);
273
274 /*
275 * comparing symbol address alone is not enough since it's a
276 * relative address within a dso.
277 */
	if (!hists__has(left->hists, dso) || !hists__has(right->hists, dso)) {
279 ret = sort__dso_cmp(left, right);
280 if (ret != 0)
281 return ret;
282 }
283
284 return _sort__sym_cmp(left->ms.sym, right->ms.sym);
285}
286
287static int64_t
288sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
289{
290 if (!left->ms.sym || !right->ms.sym)
291 return cmp_null(left->ms.sym, right->ms.sym);
292
293 return strcmp(right->ms.sym->name, left->ms.sym->name);
294}
295
296static int _hist_entry__sym_snprintf(struct map_symbol *ms,
297 u64 ip, char level, char *bf, size_t size,
298 unsigned int width)
299{
300 struct symbol *sym = ms->sym;
301 struct map *map = ms->map;
302 size_t ret = 0;
303
304 if (verbose > 0) {
305 char o = map ? dso__symtab_origin(map->dso) : '!';
306 u64 rip = ip;
307
308 if (map && map->dso && map->dso->kernel
309 && map->dso->adjust_symbols)
310 rip = map->unmap_ip(map, ip);
311
312 ret += repsep_snprintf(bf, size, "%-#*llx %c ",
313 BITS_PER_LONG / 4 + 2, rip, o);
314 }
315
316 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
317 if (sym && map) {
318 if (sym->type == STT_OBJECT) {
319 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
320 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
321 ip - map->unmap_ip(map, sym->start));
322 } else {
323 ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
324 width - ret,
325 sym->name);
326 if (sym->inlined)
327 ret += repsep_snprintf(bf + ret, size - ret,
328 " (inlined)");
329 }
330 } else {
331 size_t len = BITS_PER_LONG / 4;
332 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
333 len, ip);
334 }
335
336 return ret;
337}
338
339int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
340{
341 return _hist_entry__sym_snprintf(&he->ms, he->ip,
342 he->level, bf, size, width);
343}
344
345static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
346{
347 const char *sym = arg;
348
349 if (type != HIST_FILTER__SYMBOL)
350 return -1;
351
352 return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
353}
354
355struct sort_entry sort_sym = {
356 .se_header = "Symbol",
357 .se_cmp = sort__sym_cmp,
358 .se_sort = sort__sym_sort,
359 .se_snprintf = hist_entry__sym_snprintf,
360 .se_filter = hist_entry__sym_filter,
361 .se_width_idx = HISTC_SYMBOL,
362};
363
364/* --sort srcline */
365
366char *hist_entry__srcline(struct hist_entry *he)
367{
368 return map__srcline(he->ms.map, he->ip, he->ms.sym);
369}
370
371static int64_t
372sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
373{
374 if (!left->srcline)
375 left->srcline = hist_entry__srcline(left);
376 if (!right->srcline)
377 right->srcline = hist_entry__srcline(right);
378
379 return strcmp(right->srcline, left->srcline);
380}
381
382static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
383 size_t size, unsigned int width)
384{
385 if (!he->srcline)
386 he->srcline = hist_entry__srcline(he);
387
388 return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
389}
390
391struct sort_entry sort_srcline = {
392 .se_header = "Source:Line",
393 .se_cmp = sort__srcline_cmp,
394 .se_snprintf = hist_entry__srcline_snprintf,
395 .se_width_idx = HISTC_SRCLINE,
396};
397
398/* --sort srcline_from */
399
400static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
401{
402 return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym);
403}
404
405static int64_t
406sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
407{
408 if (!left->branch_info->srcline_from)
409 left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);
410
411 if (!right->branch_info->srcline_from)
412 right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);
413
414 return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
415}
416
417static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
418 size_t size, unsigned int width)
419{
420 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
421}
422
423struct sort_entry sort_srcline_from = {
424 .se_header = "From Source:Line",
425 .se_cmp = sort__srcline_from_cmp,
426 .se_snprintf = hist_entry__srcline_from_snprintf,
427 .se_width_idx = HISTC_SRCLINE_FROM,
428};
429
430/* --sort srcline_to */
431
432static int64_t
433sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
434{
435 if (!left->branch_info->srcline_to)
436 left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);
437
438 if (!right->branch_info->srcline_to)
439 right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);
440
441 return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
442}
443
444static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
445 size_t size, unsigned int width)
446{
447 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
448}
449
450struct sort_entry sort_srcline_to = {
451 .se_header = "To Source:Line",
452 .se_cmp = sort__srcline_to_cmp,
453 .se_snprintf = hist_entry__srcline_to_snprintf,
454 .se_width_idx = HISTC_SRCLINE_TO,
455};
456
457static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
458 size_t size, unsigned int width)
459{
460
461 struct symbol *sym = he->ms.sym;
462 struct annotation *notes;
463 double ipc = 0.0, coverage = 0.0;
464 char tmp[64];
465
466 if (!sym)
467 return repsep_snprintf(bf, size, "%-*s", width, "-");
468
469 notes = symbol__annotation(sym);
470
471 if (notes->hit_cycles)
472 ipc = notes->hit_insn / ((double)notes->hit_cycles);
473
474 if (notes->total_insn) {
475 coverage = notes->cover_insn * 100.0 /
476 ((double)notes->total_insn);
477 }
478
479 snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
480 return repsep_snprintf(bf, size, "%-*s", width, tmp);
481}
482
483struct sort_entry sort_sym_ipc = {
484 .se_header = "IPC [IPC Coverage]",
485 .se_cmp = sort__sym_cmp,
486 .se_snprintf = hist_entry__sym_ipc_snprintf,
487 .se_width_idx = HISTC_SYMBOL_IPC,
488};
489
490static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
491 __maybe_unused,
492 char *bf, size_t size,
493 unsigned int width)
494{
495 char tmp[64];
496
497 snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
498 return repsep_snprintf(bf, size, "%-*s", width, tmp);
499}
500
501struct sort_entry sort_sym_ipc_null = {
502 .se_header = "IPC [IPC Coverage]",
503 .se_cmp = sort__sym_cmp,
504 .se_snprintf = hist_entry__sym_ipc_null_snprintf,
505 .se_width_idx = HISTC_SYMBOL_IPC,
506};
507
508/* --sort srcfile */
509
510static char no_srcfile[1];
511
512static char *hist_entry__get_srcfile(struct hist_entry *e)
513{
514 char *sf, *p;
515 struct map *map = e->ms.map;
516
517 if (!map)
518 return no_srcfile;
519
520 sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
521 e->ms.sym, false, true, true, e->ip);
522 if (!strcmp(sf, SRCLINE_UNKNOWN))
523 return no_srcfile;
524 p = strchr(sf, ':');
525 if (p && *sf) {
526 *p = 0;
527 return sf;
528 }
529 free(sf);
530 return no_srcfile;
531}
532
533static int64_t
534sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
535{
536 if (!left->srcfile)
537 left->srcfile = hist_entry__get_srcfile(left);
538 if (!right->srcfile)
539 right->srcfile = hist_entry__get_srcfile(right);
540
541 return strcmp(right->srcfile, left->srcfile);
542}
543
544static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
545 size_t size, unsigned int width)
546{
547 if (!he->srcfile)
548 he->srcfile = hist_entry__get_srcfile(he);
549
550 return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
551}
552
553struct sort_entry sort_srcfile = {
554 .se_header = "Source File",
555 .se_cmp = sort__srcfile_cmp,
556 .se_snprintf = hist_entry__srcfile_snprintf,
557 .se_width_idx = HISTC_SRCFILE,
558};
559
560/* --sort parent */
561
562static int64_t
563sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
564{
565 struct symbol *sym_l = left->parent;
566 struct symbol *sym_r = right->parent;
567
568 if (!sym_l || !sym_r)
569 return cmp_null(sym_l, sym_r);
570
571 return strcmp(sym_r->name, sym_l->name);
572}
573
574static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
575 size_t size, unsigned int width)
576{
577 return repsep_snprintf(bf, size, "%-*.*s", width, width,
578 he->parent ? he->parent->name : "[other]");
579}
580
581struct sort_entry sort_parent = {
582 .se_header = "Parent symbol",
583 .se_cmp = sort__parent_cmp,
584 .se_snprintf = hist_entry__parent_snprintf,
585 .se_width_idx = HISTC_PARENT,
586};
587
588/* --sort cpu */
589
590static int64_t
591sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
592{
593 return right->cpu - left->cpu;
594}
595
596static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
597 size_t size, unsigned int width)
598{
599 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
600}
601
602struct sort_entry sort_cpu = {
603 .se_header = "CPU",
604 .se_cmp = sort__cpu_cmp,
605 .se_snprintf = hist_entry__cpu_snprintf,
606 .se_width_idx = HISTC_CPU,
607};
608
609/* --sort cgroup_id */
610
611static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
612{
613 return (int64_t)(right_dev - left_dev);
614}
615
616static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
617{
618 return (int64_t)(right_ino - left_ino);
619}
620
621static int64_t
622sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
623{
624 int64_t ret;
625
626 ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
627 if (ret != 0)
628 return ret;
629
630 return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
631 left->cgroup_id.ino);
632}
633
634static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
635 char *bf, size_t size,
636 unsigned int width __maybe_unused)
637{
638 return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
639 he->cgroup_id.ino);
640}
641
642struct sort_entry sort_cgroup_id = {
643 .se_header = "cgroup id (dev/inode)",
644 .se_cmp = sort__cgroup_id_cmp,
645 .se_snprintf = hist_entry__cgroup_id_snprintf,
646 .se_width_idx = HISTC_CGROUP_ID,
647};
648
649/* --sort cgroup */
650
651static int64_t
652sort__cgroup_cmp(struct hist_entry *left, struct hist_entry *right)
653{
654 return right->cgroup - left->cgroup;
655}
656
657static int hist_entry__cgroup_snprintf(struct hist_entry *he,
658 char *bf, size_t size,
659 unsigned int width __maybe_unused)
660{
661 const char *cgrp_name = "N/A";
662
663 if (he->cgroup) {
664 struct cgroup *cgrp = cgroup__find(he->ms.maps->machine->env,
665 he->cgroup);
666 if (cgrp != NULL)
667 cgrp_name = cgrp->name;
668 else
669 cgrp_name = "unknown";
670 }
671
672 return repsep_snprintf(bf, size, "%s", cgrp_name);
673}
674
675struct sort_entry sort_cgroup = {
676 .se_header = "Cgroup",
677 .se_cmp = sort__cgroup_cmp,
678 .se_snprintf = hist_entry__cgroup_snprintf,
679 .se_width_idx = HISTC_CGROUP,
680};
681
682/* --sort socket */
683
684static int64_t
685sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
686{
687 return right->socket - left->socket;
688}
689
690static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
691 size_t size, unsigned int width)
692{
693 return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
694}
695
696static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
697{
698 int sk = *(const int *)arg;
699
700 if (type != HIST_FILTER__SOCKET)
701 return -1;
702
703 return sk >= 0 && he->socket != sk;
704}
705
706struct sort_entry sort_socket = {
707 .se_header = "Socket",
708 .se_cmp = sort__socket_cmp,
709 .se_snprintf = hist_entry__socket_snprintf,
710 .se_filter = hist_entry__socket_filter,
711 .se_width_idx = HISTC_SOCKET,
712};
713
714/* --sort time */
715
716static int64_t
717sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
718{
719 return right->time - left->time;
720}
721
722static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
723 size_t size, unsigned int width)
724{
725 char he_time[32];
726
727 if (symbol_conf.nanosecs)
728 timestamp__scnprintf_nsec(he->time, he_time,
729 sizeof(he_time));
730 else
731 timestamp__scnprintf_usec(he->time, he_time,
732 sizeof(he_time));
733
734 return repsep_snprintf(bf, size, "%-.*s", width, he_time);
735}
736
737struct sort_entry sort_time = {
738 .se_header = "Time",
739 .se_cmp = sort__time_cmp,
740 .se_snprintf = hist_entry__time_snprintf,
741 .se_width_idx = HISTC_TIME,
742};
743
744/* --sort trace */
745
746static char *get_trace_output(struct hist_entry *he)
747{
748 struct trace_seq seq;
749 struct evsel *evsel;
750 struct tep_record rec = {
751 .data = he->raw_data,
752 .size = he->raw_size,
753 };
754
755 evsel = hists_to_evsel(he->hists);
756
757 trace_seq_init(&seq);
758 if (symbol_conf.raw_trace) {
759 tep_print_fields(&seq, he->raw_data, he->raw_size,
760 evsel->tp_format);
761 } else {
762 tep_print_event(evsel->tp_format->tep,
763 &seq, &rec, "%s", TEP_PRINT_INFO);
764 }
765 /*
766 * Trim the buffer, it starts at 4KB and we're not going to
767 * add anything more to this buffer.
768 */
769 return realloc(seq.buffer, seq.len + 1);
770}
771
772static int64_t
773sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
774{
775 struct evsel *evsel;
776
777 evsel = hists_to_evsel(left->hists);
778 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
779 return 0;
780
781 if (left->trace_output == NULL)
782 left->trace_output = get_trace_output(left);
783 if (right->trace_output == NULL)
784 right->trace_output = get_trace_output(right);
785
786 return strcmp(right->trace_output, left->trace_output);
787}
788
789static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
790 size_t size, unsigned int width)
791{
792 struct evsel *evsel;
793
794 evsel = hists_to_evsel(he->hists);
795 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
796 return scnprintf(bf, size, "%-.*s", width, "N/A");
797
798 if (he->trace_output == NULL)
799 he->trace_output = get_trace_output(he);
800 return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
801}
802
803struct sort_entry sort_trace = {
804 .se_header = "Trace output",
805 .se_cmp = sort__trace_cmp,
806 .se_snprintf = hist_entry__trace_snprintf,
807 .se_width_idx = HISTC_TRACE,
808};
809
810/* sort keys for branch stacks */
811
812static int64_t
813sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
814{
815 if (!left->branch_info || !right->branch_info)
816 return cmp_null(left->branch_info, right->branch_info);
817
818 return _sort__dso_cmp(left->branch_info->from.ms.map,
819 right->branch_info->from.ms.map);
820}
821
822static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
823 size_t size, unsigned int width)
824{
825 if (he->branch_info)
826 return _hist_entry__dso_snprintf(he->branch_info->from.ms.map,
827 bf, size, width);
828 else
829 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
830}
831
832static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
833 const void *arg)
834{
835 const struct dso *dso = arg;
836
837 if (type != HIST_FILTER__DSO)
838 return -1;
839
840 return dso && (!he->branch_info || !he->branch_info->from.ms.map ||
841 he->branch_info->from.ms.map->dso != dso);
842}
843
844static int64_t
845sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
846{
847 if (!left->branch_info || !right->branch_info)
848 return cmp_null(left->branch_info, right->branch_info);
849
850 return _sort__dso_cmp(left->branch_info->to.ms.map,
851 right->branch_info->to.ms.map);
852}
853
854static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
855 size_t size, unsigned int width)
856{
857 if (he->branch_info)
858 return _hist_entry__dso_snprintf(he->branch_info->to.ms.map,
859 bf, size, width);
860 else
861 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
862}
863
864static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
865 const void *arg)
866{
867 const struct dso *dso = arg;
868
869 if (type != HIST_FILTER__DSO)
870 return -1;
871
872 return dso && (!he->branch_info || !he->branch_info->to.ms.map ||
873 he->branch_info->to.ms.map->dso != dso);
874}
875
876static int64_t
877sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
878{
	struct addr_map_symbol *from_l, *from_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;
887
888 if (!from_l->ms.sym && !from_r->ms.sym)
889 return _sort__addr_cmp(from_l->addr, from_r->addr);
890
891 return _sort__sym_cmp(from_l->ms.sym, from_r->ms.sym);
892}
893
894static int64_t
895sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
896{
897 struct addr_map_symbol *to_l, *to_r;
898
899 if (!left->branch_info || !right->branch_info)
900 return cmp_null(left->branch_info, right->branch_info);
901
902 to_l = &left->branch_info->to;
903 to_r = &right->branch_info->to;
904
905 if (!to_l->ms.sym && !to_r->ms.sym)
906 return _sort__addr_cmp(to_l->addr, to_r->addr);
907
908 return _sort__sym_cmp(to_l->ms.sym, to_r->ms.sym);
909}
910
911static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
912 size_t size, unsigned int width)
913{
914 if (he->branch_info) {
915 struct addr_map_symbol *from = &he->branch_info->from;
916
917 return _hist_entry__sym_snprintf(&from->ms, from->al_addr,
918 he->level, bf, size, width);
919 }
920
921 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
922}
923
924static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
925 size_t size, unsigned int width)
926{
927 if (he->branch_info) {
928 struct addr_map_symbol *to = &he->branch_info->to;
929
930 return _hist_entry__sym_snprintf(&to->ms, to->al_addr,
931 he->level, bf, size, width);
932 }
933
934 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
935}
936
937static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
938 const void *arg)
939{
940 const char *sym = arg;
941
942 if (type != HIST_FILTER__SYMBOL)
943 return -1;
944
945 return sym && !(he->branch_info && he->branch_info->from.ms.sym &&
946 strstr(he->branch_info->from.ms.sym->name, sym));
947}
948
949static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
950 const void *arg)
951{
952 const char *sym = arg;
953
954 if (type != HIST_FILTER__SYMBOL)
955 return -1;
956
957 return sym && !(he->branch_info && he->branch_info->to.ms.sym &&
958 strstr(he->branch_info->to.ms.sym->name, sym));
959}
960
961struct sort_entry sort_dso_from = {
962 .se_header = "Source Shared Object",
963 .se_cmp = sort__dso_from_cmp,
964 .se_snprintf = hist_entry__dso_from_snprintf,
965 .se_filter = hist_entry__dso_from_filter,
966 .se_width_idx = HISTC_DSO_FROM,
967};
968
969struct sort_entry sort_dso_to = {
970 .se_header = "Target Shared Object",
971 .se_cmp = sort__dso_to_cmp,
972 .se_snprintf = hist_entry__dso_to_snprintf,
973 .se_filter = hist_entry__dso_to_filter,
974 .se_width_idx = HISTC_DSO_TO,
975};
976
977struct sort_entry sort_sym_from = {
978 .se_header = "Source Symbol",
979 .se_cmp = sort__sym_from_cmp,
980 .se_snprintf = hist_entry__sym_from_snprintf,
981 .se_filter = hist_entry__sym_from_filter,
982 .se_width_idx = HISTC_SYMBOL_FROM,
983};
984
985struct sort_entry sort_sym_to = {
986 .se_header = "Target Symbol",
987 .se_cmp = sort__sym_to_cmp,
988 .se_snprintf = hist_entry__sym_to_snprintf,
989 .se_filter = hist_entry__sym_to_filter,
990 .se_width_idx = HISTC_SYMBOL_TO,
991};
992
993static int64_t
994sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
995{
996 unsigned char mp, p;
997
998 if (!left->branch_info || !right->branch_info)
999 return cmp_null(left->branch_info, right->branch_info);
1000
1001 mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
1002 p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
1003 return mp || p;
1004}
1005
1006static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
1008 static const char *out = "N/A";
1009
1010 if (he->branch_info) {
1011 if (he->branch_info->flags.predicted)
1012 out = "N";
1013 else if (he->branch_info->flags.mispred)
1014 out = "Y";
1015 }
1016
1017 return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
1018}
1019
1020static int64_t
1021sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
1022{
1023 if (!left->branch_info || !right->branch_info)
1024 return cmp_null(left->branch_info, right->branch_info);
1025
1026 return left->branch_info->flags.cycles -
1027 right->branch_info->flags.cycles;
1028}
1029
1030static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
1031 size_t size, unsigned int width)
1032{
1033 if (!he->branch_info)
1034 return scnprintf(bf, size, "%-.*s", width, "N/A");
1035 if (he->branch_info->flags.cycles == 0)
1036 return repsep_snprintf(bf, size, "%-*s", width, "-");
1037 return repsep_snprintf(bf, size, "%-*hd", width,
1038 he->branch_info->flags.cycles);
1039}
1040
1041struct sort_entry sort_cycles = {
1042 .se_header = "Basic Block Cycles",
1043 .se_cmp = sort__cycles_cmp,
1044 .se_snprintf = hist_entry__cycles_snprintf,
1045 .se_width_idx = HISTC_CYCLES,
1046};
1047
1048/* --sort daddr_sym */
1049int64_t
1050sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1051{
1052 uint64_t l = 0, r = 0;
1053
1054 if (left->mem_info)
1055 l = left->mem_info->daddr.addr;
1056 if (right->mem_info)
1057 r = right->mem_info->daddr.addr;
1058
1059 return (int64_t)(r - l);
1060}
1061
1062static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
1063 size_t size, unsigned int width)
1064{
1065 uint64_t addr = 0;
1066 struct map_symbol *ms = NULL;
1067
1068 if (he->mem_info) {
1069 addr = he->mem_info->daddr.addr;
1070 ms = &he->mem_info->daddr.ms;
1071 }
1072 return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
1073}
1074
1075int64_t
1076sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
1077{
1078 uint64_t l = 0, r = 0;
1079
1080 if (left->mem_info)
1081 l = left->mem_info->iaddr.addr;
1082 if (right->mem_info)
1083 r = right->mem_info->iaddr.addr;
1084
1085 return (int64_t)(r - l);
1086}
1087
1088static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
1089 size_t size, unsigned int width)
1090{
1091 uint64_t addr = 0;
1092 struct map_symbol *ms = NULL;
1093
1094 if (he->mem_info) {
1095 addr = he->mem_info->iaddr.addr;
1096 ms = &he->mem_info->iaddr.ms;
1097 }
1098 return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
1099}
1100
1101static int64_t
1102sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1103{
1104 struct map *map_l = NULL;
1105 struct map *map_r = NULL;
1106
1107 if (left->mem_info)
1108 map_l = left->mem_info->daddr.ms.map;
1109 if (right->mem_info)
1110 map_r = right->mem_info->daddr.ms.map;
1111
1112 return _sort__dso_cmp(map_l, map_r);
1113}
1114
1115static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
1116 size_t size, unsigned int width)
1117{
1118 struct map *map = NULL;
1119
1120 if (he->mem_info)
1121 map = he->mem_info->daddr.ms.map;
1122
1123 return _hist_entry__dso_snprintf(map, bf, size, width);
1124}
1125
1126static int64_t
1127sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
1128{
1129 union perf_mem_data_src data_src_l;
1130 union perf_mem_data_src data_src_r;
1131
1132 if (left->mem_info)
1133 data_src_l = left->mem_info->data_src;
1134 else
1135 data_src_l.mem_lock = PERF_MEM_LOCK_NA;
1136
1137 if (right->mem_info)
1138 data_src_r = right->mem_info->data_src;
1139 else
1140 data_src_r.mem_lock = PERF_MEM_LOCK_NA;
1141
1142 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
1143}
1144
1145static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
1146 size_t size, unsigned int width)
1147{
1148 char out[10];
1149
1150 perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
1151 return repsep_snprintf(bf, size, "%.*s", width, out);
1152}
1153
1154static int64_t
1155sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
1156{
1157 union perf_mem_data_src data_src_l;
1158 union perf_mem_data_src data_src_r;
1159
1160 if (left->mem_info)
1161 data_src_l = left->mem_info->data_src;
1162 else
1163 data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
1164
1165 if (right->mem_info)
1166 data_src_r = right->mem_info->data_src;
1167 else
1168 data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
1169
1170 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
1171}
1172
1173static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
1174 size_t size, unsigned int width)
1175{
1176 char out[64];
1177
1178 perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
1179 return repsep_snprintf(bf, size, "%-*s", width, out);
1180}
1181
1182static int64_t
1183sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
1184{
1185 union perf_mem_data_src data_src_l;
1186 union perf_mem_data_src data_src_r;
1187
1188 if (left->mem_info)
1189 data_src_l = left->mem_info->data_src;
1190 else
1191 data_src_l.mem_lvl = PERF_MEM_LVL_NA;
1192
1193 if (right->mem_info)
1194 data_src_r = right->mem_info->data_src;
1195 else
1196 data_src_r.mem_lvl = PERF_MEM_LVL_NA;
1197
1198 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
1199}
1200
1201static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
1202 size_t size, unsigned int width)
1203{
1204 char out[64];
1205
1206 perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
1207 return repsep_snprintf(bf, size, "%-*s", width, out);
1208}
1209
1210static int64_t
1211sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
1212{
1213 union perf_mem_data_src data_src_l;
1214 union perf_mem_data_src data_src_r;
1215
1216 if (left->mem_info)
1217 data_src_l = left->mem_info->data_src;
1218 else
1219 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
1220
1221 if (right->mem_info)
1222 data_src_r = right->mem_info->data_src;
1223 else
1224 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
1225
1226 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
1227}
1228
1229static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
1230 size_t size, unsigned int width)
1231{
1232 char out[64];
1233
1234 perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
1235 return repsep_snprintf(bf, size, "%-*s", width, out);
1236}
1237
1238int64_t
1239sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
1240{
1241 u64 l, r;
1242 struct map *l_map, *r_map;
1243 int rc;
1244
1245 if (!left->mem_info) return -1;
1246 if (!right->mem_info) return 1;
1247
1248 /* group event types together */
1249 if (left->cpumode > right->cpumode) return -1;
1250 if (left->cpumode < right->cpumode) return 1;
1251
1252 l_map = left->mem_info->daddr.ms.map;
1253 r_map = right->mem_info->daddr.ms.map;
1254
1255 /* if both are NULL, jump to sort on al_addr instead */
1256 if (!l_map && !r_map)
1257 goto addr;
1258
1259 if (!l_map) return -1;
1260 if (!r_map) return 1;
1261
1262 rc = dso__cmp_id(l_map->dso, r_map->dso);
1263 if (rc)
1264 return rc;
1265 /*
1266 * Addresses with no major/minor numbers are assumed to be
1267 * anonymous in userspace. Sort those on pid then address.
1268 *
1269 * The kernel and non-zero major/minor mapped areas are
1270 * assumed to be unity mapped. Sort those on address.
1271 */
1272
1273 if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
1274 (!(l_map->flags & MAP_SHARED)) &&
1275 !l_map->dso->id.maj && !l_map->dso->id.min &&
1276 !l_map->dso->id.ino && !l_map->dso->id.ino_generation) {
1277 /* userspace anonymous */
1278
1279 if (left->thread->pid_ > right->thread->pid_) return -1;
1280 if (left->thread->pid_ < right->thread->pid_) return 1;
1281 }
1282
1283addr:
1284 /* al_addr does all the right addr - start + offset calculations */
1285 l = cl_address(left->mem_info->daddr.al_addr);
1286 r = cl_address(right->mem_info->daddr.al_addr);
1287
1288 if (l > r) return -1;
1289 if (l < r) return 1;
1290
1291 return 0;
1292}
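/*
 * Note: cl_address() (cacheline.h) masks the address down to its cache
 * line, roughly addr & ~(cacheline_size() - 1), so accesses hitting the
 * same line end up in the same histogram bucket here.
 */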
1293
1294static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
1295 size_t size, unsigned int width)
1296{
1297
1298 uint64_t addr = 0;
1299 struct map_symbol *ms = NULL;
1300 char level = he->level;
1301
1302 if (he->mem_info) {
1303 struct map *map = he->mem_info->daddr.ms.map;
1304
1305 addr = cl_address(he->mem_info->daddr.al_addr);
1306 ms = &he->mem_info->daddr.ms;
1307
1308 /* print [s] for shared data mmaps */
1309 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
1310 map && !(map->prot & PROT_EXEC) &&
1311 (map->flags & MAP_SHARED) &&
1312 (map->dso->id.maj || map->dso->id.min ||
1313 map->dso->id.ino || map->dso->id.ino_generation))
1314 level = 's';
1315 else if (!map)
1316 level = 'X';
1317 }
1318 return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width);
1319}
1320
1321struct sort_entry sort_mispredict = {
1322 .se_header = "Branch Mispredicted",
1323 .se_cmp = sort__mispredict_cmp,
1324 .se_snprintf = hist_entry__mispredict_snprintf,
1325 .se_width_idx = HISTC_MISPREDICT,
1326};
1327
1328static int64_t
1329sort__weight_cmp(struct hist_entry *left, struct hist_entry *right)
1330{
1331 return left->weight - right->weight;
1332}
1333
1334static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1335 size_t size, unsigned int width)
1336{
1337 return repsep_snprintf(bf, size, "%-*llu", width, he->weight);
1338}
1339
1340struct sort_entry sort_local_weight = {
1341 .se_header = "Local Weight",
1342 .se_cmp = sort__weight_cmp,
1343 .se_snprintf = hist_entry__local_weight_snprintf,
1344 .se_width_idx = HISTC_LOCAL_WEIGHT,
1345};
1346
1347static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1348 size_t size, unsigned int width)
1349{
1350 return repsep_snprintf(bf, size, "%-*llu", width,
1351 he->weight * he->stat.nr_events);
1352}
1353
1354struct sort_entry sort_global_weight = {
1355 .se_header = "Weight",
1356 .se_cmp = sort__weight_cmp,
1357 .se_snprintf = hist_entry__global_weight_snprintf,
1358 .se_width_idx = HISTC_GLOBAL_WEIGHT,
1359};
1360
1361static int64_t
1362sort__ins_lat_cmp(struct hist_entry *left, struct hist_entry *right)
1363{
1364 return left->ins_lat - right->ins_lat;
1365}
1366
1367static int hist_entry__local_ins_lat_snprintf(struct hist_entry *he, char *bf,
1368 size_t size, unsigned int width)
1369{
1370 return repsep_snprintf(bf, size, "%-*u", width, he->ins_lat);
1371}
1372
1373struct sort_entry sort_local_ins_lat = {
1374 .se_header = "Local INSTR Latency",
1375 .se_cmp = sort__ins_lat_cmp,
1376 .se_snprintf = hist_entry__local_ins_lat_snprintf,
1377 .se_width_idx = HISTC_LOCAL_INS_LAT,
1378};
1379
1380static int hist_entry__global_ins_lat_snprintf(struct hist_entry *he, char *bf,
1381 size_t size, unsigned int width)
1382{
1383 return repsep_snprintf(bf, size, "%-*u", width,
1384 he->ins_lat * he->stat.nr_events);
1385}
1386
1387struct sort_entry sort_global_ins_lat = {
1388 .se_header = "INSTR Latency",
1389 .se_cmp = sort__ins_lat_cmp,
1390 .se_snprintf = hist_entry__global_ins_lat_snprintf,
1391 .se_width_idx = HISTC_GLOBAL_INS_LAT,
1392};
1393
1394static int64_t
1395sort__global_p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right)
1396{
1397 return left->p_stage_cyc - right->p_stage_cyc;
1398}
1399
1400static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
1401 size_t size, unsigned int width)
1402{
1403 return repsep_snprintf(bf, size, "%-*u", width, he->p_stage_cyc);
1404}
1405
1406struct sort_entry sort_p_stage_cyc = {
1407 .se_header = "Pipeline Stage Cycle",
1408 .se_cmp = sort__global_p_stage_cyc_cmp,
1409 .se_snprintf = hist_entry__p_stage_cyc_snprintf,
1410 .se_width_idx = HISTC_P_STAGE_CYC,
1411};
1412
1413struct sort_entry sort_mem_daddr_sym = {
1414 .se_header = "Data Symbol",
1415 .se_cmp = sort__daddr_cmp,
1416 .se_snprintf = hist_entry__daddr_snprintf,
1417 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
1418};
1419
1420struct sort_entry sort_mem_iaddr_sym = {
1421 .se_header = "Code Symbol",
1422 .se_cmp = sort__iaddr_cmp,
1423 .se_snprintf = hist_entry__iaddr_snprintf,
1424 .se_width_idx = HISTC_MEM_IADDR_SYMBOL,
1425};
1426
1427struct sort_entry sort_mem_daddr_dso = {
1428 .se_header = "Data Object",
1429 .se_cmp = sort__dso_daddr_cmp,
1430 .se_snprintf = hist_entry__dso_daddr_snprintf,
1431 .se_width_idx = HISTC_MEM_DADDR_DSO,
1432};
1433
1434struct sort_entry sort_mem_locked = {
1435 .se_header = "Locked",
1436 .se_cmp = sort__locked_cmp,
1437 .se_snprintf = hist_entry__locked_snprintf,
1438 .se_width_idx = HISTC_MEM_LOCKED,
1439};
1440
1441struct sort_entry sort_mem_tlb = {
1442 .se_header = "TLB access",
1443 .se_cmp = sort__tlb_cmp,
1444 .se_snprintf = hist_entry__tlb_snprintf,
1445 .se_width_idx = HISTC_MEM_TLB,
1446};
1447
1448struct sort_entry sort_mem_lvl = {
1449 .se_header = "Memory access",
1450 .se_cmp = sort__lvl_cmp,
1451 .se_snprintf = hist_entry__lvl_snprintf,
1452 .se_width_idx = HISTC_MEM_LVL,
1453};
1454
1455struct sort_entry sort_mem_snoop = {
1456 .se_header = "Snoop",
1457 .se_cmp = sort__snoop_cmp,
1458 .se_snprintf = hist_entry__snoop_snprintf,
1459 .se_width_idx = HISTC_MEM_SNOOP,
1460};
1461
1462struct sort_entry sort_mem_dcacheline = {
1463 .se_header = "Data Cacheline",
1464 .se_cmp = sort__dcacheline_cmp,
1465 .se_snprintf = hist_entry__dcacheline_snprintf,
1466 .se_width_idx = HISTC_MEM_DCACHELINE,
1467};
1468
1469static int64_t
1470sort__blocked_cmp(struct hist_entry *left, struct hist_entry *right)
1471{
1472 union perf_mem_data_src data_src_l;
1473 union perf_mem_data_src data_src_r;
1474
1475 if (left->mem_info)
1476 data_src_l = left->mem_info->data_src;
1477 else
1478 data_src_l.mem_blk = PERF_MEM_BLK_NA;
1479
1480 if (right->mem_info)
1481 data_src_r = right->mem_info->data_src;
1482 else
1483 data_src_r.mem_blk = PERF_MEM_BLK_NA;
1484
1485 return (int64_t)(data_src_r.mem_blk - data_src_l.mem_blk);
1486}
1487
1488static int hist_entry__blocked_snprintf(struct hist_entry *he, char *bf,
1489 size_t size, unsigned int width)
1490{
1491 char out[16];
1492
1493 perf_mem__blk_scnprintf(out, sizeof(out), he->mem_info);
1494 return repsep_snprintf(bf, size, "%.*s", width, out);
1495}
1496
1497struct sort_entry sort_mem_blocked = {
1498 .se_header = "Blocked",
1499 .se_cmp = sort__blocked_cmp,
1500 .se_snprintf = hist_entry__blocked_snprintf,
1501 .se_width_idx = HISTC_MEM_BLOCKED,
1502};
1503
1504static int64_t
1505sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1506{
1507 uint64_t l = 0, r = 0;
1508
1509 if (left->mem_info)
1510 l = left->mem_info->daddr.phys_addr;
1511 if (right->mem_info)
1512 r = right->mem_info->daddr.phys_addr;
1513
1514 return (int64_t)(r - l);
1515}
1516
1517static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
1518 size_t size, unsigned int width)
1519{
1520 uint64_t addr = 0;
1521 size_t ret = 0;
1522 size_t len = BITS_PER_LONG / 4;
1523
1524 addr = he->mem_info->daddr.phys_addr;
1525
1526 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);
1527
1528 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);
1529
1530 ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");
1531
1532 if (ret > width)
1533 bf[width] = '\0';
1534
1535 return width;
1536}
1537
1538struct sort_entry sort_mem_phys_daddr = {
1539 .se_header = "Data Physical Address",
1540 .se_cmp = sort__phys_daddr_cmp,
1541 .se_snprintf = hist_entry__phys_daddr_snprintf,
1542 .se_width_idx = HISTC_MEM_PHYS_DADDR,
1543};
1544
1545static int64_t
1546sort__data_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
1547{
1548 uint64_t l = 0, r = 0;
1549
1550 if (left->mem_info)
1551 l = left->mem_info->daddr.data_page_size;
1552 if (right->mem_info)
1553 r = right->mem_info->daddr.data_page_size;
1554
1555 return (int64_t)(r - l);
1556}
1557
1558static int hist_entry__data_page_size_snprintf(struct hist_entry *he, char *bf,
1559 size_t size, unsigned int width)
1560{
1561 char str[PAGE_SIZE_NAME_LEN];
1562
1563 return repsep_snprintf(bf, size, "%-*s", width,
1564 get_page_size_name(he->mem_info->daddr.data_page_size, str));
1565}
1566
1567struct sort_entry sort_mem_data_page_size = {
1568 .se_header = "Data Page Size",
1569 .se_cmp = sort__data_page_size_cmp,
1570 .se_snprintf = hist_entry__data_page_size_snprintf,
1571 .se_width_idx = HISTC_MEM_DATA_PAGE_SIZE,
1572};
1573
1574static int64_t
1575sort__code_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
1576{
1577 uint64_t l = left->code_page_size;
1578 uint64_t r = right->code_page_size;
1579
1580 return (int64_t)(r - l);
1581}
1582
1583static int hist_entry__code_page_size_snprintf(struct hist_entry *he, char *bf,
1584 size_t size, unsigned int width)
1585{
1586 char str[PAGE_SIZE_NAME_LEN];
1587
1588 return repsep_snprintf(bf, size, "%-*s", width,
1589 get_page_size_name(he->code_page_size, str));
1590}
1591
1592struct sort_entry sort_code_page_size = {
1593 .se_header = "Code Page Size",
1594 .se_cmp = sort__code_page_size_cmp,
1595 .se_snprintf = hist_entry__code_page_size_snprintf,
1596 .se_width_idx = HISTC_CODE_PAGE_SIZE,
1597};
1598
1599static int64_t
1600sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
1601{
1602 if (!left->branch_info || !right->branch_info)
1603 return cmp_null(left->branch_info, right->branch_info);
1604
1605 return left->branch_info->flags.abort !=
1606 right->branch_info->flags.abort;
1607}
1608
1609static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
1610 size_t size, unsigned int width)
1611{
1612 static const char *out = "N/A";
1613
1614 if (he->branch_info) {
1615 if (he->branch_info->flags.abort)
1616 out = "A";
1617 else
1618 out = ".";
1619 }
1620
1621 return repsep_snprintf(bf, size, "%-*s", width, out);
1622}
1623
1624struct sort_entry sort_abort = {
1625 .se_header = "Transaction abort",
1626 .se_cmp = sort__abort_cmp,
1627 .se_snprintf = hist_entry__abort_snprintf,
1628 .se_width_idx = HISTC_ABORT,
1629};
1630
1631static int64_t
1632sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
1633{
1634 if (!left->branch_info || !right->branch_info)
1635 return cmp_null(left->branch_info, right->branch_info);
1636
1637 return left->branch_info->flags.in_tx !=
1638 right->branch_info->flags.in_tx;
1639}
1640
1641static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
1642 size_t size, unsigned int width)
1643{
1644 static const char *out = "N/A";
1645
1646 if (he->branch_info) {
1647 if (he->branch_info->flags.in_tx)
1648 out = "T";
1649 else
1650 out = ".";
1651 }
1652
1653 return repsep_snprintf(bf, size, "%-*s", width, out);
1654}
1655
1656struct sort_entry sort_in_tx = {
1657 .se_header = "Branch in transaction",
1658 .se_cmp = sort__in_tx_cmp,
1659 .se_snprintf = hist_entry__in_tx_snprintf,
1660 .se_width_idx = HISTC_IN_TX,
1661};
1662
1663static int64_t
1664sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
1665{
1666 return left->transaction - right->transaction;
1667}
1668
1669static inline char *add_str(char *p, const char *str)
1670{
1671 strcpy(p, str);
1672 return p + strlen(str);
1673}
1674
1675static struct txbit {
1676 unsigned flag;
1677 const char *name;
1678 int skip_for_len;
1679} txbits[] = {
1680 { PERF_TXN_ELISION, "EL ", 0 },
1681 { PERF_TXN_TRANSACTION, "TX ", 1 },
1682 { PERF_TXN_SYNC, "SYNC ", 1 },
1683 { PERF_TXN_ASYNC, "ASYNC ", 0 },
1684 { PERF_TXN_RETRY, "RETRY ", 0 },
1685 { PERF_TXN_CONFLICT, "CON ", 0 },
1686 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
1687 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
1688 { 0, NULL, 0 }
1689};
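/*
 * Illustrative example: a transaction word with PERF_TXN_TRANSACTION,
 * PERF_TXN_SYNC and an abort code of 4 is rendered by
 * hist_entry__transaction_snprintf() below as "TX SYNC :4".
 */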
1690
1691int hist_entry__transaction_len(void)
1692{
1693 int i;
1694 int len = 0;
1695
1696 for (i = 0; txbits[i].name; i++) {
1697 if (!txbits[i].skip_for_len)
1698 len += strlen(txbits[i].name);
1699 }
1700 len += 4; /* :XX<space> */
1701 return len;
1702}
1703
1704static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
1705 size_t size, unsigned int width)
1706{
1707 u64 t = he->transaction;
1708 char buf[128];
1709 char *p = buf;
1710 int i;
1711
1712 buf[0] = 0;
1713 for (i = 0; txbits[i].name; i++)
1714 if (txbits[i].flag & t)
1715 p = add_str(p, txbits[i].name);
1716 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
1717 p = add_str(p, "NEITHER ");
1718 if (t & PERF_TXN_ABORT_MASK) {
1719 sprintf(p, ":%" PRIx64,
1720 (t & PERF_TXN_ABORT_MASK) >>
1721 PERF_TXN_ABORT_SHIFT);
1722 p += strlen(p);
1723 }
1724
1725 return repsep_snprintf(bf, size, "%-*s", width, buf);
1726}
1727
1728struct sort_entry sort_transaction = {
1729 .se_header = "Transaction ",
1730 .se_cmp = sort__transaction_cmp,
1731 .se_snprintf = hist_entry__transaction_snprintf,
1732 .se_width_idx = HISTC_TRANSACTION,
1733};
1734
1735/* --sort symbol_size */
1736
1737static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
1738{
1739 int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
1740 int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;
1741
1742 return size_l < size_r ? -1 :
1743 size_l == size_r ? 0 : 1;
1744}
1745
1746static int64_t
1747sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
1748{
1749 return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
1750}
1751
1752static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
1753 size_t bf_size, unsigned int width)
1754{
1755 if (sym)
1756 return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));
1757
1758 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
1759}
1760
1761static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
1762 size_t size, unsigned int width)
1763{
1764 return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
1765}
1766
1767struct sort_entry sort_sym_size = {
1768 .se_header = "Symbol size",
1769 .se_cmp = sort__sym_size_cmp,
1770 .se_snprintf = hist_entry__sym_size_snprintf,
1771 .se_width_idx = HISTC_SYM_SIZE,
1772};
1773
1774/* --sort dso_size */
1775
1776static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
1777{
1778 int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
1779 int64_t size_r = map_r != NULL ? map__size(map_r) : 0;
1780
1781 return size_l < size_r ? -1 :
1782 size_l == size_r ? 0 : 1;
1783}
1784
1785static int64_t
1786sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
1787{
1788 return _sort__dso_size_cmp(right->ms.map, left->ms.map);
1789}
1790
1791static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
1792 size_t bf_size, unsigned int width)
1793{
1794 if (map && map->dso)
1795 return repsep_snprintf(bf, bf_size, "%*d", width,
1796 map__size(map));
1797
1798 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
1799}
1800
1801static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
1802 size_t size, unsigned int width)
1803{
1804 return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
1805}
1806
1807struct sort_entry sort_dso_size = {
1808 .se_header = "DSO size",
1809 .se_cmp = sort__dso_size_cmp,
1810 .se_snprintf = hist_entry__dso_size_snprintf,
1811 .se_width_idx = HISTC_DSO_SIZE,
1812};
1813
1814
1815struct sort_dimension {
1816 const char *name;
1817 struct sort_entry *entry;
1818 int taken;
1819};
1820
1821int __weak arch_support_sort_key(const char *sort_key __maybe_unused)
1822{
1823 return 0;
1824}
1825
1826const char * __weak arch_perf_header_entry(const char *se_header)
1827{
1828 return se_header;
1829}
1830
1831static void sort_dimension_add_dynamic_header(struct sort_dimension *sd)
1832{
1833 sd->entry->se_header = arch_perf_header_entry(sd->entry->se_header);
1834}
1835
1836#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
1837
1838static struct sort_dimension common_sort_dimensions[] = {
1839 DIM(SORT_PID, "pid", sort_thread),
1840 DIM(SORT_COMM, "comm", sort_comm),
1841 DIM(SORT_DSO, "dso", sort_dso),
1842 DIM(SORT_SYM, "symbol", sort_sym),
1843 DIM(SORT_PARENT, "parent", sort_parent),
1844 DIM(SORT_CPU, "cpu", sort_cpu),
1845 DIM(SORT_SOCKET, "socket", sort_socket),
1846 DIM(SORT_SRCLINE, "srcline", sort_srcline),
1847 DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
1848 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
1849 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
1850 DIM(SORT_TRANSACTION, "transaction", sort_transaction),
1851 DIM(SORT_TRACE, "trace", sort_trace),
1852 DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
1853 DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
1854 DIM(SORT_CGROUP, "cgroup", sort_cgroup),
1855 DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
1856 DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
1857 DIM(SORT_TIME, "time", sort_time),
1858 DIM(SORT_CODE_PAGE_SIZE, "code_page_size", sort_code_page_size),
1859 DIM(SORT_LOCAL_INS_LAT, "local_ins_lat", sort_local_ins_lat),
1860 DIM(SORT_GLOBAL_INS_LAT, "ins_lat", sort_global_ins_lat),
1861 DIM(SORT_PIPELINE_STAGE_CYC, "p_stage_cyc", sort_p_stage_cyc),
1862};
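/*
 * Illustrative usage: `perf report --sort comm,dso,symbol` (the default
 * order above) resolves each key against these tables and adds the matching
 * sort_entry as a column, in the order given on the command line.
 */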
1863
1864#undef DIM
1865
1866#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
1867
1868static struct sort_dimension bstack_sort_dimensions[] = {
1869 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
1870 DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
1871 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
1872 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
1873 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
1874 DIM(SORT_IN_TX, "in_tx", sort_in_tx),
1875 DIM(SORT_ABORT, "abort", sort_abort),
1876 DIM(SORT_CYCLES, "cycles", sort_cycles),
1877 DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
1878 DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
1879 DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
1880};
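/*
 * Branch-stack keys are only meaningful with branch data (e.g. perf record -b
 * followed by perf report), i.e. in SORT_MODE__BRANCH, hence the separate
 * table indexed from __SORT_BRANCH_STACK.
 */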
1881
1882#undef DIM
1883
1884#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
1885
1886static struct sort_dimension memory_sort_dimensions[] = {
1887 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
1888 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
1889 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
1890 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
1891 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
1892 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
1893 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
1894 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
1895 DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
1896 DIM(SORT_MEM_DATA_PAGE_SIZE, "data_page_size", sort_mem_data_page_size),
1897 DIM(SORT_MEM_BLOCKED, "blocked", sort_mem_blocked),
1898};
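/*
 * Memory-mode keys (SORT_MODE__MEMORY, e.g. perf mem report); these are the
 * names used by default_mem_sort_order near the top of the file.
 */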
1899
1900#undef DIM
1901
1902struct hpp_dimension {
1903 const char *name;
1904 struct perf_hpp_fmt *fmt;
1905 int taken;
1906};
1907
1908#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
1909
1910static struct hpp_dimension hpp_sort_dimensions[] = {
1911 DIM(PERF_HPP__OVERHEAD, "overhead"),
1912 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
1913 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
1914 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
1915 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
1916 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
1917 DIM(PERF_HPP__SAMPLES, "sample"),
1918 DIM(PERF_HPP__PERIOD, "period"),
1919};
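/*
 * Columns backed by perf_hpp__format rather than a sort_entry (overhead,
 * sample count, period); they are selected by name just like the sort keys
 * above.
 */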
1920
1921#undef DIM
1922
1923struct hpp_sort_entry {
1924 struct perf_hpp_fmt hpp;
1925 struct sort_entry *se;
1926};
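/*
 * Adapter that presents a classic struct sort_entry as a perf_hpp_fmt, so
 * the output code can handle sort-key columns and the built-in
 * overhead/period columns through one interface.
 */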
1927
1928void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
1929{
1930 struct hpp_sort_entry *hse;
1931
1932 if (!perf_hpp__is_sort_entry(fmt))
1933 return;
1934
1935 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1936 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
1937}
1938
1939static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1940 struct hists *hists, int line __maybe_unused,
1941 int *span __maybe_unused)
1942{
1943 struct hpp_sort_entry *hse;
1944 size_t len = fmt->user_len;
1945
1946 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1947
1948 if (!len)
1949 len = hists__col_len(hists, hse->se->se_width_idx);
1950
1951 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
1952}
1953
1954static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
1955 struct perf_hpp *hpp __maybe_unused,
1956 struct hists *hists)
1957{
1958 struct hpp_sort_entry *hse;
1959 size_t len = fmt->user_len;
1960
1961 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1962
1963 if (!len)
1964 len = hists__col_len(hists, hse->se->se_width_idx);
1965
1966 return len;
1967}
1968
1969static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1970 struct hist_entry *he)
1971{
1972 struct hpp_sort_entry *hse;
1973 size_t len = fmt->user_len;
1974
1975 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1976
1977 if (!len)
1978 len = hists__col_len(he->hists, hse->se->se_width_idx);
1979
1980 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
1981}
1982
1983static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
1984 struct hist_entry *a, struct hist_entry *b)
1985{
1986 struct hpp_sort_entry *hse;
1987
1988 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1989 return hse->se->se_cmp(a, b);
1990}
1991
1992static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
1993 struct hist_entry *a, struct hist_entry *b)
1994{
1995 struct hpp_sort_entry *hse;
1996 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
1997
1998 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1999 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
2000 return collapse_fn(a, b);
2001}
2002
2003static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
2004 struct hist_entry *a, struct hist_entry *b)
2005{
2006 struct hpp_sort_entry *hse;
2007 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
2008
2009 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2010 sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
2011 return sort_fn(a, b);
2012}
2013
2014bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
2015{
2016 return format->header == __sort__hpp_header;
2017}
2018
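/*
 * MK_SORT_ENTRY_CHK(key) generates a perf_hpp__is_<key>_entry() helper that
 * tells whether a format is the hpp wrapper of the sort_<key> entry.
 */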
2019#define MK_SORT_ENTRY_CHK(key) \
2020bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \
2021{ \
2022 struct hpp_sort_entry *hse; \
2023 \
2024 if (!perf_hpp__is_sort_entry(fmt)) \
2025 return false; \
2026 \
2027 hse = container_of(fmt, struct hpp_sort_entry, hpp); \
2028 return hse->se == &sort_ ## key ; \
2029}
2030
2031MK_SORT_ENTRY_CHK(trace)
2032MK_SORT_ENTRY_CHK(srcline)
2033MK_SORT_ENTRY_CHK(srcfile)
2034MK_SORT_ENTRY_CHK(thread)
2035MK_SORT_ENTRY_CHK(comm)
2036MK_SORT_ENTRY_CHK(dso)
2037MK_SORT_ENTRY_CHK(sym)
2038
2039
2040static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
2041{
2042 struct hpp_sort_entry *hse_a;
2043 struct hpp_sort_entry *hse_b;
2044
2045 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
2046 return false;
2047
2048 hse_a = container_of(a, struct hpp_sort_entry, hpp);
2049 hse_b = container_of(b, struct hpp_sort_entry, hpp);
2050
2051 return hse_a->se == hse_b->se;
2052}
2053
2054static void hse_free(struct perf_hpp_fmt *fmt)
2055{
2056 struct hpp_sort_entry *hse;
2057
2058 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2059 free(hse);
2060}
2061
2062static struct hpp_sort_entry *
2063__sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
2064{
2065 struct hpp_sort_entry *hse;
2066
2067 hse = malloc(sizeof(*hse));
2068 if (hse == NULL) {
2069 pr_err("Memory allocation failed\n");
2070 return NULL;
2071 }
2072
2073 hse->se = sd->entry;
2074 hse->hpp.name = sd->entry->se_header;
2075 hse->hpp.header = __sort__hpp_header;
2076 hse->hpp.width = __sort__hpp_width;
2077 hse->hpp.entry = __sort__hpp_entry;
2078 hse->hpp.color = NULL;
2079
2080 hse->hpp.cmp = __sort__hpp_cmp;
2081 hse->hpp.collapse = __sort__hpp_collapse;
2082 hse->hpp.sort = __sort__hpp_sort;
2083 hse->hpp.equal = __sort__hpp_equal;
2084 hse->hpp.free = hse_free;
2085
2086 INIT_LIST_HEAD(&hse->hpp.list);
2087 INIT_LIST_HEAD(&hse->hpp.sort_list);
2088 hse->hpp.elide = false;
2089 hse->hpp.len = 0;
2090 hse->hpp.user_len = 0;
2091 hse->hpp.level = level;
2092
2093 return hse;
2094}
2095
2096static void hpp_free(struct perf_hpp_fmt *fmt)
2097{
2098 free(fmt);
2099}
2100
2101static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
2102 int level)
2103{
2104 struct perf_hpp_fmt *fmt;
2105
2106 fmt = memdup(hd->fmt, sizeof(*fmt));
2107 if (fmt) {
2108 INIT_LIST_HEAD(&fmt->list);
2109 INIT_LIST_HEAD(&fmt->sort_list);
2110 fmt->free = hpp_free;
2111 fmt->level = level;
2112 }
2113
2114 return fmt;
2115}
2116
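/*
 * Returns -1 when no sort key in the hpp list provides a filter of @type,
 * 0 when the entry passes every applicable filter, and a positive value
 * when at least one filter rejects it.
 */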
2117int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
2118{
2119 struct perf_hpp_fmt *fmt;
2120 struct hpp_sort_entry *hse;
2121 int ret = -1;
2122 int r;
2123
2124 perf_hpp_list__for_each_format(he->hpp_list, fmt) {
2125 if (!perf_hpp__is_sort_entry(fmt))
2126 continue;
2127
2128 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2129 if (hse->se->se_filter == NULL)
2130 continue;
2131
2132		/*
2133		 * A hist entry is filtered if any of the sort keys in the
2134		 * hpp list applies; non-matching filter types are skipped.
2135		 */
2136 r = hse->se->se_filter(he, type, arg);
2137 if (r >= 0) {
2138 if (ret < 0)
2139 ret = 0;
2140 ret |= r;
2141 }
2142 }
2143
2144 return ret;
2145}
2146
2147static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
2148 struct perf_hpp_list *list,
2149 int level)
2150{
2151 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
2152
2153 if (hse == NULL)
2154 return -1;
2155
2156 perf_hpp_list__register_sort_field(list, &hse->hpp);
2157 return 0;
2158}
2159
2160static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
2161 struct perf_hpp_list *list)
2162{
2163 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);
2164
2165 if (hse == NULL)
2166 return -1;
2167
2168 perf_hpp_list__column_register(list, &hse->hpp);
2169 return 0;
2170}
2171
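/*
 * A dynamic entry is a sort/output column created at runtime from a
 * tracepoint format field (see add_dynamic_entry() below); raw_trace
 * selects the raw field value instead of the pretty-printed one.
 */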
2172struct hpp_dynamic_entry {
2173 struct perf_hpp_fmt hpp;
2174 struct evsel *evsel;
2175 struct tep_format_field *field;
2176 unsigned dynamic_len;
2177 bool raw_trace;
2178};
2179
2180static int hde_width(struct hpp_dynamic_entry *hde)
2181{
2182 if (!hde->hpp.len) {
2183 int len = hde->dynamic_len;
2184 int namelen = strlen(hde->field->name);
2185 int fieldlen = hde->field->size;
2186
2187 if (namelen > len)
2188 len = namelen;
2189
2190 if (!(hde->field->flags & TEP_FIELD_IS_STRING)) {
2191			/* length needed to print the value as a hex number */
2192 fieldlen = hde->field->size * 2 + 2;
2193 }
2194 if (fieldlen > len)
2195 len = fieldlen;
2196
2197 hde->hpp.len = len;
2198 }
2199 return hde->hpp.len;
2200}
2201
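/*
 * Scan the pretty-printed trace output, assumed to be a list of
 * space-separated "name=value" pairs, for this entry's field and remember
 * the widest value seen so the column can be sized to fit.
 */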
2202static void update_dynamic_len(struct hpp_dynamic_entry *hde,
2203 struct hist_entry *he)
2204{
2205 char *str, *pos;
2206 struct tep_format_field *field = hde->field;
2207 size_t namelen;
2208 bool last = false;
2209
2210 if (hde->raw_trace)
2211 return;
2212
2213	/* parse the pretty-printed result and update the max length */
2214 if (!he->trace_output)
2215 he->trace_output = get_trace_output(he);
2216
2217 namelen = strlen(field->name);
2218 str = he->trace_output;
2219
2220 while (str) {
2221 pos = strchr(str, ' ');
2222 if (pos == NULL) {
2223 last = true;
2224 pos = str + strlen(str);
2225 }
2226
2227 if (!strncmp(str, field->name, namelen)) {
2228 size_t len;
2229
2230 str += namelen + 1;
2231 len = pos - str;
2232
2233 if (len > hde->dynamic_len)
2234 hde->dynamic_len = len;
2235 break;
2236 }
2237
2238 if (last)
2239 str = NULL;
2240 else
2241 str = pos + 1;
2242 }
2243}
2244
2245static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2246 struct hists *hists __maybe_unused,
2247 int line __maybe_unused,
2248 int *span __maybe_unused)
2249{
2250 struct hpp_dynamic_entry *hde;
2251 size_t len = fmt->user_len;
2252
2253 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2254
2255 if (!len)
2256 len = hde_width(hde);
2257
2258 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
2259}
2260
2261static int __sort__hde_width(struct perf_hpp_fmt *fmt,
2262 struct perf_hpp *hpp __maybe_unused,
2263 struct hists *hists __maybe_unused)
2264{
2265 struct hpp_dynamic_entry *hde;
2266 size_t len = fmt->user_len;
2267
2268 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2269
2270 if (!len)
2271 len = hde_width(hde);
2272
2273 return len;
2274}
2275
2276bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
2277{
2278 struct hpp_dynamic_entry *hde;
2279
2280 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2281
2282 return hists_to_evsel(hists) == hde->evsel;
2283}
2284
2285static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2286 struct hist_entry *he)
2287{
2288 struct hpp_dynamic_entry *hde;
2289 size_t len = fmt->user_len;
2290 char *str, *pos;
2291 struct tep_format_field *field;
2292 size_t namelen;
2293 bool last = false;
2294 int ret;
2295
2296 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2297
2298 if (!len)
2299 len = hde_width(hde);
2300
2301 if (hde->raw_trace)
2302 goto raw_field;
2303
2304 if (!he->trace_output)
2305 he->trace_output = get_trace_output(he);
2306
2307 field = hde->field;
2308 namelen = strlen(field->name);
2309 str = he->trace_output;
2310
2311 while (str) {
2312 pos = strchr(str, ' ');
2313 if (pos == NULL) {
2314 last = true;
2315 pos = str + strlen(str);
2316 }
2317
2318 if (!strncmp(str, field->name, namelen)) {
2319 str += namelen + 1;
2320 str = strndup(str, pos - str);
2321
2322 if (str == NULL)
2323 return scnprintf(hpp->buf, hpp->size,
2324 "%*.*s", len, len, "ERROR");
2325 break;
2326 }
2327
2328 if (last)
2329 str = NULL;
2330 else
2331 str = pos + 1;
2332 }
2333
2334 if (str == NULL) {
2335 struct trace_seq seq;
2336raw_field:
2337 trace_seq_init(&seq);
2338 tep_print_field(&seq, he->raw_data, hde->field);
2339 str = seq.buffer;
2340 }
2341
2342 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
2343 free(str);
2344 return ret;
2345}
2346
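/*
 * Compare two entries by the raw bytes of the tracepoint field.  For
 * dynamic (__data_loc) fields the value stored in the record encodes the
 * payload offset in the low 16 bits and its size in the high 16 bits.
 * A NULL @b is only used to update the dynamic column width.
 */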
2347static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
2348 struct hist_entry *a, struct hist_entry *b)
2349{
2350 struct hpp_dynamic_entry *hde;
2351 struct tep_format_field *field;
2352 unsigned offset, size;
2353
2354 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2355
2356 if (b == NULL) {
2357 update_dynamic_len(hde, a);
2358 return 0;
2359 }
2360
2361 field = hde->field;
2362 if (field->flags & TEP_FIELD_IS_DYNAMIC) {
2363 unsigned long long dyn;
2364
2365 tep_read_number_field(field, a->raw_data, &dyn);
2366 offset = dyn & 0xffff;
2367 size = (dyn >> 16) & 0xffff;
2368
2369 /* record max width for output */
2370 if (size > hde->dynamic_len)
2371 hde->dynamic_len = size;
2372 } else {
2373 offset = field->offset;
2374 size = field->size;
2375 }
2376
2377 return memcmp(a->raw_data + offset, b->raw_data + offset, size);
2378}
2379
2380bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
2381{
2382 return fmt->cmp == __sort__hde_cmp;
2383}
2384
2385static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
2386{
2387 struct hpp_dynamic_entry *hde_a;
2388 struct hpp_dynamic_entry *hde_b;
2389
2390 if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
2391 return false;
2392
2393 hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
2394 hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
2395
2396 return hde_a->field == hde_b->field;
2397}
2398
2399static void hde_free(struct perf_hpp_fmt *fmt)
2400{
2401 struct hpp_dynamic_entry *hde;
2402
2403 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2404 free(hde);
2405}
2406
2407static struct hpp_dynamic_entry *
2408__alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field,
2409 int level)
2410{
2411 struct hpp_dynamic_entry *hde;
2412
2413 hde = malloc(sizeof(*hde));
2414 if (hde == NULL) {
2415 pr_debug("Memory allocation failed\n");
2416 return NULL;
2417 }
2418
2419 hde->evsel = evsel;
2420 hde->field = field;
2421 hde->dynamic_len = 0;
2422
2423 hde->hpp.name = field->name;
2424 hde->hpp.header = __sort__hde_header;
2425 hde->hpp.width = __sort__hde_width;
2426 hde->hpp.entry = __sort__hde_entry;
2427 hde->hpp.color = NULL;
2428
2429 hde->hpp.cmp = __sort__hde_cmp;
2430 hde->hpp.collapse = __sort__hde_cmp;
2431 hde->hpp.sort = __sort__hde_cmp;
2432 hde->hpp.equal = __sort__hde_equal;
2433 hde->hpp.free = hde_free;
2434
2435 INIT_LIST_HEAD(&hde->hpp.list);
2436 INIT_LIST_HEAD(&hde->hpp.sort_list);
2437 hde->hpp.elide = false;
2438 hde->hpp.len = 0;
2439 hde->hpp.user_len = 0;
2440 hde->hpp.level = level;
2441
2442 return hde;
2443}
2444
2445struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
2446{
2447 struct perf_hpp_fmt *new_fmt = NULL;
2448
2449 if (perf_hpp__is_sort_entry(fmt)) {
2450 struct hpp_sort_entry *hse, *new_hse;
2451
2452 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2453 new_hse = memdup(hse, sizeof(*hse));
2454 if (new_hse)
2455 new_fmt = &new_hse->hpp;
2456 } else if (perf_hpp__is_dynamic_entry(fmt)) {
2457 struct hpp_dynamic_entry *hde, *new_hde;
2458
2459 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2460 new_hde = memdup(hde, sizeof(*hde));
2461 if (new_hde)
2462 new_fmt = &new_hde->hpp;
2463 } else {
2464 new_fmt = memdup(fmt, sizeof(*fmt));
2465 }
2466
2467 INIT_LIST_HEAD(&new_fmt->list);
2468 INIT_LIST_HEAD(&new_fmt->sort_list);
2469
2470 return new_fmt;
2471}
2472
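/*
 * Split a "<event>.<field>[/<option>]" token in place.  As an illustrative
 * example, "sched:sched_switch.next_comm/raw" yields
 * event = "sched:sched_switch", field = "next_comm", opt = "raw", while a
 * bare field name yields a NULL event name.
 */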
2473static int parse_field_name(char *str, char **event, char **field, char **opt)
2474{
2475 char *event_name, *field_name, *opt_name;
2476
2477 event_name = str;
2478 field_name = strchr(str, '.');
2479
2480 if (field_name) {
2481 *field_name++ = '\0';
2482 } else {
2483 event_name = NULL;
2484 field_name = str;
2485 }
2486
2487 opt_name = strchr(field_name, '/');
2488 if (opt_name)
2489 *opt_name++ = '\0';
2490
2491 *event = event_name;
2492 *field = field_name;
2493 *opt = opt_name;
2494
2495 return 0;
2496}
2497
2498/* find the matching evsel for a given event name. The event name can be:
2499 * 1. '%' + event index (e.g. '%1' for the first event)
2500 * 2. full event name (e.g. sched:sched_switch)
2501 * 3. partial event name (should not contain ':')
2502 */
2503static struct evsel *find_evsel(struct evlist *evlist, char *event_name)
2504{
2505 struct evsel *evsel = NULL;
2506 struct evsel *pos;
2507 bool full_name;
2508
2509 /* case 1 */
2510 if (event_name[0] == '%') {
2511 int nr = strtol(event_name+1, NULL, 0);
2512
2513 if (nr > evlist->core.nr_entries)
2514 return NULL;
2515
2516 evsel = evlist__first(evlist);
2517 while (--nr > 0)
2518 evsel = evsel__next(evsel);
2519
2520 return evsel;
2521 }
2522
2523 full_name = !!strchr(event_name, ':');
2524 evlist__for_each_entry(evlist, pos) {
2525 /* case 2 */
2526 if (full_name && !strcmp(pos->name, event_name))
2527 return pos;
2528 /* case 3 */
2529 if (!full_name && strstr(pos->name, event_name)) {
2530 if (evsel) {
2531 pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
2532 event_name, evsel->name, pos->name);
2533 return NULL;
2534 }
2535 evsel = pos;
2536 }
2537 }
2538
2539 return evsel;
2540}
2541
2542static int __dynamic_dimension__add(struct evsel *evsel,
2543 struct tep_format_field *field,
2544 bool raw_trace, int level)
2545{
2546 struct hpp_dynamic_entry *hde;
2547
2548 hde = __alloc_dynamic_entry(evsel, field, level);
2549 if (hde == NULL)
2550 return -ENOMEM;
2551
2552 hde->raw_trace = raw_trace;
2553
2554 perf_hpp__register_sort_field(&hde->hpp);
2555 return 0;
2556}
2557
2558static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level)
2559{
2560 int ret;
2561 struct tep_format_field *field;
2562
2563 field = evsel->tp_format->format.fields;
2564 while (field) {
2565 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2566 if (ret < 0)
2567 return ret;
2568
2569 field = field->next;
2570 }
2571 return 0;
2572}
2573
2574static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace,
2575 int level)
2576{
2577 int ret;
2578 struct evsel *evsel;
2579
2580 evlist__for_each_entry(evlist, evsel) {
2581 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
2582 continue;
2583
2584 ret = add_evsel_fields(evsel, raw_trace, level);
2585 if (ret < 0)
2586 return ret;
2587 }
2588 return 0;
2589}
2590
2591static int add_all_matching_fields(struct evlist *evlist,
2592 char *field_name, bool raw_trace, int level)
2593{
2594 int ret = -ESRCH;
2595 struct evsel *evsel;
2596 struct tep_format_field *field;
2597
2598 evlist__for_each_entry(evlist, evsel) {
2599 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
2600 continue;
2601
2602 field = tep_find_any_field(evsel->tp_format, field_name);
2603 if (field == NULL)
2604 continue;
2605
2606 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2607 if (ret < 0)
2608 break;
2609 }
2610 return ret;
2611}
2612
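/*
 * Add one or more dynamic sort keys for a --sort token.  Accepted forms:
 * "trace_fields" (all fields of all tracepoint events), "<field>" (a field
 * of any tracepoint event), "<event>.<field>" and "<event>.*", each
 * optionally suffixed with "/raw" to show raw values.
 */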
2613static int add_dynamic_entry(struct evlist *evlist, const char *tok,
2614 int level)
2615{
2616 char *str, *event_name, *field_name, *opt_name;
2617 struct evsel *evsel;
2618 struct tep_format_field *field;
2619 bool raw_trace = symbol_conf.raw_trace;
2620 int ret = 0;
2621
2622 if (evlist == NULL)
2623 return -ENOENT;
2624
2625 str = strdup(tok);
2626 if (str == NULL)
2627 return -ENOMEM;
2628
2629 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
2630 ret = -EINVAL;
2631 goto out;
2632 }
2633
2634 if (opt_name) {
2635 if (strcmp(opt_name, "raw")) {
2636 pr_debug("unsupported field option %s\n", opt_name);
2637 ret = -EINVAL;
2638 goto out;
2639 }
2640 raw_trace = true;
2641 }
2642
2643 if (!strcmp(field_name, "trace_fields")) {
2644 ret = add_all_dynamic_fields(evlist, raw_trace, level);
2645 goto out;
2646 }
2647
2648 if (event_name == NULL) {
2649 ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
2650 goto out;
2651 }
2652
2653 evsel = find_evsel(evlist, event_name);
2654 if (evsel == NULL) {
2655 pr_debug("Cannot find event: %s\n", event_name);
2656 ret = -ENOENT;
2657 goto out;
2658 }
2659
2660 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
2661 pr_debug("%s is not a tracepoint event\n", event_name);
2662 ret = -EINVAL;
2663 goto out;
2664 }
2665
2666 if (!strcmp(field_name, "*")) {
2667 ret = add_evsel_fields(evsel, raw_trace, level);
2668 } else {
2669 field = tep_find_any_field(evsel->tp_format, field_name);
2670 if (field == NULL) {
2671 pr_debug("Cannot find event field for %s.%s\n",
2672 event_name, field_name);
2673			ret = -ENOENT;
			goto out;
2674 }
2675
2676 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
2677 }
2678
2679out:
2680 free(str);
2681 return ret;
2682}
2683
2684static int __sort_dimension__add(struct sort_dimension *sd,
2685 struct perf_hpp_list *list,
2686 int level)
2687{
2688 if (sd->taken)
2689 return 0;
2690
2691 if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
2692 return -1;
2693
2694 if (sd->entry->se_collapse)
2695 list->need_collapse = 1;
2696
2697 sd->taken = 1;
2698
2699 return 0;
2700}
2701
2702static int __hpp_dimension__add(struct hpp_dimension *hd,
2703 struct perf_hpp_list *list,
2704 int level)
2705{
2706 struct perf_hpp_fmt *fmt;
2707
2708 if (hd->taken)
2709 return 0;
2710
2711 fmt = __hpp_dimension__alloc_hpp(hd, level);
2712 if (!fmt)
2713 return -1;
2714
2715 hd->taken = 1;
2716 perf_hpp_list__register_sort_field(list, fmt);
2717 return 0;
2718}
2719
2720static int __sort_dimension__add_output(struct perf_hpp_list *list,
2721 struct sort_dimension *sd)
2722{
2723 if (sd->taken)
2724 return 0;
2725
2726 if (__sort_dimension__add_hpp_output(sd, list) < 0)
2727 return -1;
2728
2729 sd->taken = 1;
2730 return 0;
2731}
2732
2733static int __hpp_dimension__add_output(struct perf_hpp_list *list,
2734 struct hpp_dimension *hd)
2735{
2736 struct perf_hpp_fmt *fmt;
2737
2738 if (hd->taken)
2739 return 0;
2740
2741 fmt = __hpp_dimension__alloc_hpp(hd, 0);
2742 if (!fmt)
2743 return -1;
2744
2745 hd->taken = 1;
2746 perf_hpp_list__column_register(list, fmt);
2747 return 0;
2748}
2749
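/*
 * Note: this assumes hpp_sort_dimensions[] entries appear in the same order
 * as their PERF_HPP__* indexes; the BUG_ON() below only bounds-checks @col.
 */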
2750int hpp_dimension__add_output(unsigned col)
2751{
2752 BUG_ON(col >= PERF_HPP__MAX_INDEX);
2753 return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
2754}
2755
2756int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
2757 struct evlist *evlist,
2758 int level)
2759{
2760 unsigned int i, j;
2761
2762	/*
2763	 * Check to see if there are any arch-specific
2764	 * sort dimensions not applicable to the current
2765	 * architecture. If so, skip that sort key since
2766	 * we don't want to display it in the output fields.
2767	 */
2768 for (j = 0; j < ARRAY_SIZE(arch_specific_sort_keys); j++) {
2769 if (!strcmp(arch_specific_sort_keys[j], tok) &&
2770 !arch_support_sort_key(tok)) {
2771 return 0;
2772 }
2773 }
2774
2775 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2776 struct sort_dimension *sd = &common_sort_dimensions[i];
2777
2778 if (strncasecmp(tok, sd->name, strlen(tok)))
2779 continue;
2780
2781 for (j = 0; j < ARRAY_SIZE(dynamic_headers); j++) {
2782 if (!strcmp(dynamic_headers[j], sd->name))
2783 sort_dimension_add_dynamic_header(sd);
2784 }
2785
2786 if (sd->entry == &sort_parent) {
2787 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
2788 if (ret) {
2789 char err[BUFSIZ];
2790
2791 regerror(ret, &parent_regex, err, sizeof(err));
2792 pr_err("Invalid regex: %s\n%s", parent_pattern, err);
2793 return -EINVAL;
2794 }
2795 list->parent = 1;
2796 } else if (sd->entry == &sort_sym) {
2797 list->sym = 1;
2798			/*
2799			 * perf diff displays the performance difference between
2800			 * two or more perf.data files. Those files could come
2801			 * from different binaries, so we should not compare
2802			 * their IPs, but their symbol names.
2803			 */
2804 if (sort__mode == SORT_MODE__DIFF)
2805 sd->entry->se_collapse = sort__sym_sort;
2806
2807 } else if (sd->entry == &sort_dso) {
2808 list->dso = 1;
2809 } else if (sd->entry == &sort_socket) {
2810 list->socket = 1;
2811 } else if (sd->entry == &sort_thread) {
2812 list->thread = 1;
2813 } else if (sd->entry == &sort_comm) {
2814 list->comm = 1;
2815 }
2816
2817 return __sort_dimension__add(sd, list, level);
2818 }
2819
2820 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2821 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2822
2823 if (strncasecmp(tok, hd->name, strlen(tok)))
2824 continue;
2825
2826 return __hpp_dimension__add(hd, list, level);
2827 }
2828
2829 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2830 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2831
2832 if (strncasecmp(tok, sd->name, strlen(tok)))
2833 continue;
2834
2835 if (sort__mode != SORT_MODE__BRANCH)
2836 return -EINVAL;
2837
2838 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
2839 list->sym = 1;
2840
2841 __sort_dimension__add(sd, list, level);
2842 return 0;
2843 }
2844
2845 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2846 struct sort_dimension *sd = &memory_sort_dimensions[i];
2847
2848 if (strncasecmp(tok, sd->name, strlen(tok)))
2849 continue;
2850
2851 if (sort__mode != SORT_MODE__MEMORY)
2852 return -EINVAL;
2853
2854 if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
2855 return -EINVAL;
2856
2857 if (sd->entry == &sort_mem_daddr_sym)
2858 list->sym = 1;
2859
2860 __sort_dimension__add(sd, list, level);
2861 return 0;
2862 }
2863
2864 if (!add_dynamic_entry(evlist, tok, level))
2865 return 0;
2866
2867 return -ESRCH;
2868}
2869
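/*
 * Parse the sort order string.  Keys are separated by commas or spaces, and
 * a '{key,key}' group keeps its members at the same hierarchy level.  As an
 * illustrative example, "--sort '{comm,dso},sym'" puts comm and dso on one
 * level and sym on the next.
 */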
2870static int setup_sort_list(struct perf_hpp_list *list, char *str,
2871 struct evlist *evlist)
2872{
2873 char *tmp, *tok;
2874 int ret = 0;
2875 int level = 0;
2876 int next_level = 1;
2877 bool in_group = false;
2878
2879 do {
2880 tok = str;
2881 tmp = strpbrk(str, "{}, ");
2882 if (tmp) {
2883 if (in_group)
2884 next_level = level;
2885 else
2886 next_level = level + 1;
2887
2888 if (*tmp == '{')
2889 in_group = true;
2890 else if (*tmp == '}')
2891 in_group = false;
2892
2893 *tmp = '\0';
2894 str = tmp + 1;
2895 }
2896
2897 if (*tok) {
2898 ret = sort_dimension__add(list, tok, evlist, level);
2899 if (ret == -EINVAL) {
2900 if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
2901 ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
2902 else
2903 ui__error("Invalid --sort key: `%s'", tok);
2904 break;
2905 } else if (ret == -ESRCH) {
2906 ui__error("Unknown --sort key: `%s'", tok);
2907 break;
2908 }
2909 }
2910
2911 level = next_level;
2912 } while (tmp);
2913
2914 return ret;
2915}
2916
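/*
 * Pick the default sort order for the current sort mode.  When every event
 * in the evlist is a tracepoint, switch to SORT_MODE__TRACEPOINT so the
 * "trace" key (or "trace_fields" with raw trace output) is used instead.
 */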
2917static const char *get_default_sort_order(struct evlist *evlist)
2918{
2919 const char *default_sort_orders[] = {
2920 default_sort_order,
2921 default_branch_sort_order,
2922 default_mem_sort_order,
2923 default_top_sort_order,
2924 default_diff_sort_order,
2925 default_tracepoint_sort_order,
2926 };
2927 bool use_trace = true;
2928 struct evsel *evsel;
2929
2930 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
2931
2932 if (evlist == NULL || evlist__empty(evlist))
2933 goto out_no_evlist;
2934
2935 evlist__for_each_entry(evlist, evsel) {
2936 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
2937 use_trace = false;
2938 break;
2939 }
2940 }
2941
2942 if (use_trace) {
2943 sort__mode = SORT_MODE__TRACEPOINT;
2944 if (symbol_conf.raw_trace)
2945 return "trace_fields";
2946 }
2947out_no_evlist:
2948 return default_sort_orders[sort__mode];
2949}
2950
2951static int setup_sort_order(struct evlist *evlist)
2952{
2953 char *new_sort_order;
2954
2955 /*
2956 * Append '+'-prefixed sort order to the default sort
2957 * order string.
2958 */
2959 if (!sort_order || is_strict_order(sort_order))
2960 return 0;
2961
2962 if (sort_order[1] == '\0') {
2963 ui__error("Invalid --sort key: `+'");
2964 return -EINVAL;
2965 }
2966
2967	/*
2968	 * We allocate a new sort_order string, but we never free it,
2969	 * because it is referenced throughout the rest of the code.
2970	 */
2971 if (asprintf(&new_sort_order, "%s,%s",
2972 get_default_sort_order(evlist), sort_order + 1) < 0) {
2973 pr_err("Not enough memory to set up --sort");
2974 return -ENOMEM;
2975 }
2976
2977 sort_order = new_sort_order;
2978 return 0;
2979}
2980
2981/*
2982 * Adds the 'pre,' prefix to 'str' if 'pre' is
2983 * not already part of 'str'.
2984 */
2985static char *prefix_if_not_in(const char *pre, char *str)
2986{
2987 char *n;
2988
2989 if (!str || strstr(str, pre))
2990 return str;
2991
2992 if (asprintf(&n, "%s,%s", pre, str) < 0)
2993 n = NULL;
2994
2995 free(str);
2996 return n;
2997}
2998
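/*
 * For backward compatibility, prepend "overhead" (and "overhead_children"
 * when callchain accumulation is enabled) to the sort keys.  perf diff is
 * left alone since it sets up its own output fields.
 */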
2999static char *setup_overhead(char *keys)
3000{
3001 if (sort__mode == SORT_MODE__DIFF)
3002 return keys;
3003
3004 keys = prefix_if_not_in("overhead", keys);
3005
3006 if (symbol_conf.cumulate_callchain)
3007 keys = prefix_if_not_in("overhead_children", keys);
3008
3009 return keys;
3010}
3011
3012static int __setup_sorting(struct evlist *evlist)
3013{
3014 char *str;
3015 const char *sort_keys;
3016 int ret = 0;
3017
3018 ret = setup_sort_order(evlist);
3019 if (ret)
3020 return ret;
3021
3022 sort_keys = sort_order;
3023 if (sort_keys == NULL) {
3024 if (is_strict_order(field_order)) {
3025			/*
3026			 * If the user specified a field order but no sort order,
3027			 * we'll honor it and not add the default sort orders.
3028			 */
3029 return 0;
3030 }
3031
3032 sort_keys = get_default_sort_order(evlist);
3033 }
3034
3035 str = strdup(sort_keys);
3036 if (str == NULL) {
3037 pr_err("Not enough memory to setup sort keys");
3038 return -ENOMEM;
3039 }
3040
3041 /*
3042 * Prepend overhead fields for backward compatibility.
3043 */
3044 if (!is_strict_order(field_order)) {
3045 str = setup_overhead(str);
3046 if (str == NULL) {
3047 pr_err("Not enough memory to setup overhead keys");
3048 return -ENOMEM;
3049 }
3050 }
3051
3052 ret = setup_sort_list(&perf_hpp_list, str, evlist);
3053
3054 free(str);
3055 return ret;
3056}
3057
3058void perf_hpp__set_elide(int idx, bool elide)
3059{
3060 struct perf_hpp_fmt *fmt;
3061 struct hpp_sort_entry *hse;
3062
3063 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3064 if (!perf_hpp__is_sort_entry(fmt))
3065 continue;
3066
3067 hse = container_of(fmt, struct hpp_sort_entry, hpp);
3068 if (hse->se->se_width_idx == idx) {
3069 fmt->elide = elide;
3070 break;
3071 }
3072 }
3073}
3074
3075static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
3076{
3077 if (list && strlist__nr_entries(list) == 1) {
3078 if (fp != NULL)
3079 fprintf(fp, "# %s: %s\n", list_name,
3080 strlist__entry(list, 0)->s);
3081 return true;
3082 }
3083 return false;
3084}
3085
3086static bool get_elide(int idx, FILE *output)
3087{
3088 switch (idx) {
3089 case HISTC_SYMBOL:
3090 return __get_elide(symbol_conf.sym_list, "symbol", output);
3091 case HISTC_DSO:
3092 return __get_elide(symbol_conf.dso_list, "dso", output);
3093 case HISTC_COMM:
3094 return __get_elide(symbol_conf.comm_list, "comm", output);
3095 default:
3096 break;
3097 }
3098
3099 if (sort__mode != SORT_MODE__BRANCH)
3100 return false;
3101
3102 switch (idx) {
3103 case HISTC_SYMBOL_FROM:
3104 return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
3105 case HISTC_SYMBOL_TO:
3106 return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
3107 case HISTC_DSO_FROM:
3108 return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
3109 case HISTC_DSO_TO:
3110 return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
3111 default:
3112 break;
3113 }
3114
3115 return false;
3116}
3117
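/*
 * Elide (hide) a column when the corresponding filter list has exactly one
 * entry, since every row would then show the same value; the value is
 * printed once as a "# <name>: <value>" header line instead.  If that would
 * hide every sort column, un-elide them all.
 */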
3118void sort__setup_elide(FILE *output)
3119{
3120 struct perf_hpp_fmt *fmt;
3121 struct hpp_sort_entry *hse;
3122
3123 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3124 if (!perf_hpp__is_sort_entry(fmt))
3125 continue;
3126
3127 hse = container_of(fmt, struct hpp_sort_entry, hpp);
3128 fmt->elide = get_elide(hse->se->se_width_idx, output);
3129 }
3130
3131	/*
3132	 * It makes no sense to elide all of the sort entries.
3133	 * Just revert them so they show up again.
3134	 */
3135 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3136 if (!perf_hpp__is_sort_entry(fmt))
3137 continue;
3138
3139 if (!fmt->elide)
3140 return;
3141 }
3142
3143 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3144 if (!perf_hpp__is_sort_entry(fmt))
3145 continue;
3146
3147 fmt->elide = false;
3148 }
3149}
3150
3151int output_field_add(struct perf_hpp_list *list, char *tok)
3152{
3153 unsigned int i;
3154
3155 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
3156 struct sort_dimension *sd = &common_sort_dimensions[i];
3157
3158 if (strncasecmp(tok, sd->name, strlen(tok)))
3159 continue;
3160
3161 return __sort_dimension__add_output(list, sd);
3162 }
3163
3164 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
3165 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
3166
3167 if (strncasecmp(tok, hd->name, strlen(tok)))
3168 continue;
3169
3170 return __hpp_dimension__add_output(list, hd);
3171 }
3172
3173 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
3174 struct sort_dimension *sd = &bstack_sort_dimensions[i];
3175
3176 if (strncasecmp(tok, sd->name, strlen(tok)))
3177 continue;
3178
3179 if (sort__mode != SORT_MODE__BRANCH)
3180 return -EINVAL;
3181
3182 return __sort_dimension__add_output(list, sd);
3183 }
3184
3185 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
3186 struct sort_dimension *sd = &memory_sort_dimensions[i];
3187
3188 if (strncasecmp(tok, sd->name, strlen(tok)))
3189 continue;
3190
3191 if (sort__mode != SORT_MODE__MEMORY)
3192 return -EINVAL;
3193
3194 return __sort_dimension__add_output(list, sd);
3195 }
3196
3197 return -ESRCH;
3198}
3199
3200static int setup_output_list(struct perf_hpp_list *list, char *str)
3201{
3202 char *tmp, *tok;
3203 int ret = 0;
3204
3205 for (tok = strtok_r(str, ", ", &tmp);
3206 tok; tok = strtok_r(NULL, ", ", &tmp)) {
3207 ret = output_field_add(list, tok);
3208 if (ret == -EINVAL) {
3209 ui__error("Invalid --fields key: `%s'", tok);
3210 break;
3211 } else if (ret == -ESRCH) {
3212 ui__error("Unknown --fields key: `%s'", tok);
3213 break;
3214 }
3215 }
3216
3217 return ret;
3218}
3219
3220void reset_dimensions(void)
3221{
3222 unsigned int i;
3223
3224 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
3225 common_sort_dimensions[i].taken = 0;
3226
3227 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
3228 hpp_sort_dimensions[i].taken = 0;
3229
3230 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
3231 bstack_sort_dimensions[i].taken = 0;
3232
3233 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
3234 memory_sort_dimensions[i].taken = 0;
3235}
3236
3237bool is_strict_order(const char *order)
3238{
3239 return order && (*order != '+');
3240}
3241
3242static int __setup_output_field(void)
3243{
3244 char *str, *strp;
3245 int ret = -EINVAL;
3246
3247 if (field_order == NULL)
3248 return 0;
3249
3250 strp = str = strdup(field_order);
3251 if (str == NULL) {
3252 pr_err("Not enough memory to setup output fields");
3253 return -ENOMEM;
3254 }
3255
3256 if (!is_strict_order(field_order))
3257 strp++;
3258
3259 if (!strlen(strp)) {
3260 ui__error("Invalid --fields key: `+'");
3261 goto out;
3262 }
3263
3264 ret = setup_output_list(&perf_hpp_list, strp);
3265
3266out:
3267 free(str);
3268 return ret;
3269}
3270
3271int setup_sorting(struct evlist *evlist)
3272{
3273 int err;
3274
3275 err = __setup_sorting(evlist);
3276 if (err < 0)
3277 return err;
3278
3279 if (parent_pattern != default_parent_pattern) {
3280 err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
3281 if (err < 0)
3282 return err;
3283 }
3284
3285 reset_dimensions();
3286
3287 /*
3288 * perf diff doesn't use default hpp output fields.
3289 */
3290 if (sort__mode != SORT_MODE__DIFF)
3291 perf_hpp__init();
3292
3293 err = __setup_output_field();
3294 if (err < 0)
3295 return err;
3296
3297 /* copy sort keys to output fields */
3298 perf_hpp__setup_output_field(&perf_hpp_list);
3299 /* and then copy output fields to sort keys */
3300 perf_hpp__append_sort_keys(&perf_hpp_list);
3301
3302 /* setup hists-specific output fields */
3303 if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
3304 return -1;
3305
3306 return 0;
3307}
3308
3309void reset_output_field(void)
3310{
3311 perf_hpp_list.need_collapse = 0;
3312 perf_hpp_list.parent = 0;
3313 perf_hpp_list.sym = 0;
3314 perf_hpp_list.dso = 0;
3315
3316 field_order = NULL;
3317 sort_order = NULL;
3318
3319 reset_dimensions();
3320 perf_hpp__reset_output_field(&perf_hpp_list);
3321}
3322
3323#define INDENT (3*8 + 1)
3324
3325static void add_key(struct strbuf *sb, const char *str, int *llen)
3326{
3327 if (*llen >= 75) {
3328 strbuf_addstr(sb, "\n\t\t\t ");
3329 *llen = INDENT;
3330 }
3331 strbuf_addf(sb, " %s", str);
3332 *llen += strlen(str) + 1;
3333}
3334
3335static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
3336 int *llen)
3337{
3338 int i;
3339
3340 for (i = 0; i < n; i++)
3341 add_key(sb, s[i].name, llen);
3342}
3343
3344static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
3345 int *llen)
3346{
3347 int i;
3348
3349 for (i = 0; i < n; i++)
3350 add_key(sb, s[i].name, llen);
3351}
3352
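/*
 * Build a help string listing every available sort key after the given
 * prefix, wrapping lines at roughly 75 columns.
 */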
3353char *sort_help(const char *prefix)
3354{
3355 struct strbuf sb;
3356 char *s;
3357 int len = strlen(prefix) + INDENT;
3358
3359 strbuf_init(&sb, 300);
3360 strbuf_addstr(&sb, prefix);
3361 add_hpp_sort_string(&sb, hpp_sort_dimensions,
3362 ARRAY_SIZE(hpp_sort_dimensions), &len);
3363 add_sort_string(&sb, common_sort_dimensions,
3364 ARRAY_SIZE(common_sort_dimensions), &len);
3365 add_sort_string(&sb, bstack_sort_dimensions,
3366 ARRAY_SIZE(bstack_sort_dimensions), &len);
3367 add_sort_string(&sb, memory_sort_dimensions,
3368 ARRAY_SIZE(memory_sort_dimensions), &len);
3369 s = strbuf_detach(&sb, NULL);
3370 strbuf_release(&sb);
3371 return s;
3372}