1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
4 *
5 * Parts came from builtin-annotate.c, see those files for further
6 * copyright notes.
7 */
8
9#include <errno.h>
10#include <inttypes.h>
11#include <libgen.h>
12#include <stdlib.h>
13#include "util.h" // hex_width()
14#include "ui/ui.h"
15#include "sort.h"
16#include "build-id.h"
17#include "color.h"
18#include "config.h"
19#include "dso.h"
20#include "env.h"
21#include "map.h"
22#include "maps.h"
23#include "symbol.h"
24#include "srcline.h"
25#include "units.h"
26#include "debug.h"
27#include "annotate.h"
28#include "annotate-data.h"
29#include "evsel.h"
30#include "evlist.h"
31#include "bpf-event.h"
32#include "bpf-utils.h"
33#include "block-range.h"
34#include "string2.h"
35#include "dwarf-regs.h"
36#include "util/event.h"
37#include "util/sharded_mutex.h"
38#include "arch/common.h"
39#include "namespaces.h"
40#include "thread.h"
41#include "hashmap.h"
42#include <regex.h>
43#include <linux/bitops.h>
44#include <linux/kernel.h>
45#include <linux/string.h>
46#include <linux/zalloc.h>
47#include <subcmd/parse-options.h>
48#include <subcmd/run-command.h>
49
50/* FIXME: For the HE_COLORSET */
51#include "ui/browser.h"
52
53/*
54 * FIXME: Using the same values as slang.h,
55 * but that header may not be available everywhere
56 */
57#define LARROW_CHAR ((unsigned char)',')
58#define RARROW_CHAR ((unsigned char)'+')
59#define DARROW_CHAR ((unsigned char)'.')
60#define UARROW_CHAR ((unsigned char)'-')
61
62#include <linux/ctype.h>
63
64/* global annotation options */
65struct annotation_options annotate_opts;
66
67static regex_t file_lineno;
68
69static struct ins_ops *ins__find(struct arch *arch, const char *name);
70static void ins__sort(struct arch *arch);
71static int disasm_line__parse(char *line, const char **namep, char **rawp);
72static int call__scnprintf(struct ins *ins, char *bf, size_t size,
73 struct ins_operands *ops, int max_ins_name);
74static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
75 struct ins_operands *ops, int max_ins_name);
76
77struct arch {
78 const char *name;
79 struct ins *instructions;
80 size_t nr_instructions;
81 size_t nr_instructions_allocated;
82 struct ins_ops *(*associate_instruction_ops)(struct arch *arch, const char *name);
83 bool sorted_instructions;
84 bool initialized;
85 const char *insn_suffix;
86 void *priv;
87 unsigned int model;
88 unsigned int family;
89 int (*init)(struct arch *arch, char *cpuid);
90 bool (*ins_is_fused)(struct arch *arch, const char *ins1,
91 const char *ins2);
92 struct {
93 char comment_char;
94 char skip_functions_char;
95 char register_char;
96 char memory_ref_char;
97 } objdump;
98};
99
100static struct ins_ops call_ops;
101static struct ins_ops dec_ops;
102static struct ins_ops jump_ops;
103static struct ins_ops mov_ops;
104static struct ins_ops nop_ops;
105static struct ins_ops lock_ops;
106static struct ins_ops ret_ops;
107
108/* Data type collection debug statistics */
109struct annotated_data_stat ann_data_stat;
110LIST_HEAD(ann_insn_stat);
111
112/* Pseudo data types */
113struct annotated_data_type stackop_type = {
114 .self = {
115 .type_name = (char *)"(stack operation)",
116 .children = LIST_HEAD_INIT(stackop_type.self.children),
117 },
118};
119
120static int arch__grow_instructions(struct arch *arch)
121{
122 struct ins *new_instructions;
123 size_t new_nr_allocated;
124
125 if (arch->nr_instructions_allocated == 0 && arch->instructions)
126 goto grow_from_non_allocated_table;
127
128 new_nr_allocated = arch->nr_instructions_allocated + 128;
129 new_instructions = realloc(arch->instructions, new_nr_allocated * sizeof(struct ins));
130 if (new_instructions == NULL)
131 return -1;
132
133out_update_instructions:
134 arch->instructions = new_instructions;
135 arch->nr_instructions_allocated = new_nr_allocated;
136 return 0;
137
138grow_from_non_allocated_table:
139 new_nr_allocated = arch->nr_instructions + 128;
140 new_instructions = calloc(new_nr_allocated, sizeof(struct ins));
141 if (new_instructions == NULL)
142 return -1;
143
144 memcpy(new_instructions, arch->instructions, arch->nr_instructions * sizeof(struct ins));
145 goto out_update_instructions;
146}
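
/*
 * Editor's note: illustrative only, not part of the upstream file.  The
 * table grows in chunks of 128 entries: architectures that build their
 * instruction table dynamically start from a NULL table, so the first
 * grow is effectively a malloc() of 128 entries, while the
 * grow_from_non_allocated_table path exists to migrate a pre-populated
 * static table into heap storage before appending to it.
 */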
147
148static int arch__associate_ins_ops(struct arch* arch, const char *name, struct ins_ops *ops)
149{
150 struct ins *ins;
151
152 if (arch->nr_instructions == arch->nr_instructions_allocated &&
153 arch__grow_instructions(arch))
154 return -1;
155
156 ins = &arch->instructions[arch->nr_instructions];
157 ins->name = strdup(name);
158 if (!ins->name)
159 return -1;
160
161 ins->ops = ops;
162 arch->nr_instructions++;
163
164 ins__sort(arch);
165 return 0;
166}
167
168#include "arch/arc/annotate/instructions.c"
169#include "arch/arm/annotate/instructions.c"
170#include "arch/arm64/annotate/instructions.c"
171#include "arch/csky/annotate/instructions.c"
172#include "arch/loongarch/annotate/instructions.c"
173#include "arch/mips/annotate/instructions.c"
174#include "arch/x86/annotate/instructions.c"
175#include "arch/powerpc/annotate/instructions.c"
176#include "arch/riscv64/annotate/instructions.c"
177#include "arch/s390/annotate/instructions.c"
178#include "arch/sparc/annotate/instructions.c"
179
180static struct arch architectures[] = {
181 {
182 .name = "arc",
183 .init = arc__annotate_init,
184 },
185 {
186 .name = "arm",
187 .init = arm__annotate_init,
188 },
189 {
190 .name = "arm64",
191 .init = arm64__annotate_init,
192 },
193 {
194 .name = "csky",
195 .init = csky__annotate_init,
196 },
197 {
198 .name = "mips",
199 .init = mips__annotate_init,
200 .objdump = {
201 .comment_char = '#',
202 },
203 },
204 {
205 .name = "x86",
206 .init = x86__annotate_init,
207 .instructions = x86__instructions,
208 .nr_instructions = ARRAY_SIZE(x86__instructions),
209 .insn_suffix = "bwlq",
210 .objdump = {
211 .comment_char = '#',
212 .register_char = '%',
213 .memory_ref_char = '(',
214 },
215 },
216 {
217 .name = "powerpc",
218 .init = powerpc__annotate_init,
219 },
220 {
221 .name = "riscv64",
222 .init = riscv64__annotate_init,
223 },
224 {
225 .name = "s390",
226 .init = s390__annotate_init,
227 .objdump = {
228 .comment_char = '#',
229 },
230 },
231 {
232 .name = "sparc",
233 .init = sparc__annotate_init,
234 .objdump = {
235 .comment_char = '#',
236 },
237 },
238 {
239 .name = "loongarch",
240 .init = loongarch__annotate_init,
241 .objdump = {
242 .comment_char = '#',
243 },
244 },
245};
246
247static void ins__delete(struct ins_operands *ops)
248{
249 if (ops == NULL)
250 return;
251 zfree(&ops->source.raw);
252 zfree(&ops->source.name);
253 zfree(&ops->target.raw);
254 zfree(&ops->target.name);
255}
256
257static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
258 struct ins_operands *ops, int max_ins_name)
259{
260 return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->raw);
261}
262
263int ins__scnprintf(struct ins *ins, char *bf, size_t size,
264 struct ins_operands *ops, int max_ins_name)
265{
266 if (ins->ops->scnprintf)
267 return ins->ops->scnprintf(ins, bf, size, ops, max_ins_name);
268
269 return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
270}
271
272bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2)
273{
274 if (!arch || !arch->ins_is_fused)
275 return false;
276
277 return arch->ins_is_fused(arch, ins1, ins2);
278}
279
280static int call__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms)
281{
282 char *endptr, *tok, *name;
283 struct map *map = ms->map;
284 struct addr_map_symbol target = {
285 .ms = { .map = map, },
286 };
287
288 ops->target.addr = strtoull(ops->raw, &endptr, 16);
289
290 name = strchr(endptr, '<');
291 if (name == NULL)
292 goto indirect_call;
293
294 name++;
295
296 if (arch->objdump.skip_functions_char &&
297 strchr(name, arch->objdump.skip_functions_char))
298 return -1;
299
300 tok = strchr(name, '>');
301 if (tok == NULL)
302 return -1;
303
304 *tok = '\0';
305 ops->target.name = strdup(name);
306 *tok = '>';
307
308 if (ops->target.name == NULL)
309 return -1;
310find_target:
311 target.addr = map__objdump_2mem(map, ops->target.addr);
312
313 if (maps__find_ams(ms->maps, &target) == 0 &&
314 map__rip_2objdump(target.ms.map, map__map_ip(target.ms.map, target.addr)) == ops->target.addr)
315 ops->target.sym = target.ms.sym;
316
317 return 0;
318
319indirect_call:
320 tok = strchr(endptr, '*');
321 if (tok != NULL) {
322 endptr++;
323
324 /* Indirect call can use a non-rip register and offset: callq *0x8(%rbx).
325 * Do not parse such instructions. */
326 if (strstr(endptr, "(%r") == NULL)
327 ops->target.addr = strtoull(endptr, NULL, 16);
328 }
329 goto find_target;
330}
331
332static int call__scnprintf(struct ins *ins, char *bf, size_t size,
333 struct ins_operands *ops, int max_ins_name)
334{
335 if (ops->target.sym)
336 return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.sym->name);
337
338 if (ops->target.addr == 0)
339 return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
340
341 if (ops->target.name)
342 return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.name);
343
344 return scnprintf(bf, size, "%-*s *%" PRIx64, max_ins_name, ins->name, ops->target.addr);
345}
346
347static struct ins_ops call_ops = {
348 .parse = call__parse,
349 .scnprintf = call__scnprintf,
350};
351
352bool ins__is_call(const struct ins *ins)
353{
354 return ins->ops == &call_ops || ins->ops == &s390_call_ops || ins->ops == &loongarch_call_ops;
355}
356
357/*
358 * Prevent matching commas in the comment section, e.g.:
359 * ffff200008446e70: b.cs ffff2000084470f4 <generic_exec_single+0x314> // b.hs, b.nlast
360 *
361 * and skip commas that are part of function arguments, e.g.:
362 * 1d8b4ac <linemap_lookup(line_maps const*, unsigned int)+0xcc>
363 */
364static inline const char *validate_comma(const char *c, struct ins_operands *ops)
365{
366 if (ops->jump.raw_comment && c > ops->jump.raw_comment)
367 return NULL;
368
369 if (ops->jump.raw_func_start && c > ops->jump.raw_func_start)
370 return NULL;
371
372 return c;
373}
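
/*
 * Editor's note: an illustrative sketch, not part of the upstream file.
 * It shows which comma validate_comma() rejects for an arm64 branch line:
 * the comma inside the trailing "// b.hs, b.nlast" comment lies beyond
 * jump.raw_comment, so it is not treated as an operand separator.  The
 * comment character is hard-coded here for the example only.
 */
static void __maybe_unused validate_comma_example(void)
{
	struct ins_operands ops = {
		.raw = (char *)"ffff2000084470f4 <generic_exec_single+0x314>  // b.hs, b.nlast",
	};
	const char *c = strchr(ops.raw, ',');

	ops.jump.raw_comment = strchr(ops.raw, '/');
	ops.jump.raw_func_start = strchr(ops.raw, '<');

	if (validate_comma(c, &ops) == NULL)
		pr_debug("comma belongs to the comment, ignore it\n");
}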
374
375static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms)
376{
377 struct map *map = ms->map;
378 struct symbol *sym = ms->sym;
379 struct addr_map_symbol target = {
380 .ms = { .map = map, },
381 };
382 const char *c = strchr(ops->raw, ',');
383 u64 start, end;
384
385 ops->jump.raw_comment = strchr(ops->raw, arch->objdump.comment_char);
386 ops->jump.raw_func_start = strchr(ops->raw, '<');
387
388 c = validate_comma(c, ops);
389
390 /*
391 * Examples of lines to parse for the _cpp_lex_token@@Base
392 * function:
393 *
394 * 1159e6c: jne 115aa32 <_cpp_lex_token@@Base+0xf92>
395 * 1159e8b: jne c469be <cpp_named_operator2name@@Base+0xa72>
396 *
397 * The first is a jump to an offset inside the same function,
398 * the second is to another function, i.e. that 0xa72 is an
399 * offset in the cpp_named_operator2name@@base function.
400 */
401 /*
402 * skip over up to 2 possible operands to get to the address, e.g.:
403 * tbnz w0, #26, ffff0000083cd190 <security_file_permission+0xd0>
404 */
405 if (c++ != NULL) {
406 ops->target.addr = strtoull(c, NULL, 16);
407 if (!ops->target.addr) {
408 c = strchr(c, ',');
409 c = validate_comma(c, ops);
410 if (c++ != NULL)
411 ops->target.addr = strtoull(c, NULL, 16);
412 }
413 } else {
414 ops->target.addr = strtoull(ops->raw, NULL, 16);
415 }
416
417 target.addr = map__objdump_2mem(map, ops->target.addr);
418 start = map__unmap_ip(map, sym->start);
419 end = map__unmap_ip(map, sym->end);
420
421 ops->target.outside = target.addr < start || target.addr > end;
422
423 /*
424 * FIXME: things like this in _cpp_lex_token (gcc's cc1 program):
425
426 cpp_named_operator2name@@Base+0xa72
427
428 * Point to a place that is after the cpp_named_operator2name
429 * boundaries, i.e. in the ELF symbol table for cc1
430 * cpp_named_operator2name is marked as being 32-bytes long, but it in
431 * fact is much larger than that, so we seem to need a symbols__find()
432 * routine that looks for >= current->start and < next_symbol->start,
433 * possibly just for C++ objects?
434 *
435 * For now lets just make some progress by marking jumps to outside the
436 * current function as call like.
437 *
438 * Actual navigation will come next, with further understanding of how
439 * the symbol searching and disassembly should be done.
440 */
441 if (maps__find_ams(ms->maps, &target) == 0 &&
442 map__rip_2objdump(target.ms.map, map__map_ip(target.ms.map, target.addr)) == ops->target.addr)
443 ops->target.sym = target.ms.sym;
444
445 if (!ops->target.outside) {
446 ops->target.offset = target.addr - start;
447 ops->target.offset_avail = true;
448 } else {
449 ops->target.offset_avail = false;
450 }
451
452 return 0;
453}
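
/*
 * Editor's note: a worked example, not part of the upstream file.  For the
 * arm64 line "tbnz w0, #26, ffff0000083cd190 <security_file_permission+0xd0>"
 * ops->raw is "w0, #26, ffff0000083cd190 <...>": the text after the first
 * comma starts with "#26", so strtoull() yields 0 and the parser moves on
 * to the second comma, where it reads the target 0xffff0000083cd190.  For
 * "jne 115aa32 <_cpp_lex_token@@Base+0xf92>" there is no comma at all, so
 * the address is taken from the start of ops->raw instead.
 */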
454
455static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
456 struct ins_operands *ops, int max_ins_name)
457{
458 const char *c;
459
460 if (!ops->target.addr || ops->target.offset < 0)
461 return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
462
463 if (ops->target.outside && ops->target.sym != NULL)
464 return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.sym->name);
465
466 c = strchr(ops->raw, ',');
467 c = validate_comma(c, ops);
468
469 if (c != NULL) {
470 const char *c2 = strchr(c + 1, ',');
471
472 c2 = validate_comma(c2, ops);
473 /* check for 3-op insn */
474 if (c2 != NULL)
475 c = c2;
476 c++;
477
478 /* mirror arch objdump's space-after-comma style */
479 if (*c == ' ')
480 c++;
481 }
482
483 return scnprintf(bf, size, "%-*s %.*s%" PRIx64, max_ins_name,
484 ins->name, c ? c - ops->raw : 0, ops->raw,
485 ops->target.offset);
486}
487
488static void jump__delete(struct ins_operands *ops __maybe_unused)
489{
490 /*
491 * The ops->jump.raw_comment and ops->jump.raw_func_start belong to the
492 * raw string, don't free them.
493 */
494}
495
496static struct ins_ops jump_ops = {
497 .free = jump__delete,
498 .parse = jump__parse,
499 .scnprintf = jump__scnprintf,
500};
501
502bool ins__is_jump(const struct ins *ins)
503{
504 return ins->ops == &jump_ops || ins->ops == &loongarch_jump_ops;
505}
506
507static int comment__symbol(char *raw, char *comment, u64 *addrp, char **namep)
508{
509 char *endptr, *name, *t;
510
511 if (strstr(raw, "(%rip)") == NULL)
512 return 0;
513
514 *addrp = strtoull(comment, &endptr, 16);
515 if (endptr == comment)
516 return 0;
517 name = strchr(endptr, '<');
518 if (name == NULL)
519 return -1;
520
521 name++;
522
523 t = strchr(name, '>');
524 if (t == NULL)
525 return 0;
526
527 *t = '\0';
528 *namep = strdup(name);
529 *t = '>';
530
531 return 0;
532}
533
534static int lock__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms)
535{
536 ops->locked.ops = zalloc(sizeof(*ops->locked.ops));
537 if (ops->locked.ops == NULL)
538 return 0;
539
540 if (disasm_line__parse(ops->raw, &ops->locked.ins.name, &ops->locked.ops->raw) < 0)
541 goto out_free_ops;
542
543 ops->locked.ins.ops = ins__find(arch, ops->locked.ins.name);
544
545 if (ops->locked.ins.ops == NULL)
546 goto out_free_ops;
547
548 if (ops->locked.ins.ops->parse &&
549 ops->locked.ins.ops->parse(arch, ops->locked.ops, ms) < 0)
550 goto out_free_ops;
551
552 return 0;
553
554out_free_ops:
555 zfree(&ops->locked.ops);
556 return 0;
557}
558
559static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
560 struct ins_operands *ops, int max_ins_name)
561{
562 int printed;
563
564 if (ops->locked.ins.ops == NULL)
565 return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
566
567 printed = scnprintf(bf, size, "%-*s ", max_ins_name, ins->name);
568 return printed + ins__scnprintf(&ops->locked.ins, bf + printed,
569 size - printed, ops->locked.ops, max_ins_name);
570}
571
572static void lock__delete(struct ins_operands *ops)
573{
574 struct ins *ins = &ops->locked.ins;
575
576 if (ins->ops && ins->ops->free)
577 ins->ops->free(ops->locked.ops);
578 else
579 ins__delete(ops->locked.ops);
580
581 zfree(&ops->locked.ops);
582 zfree(&ops->target.raw);
583 zfree(&ops->target.name);
584}
585
586static struct ins_ops lock_ops = {
587 .free = lock__delete,
588 .parse = lock__parse,
589 .scnprintf = lock__scnprintf,
590};
591
592/*
593 * Check if the operand has more than one register, as in x86 SIB addressing:
594 * 0x1234(%rax, %rbx, 8)
595 *
596 * It doesn't care about segment selectors like %gs:0x5678(%rcx), so it only
597 * checks the input string after 'memory_ref_char', if present.
598 */
599static bool check_multi_regs(struct arch *arch, const char *op)
600{
601 int count = 0;
602
603 if (arch->objdump.register_char == 0)
604 return false;
605
606 if (arch->objdump.memory_ref_char) {
607 op = strchr(op, arch->objdump.memory_ref_char);
608 if (op == NULL)
609 return false;
610 }
611
612 while ((op = strchr(op, arch->objdump.register_char)) != NULL) {
613 count++;
614 op++;
615 }
616
617 return count > 1;
618}
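
/*
 * Editor's note: a minimal sketch, not part of the upstream file, showing
 * what check_multi_regs() reports with the x86 objdump settings above
 * (register_char '%', memory_ref_char '('): a SIB operand has two '%'
 * after the '(', a plain register reference does not.
 */
static bool __maybe_unused check_multi_regs_example(struct arch *arch)
{
	/* expected: true for SIB addressing, false for a single register */
	return check_multi_regs(arch, "0x1234(%rax,%rbx,8)") &&
	       !check_multi_regs(arch, "(%rcx)");
}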
619
620static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms __maybe_unused)
621{
622 char *s = strchr(ops->raw, ','), *target, *comment, prev;
623
624 if (s == NULL)
625 return -1;
626
627 *s = '\0';
628
629 /*
630 * x86 SIB addressing has something like 0x8(%rax, %rcx, 1)
631 * then it needs to have the closing parenthesis.
632 */
633 if (strchr(ops->raw, '(')) {
634 *s = ',';
635 s = strchr(ops->raw, ')');
636 if (s == NULL || s[1] != ',')
637 return -1;
638 *++s = '\0';
639 }
640
641 ops->source.raw = strdup(ops->raw);
642 *s = ',';
643
644 if (ops->source.raw == NULL)
645 return -1;
646
647 ops->source.multi_regs = check_multi_regs(arch, ops->source.raw);
648
649 target = skip_spaces(++s);
650 comment = strchr(s, arch->objdump.comment_char);
651
652 if (comment != NULL)
653 s = comment - 1;
654 else
655 s = strchr(s, '\0') - 1;
656
657 while (s > target && isspace(s[0]))
658 --s;
659 s++;
660 prev = *s;
661 *s = '\0';
662
663 ops->target.raw = strdup(target);
664 *s = prev;
665
666 if (ops->target.raw == NULL)
667 goto out_free_source;
668
669 ops->target.multi_regs = check_multi_regs(arch, ops->target.raw);
670
671 if (comment == NULL)
672 return 0;
673
674 comment = skip_spaces(comment);
675 comment__symbol(ops->source.raw, comment + 1, &ops->source.addr, &ops->source.name);
676 comment__symbol(ops->target.raw, comment + 1, &ops->target.addr, &ops->target.name);
677
678 return 0;
679
680out_free_source:
681 zfree(&ops->source.raw);
682 return -1;
683}
684
685static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
686 struct ins_operands *ops, int max_ins_name)
687{
688 return scnprintf(bf, size, "%-*s %s,%s", max_ins_name, ins->name,
689 ops->source.name ?: ops->source.raw,
690 ops->target.name ?: ops->target.raw);
691}
692
693static struct ins_ops mov_ops = {
694 .parse = mov__parse,
695 .scnprintf = mov__scnprintf,
696};
697
698static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops, struct map_symbol *ms __maybe_unused)
699{
700 char *target, *comment, *s, prev;
701
702 target = s = ops->raw;
703
704 while (s[0] != '\0' && !isspace(s[0]))
705 ++s;
706 prev = *s;
707 *s = '\0';
708
709 ops->target.raw = strdup(target);
710 *s = prev;
711
712 if (ops->target.raw == NULL)
713 return -1;
714
715 comment = strchr(s, arch->objdump.comment_char);
716 if (comment == NULL)
717 return 0;
718
719 comment = skip_spaces(comment);
720 comment__symbol(ops->target.raw, comment + 1, &ops->target.addr, &ops->target.name);
721
722 return 0;
723}
724
725static int dec__scnprintf(struct ins *ins, char *bf, size_t size,
726 struct ins_operands *ops, int max_ins_name)
727{
728 return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name,
729 ops->target.name ?: ops->target.raw);
730}
731
732static struct ins_ops dec_ops = {
733 .parse = dec__parse,
734 .scnprintf = dec__scnprintf,
735};
736
737static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size,
738 struct ins_operands *ops __maybe_unused, int max_ins_name)
739{
740 return scnprintf(bf, size, "%-*s", max_ins_name, "nop");
741}
742
743static struct ins_ops nop_ops = {
744 .scnprintf = nop__scnprintf,
745};
746
747static struct ins_ops ret_ops = {
748 .scnprintf = ins__raw_scnprintf,
749};
750
751bool ins__is_ret(const struct ins *ins)
752{
753 return ins->ops == &ret_ops;
754}
755
756bool ins__is_lock(const struct ins *ins)
757{
758 return ins->ops == &lock_ops;
759}
760
761static int ins__key_cmp(const void *name, const void *insp)
762{
763 const struct ins *ins = insp;
764
765 return strcmp(name, ins->name);
766}
767
768static int ins__cmp(const void *a, const void *b)
769{
770 const struct ins *ia = a;
771 const struct ins *ib = b;
772
773 return strcmp(ia->name, ib->name);
774}
775
776static void ins__sort(struct arch *arch)
777{
778 const int nmemb = arch->nr_instructions;
779
780 qsort(arch->instructions, nmemb, sizeof(struct ins), ins__cmp);
781}
782
783static struct ins_ops *__ins__find(struct arch *arch, const char *name)
784{
785 struct ins *ins;
786 const int nmemb = arch->nr_instructions;
787
788 if (!arch->sorted_instructions) {
789 ins__sort(arch);
790 arch->sorted_instructions = true;
791 }
792
793 ins = bsearch(name, arch->instructions, nmemb, sizeof(struct ins), ins__key_cmp);
794 if (ins)
795 return ins->ops;
796
797 if (arch->insn_suffix) {
798 char tmp[32];
799 char suffix;
800 size_t len = strlen(name);
801
802 if (len == 0 || len >= sizeof(tmp))
803 return NULL;
804
805 suffix = name[len - 1];
806 if (strchr(arch->insn_suffix, suffix) == NULL)
807 return NULL;
808
809 strcpy(tmp, name);
810 tmp[len - 1] = '\0'; /* remove the suffix and check again */
811
812 ins = bsearch(tmp, arch->instructions, nmemb, sizeof(struct ins), ins__key_cmp);
813 }
814 return ins ? ins->ops : NULL;
815}
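
/*
 * Editor's note: a minimal sketch, not part of the upstream file.  On x86
 * (insn_suffix == "bwlq") an AT&T-style mnemonic such as "addq" is not in
 * the table as-is; the trailing size suffix is stripped and the lookup is
 * retried with "add", which is expected to resolve to the same ins_ops.
 */
static __maybe_unused struct ins_ops *ins_suffix_lookup_example(struct arch *arch)
{
	/* falls back from "addq" to "add" via the insn_suffix retry above */
	return __ins__find(arch, "addq");
}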
816
817static struct ins_ops *ins__find(struct arch *arch, const char *name)
818{
819 struct ins_ops *ops = __ins__find(arch, name);
820
821 if (!ops && arch->associate_instruction_ops)
822 ops = arch->associate_instruction_ops(arch, name);
823
824 return ops;
825}
826
827static int arch__key_cmp(const void *name, const void *archp)
828{
829 const struct arch *arch = archp;
830
831 return strcmp(name, arch->name);
832}
833
834static int arch__cmp(const void *a, const void *b)
835{
836 const struct arch *aa = a;
837 const struct arch *ab = b;
838
839 return strcmp(aa->name, ab->name);
840}
841
842static void arch__sort(void)
843{
844 const int nmemb = ARRAY_SIZE(architectures);
845
846 qsort(architectures, nmemb, sizeof(struct arch), arch__cmp);
847}
848
849static struct arch *arch__find(const char *name)
850{
851 const int nmemb = ARRAY_SIZE(architectures);
852 static bool sorted;
853
854 if (!sorted) {
855 arch__sort();
856 sorted = true;
857 }
858
859 return bsearch(name, architectures, nmemb, sizeof(struct arch), arch__key_cmp);
860}
861
862bool arch__is(struct arch *arch, const char *name)
863{
864 return !strcmp(arch->name, name);
865}
866
867/* symbol histogram: key = offset << 16 | evsel->core.idx */
868static size_t sym_hist_hash(long key, void *ctx __maybe_unused)
869{
870 return (key >> 16) + (key & 0xffff);
871}
872
873static bool sym_hist_equal(long key1, long key2, void *ctx __maybe_unused)
874{
875 return key1 == key2;
876}
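
/*
 * Editor's note: a minimal sketch, not part of the upstream file, showing
 * how the sample hashmap key packs the instruction offset and the evsel
 * index ("offset << 16 | evsel->core.idx") and what sym_hist_hash() makes
 * of it.  The numbers are made up for illustration.
 */
static void __maybe_unused sym_hist_key_example(void)
{
	long offset = 0x1a8, evidx = 2;
	long key = (offset << 16) | evidx;

	/* the hash simply folds the two halves back together: 0x1a8 + 2 */
	pr_debug3("key=%#lx hash=%zu\n", key, sym_hist_hash(key, NULL));
}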
877
878static struct annotated_source *annotated_source__new(void)
879{
880 struct annotated_source *src = zalloc(sizeof(*src));
881
882 if (src != NULL)
883 INIT_LIST_HEAD(&src->source);
884
885 return src;
886}
887
888static __maybe_unused void annotated_source__delete(struct annotated_source *src)
889{
890 if (src == NULL)
891 return;
892
893 hashmap__free(src->samples);
894 zfree(&src->histograms);
895 free(src);
896}
897
898static int annotated_source__alloc_histograms(struct annotated_source *src,
899 int nr_hists)
900{
901 src->nr_histograms = nr_hists;
902 src->histograms = calloc(nr_hists, sizeof(*src->histograms));
903
904 if (src->histograms == NULL)
905 return -1;
906
907 src->samples = hashmap__new(sym_hist_hash, sym_hist_equal, NULL);
908 if (src->samples == NULL)
909 zfree(&src->histograms);
910
911 return src->histograms ? 0 : -1;
912}
913
914void symbol__annotate_zero_histograms(struct symbol *sym)
915{
916 struct annotation *notes = symbol__annotation(sym);
917
918 annotation__lock(notes);
919 if (notes->src != NULL) {
920 memset(notes->src->histograms, 0,
921 notes->src->nr_histograms * sizeof(*notes->src->histograms));
922 hashmap__clear(notes->src->samples);
923 }
924 if (notes->branch && notes->branch->cycles_hist) {
925 memset(notes->branch->cycles_hist, 0,
926 symbol__size(sym) * sizeof(struct cyc_hist));
927 }
928 annotation__unlock(notes);
929}
930
931static int __symbol__account_cycles(struct cyc_hist *ch,
932 u64 start,
933 unsigned offset, unsigned cycles,
934 unsigned have_start)
935{
936 /*
937 * For now we can only account one basic block per
938 * final jump. But multiple could be overlapping.
939 * Always account the longest one. So when
940 * a shorter one has already been seen, throw it away.
941 *
942 * We separately always account the full cycles.
943 */
944 ch[offset].num_aggr++;
945 ch[offset].cycles_aggr += cycles;
946
947 if (cycles > ch[offset].cycles_max)
948 ch[offset].cycles_max = cycles;
949
950 if (ch[offset].cycles_min) {
951 if (cycles && cycles < ch[offset].cycles_min)
952 ch[offset].cycles_min = cycles;
953 } else
954 ch[offset].cycles_min = cycles;
955
956 if (!have_start && ch[offset].have_start)
957 return 0;
958 if (ch[offset].num) {
959 if (have_start && (!ch[offset].have_start ||
960 ch[offset].start > start)) {
961 ch[offset].have_start = 0;
962 ch[offset].cycles = 0;
963 ch[offset].num = 0;
964 if (ch[offset].reset < 0xffff)
965 ch[offset].reset++;
966 } else if (have_start &&
967 ch[offset].start < start)
968 return 0;
969 }
970
971 if (ch[offset].num < NUM_SPARKS)
972 ch[offset].cycles_spark[ch[offset].num] = cycles;
973
974 ch[offset].have_start = have_start;
975 ch[offset].start = start;
976 ch[offset].cycles += cycles;
977 ch[offset].num++;
978 return 0;
979}
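
/*
 * Editor's note: illustrative only, not part of the upstream file.  If one
 * branch record reports the block [start = 0x40, end = offset] and a later
 * record reports the longer block [start = 0x10, end = offset], the shorter
 * one is thrown away: have_start/cycles/num are reset (bumping ch->reset)
 * and accounting restarts from the earlier start.  cycles_aggr and num_aggr
 * keep accumulating either way, which is what "the full cycles" refers to.
 */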
980
981static int __symbol__inc_addr_samples(struct map_symbol *ms,
982 struct annotated_source *src, int evidx, u64 addr,
983 struct perf_sample *sample)
984{
985 struct symbol *sym = ms->sym;
986 long hash_key;
987 u64 offset;
988 struct sym_hist *h;
989 struct sym_hist_entry *entry;
990
991 pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map__unmap_ip(ms->map, addr));
992
993 if ((addr < sym->start || addr >= sym->end) &&
994 (addr != sym->end || sym->start != sym->end)) {
995 pr_debug("%s(%d): ERANGE! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 "\n",
996 __func__, __LINE__, sym->name, sym->start, addr, sym->end);
997 return -ERANGE;
998 }
999
1000 offset = addr - sym->start;
1001 h = annotated_source__histogram(src, evidx);
1002 if (h == NULL) {
1003 pr_debug("%s(%d): ENOMEM! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 ", func: %d\n",
1004 __func__, __LINE__, sym->name, sym->start, addr, sym->end, sym->type == STT_FUNC);
1005 return -ENOMEM;
1006 }
1007
1008 hash_key = offset << 16 | evidx;
1009 if (!hashmap__find(src->samples, hash_key, &entry)) {
1010 entry = zalloc(sizeof(*entry));
1011 if (entry == NULL)
1012 return -ENOMEM;
1013
1014 if (hashmap__add(src->samples, hash_key, entry) < 0)
1015 return -ENOMEM;
1016 }
1017
1018 h->nr_samples++;
1019 h->period += sample->period;
1020 entry->nr_samples++;
1021 entry->period += sample->period;
1022
1023 pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64
1024 ", evidx=%d] => nr_samples: %" PRIu64 ", period: %" PRIu64 "\n",
1025 sym->start, sym->name, addr, addr - sym->start, evidx,
1026 entry->nr_samples, entry->period);
1027 return 0;
1028}
1029
1030struct annotated_branch *annotation__get_branch(struct annotation *notes)
1031{
1032 if (notes == NULL)
1033 return NULL;
1034
1035 if (notes->branch == NULL)
1036 notes->branch = zalloc(sizeof(*notes->branch));
1037
1038 return notes->branch;
1039}
1040
1041static struct cyc_hist *symbol__cycles_hist(struct symbol *sym)
1042{
1043 struct annotation *notes = symbol__annotation(sym);
1044 struct annotated_branch *branch;
1045
1046 branch = annotation__get_branch(notes);
1047 if (branch == NULL)
1048 return NULL;
1049
1050 if (branch->cycles_hist == NULL) {
1051 const size_t size = symbol__size(sym);
1052
1053 branch->cycles_hist = calloc(size, sizeof(struct cyc_hist));
1054 }
1055
1056 return branch->cycles_hist;
1057}
1058
1059struct annotated_source *symbol__hists(struct symbol *sym, int nr_hists)
1060{
1061 struct annotation *notes = symbol__annotation(sym);
1062
1063 if (notes->src == NULL) {
1064 notes->src = annotated_source__new();
1065 if (notes->src == NULL)
1066 return NULL;
1067 goto alloc_histograms;
1068 }
1069
1070 if (notes->src->histograms == NULL) {
1071alloc_histograms:
1072 annotated_source__alloc_histograms(notes->src, nr_hists);
1073 }
1074
1075 return notes->src;
1076}
1077
1078static int symbol__inc_addr_samples(struct map_symbol *ms,
1079 struct evsel *evsel, u64 addr,
1080 struct perf_sample *sample)
1081{
1082 struct symbol *sym = ms->sym;
1083 struct annotated_source *src;
1084
1085 if (sym == NULL)
1086 return 0;
1087 src = symbol__hists(sym, evsel->evlist->core.nr_entries);
1088 return src ? __symbol__inc_addr_samples(ms, src, evsel->core.idx, addr, sample) : 0;
1089}
1090
1091static int symbol__account_cycles(u64 addr, u64 start,
1092 struct symbol *sym, unsigned cycles)
1093{
1094 struct cyc_hist *cycles_hist;
1095 unsigned offset;
1096
1097 if (sym == NULL)
1098 return 0;
1099 cycles_hist = symbol__cycles_hist(sym);
1100 if (cycles_hist == NULL)
1101 return -ENOMEM;
1102 if (addr < sym->start || addr >= sym->end)
1103 return -ERANGE;
1104
1105 if (start) {
1106 if (start < sym->start || start >= sym->end)
1107 return -ERANGE;
1108 if (start >= addr)
1109 start = 0;
1110 }
1111 offset = addr - sym->start;
1112 return __symbol__account_cycles(cycles_hist,
1113 start ? start - sym->start : 0,
1114 offset, cycles,
1115 !!start);
1116}
1117
1118int addr_map_symbol__account_cycles(struct addr_map_symbol *ams,
1119 struct addr_map_symbol *start,
1120 unsigned cycles)
1121{
1122 u64 saddr = 0;
1123 int err;
1124
1125 if (!cycles)
1126 return 0;
1127
1128 /*
1129 * Only set start when IPC can be computed. We can only
1130 * compute it when the basic block is completely in a single
1131 * function.
1132 * Special-case the situation where the jump is elsewhere, but
1133 * it starts at the function start.
1134 */
1135 if (start &&
1136 (start->ms.sym == ams->ms.sym ||
1137 (ams->ms.sym &&
1138 start->addr == ams->ms.sym->start + map__start(ams->ms.map))))
1139 saddr = start->al_addr;
1140 if (saddr == 0)
1141 pr_debug2("BB with bad start: addr %"PRIx64" start %"PRIx64" sym %"PRIx64" saddr %"PRIx64"\n",
1142 ams->addr,
1143 start ? start->addr : 0,
1144 ams->ms.sym ? ams->ms.sym->start + map__start(ams->ms.map) : 0,
1145 saddr);
1146 err = symbol__account_cycles(ams->al_addr, saddr, ams->ms.sym, cycles);
1147 if (err)
1148 pr_debug2("account_cycles failed %d\n", err);
1149 return err;
1150}
1151
1152static unsigned annotation__count_insn(struct annotation *notes, u64 start, u64 end)
1153{
1154 unsigned n_insn = 0;
1155 u64 offset;
1156
1157 for (offset = start; offset <= end; offset++) {
1158 if (notes->src->offsets[offset])
1159 n_insn++;
1160 }
1161 return n_insn;
1162}
1163
1164static void annotated_branch__delete(struct annotated_branch *branch)
1165{
1166 if (branch) {
1167 zfree(&branch->cycles_hist);
1168 free(branch);
1169 }
1170}
1171
1172static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 end, struct cyc_hist *ch)
1173{
1174 unsigned n_insn;
1175 unsigned int cover_insn = 0;
1176 u64 offset;
1177
1178 n_insn = annotation__count_insn(notes, start, end);
1179 if (n_insn && ch->num && ch->cycles) {
1180 struct annotated_branch *branch;
1181 float ipc = n_insn / ((double)ch->cycles / (double)ch->num);
1182
1183 /* Hide data when there are too many overlaps. */
1184 if (ch->reset >= 0x7fff)
1185 return;
1186
1187 for (offset = start; offset <= end; offset++) {
1188 struct annotation_line *al = notes->src->offsets[offset];
1189
1190 if (al && al->cycles && al->cycles->ipc == 0.0) {
1191 al->cycles->ipc = ipc;
1192 cover_insn++;
1193 }
1194 }
1195
1196 branch = annotation__get_branch(notes);
1197 if (cover_insn && branch) {
1198 branch->hit_cycles += ch->cycles;
1199 branch->hit_insn += n_insn * ch->num;
1200 branch->cover_insn += cover_insn;
1201 }
1202 }
1203}
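
/*
 * Editor's note: a worked example, not part of the upstream file.  If the
 * range [start, end] covers n_insn = 4 instructions and the cycles history
 * shows ch->cycles = 8 over ch->num = 2 hits, the block averaged 4 cycles
 * per traversal, so IPC = 4 / (8.0 / 2) = 1.0; that value is stamped on
 * every line in the range that does not already have an IPC.
 */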
1204
1205static int annotation__compute_ipc(struct annotation *notes, size_t size)
1206{
1207 int err = 0;
1208 s64 offset;
1209
1210 if (!notes->branch || !notes->branch->cycles_hist)
1211 return 0;
1212
1213 notes->branch->total_insn = annotation__count_insn(notes, 0, size - 1);
1214 notes->branch->hit_cycles = 0;
1215 notes->branch->hit_insn = 0;
1216 notes->branch->cover_insn = 0;
1217
1218 annotation__lock(notes);
1219 for (offset = size - 1; offset >= 0; --offset) {
1220 struct cyc_hist *ch;
1221
1222 ch = &notes->branch->cycles_hist[offset];
1223 if (ch && ch->cycles) {
1224 struct annotation_line *al;
1225
1226 al = notes->src->offsets[offset];
1227 if (al && al->cycles == NULL) {
1228 al->cycles = zalloc(sizeof(*al->cycles));
1229 if (al->cycles == NULL) {
1230 err = ENOMEM;
1231 break;
1232 }
1233 }
1234 if (ch->have_start)
1235 annotation__count_and_fill(notes, ch->start, offset, ch);
1236 if (al && ch->num_aggr) {
1237 al->cycles->avg = ch->cycles_aggr / ch->num_aggr;
1238 al->cycles->max = ch->cycles_max;
1239 al->cycles->min = ch->cycles_min;
1240 }
1241 }
1242 }
1243
1244 if (err) {
1245 while (++offset < (s64)size) {
1246 struct cyc_hist *ch = &notes->branch->cycles_hist[offset];
1247
1248 if (ch && ch->cycles) {
1249 struct annotation_line *al = notes->src->offsets[offset];
1250 if (al)
1251 zfree(&al->cycles);
1252 }
1253 }
1254 }
1255
1256 annotation__unlock(notes);
1257 return 0;
1258}
1259
1260int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample,
1261 struct evsel *evsel)
1262{
1263 return symbol__inc_addr_samples(&ams->ms, evsel, ams->al_addr, sample);
1264}
1265
1266int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *sample,
1267 struct evsel *evsel, u64 ip)
1268{
1269 return symbol__inc_addr_samples(&he->ms, evsel, ip, sample);
1270}
1271
1272static void disasm_line__init_ins(struct disasm_line *dl, struct arch *arch, struct map_symbol *ms)
1273{
1274 dl->ins.ops = ins__find(arch, dl->ins.name);
1275
1276 if (!dl->ins.ops)
1277 return;
1278
1279 if (dl->ins.ops->parse && dl->ins.ops->parse(arch, &dl->ops, ms) < 0)
1280 dl->ins.ops = NULL;
1281}
1282
1283static int disasm_line__parse(char *line, const char **namep, char **rawp)
1284{
1285 char tmp, *name = skip_spaces(line);
1286
1287 if (name[0] == '\0')
1288 return -1;
1289
1290 *rawp = name + 1;
1291
1292 while ((*rawp)[0] != '\0' && !isspace((*rawp)[0]))
1293 ++*rawp;
1294
1295 tmp = (*rawp)[0];
1296 (*rawp)[0] = '\0';
1297 *namep = strdup(name);
1298
1299 if (*namep == NULL)
1300 goto out;
1301
1302 (*rawp)[0] = tmp;
1303 *rawp = strim(*rawp);
1304
1305 return 0;
1306
1307out:
1308 return -1;
1309}
1310
1311struct annotate_args {
1312 struct arch *arch;
1313 struct map_symbol ms;
1314 struct evsel *evsel;
1315 struct annotation_options *options;
1316 s64 offset;
1317 char *line;
1318 int line_nr;
1319 char *fileloc;
1320};
1321
1322static void annotation_line__init(struct annotation_line *al,
1323 struct annotate_args *args,
1324 int nr)
1325{
1326 al->offset = args->offset;
1327 al->line = strdup(args->line);
1328 al->line_nr = args->line_nr;
1329 al->fileloc = args->fileloc;
1330 al->data_nr = nr;
1331}
1332
1333static void annotation_line__exit(struct annotation_line *al)
1334{
1335 zfree_srcline(&al->path);
1336 zfree(&al->line);
1337 zfree(&al->cycles);
1338}
1339
1340static size_t disasm_line_size(int nr)
1341{
1342 struct annotation_line *al;
1343
1344 return (sizeof(struct disasm_line) + (sizeof(al->data[0]) * nr));
1345}
1346
1347/*
1348 * Allocate the disasm annotation line data with the
1349 * following structure:
1350 *
1351 * -------------------------------------------
1352 * struct disasm_line | struct annotation_line
1353 * -------------------------------------------
1354 *
1355 * 'struct annotation_line' is the last member of
1356 * 'struct disasm_line' so it can be accessed easily.
1357 */
1358static struct disasm_line *disasm_line__new(struct annotate_args *args)
1359{
1360 struct disasm_line *dl = NULL;
1361 int nr = 1;
1362
1363 if (evsel__is_group_event(args->evsel))
1364 nr = args->evsel->core.nr_members;
1365
1366 dl = zalloc(disasm_line_size(nr));
1367 if (!dl)
1368 return NULL;
1369
1370 annotation_line__init(&dl->al, args, nr);
1371 if (dl->al.line == NULL)
1372 goto out_delete;
1373
1374 if (args->offset != -1) {
1375 if (disasm_line__parse(dl->al.line, &dl->ins.name, &dl->ops.raw) < 0)
1376 goto out_free_line;
1377
1378 disasm_line__init_ins(dl, args->arch, &args->ms);
1379 }
1380
1381 return dl;
1382
1383out_free_line:
1384 zfree(&dl->al.line);
1385out_delete:
1386 free(dl);
1387 return NULL;
1388}
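
/*
 * Editor's note: illustrative only, not part of the upstream file.  For a
 * group event with three members, disasm_line__new() allocates
 * disasm_line_size(3), i.e. sizeof(struct disasm_line) plus room for three
 * annotation_data entries, so the per-event data sits directly behind the
 * embedded annotation_line and is addressed as dl->al.data[0..2] with
 * dl->al.data_nr == 3.
 */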
1389
1390void disasm_line__free(struct disasm_line *dl)
1391{
1392 if (dl->ins.ops && dl->ins.ops->free)
1393 dl->ins.ops->free(&dl->ops);
1394 else
1395 ins__delete(&dl->ops);
1396 zfree(&dl->ins.name);
1397 annotation_line__exit(&dl->al);
1398 free(dl);
1399}
1400
1401int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw, int max_ins_name)
1402{
1403 if (raw || !dl->ins.ops)
1404 return scnprintf(bf, size, "%-*s %s", max_ins_name, dl->ins.name, dl->ops.raw);
1405
1406 return ins__scnprintf(&dl->ins, bf, size, &dl->ops, max_ins_name);
1407}
1408
1409void annotation__exit(struct annotation *notes)
1410{
1411 annotated_source__delete(notes->src);
1412 annotated_branch__delete(notes->branch);
1413}
1414
1415static struct sharded_mutex *sharded_mutex;
1416
1417static void annotation__init_sharded_mutex(void)
1418{
1419 /* As many mutexes as there are CPUs. */
1420 sharded_mutex = sharded_mutex__new(cpu__max_present_cpu().cpu);
1421}
1422
1423static size_t annotation__hash(const struct annotation *notes)
1424{
1425 return (size_t)notes;
1426}
1427
1428static struct mutex *annotation__get_mutex(const struct annotation *notes)
1429{
1430 static pthread_once_t once = PTHREAD_ONCE_INIT;
1431
1432 pthread_once(&once, annotation__init_sharded_mutex);
1433 if (!sharded_mutex)
1434 return NULL;
1435
1436 return sharded_mutex__get_mutex(sharded_mutex, annotation__hash(notes));
1437}
1438
1439void annotation__lock(struct annotation *notes)
1440 NO_THREAD_SAFETY_ANALYSIS
1441{
1442 struct mutex *mutex = annotation__get_mutex(notes);
1443
1444 if (mutex)
1445 mutex_lock(mutex);
1446}
1447
1448void annotation__unlock(struct annotation *notes)
1449 NO_THREAD_SAFETY_ANALYSIS
1450{
1451 struct mutex *mutex = annotation__get_mutex(notes);
1452
1453 if (mutex)
1454 mutex_unlock(mutex);
1455}
1456
1457bool annotation__trylock(struct annotation *notes)
1458{
1459 struct mutex *mutex = annotation__get_mutex(notes);
1460
1461 if (!mutex)
1462 return false;
1463
1464 return mutex_trylock(mutex);
1465}
1466
1467
1468static void annotation_line__add(struct annotation_line *al, struct list_head *head)
1469{
1470 list_add_tail(&al->node, head);
1471}
1472
1473struct annotation_line *
1474annotation_line__next(struct annotation_line *pos, struct list_head *head)
1475{
1476 list_for_each_entry_continue(pos, head, node)
1477 if (pos->offset >= 0)
1478 return pos;
1479
1480 return NULL;
1481}
1482
1483static const char *annotate__address_color(struct block_range *br)
1484{
1485 double cov = block_range__coverage(br);
1486
1487 if (cov >= 0) {
1488 /* mark red for >75% coverage */
1489 if (cov > 0.75)
1490 return PERF_COLOR_RED;
1491
1492 /* mark dull for <1% coverage */
1493 if (cov < 0.01)
1494 return PERF_COLOR_NORMAL;
1495 }
1496
1497 return PERF_COLOR_MAGENTA;
1498}
1499
1500static const char *annotate__asm_color(struct block_range *br)
1501{
1502 double cov = block_range__coverage(br);
1503
1504 if (cov >= 0) {
1505 /* mark dull for <1% coverage */
1506 if (cov < 0.01)
1507 return PERF_COLOR_NORMAL;
1508 }
1509
1510 return PERF_COLOR_BLUE;
1511}
1512
1513static void annotate__branch_printf(struct block_range *br, u64 addr)
1514{
1515 bool emit_comment = true;
1516
1517 if (!br)
1518 return;
1519
1520#if 1
1521 if (br->is_target && br->start == addr) {
1522 struct block_range *branch = br;
1523 double p;
1524
1525 /*
1526 * Find matching branch to our target.
1527 */
1528 while (!branch->is_branch)
1529 branch = block_range__next(branch);
1530
1531 p = 100 *(double)br->entry / branch->coverage;
1532
1533 if (p > 0.1) {
1534 if (emit_comment) {
1535 emit_comment = false;
1536 printf("\t#");
1537 }
1538
1539 /*
1540 * The percentage of coverage joined at this target in relation
1541 * to the next branch.
1542 */
1543 printf(" +%.2f%%", p);
1544 }
1545 }
1546#endif
1547 if (br->is_branch && br->end == addr) {
1548 double p = 100*(double)br->taken / br->coverage;
1549
1550 if (p > 0.1) {
1551 if (emit_comment) {
1552 emit_comment = false;
1553 printf("\t#");
1554 }
1555
1556 /*
1557 * The percentage of coverage leaving at this branch, and
1558 * its prediction ratio.
1559 */
1560 printf(" -%.2f%% (p:%.2f%%)", p, 100*(double)br->pred / br->taken);
1561 }
1562 }
1563}
1564
1565static int disasm_line__print(struct disasm_line *dl, u64 start, int addr_fmt_width)
1566{
1567 s64 offset = dl->al.offset;
1568 const u64 addr = start + offset;
1569 struct block_range *br;
1570
1571 br = block_range__find(addr);
1572 color_fprintf(stdout, annotate__address_color(br), " %*" PRIx64 ":", addr_fmt_width, addr);
1573 color_fprintf(stdout, annotate__asm_color(br), "%s", dl->al.line);
1574 annotate__branch_printf(br, addr);
1575 return 0;
1576}
1577
1578static int
1579annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start,
1580 struct evsel *evsel, u64 len, int min_pcnt, int printed,
1581 int max_lines, struct annotation_line *queue, int addr_fmt_width,
1582 int percent_type)
1583{
1584 struct disasm_line *dl = container_of(al, struct disasm_line, al);
1585 static const char *prev_line;
1586
1587 if (al->offset != -1) {
1588 double max_percent = 0.0;
1589 int i, nr_percent = 1;
1590 const char *color;
1591 struct annotation *notes = symbol__annotation(sym);
1592
1593 for (i = 0; i < al->data_nr; i++) {
1594 double percent;
1595
1596 percent = annotation_data__percent(&al->data[i],
1597 percent_type);
1598
1599 if (percent > max_percent)
1600 max_percent = percent;
1601 }
1602
1603 if (al->data_nr > nr_percent)
1604 nr_percent = al->data_nr;
1605
1606 if (max_percent < min_pcnt)
1607 return -1;
1608
1609 if (max_lines && printed >= max_lines)
1610 return 1;
1611
1612 if (queue != NULL) {
1613 list_for_each_entry_from(queue, &notes->src->source, node) {
1614 if (queue == al)
1615 break;
1616 annotation_line__print(queue, sym, start, evsel, len,
1617 0, 0, 1, NULL, addr_fmt_width,
1618 percent_type);
1619 }
1620 }
1621
1622 color = get_percent_color(max_percent);
1623
1624 for (i = 0; i < nr_percent; i++) {
1625 struct annotation_data *data = &al->data[i];
1626 double percent;
1627
1628 percent = annotation_data__percent(data, percent_type);
1629 color = get_percent_color(percent);
1630
1631 if (symbol_conf.show_total_period)
1632 color_fprintf(stdout, color, " %11" PRIu64,
1633 data->he.period);
1634 else if (symbol_conf.show_nr_samples)
1635 color_fprintf(stdout, color, " %7" PRIu64,
1636 data->he.nr_samples);
1637 else
1638 color_fprintf(stdout, color, " %7.2f", percent);
1639 }
1640
1641 printf(" : ");
1642
1643 disasm_line__print(dl, start, addr_fmt_width);
1644
1645 /*
1646 * Also color the filename and line if needed, with
1647 * the same color as the percentage. Don't print it
1648 * twice for nearby colored addrs with the same filename:line.
1649 */
1650 if (al->path) {
1651 if (!prev_line || strcmp(prev_line, al->path)) {
1652 color_fprintf(stdout, color, " // %s", al->path);
1653 prev_line = al->path;
1654 }
1655 }
1656
1657 printf("\n");
1658 } else if (max_lines && printed >= max_lines)
1659 return 1;
1660 else {
1661 int width = symbol_conf.show_total_period ? 12 : 8;
1662
1663 if (queue)
1664 return -1;
1665
1666 if (evsel__is_group_event(evsel))
1667 width *= evsel->core.nr_members;
1668
1669 if (!*al->line)
1670 printf(" %*s:\n", width, " ");
1671 else
1672 printf(" %*s: %-*d %s\n", width, " ", addr_fmt_width, al->line_nr, al->line);
1673 }
1674
1675 return 0;
1676}
1677
1678/*
1679 * symbol__parse_objdump_line() parses objdump output (with -d --no-show-raw)
1680 * which looks like the following:
1681 *
1682 * 0000000000415500 <_init>:
1683 * 415500: sub $0x8,%rsp
1684 * 415504: mov 0x2f5ad5(%rip),%rax # 70afe0 <_DYNAMIC+0x2f8>
1685 * 41550b: test %rax,%rax
1686 * 41550e: je 415515 <_init+0x15>
1687 * 415510: callq 416e70 <__gmon_start__@plt>
1688 * 415515: add $0x8,%rsp
1689 * 415519: retq
1690 *
1691 * it will be parsed and saved into struct disasm_line as
1692 * <offset> <name> <ops.raw>
1693 *
1694 * The offset will be a relative offset from the start of the symbol, and -1
1695 * means that it's not a disassembly line, so it should be treated differently.
1696 * The ops.raw part will be parsed further according to the type of the instruction.
1697 */
1698static int symbol__parse_objdump_line(struct symbol *sym,
1699 struct annotate_args *args,
1700 char *parsed_line, int *line_nr, char **fileloc)
1701{
1702 struct map *map = args->ms.map;
1703 struct annotation *notes = symbol__annotation(sym);
1704 struct disasm_line *dl;
1705 char *tmp;
1706 s64 line_ip, offset = -1;
1707 regmatch_t match[2];
1708
1709 /* /filename:linenr ? Save line number and ignore. */
1710 if (regexec(&file_lineno, parsed_line, 2, match, 0) == 0) {
1711 *line_nr = atoi(parsed_line + match[1].rm_so);
1712 free(*fileloc);
1713 *fileloc = strdup(parsed_line);
1714 return 0;
1715 }
1716
1717 /* Process hex address followed by ':'. */
1718 line_ip = strtoull(parsed_line, &tmp, 16);
1719 if (parsed_line != tmp && tmp[0] == ':' && tmp[1] != '\0') {
1720 u64 start = map__rip_2objdump(map, sym->start),
1721 end = map__rip_2objdump(map, sym->end);
1722
1723 offset = line_ip - start;
1724 if ((u64)line_ip < start || (u64)line_ip >= end)
1725 offset = -1;
1726 else
1727 parsed_line = tmp + 1;
1728 }
1729
1730 args->offset = offset;
1731 args->line = parsed_line;
1732 args->line_nr = *line_nr;
1733 args->fileloc = *fileloc;
1734 args->ms.sym = sym;
1735
1736 dl = disasm_line__new(args);
1737 (*line_nr)++;
1738
1739 if (dl == NULL)
1740 return -1;
1741
1742 if (!disasm_line__has_local_offset(dl)) {
1743 dl->ops.target.offset = dl->ops.target.addr -
1744 map__rip_2objdump(map, sym->start);
1745 dl->ops.target.offset_avail = true;
1746 }
1747
1748 /* kcore has no symbols, so add the call target symbol */
1749 if (dl->ins.ops && ins__is_call(&dl->ins) && !dl->ops.target.sym) {
1750 struct addr_map_symbol target = {
1751 .addr = dl->ops.target.addr,
1752 .ms = { .map = map, },
1753 };
1754
1755 if (!maps__find_ams(args->ms.maps, &target) &&
1756 target.ms.sym->start == target.al_addr)
1757 dl->ops.target.sym = target.ms.sym;
1758 }
1759
1760 annotation_line__add(&dl->al, &notes->src->source);
1761 return 0;
1762}
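
/*
 * Editor's note: a worked example, not part of the upstream file.  Given
 * the objdump line "  415504: mov    0x2f5ad5(%rip),%rax  # 70afe0 <...>"
 * for a symbol whose objdump start is 0x415500, the leading hex address
 * parses to line_ip = 0x415504, so offset = 4; disasm_line__parse() then
 * splits the rest into ins.name = "mov" and ops.raw = "0x2f5ad5(%rip),%rax ...".
 * A "/path/file.c:123" line instead matches the file_lineno regex and only
 * updates *line_nr and *fileloc.
 */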
1763
1764static __attribute__((constructor)) void symbol__init_regexpr(void)
1765{
1766 regcomp(&file_lineno, "^/[^:]+:([0-9]+)", REG_EXTENDED);
1767}
1768
1769static void delete_last_nop(struct symbol *sym)
1770{
1771 struct annotation *notes = symbol__annotation(sym);
1772 struct list_head *list = &notes->src->source;
1773 struct disasm_line *dl;
1774
1775 while (!list_empty(list)) {
1776 dl = list_entry(list->prev, struct disasm_line, al.node);
1777
1778 if (dl->ins.ops) {
1779 if (dl->ins.ops != &nop_ops)
1780 return;
1781 } else {
1782 if (!strstr(dl->al.line, " nop ") &&
1783 !strstr(dl->al.line, " nopl ") &&
1784 !strstr(dl->al.line, " nopw "))
1785 return;
1786 }
1787
1788 list_del_init(&dl->al.node);
1789 disasm_line__free(dl);
1790 }
1791}
1792
1793int symbol__strerror_disassemble(struct map_symbol *ms, int errnum, char *buf, size_t buflen)
1794{
1795 struct dso *dso = map__dso(ms->map);
1796
1797 BUG_ON(buflen == 0);
1798
1799 if (errnum >= 0) {
1800 str_error_r(errnum, buf, buflen);
1801 return 0;
1802 }
1803
1804 switch (errnum) {
1805 case SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX: {
1806 char bf[SBUILD_ID_SIZE + 15] = " with build id ";
1807 char *build_id_msg = NULL;
1808
1809 if (dso->has_build_id) {
1810 build_id__sprintf(&dso->bid, bf + 15);
1811 build_id_msg = bf;
1812 }
1813 scnprintf(buf, buflen,
1814 "No vmlinux file%s\nwas found in the path.\n\n"
1815 "Note that annotation using /proc/kcore requires CAP_SYS_RAWIO capability.\n\n"
1816 "Please use:\n\n"
1817 " perf buildid-cache -vu vmlinux\n\n"
1818 "or:\n\n"
1819 " --vmlinux vmlinux\n", build_id_msg ?: "");
1820 }
1821 break;
1822 case SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF:
1823 scnprintf(buf, buflen, "Please link with binutils's libopcode to enable BPF annotation");
1824 break;
1825 case SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP:
1826 scnprintf(buf, buflen, "Problems with arch specific instruction name regular expressions.");
1827 break;
1828 case SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING:
1829 scnprintf(buf, buflen, "Problems while parsing the CPUID in the arch specific initialization.");
1830 break;
1831 case SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE:
1832 scnprintf(buf, buflen, "Invalid BPF file: %s.", dso->long_name);
1833 break;
1834 case SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF:
1835 scnprintf(buf, buflen, "The %s BPF file has no BTF section, compile with -g or use pahole -J.",
1836 dso->long_name);
1837 break;
1838 default:
1839 scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum);
1840 break;
1841 }
1842
1843 return 0;
1844}
1845
1846static int dso__disassemble_filename(struct dso *dso, char *filename, size_t filename_size)
1847{
1848 char linkname[PATH_MAX];
1849 char *build_id_filename;
1850 char *build_id_path = NULL;
1851 char *pos;
1852 int len;
1853
1854 if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
1855 !dso__is_kcore(dso))
1856 return SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX;
1857
1858 build_id_filename = dso__build_id_filename(dso, NULL, 0, false);
1859 if (build_id_filename) {
1860 __symbol__join_symfs(filename, filename_size, build_id_filename);
1861 free(build_id_filename);
1862 } else {
1863 if (dso->has_build_id)
1864 return ENOMEM;
1865 goto fallback;
1866 }
1867
1868 build_id_path = strdup(filename);
1869 if (!build_id_path)
1870 return ENOMEM;
1871
1872 /*
1873 * The old style build-id cache has names of the form XX/XXXXXXX.. while
1874 * the new style has XX/XXXXXXX../{elf,kallsyms,vdso}.
1875 * Extract the build-id part of the dirname in the new style only.
1876 */
1877 pos = strrchr(build_id_path, '/');
1878 if (pos && strlen(pos) < SBUILD_ID_SIZE - 2)
1879 dirname(build_id_path);
1880
1881 if (dso__is_kcore(dso))
1882 goto fallback;
1883
1884 len = readlink(build_id_path, linkname, sizeof(linkname) - 1);
1885 if (len < 0)
1886 goto fallback;
1887
1888 linkname[len] = '\0';
1889 if (strstr(linkname, DSO__NAME_KALLSYMS) ||
1890 access(filename, R_OK)) {
1891fallback:
1892 /*
1893 * If we don't have build-ids or the build-id file isn't in the
1894 * cache, or is just a kallsyms file, well, let's hope that this
1895 * DSO is the same as when 'perf record' ran.
1896 */
1897 if (dso->kernel && dso->long_name[0] == '/')
1898 snprintf(filename, filename_size, "%s", dso->long_name);
1899 else
1900 __symbol__join_symfs(filename, filename_size, dso->long_name);
1901
1902 mutex_lock(&dso->lock);
1903 if (access(filename, R_OK) && errno == ENOENT && dso->nsinfo) {
1904 char *new_name = dso__filename_with_chroot(dso, filename);
1905 if (new_name) {
1906 strlcpy(filename, new_name, filename_size);
1907 free(new_name);
1908 }
1909 }
1910 mutex_unlock(&dso->lock);
1911 }
1912
1913 free(build_id_path);
1914 return 0;
1915}
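
/*
 * Editor's note: illustrative only, not part of the upstream file.  An old
 * style cache entry is the file itself, e.g. .build-id/ab/cdef...0123, while
 * a new style entry is a directory with a short final component, e.g.
 * .build-id/ab/cdef...0123/elf (or kallsyms, vdso).  The strrchr() check
 * above strips that final component only when it is much shorter than a
 * build-id, i.e. only in the new style layout.
 */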
1916
1917#if defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
1918#define PACKAGE "perf"
1919#include <bfd.h>
1920#include <dis-asm.h>
1921#include <bpf/bpf.h>
1922#include <bpf/btf.h>
1923#include <bpf/libbpf.h>
1924#include <linux/btf.h>
1925#include <tools/dis-asm-compat.h>
1926
1927static int symbol__disassemble_bpf(struct symbol *sym,
1928 struct annotate_args *args)
1929{
1930 struct annotation *notes = symbol__annotation(sym);
1931 struct bpf_prog_linfo *prog_linfo = NULL;
1932 struct bpf_prog_info_node *info_node;
1933 int len = sym->end - sym->start;
1934 disassembler_ftype disassemble;
1935 struct map *map = args->ms.map;
1936 struct perf_bpil *info_linear;
1937 struct disassemble_info info;
1938 struct dso *dso = map__dso(map);
1939 int pc = 0, count, sub_id;
1940 struct btf *btf = NULL;
1941 char tpath[PATH_MAX];
1942 size_t buf_size;
1943 int nr_skip = 0;
1944 char *buf;
1945 bfd *bfdf;
1946 int ret;
1947 FILE *s;
1948
1949 if (dso->binary_type != DSO_BINARY_TYPE__BPF_PROG_INFO)
1950 return SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE;
1951
1952 pr_debug("%s: handling sym %s addr %" PRIx64 " len %" PRIx64 "\n", __func__,
1953 sym->name, sym->start, sym->end - sym->start);
1954
1955 memset(tpath, 0, sizeof(tpath));
1956 perf_exe(tpath, sizeof(tpath));
1957
1958 bfdf = bfd_openr(tpath, NULL);
1959 if (bfdf == NULL)
1960 abort();
1961
1962 if (!bfd_check_format(bfdf, bfd_object))
1963 abort();
1964
1965 s = open_memstream(&buf, &buf_size);
1966 if (!s) {
1967 ret = errno;
1968 goto out;
1969 }
1970 init_disassemble_info_compat(&info, s,
1971 (fprintf_ftype) fprintf,
1972 fprintf_styled);
1973 info.arch = bfd_get_arch(bfdf);
1974 info.mach = bfd_get_mach(bfdf);
1975
1976 info_node = perf_env__find_bpf_prog_info(dso->bpf_prog.env,
1977 dso->bpf_prog.id);
1978 if (!info_node) {
1979 ret = SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF;
1980 goto out;
1981 }
1982 info_linear = info_node->info_linear;
1983 sub_id = dso->bpf_prog.sub_id;
1984
1985 info.buffer = (void *)(uintptr_t)(info_linear->info.jited_prog_insns);
1986 info.buffer_length = info_linear->info.jited_prog_len;
1987
1988 if (info_linear->info.nr_line_info)
1989 prog_linfo = bpf_prog_linfo__new(&info_linear->info);
1990
1991 if (info_linear->info.btf_id) {
1992 struct btf_node *node;
1993
1994 node = perf_env__find_btf(dso->bpf_prog.env,
1995 info_linear->info.btf_id);
1996 if (node)
1997 btf = btf__new((__u8 *)(node->data),
1998 node->data_size);
1999 }
2000
2001 disassemble_init_for_target(&info);
2002
2003#ifdef DISASM_FOUR_ARGS_SIGNATURE
2004 disassemble = disassembler(info.arch,
2005 bfd_big_endian(bfdf),
2006 info.mach,
2007 bfdf);
2008#else
2009 disassemble = disassembler(bfdf);
2010#endif
2011 if (disassemble == NULL)
2012 abort();
2013
2014 fflush(s);
2015 do {
2016 const struct bpf_line_info *linfo = NULL;
2017 struct disasm_line *dl;
2018 size_t prev_buf_size;
2019 const char *srcline;
2020 u64 addr;
2021
2022 addr = pc + ((u64 *)(uintptr_t)(info_linear->info.jited_ksyms))[sub_id];
2023 count = disassemble(pc, &info);
2024
2025 if (prog_linfo)
2026 linfo = bpf_prog_linfo__lfind_addr_func(prog_linfo,
2027 addr, sub_id,
2028 nr_skip);
2029
2030 if (linfo && btf) {
2031 srcline = btf__name_by_offset(btf, linfo->line_off);
2032 nr_skip++;
2033 } else
2034 srcline = NULL;
2035
2036 fprintf(s, "\n");
2037 prev_buf_size = buf_size;
2038 fflush(s);
2039
2040 if (!annotate_opts.hide_src_code && srcline) {
2041 args->offset = -1;
2042 args->line = strdup(srcline);
2043 args->line_nr = 0;
2044 args->fileloc = NULL;
2045 args->ms.sym = sym;
2046 dl = disasm_line__new(args);
2047 if (dl) {
2048 annotation_line__add(&dl->al,
2049 &notes->src->source);
2050 }
2051 }
2052
2053 args->offset = pc;
2054 args->line = buf + prev_buf_size;
2055 args->line_nr = 0;
2056 args->fileloc = NULL;
2057 args->ms.sym = sym;
2058 dl = disasm_line__new(args);
2059 if (dl)
2060 annotation_line__add(&dl->al, &notes->src->source);
2061
2062 pc += count;
2063 } while (count > 0 && pc < len);
2064
2065 ret = 0;
2066out:
2067 free(prog_linfo);
2068 btf__free(btf);
2069 fclose(s);
2070 bfd_close(bfdf);
2071 return ret;
2072}
2073#else // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
2074static int symbol__disassemble_bpf(struct symbol *sym __maybe_unused,
2075 struct annotate_args *args __maybe_unused)
2076{
2077 return SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF;
2078}
2079#endif // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
2080
2081static int
2082symbol__disassemble_bpf_image(struct symbol *sym,
2083 struct annotate_args *args)
2084{
2085 struct annotation *notes = symbol__annotation(sym);
2086 struct disasm_line *dl;
2087
2088 args->offset = -1;
2089 args->line = strdup("to be implemented");
2090 args->line_nr = 0;
2091 args->fileloc = NULL;
2092 dl = disasm_line__new(args);
2093 if (dl)
2094 annotation_line__add(&dl->al, &notes->src->source);
2095
2096 zfree(&args->line);
2097 return 0;
2098}
2099
2100/*
2101 * Possibly create a new version of 'line' with tabs expanded. Returns the
2102 * existing line or the new one; *storage is updated if a new line is
2103 * allocated. If allocation fails then NULL is returned.
2104 */
2105static char *expand_tabs(char *line, char **storage, size_t *storage_len)
2106{
2107 size_t i, src, dst, len, new_storage_len, num_tabs;
2108 char *new_line;
2109 size_t line_len = strlen(line);
2110
2111 for (num_tabs = 0, i = 0; i < line_len; i++)
2112 if (line[i] == '\t')
2113 num_tabs++;
2114
2115 if (num_tabs == 0)
2116 return line;
2117
2118 /*
2119 * Space for the line and '\0'; the caller has already trimmed leading and
2120 * trailing spaces. Each tab may introduce up to 7 additional spaces.
2121 */
2122 new_storage_len = line_len + 1 + (num_tabs * 7);
2123
2124 new_line = malloc(new_storage_len);
2125 if (new_line == NULL) {
2126 pr_err("Failure allocating memory for tab expansion\n");
2127 return NULL;
2128 }
2129
2130 /*
2131 * Copy regions starting at src and expand tabs. If there are two
2132 * adjacent tabs then 'src == i', the memcpy is of size 0 and the spaces
2133 * are inserted.
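 *
 * For example, a (hypothetical) line "mov\t%rax" becomes "mov     %rax":
 * one space is written for the tab and then padding is added until the
 * next multiple-of-8 column.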
2134 */
2135 for (i = 0, src = 0, dst = 0; i < line_len && num_tabs; i++) {
2136 if (line[i] == '\t') {
2137 len = i - src;
2138 memcpy(&new_line[dst], &line[src], len);
2139 dst += len;
2140 new_line[dst++] = ' ';
2141 while (dst % 8 != 0)
2142 new_line[dst++] = ' ';
2143 src = i + 1;
2144 num_tabs--;
2145 }
2146 }
2147
2148 /* Expand the last region. */
2149 len = line_len - src;
2150 memcpy(&new_line[dst], &line[src], len);
2151 dst += len;
2152 new_line[dst] = '\0';
2153
2154 free(*storage);
2155 *storage = new_line;
2156 *storage_len = new_storage_len;
2157 return new_line;
2158
2159}
2160
2161static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
2162{
2163 struct annotation_options *opts = &annotate_opts;
2164 struct map *map = args->ms.map;
2165 struct dso *dso = map__dso(map);
2166 char *command;
2167 FILE *file;
2168 char symfs_filename[PATH_MAX];
2169 struct kcore_extract kce;
2170 bool delete_extract = false;
2171 bool decomp = false;
2172 int lineno = 0;
2173 char *fileloc = NULL;
2174 int nline;
2175 char *line;
2176 size_t line_len;
2177 const char *objdump_argv[] = {
2178 "/bin/sh",
2179 "-c",
2180 NULL, /* Will be the objdump command to run. */
2181 "--",
2182 NULL, /* Will be the symfs path. */
2183 NULL,
2184 };
2185 struct child_process objdump_process;
2186 int err = dso__disassemble_filename(dso, symfs_filename, sizeof(symfs_filename));
2187
2188 if (err)
2189 return err;
2190
2191 pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
2192 symfs_filename, sym->name, map__unmap_ip(map, sym->start),
2193 map__unmap_ip(map, sym->end));
2194
2195 pr_debug("annotating [%p] %30s : [%p] %30s\n",
2196 dso, dso->long_name, sym, sym->name);
2197
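	/*
	 * BPF programs and images have no ELF file on disk for objdump to
	 * read, so they take the dedicated paths below; kcore and compressed
	 * kernel modules are first turned into a temporary extracted or
	 * decompressed file that objdump can open.
	 */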
2198 if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO) {
2199 return symbol__disassemble_bpf(sym, args);
2200 } else if (dso->binary_type == DSO_BINARY_TYPE__BPF_IMAGE) {
2201 return symbol__disassemble_bpf_image(sym, args);
2202 } else if (dso__is_kcore(dso)) {
2203 kce.kcore_filename = symfs_filename;
2204 kce.addr = map__rip_2objdump(map, sym->start);
2205 kce.offs = sym->start;
2206 kce.len = sym->end - sym->start;
2207 if (!kcore_extract__create(&kce)) {
2208 delete_extract = true;
2209 strlcpy(symfs_filename, kce.extract_filename,
2210 sizeof(symfs_filename));
2211 }
2212 } else if (dso__needs_decompress(dso)) {
2213 char tmp[KMOD_DECOMP_LEN];
2214
2215 if (dso__decompress_kmodule_path(dso, symfs_filename,
2216 tmp, sizeof(tmp)) < 0)
2217 return -1;
2218
2219 decomp = true;
2220 strcpy(symfs_filename, tmp);
2221 }
2222
2223 err = asprintf(&command,
2224 "%s %s%s --start-address=0x%016" PRIx64
2225 " --stop-address=0x%016" PRIx64
2226 " %s -d %s %s %s %c%s%c %s%s -C \"$1\"",
2227 opts->objdump_path ?: "objdump",
2228 opts->disassembler_style ? "-M " : "",
2229 opts->disassembler_style ?: "",
2230 map__rip_2objdump(map, sym->start),
2231 map__rip_2objdump(map, sym->end),
2232 opts->show_linenr ? "-l" : "",
2233 opts->show_asm_raw ? "" : "--no-show-raw-insn",
2234 opts->annotate_src ? "-S" : "",
2235 opts->prefix ? "--prefix " : "",
2236 opts->prefix ? '"' : ' ',
2237 opts->prefix ?: "",
2238 opts->prefix ? '"' : ' ',
2239 opts->prefix_strip ? "--prefix-strip=" : "",
2240 opts->prefix_strip ?: "");
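	/*
	 * With the default options the command built above looks roughly like
	 * (addresses elided):
	 *
	 *   objdump --start-address=0x... --stop-address=0x... \
	 *           -d --no-show-raw-insn -S -C "$1"
	 *
	 * It is run below as: /bin/sh -c '<command>' -- <symfs_filename>, so
	 * "$1" expands to the (possibly extracted or decompressed) object file.
	 */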
2241
2242 if (err < 0) {
2243 pr_err("Failure allocating memory for the command to run\n");
2244 goto out_remove_tmp;
2245 }
2246
2247 pr_debug("Executing: %s\n", command);
2248
2249 objdump_argv[2] = command;
2250 objdump_argv[4] = symfs_filename;
2251
2252 /* Create a pipe to read from for stdout */
2253 memset(&objdump_process, 0, sizeof(objdump_process));
2254 objdump_process.argv = objdump_argv;
2255 objdump_process.out = -1;
2256 objdump_process.err = -1;
2257 objdump_process.no_stderr = 1;
2258 if (start_command(&objdump_process)) {
2259 pr_err("Failure starting to run %s\n", command);
2260 err = -1;
2261 goto out_free_command;
2262 }
2263
2264 file = fdopen(objdump_process.out, "r");
2265 if (!file) {
2266 pr_err("Failure creating FILE stream for %s\n", command);
2267 /*
2268 * If we were using debug info, we should retry with the
2269 * original binary.
2270 */
2271 err = -1;
2272 goto out_close_stdout;
2273 }
2274
2275 /* Storage for getline. */
2276 line = NULL;
2277 line_len = 0;
2278
2279 nline = 0;
2280 while (!feof(file)) {
2281 const char *match;
2282 char *expanded_line;
2283
2284 if (getline(&line, &line_len, file) < 0 || !line)
2285 break;
2286
2287 /* Skip lines containing "filename:" */
2288 match = strstr(line, symfs_filename);
2289 if (match && match[strlen(symfs_filename)] == ':')
2290 continue;
2291
2292 expanded_line = strim(line);
2293 expanded_line = expand_tabs(expanded_line, &line, &line_len);
2294 if (!expanded_line)
2295 break;
2296
2297 /*
2298 * The source code line number (lineno) needs to be kept across
2299 * calls to symbol__parse_objdump_line(), so that it can be
2300 * associated with the instructions up to the next source line.
2301 * See disasm_line__new() and struct disasm_line::line_nr.
2302 */
2303 if (symbol__parse_objdump_line(sym, args, expanded_line,
2304 &lineno, &fileloc) < 0)
2305 break;
2306 nline++;
2307 }
2308 free(line);
2309 free(fileloc);
2310
2311 err = finish_command(&objdump_process);
2312 if (err)
2313 pr_err("Error running %s\n", command);
2314
2315 if (nline == 0) {
2316 err = -1;
2317 pr_err("No output from %s\n", command);
2318 }
2319
2320 /*
2321 * kallsyms does not have symbol sizes, so there may be a nop at the end.
2322 * Remove it.
2323 */
2324 if (dso__is_kcore(dso))
2325 delete_last_nop(sym);
2326
2327 fclose(file);
2328
2329out_close_stdout:
2330 close(objdump_process.out);
2331
2332out_free_command:
2333 free(command);
2334
2335out_remove_tmp:
2336 if (decomp)
2337 unlink(symfs_filename);
2338
2339 if (delete_extract)
2340 kcore_extract__delete(&kce);
2341
2342 return err;
2343}
2344
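/*
 * Sum the per-offset samples and periods in [offset, end) and convert them
 * to the supported percentages: "local" values are relative to this symbol's
 * histogram, "global" values to the evsel's non-filtered samples or total
 * period.
 */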
2345static void calc_percent(struct annotation *notes,
2346 struct evsel *evsel,
2347 struct annotation_data *data,
2348 s64 offset, s64 end)
2349{
2350 struct hists *hists = evsel__hists(evsel);
2351 int evidx = evsel->core.idx;
2352 struct sym_hist *sym_hist = annotation__histogram(notes, evidx);
2353 unsigned int hits = 0;
2354 u64 period = 0;
2355
2356 while (offset < end) {
2357 struct sym_hist_entry *entry;
2358
2359 entry = annotated_source__hist_entry(notes->src, evidx, offset);
2360 if (entry) {
2361 hits += entry->nr_samples;
2362 period += entry->period;
2363 }
2364 ++offset;
2365 }
2366
2367 if (sym_hist->nr_samples) {
2368 data->he.period = period;
2369 data->he.nr_samples = hits;
2370 data->percent[PERCENT_HITS_LOCAL] = 100.0 * hits / sym_hist->nr_samples;
2371 }
2372
2373 if (hists->stats.nr_non_filtered_samples)
2374 data->percent[PERCENT_HITS_GLOBAL] = 100.0 * hits / hists->stats.nr_non_filtered_samples;
2375
2376 if (sym_hist->period)
2377 data->percent[PERCENT_PERIOD_LOCAL] = 100.0 * period / sym_hist->period;
2378
2379 if (hists->stats.total_period)
2380 data->percent[PERCENT_PERIOD_GLOBAL] = 100.0 * period / hists->stats.total_period;
2381}
2382
2383static void annotation__calc_percent(struct annotation *notes,
2384 struct evsel *leader, s64 len)
2385{
2386 struct annotation_line *al, *next;
2387 struct evsel *evsel;
2388
2389 list_for_each_entry(al, &notes->src->source, node) {
2390 s64 end;
2391 int i = 0;
2392
2393 if (al->offset == -1)
2394 continue;
2395
2396 next = annotation_line__next(al, &notes->src->source);
2397 end = next ? next->offset : len;
2398
2399 for_each_group_evsel(evsel, leader) {
2400 struct annotation_data *data;
2401
2402 BUG_ON(i >= al->data_nr);
2403
2404 data = &al->data[i++];
2405
2406 calc_percent(notes, evsel, data, al->offset, end);
2407 }
2408 }
2409}
2410
2411void symbol__calc_percent(struct symbol *sym, struct evsel *evsel)
2412{
2413 struct annotation *notes = symbol__annotation(sym);
2414
2415 annotation__calc_percent(notes, evsel, symbol__size(sym));
2416}
2417
2418static int evsel__get_arch(struct evsel *evsel, struct arch **parch)
2419{
2420 struct perf_env *env = evsel__env(evsel);
2421 const char *arch_name = perf_env__arch(env);
2422 struct arch *arch;
2423 int err;
2424
2425 if (!arch_name)
2426 return errno;
2427
2428 *parch = arch = arch__find(arch_name);
2429 if (arch == NULL) {
2430 pr_err("%s: unsupported arch %s\n", __func__, arch_name);
2431 return ENOTSUP;
2432 }
2433
2434 if (arch->init) {
2435 err = arch->init(arch, env ? env->cpuid : NULL);
2436 if (err) {
2437 pr_err("%s: failed to initialize %s arch priv area\n",
2438 __func__, arch->name);
2439 return err;
2440 }
2441 }
2442 return 0;
2443}
2444
2445int symbol__annotate(struct map_symbol *ms, struct evsel *evsel,
2446 struct arch **parch)
2447{
2448 struct symbol *sym = ms->sym;
2449 struct annotation *notes = symbol__annotation(sym);
2450 struct annotate_args args = {
2451 .evsel = evsel,
2452 .options = &annotate_opts,
2453 };
2454 struct arch *arch = NULL;
2455 int err;
2456
2457 err = evsel__get_arch(evsel, &arch);
2458 if (err < 0)
2459 return err;
2460
2461 if (parch)
2462 *parch = arch;
2463
2464 args.arch = arch;
2465 args.ms = *ms;
2466 if (annotate_opts.full_addr)
2467 notes->start = map__objdump_2mem(ms->map, ms->sym->start);
2468 else
2469 notes->start = map__rip_2objdump(ms->map, ms->sym->start);
2470
2471 return symbol__disassemble(sym, &args);
2472}
2473
2474static void insert_source_line(struct rb_root *root, struct annotation_line *al)
2475{
2476 struct annotation_line *iter;
2477 struct rb_node **p = &root->rb_node;
2478 struct rb_node *parent = NULL;
2479 unsigned int percent_type = annotate_opts.percent_type;
2480 int i, ret;
2481
2482 while (*p != NULL) {
2483 parent = *p;
2484 iter = rb_entry(parent, struct annotation_line, rb_node);
2485
2486 ret = strcmp(iter->path, al->path);
2487 if (ret == 0) {
2488 for (i = 0; i < al->data_nr; i++) {
2489 iter->data[i].percent_sum += annotation_data__percent(&al->data[i],
2490 percent_type);
2491 }
2492 return;
2493 }
2494
2495 if (ret < 0)
2496 p = &(*p)->rb_left;
2497 else
2498 p = &(*p)->rb_right;
2499 }
2500
2501 for (i = 0; i < al->data_nr; i++) {
2502 al->data[i].percent_sum = annotation_data__percent(&al->data[i],
2503 percent_type);
2504 }
2505
2506 rb_link_node(&al->rb_node, parent, p);
2507 rb_insert_color(&al->rb_node, root);
2508}
2509
2510static int cmp_source_line(struct annotation_line *a, struct annotation_line *b)
2511{
2512 int i;
2513
2514 for (i = 0; i < a->data_nr; i++) {
2515 if (a->data[i].percent_sum == b->data[i].percent_sum)
2516 continue;
2517 return a->data[i].percent_sum > b->data[i].percent_sum;
2518 }
2519
2520 return 0;
2521}
2522
2523static void __resort_source_line(struct rb_root *root, struct annotation_line *al)
2524{
2525 struct annotation_line *iter;
2526 struct rb_node **p = &root->rb_node;
2527 struct rb_node *parent = NULL;
2528
2529 while (*p != NULL) {
2530 parent = *p;
2531 iter = rb_entry(parent, struct annotation_line, rb_node);
2532
2533 if (cmp_source_line(al, iter))
2534 p = &(*p)->rb_left;
2535 else
2536 p = &(*p)->rb_right;
2537 }
2538
2539 rb_link_node(&al->rb_node, parent, p);
2540 rb_insert_color(&al->rb_node, root);
2541}
2542
2543static void resort_source_line(struct rb_root *dest_root, struct rb_root *src_root)
2544{
2545 struct annotation_line *al;
2546 struct rb_node *node;
2547
2548 node = rb_first(src_root);
2549 while (node) {
2550 struct rb_node *next;
2551
2552 al = rb_entry(node, struct annotation_line, rb_node);
2553 next = rb_next(node);
2554 rb_erase(node, src_root);
2555
2556 __resort_source_line(dest_root, al);
2557 node = next;
2558 }
2559}
2560
2561static void print_summary(struct rb_root *root, const char *filename)
2562{
2563 struct annotation_line *al;
2564 struct rb_node *node;
2565
2566 printf("\nSorted summary for file %s\n", filename);
2567 printf("----------------------------------------------\n\n");
2568
2569 if (RB_EMPTY_ROOT(root)) {
2570 printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
2571 return;
2572 }
2573
2574 node = rb_first(root);
2575 while (node) {
2576 double percent, percent_max = 0.0;
2577 const char *color;
2578 char *path;
2579 int i;
2580
2581 al = rb_entry(node, struct annotation_line, rb_node);
2582 for (i = 0; i < al->data_nr; i++) {
2583 percent = al->data[i].percent_sum;
2584 color = get_percent_color(percent);
2585 color_fprintf(stdout, color, " %7.2f", percent);
2586
2587 if (percent > percent_max)
2588 percent_max = percent;
2589 }
2590
2591 path = al->path;
2592 color = get_percent_color(percent_max);
2593 color_fprintf(stdout, color, " %s\n", path);
2594
2595 node = rb_next(node);
2596 }
2597}
2598
2599static void symbol__annotate_hits(struct symbol *sym, struct evsel *evsel)
2600{
2601 int evidx = evsel->core.idx;
2602 struct annotation *notes = symbol__annotation(sym);
2603 struct sym_hist *h = annotation__histogram(notes, evidx);
2604 u64 len = symbol__size(sym), offset;
2605
2606 for (offset = 0; offset < len; ++offset) {
2607 struct sym_hist_entry *entry;
2608
2609 entry = annotated_source__hist_entry(notes->src, evidx, offset);
2610 if (entry && entry->nr_samples != 0)
2611 printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
2612 sym->start + offset, entry->nr_samples);
2613 }
2614 printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->nr_samples", h->nr_samples);
2615}
2616
2617static int annotated_source__addr_fmt_width(struct list_head *lines, u64 start)
2618{
2619 char bf[32];
2620 struct annotation_line *line;
2621
2622 list_for_each_entry_reverse(line, lines, node) {
2623 if (line->offset != -1)
2624 return scnprintf(bf, sizeof(bf), "%" PRIx64, start + line->offset);
2625 }
2626
2627 return 0;
2628}
2629
2630int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel)
2631{
2632 struct map *map = ms->map;
2633 struct symbol *sym = ms->sym;
2634 struct dso *dso = map__dso(map);
2635 char *filename;
2636 const char *d_filename;
2637 const char *evsel_name = evsel__name(evsel);
2638 struct annotation *notes = symbol__annotation(sym);
2639 struct sym_hist *h = annotation__histogram(notes, evsel->core.idx);
2640 struct annotation_line *pos, *queue = NULL;
2641 struct annotation_options *opts = &annotate_opts;
2642 u64 start = map__rip_2objdump(map, sym->start);
2643 int printed = 2, queue_len = 0, addr_fmt_width;
2644 int more = 0;
2645 bool context = opts->context;
2646 u64 len;
2647 int width = symbol_conf.show_total_period ? 12 : 8;
2648 int graph_dotted_len;
2649 char buf[512];
2650
2651 filename = strdup(dso->long_name);
2652 if (!filename)
2653 return -ENOMEM;
2654
2655 if (opts->full_path)
2656 d_filename = filename;
2657 else
2658 d_filename = basename(filename);
2659
2660 len = symbol__size(sym);
2661
2662 if (evsel__is_group_event(evsel)) {
2663 width *= evsel->core.nr_members;
2664 evsel__group_desc(evsel, buf, sizeof(buf));
2665 evsel_name = buf;
2666 }
2667
2668 graph_dotted_len = printf(" %-*.*s| Source code & Disassembly of %s for %s (%" PRIu64 " samples, "
2669 "percent: %s)\n",
2670 width, width, symbol_conf.show_total_period ? "Period" :
2671 symbol_conf.show_nr_samples ? "Samples" : "Percent",
2672 d_filename, evsel_name, h->nr_samples,
2673 percent_type_str(opts->percent_type));
2674
2675 printf("%-*.*s----\n",
2676 graph_dotted_len, graph_dotted_len, graph_dotted_line);
2677
2678 if (verbose > 0)
2679 symbol__annotate_hits(sym, evsel);
2680
2681 addr_fmt_width = annotated_source__addr_fmt_width(&notes->src->source, start);
2682
2683 list_for_each_entry(pos, &notes->src->source, node) {
2684 int err;
2685
2686 if (context && queue == NULL) {
2687 queue = pos;
2688 queue_len = 0;
2689 }
2690
2691 err = annotation_line__print(pos, sym, start, evsel, len,
2692 opts->min_pcnt, printed, opts->max_lines,
2693 queue, addr_fmt_width, opts->percent_type);
2694
2695 switch (err) {
2696 case 0:
2697 ++printed;
2698 if (context) {
2699 printed += queue_len;
2700 queue = NULL;
2701 queue_len = 0;
2702 }
2703 break;
2704 case 1:
2705 /* filtered by max_lines */
2706 ++more;
2707 break;
2708 case -1:
2709 default:
2710 /*
2711 * Filtered by min_pcnt or non IP lines when
2712 * context != 0
2713 */
2714 if (!context)
2715 break;
2716 if (queue_len == context)
2717 queue = list_entry(queue->node.next, typeof(*queue), node);
2718 else
2719 ++queue_len;
2720 break;
2721 }
2722 }
2723
2724 free(filename);
2725
2726 return more;
2727}
2728
2729static void FILE__set_percent_color(void *fp __maybe_unused,
2730 double percent __maybe_unused,
2731 bool current __maybe_unused)
2732{
2733}
2734
2735static int FILE__set_jumps_percent_color(void *fp __maybe_unused,
2736 int nr __maybe_unused, bool current __maybe_unused)
2737{
2738 return 0;
2739}
2740
2741static int FILE__set_color(void *fp __maybe_unused, int color __maybe_unused)
2742{
2743 return 0;
2744}
2745
2746static void FILE__printf(void *fp, const char *fmt, ...)
2747{
2748 va_list args;
2749
2750 va_start(args, fmt);
2751 vfprintf(fp, fmt, args);
2752 va_end(args);
2753}
2754
2755static void FILE__write_graph(void *fp, int graph)
2756{
2757 const char *s;
2758 switch (graph) {
2759
2760 case DARROW_CHAR: s = "↓"; break;
2761 case UARROW_CHAR: s = "↑"; break;
2762 case LARROW_CHAR: s = "←"; break;
2763 case RARROW_CHAR: s = "→"; break;
2764 default: s = "?"; break;
2765 }
2766
2767 fputs(s, fp);
2768}
2769
2770static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp)
2771{
2772 struct annotation *notes = symbol__annotation(sym);
2773 struct annotation_write_ops wops = {
2774 .first_line = true,
2775 .obj = fp,
2776 .set_color = FILE__set_color,
2777 .set_percent_color = FILE__set_percent_color,
2778 .set_jumps_percent_color = FILE__set_jumps_percent_color,
2779 .printf = FILE__printf,
2780 .write_graph = FILE__write_graph,
2781 };
2782 struct annotation_line *al;
2783
2784 list_for_each_entry(al, &notes->src->source, node) {
2785 if (annotation_line__filter(al))
2786 continue;
2787 annotation_line__write(al, notes, &wops);
2788 fputc('\n', fp);
2789 wops.first_line = false;
2790 }
2791
2792 return 0;
2793}
2794
2795int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel)
2796{
2797 const char *ev_name = evsel__name(evsel);
2798 char buf[1024];
2799 char *filename;
2800 int err = -1;
2801 FILE *fp;
2802
2803 if (asprintf(&filename, "%s.annotation", ms->sym->name) < 0)
2804 return -1;
2805
2806 fp = fopen(filename, "w");
2807 if (fp == NULL)
2808 goto out_free_filename;
2809
2810 if (evsel__is_group_event(evsel)) {
2811 evsel__group_desc(evsel, buf, sizeof(buf));
2812 ev_name = buf;
2813 }
2814
2815 fprintf(fp, "%s() %s\nEvent: %s\n\n",
2816 ms->sym->name, map__dso(ms->map)->long_name, ev_name);
2817 symbol__annotate_fprintf2(ms->sym, fp);
2818
2819 fclose(fp);
2820 err = 0;
2821out_free_filename:
2822 free(filename);
2823 return err;
2824}
2825
2826void symbol__annotate_zero_histogram(struct symbol *sym, int evidx)
2827{
2828 struct annotation *notes = symbol__annotation(sym);
2829 struct sym_hist *h = annotation__histogram(notes, evidx);
2830
2831 memset(h, 0, sizeof(*notes->src->histograms) * notes->src->nr_histograms);
2832}
2833
2834void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
2835{
2836 struct annotation *notes = symbol__annotation(sym);
2837 struct sym_hist *h = annotation__histogram(notes, evidx);
2838 int len = symbol__size(sym), offset;
2839
2840 h->nr_samples = 0;
2841 for (offset = 0; offset < len; ++offset) {
2842 struct sym_hist_entry *entry;
2843
2844 entry = annotated_source__hist_entry(notes->src, evidx, offset);
2845 if (entry == NULL)
2846 continue;
2847
2848 entry->nr_samples = entry->nr_samples * 7 / 8;
2849 h->nr_samples += entry->nr_samples;
2850 }
2851}
2852
2853void annotated_source__purge(struct annotated_source *as)
2854{
2855 struct annotation_line *al, *n;
2856
2857 list_for_each_entry_safe(al, n, &as->source, node) {
2858 list_del_init(&al->node);
2859 disasm_line__free(disasm_line(al));
2860 }
2861}
2862
2863static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp)
2864{
2865 size_t printed;
2866
2867 if (dl->al.offset == -1)
2868 return fprintf(fp, "%s\n", dl->al.line);
2869
2870 printed = fprintf(fp, "%#" PRIx64 " %s", dl->al.offset, dl->ins.name);
2871
2872 if (dl->ops.raw[0] != '\0') {
2873 printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ",
2874 dl->ops.raw);
2875 }
2876
2877 return printed + fprintf(fp, "\n");
2878}
2879
2880size_t disasm__fprintf(struct list_head *head, FILE *fp)
2881{
2882 struct disasm_line *pos;
2883 size_t printed = 0;
2884
2885 list_for_each_entry(pos, head, al.node)
2886 printed += disasm_line__fprintf(pos, fp);
2887
2888 return printed;
2889}
2890
2891bool disasm_line__is_valid_local_jump(struct disasm_line *dl, struct symbol *sym)
2892{
2893 if (!dl || !dl->ins.ops || !ins__is_jump(&dl->ins) ||
2894 !disasm_line__has_local_offset(dl) || dl->ops.target.offset < 0 ||
2895 dl->ops.target.offset >= (s64)symbol__size(sym))
2896 return false;
2897
2898 return true;
2899}
2900
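/*
 * Count, for each instruction in the symbol, how many local jump instructions
 * target it; the maximum is kept so the jump-sources column can be sized.
 */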
2901void annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym)
2902{
2903 u64 offset, size = symbol__size(sym);
2904
2905 /* PLT symbols contain external offsets */
2906 if (strstr(sym->name, "@plt"))
2907 return;
2908
2909 for (offset = 0; offset < size; ++offset) {
2910 struct annotation_line *al = notes->src->offsets[offset];
2911 struct disasm_line *dl;
2912
2913 dl = disasm_line(al);
2914
2915 if (!disasm_line__is_valid_local_jump(dl, sym))
2916 continue;
2917
2918 al = notes->src->offsets[dl->ops.target.offset];
2919
2920 /*
2921 * FIXME: Oops, no jump target? Buggy disassembler? Or do we
2922 * have to adjust to the previous offset?
2923 */
2924 if (al == NULL)
2925 continue;
2926
2927 if (++al->jump_sources > notes->max_jump_sources)
2928 notes->max_jump_sources = al->jump_sources;
2929 }
2930}
2931
2932void annotation__set_offsets(struct annotation *notes, s64 size)
2933{
2934 struct annotation_line *al;
2935 struct annotated_source *src = notes->src;
2936
2937 src->max_line_len = 0;
2938 src->nr_entries = 0;
2939 src->nr_asm_entries = 0;
2940
2941 list_for_each_entry(al, &src->source, node) {
2942 size_t line_len = strlen(al->line);
2943
2944 if (src->max_line_len < line_len)
2945 src->max_line_len = line_len;
2946 al->idx = src->nr_entries++;
2947 if (al->offset != -1) {
2948 al->idx_asm = src->nr_asm_entries++;
2949 /*
2950 * FIXME: short term bandaid to cope with assembly
2951 * routines that come with labels in the same column
2952 * as the address in objdump, sigh.
2953 *
2954 * E.g. copy_user_generic_unrolled
2955 */
2956 if (al->offset < size)
2957 notes->src->offsets[al->offset] = al;
2958 } else
2959 al->idx_asm = -1;
2960 }
2961}
2962
2963static inline int width_jumps(int n)
2964{
2965 if (n >= 100)
2966 return 5;
2967 if (n / 10)
2968 return 2;
2969 return 1;
2970}
2971
2972static int annotation__max_ins_name(struct annotation *notes)
2973{
2974 int max_name = 0, len;
2975 struct annotation_line *al;
2976
2977 list_for_each_entry(al, &notes->src->source, node) {
2978 if (al->offset == -1)
2979 continue;
2980
2981 len = strlen(disasm_line(al)->ins.name);
2982 if (max_name < len)
2983 max_name = len;
2984 }
2985
2986 return max_name;
2987}
2988
2989void annotation__init_column_widths(struct annotation *notes, struct symbol *sym)
2990{
2991 notes->widths.addr = notes->widths.target =
2992 notes->widths.min_addr = hex_width(symbol__size(sym));
2993 notes->widths.max_addr = hex_width(sym->end);
2994 notes->widths.jumps = width_jumps(notes->max_jump_sources);
2995 notes->widths.max_ins_name = annotation__max_ins_name(notes);
2996}
2997
2998void annotation__update_column_widths(struct annotation *notes)
2999{
3000 if (annotate_opts.use_offset)
3001 notes->widths.target = notes->widths.min_addr;
3002 else if (annotate_opts.full_addr)
3003 notes->widths.target = BITS_PER_LONG / 4;
3004 else
3005 notes->widths.target = notes->widths.max_addr;
3006
3007 notes->widths.addr = notes->widths.target;
3008
3009 if (annotate_opts.show_nr_jumps)
3010 notes->widths.addr += notes->widths.jumps + 1;
3011}
3012
3013void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *ms)
3014{
3015 annotate_opts.full_addr = !annotate_opts.full_addr;
3016
3017 if (annotate_opts.full_addr)
3018 notes->start = map__objdump_2mem(ms->map, ms->sym->start);
3019 else
3020 notes->start = map__rip_2objdump(ms->map, ms->sym->start);
3021
3022 annotation__update_column_widths(notes);
3023}
3024
3025static void annotation__calc_lines(struct annotation *notes, struct map *map,
3026 struct rb_root *root)
3027{
3028 struct annotation_line *al;
3029 struct rb_root tmp_root = RB_ROOT;
3030
3031 list_for_each_entry(al, &notes->src->source, node) {
3032 double percent_max = 0.0;
3033 int i;
3034
3035 for (i = 0; i < al->data_nr; i++) {
3036 double percent;
3037
3038 percent = annotation_data__percent(&al->data[i],
3039 annotate_opts.percent_type);
3040
3041 if (percent > percent_max)
3042 percent_max = percent;
3043 }
3044
3045 if (percent_max <= 0.5)
3046 continue;
3047
3048 al->path = get_srcline(map__dso(map), notes->start + al->offset, NULL,
3049 false, true, notes->start + al->offset);
3050 insert_source_line(&tmp_root, al);
3051 }
3052
3053 resort_source_line(root, &tmp_root);
3054}
3055
3056static void symbol__calc_lines(struct map_symbol *ms, struct rb_root *root)
3057{
3058 struct annotation *notes = symbol__annotation(ms->sym);
3059
3060 annotation__calc_lines(notes, ms->map, root);
3061}
3062
3063int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel)
3064{
3065 struct dso *dso = map__dso(ms->map);
3066 struct symbol *sym = ms->sym;
3067 struct rb_root source_line = RB_ROOT;
3068 struct hists *hists = evsel__hists(evsel);
3069 char buf[1024];
3070 int err;
3071
3072 err = symbol__annotate2(ms, evsel, NULL);
3073 if (err) {
3074 char msg[BUFSIZ];
3075
3076 dso->annotate_warned = true;
3077 symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
3078 ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
3079 return -1;
3080 }
3081
3082 if (annotate_opts.print_lines) {
3083 srcline_full_filename = annotate_opts.full_path;
3084 symbol__calc_lines(ms, &source_line);
3085 print_summary(&source_line, dso->long_name);
3086 }
3087
3088 hists__scnprintf_title(hists, buf, sizeof(buf));
3089 fprintf(stdout, "%s, [percent: %s]\n%s() %s\n",
3090 buf, percent_type_str(annotate_opts.percent_type), sym->name,
3091 dso->long_name);
3092 symbol__annotate_fprintf2(sym, stdout);
3093
3094 annotated_source__purge(symbol__annotation(sym)->src);
3095
3096 return 0;
3097}
3098
3099int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel)
3100{
3101 struct dso *dso = map__dso(ms->map);
3102 struct symbol *sym = ms->sym;
3103 struct rb_root source_line = RB_ROOT;
3104 int err;
3105
3106 err = symbol__annotate(ms, evsel, NULL);
3107 if (err) {
3108 char msg[BUFSIZ];
3109
3110 dso->annotate_warned = true;
3111 symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
3112 ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
3113 return -1;
3114 }
3115
3116 symbol__calc_percent(sym, evsel);
3117
3118 if (annotate_opts.print_lines) {
3119 srcline_full_filename = annotate_opts.full_path;
3120 symbol__calc_lines(ms, &source_line);
3121 print_summary(&source_line, dso->long_name);
3122 }
3123
3124 symbol__annotate_printf(ms, evsel);
3125
3126 annotated_source__purge(symbol__annotation(sym)->src);
3127
3128 return 0;
3129}
3130
3131bool ui__has_annotation(void)
3132{
3133 return use_browser == 1 && perf_hpp_list.sym;
3134}
3135
3136
3137static double annotation_line__max_percent(struct annotation_line *al,
3138 struct annotation *notes,
3139 unsigned int percent_type)
3140{
3141 double percent_max = 0.0;
3142 int i;
3143
3144 for (i = 0; i < notes->nr_events; i++) {
3145 double percent;
3146
3147 percent = annotation_data__percent(&al->data[i],
3148 percent_type);
3149
3150 if (percent > percent_max)
3151 percent_max = percent;
3152 }
3153
3154 return percent_max;
3155}
3156
3157static void disasm_line__write(struct disasm_line *dl, struct annotation *notes,
3158 void *obj, char *bf, size_t size,
3159 void (*obj__printf)(void *obj, const char *fmt, ...),
3160 void (*obj__write_graph)(void *obj, int graph))
3161{
3162 if (dl->ins.ops && dl->ins.ops->scnprintf) {
3163 if (ins__is_jump(&dl->ins)) {
3164 bool fwd;
3165
3166 if (dl->ops.target.outside)
3167 goto call_like;
3168 fwd = dl->ops.target.offset > dl->al.offset;
3169 obj__write_graph(obj, fwd ? DARROW_CHAR : UARROW_CHAR);
3170 obj__printf(obj, " ");
3171 } else if (ins__is_call(&dl->ins)) {
3172call_like:
3173 obj__write_graph(obj, RARROW_CHAR);
3174 obj__printf(obj, " ");
3175 } else if (ins__is_ret(&dl->ins)) {
3176 obj__write_graph(obj, LARROW_CHAR);
3177 obj__printf(obj, " ");
3178 } else {
3179 obj__printf(obj, " ");
3180 }
3181 } else {
3182 obj__printf(obj, " ");
3183 }
3184
3185 disasm_line__scnprintf(dl, bf, size, !annotate_opts.use_offset, notes->widths.max_ins_name);
3186}
3187
3188static void ipc_coverage_string(char *bf, int size, struct annotation *notes)
3189{
3190 double ipc = 0.0, coverage = 0.0;
3191 struct annotated_branch *branch = annotation__get_branch(notes);
3192
3193 if (branch && branch->hit_cycles)
3194 ipc = branch->hit_insn / ((double)branch->hit_cycles);
3195
3196 if (branch && branch->total_insn) {
3197 coverage = branch->cover_insn * 100.0 /
3198 ((double)branch->total_insn);
3199 }
3200
3201 scnprintf(bf, size, "(Average IPC: %.2f, IPC Coverage: %.1f%%)",
3202 ipc, coverage);
3203}
3204
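/*
 * Render a single annotation line: the percent/period/samples columns (one
 * per event), optional IPC and cycles columns when branch data is present,
 * and finally either the source line or the address/offset followed by the
 * disassembled instruction.
 */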
3205static void __annotation_line__write(struct annotation_line *al, struct annotation *notes,
3206 bool first_line, bool current_entry, bool change_color, int width,
3207 void *obj, unsigned int percent_type,
3208 int (*obj__set_color)(void *obj, int color),
3209 void (*obj__set_percent_color)(void *obj, double percent, bool current),
3210 int (*obj__set_jumps_percent_color)(void *obj, int nr, bool current),
3211 void (*obj__printf)(void *obj, const char *fmt, ...),
3212 void (*obj__write_graph)(void *obj, int graph))
3213
3214{
3215 double percent_max = annotation_line__max_percent(al, notes, percent_type);
3216 int pcnt_width = annotation__pcnt_width(notes),
3217 cycles_width = annotation__cycles_width(notes);
3218 bool show_title = false;
3219 char bf[256];
3220 int printed;
3221
3222 if (first_line && (al->offset == -1 || percent_max == 0.0)) {
3223 if (notes->branch && al->cycles) {
3224 if (al->cycles->ipc == 0.0 && al->cycles->avg == 0)
3225 show_title = true;
3226 } else
3227 show_title = true;
3228 }
3229
3230 if (al->offset != -1 && percent_max != 0.0) {
3231 int i;
3232
3233 for (i = 0; i < notes->nr_events; i++) {
3234 double percent;
3235
3236 percent = annotation_data__percent(&al->data[i], percent_type);
3237
3238 obj__set_percent_color(obj, percent, current_entry);
3239 if (symbol_conf.show_total_period) {
3240 obj__printf(obj, "%11" PRIu64 " ", al->data[i].he.period);
3241 } else if (symbol_conf.show_nr_samples) {
3242 obj__printf(obj, "%6" PRIu64 " ",
3243 al->data[i].he.nr_samples);
3244 } else {
3245 obj__printf(obj, "%6.2f ", percent);
3246 }
3247 }
3248 } else {
3249 obj__set_percent_color(obj, 0, current_entry);
3250
3251 if (!show_title)
3252 obj__printf(obj, "%-*s", pcnt_width, " ");
3253 else {
3254 obj__printf(obj, "%-*s", pcnt_width,
3255 symbol_conf.show_total_period ? "Period" :
3256 symbol_conf.show_nr_samples ? "Samples" : "Percent");
3257 }
3258 }
3259
3260 if (notes->branch) {
3261 if (al->cycles && al->cycles->ipc)
3262 obj__printf(obj, "%*.2f ", ANNOTATION__IPC_WIDTH - 1, al->cycles->ipc);
3263 else if (!show_title)
3264 obj__printf(obj, "%*s", ANNOTATION__IPC_WIDTH, " ");
3265 else
3266 obj__printf(obj, "%*s ", ANNOTATION__IPC_WIDTH - 1, "IPC");
3267
3268 if (!annotate_opts.show_minmax_cycle) {
3269 if (al->cycles && al->cycles->avg)
3270 obj__printf(obj, "%*" PRIu64 " ",
3271 ANNOTATION__CYCLES_WIDTH - 1, al->cycles->avg);
3272 else if (!show_title)
3273 obj__printf(obj, "%*s",
3274 ANNOTATION__CYCLES_WIDTH, " ");
3275 else
3276 obj__printf(obj, "%*s ",
3277 ANNOTATION__CYCLES_WIDTH - 1,
3278 "Cycle");
3279 } else {
3280 if (al->cycles) {
3281 char str[32];
3282
3283 scnprintf(str, sizeof(str),
3284 "%" PRIu64 "(%" PRIu64 "/%" PRIu64 ")",
3285 al->cycles->avg, al->cycles->min,
3286 al->cycles->max);
3287
3288 obj__printf(obj, "%*s ",
3289 ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
3290 str);
3291 } else if (!show_title)
3292 obj__printf(obj, "%*s",
3293 ANNOTATION__MINMAX_CYCLES_WIDTH,
3294 " ");
3295 else
3296 obj__printf(obj, "%*s ",
3297 ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
3298 "Cycle(min/max)");
3299 }
3300
3301 if (show_title && !*al->line) {
3302 ipc_coverage_string(bf, sizeof(bf), notes);
3303 obj__printf(obj, "%*s", ANNOTATION__AVG_IPC_WIDTH, bf);
3304 }
3305 }
3306
3307 obj__printf(obj, " ");
3308
3309 if (!*al->line)
3310 obj__printf(obj, "%-*s", width - pcnt_width - cycles_width, " ");
3311 else if (al->offset == -1) {
3312 if (al->line_nr && annotate_opts.show_linenr)
3313 printed = scnprintf(bf, sizeof(bf), "%-*d ", notes->widths.addr + 1, al->line_nr);
3314 else
3315 printed = scnprintf(bf, sizeof(bf), "%-*s ", notes->widths.addr, " ");
3316 obj__printf(obj, bf);
3317 obj__printf(obj, "%-*s", width - printed - pcnt_width - cycles_width + 1, al->line);
3318 } else {
3319 u64 addr = al->offset;
3320 int color = -1;
3321
3322 if (!annotate_opts.use_offset)
3323 addr += notes->start;
3324
3325 if (!annotate_opts.use_offset) {
3326 printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr);
3327 } else {
3328 if (al->jump_sources &&
3329 annotate_opts.offset_level >= ANNOTATION__OFFSET_JUMP_TARGETS) {
3330 if (annotate_opts.show_nr_jumps) {
3331 int prev;
3332 printed = scnprintf(bf, sizeof(bf), "%*d ",
3333 notes->widths.jumps,
3334 al->jump_sources);
3335 prev = obj__set_jumps_percent_color(obj, al->jump_sources,
3336 current_entry);
3337 obj__printf(obj, bf);
3338 obj__set_color(obj, prev);
3339 }
3340print_addr:
3341 printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ",
3342 notes->widths.target, addr);
3343 } else if (ins__is_call(&disasm_line(al)->ins) &&
3344 annotate_opts.offset_level >= ANNOTATION__OFFSET_CALL) {
3345 goto print_addr;
3346 } else if (annotate_opts.offset_level == ANNOTATION__MAX_OFFSET_LEVEL) {
3347 goto print_addr;
3348 } else {
3349 printed = scnprintf(bf, sizeof(bf), "%-*s ",
3350 notes->widths.addr, " ");
3351 }
3352 }
3353
3354 if (change_color)
3355 color = obj__set_color(obj, HE_COLORSET_ADDR);
3356 obj__printf(obj, bf);
3357 if (change_color)
3358 obj__set_color(obj, color);
3359
3360 disasm_line__write(disasm_line(al), notes, obj, bf, sizeof(bf), obj__printf, obj__write_graph);
3361
3362 obj__printf(obj, "%-*s", width - pcnt_width - cycles_width - 3 - printed, bf);
3363 }
3364
3365}
3366
3367void annotation_line__write(struct annotation_line *al, struct annotation *notes,
3368 struct annotation_write_ops *wops)
3369{
3370 __annotation_line__write(al, notes, wops->first_line, wops->current_entry,
3371 wops->change_color, wops->width, wops->obj,
3372 annotate_opts.percent_type,
3373 wops->set_color, wops->set_percent_color,
3374 wops->set_jumps_percent_color, wops->printf,
3375 wops->write_graph);
3376}
3377
3378int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel,
3379 struct arch **parch)
3380{
3381 struct symbol *sym = ms->sym;
3382 struct annotation *notes = symbol__annotation(sym);
3383 size_t size = symbol__size(sym);
3384 int nr_pcnt = 1, err;
3385
3386 notes->src->offsets = zalloc(size * sizeof(struct annotation_line *));
3387 if (notes->src->offsets == NULL)
3388 return ENOMEM;
3389
3390 if (evsel__is_group_event(evsel))
3391 nr_pcnt = evsel->core.nr_members;
3392
3393 err = symbol__annotate(ms, evsel, parch);
3394 if (err)
3395 goto out_free_offsets;
3396
3397 symbol__calc_percent(sym, evsel);
3398
3399 annotation__set_offsets(notes, size);
3400 annotation__mark_jump_targets(notes, sym);
3401
3402 err = annotation__compute_ipc(notes, size);
3403 if (err)
3404 goto out_free_offsets;
3405
3406 annotation__init_column_widths(notes, sym);
3407 notes->nr_events = nr_pcnt;
3408
3409 annotation__update_column_widths(notes);
3410 sym->annotate2 = 1;
3411
3412 return 0;
3413
3414out_free_offsets:
3415 zfree(&notes->src->offsets);
3416 return err;
3417}
3418
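/*
 * Parse the "annotate." section of perfconfig, e.g. (illustrative values):
 *
 *   [annotate]
 *      hide_src_code = false
 *      use_offset = true
 *      objdump = /usr/bin/objdump
 */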
3419static int annotation__config(const char *var, const char *value, void *data)
3420{
3421 struct annotation_options *opt = data;
3422
3423 if (!strstarts(var, "annotate."))
3424 return 0;
3425
3426 if (!strcmp(var, "annotate.offset_level")) {
3427 perf_config_u8(&opt->offset_level, "offset_level", value);
3428
3429 if (opt->offset_level > ANNOTATION__MAX_OFFSET_LEVEL)
3430 opt->offset_level = ANNOTATION__MAX_OFFSET_LEVEL;
3431 else if (opt->offset_level < ANNOTATION__MIN_OFFSET_LEVEL)
3432 opt->offset_level = ANNOTATION__MIN_OFFSET_LEVEL;
3433 } else if (!strcmp(var, "annotate.hide_src_code")) {
3434 opt->hide_src_code = perf_config_bool("hide_src_code", value);
3435 } else if (!strcmp(var, "annotate.jump_arrows")) {
3436 opt->jump_arrows = perf_config_bool("jump_arrows", value);
3437 } else if (!strcmp(var, "annotate.show_linenr")) {
3438 opt->show_linenr = perf_config_bool("show_linenr", value);
3439 } else if (!strcmp(var, "annotate.show_nr_jumps")) {
3440 opt->show_nr_jumps = perf_config_bool("show_nr_jumps", value);
3441 } else if (!strcmp(var, "annotate.show_nr_samples")) {
3442 symbol_conf.show_nr_samples = perf_config_bool("show_nr_samples",
3443 value);
3444 } else if (!strcmp(var, "annotate.show_total_period")) {
3445 symbol_conf.show_total_period = perf_config_bool("show_total_period",
3446 value);
3447 } else if (!strcmp(var, "annotate.use_offset")) {
3448 opt->use_offset = perf_config_bool("use_offset", value);
3449 } else if (!strcmp(var, "annotate.disassembler_style")) {
3450 opt->disassembler_style = strdup(value);
3451 if (!opt->disassembler_style) {
3452 pr_err("Not enough memory for annotate.disassembler_style\n");
3453 return -1;
3454 }
3455 } else if (!strcmp(var, "annotate.objdump")) {
3456 opt->objdump_path = strdup(value);
3457 if (!opt->objdump_path) {
3458 pr_err("Not enough memory for annotate.objdump\n");
3459 return -1;
3460 }
3461 } else if (!strcmp(var, "annotate.addr2line")) {
3462 symbol_conf.addr2line_path = strdup(value);
3463 if (!symbol_conf.addr2line_path) {
3464 pr_err("Not enough memory for annotate.addr2line\n");
3465 return -1;
3466 }
3467 } else if (!strcmp(var, "annotate.demangle")) {
3468 symbol_conf.demangle = perf_config_bool("demangle", value);
3469 } else if (!strcmp(var, "annotate.demangle_kernel")) {
3470 symbol_conf.demangle_kernel = perf_config_bool("demangle_kernel", value);
3471 } else {
3472 pr_debug("%s variable unknown, ignoring...", var);
3473 }
3474
3475 return 0;
3476}
3477
3478void annotation_options__init(void)
3479{
3480 struct annotation_options *opt = &annotate_opts;
3481
3482 memset(opt, 0, sizeof(*opt));
3483
3484 /* Default values. */
3485 opt->use_offset = true;
3486 opt->jump_arrows = true;
3487 opt->annotate_src = true;
3488 opt->offset_level = ANNOTATION__OFFSET_JUMP_TARGETS;
3489 opt->percent_type = PERCENT_PERIOD_LOCAL;
3490}
3491
3492void annotation_options__exit(void)
3493{
3494 zfree(&annotate_opts.disassembler_style);
3495 zfree(&annotate_opts.objdump_path);
3496}
3497
3498void annotation_config__init(void)
3499{
3500 perf_config(annotation__config, &annotate_opts);
3501}
3502
3503static unsigned int parse_percent_type(char *str1, char *str2)
3504{
3505 unsigned int type = (unsigned int) -1;
3506
3507 if (!strcmp("period", str1)) {
3508 if (!strcmp("local", str2))
3509 type = PERCENT_PERIOD_LOCAL;
3510 else if (!strcmp("global", str2))
3511 type = PERCENT_PERIOD_GLOBAL;
3512 }
3513
3514 if (!strcmp("hits", str1)) {
3515 if (!strcmp("local", str2))
3516 type = PERCENT_HITS_LOCAL;
3517 else if (!strcmp("global", str2))
3518 type = PERCENT_HITS_GLOBAL;
3519 }
3520
3521 return type;
3522}
3523
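/*
 * Parse a percent type string (e.g. from --percent-type) of the form
 * "<a>-<b>", accepted in either order: "local-period", "period-local",
 * "global-hits", "hits-global", etc.
 */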
3524int annotate_parse_percent_type(const struct option *opt __maybe_unused, const char *_str,
3525 int unset __maybe_unused)
3526{
3527 unsigned int type;
3528 char *str1, *str2;
3529 int err = -1;
3530
3531 str1 = strdup(_str);
3532 if (!str1)
3533 return -ENOMEM;
3534
3535 str2 = strchr(str1, '-');
3536 if (!str2)
3537 goto out;
3538
3539 *str2++ = 0;
3540
3541 type = parse_percent_type(str1, str2);
3542 if (type == (unsigned int) -1)
3543 type = parse_percent_type(str2, str1);
3544 if (type != (unsigned int) -1) {
3545 annotate_opts.percent_type = type;
3546 err = 0;
3547 }
3548
3549out:
3550 free(str1);
3551 return err;
3552}
3553
3554int annotate_check_args(void)
3555{
3556 struct annotation_options *args = &annotate_opts;
3557
3558 if (args->prefix_strip && !args->prefix) {
3559 pr_err("--prefix-strip requires --prefix\n");
3560 return -1;
3561 }
3562 return 0;
3563}
3564
3565/*
3566 * Get register number and access offset from the given instruction.
3567 * It assumes the AT&T x86 asm format, i.e. OFFSET(REG). The format may
3568 * need to be revisited when other architectures are handled.
3569 * Fills the register and offset in @op_loc and returns 0 on success.
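 * E.g. for "0x10(%rax,%rbx,8)" (x86 AT&T syntax), it stores offset 0x10 and
 * the DWARF register number of %rax in reg1; when multi_regs is set, reg2 is
 * taken from %rbx.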
3570 */
3571static int extract_reg_offset(struct arch *arch, const char *str,
3572 struct annotated_op_loc *op_loc)
3573{
3574 char *p;
3575 char *regname;
3576
3577 if (arch->objdump.register_char == 0)
3578 return -1;
3579
3580 /*
3581 * The operand should start with the offset, but a zero offset can be
3582 * omitted in the asm, so 0(%rax) is the same as (%rax).
3583 *
3584 * However, it can also start with a segment selector register like
3585 * %gs:0x18(%rbx); in that case the selector part should be skipped.
3586 */
3587 if (*str == arch->objdump.register_char) {
3588 while (*str && !isdigit(*str) &&
3589 *str != arch->objdump.memory_ref_char)
3590 str++;
3591 }
3592
3593 op_loc->offset = strtol(str, &p, 0);
3594
3595 p = strchr(p, arch->objdump.register_char);
3596 if (p == NULL)
3597 return -1;
3598
3599 regname = strdup(p);
3600 if (regname == NULL)
3601 return -1;
3602
3603 op_loc->reg1 = get_dwarf_regnum(regname, 0);
3604 free(regname);
3605
3606 /* Get the second register */
3607 if (op_loc->multi_regs) {
3608 p = strchr(p + 1, arch->objdump.register_char);
3609 if (p == NULL)
3610 return -1;
3611
3612 regname = strdup(p);
3613 if (regname == NULL)
3614 return -1;
3615
3616 op_loc->reg2 = get_dwarf_regnum(regname, 0);
3617 free(regname);
3618 }
3619 return 0;
3620}
3621
3622/**
3623 * annotate_get_insn_location - Get location of instruction
3624 * @arch: the architecture info
3625 * @dl: the target instruction
3626 * @loc: a buffer to save the data
3627 *
3628 * Get detailed location info (register and offset) in the instruction.
3629 * It fills in both the source and target operands and records whether
3630 * each accesses a memory location. The offset field is meaningful only when the
3631 * corresponding mem flag is set. The reg2 field is meaningful only
3632 * when multi_regs flag is set.
3633 *
3634 * Some examples on x86:
3635 *
3636 * mov (%rax), %rcx # src_reg1 = rax, src_mem = 1, src_offset = 0
3637 * # dst_reg1 = rcx, dst_mem = 0
3638 *
3639 * mov 0x18, %r8 # src_reg1 = -1, src_mem = 0
3640 * # dst_reg1 = r8, dst_mem = 0
3641 *
3642 * mov %rsi, 8(%rbx,%rcx,4) # src_reg1 = rsi, src_mem = 0, dst_multi_regs = 0
3643 * # dst_reg1 = rbx, dst_reg2 = rcx, dst_mem = 1
3644 * # dst_multi_regs = 1, dst_offset = 8
3645 */
3646int annotate_get_insn_location(struct arch *arch, struct disasm_line *dl,
3647 struct annotated_insn_loc *loc)
3648{
3649 struct ins_operands *ops;
3650 struct annotated_op_loc *op_loc;
3651 int i;
3652
3653 if (!strcmp(dl->ins.name, "lock"))
3654 ops = dl->ops.locked.ops;
3655 else
3656 ops = &dl->ops;
3657
3658 if (ops == NULL)
3659 return -1;
3660
3661 memset(loc, 0, sizeof(*loc));
3662
3663 for_each_insn_op_loc(loc, i, op_loc) {
3664 const char *insn_str = ops->source.raw;
3665 bool multi_regs = ops->source.multi_regs;
3666
3667 if (i == INSN_OP_TARGET) {
3668 insn_str = ops->target.raw;
3669 multi_regs = ops->target.multi_regs;
3670 }
3671
3672 /* Invalidate the register by default */
3673 op_loc->reg1 = -1;
3674 op_loc->reg2 = -1;
3675
3676 if (insn_str == NULL)
3677 continue;
3678
3679 if (strchr(insn_str, arch->objdump.memory_ref_char)) {
3680 op_loc->mem_ref = true;
3681 op_loc->multi_regs = multi_regs;
3682 extract_reg_offset(arch, insn_str, op_loc);
3683 } else {
3684 char *s = strdup(insn_str);
3685
3686 if (s) {
3687 op_loc->reg1 = get_dwarf_regnum(s, 0);
3688 free(s);
3689 }
3690 }
3691 }
3692
3693 return 0;
3694}
3695
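/*
 * Make sure the symbol has been disassembled so that find_disasm_line() has
 * instruction lines to search; source-only lines (offset == -1) are dropped.
 */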
3696static void symbol__ensure_annotate(struct map_symbol *ms, struct evsel *evsel)
3697{
3698 struct disasm_line *dl, *tmp_dl;
3699 struct annotation *notes;
3700
3701 notes = symbol__annotation(ms->sym);
3702 if (!list_empty(&notes->src->source))
3703 return;
3704
3705 if (symbol__annotate(ms, evsel, NULL) < 0)
3706 return;
3707
3708 /* remove non-insn disasm lines for simplicity */
3709 list_for_each_entry_safe(dl, tmp_dl, &notes->src->source, al.node) {
3710 if (dl->al.offset == -1) {
3711 list_del(&dl->al.node);
3712 free(dl);
3713 }
3714 }
3715}
3716
3717static struct disasm_line *find_disasm_line(struct symbol *sym, u64 ip)
3718{
3719 struct disasm_line *dl;
3720 struct annotation *notes;
3721
3722 notes = symbol__annotation(sym);
3723
3724 list_for_each_entry(dl, &notes->src->source, al.node) {
3725 if (sym->start + dl->al.offset == ip) {
3726 /*
3727 * llvm-objdump places "lock" on a separate line and
3728 * in that case we want to get the next line.
3729 */
3730 if (!strcmp(dl->ins.name, "lock") && *dl->ops.raw == '\0') {
3731 ip++;
3732 continue;
3733 }
3734 return dl;
3735 }
3736 }
3737 return NULL;
3738}
3739
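/* Find the per-instruction stat entry for @name, creating it on first use. */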
3740static struct annotated_item_stat *annotate_data_stat(struct list_head *head,
3741 const char *name)
3742{
3743 struct annotated_item_stat *istat;
3744
3745 list_for_each_entry(istat, head, list) {
3746 if (!strcmp(istat->name, name))
3747 return istat;
3748 }
3749
3750 istat = zalloc(sizeof(*istat));
3751 if (istat == NULL)
3752 return NULL;
3753
3754 istat->name = strdup(name);
3755 if (istat->name == NULL) {
3756 free(istat);
3757 return NULL;
3758 }
3759
3760 list_add_tail(&istat->list, head);
3761 return istat;
3762}
3763
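/*
 * On x86, push/pop/ret access the stack implicitly; such instructions are
 * reported as a generic "(stack operation)" type instead of being resolved
 * to a concrete data type.
 */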
3764static bool is_stack_operation(struct arch *arch, struct disasm_line *dl)
3765{
3766 if (arch__is(arch, "x86")) {
3767 if (!strncmp(dl->ins.name, "push", 4) ||
3768 !strncmp(dl->ins.name, "pop", 3) ||
3769 !strncmp(dl->ins.name, "ret", 3))
3770 return true;
3771 }
3772
3773 return false;
3774}
3775
3776u64 annotate_calc_pcrel(struct map_symbol *ms, u64 ip, int offset,
3777 struct disasm_line *dl)
3778{
3779 struct annotation *notes;
3780 struct disasm_line *next;
3781 u64 addr;
3782
3783 notes = symbol__annotation(ms->sym);
3784 /*
3785 * PC-relative addressing starts from the next instruction address,
3786 * but the IP is for the current instruction. Since disasm_line
3787 * doesn't have the instruction size, calculate it using the next
3788 * disasm_line. If it's the last one, we can use the symbol's end
3789 * address directly.
3790 */
3791 if (&dl->al.node == notes->src->source.prev)
3792 addr = ms->sym->end + offset;
3793 else {
3794 next = list_next_entry(dl, al.node);
3795 addr = ip + (next->al.offset - dl->al.offset) + offset;
3796 }
3797 return map__rip_2objdump(ms->map, addr);
3798}
3799
3800/**
3801 * hist_entry__get_data_type - find data type for given hist entry
3802 * @he: hist entry
3803 *
3804 * This function first annotates the instruction at @he->ip and extracts
3805 * register and offset info from it. Then it searches the DWARF debug
3806 * info to get a variable and type information using the address, register,
3807 * and offset.
3808 */
3809struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he)
3810{
3811 struct map_symbol *ms = &he->ms;
3812 struct evsel *evsel = hists_to_evsel(he->hists);
3813 struct arch *arch;
3814 struct disasm_line *dl;
3815 struct annotated_insn_loc loc;
3816 struct annotated_op_loc *op_loc;
3817 struct annotated_data_type *mem_type;
3818 struct annotated_item_stat *istat;
3819 u64 ip = he->ip, addr = 0;
3820 const char *var_name = NULL;
3821 int var_offset;
3822 int i;
3823
3824 ann_data_stat.total++;
3825
3826 if (ms->map == NULL || ms->sym == NULL) {
3827 ann_data_stat.no_sym++;
3828 return NULL;
3829 }
3830
3831 if (!symbol_conf.init_annotation) {
3832 ann_data_stat.no_sym++;
3833 return NULL;
3834 }
3835
3836 if (evsel__get_arch(evsel, &arch) < 0) {
3837 ann_data_stat.no_insn++;
3838 return NULL;
3839 }
3840
3841 /* Make sure it runs objdump to get disasm of the function */
3842 symbol__ensure_annotate(ms, evsel);
3843
3844 /*
3845 * Get a disasm_line to extract the location from the insn.
3846 * This is too slow...
3847 */
3848 dl = find_disasm_line(ms->sym, ip);
3849 if (dl == NULL) {
3850 ann_data_stat.no_insn++;
3851 return NULL;
3852 }
3853
3854retry:
3855 istat = annotate_data_stat(&ann_insn_stat, dl->ins.name);
3856 if (istat == NULL) {
3857 ann_data_stat.no_insn++;
3858 return NULL;
3859 }
3860
3861 if (annotate_get_insn_location(arch, dl, &loc) < 0) {
3862 ann_data_stat.no_insn_ops++;
3863 istat->bad++;
3864 return NULL;
3865 }
3866
3867 if (is_stack_operation(arch, dl)) {
3868 istat->good++;
3869 he->mem_type_off = 0;
3870 return &stackop_type;
3871 }
3872
3873 for_each_insn_op_loc(&loc, i, op_loc) {
3874 if (!op_loc->mem_ref)
3875 continue;
3876
3877 /* Recalculate IP because of LOCK prefix or insn fusion */
3878 ip = ms->sym->start + dl->al.offset;
3879
3880 var_offset = op_loc->offset;
3881
3882 /* PC-relative addressing */
3883 if (op_loc->reg1 == DWARF_REG_PC) {
3884 struct addr_location al;
3885 struct symbol *var;
3886 u64 map_addr;
3887
3888 addr = annotate_calc_pcrel(ms, ip, op_loc->offset, dl);
3889 /* Kernel symbols might be relocated */
3890 map_addr = addr + map__reloc(ms->map);
3891
3892 addr_location__init(&al);
3893 var = thread__find_symbol_fb(he->thread, he->cpumode,
3894 map_addr, &al);
3895 if (var) {
3896 var_name = var->name;
3897 /* Calculate the type offset from the start of the variable */
3898 var_offset = map_addr - map__unmap_ip(al.map, var->start);
3899 }
3900 addr_location__exit(&al);
3901 }
3902
3903 mem_type = find_data_type(ms, ip, op_loc, addr, var_name);
3904 if (mem_type)
3905 istat->good++;
3906 else
3907 istat->bad++;
3908
3909 if (mem_type && var_name)
3910 op_loc->offset = var_offset;
3911
3912 if (symbol_conf.annotate_data_sample) {
3913 annotated_data_type__update_samples(mem_type, evsel,
3914 op_loc->offset,
3915 he->stat.nr_events,
3916 he->stat.period);
3917 }
3918 he->mem_type_off = op_loc->offset;
3919 return mem_type;
3920 }
3921
3922 /*
3923 * Some instructions can be fused and the actual memory access came
3924 * from the previous instruction.
3925 */
3926 if (dl->al.offset > 0) {
3927 struct disasm_line *prev_dl;
3928
3929 prev_dl = list_prev_entry(dl, al.node);
3930 if (ins__is_fused(arch, prev_dl->ins.name, dl->ins.name)) {
3931 dl = prev_dl;
3932 goto retry;
3933 }
3934 }
3935
3936 ann_data_stat.no_mem_ops++;
3937 istat->bad++;
3938 return NULL;
3939}